Update app.py
app.py CHANGED

@@ -52,24 +52,10 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed):
         image = pipe(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=.99).images[0]
         torch.cuda.empty_cache()
         return image
-
-    torch.cuda.max_memory_allocated(device=device)
-    torch.cuda.empty_cache()
-    pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium", torch_dtype=torch.float16).to(device)
-    pipe.enable_xformers_memory_efficient_attention()
-    torch.cuda.empty_cache()
-    image = pipe(
-        prompt=Prompt,
-        height=height,
-        width=width,
-        negative_prompt=negative_prompt,
-        guidance_scale=scale,
-        num_images_per_prompt=1,
-        num_inference_steps=steps).images[0]
-    torch.cuda.empty_cache()
+
     return image
 
-gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Animagine XL 4', "FXL"
+gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Animagine XL 4', "FXL"], value='PhotoReal', label='Choose Model'),
     gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
     gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
     gr.Slider(512, 1024, 768, step=128, label='Height'),
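
The commit does two things: it deletes the Stable Diffusion 3.5 branch from genie() (the StableDiffusion3Pipeline load and call shown above), and it completes the gr.Radio line of the gr.Interface call with a default value and a label. For readers unfamiliar with the Gradio side, below is a minimal, runnable sketch of that interface wiring. Only the Radio, Textbox, and Slider lines are taken from the hunk; the stubbed genie(), the outputs argument, and the launch() call are assumptions. Per the hunk header, the real genie() also takes width, scale, steps, and seed, so the real inputs list presumably continues past the Height slider.

# Minimal sketch of the Gradio wiring touched by this commit.
# Only the Radio/Textbox/Slider component lines come from the hunk above;
# the stub genie(), the outputs argument, and launch() are assumptions.
import gradio as gr
from PIL import Image


def genie(Model, Prompt, negative_prompt, height):
    # Stub standing in for the real model dispatch in app.py: it returns a
    # blank square canvas at the requested height instead of a generated image.
    return Image.new("RGB", (int(height), int(height)), "white")


demo = gr.Interface(
    fn=genie,
    inputs=[
        gr.Radio(['PhotoReal', 'Animagine XL 4', "FXL"], value='PhotoReal', label='Choose Model'),
        gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
        gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
        gr.Slider(512, 1024, 768, step=128, label='Height'),
        # The real app continues with width, guidance scale, steps, and seed
        # inputs; they fall outside this hunk and are omitted here.
    ],
    outputs=gr.Image(label='Generated Image'),  # assumption: not shown in the hunk
)

if __name__ == "__main__":
    demo.launch()  # assumption: the real launch call is not shown in the hunk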