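# Gradio app for Qwen-Image-Edit-2509: image editing with selectable LoRA style adapters.
# Written to run both on Hugging Face Spaces and on local/RunPod GPUs.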
import os
import gradio as gr
import numpy as np
import torch
import random
from PIL import Image
from typing import Iterable
from gradio.themes import Soft
from gradio.themes.utils import colors, fonts, sizes
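
# `spaces` provides the Hugging Face Spaces GPU decorator; fall back to a no-op
# stub with the same signature when the package is not installed (local runs).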
try:
    import spaces
except ImportError:
    class MockSpaces:
        def GPU(self, duration=0):
            def decorator(func):
                return func
            return decorator
    spaces = MockSpaces()
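
# On a real local/RunPod GPU, replace spaces.GPU with a pass-through decorator
# so calls are not routed through the Spaces queue.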
if torch.cuda.is_available():
    print("🚀 RunPod/Local GPU detected: Bypassing Hugging Face Spaces queue.")
    def gpu_bypass_decorator(duration=0):
        def decorator(func):
            return func
        return decorator
    spaces.GPU = gpu_bypass_decorator
else:
    print("🐢 No GPU detected: Using standard Spaces logic (or Build Mode).")
# ----------------------------------------
colors.steel_blue = colors.Color(
    name="steel_blue",
    c50="#EBF3F8",
    c100="#D3E5F0",
    c200="#A8CCE1",
    c300="#7DB3D2",
    c400="#529AC3",
    c500="#4682B4",
    c600="#3E72A0",
    c700="#36638C",
    c800="#2E5378",
    c900="#264364",
    c950="#1E3450",
)
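
# Soft-based Gradio theme that applies the steel-blue palette to buttons, sliders, and backgrounds.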
class SteelBlueTheme(Soft):
    def __init__(
        self,
        *,
        primary_hue: colors.Color | str = colors.gray,
        secondary_hue: colors.Color | str = colors.steel_blue,
        neutral_hue: colors.Color | str = colors.slate,
        text_size: sizes.Size | str = sizes.text_lg,
        font: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
        ),
        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
        ),
    ):
        super().__init__(
            primary_hue=primary_hue,
            secondary_hue=secondary_hue,
            neutral_hue=neutral_hue,
            text_size=text_size,
            font=font,
            font_mono=font_mono,
        )
        super().set(
            background_fill_primary="*primary_50",
            background_fill_primary_dark="*primary_900",
            body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
            body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
            button_primary_text_color="white",
            button_primary_text_color_hover="white",
            button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
            button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
            button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_800)",
            button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_500)",
            button_secondary_text_color="black",
            button_secondary_text_color_hover="white",
            button_secondary_background_fill="linear-gradient(90deg, *primary_300, *primary_300)",
            button_secondary_background_fill_hover="linear-gradient(90deg, *primary_400, *primary_400)",
            button_secondary_background_fill_dark="linear-gradient(90deg, *primary_500, *primary_600)",
            button_secondary_background_fill_hover_dark="linear-gradient(90deg, *primary_500, *primary_500)",
            slider_color="*secondary_500",
            slider_color_dark="*secondary_600",
            block_title_text_weight="600",
            block_border_width="3px",
            block_shadow="*shadow_drop_lg",
            button_primary_shadow="*shadow_drop_lg",
            button_large_padding="11px",
            color_accent_soft="*primary_100",
            block_label_background_fill="*primary_200",
        )

steel_blue_theme = SteelBlueTheme()
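
# Diffusers scheduler and Qwen-Image-Edit pipeline components from the `qwenimage` module.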
from diffusers import FlowMatchEulerDiscreteScheduler
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
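
# Load the model stack only when a GPU is available; the Spaces build step runs without one.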
pipe = None

if torch.cuda.is_available():
    print("🚀 GPU detected! Initializing model for RunPod Environment...")
    dtype = torch.bfloat16

    # 1. Load Transformer (device_map="auto")
    print("Loading Transformer...")
    transformer_model = QwenImageTransformer2DModel.from_pretrained(
        "linoyts/Qwen-Image-Edit-Rapid-AIO",
        subfolder='transformer',
        torch_dtype=dtype,
        device_map="auto"
    )

    # 2. Load Pipeline (device_map="balanced")
    print("Loading Pipeline...")
    pipe = QwenImageEditPlusPipeline.from_pretrained(
        "Qwen/Qwen-Image-Edit-2509",
        transformer=transformer_model,
        torch_dtype=dtype,
        device_map="balanced"
    )

    # 3. Load LoRAs
    print("Loading LoRAs...")
    pipe.load_lora_weights("autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime", weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors", adapter_name="anime")
    pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multiple-angles", weight_name="镜头转换.safetensors", adapter_name="multiple-angles")  # "镜头转换" = "camera angle change"
    pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Light_restoration", weight_name="移除光影.safetensors", adapter_name="light-restoration")  # "移除光影" = "remove light and shadow"
    pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight", weight_name="Qwen-Edit-Relight.safetensors", adapter_name="relight")
    pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multi-Angle-Lighting", weight_name="多角度灯光-251116.safetensors", adapter_name="multi-angle-lighting")  # "多角度灯光" = "multi-angle lighting"
    pipe.load_lora_weights("tlennon-ie/qwen-edit-skin", weight_name="qwen-edit-skin_1.1_000002750.safetensors", adapter_name="edit-skin")
    pipe.load_lora_weights("lovis93/next-scene-qwen-image-lora-2509", weight_name="next-scene_lora-v2-3000.safetensors", adapter_name="next-scene")
    pipe.load_lora_weights("vafipas663/Qwen-Edit-2509-Upscale-LoRA", weight_name="qwen-edit-enhance_64-v3_000001000.safetensors", adapter_name="upscale-image")

    # Optional FlashAttention-3 attention processor; skipped with a warning if unsupported.
    try:
        pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
    except Exception as e:
        print(f"Warning: FA3 set skipped: {e}")
else:
    print("🐢 No GPU detected (HF Build Environment). SKIPPING MODEL LOAD.")
MAX_SEED = np.iinfo(np.int32).max
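
# Fit an image within 1024 px on the long side, preserving aspect ratio and rounding to multiples of 8.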
def update_dimensions_on_upload(image):
    if image is None:
        return 1024, 1024

    original_width, original_height = image.size

    if original_width > original_height:
        new_width = 1024
        aspect_ratio = original_height / original_width
        new_height = int(new_width * aspect_ratio)
    else:
        new_height = 1024
        aspect_ratio = original_width / original_height
        new_width = int(new_height * aspect_ratio)

    new_width = (new_width // 8) * 8
    new_height = (new_height // 8) * 8

    return new_width, new_height
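
# Main inference entry point: activates the selected LoRA adapter, resizes the input, and runs the edit.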
def infer(input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps, progress=gr.Progress(track_tqdm=True)):
    if pipe is None:
        raise gr.Error("Model not loaded. Are you running on GPU?")
    if input_image is None:
        raise gr.Error("Please upload an image to edit.")

    adapters_map = {
        "Photo-to-Anime": "anime",
        "Multiple-Angles": "multiple-angles",
        "Light-Restoration": "light-restoration",
        "Relight": "relight",
        "Multi-Angle-Lighting": "multi-angle-lighting",
        "Edit-Skin": "edit-skin",
        "Next-Scene": "next-scene",
        "Upscale-Image": "upscale-image"
    }
    if lora_adapter in adapters_map:
        pipe.set_adapters([adapters_map[lora_adapter]], adapter_weights=[1.0])

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=pipe.device).manual_seed(seed)

    negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"

    original_image = input_image.convert("RGB")
    width, height = update_dimensions_on_upload(original_image)

    result = pipe(
        image=original_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        num_inference_steps=steps,
        generator=generator,
        true_cfg_scale=guidance_scale,
    ).images[0]

    return result, seed
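
# Lightweight wrapper with fixed fast settings (randomized seed, guidance 1.0, 4 steps), e.g. for example galleries.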
def infer_example(input_image, prompt, lora_adapter):
    if pipe is None:
        return None, 0
    input_pil = input_image.convert("RGB")
    result, seed = infer(input_pil, prompt, lora_adapter, 0, True, 1.0, 4)
    return result, seed
css = """
#col-container { margin: 0 auto; max-width: 960px; }
#main-title h1 { font-size: 2.1em !important; }
"""
with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# **Qwen-Image-Edit-2509 (2x A40 Ready)**", elem_id="main-title")

        with gr.Row(equal_height=True):
            with gr.Column():
                input_image = gr.Image(label="Upload Image", type="pil", height=290)
                prompt = gr.Text(label="Edit Prompt", show_label=True, placeholder="e.g., transform into anime..")
                run_button = gr.Button("Edit Image", variant="primary")
            with gr.Column():
                output_image = gr.Image(label="Output Image", interactive=False, format="png", height=350)

        with gr.Row():
            lora_adapter = gr.Dropdown(
                label="Choose Editing Style",
                choices=["Photo-to-Anime", "Multiple-Angles", "Light-Restoration", "Multi-Angle-Lighting", "Upscale-Image", "Relight", "Next-Scene", "Edit-Skin"],
                value="Photo-to-Anime"
            )

        with gr.Accordion("Advanced Settings", open=False, visible=False):
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
            guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
            steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)

    run_button.click(fn=infer, inputs=[input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps], outputs=[output_image, seed])
if __name__ == "__main__":
    demo.queue(max_size=30).launch(server_name="0.0.0.0", server_port=7860, ssr_mode=False)