Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ from typing import Iterable
 from gradio.themes import Soft
 from gradio.themes.utils import colors, fonts, sizes
 
-
+# --- Mock Spaces ---
 class MockSpaces:
     def GPU(self, duration=0):
         def decorator(func):
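The `MockSpaces` shim stands in for Hugging Face's `spaces` package so that `@spaces.GPU(...)` decorators still resolve when the app runs outside HF Spaces (e.g., on RunPod). Line 15 falls between the two hunks, so its body is not shown; a complete no-op version of the pattern would look roughly like this, where the `return func` body and the `run_edit` function are assumptions for illustration:

```python
# A minimal sketch of the shim, assuming the undiffed line 15 simply
# returns the wrapped function unchanged (a standard no-op decorator).
class MockSpaces:
    def GPU(self, duration=0):
        def decorator(func):
            return func  # assumed body: hand the function back untouched
        return decorator

spaces = MockSpaces()

@spaces.GPU(duration=30)
def run_edit():  # hypothetical function for illustration
    return "decorated, but executed with no Spaces GPU scheduling"
```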
@@ -16,6 +16,7 @@ class MockSpaces:
         return decorator
 spaces = MockSpaces()
 
+# --- Theme Setup ---
 colors.steel_blue = colors.Color(
     name="steel_blue",
     c50="#EBF3F8",
@@ -83,6 +84,7 @@ class SteelBlueTheme(Soft):
 )
 steel_blue_theme = SteelBlueTheme()
 
+# --- Debug Info ---
 print("CUDA_VISIBLE_DEVICES:", os.environ.get("CUDA_VISIBLE_DEVICES"))
 print("GPU Count:", torch.cuda.device_count())
 
@@ -91,39 +93,52 @@ from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
 from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
 from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
 
-
-
-
+# --- FIX: Determine Device Strategy ---
+# In the HF build environment (no GPU), use CPU to avoid errors
+# On RunPod (GPU available), use the "balanced" strategy (the pipeline does not support "auto")
+if torch.cuda.device_count() > 0:
+    device_strategy = "balanced"  # Pipeline-level strategy
+    transformer_strategy = "auto"  # Transformer-level strategy
+    dtype = torch.bfloat16
+    print(f"Running on GPU with strategy: {device_strategy}")
+else:
+    device_strategy = "cpu"
+    transformer_strategy = "cpu"
+    dtype = torch.float32  # CPU usually prefers float32
+    print("Running on CPU (Build Environment detected)")
+
+print("Loading Transformer...")
 transformer_model = QwenImageTransformer2DModel.from_pretrained(
     "linoyts/Qwen-Image-Edit-Rapid-AIO",
     subfolder='transformer',
     torch_dtype=dtype,
-    device_map=
+    device_map=transformer_strategy
 )
 
 print("Loading Pipeline...")
-
 pipe = QwenImageEditPlusPipeline.from_pretrained(
     "Qwen/Qwen-Image-Edit-2509",
     transformer=transformer_model,
     torch_dtype=dtype,
-    device_map=
+    device_map=device_strategy  # <--- must be "balanced" or "cpu" here, not "auto"
 )
 
-
-
-
-pipe.load_lora_weights("
-pipe.load_lora_weights("dx8152/Qwen-
-pipe.load_lora_weights("dx8152/Qwen-Edit-2509-
-pipe.load_lora_weights("
-pipe.load_lora_weights("
-pipe.load_lora_weights("
-
-
-
-
-
+# Only load LoRAs and optimization if on GPU to avoid build errors
+if torch.cuda.device_count() > 0:
+    print("Loading LoRAs...")
+    pipe.load_lora_weights("autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime", weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors", adapter_name="anime")
+    pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multiple-angles", weight_name="镜头转换.safetensors", adapter_name="multiple-angles")
+    pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Light_restoration", weight_name="移除光影.safetensors", adapter_name="light-restoration")
+    pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight", weight_name="Qwen-Edit-Relight.safetensors", adapter_name="relight")
+    pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multi-Angle-Lighting", weight_name="多角度灯光-251116.safetensors", adapter_name="multi-angle-lighting")
+    pipe.load_lora_weights("tlennon-ie/qwen-edit-skin", weight_name="qwen-edit-skin_1.1_000002750.safetensors", adapter_name="edit-skin")
+    pipe.load_lora_weights("lovis93/next-scene-qwen-image-lora-2509", weight_name="next-scene_lora-v2-3000.safetensors", adapter_name="next-scene")
+    pipe.load_lora_weights("vafipas663/Qwen-Edit-2509-Upscale-LoRA", weight_name="qwen-edit-enhance_64-v3_000001000.safetensors", adapter_name="upscale-image")
+
+    try:
+        pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
+    except Exception as e:
+        print(f"Warning: FA3 set skipped: {e}")
 
 MAX_SEED = np.iinfo(np.int32).max
 
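The device-strategy switch is the core of this commit: per its own comment, the pipeline rejects `device_map="auto"`, which is only valid for individual models loaded through accelerate, while `"balanced"` spreads the pipeline's components (transformer, text encoders, VAE) across all visible GPUs. A minimal sketch of the same detection pattern on a generic diffusers pipeline, with a hypothetical model id; when a map strategy is used, the resulting placement should be inspectable afterwards:

```python
# Sketch of the GPU/CPU detection pattern, assuming a generic pipeline.
# "some-org/some-pipeline" is a hypothetical model id for illustration;
# the commit itself passes "cpu" rather than None in the CPU branch.
import torch
from diffusers import DiffusionPipeline

use_gpu = torch.cuda.device_count() > 0

pipe = DiffusionPipeline.from_pretrained(
    "some-org/some-pipeline",
    torch_dtype=torch.bfloat16 if use_gpu else torch.float32,
    device_map="balanced" if use_gpu else None,  # pipelines reject "auto"
)

if use_gpu:
    # With "balanced", each component is pinned to a device; printing the
    # map confirms how the pipeline was split across the two A40s.
    print(pipe.hf_device_map)
```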
@@ -148,6 +163,10 @@ def infer(input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scal
     if input_image is None:
         raise gr.Error("Please upload an image to edit.")
 
+    # If there is no GPU (e.g., in the HF preview), fail fast with a clear error
+    if torch.cuda.device_count() == 0:
+        raise gr.Error("Running on CPU-only environment. Please run on GPU.")
+
     adapters_map = {
         "Photo-to-Anime": "anime",
         "Multiple-Angles": "multiple-angles",
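`adapters_map` translates the UI dropdown labels into the `adapter_name`s registered by `load_lora_weights` above. The rest of `infer` is outside the diff, but with diffusers' PEFT integration the lookup is presumably consumed along these lines:

```python
# Hedged sketch: `select_adapter` is a hypothetical helper, not a
# function from the file. set_adapters() activates one loaded LoRA by
# the adapter_name it was registered under and disables the others.
def select_adapter(pipe, lora_adapter: str) -> None:
    adapters_map = {
        "Photo-to-Anime": "anime",
        "Multiple-Angles": "multiple-angles",
    }
    name = adapters_map[lora_adapter]
    pipe.set_adapters([name], adapter_weights=[1.0])
```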
@@ -185,6 +204,9 @@ def infer(input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scal
 
 @spaces.GPU(duration=30)
 def infer_example(input_image, prompt, lora_adapter):
+    # Skip during the HF preview build process
+    if torch.cuda.device_count() == 0:
+        return None, 0
     input_pil = input_image.convert("RGB")
     result, seed = infer(input_pil, prompt, lora_adapter, 0, True, 1.0, 4)
     return result, seed
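Bailing out of `infer_example` with `(None, 0)` keeps Gradio's example handling from invoking the pipeline on the GPU-less HF build machine. The UI section that wires it up is outside the diff, but it presumably looks something like this, where the example row and every component name are hypothetical:

```python
# Hypothetical wiring, assuming the Space caches its examples at build
# time; on a CPU-only builder infer_example returns a placeholder
# instead of running inference.
gr.Examples(
    examples=[["example.png", "Turn this photo into anime", "Photo-to-Anime"]],
    inputs=[input_image, prompt, lora_adapter],
    outputs=[result_image, seed_output],
    fn=infer_example,
    cache_examples=True,
)
```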
@@ -196,8 +218,7 @@ css="""
 
 with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
     with gr.Column(elem_id="col-container"):
-        gr.Markdown("# **Qwen-Image-Edit-2509-LoRAs-Fast (2xA40
-        gr.Markdown("Forked & Optimized for RunPod 2xGPU Split.")
+        gr.Markdown("# **Qwen-Image-Edit-2509-LoRAs-Fast (2xA40 Ready)**", elem_id="main-title")
 
         with gr.Row(equal_height=True):
             with gr.Column():