rahul7star committed (verified)
Commit 76410ab · 1 Parent(s): 157240a

Update app_exp.py

Files changed (1)
  1. app_exp.py +129 -124
app_exp.py CHANGED
@@ -5,53 +5,46 @@ import sys
 import subprocess
 import tempfile
 import numpy as np
-import spaces
-import importlib
 import site
+import importlib
 from PIL import Image
 from huggingface_hub import snapshot_download, hf_hub_download
 
 # ============================================================
-# 1️⃣ FlashAttention 3 Setup (Auto-install from HF repo)
+# 0️⃣ FlashAttention 3 Setup
 # ============================================================
-# try:
-#     print("Attempting to download and install FlashAttention 3 wheel...")
-#     fa3_wheel = hf_hub_download(
-#         repo_id="rahul7star/flash-attn-3",
-#         repo_type="model",
-#         filename="128/flash_attn_3-3.0.0b1-cp39-abi3-linux_x86_64.whl",
-#     )
-#     subprocess.run(["pip", "install", fa3_wheel], check=True)
-#     site.addsitedir(site.getsitepackages()[0])
-#     importlib.invalidate_caches()
-#     print("✅ FlashAttention 3 installed successfully.")
-# except Exception as e:
-#     print(f"⚠️ FlashAttention install failed: {e}")
-#     print("Proceeding without FA3 acceleration...")
+try:
+    print("Attempting to download and install FlashAttention wheel...")
+    flash_attention_wheel = hf_hub_download(
+        repo_id="rahul7star/flash-attn-3",
+        repo_type="model",
+        filename="128/flash_attn_3-3.0.0b1-cp39-abi3-linux_x86_64.whl",
+    )
+    subprocess.run(["pip", "install", flash_attention_wheel], check=True)
+    site.addsitedir(site.getsitepackages()[0])
+    importlib.invalidate_caches()
+    print("✅ FlashAttention installed successfully.")
+    enable_fa3 = True
+except Exception as e:
+    print(f"⚠️ Could not install FlashAttention: {e}")
+    print("Continuing without FlashAttention...")
+    enable_fa3 = False
 
 # ============================================================
-# 2️⃣ Define model and repo paths
+# 1️⃣ Repository Setup
 # ============================================================
 REPO_PATH = "LongCat-Video"
 CHECKPOINT_DIR = os.path.join(REPO_PATH, "weights", "LongCat-Video")
 
-# ============================================================
-# 3️⃣ Clone the model repo if needed
-# ============================================================
 if not os.path.exists(REPO_PATH):
     print(f"Cloning LongCat-Video repository to '{REPO_PATH}'...")
     subprocess.run(
         ["git", "clone", "https://github.com/meituan-longcat/LongCat-Video.git", REPO_PATH],
         check=True
     )
-    print("✅ Repository cloned successfully.")
 
-# Make repo importable
 sys.path.insert(0, os.path.abspath(REPO_PATH))
 
-# ============================================================
-# 4️⃣ Import model modules after repo setup
-# ============================================================
 from longcat_video.pipeline_longcat_video import LongCatVideoPipeline
 from longcat_video.modules.scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler
 from longcat_video.modules.autoencoder_kl_wan import AutoencoderKLWan
@@ -60,9 +53,7 @@ from longcat_video.context_parallel import context_parallel_util
 from transformers import AutoTokenizer, UMT5EncoderModel
 from diffusers.utils import export_to_video
 
-# ============================================================
-# 5️⃣ Download weights (snapshot)
-# ============================================================
+# Download weights if not present
 if not os.path.exists(CHECKPOINT_DIR):
     print(f"Downloading model weights to '{CHECKPOINT_DIR}'...")
     snapshot_download(
@@ -71,33 +62,33 @@ if not os.path.exists(CHECKPOINT_DIR):
         local_dir_use_symlinks=False,
         ignore_patterns=["*.md", "*.gitattributes", "assets/*"]
     )
-    print("✅ Model weights ready.")
 
 # ============================================================
-# 6️⃣ Initialize model pipeline
+# 2️⃣ Device & Models
 # ============================================================
-pipe = None
 device = "cuda" if torch.cuda.is_available() else "cpu"
 torch_dtype = torch.bfloat16 if device == "cuda" else torch.float32
 
-print("--- Initializing Models (once at startup) ---")
+print(f"Device: {device}, dtype: {torch_dtype}")
+
+pipe = None
 try:
     cp_split_hw = context_parallel_util.get_optimal_split(1)
 
     tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT_DIR, subfolder="tokenizer", torch_dtype=torch_dtype)
     text_encoder = UMT5EncoderModel.from_pretrained(CHECKPOINT_DIR, subfolder="text_encoder", torch_dtype=torch_dtype)
+
     vae = AutoencoderKLWan.from_pretrained(CHECKPOINT_DIR, subfolder="vae", torch_dtype=torch_dtype)
     scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(CHECKPOINT_DIR, subfolder="scheduler", torch_dtype=torch_dtype)
 
-    # ✅ Enable FA3 acceleration
     dit = LongCatVideoTransformer3DModel.from_pretrained(
         CHECKPOINT_DIR,
-        enable_flashattn3=True,
+        enable_flashattn3=enable_fa3,
         enable_flashattn2=False,
         enable_xformers=True,
         subfolder="dit",
         cp_split_hw=cp_split_hw,
-        torch_dtype=torch_dtype,
+        torch_dtype=torch_dtype
     )
 
     pipe = LongCatVideoPipeline(
@@ -106,43 +97,27 @@ try:
         vae=vae,
         scheduler=scheduler,
         dit=dit,
-    ).to(device)
+    )
+    pipe.to(device)
+
+    # Load LoRA weights
+    pipe.dit.load_lora(os.path.join(CHECKPOINT_DIR, 'lora/cfg_step_lora.safetensors'), 'cfg_step_lora')
+    pipe.dit.load_lora(os.path.join(CHECKPOINT_DIR, 'lora/refinement_lora.safetensors'), 'refinement_lora')
 
-    # Load LoRAs
-    lora_dir = os.path.join(CHECKPOINT_DIR, "lora")
-    pipe.dit.load_lora(os.path.join(lora_dir, "cfg_step_lora.safetensors"), "cfg_step_lora")
-    pipe.dit.load_lora(os.path.join(lora_dir, "refinement_lora.safetensors"), "refinement_lora")
+    print("✅ Models loaded successfully")
 
-    print("✅ Models loaded successfully.")
 except Exception as e:
-    print(f"❌ FATAL: Model initialization failed.\n{e}")
+    print(f"❌ Failed to load models: {e}")
     pipe = None
 
 # ============================================================
-# 7️⃣ GPU cleanup utility
+# 3️⃣ Generation Helpers
 # ============================================================
 def torch_gc():
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
         torch.cuda.ipc_collect()
 
-# ============================================================
-# 8️⃣ Dynamic GPU duration logic
-# ============================================================
-def compute_duration(mode, prompt, neg_prompt, image, height, width, resolution, seed, use_distill, use_refine, progress):
-    """
-    Adaptive GPU time allocation based on resolution & refinement usage.
-    """
-    base = 120  # baseline (seconds)
-    if resolution == "720p": base += 60
-    if use_refine: base += 60
-    if use_distill: base -= 30
-    return min(base, 240)  # cap at 4 min
-
-# ============================================================
-# 9️⃣ Generation function
-# ============================================================
-@spaces.GPU(duration=compute_duration)
 def generate_video(
     mode,
     prompt,
@@ -152,117 +127,147 @@ def generate_video(
     seed,
     use_distill,
     use_refine,
+    duration_sec,
     progress=gr.Progress(track_tqdm=True)
 ):
     if pipe is None:
-        raise gr.Error("⚠️ Models failed to load. Restart the app.")
+        raise gr.Error(" Models failed to load")
 
+    # Adaptive FPS for faster testing
+    fps = 15 if use_distill else 30
+    num_frames = int(duration_sec * fps)
     generator = torch.Generator(device=device).manual_seed(int(seed))
-    num_frames = 48  # shorter for faster test runs
-
     is_distill = use_distill or use_refine
-    pipe.dit.enable_loras(["cfg_step_lora"] if is_distill else [])
+
+    # Stage 1
+    progress(0.2, desc="Stage 1: Base Video Generation")
+    pipe.dit.enable_loras(['cfg_step_lora'] if is_distill else [])
 
     num_inference_steps = 12 if is_distill else 24
     guidance_scale = 2.0 if is_distill else 4.0
+    curr_neg_prompt = "" if is_distill else neg_prompt
 
-    # --- Stage 1 ---
-    progress(0.2, desc="Stage 1: Generating Base Video...")
     if mode == "t2v":
         output = pipe.generate_t2v(
             prompt=prompt,
-            negative_prompt=neg_prompt,
+            negative_prompt=curr_neg_prompt,
             height=height,
             width=width,
             num_frames=num_frames,
             num_inference_steps=num_inference_steps,
             use_distill=is_distill,
             guidance_scale=guidance_scale,
-            generator=generator,
+            generator=generator
         )[0]
     else:
         pil_img = Image.fromarray(image)
         output = pipe.generate_i2v(
            image=pil_img,
            prompt=prompt,
-            negative_prompt=neg_prompt,
+            negative_prompt=curr_neg_prompt,
            resolution=resolution,
            num_frames=num_frames,
            num_inference_steps=num_inference_steps,
            use_distill=is_distill,
            guidance_scale=guidance_scale,
-            generator=generator,
+            generator=generator
        )[0]
 
     pipe.dit.disable_all_loras()
     torch_gc()
 
-    # --- Stage 2 ---
+    # Stage 2: Optional refinement
     if use_refine:
-        progress(0.6, desc="Stage 2: Refining Video...")
-        pipe.dit.enable_loras(["refinement_lora"])
-        refined = pipe.generate_refine(
-            image=Image.fromarray(image) if mode == "i2v" else None,
+        progress(0.5, desc="Stage 2: Refinement")
+        pipe.dit.enable_loras(['refinement_lora'])
+        pipe.dit.enable_bsa()
+
+        stage1_video_pil = [(frame * 255).astype(np.uint8) for frame in output]
+        stage1_video_pil = [Image.fromarray(img) for img in stage1_video_pil]
+        refine_image = Image.fromarray(image) if mode == 'i2v' else None
+
+        output = pipe.generate_refine(
+            image=refine_image,
             prompt=prompt,
-            stage1_video=[Image.fromarray((f * 255).astype(np.uint8)) for f in output],
-            num_cond_frames=1 if mode == "i2v" else 0,
-            num_inference_steps=20,
-            generator=generator,
+            stage1_video=stage1_video_pil,
+            num_cond_frames=1 if mode=='i2v' else 0,
+            num_inference_steps=50,
+            generator=generator
         )[0]
-        output = refined
+
        pipe.dit.disable_all_loras()
+        pipe.dit.disable_bsa()
        torch_gc()
 
-    # --- Export ---
-    progress(1.0, desc="Exporting video...")
-    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp_vid:
-        export_to_video(output, tmp_vid.name, fps=24)
-        return tmp_vid.name
+    # Export video
+    progress(1.0, desc="Exporting video")
+    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_video_file:
+        export_to_video(output, temp_video_file.name, fps=fps)
+        return temp_video_file.name
 
 # ============================================================
-# 🔟 Gradio UI
+# 4️⃣ Gradio UI
 # ============================================================
-css = ".fillable{max-width:960px!important}"
+css = ".fillable{max-width: 960px !important}"
+
 with gr.Blocks(css=css) as demo:
-    gr.Markdown("# 🎬 LongCat-Video + FA3 Accelerated 🚀")
-    gr.Markdown("13.6B parameter dense video model with FlashAttention 3 for speed ⚡")
+    gr.Markdown("# 🎬 LongCat-Video")
+    gr.Markdown("13.6B parameter dense video-generation model by Meituan [[Model](https://huggingface.co/meituan-longcat/LongCat-Video)]")
 
-    with gr.Tabs():
-        # Text-to-Video
+    with gr.Tabs() as tabs:
        with gr.TabItem("Text-to-Video"):
-            prompt_t2v = gr.Textbox(label="Prompt", lines=3, placeholder="A cinematic shot of a corgi running on the beach.")
-            neg_t2v = gr.Textbox(label="Negative Prompt", value="ugly, blurry, static")
-            h_t2v = gr.Slider(256, 1024, 480, step=64, label="Height")
-            w_t2v = gr.Slider(256, 1024, 832, step=64, label="Width")
-            seed_t2v = gr.Number(value=42, label="Seed")
-            distill_t2v = gr.Checkbox(label="Distill Mode", value=True)
-            refine_t2v = gr.Checkbox(label="Refine Mode", value=False)
-            btn_t2v = gr.Button("Generate Video", variant="primary")
-            out_t2v = gr.Video(label="Output Video")
-
-            btn_t2v.click(
-                generate_video,
-                inputs=["t2v", prompt_t2v, neg_t2v, gr.State(None), h_t2v, w_t2v, gr.State("480p"), seed_t2v, distill_t2v, refine_t2v],
-                outputs=out_t2v,
-            )
-
-        # Image-to-Video
+            mode_t2v = gr.State("t2v")
+            with gr.Row():
+                with gr.Column(scale=2):
+                    prompt_t2v = gr.Textbox(label="Prompt", lines=4)
+                    neg_prompt_t2v = gr.Textbox(label="Negative Prompt", lines=2, value="blurry, low quality")
+                    height_t2v = gr.Slider(256, 1024, step=64, value=480, label="Height")
+                    width_t2v = gr.Slider(256, 1024, step=64, value=832, label="Width")
+                    seed_t2v = gr.Number(value=42, label="Seed")
+                    distill_t2v = gr.Checkbox(value=True, label="Use Distill Mode")
+                    refine_t2v = gr.Checkbox(value=False, label="Use Refine Mode")
+                    duration_t2v = gr.Slider(1, 20, step=1, value=2, label="Video Duration (seconds)")
+
+                    t2v_button = gr.Button("Generate Video")
+                with gr.Column(scale=3):
+                    video_output_t2v = gr.Video(label="Generated Video")
+
        with gr.TabItem("Image-to-Video"):
-            img_i2v = gr.Image(type="numpy", label="Input Image")
-            prompt_i2v = gr.Textbox(label="Prompt", placeholder="The cat in the image blinks.")
-            neg_i2v = gr.Textbox(label="Negative Prompt", value="ugly, blurry")
-            resolution_i2v = gr.Dropdown(["480p", "720p"], value="480p", label="Resolution")
-            seed_i2v = gr.Number(value=42, label="Seed")
-            distill_i2v = gr.Checkbox(label="Distill Mode", value=True)
-            refine_i2v = gr.Checkbox(label="Refine Mode", value=False)
-            btn_i2v = gr.Button("Generate Video", variant="primary")
-            out_i2v = gr.Video(label="Output Video")
-
-            btn_i2v.click(
-                generate_video,
-                inputs=["i2v", prompt_i2v, neg_i2v, img_i2v, gr.State(None), gr.State(None), resolution_i2v, seed_i2v, distill_i2v, refine_i2v],
-                outputs=out_i2v,
-            )
+            mode_i2v = gr.State("i2v")
+            with gr.Row():
+                with gr.Column(scale=2):
+                    image_i2v = gr.Image(type="numpy", label="Input Image")
+                    prompt_i2v = gr.Textbox(label="Prompt", lines=4)
+                    neg_prompt_i2v = gr.Textbox(label="Negative Prompt", lines=2, value="blurry, low quality")
+                    resolution_i2v = gr.Dropdown(["480p","720p"], value="480p", label="Resolution")
+                    seed_i2v = gr.Number(value=42, label="Seed")
+                    distill_i2v = gr.Checkbox(value=True, label="Use Distill Mode")
+                    refine_i2v = gr.Checkbox(value=False, label="Use Refine Mode")
+                    duration_i2v = gr.Slider(1, 20, step=1, value=2, label="Video Duration (seconds)")
+
+                    i2v_button = gr.Button("Generate Video")
+                with gr.Column(scale=3):
+                    video_output_i2v = gr.Video(label="Generated Video")
+
+    # Event binding
+    t2v_button.click(
+        generate_video,
+        inputs=[mode_t2v, prompt_t2v, neg_prompt_t2v, gr.State(None),
+                height_t2v, width_t2v, gr.State("480p"),
+                seed_t2v, distill_t2v, refine_t2v, duration_t2v],
+        outputs=video_output_t2v
+    )
 
+    i2v_button.click(
+        generate_video,
+        inputs=[mode_i2v, prompt_i2v, neg_prompt_i2v, image_i2v,
+                gr.State(None), gr.State(None), resolution_i2v,
+                seed_i2v, distill_i2v, refine_i2v, duration_i2v],
+        outputs=video_output_i2v
+    )
+
+# ============================================================
+# 5️⃣ Launch
+# ============================================================
 if __name__ == "__main__":
     demo.launch()