diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..a9bb6def3011631d6ffd10b0f4bbd11f277ed36b
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,38 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.egg filter=lfs diff=lfs merge=lfs -text
+**/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..e64dee44bc268b02596b2c18768ebc83a310bf6c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,81 @@
+
+
+
+
+*mp4
+!examples/*.mp4
+data/*
+logs/*
+
+
+
+*pyc
+
+checkpoints/*
+
+
+
+
+
+*egg-info
+
+frames
+
+
+
+
+*png
+
+*gif
+
+
+*ipynb
+daniel_tools
+daniel_tools/*
+
+
+*jpg
+
+
+build
+
+
+run*sh
+
+
+.m*
+
+
+
+
+
+
+
+scripts/*
+
+
+
+*.sh
+wandb
+benchmark
+*jsonl
+*json
+*npz
+DKT_models
+trash
+gradio
+tmp*
+*.webp
+*.ico
+*.model
+__pycache__/
+*.pyc
+**/tokenizer_configs/**/vocab.txt
+**/tokenizer_configs/**/spiece.model
+**/tokenizer_configs/**/tokenizer.model
+
+
+
+dist
+build
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..525f43f6047f39d398d6d06c6126de41b75d134c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,14 @@
+---
+title: DKT 1
+emoji: 🏃
+colorFrom: green
+colorTo: gray
+sdk: gradio
+sdk_version: 6.0.2
+app_file: app.py
+pinned: false
+license: apache-2.0
+short_description: DKT-1
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..74f5ef905143acfea7929c847eb8d022ed24ebd7
--- /dev/null
+++ b/app.py
@@ -0,0 +1,690 @@
+
+import os
+import subprocess
+import sys
+
+
+def install(package):
+ subprocess.check_call([sys.executable, "-m", "pip", "install", package])
+
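+# Install runtime dependencies at startup so the hosted demo stays self-contained without a pinned requirements file.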
+install("torchvision")
+install("loguru")
+install("imageio")
+install("modelscope")
+install("einops")
+install("safetensors")
+install("transformers")
+install("ftfy")
+install("accelerate")
+install("sentencepiece")
+install("spaces")
+install("opencv-python")
+install("trimesh")
+install("gradio_litmodel3d")
+install("open3d")
+
+
+
+
+
+import gradio as gr
+import numpy as np
+import torch
+from PIL import Image
+from loguru import logger
+from tqdm import tqdm
+from tools.common_utils import save_video
+from dkt.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
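+# Patch gradio_client's JSON-schema helpers so non-dict schemas do not break API type inference.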
+try:
+ import gradio_client.utils as _gc_utils
+ if hasattr(_gc_utils, "get_type"):
+ _orig_get_type = _gc_utils.get_type
+ def _get_type_safe(schema):
+ if not isinstance(schema, dict):
+ return "Any"
+ return _orig_get_type(schema)
+ _gc_utils.get_type = _get_type_safe
+except Exception:
+ pass
+
+# Additional guard: handle boolean JSON Schemas and parsing errors
+try:
+ import gradio_client.utils as _gc_utils
+ # Wrap the internal _json_schema_to_python_type if present
+ if hasattr(_gc_utils, "_json_schema_to_python_type"):
+ _orig_internal = _gc_utils._json_schema_to_python_type
+ def _json_schema_to_python_type_safe(schema, defs=None):
+ if isinstance(schema, bool):
+ return "Any"
+ try:
+ return _orig_internal(schema, defs)
+ except Exception:
+ return "Any"
+ _gc_utils._json_schema_to_python_type = _json_schema_to_python_type_safe
+
+ # Also wrap the public json_schema_to_python_type to be extra defensive
+ if hasattr(_gc_utils, "json_schema_to_python_type"):
+ _orig_public = _gc_utils.json_schema_to_python_type
+ def json_schema_to_python_type_safe(schema):
+ try:
+ return _orig_public(schema)
+ except Exception:
+ return "Any"
+ _gc_utils.json_schema_to_python_type = json_schema_to_python_type_safe
+except Exception:
+ pass
+
+import cv2
+import copy
+import trimesh
+from gradio_litmodel3d import LitModel3D
+from os.path import join
+from tools.depth2pcd import depth2pcd
+
+try:
+ from moge.model.v2 import MoGeModel
+
+except ImportError:
+ # MoGe is not installed; install it from source and retry the import.
+ os.system('pip install git+https://github.com/microsoft/MoGe.git -i https://pypi.org/simple/ --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host files.pythonhosted.org')
+ from moge.model.v2 import MoGeModel
+
+
+from tools.eval_utils import transfer_pred_disp2depth, colorize_depth_map
+import glob
+import datetime
+import shutil
+import tempfile
+import spaces
+
+
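+# Lazily initialized global handles for the diffusion pipeline and the MoGe model, shared across requests.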
+PIPE_1_3B = None
+MOGE_MODULE = None
+#* better for bg: logs/outs/train/remote/sft-T2SQNet_glassverse_cleargrasp_HISS_DREDS_DREDS_glassverse_interiorverse-4gpus-origin-lora128-1.3B-rgb_depth-w832-h480-Wan2.1-Fun-Control-2025-10-28-23:26:41/epoch-0-20000.safetensors
+PROMPT = 'depth'
+NEGATIVE_PROMPT = ''
+
+
+
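+# Resize a PIL frame to (height, width) using bicubic interpolation with antialiasing.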
+def resize_frame(frame, height, width):
+ frame = np.array(frame)
+ frame = torch.from_numpy(frame).permute(2, 0, 1).unsqueeze(0).float() / 255.0
+ frame = torch.nn.functional.interpolate(frame, (height, width), mode="bicubic", align_corners=False, antialias=True)
+ frame = (frame.squeeze(0).permute(1, 2, 0).clamp(0, 1) * 255).byte().numpy()
+ frame = Image.fromarray(frame)
+ return frame
+
+
+
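+# Build a trimesh point-cloud scene from a point map, a validity mask, and per-point colors (X and Y are negated before export).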
+def pmap_to_glb(point_map, valid_mask, frame) -> trimesh.Scene:
+ pts_3d = point_map[valid_mask] * np.array([-1, -1, 1])
+ pts_rgb = frame[valid_mask]
+
+ # Initialize a 3D scene
+ scene_3d = trimesh.Scene()
+
+ # Add point cloud data to the scene
+ point_cloud_data = trimesh.PointCloud(
+ vertices=pts_3d, colors=pts_rgb
+ )
+
+ scene_3d.add_geometry(point_cloud_data)
+ return scene_3d
+
+
+
+def create_simple_glb_from_pointcloud(points, colors, glb_filename):
+ try:
+ if len(points) == 0:
+ logger.warning(f"No valid points to create GLB for {glb_filename}")
+ return False
+
+ if colors is not None:
+ # logger.info(f"Adding colors to GLB: shape={colors.shape}, range=[{colors.min():.3f}, {colors.max():.3f}]")
+ pts_rgb = colors
+ else:
+ logger.info("No colors provided, adding default white colors")
+ pts_rgb = np.ones((len(points), 3))
+
+ valid_mask = np.ones(len(points), dtype=bool)
+
+ scene_3d = pmap_to_glb(points, valid_mask, pts_rgb)
+
+ scene_3d.export(glb_filename)
+ # logger.info(f"Saved GLB file using trimesh: {glb_filename}")
+
+ return True
+
+ except Exception as e:
+ logger.error(f"Error creating GLB from pointcloud using trimesh: {str(e)}")
+ return False
+
+
+
+
+
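+# Decode every frame of a video with OpenCV, returning PIL RGB frames and the FPS (falling back to 15 fps).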
+def extract_frames_from_video_file(video_path):
+ try:
+ cap = cv2.VideoCapture(video_path)
+ frames = []
+
+ fps = cap.get(cv2.CAP_PROP_FPS)
+ if fps <= 0:
+ fps = 15.0
+
+ while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+ frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+ frame_rgb = Image.fromarray(frame_rgb)
+ frames.append(frame_rgb)
+
+ cap.release()
+ return frames, fps
+ except Exception as e:
+ logger.error(f"Error extracting frames from {video_path}: {str(e)}")
+ return [], 15.0
+
+
+
+def load_moge_model(device="cuda:0"):
+ global MOGE_MODULE
+ if MOGE_MODULE is not None:
+ return MOGE_MODULE
+ logger.info(f"Loading MoGe model on {device}...")
+ MOGE_MODULE = MoGeModel.from_pretrained('Ruicheng/moge-2-vitl-normal').to(device)
+ return MOGE_MODULE
+
+
+def load_model_1_3b(device="cuda:0"):
+ global PIPE_1_3B
+
+ if PIPE_1_3B is not None:
+ return PIPE_1_3B
+
+ logger.info(f"Loading 1.3B model on {device}...")
+
+ pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device=device,
+ model_configs=[
+ ModelConfig(
+ model_id="PAI/Wan2.1-Fun-1.3B-Control",
+ origin_file_pattern="diffusion_pytorch_model*.safetensors",
+ offload_device="cpu",
+ ),
+ ModelConfig(
+ model_id="PAI/Wan2.1-Fun-1.3B-Control",
+ origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth",
+ offload_device="cpu",
+ ),
+ ModelConfig(
+ model_id="PAI/Wan2.1-Fun-1.3B-Control",
+ origin_file_pattern="Wan2.1_VAE.pth",
+ offload_device="cpu",
+ ),
+ ModelConfig(
+ model_id="PAI/Wan2.1-Fun-1.3B-Control",
+ origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth",
+ offload_device="cpu",
+ ),
+ ],
+ training_strategy="origin",
+ )
+
+
+ lora_config = ModelConfig(
+ model_id="Daniellesry/DKT-Depth-1-3B",
+ origin_file_pattern="dkt-1-3B.safetensors",
+ offload_device="cpu",
+ )
+
+ lora_config.download_if_necessary(use_usp=False)
+
+ pipe.load_lora(pipe.dit, lora_config.path, alpha=1.0) # TODO: verify that the LoRA weights are applied as expected
+ pipe.enable_vram_management()
+
+
+ PIPE_1_3B = pipe
+
+ return pipe
+
+
+
+
+
+
+
+def get_model(model_size):
+ if model_size == "1.3B":
+ assert PIPE_1_3B is not None, "1.3B model not initialized"
+ return PIPE_1_3B
+ else:
+ raise ValueError(f"Unsupported model size: {model_size}")
+
+
+
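+# End-to-end inference: extract frames, run the depth pipeline with a sliding window, colorize the prediction, and back-project selected key frames into point clouds with MoGe.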
+def process_video(
+ video_file,
+ model_size,
+ height,
+ width,
+ num_inference_steps,
+ window_size,
+ overlap
+):
+ try:
+ pipe = get_model(model_size)
+ if pipe is None:
+ return None, f"Model {model_size} not initialized. Please restart the application."
+
+ tmp_video_path = video_file
+ timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+
+ # Store all intermediate files in a temporary directory
+ cur_save_dir = tempfile.mkdtemp(prefix=f'dkt_{timestamp}_{model_size}_')
+
+
+ original_filename = f"input_{timestamp}.mp4"
+ dst_path = os.path.join(cur_save_dir, original_filename)
+ shutil.copy2(tmp_video_path, dst_path)
+ origin_frames, input_fps = extract_frames_from_video_file(tmp_video_path)
+
+ if not origin_frames:
+ return None, "Failed to extract frames from video"
+
+ logger.info(f"Extracted {len(origin_frames)} frames from video")
+
+
+ original_width, original_height = origin_frames[0].size
+ ROTATE = False
+ if original_width < original_height:
+ ROTATE = True
+ origin_frames = [x.transpose(Image.ROTATE_90) for x in origin_frames]
+ original_width, original_height = original_height, original_width
+
+
+
+ frames = [resize_frame(frame, height, width) for frame in origin_frames]
+ frame_length = len(frames)
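+ # Pad the clip so the frame count is 4k+1 (assumed temporal layout expected by the video VAE).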
+ if (frame_length - 1) % 4 != 0:
+ new_len = ((frame_length - 1) // 4 + 1) * 4 + 1
+ frames = frames + [copy.deepcopy(frames[-1]) for _ in range(new_len - frame_length)]
+
+
+ control_video = frames
+ video, vae_outs = pipe(
+ prompt=PROMPT,
+ negative_prompt=NEGATIVE_PROMPT,
+ control_video=control_video,
+ height=height,
+ width=width,
+ num_frames=len(control_video),
+ seed=1,
+ tiled=False,
+ num_inference_steps=num_inference_steps,
+ sliding_window_size=window_size,
+ sliding_window_stride=window_size - overlap,
+ cfg_scale=1.0,
+ )
+
+ #* moge process
+ torch.cuda.empty_cache()
+ processed_video = video[:frame_length]
+
+
+ processed_video = [resize_frame(frame, original_height, original_width) for frame in processed_video]
+ if ROTATE:
+ processed_video = [x.transpose(Image.ROTATE_270) for x in processed_video]
+ origin_frames = [x.transpose(Image.ROTATE_270) for x in origin_frames]
+
+
+ output_filename = f"output_{timestamp}.mp4"
+ output_path = os.path.join(cur_save_dir, output_filename)
+ color_predictions = []
+ if PROMPT == 'depth':
+ predicted_depth_map_np = [np.array(item).astype(np.float32).mean(-1) for item in processed_video]
+ predicted_depth_map_np = np.stack(predicted_depth_map_np)
+ predicted_depth_map_np = predicted_depth_map_np / 255.0
+ depth_min = predicted_depth_map_np.min()
+ depth_max = predicted_depth_map_np.max()
+ predicted_depth_map_np = (predicted_depth_map_np - depth_min) / (depth_max - depth_min)
+ color_predictions = [colorize_depth_map(item) for item in predicted_depth_map_np]
+ else:
+ color_predictions = processed_video
+ save_video(color_predictions, output_path, fps=input_fps, quality=5)
+
+
+
+ frame_num = len(origin_frames)
+ resize_W, resize_H = origin_frames[0].size
+
+ vis_pc_num = 4
+ indices = np.linspace(0, frame_num-1, vis_pc_num)
+ indices = np.round(indices).astype(np.int32)
+ pc_save_dir = os.path.join(cur_save_dir, 'pointclouds')
+ os.makedirs(pc_save_dir, exist_ok=True)
+
+ glb_files = []
+ moge_device = MOGE_MODULE.device if MOGE_MODULE is not None else torch.device("cuda:0")
+ for idx in tqdm(indices):
+ origin_rgb_frame = origin_frames[idx]
+ predicted_depth = processed_video[idx]
+
+ # Read the input image and convert to tensor (3, H, W) with RGB values normalized to [0, 1]
+ input_image_np = np.array(origin_rgb_frame) # Convert PIL Image to numpy array
+ input_image = torch.tensor(input_image_np / 255, dtype=torch.float32, device=moge_device).permute(2, 0, 1)
+
+ output = MOGE_MODULE.infer(input_image)
+ #* "dict_keys(['points', 'intrinsics', 'depth', 'mask', 'normal'])"
+ moge_intrinsics = output['intrinsics'].cpu().numpy()
+ moge_mask = output['mask'].cpu().numpy()
+ moge_depth = output['depth'].cpu().numpy()
+
+ predicted_depth = np.array(predicted_depth)
+ predicted_depth = predicted_depth.mean(-1) / 255.0
+
+ metric_depth = transfer_pred_disp2depth(predicted_depth, moge_depth, moge_mask)
+
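+ # Scale the normalized intrinsics by the image size to obtain pixel-unit focal lengths and principal point.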
+ moge_intrinsics[0, 0] *= resize_W
+ moge_intrinsics[1, 1] *= resize_H
+ moge_intrinsics[0, 2] *= resize_W
+ moge_intrinsics[1, 2] *= resize_H
+
+ # pcd = depth2pcd(metric_depth, moge_intrinsics, color=cv2.cvtColor(input_image_np, cv2.COLOR_BGR2RGB), input_mask=moge_mask, ret_pcd=True)
+ pcd = depth2pcd(metric_depth, moge_intrinsics, color=input_image_np, input_mask=moge_mask, ret_pcd=True)
+
+ # pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) * np.array([1, -1, -1], dtype=np.float32))
+
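+ # Optionally remove statistical outliers to clean up noisy back-projected points.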
+ apply_filter = True
+ if apply_filter:
+ cl, ind = pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=3.0)
+ pcd = pcd.select_by_index(ind)
+
+ #* save pcd: o3d.io.write_point_cloud(f'{pc_save_dir}/{timestamp}_{idx:02d}.ply', pcd)
+ points = np.asarray(pcd.points)
+ colors = np.asarray(pcd.colors) if pcd.has_colors() else None
+ glb_filename = os.path.join(pc_save_dir, f'{timestamp}_{idx:02d}.glb')
+ success = create_simple_glb_from_pointcloud(points, colors, glb_filename)
+ if not success:
+ logger.warning(f"Failed to save GLB file: {glb_filename}")
+
+ glb_files.append(glb_filename)
+
+ return output_path, glb_files
+
+ except Exception as e:
+ logger.error(f"Error processing video: {str(e)}")
+ return None, f"Error: {str(e)}"
+
+
+
+
+def main():
+
+
+
+ #* gradio creation and initialization
+
+
+ css = """
+ #video-display-container {
+ max-height: 100vh;
+ }
+ #video-display-input {
+ max-height: 80vh;
+ }
+ #video-display-output {
+ max-height: 80vh;
+ }
+ #download {
+ height: 62px;
+ }
+ .title {
+ text-align: center;
+ }
+ .description {
+ text-align: center;
+ }
+ .gradio-examples {
+ max-height: 400px;
+ overflow-y: auto;
+ }
+ .gradio-examples .examples-container {
+ display: grid;
+ grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
+ gap: 10px;
+ padding: 10px;
+ }
+ .gradio-container .gradio-examples .pagination,
+ .gradio-container .gradio-examples .pagination button,
+ div[data-testid="examples"] .pagination,
+ div[data-testid="examples"] .pagination button {
+ font-size: 28px !important;
+ font-weight: bold !important;
+ padding: 15px 20px !important;
+ min-width: 60px !important;
+ height: 60px !important;
+ border-radius: 10px !important;
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
+ color: white !important;
+ border: none !important;
+ cursor: pointer !important;
+ margin: 8px !important;
+ display: inline-block !important;
+ box-shadow: 0 4px 8px rgba(0,0,0,0.2) !important;
+ transition: all 0.3s ease !important;
+ }
+
+ div[data-testid="examples"] .pagination button:not(.active),
+ .gradio-container .gradio-examples .pagination button:not(.active) {
+ font-size: 32px !important;
+ font-weight: bold !important;
+ padding: 15px 20px !important;
+ min-width: 60px !important;
+ height: 60px !important;
+ background: linear-gradient(135deg, #8a9cf0 0%, #9a6bb2 100%) !important;
+ opacity: 0.8 !important;
+ }
+
+ div[data-testid="examples"] .pagination button:hover,
+ .gradio-container .gradio-examples .pagination button:hover {
+ background: linear-gradient(135deg, #5a6fd8 0%, #6a4190 100%) !important;
+ transform: translateY(-2px) !important;
+ box-shadow: 0 6px 12px rgba(0,0,0,0.3) !important;
+ opacity: 1 !important;
+ }
+
+ div[data-testid="examples"] .pagination button.active,
+ .gradio-container .gradio-examples .pagination button.active {
+ background: linear-gradient(135deg, #11998e 0%, #38ef7d 100%) !important;
+ box-shadow: 0 4px 8px rgba(17,153,142,0.4) !important;
+ opacity: 1 !important;
+ }
+
+ button[class*="pagination"],
+ button[class*="page"] {
+ font-size: 28px !important;
+ font-weight: bold !important;
+ padding: 15px 20px !important;
+ min-width: 60px !important;
+ height: 60px !important;
+ border-radius: 10px !important;
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
+ color: white !important;
+ border: none !important;
+ cursor: pointer !important;
+ margin: 8px !important;
+ box-shadow: 0 4px 8px rgba(0,0,0,0.2) !important;
+ transition: all 0.3s ease !important;
+ }
+ """
+
+
+
+ head_html = """
+
+
+
+
+ """
+
+
+ # title = "# Diffusion Knows Transparency: Repurposing Video Diffusion for Transparent Object Depth and Normal Estimation "
+ # description = """Official demo for **DKT **."""
+
+ # with gr.Blocks(css=css, title="DKT - Diffusion Knows Transparency", favicon_path="favicon.ico") as demo:
+
+ height = 480
+ width = 832
+ window_size = 21
+ with gr.Blocks(css=css, title="DKT", head=head_html) as demo:
+ # gr.Markdown(title, elem_classes=["title"])
+ # gr.Markdown(description, elem_classes=["description"])
+ # gr.Markdown("### Video Processing Demo", elem_classes=["description"])
+
+ with gr.Row():
+ with gr.Column():
+ input_video = gr.Video(label="Input Video", elem_id='video-display-input')
+
+ model_size = gr.Radio(
+ choices=["1.3B", "14B"],
+ value="1.3B",
+ label="Model Size"
+ )
+
+
+ with gr.Accordion("Advanced Parameters", open=False):
+ num_inference_steps = gr.Slider(
+ minimum=1, maximum=50, value=5, step=1,
+ label="Number of Inference Steps"
+ )
+ overlap = gr.Slider(
+ minimum=1, maximum=20, value=3, step=1,
+ label="Overlap"
+ )
+
+ submit = gr.Button(value="Compute Depth", variant="primary")
+
+ with gr.Column():
+ output_video = gr.Video(
+ label="Depth Outputs",
+ elem_id='video-display-output',
+ autoplay=True
+ )
+ vis_video = gr.Video(
+ label="Visualization Video",
+ visible=False,
+ autoplay=True
+ )
+
+ with gr.Row():
+ gr.Markdown("### 3D Point Cloud Visualization", elem_classes=["title"])
+
+ with gr.Row(equal_height=True):
+ with gr.Column(scale=1):
+ output_point_map0 = LitModel3D(
+ label="Point Cloud Key Frame 1",
+ clear_color=[1.0, 1.0, 1.0, 1.0],
+ interactive=False,
+ # height=400,
+
+ )
+ with gr.Column(scale=1):
+ output_point_map1 = LitModel3D(
+ label="Point Cloud Key Frame 2",
+ clear_color=[1.0, 1.0, 1.0, 1.0],
+ interactive=False
+ )
+
+
+ with gr.Row(equal_height=True):
+
+ with gr.Column(scale=1):
+ output_point_map2 = LitModel3D(
+ label="Point Cloud Key Frame 3",
+ clear_color=[1.0, 1.0, 1.0, 1.0],
+ interactive=False
+ )
+ with gr.Column(scale=1):
+ output_point_map3 = LitModel3D(
+ label="Point Cloud Key Frame 4",
+ clear_color=[1.0, 1.0, 1.0, 1.0],
+ interactive=False
+ )
+
+ def on_submit(video_file, model_size, num_inference_steps, overlap):
+ if video_file is None:
+ logger.warning("Please upload a video file")
+ return None, None, None, None, None, None
+
+ try:
+
+ output_path, glb_files = process_video(
+ video_file, model_size, height, width, num_inference_steps, window_size, overlap
+ )
+
+
+
+ if output_path is None:
+ logger.error(f"Video processing failed: {glb_files}")
+ return None, None, None, None, None, None
+
+ model3d_outputs = [None] * 4
+ if glb_files:
+ for i, glb_file in enumerate(glb_files[:4]):
+ if os.path.exists(glb_file):
+ model3d_outputs[i] = glb_file
+
+
+
+ return output_path, None, *model3d_outputs
+
+ except Exception as e:
+ logger.error(f"Error in on_submit: {str(e)}")
+ return None, None, None, None, None, None
+
+
+ submit.click(
+ on_submit,
+ inputs=[
+ input_video, model_size, num_inference_steps, overlap
+ ],
+ outputs=[
+ output_video, vis_video,
+ output_point_map0, output_point_map1, output_point_map2, output_point_map3
+ ]
+ )
+
+
+
+ example_files = glob.glob('examples/*')
+ if example_files:
+ example_inputs = []
+ for file_path in example_files:
+ example_inputs.append([file_path, "1.3B"])
+
+ examples = gr.Examples(
+ examples=example_inputs,
+ inputs=[input_video, model_size],
+ outputs=[
+ output_video, vis_video,
+ output_point_map0, output_point_map1, output_point_map2, output_point_map3
+ ],
+ fn=on_submit,
+ examples_per_page=6
+ )
+
+
+ #* main code, model and moge model initialization
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ load_model_1_3b(device=device)
+ load_moge_model(device=device)
+ torch.cuda.empty_cache()
+
+ demo.queue().launch(share=True, server_name="0.0.0.0", server_port=7860)
+
+
+if __name__ == '__main__':
+ process_video = spaces.GPU(process_video)
+
+ main()
diff --git a/dkt/__init__.py b/dkt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8a5c754359f9d146648e9d25caedd723c005bae
--- /dev/null
+++ b/dkt/__init__.py
@@ -0,0 +1,4 @@
+from .models import *
+from .prompters import *
+from .schedulers import *
+from .pipelines import *
diff --git a/dkt/configs/__init__.py b/dkt/configs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dkt/configs/model_config.py b/dkt/configs/model_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..24a64168d5dba143917f802b17ac5e90196dedd8
--- /dev/null
+++ b/dkt/configs/model_config.py
@@ -0,0 +1,68 @@
+from typing_extensions import Literal, TypeAlias
+
+
+from ..models.wan_video_dit import WanModel
+from ..models.wan_video_text_encoder import WanTextEncoder
+from ..models.wan_video_image_encoder import WanImageEncoder
+from ..models.wan_video_vae import WanVideoVAE, WanVideoVAE38
+from ..models.wan_video_motion_controller import WanMotionControllerModel
+from ..models.wan_video_vace import VaceWanModel
+model_loader_configs = [
+ # These configs are provided for detecting model type automatically.
+ # The format is (state_dict_keys_hash, state_dict_keys_hash_with_shape, model_names, model_classes, model_resource)
+ (None, "9269f8db9040a9d860eaca435be61814", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "aafcfd9672c3a2456dc46e1cb6e52c70", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "6bfcfb3b342cb286ce886889d519a77e", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "6d6ccde6845b95ad9114ab993d917893", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "6bfcfb3b342cb286ce886889d519a77e", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "349723183fc063b2bfc10bb2835cf677", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "efa44cddf936c70abd0ea28b6cbe946c", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "3ef3b1f8e1dab83d5b71fd7b617f859f", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "70ddad9d3a133785da5ea371aae09504", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "26bde73488a92e64cc20b0a7485b9e5b", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "ac6a5aa74f4a0aab6f64eb9a72f19901", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "b61c605c2adbd23124d152ed28e049ae", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "1f5ab7703c6fc803fdded85ff040c316", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "5b013604280dd715f8457c6ed6d6a626", ["wan_video_dit"], [WanModel], "civitai"),
+ (None, "a61453409b67cd3246cf0c3bebad47ba", ["wan_video_dit", "wan_video_vace"], [WanModel, VaceWanModel], "civitai"),
+ (None, "7a513e1f257a861512b1afd387a8ecd9", ["wan_video_dit", "wan_video_vace"], [WanModel, VaceWanModel], "civitai"),
+ (None, "cb104773c6c2cb6df4f9529ad5c60d0b", ["wan_video_dit"], [WanModel], "diffusers"),
+ (None, "9c8818c2cbea55eca56c7b447df170da", ["wan_video_text_encoder"], [WanTextEncoder], "civitai"),
+ (None, "5941c53e207d62f20f9025686193c40b", ["wan_video_image_encoder"], [WanImageEncoder], "civitai"),
+ (None, "1378ea763357eea97acdef78e65d6d96", ["wan_video_vae"], [WanVideoVAE], "civitai"),
+ (None, "ccc42284ea13e1ad04693284c7a09be6", ["wan_video_vae"], [WanVideoVAE], "civitai"),
+ (None, "e1de6c02cdac79f8b739f4d3698cd216", ["wan_video_vae"], [WanVideoVAE38], "civitai"),
+ (None, "dbd5ec76bbf977983f972c151d545389", ["wan_video_motion_controller"], [WanMotionControllerModel], "civitai"),
+]
+huggingface_model_loader_configs = [
+ # These configs are provided for detecting model type automatically.
+ # The format is (architecture_in_huggingface_config, huggingface_lib, model_name, redirected_architecture)
+ ("ChatGLMModel", "dkt.models.kolors_text_encoder", "kolors_text_encoder", None),
+ ("MarianMTModel", "transformers.models.marian.modeling_marian", "translator", None),
+ ("BloomForCausalLM", "transformers.models.bloom.modeling_bloom", "beautiful_prompt", None),
+ ("Qwen2ForCausalLM", "transformers.models.qwen2.modeling_qwen2", "qwen_prompt", None),
+ # ("LlamaForCausalLM", "transformers.models.llama.modeling_llama", "omost_prompt", None),
+ ("T5EncoderModel", "dkt.models.flux_text_encoder", "flux_text_encoder_2", "FluxTextEncoder2"),
+ ("CogVideoXTransformer3DModel", "dkt.models.cog_dit", "cog_dit", "CogDiT"),
+ ("SiglipModel", "transformers.models.siglip.modeling_siglip", "siglip_vision_model", "SiglipVisionModel"),
+ ("LlamaForCausalLM", "dkt.models.hunyuan_video_text_encoder", "hunyuan_video_text_encoder_2", "HunyuanVideoLLMEncoder"),
+ ("LlavaForConditionalGeneration", "dkt.models.hunyuan_video_text_encoder", "hunyuan_video_text_encoder_2", "HunyuanVideoMLLMEncoder"),
+ ("Step1Model", "dkt.models.stepvideo_text_encoder", "stepvideo_text_encoder_2", "STEP1TextEncoder"),
+ ("Qwen2_5_VLForConditionalGeneration", "dkt.models.qwenvl", "qwenvl", "Qwen25VL_7b_Embedder"),
+]
+patch_model_loader_configs = [
+ # These configs are provided for detecting model type automatically.
+ # The format is (state_dict_keys_hash_with_shape, model_name, model_class, extra_kwargs)
+ # ("9a4ab6869ac9b7d6e31f9854e397c867", ["svd_unet"], [SVDUNet], {"add_positional_conv": 128}),
+]
+
+preset_models_on_huggingface = {
+
+}
+preset_models_on_modelscope = {
+
+}
+Preset_model_id: TypeAlias = Literal[
+ ...
+
+]
diff --git a/dkt/lora/__init__.py b/dkt/lora/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2ce96e6e4ed72c267b6e2c2e50d6881a260f766
--- /dev/null
+++ b/dkt/lora/__init__.py
@@ -0,0 +1,45 @@
+import torch
+
+
+
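+# Minimal usage sketch (hypothetical objects: `dit` is the module to patch and
+# `lora_state_dict` a PEFT-style checkpoint already loaded from safetensors):
+#   loader = GeneralLoRALoader(device="cuda", torch_dtype=torch.bfloat16)
+#   loader.load(dit, lora_state_dict, alpha=1.0)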
+class GeneralLoRALoader:
+ def __init__(self, device="cpu", torch_dtype=torch.float32):
+ self.device = device
+ self.torch_dtype = torch_dtype
+
+
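+ # Map PEFT-style "...lora_B.<adapter>.weight" keys to the target module name and its (lora_B, lora_A) key pair.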
+ def get_name_dict(self, lora_state_dict):
+ lora_name_dict = {}
+ for key in lora_state_dict:
+ if ".lora_B." not in key:
+ continue
+ keys = key.split(".")
+ if len(keys) > keys.index("lora_B") + 2:
+ keys.pop(keys.index("lora_B") + 1)
+ keys.pop(keys.index("lora_B"))
+ if keys[0] == "diffusion_model":
+ keys.pop(0)
+ keys.pop(-1)
+ target_name = ".".join(keys)
+ lora_name_dict[target_name] = (key, key.replace(".lora_B.", ".lora_A."))
+ return lora_name_dict
+
+
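+ # Merge LoRA deltas into the matching module weights in place: W <- W + alpha * (B @ A).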
+ def load(self, model: torch.nn.Module, state_dict_lora, alpha=1.0):
+ updated_num = 0
+ lora_name_dict = self.get_name_dict(state_dict_lora)
+ for name, module in model.named_modules():
+ if name in lora_name_dict:
+ weight_up = state_dict_lora[lora_name_dict[name][0]].to(device=self.device, dtype=self.torch_dtype)
+ weight_down = state_dict_lora[lora_name_dict[name][1]].to(device=self.device, dtype=self.torch_dtype)
+ if len(weight_up.shape) == 4:
+ weight_up = weight_up.squeeze(3).squeeze(2)
+ weight_down = weight_down.squeeze(3).squeeze(2)
+ weight_lora = alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
+ else:
+ weight_lora = alpha * torch.mm(weight_up, weight_down)
+ state_dict = module.state_dict()
+ state_dict["weight"] = state_dict["weight"].to(device=self.device, dtype=self.torch_dtype) + weight_lora
+ module.load_state_dict(state_dict)
+ updated_num += 1
+ print(f"{updated_num} tensors are updated by LoRA.")
\ No newline at end of file
diff --git a/dkt/models/__init__.py b/dkt/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..96707b666371c39d4ba59a839d5ddfeafb5d1d43
--- /dev/null
+++ b/dkt/models/__init__.py
@@ -0,0 +1 @@
+from .model_manager import *
diff --git a/dkt/models/attention.py b/dkt/models/attention.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb90e1ed1a28a0541a8d9df8313997a7d3f14da7
--- /dev/null
+++ b/dkt/models/attention.py
@@ -0,0 +1,89 @@
+import torch
+from einops import rearrange
+
+
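+# Plain softmax attention used as a fallback when an attention bias is provided to xformers_forward.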
+def low_version_attention(query, key, value, attn_bias=None):
+ scale = 1 / query.shape[-1] ** 0.5
+ query = query * scale
+ attn = torch.matmul(query, key.transpose(-2, -1))
+ if attn_bias is not None:
+ attn = attn + attn_bias
+ attn = attn.softmax(-1)
+ return attn @ value
+
+
+class Attention(torch.nn.Module):
+
+ def __init__(self, q_dim, num_heads, head_dim, kv_dim=None, bias_q=False, bias_kv=False, bias_out=False):
+ super().__init__()
+ dim_inner = head_dim * num_heads
+ kv_dim = kv_dim if kv_dim is not None else q_dim
+ self.num_heads = num_heads
+ self.head_dim = head_dim
+
+ self.to_q = torch.nn.Linear(q_dim, dim_inner, bias=bias_q)
+ self.to_k = torch.nn.Linear(kv_dim, dim_inner, bias=bias_kv)
+ self.to_v = torch.nn.Linear(kv_dim, dim_inner, bias=bias_kv)
+ self.to_out = torch.nn.Linear(dim_inner, q_dim, bias=bias_out)
+
+ def interact_with_ipadapter(self, hidden_states, q, ip_k, ip_v, scale=1.0):
+ batch_size = q.shape[0]
+ ip_k = ip_k.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
+ ip_v = ip_v.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
+ ip_hidden_states = torch.nn.functional.scaled_dot_product_attention(q, ip_k, ip_v)
+ hidden_states = hidden_states + scale * ip_hidden_states
+ return hidden_states
+
+ def torch_forward(self, hidden_states, encoder_hidden_states=None, attn_mask=None, ipadapter_kwargs=None, qkv_preprocessor=None):
+ if encoder_hidden_states is None:
+ encoder_hidden_states = hidden_states
+
+ batch_size = encoder_hidden_states.shape[0]
+
+ q = self.to_q(hidden_states)
+ k = self.to_k(encoder_hidden_states)
+ v = self.to_v(encoder_hidden_states)
+
+ q = q.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
+ k = k.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
+ v = v.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
+
+ if qkv_preprocessor is not None:
+ q, k, v = qkv_preprocessor(q, k, v)
+
+ hidden_states = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask)
+ if ipadapter_kwargs is not None:
+ hidden_states = self.interact_with_ipadapter(hidden_states, q, **ipadapter_kwargs)
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_dim)
+ hidden_states = hidden_states.to(q.dtype)
+
+ hidden_states = self.to_out(hidden_states)
+
+ return hidden_states
+
+ def xformers_forward(self, hidden_states, encoder_hidden_states=None, attn_mask=None):
+ if encoder_hidden_states is None:
+ encoder_hidden_states = hidden_states
+
+ q = self.to_q(hidden_states)
+ k = self.to_k(encoder_hidden_states)
+ v = self.to_v(encoder_hidden_states)
+
+ q = rearrange(q, "b f (n d) -> (b n) f d", n=self.num_heads)
+ k = rearrange(k, "b f (n d) -> (b n) f d", n=self.num_heads)
+ v = rearrange(v, "b f (n d) -> (b n) f d", n=self.num_heads)
+
+ if attn_mask is not None:
+ hidden_states = low_version_attention(q, k, v, attn_bias=attn_mask)
+ else:
+ import xformers.ops as xops
+ hidden_states = xops.memory_efficient_attention(q, k, v)
+ hidden_states = rearrange(hidden_states, "(b n) f d -> b f (n d)", n=self.num_heads)
+
+ hidden_states = hidden_states.to(q.dtype)
+ hidden_states = self.to_out(hidden_states)
+
+ return hidden_states
+
+ def forward(self, hidden_states, encoder_hidden_states=None, attn_mask=None, ipadapter_kwargs=None, qkv_preprocessor=None):
+ return self.torch_forward(hidden_states, encoder_hidden_states=encoder_hidden_states, attn_mask=attn_mask, ipadapter_kwargs=ipadapter_kwargs, qkv_preprocessor=qkv_preprocessor)
\ No newline at end of file
diff --git a/dkt/models/downloader.py b/dkt/models/downloader.py
new file mode 100644
index 0000000000000000000000000000000000000000..d694623b9de0de798740cb5cefc5160dd2b2ddce
--- /dev/null
+++ b/dkt/models/downloader.py
@@ -0,0 +1,111 @@
+from huggingface_hub import hf_hub_download
+from modelscope import snapshot_download
+import os, shutil
+from typing_extensions import Literal, TypeAlias
+from typing import List
+from ..configs.model_config import preset_models_on_huggingface, preset_models_on_modelscope, Preset_model_id
+
+
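+# Download a single file from ModelScope into local_dir, then move it out of the nested snapshot folder.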
+def download_from_modelscope(model_id, origin_file_path, local_dir):
+ os.makedirs(local_dir, exist_ok=True)
+ file_name = os.path.basename(origin_file_path)
+ if file_name in os.listdir(local_dir):
+ print(f" {file_name} has been already in {local_dir}.")
+ else:
+ print(f" Start downloading {os.path.join(local_dir, file_name)}")
+ snapshot_download(model_id, allow_file_pattern=origin_file_path, local_dir=local_dir)
+ downloaded_file_path = os.path.join(local_dir, origin_file_path)
+ target_file_path = os.path.join(local_dir, os.path.split(origin_file_path)[-1])
+ if downloaded_file_path != target_file_path:
+ shutil.move(downloaded_file_path, target_file_path)
+ shutil.rmtree(os.path.join(local_dir, origin_file_path.split("/")[0]))
+
+
+def download_from_huggingface(model_id, origin_file_path, local_dir):
+ os.makedirs(local_dir, exist_ok=True)
+ file_name = os.path.basename(origin_file_path)
+ if file_name in os.listdir(local_dir):
+ print(f" {file_name} has been already in {local_dir}.")
+ else:
+ print(f" Start downloading {os.path.join(local_dir, file_name)}")
+ hf_hub_download(model_id, origin_file_path, local_dir=local_dir)
+ downloaded_file_path = os.path.join(local_dir, origin_file_path)
+ target_file_path = os.path.join(local_dir, file_name)
+ if downloaded_file_path != target_file_path:
+ shutil.move(downloaded_file_path, target_file_path)
+ shutil.rmtree(os.path.join(local_dir, origin_file_path.split("/")[0]))
+
+
+Preset_model_website: TypeAlias = Literal[
+ "HuggingFace",
+ "ModelScope",
+]
+website_to_preset_models = {
+ "HuggingFace": preset_models_on_huggingface,
+ "ModelScope": preset_models_on_modelscope,
+}
+website_to_download_fn = {
+ "HuggingFace": download_from_huggingface,
+ "ModelScope": download_from_modelscope,
+}
+
+
+def download_customized_models(
+ model_id,
+ origin_file_path,
+ local_dir,
+ downloading_priority: List[Preset_model_website] = ["ModelScope", "HuggingFace"],
+):
+ downloaded_files = []
+ for website in downloading_priority:
+ # Check if the file is downloaded.
+ file_to_download = os.path.join(local_dir, os.path.basename(origin_file_path))
+ if file_to_download in downloaded_files:
+ continue
+ # Download
+ website_to_download_fn[website](model_id, origin_file_path, local_dir)
+ if os.path.basename(origin_file_path) in os.listdir(local_dir):
+ downloaded_files.append(file_to_download)
+ return downloaded_files
+
+
+def download_models(
+ model_id_list: List[Preset_model_id] = [],
+ downloading_priority: List[Preset_model_website] = ["ModelScope", "HuggingFace"],
+):
+ print(f"Downloading models: {model_id_list}")
+ downloaded_files = []
+ load_files = []
+
+ for model_id in model_id_list:
+ for website in downloading_priority:
+ if model_id in website_to_preset_models[website]:
+
+ # Parse model metadata
+ model_metadata = website_to_preset_models[website][model_id]
+ if isinstance(model_metadata, list):
+ file_data = model_metadata
+ else:
+ file_data = model_metadata.get("file_list", [])
+
+ # Try downloading the model from this website.
+ model_files = []
+ for model_id, origin_file_path, local_dir in file_data:
+ # Check if the file is downloaded.
+ file_to_download = os.path.join(local_dir, os.path.basename(origin_file_path))
+ if file_to_download in downloaded_files:
+ continue
+ # Download
+ website_to_download_fn[website](model_id, origin_file_path, local_dir)
+ if os.path.basename(origin_file_path) in os.listdir(local_dir):
+ downloaded_files.append(file_to_download)
+ model_files.append(file_to_download)
+
+ # If the model is successfully downloaded, break.
+ if len(model_files) > 0:
+ if isinstance(model_metadata, dict) and "load_path" in model_metadata:
+ model_files = model_metadata["load_path"]
+ load_files.extend(model_files)
+ break
+
+ return load_files
\ No newline at end of file
diff --git a/dkt/models/lora.py b/dkt/models/lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..513efff651e9d411e22ad26275e370f8e315ffd8
--- /dev/null
+++ b/dkt/models/lora.py
@@ -0,0 +1,197 @@
+import torch
+from .wan_video_dit import WanModel
+
+
+
+class LoRAFromCivitai:
+ def __init__(self):
+ self.supported_model_classes = []
+ self.lora_prefix = []
+ self.renamed_lora_prefix = {}
+ self.special_keys = {}
+
+
+ def convert_state_dict(self, state_dict, lora_prefix="lora_unet_", alpha=1.0):
+ for key in state_dict:
+ if ".lora_up" in key:
+ return self.convert_state_dict_up_down(state_dict, lora_prefix, alpha)
+ return self.convert_state_dict_AB(state_dict, lora_prefix, alpha)
+
+
+ def convert_state_dict_up_down(self, state_dict, lora_prefix="lora_unet_", alpha=1.0):
+ renamed_lora_prefix = self.renamed_lora_prefix.get(lora_prefix, "")
+ state_dict_ = {}
+ for key in state_dict:
+ if ".lora_up" not in key:
+ continue
+ if not key.startswith(lora_prefix):
+ continue
+ weight_up = state_dict[key].to(device="cuda", dtype=torch.float16)
+ weight_down = state_dict[key.replace(".lora_up", ".lora_down")].to(device="cuda", dtype=torch.float16)
+ if len(weight_up.shape) == 4:
+ weight_up = weight_up.squeeze(3).squeeze(2).to(torch.float32)
+ weight_down = weight_down.squeeze(3).squeeze(2).to(torch.float32)
+ lora_weight = alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
+ else:
+ lora_weight = alpha * torch.mm(weight_up, weight_down)
+ target_name = key.split(".")[0].replace(lora_prefix, renamed_lora_prefix).replace("_", ".") + ".weight"
+ for special_key in self.special_keys:
+ target_name = target_name.replace(special_key, self.special_keys[special_key])
+ state_dict_[target_name] = lora_weight.cpu()
+ return state_dict_
+
+
+ def convert_state_dict_AB(self, state_dict, lora_prefix="", alpha=1.0, device="cuda", torch_dtype=torch.float16):
+ state_dict_ = {}
+ for key in state_dict:
+ if ".lora_B." not in key:
+ continue
+ if not key.startswith(lora_prefix):
+ continue
+ weight_up = state_dict[key].to(device=device, dtype=torch_dtype)
+ weight_down = state_dict[key.replace(".lora_B.", ".lora_A.")].to(device=device, dtype=torch_dtype)
+ if len(weight_up.shape) == 4:
+ weight_up = weight_up.squeeze(3).squeeze(2)
+ weight_down = weight_down.squeeze(3).squeeze(2)
+ lora_weight = alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
+ else:
+ lora_weight = alpha * torch.mm(weight_up, weight_down)
+ keys = key.split(".")
+ keys.pop(keys.index("lora_B"))
+ target_name = ".".join(keys)
+ target_name = target_name[len(lora_prefix):]
+ state_dict_[target_name] = lora_weight.cpu()
+ return state_dict_
+
+
+ def load(self, model, state_dict_lora, lora_prefix, alpha=1.0, model_resource=None):
+ state_dict_model = model.state_dict()
+ state_dict_lora = self.convert_state_dict(state_dict_lora, lora_prefix=lora_prefix, alpha=alpha)
+ if model_resource == "diffusers":
+ state_dict_lora = model.__class__.state_dict_converter().from_diffusers(state_dict_lora)
+ elif model_resource == "civitai":
+ state_dict_lora = model.__class__.state_dict_converter().from_civitai(state_dict_lora)
+ if isinstance(state_dict_lora, tuple):
+ state_dict_lora = state_dict_lora[0]
+ if len(state_dict_lora) > 0:
+ print(f" {len(state_dict_lora)} tensors are updated.")
+ for name in state_dict_lora:
+ fp8 = False
+ if state_dict_model[name].dtype == torch.float8_e4m3fn:
+ state_dict_model[name] = state_dict_model[name].to(state_dict_lora[name].dtype)
+ fp8 = True
+ state_dict_model[name] += state_dict_lora[name].to(
+ dtype=state_dict_model[name].dtype, device=state_dict_model[name].device)
+ if fp8:
+ state_dict_model[name] = state_dict_model[name].to(torch.float8_e4m3fn)
+ model.load_state_dict(state_dict_model)
+
+
+ def match(self, model, state_dict_lora):
+ for lora_prefix, model_class in zip(self.lora_prefix, self.supported_model_classes):
+ if not isinstance(model, model_class):
+ continue
+ state_dict_model = model.state_dict()
+ for model_resource in ["diffusers", "civitai"]:
+ try:
+ state_dict_lora_ = self.convert_state_dict(state_dict_lora, lora_prefix=lora_prefix, alpha=1.0)
+ converter_fn = model.__class__.state_dict_converter().from_diffusers if model_resource == "diffusers" \
+ else model.__class__.state_dict_converter().from_civitai
+ state_dict_lora_ = converter_fn(state_dict_lora_)
+ if isinstance(state_dict_lora_, tuple):
+ state_dict_lora_ = state_dict_lora_[0]
+ if len(state_dict_lora_) == 0:
+ continue
+ for name in state_dict_lora_:
+ if name not in state_dict_model:
+ break
+ else:
+ return lora_prefix, model_resource
+ except:
+ pass
+ return None
+
+
+class GeneralLoRAFromPeft:
+ def __init__(self):
+ self.supported_model_classes = [WanModel]
+
+
+ def get_name_dict(self, lora_state_dict):
+ lora_name_dict = {}
+ for key in lora_state_dict:
+ if ".lora_B." not in key:
+ continue
+ keys = key.split(".")
+ if len(keys) > keys.index("lora_B") + 2:
+ keys.pop(keys.index("lora_B") + 1)
+ keys.pop(keys.index("lora_B"))
+ if keys[0] == "diffusion_model":
+ keys.pop(0)
+ target_name = ".".join(keys)
+ lora_name_dict[target_name] = (key, key.replace(".lora_B.", ".lora_A."))
+ return lora_name_dict
+
+
+ def match(self, model: torch.nn.Module, state_dict_lora):
+ lora_name_dict = self.get_name_dict(state_dict_lora)
+ model_name_dict = {name: None for name, _ in model.named_parameters()}
+ matched_num = sum([i in model_name_dict for i in lora_name_dict])
+ if matched_num == len(lora_name_dict):
+ return "", ""
+ else:
+ return None
+
+
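+ # Pick the device/dtype used for the merge: prefer CUDA when the weights sit on CPU, and use float32 when the weights are stored in float8.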
+ def fetch_device_and_dtype(self, state_dict):
+ device, dtype = None, None
+ for name, param in state_dict.items():
+ device, dtype = param.device, param.dtype
+ break
+ computation_device = device
+ computation_dtype = dtype
+ if computation_device == torch.device("cpu"):
+ if torch.cuda.is_available():
+ computation_device = torch.device("cuda")
+ if computation_dtype == torch.float8_e4m3fn:
+ computation_dtype = torch.float32
+ return device, dtype, computation_device, computation_dtype
+
+
+ def load(self, model, state_dict_lora, lora_prefix="", alpha=1.0, model_resource=""):
+ state_dict_model = model.state_dict()
+ device, dtype, computation_device, computation_dtype = self.fetch_device_and_dtype(state_dict_model)
+ lora_name_dict = self.get_name_dict(state_dict_lora)
+ for name in lora_name_dict:
+ weight_up = state_dict_lora[lora_name_dict[name][0]].to(device=computation_device, dtype=computation_dtype)
+ weight_down = state_dict_lora[lora_name_dict[name][1]].to(device=computation_device, dtype=computation_dtype)
+ if len(weight_up.shape) == 4:
+ weight_up = weight_up.squeeze(3).squeeze(2)
+ weight_down = weight_down.squeeze(3).squeeze(2)
+ weight_lora = alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
+ else:
+ weight_lora = alpha * torch.mm(weight_up, weight_down)
+ weight_model = state_dict_model[name].to(device=computation_device, dtype=computation_dtype)
+ weight_patched = weight_model + weight_lora
+ state_dict_model[name] = weight_patched.to(device=device, dtype=dtype)
+ print(f" {len(lora_name_dict)} tensors are updated.")
+ model.load_state_dict(state_dict_model)
+
+
+
+class WanLoRAConverter:
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def align_to_opensource_format(state_dict, **kwargs):
+ state_dict = {"diffusion_model." + name.replace(".default.", "."): param for name, param in state_dict.items()}
+ return state_dict
+
+ @staticmethod
+ def align_to_dkt_format(state_dict, **kwargs):
+ state_dict = {name.replace("diffusion_model.", "").replace(".lora_A.weight", ".lora_A.default.weight").replace(".lora_B.weight", ".lora_B.default.weight"): param for name, param in state_dict.items()}
+ return state_dict
+
+def get_lora_loaders():
+ return [GeneralLoRAFromPeft()]
diff --git a/dkt/models/model_manager.py b/dkt/models/model_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..dacac99b3d22ab4d3ce93ec1ca2e1b6b3fdb034e
--- /dev/null
+++ b/dkt/models/model_manager.py
@@ -0,0 +1,421 @@
+import os, torch, json, importlib
+from typing import List
+
+from .downloader import download_models
+from .lora import get_lora_loaders
+
+from ..configs.model_config import model_loader_configs
+from .utils import load_state_dict, init_weights_on_device, hash_state_dict_keys, split_state_dict_with_prefix
+
+from .downloader import Preset_model_id, Preset_model_website
+
+def load_model_from_single_file(state_dict, model_names, model_classes, model_resource, torch_dtype, device):
+ loaded_model_names, loaded_models = [], []
+ for model_name, model_class in zip(model_names, model_classes):
+ print(f" model_name: {model_name} model_class: {model_class.__name__}")
+ state_dict_converter = model_class.state_dict_converter()
+ if model_resource == "civitai":
+ state_dict_results = state_dict_converter.from_civitai(state_dict)
+ elif model_resource == "diffusers":
+ state_dict_results = state_dict_converter.from_diffusers(state_dict)
+ if isinstance(state_dict_results, tuple):
+ model_state_dict, extra_kwargs = state_dict_results
+ print(f" This model is initialized with extra kwargs: {extra_kwargs}")
+ else:
+ model_state_dict, extra_kwargs = state_dict_results, {}
+ torch_dtype = torch.float32 if extra_kwargs.get("upcast_to_float32", False) else torch_dtype
+ with init_weights_on_device():
+ model = model_class(**extra_kwargs)
+ if hasattr(model, "eval"):
+ model = model.eval()
+ model.load_state_dict(model_state_dict, assign=True)
+ model = model.to(dtype=torch_dtype, device=device)
+ loaded_model_names.append(model_name)
+ loaded_models.append(model)
+ return loaded_model_names, loaded_models
+
+
+def load_model_from_huggingface_folder(file_path, model_names, model_classes, torch_dtype, device):
+ loaded_model_names, loaded_models = [], []
+ for model_name, model_class in zip(model_names, model_classes):
+ if torch_dtype in [torch.float32, torch.float16, torch.bfloat16]:
+ model = model_class.from_pretrained(file_path, torch_dtype=torch_dtype).eval()
+ else:
+ model = model_class.from_pretrained(file_path).eval().to(dtype=torch_dtype)
+ if torch_dtype == torch.float16 and hasattr(model, "half"):
+ model = model.half()
+ try:
+ model = model.to(device=device)
+ except:
+ pass
+ loaded_model_names.append(model_name)
+ loaded_models.append(model)
+ return loaded_model_names, loaded_models
+
+
+def load_single_patch_model_from_single_file(state_dict, model_name, model_class, base_model, extra_kwargs, torch_dtype, device):
+ print(f" model_name: {model_name} model_class: {model_class.__name__} extra_kwargs: {extra_kwargs}")
+ base_state_dict = base_model.state_dict()
+ base_model.to("cpu")
+ del base_model
+ model = model_class(**extra_kwargs)
+ model.load_state_dict(base_state_dict, strict=False)
+ model.load_state_dict(state_dict, strict=False)
+ model.to(dtype=torch_dtype, device=device)
+ return model
+
+
+def load_patch_model_from_single_file(state_dict, model_names, model_classes, extra_kwargs, model_manager, torch_dtype, device):
+ loaded_model_names, loaded_models = [], []
+ for model_name, model_class in zip(model_names, model_classes):
+ while True:
+ for model_id in range(len(model_manager.model)):
+ base_model_name = model_manager.model_name[model_id]
+ if base_model_name == model_name:
+ base_model_path = model_manager.model_path[model_id]
+ base_model = model_manager.model[model_id]
+ print(f" Adding patch model to {base_model_name} ({base_model_path})")
+ patched_model = load_single_patch_model_from_single_file(
+ state_dict, model_name, model_class, base_model, extra_kwargs, torch_dtype, device)
+ loaded_model_names.append(base_model_name)
+ loaded_models.append(patched_model)
+ model_manager.model.pop(model_id)
+ model_manager.model_path.pop(model_id)
+ model_manager.model_name.pop(model_id)
+ break
+ else:
+ break
+ return loaded_model_names, loaded_models
+
+
+
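+# Detector interface: match() decides whether a checkpoint belongs to this detector, load() instantiates the corresponding models.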
+class ModelDetectorTemplate:
+ def __init__(self):
+ pass
+
+ def match(self, file_path="", state_dict={}):
+ return False
+
+ def load(self, file_path="", state_dict={}, device="cuda", torch_dtype=torch.float16, **kwargs):
+ return [], []
+
+
+
+class ModelDetectorFromSingleFile:
+ def __init__(self, model_loader_configs=[]):
+ self.keys_hash_with_shape_dict = {}
+ self.keys_hash_dict = {}
+ for metadata in model_loader_configs:
+ self.add_model_metadata(*metadata)
+
+
+ def add_model_metadata(self, keys_hash, keys_hash_with_shape, model_names, model_classes, model_resource):
+ self.keys_hash_with_shape_dict[keys_hash_with_shape] = (model_names, model_classes, model_resource)
+ if keys_hash is not None:
+ self.keys_hash_dict[keys_hash] = (model_names, model_classes, model_resource)
+
+
+ def match(self, file_path="", state_dict={}):
+ if isinstance(file_path, str) and os.path.isdir(file_path):
+ return False
+ if len(state_dict) == 0:
+ state_dict = load_state_dict(file_path)
+ keys_hash_with_shape = hash_state_dict_keys(state_dict, with_shape=True)
+ if keys_hash_with_shape in self.keys_hash_with_shape_dict:
+ return True
+ keys_hash = hash_state_dict_keys(state_dict, with_shape=False)
+ if keys_hash in self.keys_hash_dict:
+ return True
+ return False
+
+
+ def load(self, file_path="", state_dict={}, device="cuda", torch_dtype=torch.float16, **kwargs):
+ if len(state_dict) == 0:
+ state_dict = load_state_dict(file_path)
+
+ # Load models with strict matching
+ keys_hash_with_shape = hash_state_dict_keys(state_dict, with_shape=True)
+ if keys_hash_with_shape in self.keys_hash_with_shape_dict:
+ model_names, model_classes, model_resource = self.keys_hash_with_shape_dict[keys_hash_with_shape]
+ loaded_model_names, loaded_models = load_model_from_single_file(state_dict, model_names, model_classes, model_resource, torch_dtype, device)
+ return loaded_model_names, loaded_models
+
+ # Load models without strict matching
+ # (the shape of parameters may be inconsistent, and the state_dict_converter will modify the model architecture)
+ keys_hash = hash_state_dict_keys(state_dict, with_shape=False)
+ if keys_hash in self.keys_hash_dict:
+ model_names, model_classes, model_resource = self.keys_hash_dict[keys_hash]
+ loaded_model_names, loaded_models = load_model_from_single_file(state_dict, model_names, model_classes, model_resource, torch_dtype, device)
+ return loaded_model_names, loaded_models
+
+ return [], []
+
+
+
+class ModelDetectorFromSplitedSingleFile(ModelDetectorFromSingleFile):
+ def __init__(self, model_loader_configs=[]):
+ super().__init__(model_loader_configs)
+
+
+ def match(self, file_path="", state_dict={}):
+ if isinstance(file_path, str) and os.path.isdir(file_path):
+ return False
+ if len(state_dict) == 0:
+ state_dict = load_state_dict(file_path)
+ splited_state_dict = split_state_dict_with_prefix(state_dict)
+ for sub_state_dict in splited_state_dict:
+ if super().match(file_path, sub_state_dict):
+ return True
+ return False
+
+
+ def load(self, file_path="", state_dict={}, device="cuda", torch_dtype=torch.float16, **kwargs):
+ # Split the state_dict and load from each component
+ splited_state_dict = split_state_dict_with_prefix(state_dict)
+ valid_state_dict = {}
+ for sub_state_dict in splited_state_dict:
+ if super().match(file_path, sub_state_dict):
+ valid_state_dict.update(sub_state_dict)
+ if super().match(file_path, valid_state_dict):
+ loaded_model_names, loaded_models = super().load(file_path, valid_state_dict, device, torch_dtype)
+ else:
+ loaded_model_names, loaded_models = [], []
+ for sub_state_dict in splited_state_dict:
+ if super().match(file_path, sub_state_dict):
+ loaded_model_names_, loaded_models_ = super().load(file_path, sub_state_dict, device, torch_dtype)
+ loaded_model_names += loaded_model_names_
+ loaded_models += loaded_models_
+ return loaded_model_names, loaded_models
+
+
+
+class ModelDetectorFromHuggingfaceFolder:
+ def __init__(self, model_loader_configs=[]):
+ self.architecture_dict = {}
+ for metadata in model_loader_configs:
+ self.add_model_metadata(*metadata)
+
+
+ def add_model_metadata(self, architecture, huggingface_lib, model_name, redirected_architecture):
+ self.architecture_dict[architecture] = (huggingface_lib, model_name, redirected_architecture)
+
+
+ def match(self, file_path="", state_dict={}):
+ if not isinstance(file_path, str) or os.path.isfile(file_path):
+ return False
+ file_list = os.listdir(file_path)
+ if "config.json" not in file_list:
+ return False
+ with open(os.path.join(file_path, "config.json"), "r") as f:
+ config = json.load(f)
+ if "architectures" not in config and "_class_name" not in config:
+ return False
+ return True
+
+
+ def load(self, file_path="", state_dict={}, device="cuda", torch_dtype=torch.float16, **kwargs):
+ with open(os.path.join(file_path, "config.json"), "r") as f:
+ config = json.load(f)
+ loaded_model_names, loaded_models = [], []
+ architectures = config["architectures"] if "architectures" in config else [config["_class_name"]]
+ for architecture in architectures:
+ huggingface_lib, model_name, redirected_architecture = self.architecture_dict[architecture]
+ if redirected_architecture is not None:
+ architecture = redirected_architecture
+ model_class = importlib.import_module(huggingface_lib).__getattribute__(architecture)
+ loaded_model_names_, loaded_models_ = load_model_from_huggingface_folder(file_path, [model_name], [model_class], torch_dtype, device)
+ loaded_model_names += loaded_model_names_
+ loaded_models += loaded_models_
+ return loaded_model_names, loaded_models
+
+
+
+class ModelDetectorFromPatchedSingleFile:
+ def __init__(self, model_loader_configs=[]):
+ self.keys_hash_with_shape_dict = {}
+ for metadata in model_loader_configs:
+ self.add_model_metadata(*metadata)
+
+
+ def add_model_metadata(self, keys_hash_with_shape, model_name, model_class, extra_kwargs):
+ self.keys_hash_with_shape_dict[keys_hash_with_shape] = (model_name, model_class, extra_kwargs)
+
+
+ def match(self, file_path="", state_dict={}):
+ if not isinstance(file_path, str) or os.path.isdir(file_path):
+ return False
+ if len(state_dict) == 0:
+ state_dict = load_state_dict(file_path)
+ keys_hash_with_shape = hash_state_dict_keys(state_dict, with_shape=True)
+ if keys_hash_with_shape in self.keys_hash_with_shape_dict:
+ return True
+ return False
+
+
+ def load(self, file_path="", state_dict={}, device="cuda", torch_dtype=torch.float16, model_manager=None, **kwargs):
+ if len(state_dict) == 0:
+ state_dict = load_state_dict(file_path)
+
+ # Load models with strict matching
+ loaded_model_names, loaded_models = [], []
+ keys_hash_with_shape = hash_state_dict_keys(state_dict, with_shape=True)
+ if keys_hash_with_shape in self.keys_hash_with_shape_dict:
+ model_names, model_classes, extra_kwargs = self.keys_hash_with_shape_dict[keys_hash_with_shape]
+ loaded_model_names_, loaded_models_ = load_patch_model_from_single_file(
+ state_dict, model_names, model_classes, extra_kwargs, model_manager, torch_dtype, device)
+ loaded_model_names += loaded_model_names_
+ loaded_models += loaded_models_
+ return loaded_model_names, loaded_models
+
+
+
+class ModelManager:
+ def __init__(
+ self,
+ torch_dtype=torch.float16,
+ device="cuda",
+ model_id_list: List[Preset_model_id] = [],
+ downloading_priority: List[Preset_model_website] = ["ModelScope", "HuggingFace"],
+ file_path_list: List[str] = [],
+ ):
+ self.torch_dtype = torch_dtype
+ self.device = device
+ self.model = []
+ self.model_path = []
+ self.model_name = []
+ downloaded_files = download_models(model_id_list, downloading_priority) if len(model_id_list) > 0 else []
+ self.model_detector = [
+ ModelDetectorFromSingleFile(model_loader_configs),
+ ModelDetectorFromSplitedSingleFile(model_loader_configs),
+ ]
+ self.load_models(downloaded_files + file_path_list)
+
+
+ def load_model_from_single_file(self, file_path="", state_dict={}, model_names=[], model_classes=[], model_resource=None):
+ print(f"Loading models from file: {file_path}")
+ if len(state_dict) == 0:
+ state_dict = load_state_dict(file_path)
+ model_names, models = load_model_from_single_file(state_dict, model_names, model_classes, model_resource, self.torch_dtype, self.device)
+ for model_name, model in zip(model_names, models):
+ self.model.append(model)
+ self.model_path.append(file_path)
+ self.model_name.append(model_name)
+ print(f" The following models are loaded: {model_names}.")
+
+
+ def load_model_from_huggingface_folder(self, file_path="", model_names=[], model_classes=[]):
+ print(f"Loading models from folder: {file_path}")
+ model_names, models = load_model_from_huggingface_folder(file_path, model_names, model_classes, self.torch_dtype, self.device)
+ for model_name, model in zip(model_names, models):
+ self.model.append(model)
+ self.model_path.append(file_path)
+ self.model_name.append(model_name)
+ print(f" The following models are loaded: {model_names}.")
+
+
+ def load_patch_model_from_single_file(self, file_path="", state_dict={}, model_names=[], model_classes=[], extra_kwargs={}):
+ print(f"Loading patch models from file: {file_path}")
+ model_names, models = load_patch_model_from_single_file(
+ state_dict, model_names, model_classes, extra_kwargs, self, self.torch_dtype, self.device)
+ for model_name, model in zip(model_names, models):
+ self.model.append(model)
+ self.model_path.append(file_path)
+ self.model_name.append(model_name)
+ print(f" The following patched models are loaded: {model_names}.")
+
+
+ def load_lora(self, file_path="", state_dict={}, lora_alpha=1.0):
+ if isinstance(file_path, list):
+ for file_path_ in file_path:
+ self.load_lora(file_path_, state_dict=state_dict, lora_alpha=lora_alpha)
+ else:
+ print(f"Loading LoRA models from file: {file_path}")
+ is_loaded = False
+ if len(state_dict) == 0:
+ state_dict = load_state_dict(file_path)
+ for model_name, model, model_path in zip(self.model_name, self.model, self.model_path):
+ for lora in get_lora_loaders():
+ match_results = lora.match(model, state_dict)
+ if match_results is not None:
+ print(f" Adding LoRA to {model_name} ({model_path}).")
+ lora_prefix, model_resource = match_results
+ lora.load(model, state_dict, lora_prefix, alpha=lora_alpha, model_resource=model_resource)
+ is_loaded = True
+ break
+ if not is_loaded:
+ print(f" Cannot load LoRA: {file_path}")
+
+
+ def load_model(self, file_path, model_names=None, device=None, torch_dtype=None):
+ print(f"Loading models from: {file_path}")
+ if device is None: device = self.device
+ if torch_dtype is None: torch_dtype = self.torch_dtype
+ if isinstance(file_path, list):
+ state_dict = {}
+ for path in file_path:
+ state_dict.update(load_state_dict(path))
+ elif os.path.isfile(file_path):
+ state_dict = load_state_dict(file_path)
+ else:
+ state_dict = None
+ for model_detector in self.model_detector:
+ if model_detector.match(file_path, state_dict):
+ model_names, models = model_detector.load(
+ file_path, state_dict,
+ device=device, torch_dtype=torch_dtype,
+ allowed_model_names=model_names, model_manager=self
+ )
+ for model_name, model in zip(model_names, models):
+ self.model.append(model)
+ self.model_path.append(file_path)
+ self.model_name.append(model_name)
+ print(f" The following models are loaded: {model_names}.")
+ break
+ else:
+ print(f" We cannot detect the model type. No models are loaded.")
+
+
+ def load_models(self, file_path_list, model_names=None, device=None, torch_dtype=None):
+ for file_path in file_path_list:
+ self.load_model(file_path, model_names, device=device, torch_dtype=torch_dtype)
+
+
+ def fetch_model(self, model_name, file_path=None, require_model_path=False, index=None):
+ fetched_models = []
+ fetched_model_paths = []
+ for model, model_path, model_name_ in zip(self.model, self.model_path, self.model_name):
+ if file_path is not None and file_path != model_path:
+ continue
+ if model_name == model_name_:
+ fetched_models.append(model)
+ fetched_model_paths.append(model_path)
+ if len(fetched_models) == 0:
+ print(f"No {model_name} models available.")
+ return None
+ if len(fetched_models) == 1:
+ print(f"Using {model_name} from {fetched_model_paths[0]}.")
+ model = fetched_models[0]
+ path = fetched_model_paths[0]
+ else:
+ if index is None:
+ model = fetched_models[0]
+ path = fetched_model_paths[0]
+ print(f"More than one {model_name} models are loaded in model manager: {fetched_model_paths}. Using {model_name} from {fetched_model_paths[0]}.")
+ elif isinstance(index, int):
+ model = fetched_models[:index]
+ path = fetched_model_paths[:index]
+ print(f"More than one {model_name} models are loaded in model manager: {fetched_model_paths}. Using {model_name} from {fetched_model_paths[:index]}.")
+ else:
+ model = fetched_models
+ path = fetched_model_paths
+ print(f"More than one {model_name} models are loaded in model manager: {fetched_model_paths}. Using {model_name} from {fetched_model_paths}.")
+ if require_model_path:
+ return model, path
+ else:
+ return model
+
+
+ def to(self, device):
+ for model in self.model:
+ model.to(device)
+
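+# A minimal usage sketch (the checkpoint paths and the "wan_video_dit" model name below
+# are placeholders; the actual names depend on the entries registered in
+# model_loader_configs):
+#
+#   model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cuda")
+#   model_manager.load_models(["models/dit.safetensors", "models/vae.safetensors"])
+#   dit = model_manager.fetch_model("wan_video_dit")
+#   model_manager.to("cpu")  # move every loaded model off the GPU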
diff --git a/dkt/models/tiler.py b/dkt/models/tiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..dff5ebf2674b504f0b66a6ba7aba800e048f5099
--- /dev/null
+++ b/dkt/models/tiler.py
@@ -0,0 +1,234 @@
+import torch
+from einops import rearrange, repeat
+
+
+class TileWorker:
+ def __init__(self):
+ pass
+
+
+ def mask(self, height, width, border_width):
+ # Create a blending mask with shape (height, width).
+ # The central area is filled with 1, and values ramp down linearly within `border_width` pixels of each edge (range (0, 1]).
+ x = torch.arange(height).repeat(width, 1).T
+ y = torch.arange(width).repeat(height, 1)
+ mask = torch.stack([x + 1, height - x, y + 1, width - y]).min(dim=0).values
+ mask = (mask / border_width).clip(0, 1)
+ return mask
+
+
+ def tile(self, model_input, tile_size, tile_stride, tile_device, tile_dtype):
+ # Convert a tensor (b, c, h, w) to (b, c, tile_size, tile_size, tile_num)
+ batch_size, channel, _, _ = model_input.shape
+ model_input = model_input.to(device=tile_device, dtype=tile_dtype)
+ unfold_operator = torch.nn.Unfold(
+ kernel_size=(tile_size, tile_size),
+ stride=(tile_stride, tile_stride)
+ )
+ model_input = unfold_operator(model_input)
+ model_input = model_input.view((batch_size, channel, tile_size, tile_size, -1))
+
+ return model_input
+
+
+ def tiled_inference(self, forward_fn, model_input, tile_batch_size, inference_device, inference_dtype, tile_device, tile_dtype):
+ # Call y=forward_fn(x) for each tile
+ tile_num = model_input.shape[-1]
+ model_output_stack = []
+
+ for tile_id in range(0, tile_num, tile_batch_size):
+
+ # process input
+ tile_id_ = min(tile_id + tile_batch_size, tile_num)
+ x = model_input[:, :, :, :, tile_id: tile_id_]
+ x = x.to(device=inference_device, dtype=inference_dtype)
+ x = rearrange(x, "b c h w n -> (n b) c h w")
+
+ # process output
+ y = forward_fn(x)
+ y = rearrange(y, "(n b) c h w -> b c h w n", n=tile_id_-tile_id)
+ y = y.to(device=tile_device, dtype=tile_dtype)
+ model_output_stack.append(y)
+
+ model_output = torch.concat(model_output_stack, dim=-1)
+ return model_output
+
+
+ def io_scale(self, model_output, tile_size):
+ # Determine the spatial scaling factor that forward_fn applied to each tile.
+ # Only a uniform scale on height and width is supported.
+ io_scale = model_output.shape[2] / tile_size
+ return io_scale
+
+
+ def untile(self, model_output, height, width, tile_size, tile_stride, border_width, tile_device, tile_dtype):
+ # The inverse of `tile`: blend the tiles back into a full (b, c, height, width) tensor.
+ mask = self.mask(tile_size, tile_size, border_width)
+ mask = mask.to(device=tile_device, dtype=tile_dtype)
+ mask = rearrange(mask, "h w -> 1 1 h w 1")
+ model_output = model_output * mask
+
+ fold_operator = torch.nn.Fold(
+ output_size=(height, width),
+ kernel_size=(tile_size, tile_size),
+ stride=(tile_stride, tile_stride)
+ )
+ mask = repeat(mask[0, 0, :, :, 0], "h w -> 1 (h w) n", n=model_output.shape[-1])
+ model_output = rearrange(model_output, "b c h w n -> b (c h w) n")
+ model_output = fold_operator(model_output) / fold_operator(mask)
+
+ return model_output
+
+
+ def tiled_forward(self, forward_fn, model_input, tile_size, tile_stride, tile_batch_size=1, tile_device="cpu", tile_dtype=torch.float32, border_width=None):
+ # Prepare
+ inference_device, inference_dtype = model_input.device, model_input.dtype
+ height, width = model_input.shape[2], model_input.shape[3]
+ border_width = int(tile_stride*0.5) if border_width is None else border_width
+
+ # tile
+ model_input = self.tile(model_input, tile_size, tile_stride, tile_device, tile_dtype)
+
+ # inference
+ model_output = self.tiled_inference(forward_fn, model_input, tile_batch_size, inference_device, inference_dtype, tile_device, tile_dtype)
+
+ # resize
+ io_scale = self.io_scale(model_output, tile_size)
+ height, width = int(height*io_scale), int(width*io_scale)
+ tile_size, tile_stride = int(tile_size*io_scale), int(tile_stride*io_scale)
+ border_width = int(border_width*io_scale)
+
+ # untile
+ model_output = self.untile(model_output, height, width, tile_size, tile_stride, border_width, tile_device, tile_dtype)
+
+ # Done!
+ model_output = model_output.to(device=inference_device, dtype=inference_dtype)
+ return model_output
+
+
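+# A minimal usage sketch of TileWorker.tiled_forward (assumption: `decoder` is any
+# module mapping (b, c, h, w) -> (b, c', s*h, s*w) with the same scale on height and width):
+#
+#   worker = TileWorker()
+#   output = worker.tiled_forward(
+#       lambda x: decoder(x), latents,
+#       tile_size=64, tile_stride=32, tile_batch_size=1,
+#       tile_device="cpu", tile_dtype=torch.float32,
+#   )
+#
+# Overlapping tiles are blended with the border mask from `mask`, so seams between
+# tiles are averaged away instead of showing hard edges.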
+
+class FastTileWorker:
+ def __init__(self):
+ pass
+
+
+ def build_mask(self, data, is_bound):
+ _, _, H, W = data.shape
+ h = repeat(torch.arange(H), "H -> H W", H=H, W=W)
+ w = repeat(torch.arange(W), "W -> H W", H=H, W=W)
+ border_width = (H + W) // 4
+ pad = torch.ones_like(h) * border_width
+ mask = torch.stack([
+ pad if is_bound[0] else h + 1,
+ pad if is_bound[1] else H - h,
+ pad if is_bound[2] else w + 1,
+ pad if is_bound[3] else W - w
+ ]).min(dim=0).values
+ mask = mask.clip(1, border_width)
+ mask = (mask / border_width).to(dtype=data.dtype, device=data.device)
+ mask = rearrange(mask, "H W -> 1 H W")
+ return mask
+
+
+ def tiled_forward(self, forward_fn, model_input, tile_size, tile_stride, tile_device="cpu", tile_dtype=torch.float32, border_width=None):
+ # Prepare
+ B, C, H, W = model_input.shape
+ border_width = int(tile_stride*0.5) if border_width is None else border_width
+ weight = torch.zeros((1, 1, H, W), dtype=tile_dtype, device=tile_device)
+ values = torch.zeros((B, C, H, W), dtype=tile_dtype, device=tile_device)
+
+ # Split tasks
+ tasks = []
+ for h in range(0, H, tile_stride):
+ for w in range(0, W, tile_stride):
+ if (h-tile_stride >= 0 and h-tile_stride+tile_size >= H) or (w-tile_stride >= 0 and w-tile_stride+tile_size >= W):
+ continue
+ h_, w_ = h + tile_size, w + tile_size
+ if h_ > H: h, h_ = H - tile_size, H
+ if w_ > W: w, w_ = W - tile_size, W
+ tasks.append((h, h_, w, w_))
+
+ # Run
+ for hl, hr, wl, wr in tasks:
+ # Forward
+ hidden_states_batch = forward_fn(hl, hr, wl, wr).to(dtype=tile_dtype, device=tile_device)
+
+ mask = self.build_mask(hidden_states_batch, is_bound=(hl==0, hr>=H, wl==0, wr>=W))
+ values[:, :, hl:hr, wl:wr] += hidden_states_batch * mask
+ weight[:, :, hl:hr, wl:wr] += mask
+ values /= weight
+ return values
+
+
+
+class TileWorker2Dto3D:
+ """
+ Process 3D tensors, but only enable TileWorker on 2D.
+ """
+ def __init__(self):
+ pass
+
+
+ def build_mask(self, T, H, W, dtype, device, is_bound, border_width):
+ t = repeat(torch.arange(T), "T -> T H W", T=T, H=H, W=W)
+ h = repeat(torch.arange(H), "H -> T H W", T=T, H=H, W=W)
+ w = repeat(torch.arange(W), "W -> T H W", T=T, H=H, W=W)
+ border_width = (H + W) // 4 if border_width is None else border_width
+ pad = torch.ones_like(h) * border_width
+ mask = torch.stack([
+ pad if is_bound[0] else t + 1,
+ pad if is_bound[1] else T - t,
+ pad if is_bound[2] else h + 1,
+ pad if is_bound[3] else H - h,
+ pad if is_bound[4] else w + 1,
+ pad if is_bound[5] else W - w
+ ]).min(dim=0).values
+ mask = mask.clip(1, border_width)
+ mask = (mask / border_width).to(dtype=dtype, device=device)
+ mask = rearrange(mask, "T H W -> 1 1 T H W")
+ return mask
+
+
+ def tiled_forward(
+ self,
+ forward_fn,
+ model_input,
+ tile_size, tile_stride,
+ tile_device="cpu", tile_dtype=torch.float32,
+ computation_device="cuda", computation_dtype=torch.float32,
+ border_width=None, scales=[1, 1, 1, 1],
+ progress_bar=lambda x:x
+ ):
+ B, C, T, H, W = model_input.shape
+ scale_C, scale_T, scale_H, scale_W = scales
+ tile_size_H, tile_size_W = tile_size
+ tile_stride_H, tile_stride_W = tile_stride
+
+ value = torch.zeros((B, int(C*scale_C), int(T*scale_T), int(H*scale_H), int(W*scale_W)), dtype=tile_dtype, device=tile_device)
+ weight = torch.zeros((1, 1, int(T*scale_T), int(H*scale_H), int(W*scale_W)), dtype=tile_dtype, device=tile_device)
+
+ # Split tasks
+ tasks = []
+ for h in range(0, H, tile_stride_H):
+ for w in range(0, W, tile_stride_W):
+ if (h-tile_stride_H >= 0 and h-tile_stride_H+tile_size_H >= H) or (w-tile_stride_W >= 0 and w-tile_stride_W+tile_size_W >= W):
+ continue
+ h_, w_ = h + tile_size_H, w + tile_size_W
+ if h_ > H: h, h_ = max(H - tile_size_H, 0), H
+ if w_ > W: w, w_ = max(W - tile_size_W, 0), W
+ tasks.append((h, h_, w, w_))
+
+ # Run
+ for hl, hr, wl, wr in progress_bar(tasks):
+ mask = self.build_mask(
+ int(T*scale_T), int((hr-hl)*scale_H), int((wr-wl)*scale_W),
+ tile_dtype, tile_device,
+ is_bound=(True, True, hl==0, hr>=H, wl==0, wr>=W),
+ border_width=border_width
+ )
+ grid_input = model_input[:, :, :, hl:hr, wl:wr].to(dtype=computation_dtype, device=computation_device)
+ grid_output = forward_fn(grid_input).to(dtype=tile_dtype, device=tile_device)
+ value[:, :, :, int(hl*scale_H):int(hr*scale_H), int(wl*scale_W):int(wr*scale_W)] += grid_output * mask
+ weight[:, :, :, int(hl*scale_H):int(hr*scale_H), int(wl*scale_W):int(wr*scale_W)] += mask
+ value = value / weight
+ return value
\ No newline at end of file
diff --git a/dkt/models/utils.py b/dkt/models/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..86104d043a54350dfaf74eb5626bfcc0eb1d68e2
--- /dev/null
+++ b/dkt/models/utils.py
@@ -0,0 +1,182 @@
+import torch, os
+from safetensors import safe_open
+from contextlib import contextmanager
+import hashlib
+
+@contextmanager
+def init_weights_on_device(device = torch.device("meta"), include_buffers :bool = False):
+
+ old_register_parameter = torch.nn.Module.register_parameter
+ if include_buffers:
+ old_register_buffer = torch.nn.Module.register_buffer
+
+ def register_empty_parameter(module, name, param):
+ old_register_parameter(module, name, param)
+ if param is not None:
+ param_cls = type(module._parameters[name])
+ kwargs = module._parameters[name].__dict__
+ kwargs["requires_grad"] = param.requires_grad
+ module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
+
+ def register_empty_buffer(module, name, buffer, persistent=True):
+ old_register_buffer(module, name, buffer, persistent=persistent)
+ if buffer is not None:
+ module._buffers[name] = module._buffers[name].to(device)
+
+ def patch_tensor_constructor(fn):
+ def wrapper(*args, **kwargs):
+ kwargs["device"] = device
+ return fn(*args, **kwargs)
+
+ return wrapper
+
+ if include_buffers:
+ tensor_constructors_to_patch = {
+ torch_function_name: getattr(torch, torch_function_name)
+ for torch_function_name in ["empty", "zeros", "ones", "full"]
+ }
+ else:
+ tensor_constructors_to_patch = {}
+
+ try:
+ torch.nn.Module.register_parameter = register_empty_parameter
+ if include_buffers:
+ torch.nn.Module.register_buffer = register_empty_buffer
+ for torch_function_name in tensor_constructors_to_patch.keys():
+ setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
+ yield
+ finally:
+ torch.nn.Module.register_parameter = old_register_parameter
+ if include_buffers:
+ torch.nn.Module.register_buffer = old_register_buffer
+ for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
+ setattr(torch, torch_function_name, old_torch_function)
+
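+# A minimal usage sketch: build a large module on the meta device so no real memory is
+# allocated, then attach real weights with load_state_dict(..., assign=True)
+# (`MyModel` and `state_dict` are placeholders):
+#
+#   with init_weights_on_device(torch.device("meta")):
+#       model = MyModel()
+#   model.load_state_dict(state_dict, assign=True)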
+def load_state_dict_from_folder(file_path, torch_dtype=None):
+ state_dict = {}
+ for file_name in os.listdir(file_path):
+ if "." in file_name and file_name.split(".")[-1] in [
+ "safetensors", "bin", "ckpt", "pth", "pt"
+ ]:
+ state_dict.update(load_state_dict(os.path.join(file_path, file_name), torch_dtype=torch_dtype))
+ return state_dict
+
+
+def load_state_dict(file_path, torch_dtype=None, device="cpu"):
+ if file_path.endswith(".safetensors"):
+ return load_state_dict_from_safetensors(file_path, torch_dtype=torch_dtype, device=device)
+ else:
+ return load_state_dict_from_bin(file_path, torch_dtype=torch_dtype, device=device)
+
+
+def load_state_dict_from_safetensors(file_path, torch_dtype=None, device="cpu"):
+ state_dict = {}
+ with safe_open(file_path, framework="pt", device=str(device)) as f:
+ for k in f.keys():
+ state_dict[k] = f.get_tensor(k)
+ if torch_dtype is not None:
+ state_dict[k] = state_dict[k].to(torch_dtype)
+ return state_dict
+
+
+def load_state_dict_from_bin(file_path, torch_dtype=None, device="cpu"):
+ state_dict = torch.load(file_path, map_location=device, weights_only=True)
+ if torch_dtype is not None:
+ for i in state_dict:
+ if isinstance(state_dict[i], torch.Tensor):
+ state_dict[i] = state_dict[i].to(torch_dtype)
+ return state_dict
+
+
+def search_for_embeddings(state_dict):
+ embeddings = []
+ for k in state_dict:
+ if isinstance(state_dict[k], torch.Tensor):
+ embeddings.append(state_dict[k])
+ elif isinstance(state_dict[k], dict):
+ embeddings += search_for_embeddings(state_dict[k])
+ return embeddings
+
+
+def search_parameter(param, state_dict):
+ for name, param_ in state_dict.items():
+ if param.numel() == param_.numel():
+ if param.shape == param_.shape:
+ if torch.dist(param, param_) < 1e-3:
+ return name
+ else:
+ if torch.dist(param.flatten(), param_.flatten()) < 1e-3:
+ return name
+ return None
+
+
+def build_rename_dict(source_state_dict, target_state_dict, split_qkv=False):
+ matched_keys = set()
+ with torch.no_grad():
+ for name in source_state_dict:
+ rename = search_parameter(source_state_dict[name], target_state_dict)
+ if rename is not None:
+ print(f'"{name}": "{rename}",')
+ matched_keys.add(rename)
+ elif split_qkv and len(source_state_dict[name].shape)>=1 and source_state_dict[name].shape[0]%3==0:
+ length = source_state_dict[name].shape[0] // 3
+ rename = []
+ for i in range(3):
+ rename.append(search_parameter(source_state_dict[name][i*length: i*length+length], target_state_dict))
+ if None not in rename:
+ print(f'"{name}": {rename},')
+ for rename_ in rename:
+ matched_keys.add(rename_)
+ for name in target_state_dict:
+ if name not in matched_keys:
+ print("Cannot find", name, target_state_dict[name].shape)
+
+
+def search_for_files(folder, extensions):
+ files = []
+ if os.path.isdir(folder):
+ for file in sorted(os.listdir(folder)):
+ files += search_for_files(os.path.join(folder, file), extensions)
+ elif os.path.isfile(folder):
+ for extension in extensions:
+ if folder.endswith(extension):
+ files.append(folder)
+ break
+ return files
+
+
+def convert_state_dict_keys_to_single_str(state_dict, with_shape=True):
+ keys = []
+ for key, value in state_dict.items():
+ if isinstance(key, str):
+ if isinstance(value, torch.Tensor):
+ if with_shape:
+ shape = "_".join(map(str, list(value.shape)))
+ keys.append(key + ":" + shape)
+ keys.append(key)
+ elif isinstance(value, dict):
+ keys.append(key + "|" + convert_state_dict_keys_to_single_str(value, with_shape=with_shape))
+ keys.sort()
+ keys_str = ",".join(keys)
+ return keys_str
+
+
+def split_state_dict_with_prefix(state_dict):
+ keys = sorted([key for key in state_dict if isinstance(key, str)])
+ prefix_dict = {}
+ for key in keys:
+ prefix = key if "." not in key else key.split(".")[0]
+ if prefix not in prefix_dict:
+ prefix_dict[prefix] = []
+ prefix_dict[prefix].append(key)
+ state_dicts = []
+ for prefix, keys in prefix_dict.items():
+ sub_state_dict = {key: state_dict[key] for key in keys}
+ state_dicts.append(sub_state_dict)
+ return state_dicts
+
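+# For example, a combined checkpoint with keys
+# {"vae.encoder.weight", "vae.decoder.weight", "dit.blocks.0.weight"} is split into
+# two sub state dicts, one for the "vae." prefix and one for the "dit." prefix, so
+# each component can be matched and loaded separately.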
+
+def hash_state_dict_keys(state_dict, with_shape=True):
+ keys_str = convert_state_dict_keys_to_single_str(state_dict, with_shape=with_shape)
+ keys_str = keys_str.encode(encoding="UTF-8")
+ return hashlib.md5(keys_str).hexdigest()
\ No newline at end of file
diff --git a/dkt/models/wan_video_camera_controller.py b/dkt/models/wan_video_camera_controller.py
new file mode 100644
index 0000000000000000000000000000000000000000..026b5581c4284abace2fcab8db2d33d8e518dd35
--- /dev/null
+++ b/dkt/models/wan_video_camera_controller.py
@@ -0,0 +1,202 @@
+import torch
+import torch.nn as nn
+import numpy as np
+from einops import rearrange
+import os
+from typing_extensions import Literal
+
+class SimpleAdapter(nn.Module):
+ def __init__(self, in_dim, out_dim, kernel_size, stride, num_residual_blocks=1):
+ super(SimpleAdapter, self).__init__()
+
+ # Pixel Unshuffle: reduce spatial dimensions by a factor of 8
+ self.pixel_unshuffle = nn.PixelUnshuffle(downscale_factor=8)
+
+ # Convolution: reduce spatial dimensions by a factor
+ # of 2 (without overlap)
+ self.conv = nn.Conv2d(in_dim * 64, out_dim, kernel_size=kernel_size, stride=stride, padding=0)
+
+ # Residual blocks for feature extraction
+ self.residual_blocks = nn.Sequential(
+ *[ResidualBlock(out_dim) for _ in range(num_residual_blocks)]
+ )
+
+ def forward(self, x):
+ # Reshape to merge the frame dimension into batch
+ bs, c, f, h, w = x.size()
+ x = x.permute(0, 2, 1, 3, 4).contiguous().view(bs * f, c, h, w)
+
+ # Pixel Unshuffle operation
+ x_unshuffled = self.pixel_unshuffle(x)
+
+ # Convolution operation
+ x_conv = self.conv(x_unshuffled)
+
+ # Feature extraction with residual blocks
+ out = self.residual_blocks(x_conv)
+
+ # Split the merged batch*frame dimension back into (bs, f)
+ out = out.view(bs, f, out.size(1), out.size(2), out.size(3))
+
+ # Permute back to (bs, channels, frames, h, w)
+ out = out.permute(0, 2, 1, 3, 4)
+
+ return out
+
+ def process_camera_coordinates(
+ self,
+ direction: Literal["Left", "Right", "Up", "Down", "LeftUp", "LeftDown", "RightUp", "RightDown"],
+ length: int,
+ height: int,
+ width: int,
+ speed: float = 1/54,
+ origin=(0, 0.532139961, 0.946026558, 0.5, 0.5, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0)
+ ):
+ if origin is None:
+ origin = (0, 0.532139961, 0.946026558, 0.5, 0.5, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0)
+ coordinates = generate_camera_coordinates(direction, length, speed, origin)
+ plucker_embedding = process_pose_file(coordinates, width, height)
+ return plucker_embedding
+
+
+
+class ResidualBlock(nn.Module):
+ def __init__(self, dim):
+ super(ResidualBlock, self).__init__()
+ self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
+ self.relu = nn.ReLU(inplace=True)
+ self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
+
+ def forward(self, x):
+ residual = x
+ out = self.relu(self.conv1(x))
+ out = self.conv2(out)
+ out += residual
+ return out
+
+class Camera(object):
+ """Copied from https://github.com/hehao13/CameraCtrl/blob/main/inference.py
+ """
+ def __init__(self, entry):
+ fx, fy, cx, cy = entry[1:5]
+ self.fx = fx
+ self.fy = fy
+ self.cx = cx
+ self.cy = cy
+ w2c_mat = np.array(entry[7:]).reshape(3, 4)
+ w2c_mat_4x4 = np.eye(4)
+ w2c_mat_4x4[:3, :] = w2c_mat
+ self.w2c_mat = w2c_mat_4x4
+ self.c2w_mat = np.linalg.inv(w2c_mat_4x4)
+
+def get_relative_pose(cam_params):
+ """Copied from https://github.com/hehao13/CameraCtrl/blob/main/inference.py
+ """
+ abs_w2cs = [cam_param.w2c_mat for cam_param in cam_params]
+ abs_c2ws = [cam_param.c2w_mat for cam_param in cam_params]
+ cam_to_origin = 0
+ target_cam_c2w = np.array([
+ [1, 0, 0, 0],
+ [0, 1, 0, -cam_to_origin],
+ [0, 0, 1, 0],
+ [0, 0, 0, 1]
+ ])
+ abs2rel = target_cam_c2w @ abs_w2cs[0]
+ ret_poses = [target_cam_c2w, ] + [abs2rel @ abs_c2w for abs_c2w in abs_c2ws[1:]]
+ ret_poses = np.array(ret_poses, dtype=np.float32)
+ return ret_poses
+
+def custom_meshgrid(*args):
+ # torch>=2.0.0 only
+ return torch.meshgrid(*args, indexing='ij')
+
+
+def ray_condition(K, c2w, H, W, device):
+ """Copied from https://github.com/hehao13/CameraCtrl/blob/main/inference.py
+ """
+ # c2w: B, V, 4, 4
+ # K: B, V, 4
+
+ B = K.shape[0]
+
+ j, i = custom_meshgrid(
+ torch.linspace(0, H - 1, H, device=device, dtype=c2w.dtype),
+ torch.linspace(0, W - 1, W, device=device, dtype=c2w.dtype),
+ )
+ i = i.reshape([1, 1, H * W]).expand([B, 1, H * W]) + 0.5 # [B, HxW]
+ j = j.reshape([1, 1, H * W]).expand([B, 1, H * W]) + 0.5 # [B, HxW]
+
+ fx, fy, cx, cy = K.chunk(4, dim=-1) # B,V, 1
+
+ zs = torch.ones_like(i) # [B, HxW]
+ xs = (i - cx) / fx * zs
+ ys = (j - cy) / fy * zs
+ zs = zs.expand_as(ys)
+
+ directions = torch.stack((xs, ys, zs), dim=-1) # B, V, HW, 3
+ directions = directions / directions.norm(dim=-1, keepdim=True) # B, V, HW, 3
+
+ rays_d = directions @ c2w[..., :3, :3].transpose(-1, -2)  # B, V, HW, 3
+ rays_o = c2w[..., :3, 3]  # B, V, 3
+ rays_o = rays_o[:, :, None].expand_as(rays_d)  # B, V, HW, 3
+ # c2w @ directions
+ rays_dxo = torch.linalg.cross(rays_o, rays_d)
+ plucker = torch.cat([rays_dxo, rays_d], dim=-1)
+ plucker = plucker.reshape(B, c2w.shape[1], H, W, 6) # B, V, H, W, 6
+ # plucker = plucker.permute(0, 1, 4, 2, 3)
+ return plucker
+
+
+def process_pose_file(cam_params, width=672, height=384, original_pose_width=1280, original_pose_height=720, device='cpu', return_poses=False):
+ if return_poses:
+ return cam_params
+ else:
+ cam_params = [Camera(cam_param) for cam_param in cam_params]
+
+ sample_wh_ratio = width / height
+ pose_wh_ratio = original_pose_width / original_pose_height  # aspect ratio of the original camera-pose resolution
+
+ if pose_wh_ratio > sample_wh_ratio:
+ resized_ori_w = height * pose_wh_ratio
+ for cam_param in cam_params:
+ cam_param.fx = resized_ori_w * cam_param.fx / width
+ else:
+ resized_ori_h = width / pose_wh_ratio
+ for cam_param in cam_params:
+ cam_param.fy = resized_ori_h * cam_param.fy / height
+
+ intrinsic = np.asarray([[cam_param.fx * width,
+ cam_param.fy * height,
+ cam_param.cx * width,
+ cam_param.cy * height]
+ for cam_param in cam_params], dtype=np.float32)
+
+ K = torch.as_tensor(intrinsic)[None]  # [1, n_frame, 4]
+ c2ws = get_relative_pose(cam_params)  # relative c2w poses w.r.t. the first frame (defined above)
+ c2ws = torch.as_tensor(c2ws)[None] # [1, n_frame, 4, 4]
+ plucker_embedding = ray_condition(K, c2ws, height, width, device=device)[0].permute(0, 3, 1, 2).contiguous() # V, 6, H, W
+ plucker_embedding = plucker_embedding[None]
+ plucker_embedding = rearrange(plucker_embedding, "b f c h w -> b f h w c")[0]
+ return plucker_embedding
+
+
+
+def generate_camera_coordinates(
+ direction: Literal["Left", "Right", "Up", "Down", "LeftUp", "LeftDown", "RightUp", "RightDown"],
+ length: int,
+ speed: float = 1/54,
+ origin=(0, 0.532139961, 0.946026558, 0.5, 0.5, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0)
+):
+ coordinates = [list(origin)]
+ while len(coordinates) < length:
+ coor = coordinates[-1].copy()
+ if "Left" in direction:
+ coor[9] += speed
+ if "Right" in direction:
+ coor[9] -= speed
+ if "Up" in direction:
+ coor[13] += speed
+ if "Down" in direction:
+ coor[13] -= speed
+ coordinates.append(coor)
+ return coordinates
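+
+
+# A minimal usage sketch: generate an 81-frame leftward pan and convert it into
+# Plücker embeddings for the control adapter (using the default 672x384 resolution):
+#
+#   coords = generate_camera_coordinates("Left", length=81)
+#   plucker = process_pose_file(coords, width=672, height=384)  # -> (81, 384, 672, 6)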
diff --git a/dkt/models/wan_video_dit.py b/dkt/models/wan_video_dit.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2b86b016f14557c28159db60ad3ba63d660b11f
--- /dev/null
+++ b/dkt/models/wan_video_dit.py
@@ -0,0 +1,719 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import math
+from typing import Tuple, Optional
+from einops import rearrange
+from .utils import hash_state_dict_keys
+from .wan_video_camera_controller import SimpleAdapter
+try:
+ import flash_attn_interface
+ FLASH_ATTN_3_AVAILABLE = True
+except ModuleNotFoundError:
+ FLASH_ATTN_3_AVAILABLE = False
+
+try:
+ import flash_attn
+ FLASH_ATTN_2_AVAILABLE = True
+except ModuleNotFoundError:
+ FLASH_ATTN_2_AVAILABLE = False
+
+try:
+ from sageattention import sageattn
+ SAGE_ATTN_AVAILABLE = True
+except ModuleNotFoundError:
+ SAGE_ATTN_AVAILABLE = False
+
+
+def flash_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, num_heads: int, compatibility_mode=False):
+ if compatibility_mode:
+ q = rearrange(q, "b s (n d) -> b n s d", n=num_heads)
+ k = rearrange(k, "b s (n d) -> b n s d", n=num_heads)
+ v = rearrange(v, "b s (n d) -> b n s d", n=num_heads)
+ x = F.scaled_dot_product_attention(q, k, v)
+ x = rearrange(x, "b n s d -> b s (n d)", n=num_heads)
+ elif FLASH_ATTN_3_AVAILABLE:
+ q = rearrange(q, "b s (n d) -> b s n d", n=num_heads)
+ k = rearrange(k, "b s (n d) -> b s n d", n=num_heads)
+ v = rearrange(v, "b s (n d) -> b s n d", n=num_heads)
+ x = flash_attn_interface.flash_attn_func(q, k, v)
+ if isinstance(x, tuple):
+ x = x[0]
+ x = rearrange(x, "b s n d -> b s (n d)", n=num_heads)
+ elif FLASH_ATTN_2_AVAILABLE:
+ q = rearrange(q, "b s (n d) -> b s n d", n=num_heads)
+ k = rearrange(k, "b s (n d) -> b s n d", n=num_heads)
+ v = rearrange(v, "b s (n d) -> b s n d", n=num_heads)
+ x = flash_attn.flash_attn_func(q, k, v)
+ x = rearrange(x, "b s n d -> b s (n d)", n=num_heads)
+ elif SAGE_ATTN_AVAILABLE:
+ q = rearrange(q, "b s (n d) -> b n s d", n=num_heads)
+ k = rearrange(k, "b s (n d) -> b n s d", n=num_heads)
+ v = rearrange(v, "b s (n d) -> b n s d", n=num_heads)
+ x = sageattn(q, k, v)
+ x = rearrange(x, "b n s d -> b s (n d)", n=num_heads)
+ else:
+ q = rearrange(q, "b s (n d) -> b n s d", n=num_heads)
+ k = rearrange(k, "b s (n d) -> b n s d", n=num_heads)
+ v = rearrange(v, "b s (n d) -> b n s d", n=num_heads)
+ x = F.scaled_dot_product_attention(q, k, v)
+ x = rearrange(x, "b n s d -> b s (n d)", n=num_heads)
+ return x
+
+
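+# Backend note: flash_attention prefers FlashAttention-3, then FlashAttention-2, then
+# SageAttention, and falls back to torch's scaled_dot_product_attention when none of
+# them is installed (or when compatibility_mode=True). Inputs are packed as
+# (batch, seq_len, num_heads * head_dim), e.g. (a sketch, not part of the model code):
+#
+#   q = k = v = torch.randn(1, 1024, 12 * 128, device="cuda", dtype=torch.bfloat16)
+#   out = flash_attention(q, k, v, num_heads=12)  # -> (1, 1024, 1536)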
+def modulate(x: torch.Tensor, shift: torch.Tensor, scale: torch.Tensor):
+ return (x * (1 + scale) + shift)
+
+
+def sinusoidal_embedding_1d(dim, position):
+ sinusoid = torch.outer(position.type(torch.float64), torch.pow(
+ 10000, -torch.arange(dim//2, dtype=torch.float64, device=position.device).div(dim//2)))
+ x = torch.cat([torch.cos(sinusoid), torch.sin(sinusoid)], dim=1)
+ return x.to(position.dtype)
+
+
+def precompute_freqs_cis_3d(dim: int, end: int = 1024, theta: float = 10000.0):
+ # 3d rope precompute
+ f_freqs_cis = precompute_freqs_cis(dim - 2 * (dim // 3), end, theta)
+ h_freqs_cis = precompute_freqs_cis(dim // 3, end, theta)
+ w_freqs_cis = precompute_freqs_cis(dim // 3, end, theta)
+ return f_freqs_cis, h_freqs_cis, w_freqs_cis
+
+
+def precompute_freqs_cis(dim: int, end: int = 1024, theta: float = 10000.0):
+ # 1d rope precompute
+ freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)
+ [: (dim // 2)].double() / dim))
+ freqs = torch.outer(torch.arange(end, device=freqs.device), freqs)
+ freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64
+ return freqs_cis
+
+
+def rope_apply(x, freqs, num_heads):
+ x = rearrange(x, "b s (n d) -> b s n d", n=num_heads)
+ x_out = torch.view_as_complex(x.to(torch.float64).reshape(
+ x.shape[0], x.shape[1], x.shape[2], -1, 2))
+ x_out = torch.view_as_real(x_out * freqs).flatten(2)
+ return x_out.to(x.dtype)
+
+
+class RMSNorm(nn.Module):
+ def __init__(self, dim, eps=1e-5):
+ super().__init__()
+ self.eps = eps
+ self.weight = nn.Parameter(torch.ones(dim))
+
+ def norm(self, x):
+ return x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)
+
+ def forward(self, x):
+ dtype = x.dtype
+ return self.norm(x.float()).to(dtype) * self.weight
+
+
+class AttentionModule(nn.Module):
+ def __init__(self, num_heads):
+ super().__init__()
+ self.num_heads = num_heads
+
+ def forward(self, q, k, v):
+ x = flash_attention(q=q, k=k, v=v, num_heads=self.num_heads)
+ return x
+
+
+class SelfAttention(nn.Module):
+ def __init__(self, dim: int, num_heads: int, eps: float = 1e-6):
+ super().__init__()
+ self.dim = dim
+ self.num_heads = num_heads
+ self.head_dim = dim // num_heads
+
+ self.q = nn.Linear(dim, dim)
+ self.k = nn.Linear(dim, dim)
+ self.v = nn.Linear(dim, dim)
+ self.o = nn.Linear(dim, dim)
+ self.norm_q = RMSNorm(dim, eps=eps)
+ self.norm_k = RMSNorm(dim, eps=eps)
+
+ self.attn = AttentionModule(self.num_heads)
+
+ def forward(self, x, freqs):
+ q = self.norm_q(self.q(x))
+ k = self.norm_k(self.k(x))
+ v = self.v(x)
+ q = rope_apply(q, freqs, self.num_heads)
+ k = rope_apply(k, freqs, self.num_heads)
+ x = self.attn(q, k, v)
+ return self.o(x)
+
+
+class CrossAttention(nn.Module):
+ def __init__(self, dim: int, num_heads: int, eps: float = 1e-6, has_image_input: bool = False):
+ super().__init__()
+ self.dim = dim
+ self.num_heads = num_heads
+ self.head_dim = dim // num_heads
+
+ self.q = nn.Linear(dim, dim)
+ self.k = nn.Linear(dim, dim)
+ self.v = nn.Linear(dim, dim)
+ self.o = nn.Linear(dim, dim)
+ self.norm_q = RMSNorm(dim, eps=eps)
+ self.norm_k = RMSNorm(dim, eps=eps)
+ self.has_image_input = has_image_input
+ if has_image_input:
+ self.k_img = nn.Linear(dim, dim)
+ self.v_img = nn.Linear(dim, dim)
+ self.norm_k_img = RMSNorm(dim, eps=eps)
+
+ self.attn = AttentionModule(self.num_heads)
+
+ def forward(self, x: torch.Tensor, y: torch.Tensor):
+ if self.has_image_input:
+ img = y[:, :257]
+ ctx = y[:, 257:]
+ else:
+ ctx = y
+ q = self.norm_q(self.q(x))
+ k = self.norm_k(self.k(ctx))
+ v = self.v(ctx)
+ x = self.attn(q, k, v)
+ if self.has_image_input:
+ k_img = self.norm_k_img(self.k_img(img))
+ v_img = self.v_img(img)
+ y = flash_attention(q, k_img, v_img, num_heads=self.num_heads)
+ x = x + y
+ return self.o(x)
+
+
+class GateModule(nn.Module):
+ def __init__(self,):
+ super().__init__()
+
+ def forward(self, x, gate, residual):
+ return x + gate * residual
+
+class DiTBlock(nn.Module):
+ def __init__(self, has_image_input: bool, dim: int, num_heads: int, ffn_dim: int, eps: float = 1e-6):
+ super().__init__()
+ self.dim = dim
+ self.num_heads = num_heads
+ self.ffn_dim = ffn_dim
+
+ self.self_attn = SelfAttention(dim, num_heads, eps)
+ self.cross_attn = CrossAttention(
+ dim, num_heads, eps, has_image_input=has_image_input)
+ self.norm1 = nn.LayerNorm(dim, eps=eps, elementwise_affine=False)
+ self.norm2 = nn.LayerNorm(dim, eps=eps, elementwise_affine=False)
+ self.norm3 = nn.LayerNorm(dim, eps=eps)
+ self.ffn = nn.Sequential(nn.Linear(dim, ffn_dim), nn.GELU(
+ approximate='tanh'), nn.Linear(ffn_dim, dim))
+ self.modulation = nn.Parameter(torch.randn(1, 6, dim) / dim**0.5)
+ self.gate = GateModule()
+
+ def forward(self, x, context, t_mod, freqs):
+ has_seq = len(t_mod.shape) == 4
+ chunk_dim = 2 if has_seq else 1
+ # msa: multi-head self-attention; mlp: multi-layer perceptron
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
+ self.modulation.to(dtype=t_mod.dtype, device=t_mod.device) + t_mod).chunk(6, dim=chunk_dim)
+ if has_seq:
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
+ shift_msa.squeeze(2), scale_msa.squeeze(2), gate_msa.squeeze(2),
+ shift_mlp.squeeze(2), scale_mlp.squeeze(2), gate_mlp.squeeze(2),
+ )
+ input_x = modulate(self.norm1(x), shift_msa, scale_msa)
+ x = self.gate(x, gate_msa, self.self_attn(input_x, freqs))
+ x = x + self.cross_attn(self.norm3(x), context)
+ input_x = modulate(self.norm2(x), shift_mlp, scale_mlp)
+ x = self.gate(x, gate_mlp, self.ffn(input_x))
+ return x
+
+
+class MLP(torch.nn.Module):
+ def __init__(self, in_dim, out_dim, has_pos_emb=False):
+ super().__init__()
+ self.proj = torch.nn.Sequential(
+ nn.LayerNorm(in_dim),
+ nn.Linear(in_dim, in_dim),
+ nn.GELU(),
+ nn.Linear(in_dim, out_dim),
+ nn.LayerNorm(out_dim)
+ )
+ self.has_pos_emb = has_pos_emb
+ if has_pos_emb:
+ self.emb_pos = torch.nn.Parameter(torch.zeros((1, 514, 1280)))
+
+ def forward(self, x):
+ if self.has_pos_emb:
+ x = x + self.emb_pos.to(dtype=x.dtype, device=x.device)
+ return self.proj(x)
+
+
+class Head(nn.Module):
+ def __init__(self, dim: int, out_dim: int, patch_size: Tuple[int, int, int], eps: float):
+ super().__init__()
+ self.dim = dim
+ self.patch_size = patch_size
+ self.norm = nn.LayerNorm(dim, eps=eps, elementwise_affine=False)
+ self.head = nn.Linear(dim, out_dim * math.prod(patch_size))
+ self.modulation = nn.Parameter(torch.randn(1, 2, dim) / dim**0.5)
+
+ def forward(self, x, t_mod):
+ if len(t_mod.shape) == 3:
+ shift, scale = (self.modulation.unsqueeze(0).to(dtype=t_mod.dtype, device=t_mod.device) + t_mod.unsqueeze(2)).chunk(2, dim=2)
+ x = (self.head(self.norm(x) * (1 + scale.squeeze(2)) + shift.squeeze(2)))
+ else:
+ shift, scale = (self.modulation.to(dtype=t_mod.dtype, device=t_mod.device) + t_mod).chunk(2, dim=1)
+ x = (self.head(self.norm(x) * (1 + scale) + shift))
+ return x
+
+
+class WanModel(torch.nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ in_dim: int,
+ ffn_dim: int,
+ out_dim: int,
+ text_dim: int,
+ freq_dim: int,
+ eps: float,
+ patch_size: Tuple[int, int, int],
+ num_heads: int,
+ num_layers: int,
+ has_image_input: bool,
+ has_image_pos_emb: bool = False,
+ has_ref_conv: bool = False,
+ add_control_adapter: bool = False,
+ in_dim_control_adapter: int = 24,
+ seperated_timestep: bool = False,
+ require_vae_embedding: bool = True,
+ require_clip_embedding: bool = True,
+ fuse_vae_embedding_in_latents: bool = False,
+ ):
+ super().__init__()
+ self.dim = dim
+ self.freq_dim = freq_dim
+ self.has_image_input = has_image_input
+ self.patch_size = patch_size
+ self.seperated_timestep = seperated_timestep
+ self.require_vae_embedding = require_vae_embedding
+ self.require_clip_embedding = require_clip_embedding
+ self.fuse_vae_embedding_in_latents = fuse_vae_embedding_in_latents
+
+ self.patch_embedding = nn.Conv3d(
+ in_dim, dim, kernel_size=patch_size, stride=patch_size)
+ self.text_embedding = nn.Sequential(
+ nn.Linear(text_dim, dim),
+ nn.GELU(approximate='tanh'),
+ nn.Linear(dim, dim)
+ )
+ self.time_embedding = nn.Sequential(
+ nn.Linear(freq_dim, dim),
+ nn.SiLU(),
+ nn.Linear(dim, dim)
+ )
+ self.time_projection = nn.Sequential(
+ nn.SiLU(), nn.Linear(dim, dim * 6))
+ self.blocks = nn.ModuleList([
+ DiTBlock(has_image_input, dim, num_heads, ffn_dim, eps)
+ for _ in range(num_layers)
+ ])
+ self.head = Head(dim, out_dim, patch_size, eps)
+ head_dim = dim // num_heads
+ self.freqs = precompute_freqs_cis_3d(head_dim)
+
+ if has_image_input:
+ self.img_emb = MLP(1280, dim, has_pos_emb=has_image_pos_emb) # clip_feature_dim = 1280
+ if has_ref_conv:
+ self.ref_conv = nn.Conv2d(16, dim, kernel_size=(2, 2), stride=(2, 2))
+ self.has_image_pos_emb = has_image_pos_emb
+ self.has_ref_conv = has_ref_conv
+ if add_control_adapter:
+ self.control_adapter = SimpleAdapter(in_dim_control_adapter, dim, kernel_size=patch_size[1:], stride=patch_size[1:])
+ else:
+ self.control_adapter = None
+
+ def patchify(self, x: torch.Tensor, control_camera_latents_input: torch.Tensor = None):
+
+ x = self.patch_embedding(x)  # e.g. from (1, 48, 21, 30, 40) to (1, 1536, 21, 15, 20)
+ if self.control_adapter is not None and control_camera_latents_input is not None:
+ y_camera = self.control_adapter(control_camera_latents_input)
+ x = [u + v for u, v in zip(x, y_camera)]
+ x = x[0].unsqueeze(0)
+ grid_size = x.shape[2:]  # get the patchified (F, H, W)
+ x = rearrange(x, 'b c f h w -> b (f h w) c').contiguous()
+ return x, grid_size # x, grid_size: (f, h, w)
+
+ def unpatchify(self, x: torch.Tensor, grid_size: torch.Tensor):
+ return rearrange(
+ x, 'b (f h w) (x y z c) -> b c (f x) (h y) (w z)',
+ f=grid_size[0], h=grid_size[1], w=grid_size[2],
+ x=self.patch_size[0], y=self.patch_size[1], z=self.patch_size[2]
+ )
+
+ def forward(self,
+ x: torch.Tensor,
+ timestep: torch.Tensor,
+ context: torch.Tensor,
+ clip_feature: Optional[torch.Tensor] = None,
+ y: Optional[torch.Tensor] = None,
+ use_gradient_checkpointing: bool = False,
+ use_gradient_checkpointing_offload: bool = False,
+ **kwargs,
+ ):
+ t = self.time_embedding(
+ sinusoidal_embedding_1d(self.freq_dim, timestep))
+ t_mod = self.time_projection(t).unflatten(1, (6, self.dim))
+ context = self.text_embedding(context)
+
+ if self.has_image_input:
+ x = torch.cat([x, y], dim=1) # (b, c_x + c_y, f, h, w)
+ clip_embedding = self.img_emb(clip_feature)
+ context = torch.cat([clip_embedding, context], dim=1)
+
+ x, (f, h, w) = self.patchify(x)
+
+ freqs = torch.cat([
+ self.freqs[0][:f].view(f, 1, 1, -1).expand(f, h, w, -1),
+ self.freqs[1][:h].view(1, h, 1, -1).expand(f, h, w, -1),
+ self.freqs[2][:w].view(1, 1, w, -1).expand(f, h, w, -1)
+ ], dim=-1).reshape(f * h * w, 1, -1).to(x.device)
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs)
+ return custom_forward
+
+ for block in self.blocks:
+ if self.training and use_gradient_checkpointing:
+ if use_gradient_checkpointing_offload:
+ with torch.autograd.graph.save_on_cpu():
+ x = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(block),
+ x, context, t_mod, freqs,
+ use_reentrant=False,
+ )
+ else:
+ x = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(block),
+ x, context, t_mod, freqs,
+ use_reentrant=False,
+ )
+ else:
+ x = block(x, context, t_mod, freqs)
+
+ x = self.head(x, t)
+ x = self.unpatchify(x, (f, h, w))
+ return x
+
+ @staticmethod
+ def state_dict_converter():
+ return WanModelStateDictConverter()
+
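+# A minimal construction sketch using the 1.3B text-to-video configuration that
+# from_civitai below returns for hash "9269f8db9040a9d860eaca435be61814":
+#
+#   model = WanModel(
+#       has_image_input=False, patch_size=(1, 2, 2), in_dim=16, dim=1536,
+#       ffn_dim=8960, freq_dim=256, text_dim=4096, out_dim=16,
+#       num_heads=12, num_layers=30, eps=1e-6,
+#   )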
+
+class WanModelStateDictConverter:
+ def __init__(self):
+ pass
+
+ def from_diffusers(self, state_dict):
+ rename_dict = {
+ "blocks.0.attn1.norm_k.weight": "blocks.0.self_attn.norm_k.weight",
+ "blocks.0.attn1.norm_q.weight": "blocks.0.self_attn.norm_q.weight",
+ "blocks.0.attn1.to_k.bias": "blocks.0.self_attn.k.bias",
+ "blocks.0.attn1.to_k.weight": "blocks.0.self_attn.k.weight",
+ "blocks.0.attn1.to_out.0.bias": "blocks.0.self_attn.o.bias",
+ "blocks.0.attn1.to_out.0.weight": "blocks.0.self_attn.o.weight",
+ "blocks.0.attn1.to_q.bias": "blocks.0.self_attn.q.bias",
+ "blocks.0.attn1.to_q.weight": "blocks.0.self_attn.q.weight",
+ "blocks.0.attn1.to_v.bias": "blocks.0.self_attn.v.bias",
+ "blocks.0.attn1.to_v.weight": "blocks.0.self_attn.v.weight",
+ "blocks.0.attn2.norm_k.weight": "blocks.0.cross_attn.norm_k.weight",
+ "blocks.0.attn2.norm_q.weight": "blocks.0.cross_attn.norm_q.weight",
+ "blocks.0.attn2.to_k.bias": "blocks.0.cross_attn.k.bias",
+ "blocks.0.attn2.to_k.weight": "blocks.0.cross_attn.k.weight",
+ "blocks.0.attn2.to_out.0.bias": "blocks.0.cross_attn.o.bias",
+ "blocks.0.attn2.to_out.0.weight": "blocks.0.cross_attn.o.weight",
+ "blocks.0.attn2.to_q.bias": "blocks.0.cross_attn.q.bias",
+ "blocks.0.attn2.to_q.weight": "blocks.0.cross_attn.q.weight",
+ "blocks.0.attn2.to_v.bias": "blocks.0.cross_attn.v.bias",
+ "blocks.0.attn2.to_v.weight": "blocks.0.cross_attn.v.weight",
+ "blocks.0.ffn.net.0.proj.bias": "blocks.0.ffn.0.bias",
+ "blocks.0.ffn.net.0.proj.weight": "blocks.0.ffn.0.weight",
+ "blocks.0.ffn.net.2.bias": "blocks.0.ffn.2.bias",
+ "blocks.0.ffn.net.2.weight": "blocks.0.ffn.2.weight",
+ "blocks.0.norm2.bias": "blocks.0.norm3.bias",
+ "blocks.0.norm2.weight": "blocks.0.norm3.weight",
+ "blocks.0.scale_shift_table": "blocks.0.modulation",
+ "condition_embedder.text_embedder.linear_1.bias": "text_embedding.0.bias",
+ "condition_embedder.text_embedder.linear_1.weight": "text_embedding.0.weight",
+ "condition_embedder.text_embedder.linear_2.bias": "text_embedding.2.bias",
+ "condition_embedder.text_embedder.linear_2.weight": "text_embedding.2.weight",
+ "condition_embedder.time_embedder.linear_1.bias": "time_embedding.0.bias",
+ "condition_embedder.time_embedder.linear_1.weight": "time_embedding.0.weight",
+ "condition_embedder.time_embedder.linear_2.bias": "time_embedding.2.bias",
+ "condition_embedder.time_embedder.linear_2.weight": "time_embedding.2.weight",
+ "condition_embedder.time_proj.bias": "time_projection.1.bias",
+ "condition_embedder.time_proj.weight": "time_projection.1.weight",
+ "patch_embedding.bias": "patch_embedding.bias",
+ "patch_embedding.weight": "patch_embedding.weight",
+ "scale_shift_table": "head.modulation",
+ "proj_out.bias": "head.head.bias",
+ "proj_out.weight": "head.head.weight",
+ }
+ state_dict_ = {}
+ for name, param in state_dict.items():
+ if name in rename_dict:
+ state_dict_[rename_dict[name]] = param
+ else:
+ name_ = ".".join(name.split(".")[:1] + ["0"] + name.split(".")[2:])
+ if name_ in rename_dict:
+ name_ = rename_dict[name_]
+ name_ = ".".join(name_.split(".")[:1] + [name.split(".")[1]] + name_.split(".")[2:])
+ state_dict_[name_] = param
+ if hash_state_dict_keys(state_dict) == "cb104773c6c2cb6df4f9529ad5c60d0b":
+ config = {
+ "model_type": "t2v",
+ "patch_size": (1, 2, 2),
+ "text_len": 512,
+ "in_dim": 16,
+ "dim": 5120,
+ "ffn_dim": 13824,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 40,
+ "num_layers": 40,
+ "window_size": (-1, -1),
+ "qk_norm": True,
+ "cross_attn_norm": True,
+ "eps": 1e-6,
+ }
+ else:
+ config = {}
+ return state_dict_, config
+
+ def from_civitai(self, state_dict):
+ state_dict = {name: param for name, param in state_dict.items() if not name.startswith("vace")}
+ if hash_state_dict_keys(state_dict) == "9269f8db9040a9d860eaca435be61814":
+ config = {
+ "has_image_input": False,
+ "patch_size": [1, 2, 2],
+ "in_dim": 16,
+ "dim": 1536,
+ "ffn_dim": 8960,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 12,
+ "num_layers": 30,
+ "eps": 1e-6
+ }
+ elif hash_state_dict_keys(state_dict) == "aafcfd9672c3a2456dc46e1cb6e52c70":
+ config = {
+ "has_image_input": False,
+ "patch_size": [1, 2, 2],
+ "in_dim": 16,
+ "dim": 5120,
+ "ffn_dim": 13824,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 40,
+ "num_layers": 40,
+ "eps": 1e-6
+ }
+ elif hash_state_dict_keys(state_dict) == "6bfcfb3b342cb286ce886889d519a77e":
+ config = {
+ "has_image_input": True,
+ "patch_size": [1, 2, 2],
+ "in_dim": 36,
+ "dim": 5120,
+ "ffn_dim": 13824,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 40,
+ "num_layers": 40,
+ "eps": 1e-6
+ }
+ elif hash_state_dict_keys(state_dict) == "6d6ccde6845b95ad9114ab993d917893":
+ config = {
+ "has_image_input": True,
+ "patch_size": [1, 2, 2],
+ "in_dim": 36,
+ "dim": 1536,
+ "ffn_dim": 8960,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 12,
+ "num_layers": 30,
+ "eps": 1e-6
+ }
+ elif hash_state_dict_keys(state_dict) == "6bfcfb3b342cb286ce886889d519a77e":
+ config = {
+ "has_image_input": True,
+ "patch_size": [1, 2, 2],
+ "in_dim": 36,
+ "dim": 5120,
+ "ffn_dim": 13824,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 40,
+ "num_layers": 40,
+ "eps": 1e-6
+ }
+ elif hash_state_dict_keys(state_dict) == "349723183fc063b2bfc10bb2835cf677":
+ # 1.3B PAI control
+ config = {
+ "has_image_input": True,
+ "patch_size": [1, 2, 2],
+ "in_dim": 48,
+ "dim": 1536,
+ "ffn_dim": 8960,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 12,
+ "num_layers": 30,
+ "eps": 1e-6
+ }
+ elif hash_state_dict_keys(state_dict) == "efa44cddf936c70abd0ea28b6cbe946c":
+ # 14B PAI control
+ config = {
+ "has_image_input": True,
+ "patch_size": [1, 2, 2],
+ "in_dim": 48,
+ "dim": 5120,
+ "ffn_dim": 13824,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 40,
+ "num_layers": 40,
+ "eps": 1e-6
+ }
+ elif hash_state_dict_keys(state_dict) == "3ef3b1f8e1dab83d5b71fd7b617f859f":
+ config = {
+ "has_image_input": True,
+ "patch_size": [1, 2, 2],
+ "in_dim": 36,
+ "dim": 5120,
+ "ffn_dim": 13824,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 40,
+ "num_layers": 40,
+ "eps": 1e-6,
+ "has_image_pos_emb": True
+ }
+ elif hash_state_dict_keys(state_dict) == "70ddad9d3a133785da5ea371aae09504":
+ # 1.3B PAI control v1.1
+ config = {
+ "has_image_input": True,
+ "patch_size": [1, 2, 2],
+ "in_dim": 48,
+ "dim": 1536,
+ "ffn_dim": 8960,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 12,
+ "num_layers": 30,
+ "eps": 1e-6,
+ "has_ref_conv": True
+ }
+ elif hash_state_dict_keys(state_dict) == "26bde73488a92e64cc20b0a7485b9e5b":
+ # 14B PAI control v1.1
+ config = {
+ "has_image_input": True,
+ "patch_size": [1, 2, 2],
+ "in_dim": 48,
+ "dim": 5120,
+ "ffn_dim": 13824,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 40,
+ "num_layers": 40,
+ "eps": 1e-6,
+ "has_ref_conv": True
+ }
+ elif hash_state_dict_keys(state_dict) == "ac6a5aa74f4a0aab6f64eb9a72f19901":
+ # 1.3B PAI control-camera v1.1
+ config = {
+ "has_image_input": True,
+ "patch_size": [1, 2, 2],
+ "in_dim": 32,
+ "dim": 1536,
+ "ffn_dim": 8960,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 12,
+ "num_layers": 30,
+ "eps": 1e-6,
+ "has_ref_conv": False,
+ "add_control_adapter": True,
+ "in_dim_control_adapter": 24,
+ }
+ elif hash_state_dict_keys(state_dict) == "b61c605c2adbd23124d152ed28e049ae":
+ # 14B PAI control-camera v1.1
+ config = {
+ "has_image_input": True,
+ "patch_size": [1, 2, 2],
+ "in_dim": 32,
+ "dim": 5120,
+ "ffn_dim": 13824,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 40,
+ "num_layers": 40,
+ "eps": 1e-6,
+ "has_ref_conv": False,
+ "add_control_adapter": True,
+ "in_dim_control_adapter": 24,
+ }
+ elif hash_state_dict_keys(state_dict) == "1f5ab7703c6fc803fdded85ff040c316":
+ # Wan-AI/Wan2.2-TI2V-5B
+ config = {
+ "has_image_input": False,
+ "patch_size": [1, 2, 2],
+ "in_dim": 48,
+ "dim": 3072,
+ "ffn_dim": 14336,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 48,
+ "num_heads": 24,
+ "num_layers": 30,
+ "eps": 1e-6,
+ "seperated_timestep": True,
+ "require_clip_embedding": False,
+ "require_vae_embedding": False,
+ "fuse_vae_embedding_in_latents": True,
+ }
+ elif hash_state_dict_keys(state_dict) == "5b013604280dd715f8457c6ed6d6a626":
+ # Wan-AI/Wan2.2-I2V-A14B
+ config = {
+ "has_image_input": False,
+ "patch_size": [1, 2, 2],
+ "in_dim": 36,
+ "dim": 5120,
+ "ffn_dim": 13824,
+ "freq_dim": 256,
+ "text_dim": 4096,
+ "out_dim": 16,
+ "num_heads": 40,
+ "num_layers": 40,
+ "eps": 1e-6,
+ "require_clip_embedding": False,
+ }
+ else:
+ config = {}
+ return state_dict, config
diff --git a/dkt/models/wan_video_image_encoder.py b/dkt/models/wan_video_image_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ca878b1fd6ed6dc00420f092f87479fb65ef63a
--- /dev/null
+++ b/dkt/models/wan_video_image_encoder.py
@@ -0,0 +1,902 @@
+"""
+Concise re-implementation of
+``https://github.com/openai/CLIP'' and
+``https://github.com/mlfoundations/open_clip''.
+"""
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision.transforms as T
+from .wan_video_dit import flash_attention
+
+
+class SelfAttention(nn.Module):
+
+ def __init__(self, dim, num_heads, dropout=0.1, eps=1e-5):
+ assert dim % num_heads == 0
+ super().__init__()
+ self.dim = dim
+ self.num_heads = num_heads
+ self.head_dim = dim // num_heads
+ self.eps = eps
+
+ # layers
+ self.q = nn.Linear(dim, dim)
+ self.k = nn.Linear(dim, dim)
+ self.v = nn.Linear(dim, dim)
+ self.o = nn.Linear(dim, dim)
+ self.dropout = nn.Dropout(dropout)
+
+ def forward(self, x, mask):
+ """
+ x: [B, L, C].
+ """
+ b, s, c, n, d = *x.size(), self.num_heads, self.head_dim
+
+ # compute query, key, value
+ q = self.q(x).reshape(b, s, n, d).permute(0, 2, 1, 3)
+ k = self.k(x).reshape(b, s, n, d).permute(0, 2, 1, 3)
+ v = self.v(x).reshape(b, s, n, d).permute(0, 2, 1, 3)
+
+ # compute attention
+ p = self.dropout.p if self.training else 0.0
+ x = F.scaled_dot_product_attention(q, k, v, mask, p)
+ x = x.permute(0, 2, 1, 3).reshape(b, s, c)
+
+ # output
+ x = self.o(x)
+ x = self.dropout(x)
+ return x
+
+
+class AttentionBlock(nn.Module):
+
+ def __init__(self, dim, num_heads, post_norm, dropout=0.1, eps=1e-5):
+ super().__init__()
+ self.dim = dim
+ self.num_heads = num_heads
+ self.post_norm = post_norm
+ self.eps = eps
+
+ # layers
+ self.attn = SelfAttention(dim, num_heads, dropout, eps)
+ self.norm1 = nn.LayerNorm(dim, eps=eps)
+ self.ffn = nn.Sequential(
+ nn.Linear(dim, dim * 4), nn.GELU(), nn.Linear(dim * 4, dim),
+ nn.Dropout(dropout))
+ self.norm2 = nn.LayerNorm(dim, eps=eps)
+
+ def forward(self, x, mask):
+ if self.post_norm:
+ x = self.norm1(x + self.attn(x, mask))
+ x = self.norm2(x + self.ffn(x))
+ else:
+ x = x + self.attn(self.norm1(x), mask)
+ x = x + self.ffn(self.norm2(x))
+ return x
+
+
+class XLMRoberta(nn.Module):
+ """
+ XLMRobertaModel with no pooler and no LM head.
+ """
+
+ def __init__(self,
+ vocab_size=250002,
+ max_seq_len=514,
+ type_size=1,
+ pad_id=1,
+ dim=1024,
+ num_heads=16,
+ num_layers=24,
+ post_norm=True,
+ dropout=0.1,
+ eps=1e-5):
+ super().__init__()
+ self.vocab_size = vocab_size
+ self.max_seq_len = max_seq_len
+ self.type_size = type_size
+ self.pad_id = pad_id
+ self.dim = dim
+ self.num_heads = num_heads
+ self.num_layers = num_layers
+ self.post_norm = post_norm
+ self.eps = eps
+
+ # embeddings
+ self.token_embedding = nn.Embedding(vocab_size, dim, padding_idx=pad_id)
+ self.type_embedding = nn.Embedding(type_size, dim)
+ self.pos_embedding = nn.Embedding(max_seq_len, dim, padding_idx=pad_id)
+ self.dropout = nn.Dropout(dropout)
+
+ # blocks
+ self.blocks = nn.ModuleList([
+ AttentionBlock(dim, num_heads, post_norm, dropout, eps)
+ for _ in range(num_layers)
+ ])
+
+ # norm layer
+ self.norm = nn.LayerNorm(dim, eps=eps)
+
+ def forward(self, ids):
+ """
+ ids: [B, L] of torch.LongTensor.
+ """
+ b, s = ids.shape
+ mask = ids.ne(self.pad_id).long()
+
+ # embeddings
+ x = self.token_embedding(ids) + \
+ self.type_embedding(torch.zeros_like(ids)) + \
+ self.pos_embedding(self.pad_id + torch.cumsum(mask, dim=1) * mask)
+ if self.post_norm:
+ x = self.norm(x)
+ x = self.dropout(x)
+
+ # blocks
+ mask = torch.where(
+ mask.view(b, 1, 1, s).gt(0), 0.0,
+ torch.finfo(x.dtype).min)
+ for block in self.blocks:
+ x = block(x, mask)
+
+ # output
+ if not self.post_norm:
+ x = self.norm(x)
+ return x
+
+
+def xlm_roberta_large(pretrained=False,
+ return_tokenizer=False,
+ device='cpu',
+ **kwargs):
+ """
+ XLMRobertaLarge adapted from Huggingface.
+ """
+ # params
+ cfg = dict(
+ vocab_size=250002,
+ max_seq_len=514,
+ type_size=1,
+ pad_id=1,
+ dim=1024,
+ num_heads=16,
+ num_layers=24,
+ post_norm=True,
+ dropout=0.1,
+ eps=1e-5)
+ cfg.update(**kwargs)
+
+ # init model
+ if pretrained:
+ from sora import DOWNLOAD_TO_CACHE
+
+ # init a meta model
+ with torch.device('meta'):
+ model = XLMRoberta(**cfg)
+
+ # load checkpoint
+ model.load_state_dict(
+ torch.load(
+ DOWNLOAD_TO_CACHE('models/xlm_roberta/xlm_roberta_large.pth'),
+ map_location=device),
+ assign=True)
+ else:
+ # init a model on device
+ with torch.device(device):
+ model = XLMRoberta(**cfg)
+
+ # init tokenizer
+ if return_tokenizer:
+ from sora.data import HuggingfaceTokenizer
+ tokenizer = HuggingfaceTokenizer(
+ name='xlm-roberta-large',
+ seq_len=model.text_len,
+ clean='whitespace')
+ return model, tokenizer
+ else:
+ return model
+
+
+
+def pos_interpolate(pos, seq_len):
+ if pos.size(1) == seq_len:
+ return pos
+ else:
+ src_grid = int(math.sqrt(pos.size(1)))
+ tar_grid = int(math.sqrt(seq_len))
+ n = pos.size(1) - src_grid * src_grid
+ return torch.cat([
+ pos[:, :n],
+ F.interpolate(
+ pos[:, n:].float().reshape(1, src_grid, src_grid, -1).permute(
+ 0, 3, 1, 2),
+ size=(tar_grid, tar_grid),
+ mode='bicubic',
+ align_corners=False).flatten(2).transpose(1, 2)
+ ],
+ dim=1)
+
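+# For example, a (1, 1 + 14*14, dim) positional table trained at 224x224 with patch 16
+# is bicubically interpolated to (1, 1 + 16*16, dim) when 256 image tokens are needed,
+# while the leading class-token embedding is kept unchanged.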
+
+class QuickGELU(nn.Module):
+
+ def forward(self, x):
+ return x * torch.sigmoid(1.702 * x)
+
+
+class LayerNorm(nn.LayerNorm):
+
+ def forward(self, x):
+ return super().forward(x).type_as(x)
+
+
+class SelfAttention(nn.Module):
+
+ def __init__(self,
+ dim,
+ num_heads,
+ causal=False,
+ attn_dropout=0.0,
+ proj_dropout=0.0):
+ assert dim % num_heads == 0
+ super().__init__()
+ self.dim = dim
+ self.num_heads = num_heads
+ self.head_dim = dim // num_heads
+ self.causal = causal
+ self.attn_dropout = attn_dropout
+ self.proj_dropout = proj_dropout
+
+ # layers
+ self.to_qkv = nn.Linear(dim, dim * 3)
+ self.proj = nn.Linear(dim, dim)
+
+ def forward(self, x):
+ """
+ x: [B, L, C].
+ """
+ # compute query, key, value
+ q, k, v = self.to_qkv(x).chunk(3, dim=-1)
+
+ # compute attention
+ x = flash_attention(q, k, v, num_heads=self.num_heads, compatibility_mode=True)
+
+ # output
+ x = self.proj(x)
+ x = F.dropout(x, self.proj_dropout, self.training)
+ return x
+
+
+class SwiGLU(nn.Module):
+
+ def __init__(self, dim, mid_dim):
+ super().__init__()
+ self.dim = dim
+ self.mid_dim = mid_dim
+
+ # layers
+ self.fc1 = nn.Linear(dim, mid_dim)
+ self.fc2 = nn.Linear(dim, mid_dim)
+ self.fc3 = nn.Linear(mid_dim, dim)
+
+ def forward(self, x):
+ x = F.silu(self.fc1(x)) * self.fc2(x)
+ x = self.fc3(x)
+ return x
+
+
+class AttentionBlock(nn.Module):
+
+ def __init__(self,
+ dim,
+ mlp_ratio,
+ num_heads,
+ post_norm=False,
+ causal=False,
+ activation='quick_gelu',
+ attn_dropout=0.0,
+ proj_dropout=0.0,
+ norm_eps=1e-5):
+ assert activation in ['quick_gelu', 'gelu', 'swi_glu']
+ super().__init__()
+ self.dim = dim
+ self.mlp_ratio = mlp_ratio
+ self.num_heads = num_heads
+ self.post_norm = post_norm
+ self.causal = causal
+ self.norm_eps = norm_eps
+
+ # layers
+ self.norm1 = LayerNorm(dim, eps=norm_eps)
+ self.attn = SelfAttention(dim, num_heads, causal, attn_dropout,
+ proj_dropout)
+ self.norm2 = LayerNorm(dim, eps=norm_eps)
+ if activation == 'swi_glu':
+ self.mlp = SwiGLU(dim, int(dim * mlp_ratio))
+ else:
+ self.mlp = nn.Sequential(
+ nn.Linear(dim, int(dim * mlp_ratio)),
+ QuickGELU() if activation == 'quick_gelu' else nn.GELU(),
+ nn.Linear(int(dim * mlp_ratio), dim), nn.Dropout(proj_dropout))
+
+ def forward(self, x):
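+        # with post_norm, LayerNorm is applied to each sublayer's output before the residual add; otherwise the usual pre-norm ordering (norm -> sublayer -> add) is used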
+ if self.post_norm:
+ x = x + self.norm1(self.attn(x))
+ x = x + self.norm2(self.mlp(x))
+ else:
+ x = x + self.attn(self.norm1(x))
+ x = x + self.mlp(self.norm2(x))
+ return x
+
+
+class AttentionPool(nn.Module):
+
+ def __init__(self,
+ dim,
+ mlp_ratio,
+ num_heads,
+ activation='gelu',
+ proj_dropout=0.0,
+ norm_eps=1e-5):
+ assert dim % num_heads == 0
+ super().__init__()
+ self.dim = dim
+ self.mlp_ratio = mlp_ratio
+ self.num_heads = num_heads
+ self.head_dim = dim // num_heads
+ self.proj_dropout = proj_dropout
+ self.norm_eps = norm_eps
+
+ # layers
+ gain = 1.0 / math.sqrt(dim)
+ self.cls_embedding = nn.Parameter(gain * torch.randn(1, 1, dim))
+ self.to_q = nn.Linear(dim, dim)
+ self.to_kv = nn.Linear(dim, dim * 2)
+ self.proj = nn.Linear(dim, dim)
+ self.norm = LayerNorm(dim, eps=norm_eps)
+ self.mlp = nn.Sequential(
+ nn.Linear(dim, int(dim * mlp_ratio)),
+ QuickGELU() if activation == 'quick_gelu' else nn.GELU(),
+ nn.Linear(int(dim * mlp_ratio), dim), nn.Dropout(proj_dropout))
+
+ def forward(self, x):
+ """
+ x: [B, L, C].
+ """
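+        # a single learnable class-token query attends over all L tokens, yielding one pooled vector per sample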
+ b, s, c, n, d = *x.size(), self.num_heads, self.head_dim
+
+ # compute query, key, value
+ q = self.to_q(self.cls_embedding).view(1, 1, n*d).expand(b, -1, -1)
+ k, v = self.to_kv(x).chunk(2, dim=-1)
+
+ # compute attention
+ x = flash_attention(q, k, v, num_heads=self.num_heads, compatibility_mode=True)
+ x = x.reshape(b, 1, c)
+
+ # output
+ x = self.proj(x)
+ x = F.dropout(x, self.proj_dropout, self.training)
+
+ # mlp
+ x = x + self.mlp(self.norm(x))
+ return x[:, 0]
+
+
+class VisionTransformer(nn.Module):
+
+ def __init__(self,
+ image_size=224,
+ patch_size=16,
+ dim=768,
+ mlp_ratio=4,
+ out_dim=512,
+ num_heads=12,
+ num_layers=12,
+ pool_type='token',
+ pre_norm=True,
+ post_norm=False,
+ activation='quick_gelu',
+ attn_dropout=0.0,
+ proj_dropout=0.0,
+ embedding_dropout=0.0,
+ norm_eps=1e-5):
+ if image_size % patch_size != 0:
+ print(
+ '[WARNING] image_size is not divisible by patch_size',
+ flush=True)
+ assert pool_type in ('token', 'token_fc', 'attn_pool')
+ out_dim = out_dim or dim
+ super().__init__()
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_patches = (image_size // patch_size)**2
+ self.dim = dim
+ self.mlp_ratio = mlp_ratio
+ self.out_dim = out_dim
+ self.num_heads = num_heads
+ self.num_layers = num_layers
+ self.pool_type = pool_type
+ self.post_norm = post_norm
+ self.norm_eps = norm_eps
+
+ # embeddings
+ gain = 1.0 / math.sqrt(dim)
+ self.patch_embedding = nn.Conv2d(
+ 3,
+ dim,
+ kernel_size=patch_size,
+ stride=patch_size,
+ bias=not pre_norm)
+ if pool_type in ('token', 'token_fc'):
+ self.cls_embedding = nn.Parameter(gain * torch.randn(1, 1, dim))
+ self.pos_embedding = nn.Parameter(gain * torch.randn(
+ 1, self.num_patches +
+ (1 if pool_type in ('token', 'token_fc') else 0), dim))
+ self.dropout = nn.Dropout(embedding_dropout)
+
+ # transformer
+ self.pre_norm = LayerNorm(dim, eps=norm_eps) if pre_norm else None
+ self.transformer = nn.Sequential(*[
+ AttentionBlock(dim, mlp_ratio, num_heads, post_norm, False,
+ activation, attn_dropout, proj_dropout, norm_eps)
+ for _ in range(num_layers)
+ ])
+ self.post_norm = LayerNorm(dim, eps=norm_eps)
+
+ # head
+ if pool_type == 'token':
+ self.head = nn.Parameter(gain * torch.randn(dim, out_dim))
+ elif pool_type == 'token_fc':
+ self.head = nn.Linear(dim, out_dim)
+ elif pool_type == 'attn_pool':
+ self.head = AttentionPool(dim, mlp_ratio, num_heads, activation,
+ proj_dropout, norm_eps)
+
+ def forward(self, x, interpolation=False, use_31_block=False):
+ b = x.size(0)
+
+ # embeddings
+ x = self.patch_embedding(x).flatten(2).permute(0, 2, 1)
+ if self.pool_type in ('token', 'token_fc'):
+ x = torch.cat([self.cls_embedding.expand(b, -1, -1).to(dtype=x.dtype, device=x.device), x], dim=1)
+ if interpolation:
+ e = pos_interpolate(self.pos_embedding, x.size(1))
+ else:
+ e = self.pos_embedding
+ e = e.to(dtype=x.dtype, device=x.device)
+ x = self.dropout(x + e)
+ if self.pre_norm is not None:
+ x = self.pre_norm(x)
+
+ # transformer
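+        # use_31_block stops one block early and returns the penultimate features (transformer[:-1]), which is what WanImageEncoder consumes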
+ if use_31_block:
+ x = self.transformer[:-1](x)
+ return x
+ else:
+ x = self.transformer(x)
+ return x
+
+
+class CLIP(nn.Module):
+
+ def __init__(self,
+ embed_dim=512,
+ image_size=224,
+ patch_size=16,
+ vision_dim=768,
+ vision_mlp_ratio=4,
+ vision_heads=12,
+ vision_layers=12,
+ vision_pool='token',
+ vision_pre_norm=True,
+ vision_post_norm=False,
+ vocab_size=49408,
+ text_len=77,
+ text_dim=512,
+ text_mlp_ratio=4,
+ text_heads=8,
+ text_layers=12,
+ text_causal=True,
+ text_pool='argmax',
+ text_head_bias=False,
+ logit_bias=None,
+ activation='quick_gelu',
+ attn_dropout=0.0,
+ proj_dropout=0.0,
+ embedding_dropout=0.0,
+ norm_eps=1e-5):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.vision_dim = vision_dim
+ self.vision_mlp_ratio = vision_mlp_ratio
+ self.vision_heads = vision_heads
+ self.vision_layers = vision_layers
+ self.vision_pool = vision_pool
+ self.vision_pre_norm = vision_pre_norm
+ self.vision_post_norm = vision_post_norm
+ self.vocab_size = vocab_size
+ self.text_len = text_len
+ self.text_dim = text_dim
+ self.text_mlp_ratio = text_mlp_ratio
+ self.text_heads = text_heads
+ self.text_layers = text_layers
+ self.text_causal = text_causal
+ self.text_pool = text_pool
+ self.text_head_bias = text_head_bias
+ self.norm_eps = norm_eps
+
+ # models
+ self.visual = VisionTransformer(
+ image_size=image_size,
+ patch_size=patch_size,
+ dim=vision_dim,
+ mlp_ratio=vision_mlp_ratio,
+ out_dim=embed_dim,
+ num_heads=vision_heads,
+ num_layers=vision_layers,
+ pool_type=vision_pool,
+ pre_norm=vision_pre_norm,
+ post_norm=vision_post_norm,
+ activation=activation,
+ attn_dropout=attn_dropout,
+ proj_dropout=proj_dropout,
+ embedding_dropout=embedding_dropout,
+ norm_eps=norm_eps)
+ self.textual = TextTransformer(
+ vocab_size=vocab_size,
+ text_len=text_len,
+ dim=text_dim,
+ mlp_ratio=text_mlp_ratio,
+ out_dim=embed_dim,
+ num_heads=text_heads,
+ num_layers=text_layers,
+ causal=text_causal,
+ pool_type=text_pool,
+ head_bias=text_head_bias,
+ activation=activation,
+ attn_dropout=attn_dropout,
+ proj_dropout=proj_dropout,
+ embedding_dropout=embedding_dropout,
+ norm_eps=norm_eps)
+ self.log_scale = nn.Parameter(math.log(1 / 0.07) * torch.ones([]))
+ if logit_bias is not None:
+ self.logit_bias = nn.Parameter(logit_bias * torch.ones([]))
+
+ # initialize weights
+ self.init_weights()
+
+ def forward(self, imgs, txt_ids):
+ """
+ imgs: [B, 3, H, W] of torch.float32.
+ - mean: [0.48145466, 0.4578275, 0.40821073]
+ - std: [0.26862954, 0.26130258, 0.27577711]
+ txt_ids: [B, L] of torch.long. Encoded by data.CLIPTokenizer.
+ """
+ xi = self.visual(imgs)
+ xt = self.textual(txt_ids)
+ return xi, xt
+
+ def init_weights(self):
+ # embeddings
+ nn.init.normal_(self.textual.token_embedding.weight, std=0.02)
+ nn.init.normal_(self.visual.patch_embedding.weight, std=0.1)
+
+ # attentions
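+        # residual projections use std scaled by 1/sqrt(2 * depth), reminiscent of GPT-2-style scaled initialisation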
+ for modality in ['visual', 'textual']:
+ dim = self.vision_dim if modality == 'visual' else self.text_dim
+ transformer = getattr(self, modality).transformer
+ proj_gain = (1.0 / math.sqrt(dim)) * (
+ 1.0 / math.sqrt(2 * len(transformer)))
+ attn_gain = 1.0 / math.sqrt(dim)
+ mlp_gain = 1.0 / math.sqrt(2.0 * dim)
+ for block in transformer:
+ nn.init.normal_(block.attn.to_qkv.weight, std=attn_gain)
+ nn.init.normal_(block.attn.proj.weight, std=proj_gain)
+ nn.init.normal_(block.mlp[0].weight, std=mlp_gain)
+ nn.init.normal_(block.mlp[2].weight, std=proj_gain)
+
+ def param_groups(self):
+ groups = [{
+ 'params': [
+ p for n, p in self.named_parameters()
+ if 'norm' in n or n.endswith('bias')
+ ],
+ 'weight_decay': 0.0
+ }, {
+ 'params': [
+ p for n, p in self.named_parameters()
+ if not ('norm' in n or n.endswith('bias'))
+ ]
+ }]
+ return groups
+
+
+class XLMRobertaWithHead(XLMRoberta):
+
+ def __init__(self, **kwargs):
+ self.out_dim = kwargs.pop('out_dim')
+ super().__init__(**kwargs)
+
+ # head
+ mid_dim = (self.dim + self.out_dim) // 2
+ self.head = nn.Sequential(
+ nn.Linear(self.dim, mid_dim, bias=False), nn.GELU(),
+ nn.Linear(mid_dim, self.out_dim, bias=False))
+
+ def forward(self, ids):
+ # xlm-roberta
+ x = super().forward(ids)
+
+ # average pooling
+ mask = ids.ne(self.pad_id).unsqueeze(-1).to(x)
+ x = (x * mask).sum(dim=1) / mask.sum(dim=1)
+
+ # head
+ x = self.head(x)
+ return x
+
+
+class XLMRobertaCLIP(nn.Module):
+
+ def __init__(self,
+ embed_dim=1024,
+ image_size=224,
+ patch_size=14,
+ vision_dim=1280,
+ vision_mlp_ratio=4,
+ vision_heads=16,
+ vision_layers=32,
+ vision_pool='token',
+ vision_pre_norm=True,
+ vision_post_norm=False,
+ activation='gelu',
+ vocab_size=250002,
+ max_text_len=514,
+ type_size=1,
+ pad_id=1,
+ text_dim=1024,
+ text_heads=16,
+ text_layers=24,
+ text_post_norm=True,
+ text_dropout=0.1,
+ attn_dropout=0.0,
+ proj_dropout=0.0,
+ embedding_dropout=0.0,
+ norm_eps=1e-5):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.vision_dim = vision_dim
+ self.vision_mlp_ratio = vision_mlp_ratio
+ self.vision_heads = vision_heads
+ self.vision_layers = vision_layers
+ self.vision_pre_norm = vision_pre_norm
+ self.vision_post_norm = vision_post_norm
+ self.activation = activation
+ self.vocab_size = vocab_size
+ self.max_text_len = max_text_len
+ self.type_size = type_size
+ self.pad_id = pad_id
+ self.text_dim = text_dim
+ self.text_heads = text_heads
+ self.text_layers = text_layers
+ self.text_post_norm = text_post_norm
+ self.norm_eps = norm_eps
+
+ # models
+ self.visual = VisionTransformer(
+ image_size=image_size,
+ patch_size=patch_size,
+ dim=vision_dim,
+ mlp_ratio=vision_mlp_ratio,
+ out_dim=embed_dim,
+ num_heads=vision_heads,
+ num_layers=vision_layers,
+ pool_type=vision_pool,
+ pre_norm=vision_pre_norm,
+ post_norm=vision_post_norm,
+ activation=activation,
+ attn_dropout=attn_dropout,
+ proj_dropout=proj_dropout,
+ embedding_dropout=embedding_dropout,
+ norm_eps=norm_eps)
+ self.textual = None
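+        # the text tower is not instantiated here; only the vision branch is used (see WanImageEncoder below)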
+ self.log_scale = nn.Parameter(math.log(1 / 0.07) * torch.ones([]))
+
+ def forward(self, imgs, txt_ids):
+ """
+ imgs: [B, 3, H, W] of torch.float32.
+ - mean: [0.48145466, 0.4578275, 0.40821073]
+ - std: [0.26862954, 0.26130258, 0.27577711]
+ txt_ids: [B, L] of torch.long.
+ Encoded by data.CLIPTokenizer.
+ """
+ xi = self.visual(imgs)
+ xt = self.textual(txt_ids)
+ return xi, xt
+
+ def param_groups(self):
+ groups = [{
+ 'params': [
+ p for n, p in self.named_parameters()
+ if 'norm' in n or n.endswith('bias')
+ ],
+ 'weight_decay': 0.0
+ }, {
+ 'params': [
+ p for n, p in self.named_parameters()
+ if not ('norm' in n or n.endswith('bias'))
+ ]
+ }]
+ return groups
+
+
+def _clip(pretrained=False,
+ pretrained_name=None,
+ model_cls=CLIP,
+ return_transforms=False,
+ return_tokenizer=False,
+ tokenizer_padding='eos',
+ dtype=torch.float32,
+ device='cpu',
+ **kwargs):
+ # init model
+ if pretrained and pretrained_name:
+ from sora import BUCKET, DOWNLOAD_TO_CACHE
+
+ # init a meta model
+ with torch.device('meta'):
+ model = model_cls(**kwargs)
+
+ # checkpoint path
+ checkpoint = f'models/clip/{pretrained_name}'
+ if dtype in (torch.float16, torch.bfloat16):
+ suffix = '-' + {
+ torch.float16: 'fp16',
+ torch.bfloat16: 'bf16'
+ }[dtype]
+ if object_exists(BUCKET, f'{checkpoint}{suffix}.pth'):
+ checkpoint = f'{checkpoint}{suffix}'
+ checkpoint += '.pth'
+
+ # load
+ model.load_state_dict(
+ torch.load(DOWNLOAD_TO_CACHE(checkpoint), map_location=device),
+ assign=True,
+ strict=False)
+ else:
+ # init a model on device
+ with torch.device(device):
+ model = model_cls(**kwargs)
+
+    # collect outputs
+ output = (model,)
+
+ # init transforms
+ if return_transforms:
+ # mean and std
+ if 'siglip' in pretrained_name.lower():
+ mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
+ else:
+ mean = [0.48145466, 0.4578275, 0.40821073]
+ std = [0.26862954, 0.26130258, 0.27577711]
+
+ # transforms
+ transforms = T.Compose([
+ T.Resize((model.image_size, model.image_size),
+ interpolation=T.InterpolationMode.BICUBIC),
+ T.ToTensor(),
+ T.Normalize(mean=mean, std=std)
+ ])
+ output += (transforms,)
+
+ # init tokenizer
+ if return_tokenizer:
+ from sora import data
+ if 'siglip' in pretrained_name.lower():
+ tokenizer = data.HuggingfaceTokenizer(
+ name=f'timm/{pretrained_name}',
+ seq_len=model.text_len,
+ clean='canonicalize')
+ elif 'xlm' in pretrained_name.lower():
+ tokenizer = data.HuggingfaceTokenizer(
+ name='xlm-roberta-large',
+ seq_len=model.max_text_len - 2,
+ clean='whitespace')
+ elif 'mba' in pretrained_name.lower():
+ tokenizer = data.HuggingfaceTokenizer(
+ name='facebook/xlm-roberta-xl',
+ seq_len=model.max_text_len - 2,
+ clean='whitespace')
+ else:
+ tokenizer = data.CLIPTokenizer(
+ seq_len=model.text_len, padding=tokenizer_padding)
+ output += (tokenizer,)
+ return output[0] if len(output) == 1 else output
+
+
+def clip_xlm_roberta_vit_h_14(
+ pretrained=False,
+ pretrained_name='open-clip-xlm-roberta-large-vit-huge-14',
+ **kwargs):
+ cfg = dict(
+ embed_dim=1024,
+ image_size=224,
+ patch_size=14,
+ vision_dim=1280,
+ vision_mlp_ratio=4,
+ vision_heads=16,
+ vision_layers=32,
+ vision_pool='token',
+ activation='gelu',
+ vocab_size=250002,
+ max_text_len=514,
+ type_size=1,
+ pad_id=1,
+ text_dim=1024,
+ text_heads=16,
+ text_layers=24,
+ text_post_norm=True,
+ text_dropout=0.1,
+ attn_dropout=0.0,
+ proj_dropout=0.0,
+ embedding_dropout=0.0)
+ cfg.update(**kwargs)
+ return _clip(pretrained, pretrained_name, XLMRobertaCLIP, **cfg)
+
+
+class WanImageEncoder(torch.nn.Module):
+
+ def __init__(self):
+ super().__init__()
+ # init model
+ self.model, self.transforms = clip_xlm_roberta_vit_h_14(
+ pretrained=False,
+ return_transforms=True,
+ return_tokenizer=False,
+ dtype=torch.float32,
+ device="cpu")
+
+ def encode_image(self, videos):
+ # preprocess
+ size = (self.model.image_size,) * 2
+ videos = torch.cat([
+ F.interpolate(
+ u,
+ size=size,
+ mode='bicubic',
+ align_corners=False) for u in videos
+ ])
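+        # map pixel values from [-1, 1] to [0, 1], then apply the final transform (CLIP mean/std normalisation)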
+ videos = self.transforms.transforms[-1](videos.mul_(0.5).add_(0.5))
+
+ # forward
+ dtype = next(iter(self.model.visual.parameters())).dtype
+ videos = videos.to(dtype)
+ out = self.model.visual(videos, use_31_block=True)
+ return out
+
+ @staticmethod
+ def state_dict_converter():
+ return WanImageEncoderStateDictConverter()
+
+
+class WanImageEncoderStateDictConverter:
+ def __init__(self):
+ pass
+
+ def from_diffusers(self, state_dict):
+ return state_dict
+
+ def from_civitai(self, state_dict):
+ state_dict_ = {}
+ for name, param in state_dict.items():
+ if name.startswith("textual."):
+ continue
+ name = "model." + name
+ state_dict_[name] = param
+ return state_dict_
+
diff --git a/dkt/models/wan_video_motion_controller.py b/dkt/models/wan_video_motion_controller.py
new file mode 100644
index 0000000000000000000000000000000000000000..518c1c66edca1cae11d5f3371af0455808b2a66a
--- /dev/null
+++ b/dkt/models/wan_video_motion_controller.py
@@ -0,0 +1,44 @@
+import torch
+import torch.nn as nn
+from .wan_video_dit import sinusoidal_embedding_1d
+
+
+
+class WanMotionControllerModel(torch.nn.Module):
+ def __init__(self, freq_dim=256, dim=1536):
+ super().__init__()
+ self.freq_dim = freq_dim
+ self.linear = nn.Sequential(
+ nn.Linear(freq_dim, dim),
+ nn.SiLU(),
+ nn.Linear(dim, dim),
+ nn.SiLU(),
+ nn.Linear(dim, dim * 6),
+ )
+
+ def forward(self, motion_bucket_id):
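+        # scale the bucket id before the sinusoidal embedding, then expand it to dim * 6 modulation parameters with the MLP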
+ emb = sinusoidal_embedding_1d(self.freq_dim, motion_bucket_id * 10)
+ emb = self.linear(emb)
+ return emb
+
+ def init(self):
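+        # zero the final linear layer so the controller initially contributes nothing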
+ state_dict = self.linear[-1].state_dict()
+ state_dict = {i: state_dict[i] * 0 for i in state_dict}
+ self.linear[-1].load_state_dict(state_dict)
+
+ @staticmethod
+ def state_dict_converter():
+ return WanMotionControllerModelDictConverter()
+
+
+
+class WanMotionControllerModelDictConverter:
+ def __init__(self):
+ pass
+
+ def from_diffusers(self, state_dict):
+ return state_dict
+
+ def from_civitai(self, state_dict):
+ return state_dict
+
diff --git a/dkt/models/wan_video_text_encoder.py b/dkt/models/wan_video_text_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..c28873722ee92f23914712c9d5b2c3a26fd2adb7
--- /dev/null
+++ b/dkt/models/wan_video_text_encoder.py
@@ -0,0 +1,269 @@
+import math
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+def fp16_clamp(x):
+ if x.dtype == torch.float16 and torch.isinf(x).any():
+ clamp = torch.finfo(x.dtype).max - 1000
+ x = torch.clamp(x, min=-clamp, max=clamp)
+ return x
+
+
+class GELU(nn.Module):
+
+ def forward(self, x):
+ return 0.5 * x * (1.0 + torch.tanh(
+ math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
+
+
+class T5LayerNorm(nn.Module):
+
+ def __init__(self, dim, eps=1e-6):
+ super(T5LayerNorm, self).__init__()
+ self.dim = dim
+ self.eps = eps
+ self.weight = nn.Parameter(torch.ones(dim))
+
+ def forward(self, x):
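+        # RMS-style norm: scale by the root mean square only, with no mean subtraction and no bias (T5 convention)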
+ x = x * torch.rsqrt(x.float().pow(2).mean(dim=-1, keepdim=True) +
+ self.eps)
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
+ x = x.type_as(self.weight)
+ return self.weight * x
+
+
+class T5Attention(nn.Module):
+
+ def __init__(self, dim, dim_attn, num_heads, dropout=0.1):
+ assert dim_attn % num_heads == 0
+ super(T5Attention, self).__init__()
+ self.dim = dim
+ self.dim_attn = dim_attn
+ self.num_heads = num_heads
+ self.head_dim = dim_attn // num_heads
+
+ # layers
+ self.q = nn.Linear(dim, dim_attn, bias=False)
+ self.k = nn.Linear(dim, dim_attn, bias=False)
+ self.v = nn.Linear(dim, dim_attn, bias=False)
+ self.o = nn.Linear(dim_attn, dim, bias=False)
+ self.dropout = nn.Dropout(dropout)
+
+ def forward(self, x, context=None, mask=None, pos_bias=None):
+ """
+ x: [B, L1, C].
+ context: [B, L2, C] or None.
+ mask: [B, L2] or [B, L1, L2] or None.
+ """
+ # check inputs
+ context = x if context is None else context
+ b, n, c = x.size(0), self.num_heads, self.head_dim
+
+ # compute query, key, value
+ q = self.q(x).view(b, -1, n, c)
+ k = self.k(context).view(b, -1, n, c)
+ v = self.v(context).view(b, -1, n, c)
+
+ # attention bias
+ attn_bias = x.new_zeros(b, n, q.size(1), k.size(1))
+ if pos_bias is not None:
+ attn_bias += pos_bias
+ if mask is not None:
+ assert mask.ndim in [2, 3]
+ mask = mask.view(b, 1, 1,
+ -1) if mask.ndim == 2 else mask.unsqueeze(1)
+ attn_bias.masked_fill_(mask == 0, torch.finfo(x.dtype).min)
+
+ # compute attention (T5 does not use scaling)
+ attn = torch.einsum('binc,bjnc->bnij', q, k) + attn_bias
+ attn = F.softmax(attn.float(), dim=-1).type_as(attn)
+ x = torch.einsum('bnij,bjnc->binc', attn, v)
+
+ # output
+ x = x.reshape(b, -1, n * c)
+ x = self.o(x)
+ x = self.dropout(x)
+ return x
+
+
+class T5FeedForward(nn.Module):
+
+ def __init__(self, dim, dim_ffn, dropout=0.1):
+ super(T5FeedForward, self).__init__()
+ self.dim = dim
+ self.dim_ffn = dim_ffn
+
+ # layers
+ self.gate = nn.Sequential(nn.Linear(dim, dim_ffn, bias=False), GELU())
+ self.fc1 = nn.Linear(dim, dim_ffn, bias=False)
+ self.fc2 = nn.Linear(dim_ffn, dim, bias=False)
+ self.dropout = nn.Dropout(dropout)
+
+ def forward(self, x):
+ x = self.fc1(x) * self.gate(x)
+ x = self.dropout(x)
+ x = self.fc2(x)
+ x = self.dropout(x)
+ return x
+
+
+class T5SelfAttention(nn.Module):
+
+ def __init__(self,
+ dim,
+ dim_attn,
+ dim_ffn,
+ num_heads,
+ num_buckets,
+ shared_pos=True,
+ dropout=0.1):
+ super(T5SelfAttention, self).__init__()
+ self.dim = dim
+ self.dim_attn = dim_attn
+ self.dim_ffn = dim_ffn
+ self.num_heads = num_heads
+ self.num_buckets = num_buckets
+ self.shared_pos = shared_pos
+
+ # layers
+ self.norm1 = T5LayerNorm(dim)
+ self.attn = T5Attention(dim, dim_attn, num_heads, dropout)
+ self.norm2 = T5LayerNorm(dim)
+ self.ffn = T5FeedForward(dim, dim_ffn, dropout)
+ self.pos_embedding = None if shared_pos else T5RelativeEmbedding(
+ num_buckets, num_heads, bidirectional=True)
+
+ def forward(self, x, mask=None, pos_bias=None):
+ e = pos_bias if self.shared_pos else self.pos_embedding(
+ x.size(1), x.size(1))
+ x = fp16_clamp(x + self.attn(self.norm1(x), mask=mask, pos_bias=e))
+ x = fp16_clamp(x + self.ffn(self.norm2(x)))
+ return x
+
+
+class T5RelativeEmbedding(nn.Module):
+
+ def __init__(self, num_buckets, num_heads, bidirectional, max_dist=128):
+ super(T5RelativeEmbedding, self).__init__()
+ self.num_buckets = num_buckets
+ self.num_heads = num_heads
+ self.bidirectional = bidirectional
+ self.max_dist = max_dist
+
+ # layers
+ self.embedding = nn.Embedding(num_buckets, num_heads)
+
+ def forward(self, lq, lk):
+ device = self.embedding.weight.device
+ # rel_pos = torch.arange(lk).unsqueeze(0).to(device) - \
+ # torch.arange(lq).unsqueeze(1).to(device)
+ rel_pos = torch.arange(lk, device=device).unsqueeze(0) - \
+ torch.arange(lq, device=device).unsqueeze(1)
+ rel_pos = self._relative_position_bucket(rel_pos)
+ rel_pos_embeds = self.embedding(rel_pos)
+ rel_pos_embeds = rel_pos_embeds.permute(2, 0, 1).unsqueeze(
+ 0) # [1, N, Lq, Lk]
+ return rel_pos_embeds.contiguous()
+
+ def _relative_position_bucket(self, rel_pos):
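+        # half of the buckets index exact small offsets; the rest are log-spaced up to max_dist, with larger offsets sharing the last bucket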
+ # preprocess
+ if self.bidirectional:
+ num_buckets = self.num_buckets // 2
+ rel_buckets = (rel_pos > 0).long() * num_buckets
+ rel_pos = torch.abs(rel_pos)
+ else:
+ num_buckets = self.num_buckets
+ rel_buckets = 0
+ rel_pos = -torch.min(rel_pos, torch.zeros_like(rel_pos))
+
+ # embeddings for small and large positions
+ max_exact = num_buckets // 2
+ rel_pos_large = max_exact + (torch.log(rel_pos.float() / max_exact) /
+ math.log(self.max_dist / max_exact) *
+ (num_buckets - max_exact)).long()
+ rel_pos_large = torch.min(
+ rel_pos_large, torch.full_like(rel_pos_large, num_buckets - 1))
+ rel_buckets += torch.where(rel_pos < max_exact, rel_pos, rel_pos_large)
+ return rel_buckets
+
+def init_weights(m):
+ if isinstance(m, T5LayerNorm):
+ nn.init.ones_(m.weight)
+ elif isinstance(m, T5FeedForward):
+ nn.init.normal_(m.gate[0].weight, std=m.dim**-0.5)
+ nn.init.normal_(m.fc1.weight, std=m.dim**-0.5)
+ nn.init.normal_(m.fc2.weight, std=m.dim_ffn**-0.5)
+ elif isinstance(m, T5Attention):
+ nn.init.normal_(m.q.weight, std=(m.dim * m.dim_attn)**-0.5)
+ nn.init.normal_(m.k.weight, std=m.dim**-0.5)
+ nn.init.normal_(m.v.weight, std=m.dim**-0.5)
+ nn.init.normal_(m.o.weight, std=(m.num_heads * m.dim_attn)**-0.5)
+ elif isinstance(m, T5RelativeEmbedding):
+ nn.init.normal_(
+ m.embedding.weight, std=(2 * m.num_buckets * m.num_heads)**-0.5)
+
+
+class WanTextEncoder(torch.nn.Module):
+
+ def __init__(self,
+ vocab=256384,
+ dim=4096,
+ dim_attn=4096,
+ dim_ffn=10240,
+ num_heads=64,
+ num_layers=24,
+ num_buckets=32,
+ shared_pos=False,
+ dropout=0.1):
+ super(WanTextEncoder, self).__init__()
+ self.dim = dim
+ self.dim_attn = dim_attn
+ self.dim_ffn = dim_ffn
+ self.num_heads = num_heads
+ self.num_layers = num_layers
+ self.num_buckets = num_buckets
+ self.shared_pos = shared_pos
+
+ # layers
+ self.token_embedding = vocab if isinstance(vocab, nn.Embedding) \
+ else nn.Embedding(vocab, dim)
+ self.pos_embedding = T5RelativeEmbedding(
+ num_buckets, num_heads, bidirectional=True) if shared_pos else None
+ self.dropout = nn.Dropout(dropout)
+ self.blocks = nn.ModuleList([
+ T5SelfAttention(dim, dim_attn, dim_ffn, num_heads, num_buckets,
+ shared_pos, dropout) for _ in range(num_layers)
+ ])
+ self.norm = T5LayerNorm(dim)
+
+ # initialize weights
+ self.apply(init_weights)
+
+ def forward(self, ids, mask=None):
+ x = self.token_embedding(ids)
+ x = self.dropout(x)
+ e = self.pos_embedding(x.size(1),
+ x.size(1)) if self.shared_pos else None
+ for block in self.blocks:
+ x = block(x, mask, pos_bias=e)
+ x = self.norm(x)
+ x = self.dropout(x)
+ return x
+
+ @staticmethod
+ def state_dict_converter():
+ return WanTextEncoderStateDictConverter()
+
+
+class WanTextEncoderStateDictConverter:
+ def __init__(self):
+ pass
+
+ def from_diffusers(self, state_dict):
+ return state_dict
+
+ def from_civitai(self, state_dict):
+ return state_dict
diff --git a/dkt/models/wan_video_vace.py b/dkt/models/wan_video_vace.py
new file mode 100644
index 0000000000000000000000000000000000000000..40f38048fe07184084c76e3b1a5abac062bb0ab8
--- /dev/null
+++ b/dkt/models/wan_video_vace.py
@@ -0,0 +1,113 @@
+import torch
+from .wan_video_dit import DiTBlock
+from .utils import hash_state_dict_keys
+
+class VaceWanAttentionBlock(DiTBlock):
+ def __init__(self, has_image_input, dim, num_heads, ffn_dim, eps=1e-6, block_id=0):
+ super().__init__(has_image_input, dim, num_heads, ffn_dim, eps=eps)
+ self.block_id = block_id
+ if block_id == 0:
+ self.before_proj = torch.nn.Linear(self.dim, self.dim)
+ self.after_proj = torch.nn.Linear(self.dim, self.dim)
+
+ def forward(self, c, x, context, t_mod, freqs):
+ if self.block_id == 0:
+ c = self.before_proj(c) + x
+ all_c = []
+ else:
+ all_c = list(torch.unbind(c))
+ c = all_c.pop(-1)
+ c = super().forward(c, context, t_mod, freqs)
+ c_skip = self.after_proj(c)
+ all_c += [c_skip, c]
+ c = torch.stack(all_c)
+ return c
+
+
+class VaceWanModel(torch.nn.Module):
+ def __init__(
+ self,
+ vace_layers=(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28),
+ vace_in_dim=96,
+ patch_size=(1, 2, 2),
+ has_image_input=False,
+ dim=1536,
+ num_heads=12,
+ ffn_dim=8960,
+ eps=1e-6,
+ ):
+ super().__init__()
+ self.vace_layers = vace_layers
+ self.vace_in_dim = vace_in_dim
+ self.vace_layers_mapping = {i: n for n, i in enumerate(self.vace_layers)}
+
+ # vace blocks
+ self.vace_blocks = torch.nn.ModuleList([
+ VaceWanAttentionBlock(has_image_input, dim, num_heads, ffn_dim, eps, block_id=i)
+ for i in self.vace_layers
+ ])
+
+ # vace patch embeddings
+ self.vace_patch_embedding = torch.nn.Conv3d(vace_in_dim, dim, kernel_size=patch_size, stride=patch_size)
+
+ def forward(
+ self, x, vace_context, context, t_mod, freqs,
+ use_gradient_checkpointing: bool = False,
+ use_gradient_checkpointing_offload: bool = False,
+ ):
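+        # patchify the VACE context, zero-pad each sequence to the DiT token length, then run the VACE blocks, which accumulate per-layer hint tensors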
+ c = [self.vace_patch_embedding(u.unsqueeze(0)) for u in vace_context]
+ c = [u.flatten(2).transpose(1, 2) for u in c]
+ c = torch.cat([
+ torch.cat([u, u.new_zeros(1, x.shape[1] - u.size(1), u.size(2))],
+ dim=1) for u in c
+ ])
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs)
+ return custom_forward
+
+ for block in self.vace_blocks:
+ if use_gradient_checkpointing_offload:
+ with torch.autograd.graph.save_on_cpu():
+ c = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(block),
+ c, x, context, t_mod, freqs,
+ use_reentrant=False,
+ )
+ elif use_gradient_checkpointing:
+ c = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(block),
+ c, x, context, t_mod, freqs,
+ use_reentrant=False,
+ )
+ else:
+ c = block(c, x, context, t_mod, freqs)
+ hints = torch.unbind(c)[:-1]
+ return hints
+
+ @staticmethod
+ def state_dict_converter():
+ return VaceWanModelDictConverter()
+
+
+class VaceWanModelDictConverter:
+ def __init__(self):
+ pass
+
+ def from_civitai(self, state_dict):
+ state_dict_ = {name: param for name, param in state_dict.items() if name.startswith("vace")}
+ if hash_state_dict_keys(state_dict_) == '3b2726384e4f64837bdf216eea3f310d': # vace 14B
+ config = {
+ "vace_layers": (0, 5, 10, 15, 20, 25, 30, 35),
+ "vace_in_dim": 96,
+ "patch_size": (1, 2, 2),
+ "has_image_input": False,
+ "dim": 5120,
+ "num_heads": 40,
+ "ffn_dim": 13824,
+ "eps": 1e-06,
+ }
+ else:
+ config = {}
+ return state_dict_, config
diff --git a/dkt/models/wan_video_vae.py b/dkt/models/wan_video_vae.py
new file mode 100644
index 0000000000000000000000000000000000000000..397a2e7b66258159d84ac299b7798c52bc7e038a
--- /dev/null
+++ b/dkt/models/wan_video_vae.py
@@ -0,0 +1,1376 @@
+from einops import rearrange, repeat
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from tqdm import tqdm
+
+CACHE_T = 2
+
+
+def check_is_instance(model, module_class):
+ if isinstance(model, module_class):
+ return True
+ if hasattr(model, "module") and isinstance(model.module, module_class):
+ return True
+ return False
+
+
+def block_causal_mask(x, block_size):
+ # params
+ b, n, s, _, device = *x.size(), x.device
+ assert s % block_size == 0
+ num_blocks = s // block_size
+
+ # build mask
+ mask = torch.zeros(b, n, s, s, dtype=torch.bool, device=device)
+ for i in range(num_blocks):
+ mask[:, :,
+ i * block_size:(i + 1) * block_size, :(i + 1) * block_size] = 1
+ return mask
+
+
+class CausalConv3d(nn.Conv3d):
+ """
+    Causal 3D convolution.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._padding = (self.padding[2], self.padding[2], self.padding[1],
+ self.padding[1], 2 * self.padding[0], 0)
+ self.padding = (0, 0, 0)
+
+ def forward(self, x, cache_x=None):
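+        # padding is applied only on the left of the time axis, so outputs never see future frames; cache_x carries the tail frames of the previous chunk for chunked inference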
+ padding = list(self._padding)
+ if cache_x is not None and self._padding[4] > 0:
+ cache_x = cache_x.to(x.device)
+ x = torch.cat([cache_x, x], dim=2)
+ padding[4] -= cache_x.shape[2]
+ x = F.pad(x, padding)
+
+ return super().forward(x)
+
+
+class RMS_norm(nn.Module):
+
+ def __init__(self, dim, channel_first=True, images=True, bias=False):
+ super().__init__()
+ broadcastable_dims = (1, 1, 1) if not images else (1, 1)
+ shape = (dim, *broadcastable_dims) if channel_first else (dim,)
+
+ self.channel_first = channel_first
+ self.scale = dim**0.5
+ self.gamma = nn.Parameter(torch.ones(shape))
+ self.bias = nn.Parameter(torch.zeros(shape)) if bias else 0.
+
+ def forward(self, x):
+ return F.normalize(
+ x, dim=(1 if self.channel_first else
+ -1)) * self.scale * self.gamma + self.bias
+
+
+class Upsample(nn.Upsample):
+
+ def forward(self, x):
+ """
+ Fix bfloat16 support for nearest neighbor interpolation.
+ """
+ return super().forward(x.float()).type_as(x)
+
+
+class Resample(nn.Module):
+
+ def __init__(self, dim, mode):
+ assert mode in ('none', 'upsample2d', 'upsample3d', 'downsample2d',
+ 'downsample3d')
+ super().__init__()
+ self.dim = dim
+ self.mode = mode
+
+ # layers
+ if mode == 'upsample2d':
+ self.resample = nn.Sequential(
+ Upsample(scale_factor=(2., 2.), mode='nearest-exact'),
+ nn.Conv2d(dim, dim // 2, 3, padding=1))
+ elif mode == 'upsample3d':
+ self.resample = nn.Sequential(
+ Upsample(scale_factor=(2., 2.), mode='nearest-exact'),
+ nn.Conv2d(dim, dim // 2, 3, padding=1))
+ self.time_conv = CausalConv3d(dim,
+ dim * 2, (3, 1, 1),
+ padding=(1, 0, 0))
+
+ elif mode == 'downsample2d':
+ self.resample = nn.Sequential(
+ nn.ZeroPad2d((0, 1, 0, 1)),
+ nn.Conv2d(dim, dim, 3, stride=(2, 2)))
+ elif mode == 'downsample3d':
+ self.resample = nn.Sequential(
+ nn.ZeroPad2d((0, 1, 0, 1)),
+ nn.Conv2d(dim, dim, 3, stride=(2, 2)))
+ self.time_conv = CausalConv3d(dim,
+ dim, (3, 1, 1),
+ stride=(2, 1, 1),
+ padding=(0, 0, 0))
+
+ else:
+ self.resample = nn.Identity()
+
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
+ b, c, t, h, w = x.size()
+ if self.mode == 'upsample3d':
+ if feat_cache is not None:
+ idx = feat_idx[0]
+ if feat_cache[idx] is None:
+ feat_cache[idx] = 'Rep'
+ feat_idx[0] += 1
+ else:
+
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
+ if cache_x.shape[2] < 2 and feat_cache[
+ idx] is not None and feat_cache[idx] != 'Rep':
+                        # fewer than CACHE_T frames in this chunk; prepend the last cached frame from the previous chunk
+ cache_x = torch.cat([
+ feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(
+ cache_x.device), cache_x
+ ],
+ dim=2)
+ if cache_x.shape[2] < 2 and feat_cache[
+ idx] is not None and feat_cache[idx] == 'Rep':
+ cache_x = torch.cat([
+ torch.zeros_like(cache_x).to(cache_x.device),
+ cache_x
+ ],
+ dim=2)
+ if feat_cache[idx] == 'Rep':
+ x = self.time_conv(x)
+ else:
+ x = self.time_conv(x, feat_cache[idx])
+ feat_cache[idx] = cache_x
+ feat_idx[0] += 1
+
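+                    # time_conv doubled the channel dim; interleave the two halves along time to realise the 2x temporal upsampling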
+ x = x.reshape(b, 2, c, t, h, w)
+ x = torch.stack((x[:, 0, :, :, :, :], x[:, 1, :, :, :, :]),
+ 3)
+ x = x.reshape(b, c, t * 2, h, w)
+ t = x.shape[2]
+ x = rearrange(x, 'b c t h w -> (b t) c h w')
+ x = self.resample(x)
+ x = rearrange(x, '(b t) c h w -> b c t h w', t=t)
+
+ if self.mode == 'downsample3d':
+ if feat_cache is not None:
+ idx = feat_idx[0]
+ if feat_cache[idx] is None:
+ feat_cache[idx] = x.clone()
+ feat_idx[0] += 1
+ else:
+ cache_x = x[:, :, -1:, :, :].clone()
+ x = self.time_conv(
+ torch.cat([feat_cache[idx][:, :, -1:, :, :], x], 2))
+ feat_cache[idx] = cache_x
+ feat_idx[0] += 1
+ return x
+
+ def init_weight(self, conv):
+ conv_weight = conv.weight
+ nn.init.zeros_(conv_weight)
+ c1, c2, t, h, w = conv_weight.size()
+ one_matrix = torch.eye(c1, c2)
+ init_matrix = one_matrix
+ nn.init.zeros_(conv_weight)
+ conv_weight.data[:, :, 1, 0, 0] = init_matrix
+ conv.weight.data.copy_(conv_weight)
+ nn.init.zeros_(conv.bias.data)
+
+ def init_weight2(self, conv):
+ conv_weight = conv.weight.data
+ nn.init.zeros_(conv_weight)
+ c1, c2, t, h, w = conv_weight.size()
+ init_matrix = torch.eye(c1 // 2, c2)
+ conv_weight[:c1 // 2, :, -1, 0, 0] = init_matrix
+ conv_weight[c1 // 2:, :, -1, 0, 0] = init_matrix
+ conv.weight.data.copy_(conv_weight)
+ nn.init.zeros_(conv.bias.data)
+
+
+
+def patchify(x, patch_size):
+ if patch_size == 1:
+ return x
+ if x.dim() == 4:
+ x = rearrange(x, "b c (h q) (w r) -> b (c r q) h w", q=patch_size, r=patch_size)
+ elif x.dim() == 5:
+ x = rearrange(x,
+ "b c f (h q) (w r) -> b (c r q) f h w",
+ q=patch_size,
+ r=patch_size)
+ else:
+ raise ValueError(f"Invalid input shape: {x.shape}")
+ return x
+
+
+def unpatchify(x, patch_size):
+ if patch_size == 1:
+ return x
+ if x.dim() == 4:
+ x = rearrange(x, "b (c r q) h w -> b c (h q) (w r)", q=patch_size, r=patch_size)
+ elif x.dim() == 5:
+ x = rearrange(x,
+ "b (c r q) f h w -> b c f (h q) (w r)",
+ q=patch_size,
+ r=patch_size)
+ return x
+
+
+class Resample38(Resample):
+
+ def __init__(self, dim, mode):
+ assert mode in (
+ "none",
+ "upsample2d",
+ "upsample3d",
+ "downsample2d",
+ "downsample3d",
+ )
+ super(Resample, self).__init__()
+ self.dim = dim
+ self.mode = mode
+
+ # layers
+ if mode == "upsample2d":
+ self.resample = nn.Sequential(
+ Upsample(scale_factor=(2.0, 2.0), mode="nearest-exact"),
+ nn.Conv2d(dim, dim, 3, padding=1),
+ )
+ elif mode == "upsample3d":
+ self.resample = nn.Sequential(
+ Upsample(scale_factor=(2.0, 2.0), mode="nearest-exact"),
+ nn.Conv2d(dim, dim, 3, padding=1),
+ )
+ self.time_conv = CausalConv3d(dim, dim * 2, (3, 1, 1), padding=(1, 0, 0))
+ elif mode == "downsample2d":
+ self.resample = nn.Sequential(
+ nn.ZeroPad2d((0, 1, 0, 1)), nn.Conv2d(dim, dim, 3, stride=(2, 2))
+ )
+ elif mode == "downsample3d":
+ self.resample = nn.Sequential(
+ nn.ZeroPad2d((0, 1, 0, 1)), nn.Conv2d(dim, dim, 3, stride=(2, 2))
+ )
+ self.time_conv = CausalConv3d(
+ dim, dim, (3, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0)
+ )
+ else:
+ self.resample = nn.Identity()
+
+class ResidualBlock(nn.Module):
+
+ def __init__(self, in_dim, out_dim, dropout=0.0):
+ super().__init__()
+ self.in_dim = in_dim
+ self.out_dim = out_dim
+
+ # layers
+ self.residual = nn.Sequential(
+ RMS_norm(in_dim, images=False), nn.SiLU(),
+ CausalConv3d(in_dim, out_dim, 3, padding=1),
+ RMS_norm(out_dim, images=False), nn.SiLU(), nn.Dropout(dropout),
+ CausalConv3d(out_dim, out_dim, 3, padding=1))
+ self.shortcut = CausalConv3d(in_dim, out_dim, 1) \
+ if in_dim != out_dim else nn.Identity()
+
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
+ h = self.shortcut(x)
+ for layer in self.residual:
+ if check_is_instance(layer, CausalConv3d) and feat_cache is not None:
+ idx = feat_idx[0]
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
+                    # fewer than CACHE_T frames in this chunk; prepend the last cached frame from the previous chunk
+ cache_x = torch.cat([
+ feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(
+ cache_x.device), cache_x
+ ],
+ dim=2)
+ x = layer(x, feat_cache[idx])
+ feat_cache[idx] = cache_x
+ feat_idx[0] += 1
+ else:
+ x = layer(x)
+ return x + h
+
+
+class AttentionBlock(nn.Module):
+ """
+ Causal self-attention with a single head.
+ """
+
+ def __init__(self, dim):
+ super().__init__()
+ self.dim = dim
+
+ # layers
+ self.norm = RMS_norm(dim)
+ self.to_qkv = nn.Conv2d(dim, dim * 3, 1)
+ self.proj = nn.Conv2d(dim, dim, 1)
+
+ # zero out the last layer params
+ nn.init.zeros_(self.proj.weight)
+
+ def forward(self, x):
+ identity = x
+ b, c, t, h, w = x.size()
+ x = rearrange(x, 'b c t h w -> (b t) c h w')
+ x = self.norm(x)
+ # compute query, key, value
+ q, k, v = self.to_qkv(x).reshape(b * t, 1, c * 3, -1).permute(
+ 0, 1, 3, 2).contiguous().chunk(3, dim=-1)
+
+ # apply attention
+ x = F.scaled_dot_product_attention(
+ q,
+ k,
+ v,
+ #attn_mask=block_causal_mask(q, block_size=h * w)
+ )
+ x = x.squeeze(1).permute(0, 2, 1).reshape(b * t, c, h, w)
+
+ # output
+ x = self.proj(x)
+        x = rearrange(x, '(b t) c h w -> b c t h w', t=t)
+ return x + identity
+
+
+class AvgDown3D(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ factor_t,
+ factor_s=1,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.factor_t = factor_t
+ self.factor_s = factor_s
+ self.factor = self.factor_t * self.factor_s * self.factor_s
+
+ assert in_channels * self.factor % out_channels == 0
+ self.group_size = in_channels * self.factor // out_channels
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
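+        # fold factor_t x factor_s x factor_s neighbourhoods into channels, then average channel groups: plain average pooling when out_channels == in_channels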
+ pad_t = (self.factor_t - x.shape[2] % self.factor_t) % self.factor_t
+ pad = (0, 0, 0, 0, pad_t, 0)
+ x = F.pad(x, pad)
+ B, C, T, H, W = x.shape
+ x = x.view(
+ B,
+ C,
+ T // self.factor_t,
+ self.factor_t,
+ H // self.factor_s,
+ self.factor_s,
+ W // self.factor_s,
+ self.factor_s,
+ )
+ x = x.permute(0, 1, 3, 5, 7, 2, 4, 6).contiguous()
+ x = x.view(
+ B,
+ C * self.factor,
+ T // self.factor_t,
+ H // self.factor_s,
+ W // self.factor_s,
+ )
+ x = x.view(
+ B,
+ self.out_channels,
+ self.group_size,
+ T // self.factor_t,
+ H // self.factor_s,
+ W // self.factor_s,
+ )
+ x = x.mean(dim=2)
+ return x
+
+
+class DupUp3D(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ factor_t,
+ factor_s=1,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+
+ self.factor_t = factor_t
+ self.factor_s = factor_s
+ self.factor = self.factor_t * self.factor_s * self.factor_s
+
+ assert out_channels * self.factor % in_channels == 0
+ self.repeats = out_channels * self.factor // in_channels
+
+ def forward(self, x: torch.Tensor, first_chunk=False) -> torch.Tensor:
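+        # inverse of AvgDown3D: repeat channels and unfold them back into time and space; the first chunk drops its duplicated leading frames to stay causally aligned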
+ x = x.repeat_interleave(self.repeats, dim=1)
+ x = x.view(
+ x.size(0),
+ self.out_channels,
+ self.factor_t,
+ self.factor_s,
+ self.factor_s,
+ x.size(2),
+ x.size(3),
+ x.size(4),
+ )
+ x = x.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous()
+ x = x.view(
+ x.size(0),
+ self.out_channels,
+ x.size(2) * self.factor_t,
+ x.size(4) * self.factor_s,
+ x.size(6) * self.factor_s,
+ )
+ if first_chunk:
+ x = x[:, :, self.factor_t - 1 :, :, :]
+ return x
+
+
+class Down_ResidualBlock(nn.Module):
+ def __init__(
+ self, in_dim, out_dim, dropout, mult, temperal_downsample=False, down_flag=False
+ ):
+ super().__init__()
+
+ # Shortcut path with downsample
+ self.avg_shortcut = AvgDown3D(
+ in_dim,
+ out_dim,
+ factor_t=2 if temperal_downsample else 1,
+ factor_s=2 if down_flag else 1,
+ )
+
+ # Main path with residual blocks and downsample
+ downsamples = []
+ for _ in range(mult):
+ downsamples.append(ResidualBlock(in_dim, out_dim, dropout))
+ in_dim = out_dim
+
+ # Add the final downsample block
+ if down_flag:
+ mode = "downsample3d" if temperal_downsample else "downsample2d"
+ downsamples.append(Resample38(out_dim, mode=mode))
+
+ self.downsamples = nn.Sequential(*downsamples)
+
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
+ x_copy = x.clone()
+ for module in self.downsamples:
+ x = module(x, feat_cache, feat_idx)
+
+ return x + self.avg_shortcut(x_copy)
+
+
+class Up_ResidualBlock(nn.Module):
+ def __init__(
+ self, in_dim, out_dim, dropout, mult, temperal_upsample=False, up_flag=False
+ ):
+ super().__init__()
+ # Shortcut path with upsample
+ if up_flag:
+ self.avg_shortcut = DupUp3D(
+ in_dim,
+ out_dim,
+ factor_t=2 if temperal_upsample else 1,
+ factor_s=2 if up_flag else 1,
+ )
+ else:
+ self.avg_shortcut = None
+
+ # Main path with residual blocks and upsample
+ upsamples = []
+ for _ in range(mult):
+ upsamples.append(ResidualBlock(in_dim, out_dim, dropout))
+ in_dim = out_dim
+
+ # Add the final upsample block
+ if up_flag:
+ mode = "upsample3d" if temperal_upsample else "upsample2d"
+ upsamples.append(Resample38(out_dim, mode=mode))
+
+ self.upsamples = nn.Sequential(*upsamples)
+
+ def forward(self, x, feat_cache=None, feat_idx=[0], first_chunk=False):
+ x_main = x.clone()
+ for module in self.upsamples:
+ x_main = module(x_main, feat_cache, feat_idx)
+ if self.avg_shortcut is not None:
+ x_shortcut = self.avg_shortcut(x, first_chunk)
+ return x_main + x_shortcut
+ else:
+ return x_main
+
+
+class Encoder3d(nn.Module):
+
+ def __init__(self,
+ dim=128,
+ z_dim=4,
+ dim_mult=[1, 2, 4, 4],
+ num_res_blocks=2,
+ attn_scales=[],
+ temperal_downsample=[True, True, False],
+ dropout=0.0):
+ super().__init__()
+ self.dim = dim
+ self.z_dim = z_dim
+ self.dim_mult = dim_mult
+ self.num_res_blocks = num_res_blocks
+ self.attn_scales = attn_scales
+ self.temperal_downsample = temperal_downsample
+
+ # dimensions
+ dims = [dim * u for u in [1] + dim_mult]
+ scale = 1.0
+
+ # init block
+ self.conv1 = CausalConv3d(3, dims[0], 3, padding=1)
+
+ # downsample blocks
+ downsamples = []
+ for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
+ # residual (+attention) blocks
+ for _ in range(num_res_blocks):
+ downsamples.append(ResidualBlock(in_dim, out_dim, dropout))
+ if scale in attn_scales:
+ downsamples.append(AttentionBlock(out_dim))
+ in_dim = out_dim
+
+ # downsample block
+ if i != len(dim_mult) - 1:
+ mode = 'downsample3d' if temperal_downsample[
+ i] else 'downsample2d'
+ downsamples.append(Resample(out_dim, mode=mode))
+ scale /= 2.0
+ self.downsamples = nn.Sequential(*downsamples)
+
+ # middle blocks
+ self.middle = nn.Sequential(ResidualBlock(out_dim, out_dim, dropout),
+ AttentionBlock(out_dim),
+ ResidualBlock(out_dim, out_dim, dropout))
+
+ # output blocks
+ self.head = nn.Sequential(RMS_norm(out_dim, images=False), nn.SiLU(),
+ CausalConv3d(out_dim, z_dim, 3, padding=1))
+
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
+ if feat_cache is not None:
+ idx = feat_idx[0]
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
+                # fewer than CACHE_T frames in this chunk; prepend the last cached frame from the previous chunk
+ cache_x = torch.cat([
+ feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(
+ cache_x.device), cache_x
+ ],
+ dim=2)
+ x = self.conv1(x, feat_cache[idx])
+ feat_cache[idx] = cache_x
+ feat_idx[0] += 1
+ else:
+ x = self.conv1(x)
+
+ ## downsamples
+ for layer in self.downsamples:
+ if feat_cache is not None:
+ x = layer(x, feat_cache, feat_idx)
+ else:
+ x = layer(x)
+
+ ## middle
+ for layer in self.middle:
+ if check_is_instance(layer, ResidualBlock) and feat_cache is not None:
+ x = layer(x, feat_cache, feat_idx)
+ else:
+ x = layer(x)
+
+ ## head
+ for layer in self.head:
+ if check_is_instance(layer, CausalConv3d) and feat_cache is not None:
+ idx = feat_idx[0]
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
+                    # fewer than CACHE_T frames in this chunk; prepend the last cached frame from the previous chunk
+ cache_x = torch.cat([
+ feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(
+ cache_x.device), cache_x
+ ],
+ dim=2)
+ x = layer(x, feat_cache[idx])
+ feat_cache[idx] = cache_x
+ feat_idx[0] += 1
+ else:
+ x = layer(x)
+ return x
+
+
+class Encoder3d_38(nn.Module):
+
+ def __init__(self,
+ dim=128,
+ z_dim=4,
+ dim_mult=[1, 2, 4, 4],
+ num_res_blocks=2,
+ attn_scales=[],
+ temperal_downsample=[False, True, True],
+ dropout=0.0):
+ super().__init__()
+ self.dim = dim
+ self.z_dim = z_dim
+ self.dim_mult = dim_mult
+ self.num_res_blocks = num_res_blocks
+ self.attn_scales = attn_scales
+ self.temperal_downsample = temperal_downsample
+
+ # dimensions
+ dims = [dim * u for u in [1] + dim_mult]
+ scale = 1.0
+
+ # init block
+ self.conv1 = CausalConv3d(12, dims[0], 3, padding=1)
+
+ # downsample blocks
+ downsamples = []
+ for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
+ t_down_flag = (
+ temperal_downsample[i] if i < len(temperal_downsample) else False
+ )
+ downsamples.append(
+ Down_ResidualBlock(
+ in_dim=in_dim,
+ out_dim=out_dim,
+ dropout=dropout,
+ mult=num_res_blocks,
+ temperal_downsample=t_down_flag,
+ down_flag=i != len(dim_mult) - 1,
+ )
+ )
+ scale /= 2.0
+ self.downsamples = nn.Sequential(*downsamples)
+
+ # middle blocks
+ self.middle = nn.Sequential(
+ ResidualBlock(out_dim, out_dim, dropout),
+ AttentionBlock(out_dim),
+ ResidualBlock(out_dim, out_dim, dropout),
+ )
+
+        # output blocks
+ self.head = nn.Sequential(
+ RMS_norm(out_dim, images=False),
+ nn.SiLU(),
+ CausalConv3d(out_dim, z_dim, 3, padding=1),
+ )
+
+
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
+
+ if feat_cache is not None:
+ idx = feat_idx[0]
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
+ cache_x = torch.cat(
+ [
+ feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device),
+ cache_x,
+ ],
+ dim=2,
+ )
+ x = self.conv1(x, feat_cache[idx])
+ feat_cache[idx] = cache_x
+ feat_idx[0] += 1
+ else:
+ x = self.conv1(x)
+
+ ## downsamples
+ for layer in self.downsamples:
+ if feat_cache is not None:
+ x = layer(x, feat_cache, feat_idx)
+ else:
+ x = layer(x)
+
+ ## middle
+ for layer in self.middle:
+ if isinstance(layer, ResidualBlock) and feat_cache is not None:
+ x = layer(x, feat_cache, feat_idx)
+ else:
+ x = layer(x)
+
+ ## head
+ for layer in self.head:
+ if isinstance(layer, CausalConv3d) and feat_cache is not None:
+ idx = feat_idx[0]
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
+ cache_x = torch.cat(
+ [
+ feat_cache[idx][:, :, -1, :, :]
+ .unsqueeze(2)
+ .to(cache_x.device),
+ cache_x,
+ ],
+ dim=2,
+ )
+ x = layer(x, feat_cache[idx])
+ feat_cache[idx] = cache_x
+ feat_idx[0] += 1
+ else:
+ x = layer(x)
+
+ return x
+
+
+class Decoder3d(nn.Module):
+
+ def __init__(self,
+ dim=128,
+ z_dim=4,
+ dim_mult=[1, 2, 4, 4],
+ num_res_blocks=2,
+ attn_scales=[],
+ temperal_upsample=[False, True, True],
+ dropout=0.0):
+ super().__init__()
+ self.dim = dim
+ self.z_dim = z_dim
+ self.dim_mult = dim_mult
+ self.num_res_blocks = num_res_blocks
+ self.attn_scales = attn_scales
+ self.temperal_upsample = temperal_upsample
+
+ # dimensions
+ dims = [dim * u for u in [dim_mult[-1]] + dim_mult[::-1]]
+ scale = 1.0 / 2**(len(dim_mult) - 2)
+
+ # init block
+ self.conv1 = CausalConv3d(z_dim, dims[0], 3, padding=1)
+
+ # middle blocks
+ self.middle = nn.Sequential(ResidualBlock(dims[0], dims[0], dropout),
+ AttentionBlock(dims[0]),
+ ResidualBlock(dims[0], dims[0], dropout))
+
+ # upsample blocks
+ upsamples = []
+ for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
+ # residual (+attention) blocks
+ if i == 1 or i == 2 or i == 3:
+ in_dim = in_dim // 2
+ for _ in range(num_res_blocks + 1):
+ upsamples.append(ResidualBlock(in_dim, out_dim, dropout))
+ if scale in attn_scales:
+ upsamples.append(AttentionBlock(out_dim))
+ in_dim = out_dim
+
+ # upsample block
+ if i != len(dim_mult) - 1:
+ mode = 'upsample3d' if temperal_upsample[i] else 'upsample2d'
+ upsamples.append(Resample(out_dim, mode=mode))
+ scale *= 2.0
+ self.upsamples = nn.Sequential(*upsamples)
+
+ # output blocks
+ self.head = nn.Sequential(RMS_norm(out_dim, images=False), nn.SiLU(),
+ CausalConv3d(out_dim, 3, 3, padding=1))
+
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
+ ## conv1
+ if feat_cache is not None:
+ idx = feat_idx[0]
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
+                # fewer than CACHE_T frames in this chunk; prepend the last cached frame from the previous chunk
+ cache_x = torch.cat([
+ feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(
+ cache_x.device), cache_x
+ ],
+ dim=2)
+ x = self.conv1(x, feat_cache[idx])
+ feat_cache[idx] = cache_x
+ feat_idx[0] += 1
+ else:
+ x = self.conv1(x)
+
+ ## middle
+ for layer in self.middle:
+ if check_is_instance(layer, ResidualBlock) and feat_cache is not None:
+ x = layer(x, feat_cache, feat_idx)
+ else:
+ x = layer(x)
+
+ ## upsamples
+ for layer in self.upsamples:
+ if feat_cache is not None:
+ x = layer(x, feat_cache, feat_idx)
+ else:
+ x = layer(x)
+
+ ## head
+ for layer in self.head:
+ if check_is_instance(layer, CausalConv3d) and feat_cache is not None:
+ idx = feat_idx[0]
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
+                    # fewer than CACHE_T frames in this chunk; prepend the last cached frame from the previous chunk
+ cache_x = torch.cat([
+ feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(
+ cache_x.device), cache_x
+ ],
+ dim=2)
+ x = layer(x, feat_cache[idx])
+ feat_cache[idx] = cache_x
+ feat_idx[0] += 1
+ else:
+ x = layer(x)
+ return x
+
+
+
+class Decoder3d_38(nn.Module):
+
+ def __init__(self,
+ dim=128,
+ z_dim=4,
+ dim_mult=[1, 2, 4, 4],
+ num_res_blocks=2,
+ attn_scales=[],
+ temperal_upsample=[False, True, True],
+ dropout=0.0):
+ super().__init__()
+ self.dim = dim
+ self.z_dim = z_dim
+ self.dim_mult = dim_mult
+ self.num_res_blocks = num_res_blocks
+ self.attn_scales = attn_scales
+ self.temperal_upsample = temperal_upsample
+
+ # dimensions
+ dims = [dim * u for u in [dim_mult[-1]] + dim_mult[::-1]]
+ scale = 1.0 / 2 ** (len(dim_mult) - 2)
+ # init block
+ self.conv1 = CausalConv3d(z_dim, dims[0], 3, padding=1)
+
+ # middle blocks
+ self.middle = nn.Sequential(ResidualBlock(dims[0], dims[0], dropout),
+ AttentionBlock(dims[0]),
+ ResidualBlock(dims[0], dims[0], dropout))
+
+ # upsample blocks
+ upsamples = []
+ for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
+ t_up_flag = temperal_upsample[i] if i < len(temperal_upsample) else False
+ upsamples.append(
+ Up_ResidualBlock(in_dim=in_dim,
+ out_dim=out_dim,
+ dropout=dropout,
+ mult=num_res_blocks + 1,
+ temperal_upsample=t_up_flag,
+ up_flag=i != len(dim_mult) - 1))
+ self.upsamples = nn.Sequential(*upsamples)
+
+ # output blocks
+ self.head = nn.Sequential(RMS_norm(out_dim, images=False), nn.SiLU(),
+ CausalConv3d(out_dim, 12, 3, padding=1))
+
+
+ def forward(self, x, feat_cache=None, feat_idx=[0], first_chunk=False):
+ if feat_cache is not None:
+ idx = feat_idx[0]
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
+ cache_x = torch.cat(
+ [
+ feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device),
+ cache_x,
+ ],
+ dim=2,
+ )
+ x = self.conv1(x, feat_cache[idx])
+ feat_cache[idx] = cache_x
+ feat_idx[0] += 1
+ else:
+ x = self.conv1(x)
+
+ for layer in self.middle:
+ if check_is_instance(layer, ResidualBlock) and feat_cache is not None:
+ x = layer(x, feat_cache, feat_idx)
+ else:
+ x = layer(x)
+
+ ## upsamples
+ for layer in self.upsamples:
+ if feat_cache is not None:
+ x = layer(x, feat_cache, feat_idx, first_chunk)
+ else:
+ x = layer(x)
+
+ ## head
+ for layer in self.head:
+ if check_is_instance(layer, CausalConv3d) and feat_cache is not None:
+ idx = feat_idx[0]
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
+ cache_x = torch.cat(
+ [
+ feat_cache[idx][:, :, -1, :, :]
+ .unsqueeze(2)
+ .to(cache_x.device),
+ cache_x,
+ ],
+ dim=2,
+ )
+ x = layer(x, feat_cache[idx])
+ feat_cache[idx] = cache_x
+ feat_idx[0] += 1
+ else:
+ x = layer(x)
+ return x
+
+
+def count_conv3d(model):
+ count = 0
+ for m in model.modules():
+ if isinstance(m, CausalConv3d):
+ count += 1
+ return count
+
+
+class VideoVAE_(nn.Module):
+
+ def __init__(self,
+ dim=96,
+ z_dim=16,
+ dim_mult=[1, 2, 4, 4],
+ num_res_blocks=2,
+ attn_scales=[],
+ temperal_downsample=[False, True, True],
+ dropout=0.0):
+ super().__init__()
+ self.dim = dim
+ self.z_dim = z_dim
+ self.dim_mult = dim_mult
+ self.num_res_blocks = num_res_blocks
+ self.attn_scales = attn_scales
+ self.temperal_downsample = temperal_downsample
+ self.temperal_upsample = temperal_downsample[::-1]
+
+ # modules
+ self.encoder = Encoder3d(dim, z_dim * 2, dim_mult, num_res_blocks,
+ attn_scales, self.temperal_downsample, dropout)
+ self.conv1 = CausalConv3d(z_dim * 2, z_dim * 2, 1)
+ self.conv2 = CausalConv3d(z_dim, z_dim, 1)
+ self.decoder = Decoder3d(dim, z_dim, dim_mult, num_res_blocks,
+ attn_scales, self.temperal_upsample, dropout)
+
+ def forward(self, x):
+ mu, log_var = self.encode(x)
+ z = self.reparameterize(mu, log_var)
+ x_recon = self.decode(z)
+ return x_recon, mu, log_var
+
+ def encode(self, x, scale):
+ self.clear_cache()
+ ## cache
+ t = x.shape[2]
+ iter_ = 1 + (t - 1) // 4
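+        # the first frame is encoded on its own; each following latent frame covers 4 input frames, hence 1 + (t - 1) // 4 encoder passes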
+
+ for i in range(iter_):
+ self._enc_conv_idx = [0]
+ if i == 0:
+ out = self.encoder(x[:, :, :1, :, :],
+ feat_cache=self._enc_feat_map,
+ feat_idx=self._enc_conv_idx)
+ else:
+ out_ = self.encoder(x[:, :, 1 + 4 * (i - 1):1 + 4 * i, :, :],
+ feat_cache=self._enc_feat_map,
+ feat_idx=self._enc_conv_idx)
+ out = torch.cat([out, out_], 2)
+ mu, log_var = self.conv1(out).chunk(2, dim=1)
+ if isinstance(scale[0], torch.Tensor):
+ scale = [s.to(dtype=mu.dtype, device=mu.device) for s in scale]
+ mu = (mu - scale[0].view(1, self.z_dim, 1, 1, 1)) * scale[1].view(
+ 1, self.z_dim, 1, 1, 1)
+ else:
+ scale = scale.to(dtype=mu.dtype, device=mu.device)
+ mu = (mu - scale[0]) * scale[1]
+ return mu
+
+ def decode(self, z, scale):
+ self.clear_cache()
+ # z: [b,c,t,h,w]
+ if isinstance(scale[0], torch.Tensor):
+ scale = [s.to(dtype=z.dtype, device=z.device) for s in scale]
+ z = z / scale[1].view(1, self.z_dim, 1, 1, 1) + scale[0].view(
+ 1, self.z_dim, 1, 1, 1)
+ else:
+ scale = scale.to(dtype=z.dtype, device=z.device)
+ z = z / scale[1] + scale[0]
+ iter_ = z.shape[2]
+ x = self.conv2(z)
+ for i in range(iter_):
+ self._conv_idx = [0]
+ if i == 0:
+ out = self.decoder(x[:, :, i:i + 1, :, :],
+ feat_cache=self._feat_map,
+ feat_idx=self._conv_idx)
+ else:
+ out_ = self.decoder(x[:, :, i:i + 1, :, :],
+ feat_cache=self._feat_map,
+ feat_idx=self._conv_idx)
+ out = torch.cat([out, out_], 2) # may add tensor offload
+ return out
+
+ def reparameterize(self, mu, log_var):
+ std = torch.exp(0.5 * log_var)
+ eps = torch.randn_like(std)
+ return eps * std + mu
+
+ def sample(self, imgs, deterministic=False):
+ mu, log_var = self.encode(imgs)
+ if deterministic:
+ return mu
+ std = torch.exp(0.5 * log_var.clamp(-30.0, 20.0))
+ return mu + std * torch.randn_like(std)
+
+ def clear_cache(self):
+ self._conv_num = count_conv3d(self.decoder)
+ self._conv_idx = [0]
+ self._feat_map = [None] * self._conv_num
+ # cache encode
+ self._enc_conv_num = count_conv3d(self.encoder)
+ self._enc_conv_idx = [0]
+ self._enc_feat_map = [None] * self._enc_conv_num
+
+
+class WanVideoVAE(nn.Module):
+
+ def __init__(self, z_dim=16):
+ super().__init__()
+
+ mean = [
+ -0.7571, -0.7089, -0.9113, 0.1075, -0.1745, 0.9653, -0.1517, 1.5508,
+ 0.4134, -0.0715, 0.5517, -0.3632, -0.1922, -0.9497, 0.2503, -0.2921
+ ]
+ std = [
+ 2.8184, 1.4541, 2.3275, 2.6558, 1.2196, 1.7708, 2.6052, 2.0743,
+ 3.2687, 2.1526, 2.8652, 1.5579, 1.6382, 1.1253, 2.8251, 1.9160
+ ]
+ self.mean = torch.tensor(mean)
+ self.std = torch.tensor(std)
+ self.scale = [self.mean, 1.0 / self.std]
+
+ # init model
+ self.model = VideoVAE_(z_dim=z_dim).eval().requires_grad_(False)
+ self.upsampling_factor = 8
+ self.z_dim = z_dim
+
+
+ def build_1d_mask(self, length, left_bound, right_bound, border_width):
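+        # linear ramp over border_width at any edge shared with a neighbouring tile; edges on the image boundary keep full weight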
+ x = torch.ones((length,))
+ if not left_bound:
+ x[:border_width] = (torch.arange(border_width) + 1) / border_width
+ if not right_bound:
+ x[-border_width:] = torch.flip((torch.arange(border_width) + 1) / border_width, dims=(0,))
+ return x
+
+
+ def build_mask(self, data, is_bound, border_width):
+ _, _, _, H, W = data.shape
+ h = self.build_1d_mask(H, is_bound[0], is_bound[1], border_width[0])
+ w = self.build_1d_mask(W, is_bound[2], is_bound[3], border_width[1])
+
+ h = repeat(h, "H -> H W", H=H, W=W)
+ w = repeat(w, "W -> H W", H=H, W=W)
+
+ mask = torch.stack([h, w]).min(dim=0).values
+ mask = rearrange(mask, "H W -> 1 1 1 H W")
+ return mask
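+    # build_mask produces a per-tile feathering weight: each spatial edge that is not a global
+    # boundary gets a linear ramp of `border_width` pixels, and the H/W ramps are combined with an
+    # element-wise min so overlapping tiles cross-fade smoothly.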
+
+
+ def tiled_decode(self, hidden_states, device, tile_size, tile_stride):
+ _, _, T, H, W = hidden_states.shape
+ size_h, size_w = tile_size
+ stride_h, stride_w = tile_stride
+
+ # Split tasks
+ tasks = []
+ for h in range(0, H, stride_h):
+ if (h-stride_h >= 0 and h-stride_h+size_h >= H): continue
+ for w in range(0, W, stride_w):
+ if (w-stride_w >= 0 and w-stride_w+size_w >= W): continue
+ h_, w_ = h + size_h, w + size_w
+ tasks.append((h, h_, w, w_))
+
+ data_device = "cpu"
+ computation_device = device
+
+ out_T = T * 4 - 3
+ weight = torch.zeros((1, 1, out_T, H * self.upsampling_factor, W * self.upsampling_factor), dtype=hidden_states.dtype, device=data_device)
+ values = torch.zeros((1, 3, out_T, H * self.upsampling_factor, W * self.upsampling_factor), dtype=hidden_states.dtype, device=data_device)
+
+ for h, h_, w, w_ in tqdm(tasks, desc="VAE decoding"):
+ hidden_states_batch = hidden_states[:, :, :, h:h_, w:w_].to(computation_device)
+ hidden_states_batch = self.model.decode(hidden_states_batch, self.scale).to(data_device)
+
+ mask = self.build_mask(
+ hidden_states_batch,
+ is_bound=(h==0, h_>=H, w==0, w_>=W),
+ border_width=((size_h - stride_h) * self.upsampling_factor, (size_w - stride_w) * self.upsampling_factor)
+ ).to(dtype=hidden_states.dtype, device=data_device)
+
+ target_h = h * self.upsampling_factor
+ target_w = w * self.upsampling_factor
+ values[
+ :,
+ :,
+ :,
+ target_h:target_h + hidden_states_batch.shape[3],
+ target_w:target_w + hidden_states_batch.shape[4],
+ ] += hidden_states_batch * mask
+ weight[
+ :,
+ :,
+ :,
+ target_h: target_h + hidden_states_batch.shape[3],
+ target_w: target_w + hidden_states_batch.shape[4],
+ ] += mask
+ values = values / weight
+ values = values.clamp_(-1, 1)
+ return values
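+    # tiled_decode is a weighted overlap-add: each tile is decoded on `device`, multiplied by its
+    # feathering mask and accumulated into `values` on the CPU, while `weight` accumulates the
+    # masks so the final division renormalizes overlapping regions. out_T = T * 4 - 3 reflects the
+    # 4x temporal upsampling: the first latent frame decodes to 1 frame, every later one to 4.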
+
+
+ def tiled_encode(self, video, device, tile_size, tile_stride):
+ _, _, T, H, W = video.shape
+ size_h, size_w = tile_size
+ stride_h, stride_w = tile_stride
+
+ # Split tasks
+ tasks = []
+ for h in range(0, H, stride_h):
+ if (h-stride_h >= 0 and h-stride_h+size_h >= H): continue
+ for w in range(0, W, stride_w):
+ if (w-stride_w >= 0 and w-stride_w+size_w >= W): continue
+ h_, w_ = h + size_h, w + size_w
+ tasks.append((h, h_, w, w_))
+
+ data_device = "cpu"
+ computation_device = device
+
+ out_T = (T + 3) // 4
+ weight = torch.zeros((1, 1, out_T, H // self.upsampling_factor, W // self.upsampling_factor), dtype=video.dtype, device=data_device)
+ values = torch.zeros((1, self.z_dim, out_T, H // self.upsampling_factor, W // self.upsampling_factor), dtype=video.dtype, device=data_device)
+
+ for h, h_, w, w_ in tqdm(tasks, desc="VAE encoding"):
+ hidden_states_batch = video[:, :, :, h:h_, w:w_].to(computation_device)
+ hidden_states_batch = self.model.encode(hidden_states_batch, self.scale).to(data_device)
+
+ mask = self.build_mask(
+ hidden_states_batch,
+ is_bound=(h==0, h_>=H, w==0, w_>=W),
+ border_width=((size_h - stride_h) // self.upsampling_factor, (size_w - stride_w) // self.upsampling_factor)
+ ).to(dtype=video.dtype, device=data_device)
+
+ target_h = h // self.upsampling_factor
+ target_w = w // self.upsampling_factor
+ values[
+ :,
+ :,
+ :,
+ target_h:target_h + hidden_states_batch.shape[3],
+ target_w:target_w + hidden_states_batch.shape[4],
+ ] += hidden_states_batch * mask
+ weight[
+ :,
+ :,
+ :,
+ target_h: target_h + hidden_states_batch.shape[3],
+ target_w: target_w + hidden_states_batch.shape[4],
+ ] += mask
+ values = values / weight
+ return values
+
+
+ def single_encode(self, video, device):
+ video = video.to(device)
+ x = self.model.encode(video, self.scale)
+ return x
+
+
+ def single_decode(self, hidden_state, device):
+ hidden_state = hidden_state.to(device)
+ video = self.model.decode(hidden_state, self.scale)
+ return video.clamp_(-1, 1)
+
+
+ def encode(self, videos, device, tiled=False, tile_size=(34, 34), tile_stride=(18, 16)):
+
+ videos = [video.to("cpu") for video in videos]
+ hidden_states = []
+ for video in videos:
+ video = video.unsqueeze(0)
+ if tiled:
+ tile_size = (tile_size[0] * self.upsampling_factor, tile_size[1] * self.upsampling_factor)
+ tile_stride = (tile_stride[0] * self.upsampling_factor, tile_stride[1] * self.upsampling_factor)
+ hidden_state = self.tiled_encode(video, device, tile_size, tile_stride)
+ else:
+ hidden_state = self.single_encode(video, device)
+ hidden_state = hidden_state.squeeze(0)
+ hidden_states.append(hidden_state)
+ hidden_states = torch.stack(hidden_states)
+ return hidden_states
+
+
+ def decode(self, hidden_states, device, tiled=False, tile_size=(34, 34), tile_stride=(18, 16)):
+ if tiled:
+ video = self.tiled_decode(hidden_states, device, tile_size, tile_stride)
+ else:
+ video = self.single_decode(hidden_states, device)
+ return video
+
+
+ @staticmethod
+ def state_dict_converter():
+ return WanVideoVAEStateDictConverter()
+
+
+class WanVideoVAEStateDictConverter:
+
+ def __init__(self):
+ pass
+
+ def from_civitai(self, state_dict):
+ state_dict_ = {}
+ if 'model_state' in state_dict:
+ state_dict = state_dict['model_state']
+ for name in state_dict:
+ state_dict_['model.' + name] = state_dict[name]
+ return state_dict_
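+    # Hypothetical usage sketch (the checkpoint file name is an assumption, not prescribed here):
+    #   raw = torch.load("Wan2.1_VAE.pth", map_location="cpu")
+    #   vae = WanVideoVAE()
+    #   vae.load_state_dict(WanVideoVAEStateDictConverter().from_civitai(raw), strict=False)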
+
+
+class VideoVAE38_(VideoVAE_):
+
+ def __init__(self,
+ dim=160,
+ z_dim=48,
+ dec_dim=256,
+ dim_mult=[1, 2, 4, 4],
+ num_res_blocks=2,
+ attn_scales=[],
+ temperal_downsample=[False, True, True],
+ dropout=0.0):
+ super(VideoVAE_, self).__init__()
+ self.dim = dim
+ self.z_dim = z_dim
+ self.dim_mult = dim_mult
+ self.num_res_blocks = num_res_blocks
+ self.attn_scales = attn_scales
+ self.temperal_downsample = temperal_downsample
+ self.temperal_upsample = temperal_downsample[::-1]
+
+ # modules
+ self.encoder = Encoder3d_38(dim, z_dim * 2, dim_mult, num_res_blocks,
+ attn_scales, self.temperal_downsample, dropout)
+ self.conv1 = CausalConv3d(z_dim * 2, z_dim * 2, 1)
+ self.conv2 = CausalConv3d(z_dim, z_dim, 1)
+ self.decoder = Decoder3d_38(dec_dim, z_dim, dim_mult, num_res_blocks,
+ attn_scales, self.temperal_upsample, dropout)
+
+
+ def encode(self, x, scale):
+ self.clear_cache()
+ x = patchify(x, patch_size=2)
+ t = x.shape[2]
+ iter_ = 1 + (t - 1) // 4
+ for i in range(iter_):
+ self._enc_conv_idx = [0]
+ if i == 0:
+ out = self.encoder(x[:, :, :1, :, :],
+ feat_cache=self._enc_feat_map,
+ feat_idx=self._enc_conv_idx)
+ else:
+ out_ = self.encoder(x[:, :, 1 + 4 * (i - 1):1 + 4 * i, :, :],
+ feat_cache=self._enc_feat_map,
+ feat_idx=self._enc_conv_idx)
+ out = torch.cat([out, out_], 2)
+ mu, log_var = self.conv1(out).chunk(2, dim=1)
+ if isinstance(scale[0], torch.Tensor):
+ scale = [s.to(dtype=mu.dtype, device=mu.device) for s in scale]
+ mu = (mu - scale[0].view(1, self.z_dim, 1, 1, 1)) * scale[1].view(
+ 1, self.z_dim, 1, 1, 1)
+ else:
+ scale = scale.to(dtype=mu.dtype, device=mu.device)
+ mu = (mu - scale[0]) * scale[1]
+ self.clear_cache()
+ return mu
+
+
+ def decode(self, z, scale):
+ self.clear_cache()
+ if isinstance(scale[0], torch.Tensor):
+ scale = [s.to(dtype=z.dtype, device=z.device) for s in scale]
+ z = z / scale[1].view(1, self.z_dim, 1, 1, 1) + scale[0].view(
+ 1, self.z_dim, 1, 1, 1)
+ else:
+ scale = scale.to(dtype=z.dtype, device=z.device)
+ z = z / scale[1] + scale[0]
+ iter_ = z.shape[2]
+ x = self.conv2(z)
+ for i in range(iter_):
+ self._conv_idx = [0]
+ if i == 0:
+ out = self.decoder(x[:, :, i:i + 1, :, :],
+ feat_cache=self._feat_map,
+ feat_idx=self._conv_idx,
+ first_chunk=True)
+ else:
+ out_ = self.decoder(x[:, :, i:i + 1, :, :],
+ feat_cache=self._feat_map,
+ feat_idx=self._conv_idx)
+ out = torch.cat([out, out_], 2)
+ out = unpatchify(out, patch_size=2)
+ self.clear_cache()
+ return out
+
+
+class WanVideoVAE38(WanVideoVAE):
+
+ def __init__(self, z_dim=48, dim=160):
+ super(WanVideoVAE, self).__init__()
+
+ mean = [
+ -0.2289, -0.0052, -0.1323, -0.2339, -0.2799, 0.0174, 0.1838, 0.1557,
+ -0.1382, 0.0542, 0.2813, 0.0891, 0.1570, -0.0098, 0.0375, -0.1825,
+ -0.2246, -0.1207, -0.0698, 0.5109, 0.2665, -0.2108, -0.2158, 0.2502,
+ -0.2055, -0.0322, 0.1109, 0.1567, -0.0729, 0.0899, -0.2799, -0.1230,
+ -0.0313, -0.1649, 0.0117, 0.0723, -0.2839, -0.2083, -0.0520, 0.3748,
+ 0.0152, 0.1957, 0.1433, -0.2944, 0.3573, -0.0548, -0.1681, -0.0667
+ ]
+ std = [
+ 0.4765, 1.0364, 0.4514, 1.1677, 0.5313, 0.4990, 0.4818, 0.5013,
+ 0.8158, 1.0344, 0.5894, 1.0901, 0.6885, 0.6165, 0.8454, 0.4978,
+ 0.5759, 0.3523, 0.7135, 0.6804, 0.5833, 1.4146, 0.8986, 0.5659,
+ 0.7069, 0.5338, 0.4889, 0.4917, 0.4069, 0.4999, 0.6866, 0.4093,
+ 0.5709, 0.6065, 0.6415, 0.4944, 0.5726, 1.2042, 0.5458, 1.6887,
+ 0.3971, 1.0600, 0.3943, 0.5537, 0.5444, 0.4089, 0.7468, 0.7744
+ ]
+ self.mean = torch.tensor(mean)
+ self.std = torch.tensor(std)
+ self.scale = [self.mean, 1.0 / self.std]
+
+ # init model
+ self.model = VideoVAE38_(z_dim=z_dim, dim=dim).eval().requires_grad_(False)
+ self.upsampling_factor = 16
+ self.z_dim = z_dim
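+        # Compared with WanVideoVAE: 48-channel latents and patchify(patch_size=2) inside the
+        # inner VAE, so the effective spatial compression is 16x (hence upsampling_factor = 16).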
diff --git a/dkt/pipelines/__init__.py b/dkt/pipelines/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dkt/pipelines/wan_video_new.py b/dkt/pipelines/wan_video_new.py
new file mode 100644
index 0000000000000000000000000000000000000000..a30c5bfc51bf6a3558ec43bb3060202f68bbd366
--- /dev/null
+++ b/dkt/pipelines/wan_video_new.py
@@ -0,0 +1,1486 @@
+import torch, warnings, glob, os, types
+import numpy as np
+from PIL import Image
+from einops import repeat, reduce
+from typing import Optional, Union
+from dataclasses import dataclass
+from modelscope import snapshot_download as ms_snap_download
+from huggingface_hub import snapshot_download as hf_snap_download
+
+from einops import rearrange
+import numpy as np
+from PIL import Image
+from tqdm import tqdm
+from typing import Optional
+from typing_extensions import Literal
+
+from ..utils import BasePipeline, ModelConfig, PipelineUnit, PipelineUnitRunner
+from ..models import ModelManager, load_state_dict
+from ..models.wan_video_dit import WanModel, RMSNorm, sinusoidal_embedding_1d
+from ..models.wan_video_text_encoder import WanTextEncoder, T5RelativeEmbedding, T5LayerNorm
+from ..models.wan_video_vae import WanVideoVAE, RMS_norm, CausalConv3d, Upsample
+from ..models.wan_video_image_encoder import WanImageEncoder
+from ..models.wan_video_vace import VaceWanModel
+from ..models.wan_video_motion_controller import WanMotionControllerModel
+from ..schedulers.flow_match import FlowMatchScheduler
+from ..prompters import WanPrompter
+from ..vram_management import enable_vram_management, AutoWrappedModule, AutoWrappedLinear, WanAutoCastLayerNorm
+from ..lora import GeneralLoRALoader
+
+from loguru import logger
+
+
+
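+# NOTE: this local BasePipeline definition shadows the BasePipeline imported from ..utils above;
+# WanVideoPipeline below inherits from this local class.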
+class BasePipeline(torch.nn.Module):
+
+ def __init__(
+ self,
+ device="cuda", torch_dtype=torch.float16,
+ height_division_factor=64, width_division_factor=64,
+ time_division_factor=None, time_division_remainder=None,
+ ):
+ super().__init__()
+ # The device and torch_dtype is used for the storage of intermediate variables, not models.
+ self.device = device
+ self.torch_dtype = torch_dtype
+ # The following parameters are used for shape check.
+ self.height_division_factor = height_division_factor
+ self.width_division_factor = width_division_factor
+ self.time_division_factor = time_division_factor
+ self.time_division_remainder = time_division_remainder
+ self.vram_management_enabled = False
+
+
+ def to(self, *args, **kwargs):
+ device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
+ if device is not None:
+ self.device = device
+ if dtype is not None:
+ self.torch_dtype = dtype
+ super().to(*args, **kwargs)
+ return self
+
+
+ def check_resize_height_width(self, height, width, num_frames=None):
+ # Shape check
+ if height % self.height_division_factor != 0:
+ height = (height + self.height_division_factor - 1) // self.height_division_factor * self.height_division_factor
+ print(f"height % {self.height_division_factor} != 0. We round it up to {height}.")
+ if width % self.width_division_factor != 0:
+ width = (width + self.width_division_factor - 1) // self.width_division_factor * self.width_division_factor
+ print(f"width % {self.width_division_factor} != 0. We round it up to {width}.")
+ if num_frames is None:
+ return height, width
+ else:
+ if num_frames % self.time_division_factor != self.time_division_remainder:
+ num_frames = (num_frames + self.time_division_factor - 1) // self.time_division_factor * self.time_division_factor + self.time_division_remainder
+ print(f"num_frames % {self.time_division_factor} != {self.time_division_remainder}. We round it up to {num_frames}.")
+ return height, width, num_frames
+
+
+ def preprocess_image(self, image, torch_dtype=None, device=None, pattern="B C H W", min_value=-1, max_value=1):
+ # Transform a PIL.Image to torch.Tensor
+ image = torch.Tensor(np.array(image, dtype=np.float32))
+ image = image.to(dtype=torch_dtype or self.torch_dtype, device=device or self.device)
+ image = image * ((max_value - min_value) / 255) + min_value
+ image = repeat(image, f"H W C -> {pattern}", **({"B": 1} if "B" in pattern else {}))
+ return image
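+        # With the defaults (min_value=-1, max_value=1) this maps pixel value 0 -> -1.0 and 255 -> 1.0.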
+
+
+ def preprocess_video(self, video, torch_dtype=None, device=None, pattern="B C T H W", min_value=-1, max_value=1):
+ # Transform a list of PIL.Image to torch.Tensor
+
+
+ if hasattr(video, 'length') and video.length is not None:
+ video = [self.preprocess_image(video[idx], torch_dtype=torch_dtype, device=device, min_value=min_value, max_value=max_value) for idx in range(video.length)]
+ else:
+ video = [self.preprocess_image(image, torch_dtype=torch_dtype, device=device, min_value=min_value, max_value=max_value) for image in video]
+ video = torch.stack(video, dim=pattern.index("T") // 2)
+ return video
+
+
+ def vae_output_to_image(self, vae_output, pattern="B C H W", min_value=-1, max_value=1):
+ # Transform a torch.Tensor to PIL.Image
+ if pattern != "H W C":
+ vae_output = reduce(vae_output, f"{pattern} -> H W C", reduction="mean")
+ image = ((vae_output - min_value) * (255 / (max_value - min_value))).clip(0, 255)
+ image = image.to(device="cpu", dtype=torch.uint8)
+ image = Image.fromarray(image.numpy())
+ return image
+
+
+ def vae_output_to_video(self, vae_output, pattern="B C T H W", min_value=-1, max_value=1):
+ # Transform a torch.Tensor to list of PIL.Image
+ if pattern != "T H W C":
+ vae_output = reduce(vae_output, f"{pattern} -> T H W C", reduction="mean")
+ video = [self.vae_output_to_image(image, pattern="H W C", min_value=min_value, max_value=max_value) for image in vae_output]
+ return video
+
+
+ def load_models_to_device(self, model_names=[]):
+ if self.vram_management_enabled:
+ # offload models
+ for name, model in self.named_children():
+ if name not in model_names:
+ if hasattr(model, "vram_management_enabled") and model.vram_management_enabled:
+ for module in model.modules():
+ if hasattr(module, "offload"):
+ module.offload()
+ else:
+ model.cpu()
+ torch.cuda.empty_cache()
+ # onload models
+ for name, model in self.named_children():
+ if name in model_names:
+ if hasattr(model, "vram_management_enabled") and model.vram_management_enabled:
+ for module in model.modules():
+ if hasattr(module, "onload"):
+ module.onload()
+ else:
+ model.to(self.device)
+
+
+ def generate_noise(self, shape, seed=None, rand_device="cpu", rand_torch_dtype=torch.float32, device=None, torch_dtype=None):
+ # Initialize Gaussian noise
+ generator = None if seed is None else torch.Generator(rand_device).manual_seed(seed)
+ noise = torch.randn(shape, generator=generator, device=rand_device, dtype=rand_torch_dtype)
+ noise = noise.to(dtype=torch_dtype or self.torch_dtype, device=device or self.device)
+ return noise
+
+
+ def enable_cpu_offload(self):
+ warnings.warn("`enable_cpu_offload` will be deprecated. Please use `enable_vram_management`.")
+ self.vram_management_enabled = True
+
+
+ def get_vram(self):
+ return torch.cuda.mem_get_info(self.device)[1] / (1024 ** 3)
+
+
+ def freeze_except(self, model_names):
+ for name, model in self.named_children():
+ if name in model_names:
+ model.train()
+ model.requires_grad_(True)
+ else:
+ model.eval()
+ model.requires_grad_(False)
+
+
+@dataclass
+class ModelConfig:
+ path: Union[str, list[str]] = None
+ model_id: str = None
+ origin_file_pattern: Union[str, list[str]] = None
+ download_resource: str = "ModelScope"
+ offload_device: Optional[Union[str, torch.device]] = None
+ offload_dtype: Optional[torch.dtype] = None
+
+ def download_if_necessary(self, local_model_path="./checkpoints", skip_download=False, use_usp=False):
+ if self.path is None:
+ # Check model_id and origin_file_pattern
+ if self.model_id is None:
+ raise ValueError(f"""No valid model files. Please use `ModelConfig(path="xxx")` or `ModelConfig(model_id="xxx/yyy", origin_file_pattern="zzz")`.""")
+
+ # Skip if not in rank 0
+ if use_usp:
+ import torch.distributed as dist
+ skip_download = dist.get_rank() != 0
+
+ # Check whether the origin path is a folder
+ if self.origin_file_pattern is None or self.origin_file_pattern == "":
+ self.origin_file_pattern = ""
+ allow_file_pattern = None
+ is_folder = True
+ elif isinstance(self.origin_file_pattern, str) and self.origin_file_pattern.endswith("/"):
+ allow_file_pattern = self.origin_file_pattern + "*"
+ is_folder = True
+ else:
+ allow_file_pattern = self.origin_file_pattern
+ is_folder = False
+
+ # Download
+ if not skip_download:
+
+ # downloaded_files = glob.glob(self.origin_file_pattern, root_dir=os.path.join(local_model_path, self.model_id))
+ #!========================================================================================================================
+ downloaded_files = glob.glob(os.path.join(local_model_path, self.model_id, self.origin_file_pattern))
+ #!========================================================================================================================
+
+                if downloaded_files is None or len(downloaded_files) == 0 or not os.path.exists(downloaded_files[0]):
+ if 'Wan2' in self.model_id:
+ ms_snap_download(
+ self.model_id,
+ local_dir=os.path.join(local_model_path, self.model_id),
+ allow_file_pattern=allow_file_pattern,
+ ignore_file_pattern=downloaded_files,
+ )
+ else:
+ hf_snap_download(
+ repo_id=self.model_id,
+ local_dir=os.path.join(local_model_path, self.model_id),
+ allow_patterns=allow_file_pattern,
+ ignore_patterns=downloaded_files if downloaded_files else None
+ )
+
+ # Let rank 1, 2, ... wait for rank 0
+ if use_usp:
+ import torch.distributed as dist
+ dist.barrier(device_ids=[dist.get_rank()])
+
+ # Return downloaded files
+ if is_folder:
+ self.path = os.path.join(local_model_path, self.model_id, self.origin_file_pattern)
+ else:
+ self.path = glob.glob(os.path.join(local_model_path, self.model_id, self.origin_file_pattern))
+ if isinstance(self.path, list) and len(self.path) == 1:
+ self.path = self.path[0]
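+    # Hypothetical usage sketch (the model id / file name are placeholders taken from defaults used
+    # elsewhere in this file):
+    #   cfg = ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth")
+    #   cfg.download_if_necessary()   # downloads into ./checkpoints/<model_id>/ and fills cfg.path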
+
+
+
+
+class WanVideoPipeline(BasePipeline):
+
+ def __init__(self, device="cuda", torch_dtype=torch.bfloat16, tokenizer_path=None):
+ super().__init__(
+ device=device, torch_dtype=torch_dtype,
+ height_division_factor=16, width_division_factor=16, time_division_factor=4, time_division_remainder=1
+ )
+ self.scheduler = FlowMatchScheduler(shift=5, sigma_min=0.0, extra_one_step=True)
+
+ self.prompter = WanPrompter(tokenizer_path=tokenizer_path)
+ self.text_encoder: WanTextEncoder = None
+ self.image_encoder: WanImageEncoder = None
+ self.dit: WanModel = None
+ self.dit2: WanModel = None
+ self.vae: WanVideoVAE = None
+ self.motion_controller: WanMotionControllerModel = None
+ self.vace: VaceWanModel = None
+ self.in_iteration_models = ("dit", "motion_controller", "vace")
+ self.in_iteration_models_2 = ("dit2", "motion_controller", "vace")
+ self.unit_runner = PipelineUnitRunner()
+ self.units = [
+ WanVideoUnit_ShapeChecker(),
+ WanVideoUnit_NoiseInitializer(),
+ WanVideoUnit_InputVideoEmbedder(),
+ WanVideoUnit_PromptEmbedder(),
+ # WanVideoUnit_ImageEmbedderVAE(),
+ # WanVideoUnit_ImageEmbedderCLIP(),
+ # WanVideoUnit_ImageEmbedderFused(),
+ # WanVideoUnit_FunControl(),
+ WanVideoUnit_FunControl_Mask(),
+ # WanVideoUnit_FunReference(),
+ # WanVideoUnit_FunCameraControl(),
+ # WanVideoUnit_SpeedControl(),
+ # WanVideoUnit_VACE(),
+ # WanVideoUnit_UnifiedSequenceParallel(),
+ # WanVideoUnit_TeaCache(),
+ # WanVideoUnit_CfgMerger(),
+ ]
+ self.model_fn = model_fn_wan_video
+
+
+ def load_lora(self, module, path, alpha=1):
+ loader = GeneralLoRALoader(torch_dtype=self.torch_dtype, device=self.device)
+ lora = load_state_dict(path, torch_dtype=self.torch_dtype, device=self.device)
+ loader.load(module, lora, alpha=alpha)
+
+
+ def training_loss(self, **inputs):
+ max_timestep_boundary = int(inputs.get("max_timestep_boundary", 1) * self.scheduler.num_train_timesteps)
+ min_timestep_boundary = int(inputs.get("min_timestep_boundary", 0) * self.scheduler.num_train_timesteps)
+ timestep_id = torch.randint(min_timestep_boundary, max_timestep_boundary, (1,))
+ timestep = self.scheduler.timesteps[timestep_id].to(dtype=self.torch_dtype, device=self.device)
+        #* For single-step denoising, what gets returned each time is pure noise.
+        #? This presumably refers to input_latents?
+        #* inputs["latents"] already exists, but it is exactly equal to inputs["noise"]; here it is updated and overwritten.
+ inputs["latents"] = self.scheduler.add_noise(inputs["input_latents"], inputs["noise"], timestep)
+ training_target = self.scheduler.training_target(inputs["input_latents"], inputs["noise"], timestep)
+
+ noise_pred = self.model_fn(**inputs, timestep=timestep)#* timestep === 1
+
+ loss = torch.nn.functional.mse_loss(noise_pred.float(), training_target.float())
+ loss = loss * self.scheduler.training_weight(timestep)
+ return loss
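+    # Training sketch: a timestep is sampled uniformly inside the configured boundary, the clean
+    # latents are noised with scheduler.add_noise, and the model output is regressed (MSE, with a
+    # per-timestep weight) onto scheduler.training_target. The exact target depends on the
+    # FlowMatchScheduler implementation, which lives outside this file.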
+
+
+ def enable_vram_management(self, num_persistent_param_in_dit=None, vram_limit=None, vram_buffer=0.5):
+ self.vram_management_enabled = True
+ if num_persistent_param_in_dit is not None:
+ vram_limit = None
+ else:
+ if vram_limit is None:
+ vram_limit = self.get_vram()
+ vram_limit = vram_limit - vram_buffer
+ if self.text_encoder is not None:
+ dtype = next(iter(self.text_encoder.parameters())).dtype
+ enable_vram_management(
+ self.text_encoder,
+ module_map = {
+ torch.nn.Linear: AutoWrappedLinear,
+ torch.nn.Embedding: AutoWrappedModule,
+ T5RelativeEmbedding: AutoWrappedModule,
+ T5LayerNorm: AutoWrappedModule,
+ },
+ module_config = dict(
+ offload_dtype=dtype,
+ offload_device="cpu",
+ onload_dtype=dtype,
+ onload_device="cpu",
+ computation_dtype=self.torch_dtype,
+ computation_device=self.device,
+ ),
+ vram_limit=vram_limit,
+ )
+ if self.dit is not None:
+ dtype = next(iter(self.dit.parameters())).dtype
+ device = "cpu" if vram_limit is not None else self.device
+ enable_vram_management(
+ self.dit,
+ module_map = {
+ torch.nn.Linear: AutoWrappedLinear,
+ torch.nn.Conv3d: AutoWrappedModule,
+ torch.nn.LayerNorm: WanAutoCastLayerNorm,
+ RMSNorm: AutoWrappedModule,
+ torch.nn.Conv2d: AutoWrappedModule,
+ },
+ module_config = dict(
+ offload_dtype=dtype,
+ offload_device="cpu",
+ onload_dtype=dtype,
+ onload_device=device,
+ computation_dtype=self.torch_dtype,
+ computation_device=self.device,
+ ),
+ max_num_param=num_persistent_param_in_dit,
+ overflow_module_config = dict(
+ offload_dtype=dtype,
+ offload_device="cpu",
+ onload_dtype=dtype,
+ onload_device="cpu",
+ computation_dtype=self.torch_dtype,
+ computation_device=self.device,
+ ),
+ vram_limit=vram_limit,
+ )
+ if self.dit2 is not None:
+ dtype = next(iter(self.dit2.parameters())).dtype
+ device = "cpu" if vram_limit is not None else self.device
+ enable_vram_management(
+ self.dit2,
+ module_map = {
+ torch.nn.Linear: AutoWrappedLinear,
+ torch.nn.Conv3d: AutoWrappedModule,
+ torch.nn.LayerNorm: WanAutoCastLayerNorm,
+ RMSNorm: AutoWrappedModule,
+ torch.nn.Conv2d: AutoWrappedModule,
+ },
+ module_config = dict(
+ offload_dtype=dtype,
+ offload_device="cpu",
+ onload_dtype=dtype,
+ onload_device=device,
+ computation_dtype=self.torch_dtype,
+ computation_device=self.device,
+ ),
+ max_num_param=num_persistent_param_in_dit,
+ overflow_module_config = dict(
+ offload_dtype=dtype,
+ offload_device="cpu",
+ onload_dtype=dtype,
+ onload_device="cpu",
+ computation_dtype=self.torch_dtype,
+ computation_device=self.device,
+ ),
+ vram_limit=vram_limit,
+ )
+ if self.vae is not None:
+ dtype = next(iter(self.vae.parameters())).dtype
+ enable_vram_management(
+ self.vae,
+ module_map = {
+ torch.nn.Linear: AutoWrappedLinear,
+ torch.nn.Conv2d: AutoWrappedModule,
+ RMS_norm: AutoWrappedModule,
+ CausalConv3d: AutoWrappedModule,
+ Upsample: AutoWrappedModule,
+ torch.nn.SiLU: AutoWrappedModule,
+ torch.nn.Dropout: AutoWrappedModule,
+ },
+ module_config = dict(
+ offload_dtype=dtype,
+ offload_device="cpu",
+ onload_dtype=dtype,
+ onload_device=self.device,
+ computation_dtype=self.torch_dtype,
+ computation_device=self.device,
+ ),
+ )
+ if self.image_encoder is not None:
+ dtype = next(iter(self.image_encoder.parameters())).dtype
+ enable_vram_management(
+ self.image_encoder,
+ module_map = {
+ torch.nn.Linear: AutoWrappedLinear,
+ torch.nn.Conv2d: AutoWrappedModule,
+ torch.nn.LayerNorm: AutoWrappedModule,
+ },
+ module_config = dict(
+ offload_dtype=dtype,
+ offload_device="cpu",
+ onload_dtype=dtype,
+ onload_device="cpu",
+ computation_dtype=dtype,
+ computation_device=self.device,
+ ),
+ )
+ if self.motion_controller is not None:
+ dtype = next(iter(self.motion_controller.parameters())).dtype
+ enable_vram_management(
+ self.motion_controller,
+ module_map = {
+ torch.nn.Linear: AutoWrappedLinear,
+ },
+ module_config = dict(
+ offload_dtype=dtype,
+ offload_device="cpu",
+ onload_dtype=dtype,
+ onload_device="cpu",
+ computation_dtype=dtype,
+ computation_device=self.device,
+ ),
+ )
+        if self.vace is not None:
+            dtype = next(iter(self.vace.parameters())).dtype
+            device = "cpu" if vram_limit is not None else self.device
+ enable_vram_management(
+ self.vace,
+ module_map = {
+ torch.nn.Linear: AutoWrappedLinear,
+ torch.nn.Conv3d: AutoWrappedModule,
+ torch.nn.LayerNorm: AutoWrappedModule,
+ RMSNorm: AutoWrappedModule,
+ },
+ module_config = dict(
+ offload_dtype=dtype,
+ offload_device="cpu",
+ onload_dtype=dtype,
+ onload_device=device,
+ computation_dtype=self.torch_dtype,
+ computation_device=self.device,
+ ),
+ vram_limit=vram_limit,
+ )
+
+
+ def initialize_usp(self):
+ import torch.distributed as dist
+ from xfuser.core.distributed import initialize_model_parallel, init_distributed_environment
+ dist.init_process_group(backend="nccl", init_method="env://")
+ init_distributed_environment(rank=dist.get_rank(), world_size=dist.get_world_size())
+ initialize_model_parallel(
+ sequence_parallel_degree=dist.get_world_size(),
+ ring_degree=1,
+ ulysses_degree=dist.get_world_size(),
+ )
+ torch.cuda.set_device(dist.get_rank())
+
+
+ def enable_usp(self):
+ from xfuser.core.distributed import get_sequence_parallel_world_size
+ from ..distributed.xdit_context_parallel import usp_attn_forward, usp_dit_forward
+
+ for block in self.dit.blocks:
+ block.self_attn.forward = types.MethodType(usp_attn_forward, block.self_attn)
+ self.dit.forward = types.MethodType(usp_dit_forward, self.dit)
+ if self.dit2 is not None:
+ for block in self.dit2.blocks:
+ block.self_attn.forward = types.MethodType(usp_attn_forward, block.self_attn)
+ self.dit2.forward = types.MethodType(usp_dit_forward, self.dit2)
+ self.sp_size = get_sequence_parallel_world_size()
+ self.use_unified_sequence_parallel = True
+
+
+ @staticmethod
+ def from_pretrained(
+ torch_dtype: torch.dtype = torch.bfloat16,
+ device: Union[str, torch.device] = "cuda",
+ model_configs: list[ModelConfig] = [],
+ tokenizer_config: ModelConfig = ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/*"),
+ local_model_path: str = "./checkpoints",
+ skip_download: bool = False,
+ redirect_common_files: bool = True,
+ use_usp=False,
+ training_strategy='origin',
+ ):
+
+ # Redirect model path
+
+ if redirect_common_files:
+
+ redirect_dict = {
+ "models_t5_umt5-xxl-enc-bf16.pth": "Wan-AI/Wan2.1-T2V-1.3B",
+ "Wan2.1_VAE.pth": "Wan-AI/Wan2.1-T2V-1.3B",
+ "models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth": "Wan-AI/Wan2.1-I2V-14B-480P",
+ }
+ for model_config in model_configs:
+ if model_config.origin_file_pattern is None or model_config.model_id is None:
+ continue
+ if model_config.origin_file_pattern in redirect_dict and model_config.model_id != redirect_dict[model_config.origin_file_pattern]:
+ print(f"To avoid repeatedly downloading model files, ({model_config.model_id}, {model_config.origin_file_pattern}) is redirected to ({redirect_dict[model_config.origin_file_pattern]}, {model_config.origin_file_pattern}). You can use `redirect_common_files=False` to disable file redirection.")
+ model_config.model_id = redirect_dict[model_config.origin_file_pattern]
+
+ # Initialize pipeline
+
+ if training_strategy == 'origin':
+ pipe = WanVideoPipeline(device=device, torch_dtype=torch_dtype)
+ logger.warning("Using origin generative model training")
+ else:
+ raise ValueError(f"Invalid training strategy: {training_strategy}")
+
+ if use_usp: pipe.initialize_usp()
+
+ # Download and load models
+ model_manager = ModelManager()
+
+ for model_config in model_configs:
+ model_config.download_if_necessary(use_usp=use_usp)
+ model_manager.load_model(
+ model_config.path,
+ device=model_config.offload_device or device,
+ torch_dtype=model_config.offload_dtype or torch_dtype
+ )
+
+ # Load models
+ pipe.text_encoder = model_manager.fetch_model("wan_video_text_encoder")
+ dit = model_manager.fetch_model("wan_video_dit", index=2)
+ if isinstance(dit, list):
+ pipe.dit, pipe.dit2 = dit
+ else:
+ pipe.dit = dit
+ pipe.vae = model_manager.fetch_model("wan_video_vae")
+ pipe.image_encoder = model_manager.fetch_model("wan_video_image_encoder")
+ pipe.motion_controller = model_manager.fetch_model("wan_video_motion_controller")
+ pipe.vace = model_manager.fetch_model("wan_video_vace")
+
+ # Size division factor
+ if pipe.vae is not None:
+ pipe.height_division_factor = pipe.vae.upsampling_factor * 2
+ pipe.width_division_factor = pipe.vae.upsampling_factor * 2
+
+ # Initialize tokenizer
+ tokenizer_config.download_if_necessary(use_usp=use_usp)
+ pipe.prompter.fetch_models(pipe.text_encoder)
+ pipe.prompter.fetch_tokenizer(tokenizer_config.path)
+
+ # Unified Sequence Parallel
+ if use_usp: pipe.enable_usp()
+ return pipe
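+    # Hypothetical loading sketch (model ids / file patterns are placeholders):
+    #   pipe = WanVideoPipeline.from_pretrained(
+    #       torch_dtype=torch.bfloat16, device="cuda",
+    #       model_configs=[
+    #           ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors"),
+    #           ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"),
+    #           ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth"),
+    #       ],
+    #   )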
+
+
+
+
+ @torch.no_grad()
+ def __call__(
+ self,
+ # Prompt
+ prompt: str,
+ negative_prompt: Optional[str] = "",
+ # Image-to-video
+ input_image: Optional[Image.Image] = None,
+ # First-last-frame-to-video
+ end_image: Optional[Image.Image] = None,
+ # Video-to-video
+ input_video: Optional[list[Image.Image]] = None,
+ denoising_strength: Optional[float] = 1.0,
+ # ControlNet
+ control_video: Optional[list[Image.Image]] = None,
+ reference_image: Optional[Image.Image] = None,
+ # Camera control
+ camera_control_direction: Optional[Literal["Left", "Right", "Up", "Down", "LeftUp", "LeftDown", "RightUp", "RightDown"]] = None,
+ camera_control_speed: Optional[float] = 1/54,
+ camera_control_origin: Optional[tuple] = (0, 0.532139961, 0.946026558, 0.5, 0.5, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0),
+ # VACE
+ vace_video: Optional[list[Image.Image]] = None,
+ vace_video_mask: Optional[Image.Image] = None,
+ vace_reference_image: Optional[Image.Image] = None,
+ vace_scale: Optional[float] = 1.0,
+ # Randomness
+ seed: Optional[int] = None,
+ rand_device: Optional[str] = "cpu",
+ # Shape
+ height: Optional[int] = 480,
+ width: Optional[int] = 832,
+ num_frames=81,
+ # Classifier-free guidance
+ cfg_scale: Optional[float] = 5.0,
+ cfg_merge: Optional[bool] = False,
+ # Boundary
+ switch_DiT_boundary: Optional[float] = 0.875,
+ # Scheduler
+ num_inference_steps: Optional[int] = 50,
+ sigma_shift: Optional[float] = 5.0,
+ # Speed control
+ motion_bucket_id: Optional[int] = None,
+ # VAE tiling
+ tiled: Optional[bool] = True,
+ tile_size: Optional[tuple[int, int]] = (30, 52),
+ tile_stride: Optional[tuple[int, int]] = (15, 26),
+ # Sliding window
+ sliding_window_size: Optional[int] = None,
+ sliding_window_stride: Optional[int] = None,
+ # Teacache
+ tea_cache_l1_thresh: Optional[float] = None,
+ tea_cache_model_id: Optional[str] = "",
+ # progress_bar
+ progress_bar_cmd=tqdm,
+ mask: Optional[Image.Image] = None,
+ ):
+
+
+
+ # Scheduler
+ self.scheduler.set_timesteps(num_inference_steps, denoising_strength=denoising_strength, shift=sigma_shift)
+
+ # Inputs
+ inputs_posi = {
+ "prompt": prompt,
+ "tea_cache_l1_thresh": tea_cache_l1_thresh, "tea_cache_model_id": tea_cache_model_id, "num_inference_steps": num_inference_steps,
+ }
+ inputs_nega = {
+ "negative_prompt": negative_prompt,
+ "tea_cache_l1_thresh": tea_cache_l1_thresh, "tea_cache_model_id": tea_cache_model_id, "num_inference_steps": num_inference_steps,
+ }
+ inputs_shared = {
+ "input_image": input_image,
+ "end_image": end_image,
+ "input_video": input_video, "denoising_strength": denoising_strength,
+ "control_video": control_video, "reference_image": reference_image,
+ "camera_control_direction": camera_control_direction, "camera_control_speed": camera_control_speed, "camera_control_origin": camera_control_origin,
+ "vace_video": vace_video, "vace_video_mask": vace_video_mask, "vace_reference_image": vace_reference_image, "vace_scale": vace_scale,
+ "seed": seed, "rand_device": rand_device,
+ "height": height, "width": width, "num_frames": num_frames,
+ "cfg_scale": cfg_scale, "cfg_merge": cfg_merge,
+ "sigma_shift": sigma_shift,
+ "motion_bucket_id": motion_bucket_id,
+ "tiled": tiled, "tile_size": tile_size, "tile_stride": tile_stride,
+ "sliding_window_size": sliding_window_size, "sliding_window_stride": sliding_window_stride,
+ "mask":mask,
+ }
+
+ for unit in self.units:
+ inputs_shared, inputs_posi, inputs_nega = self.unit_runner(unit, self, inputs_shared, inputs_posi, inputs_nega)
+
+ # Denoise
+ self.load_models_to_device(self.in_iteration_models)
+ models = {name: getattr(self, name) for name in self.in_iteration_models}
+ for progress_id, timestep in enumerate(progress_bar_cmd(self.scheduler.timesteps)):
+ # Switch DiT if necessary
+            if timestep.item() < switch_DiT_boundary * self.scheduler.num_train_timesteps and self.dit2 is not None and models["dit"] is not self.dit2:
+ self.load_models_to_device(self.in_iteration_models_2)
+ models["dit"] = self.dit2
+
+ # Timestep
+ timestep = timestep.unsqueeze(0).to(dtype=self.torch_dtype, device=self.device)
+
+ # Inference
+ noise_pred_posi = self.model_fn(**models, **inputs_shared, **inputs_posi, timestep=timestep)
+ if cfg_scale != 1.0:
+ if cfg_merge:
+ noise_pred_posi, noise_pred_nega = noise_pred_posi.chunk(2, dim=0)
+ else:
+ noise_pred_nega = self.model_fn(**models, **inputs_shared, **inputs_nega, timestep=timestep)
+ noise_pred = noise_pred_nega + cfg_scale * (noise_pred_posi - noise_pred_nega)
+ else:
+ noise_pred = noise_pred_posi
+
+ # Scheduler
+ inputs_shared["latents"] = self.scheduler.step(noise_pred, self.scheduler.timesteps[progress_id], inputs_shared["latents"])
+ if "first_frame_latents" in inputs_shared:
+ inputs_shared["latents"][:, :, 0:1] = inputs_shared["first_frame_latents"]
+
+ # VACE (TODO: remove it)
+ if vace_reference_image is not None:
+ inputs_shared["latents"] = inputs_shared["latents"][:, :, 1:]
+
+ # Decode
+ self.load_models_to_device(['vae'])
+ vae_outs = self.vae.decode(inputs_shared["latents"], device=self.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride)
+ # from einops import reduce
+ # video = reduce(vae_outs, 'b c t h w -> b c t', 'mean')
+
+ video = self.vae_output_to_video(vae_outs)
+ self.load_models_to_device([])
+
+        return video, vae_outs
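+    # Inference sketch (the prompt is a placeholder); __call__ returns both the decoded PIL frames
+    # and the raw decoded tensor:
+    #   frames, decoded = pipe(prompt="a corgi running on the beach", num_frames=81,
+    #                          height=480, width=832, seed=0)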
+
+
+
+
+
+
+
+
+
+class WanVideoUnit_ShapeChecker(PipelineUnit):
+ def __init__(self):
+ super().__init__(input_params=("height", "width", "num_frames"))
+
+ def process(self, pipe: WanVideoPipeline, height, width, num_frames):
+ height, width, num_frames = pipe.check_resize_height_width(height, width, num_frames)
+ return {"height": height, "width": width, "num_frames": num_frames}
+
+
+
+class WanVideoUnit_NoiseInitializer(PipelineUnit):
+ def __init__(self):
+ super().__init__(input_params=("height", "width", "num_frames", "seed", "rand_device", "vace_reference_image"))
+
+ def process(self, pipe: WanVideoPipeline, height, width, num_frames, seed, rand_device, vace_reference_image):
+ length = (num_frames - 1) // 4 + 1
+ if vace_reference_image is not None:
+ length += 1
+ shape = (1, pipe.vae.model.z_dim, length, height // pipe.vae.upsampling_factor, width // pipe.vae.upsampling_factor)
+ noise = pipe.generate_noise(shape, seed=seed, rand_device=rand_device)
+ if vace_reference_image is not None:
+ noise = torch.concat((noise[:, :, -1:], noise[:, :, :-1]), dim=2)
+ return {"noise": noise}
+
+
+
+class WanVideoUnit_InputVideoEmbedder(PipelineUnit):
+ def __init__(self):
+ super().__init__(
+ input_params=("input_video", "noise", "tiled", "tile_size", "tile_stride", "vace_reference_image"),
+ onload_model_names=("vae",)
+ )
+
+ def process(self, pipe: WanVideoPipeline, input_video, noise, tiled, tile_size, tile_stride, vace_reference_image):
+ if input_video is None:
+ return {"latents": noise}
+
+ pipe.load_models_to_device(["vae"])#* input_video is the GT
+        input_video = pipe.preprocess_video(input_video) #* [B, 3, F, H, W]
+        #* input_latents: [B, z_dim, (F - 1) // 4 + 1, H / f, W / f] with f = vae.upsampling_factor
+ input_latents = pipe.vae.encode(input_video, device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device)
+ if vace_reference_image is not None:
+ vace_reference_image = pipe.preprocess_video([vace_reference_image])
+ vace_reference_latents = pipe.vae.encode(vace_reference_image, device=pipe.device).to(dtype=pipe.torch_dtype, device=pipe.device)
+ input_latents = torch.concat([vace_reference_latents, input_latents], dim=2)
+        #? During training, input_latents is returned alongside the untouched noise,
+        #? but during inference it is mixed with the noise (via add_noise) to initialize the latents.
+ if pipe.scheduler.training:
+ return {"latents": noise, "input_latents": input_latents}
+ else:
+ latents = pipe.scheduler.add_noise(input_latents, noise, timestep=pipe.scheduler.timesteps[0])
+ return {"latents": latents}
+
+
+
+class WanVideoUnit_PromptEmbedder(PipelineUnit):
+ def __init__(self):
+ super().__init__(
+ seperate_cfg=True,
+ input_params_posi={"prompt": "prompt", "positive": "positive"},
+ input_params_nega={"prompt": "negative_prompt", "positive": "positive"},
+ onload_model_names=("text_encoder",)
+ )
+
+ def process(self, pipe: WanVideoPipeline, prompt, positive) -> dict:
+ pipe.load_models_to_device(self.onload_model_names)
+ prompt_emb = pipe.prompter.encode_prompt(prompt, positive=positive, device=pipe.device)
+ return {"context": prompt_emb}
+
+
+
+class WanVideoUnit_ImageEmbedder(PipelineUnit):
+ """
+ Deprecated
+ """
+ def __init__(self):
+ super().__init__(
+ input_params=("input_image", "end_image", "num_frames", "height", "width", "tiled", "tile_size", "tile_stride"),
+ onload_model_names=("image_encoder", "vae")
+ )
+
+ def process(self, pipe: WanVideoPipeline, input_image, end_image, num_frames, height, width, tiled, tile_size, tile_stride):
+ if input_image is None or pipe.image_encoder is None:
+ return {}
+ pipe.load_models_to_device(self.onload_model_names)
+ image = pipe.preprocess_image(input_image.resize((width, height))).to(pipe.device)
+ clip_context = pipe.image_encoder.encode_image([image])
+        msk = torch.ones(1, num_frames, height//8, width//8, device=pipe.device) #* marks which frames carry the reference image
+ msk[:, 1:] = 0
+ if end_image is not None:
+ end_image = pipe.preprocess_image(end_image.resize((width, height))).to(pipe.device)
+ vae_input = torch.concat([image.transpose(0,1), torch.zeros(3, num_frames-2, height, width).to(image.device), end_image.transpose(0,1)],dim=1)
+ if pipe.dit.has_image_pos_emb:
+ clip_context = torch.concat([clip_context, pipe.image_encoder.encode_image([end_image])], dim=1)
+ msk[:, -1:] = 1
+ else:
+ vae_input = torch.concat([image.transpose(0, 1), torch.zeros(3, num_frames-1, height, width).to(image.device)], dim=1)
+
+ msk = torch.concat([torch.repeat_interleave(msk[:, 0:1], repeats=4, dim=1), msk[:, 1:]], dim=1)
+ msk = msk.view(1, msk.shape[1] // 4, 4, height//8, width//8)
+ msk = msk.transpose(1, 2)[0]
+
+ y = pipe.vae.encode([vae_input.to(dtype=pipe.torch_dtype, device=pipe.device)], device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride)[0]
+ y = y.to(dtype=pipe.torch_dtype, device=pipe.device)
+ y = torch.concat([msk, y])
+ y = y.unsqueeze(0)
+ clip_context = clip_context.to(dtype=pipe.torch_dtype, device=pipe.device)
+ y = y.to(dtype=pipe.torch_dtype, device=pipe.device)
+ return {"clip_feature": clip_context, "y": y}
+
+
+
+class WanVideoUnit_ImageEmbedderCLIP(PipelineUnit):
+ def __init__(self):
+ super().__init__(
+ input_params=("input_image", "end_image", "height", "width"),
+ onload_model_names=("image_encoder",)
+ )
+
+ def process(self, pipe: WanVideoPipeline, input_image, end_image, height, width):
+ if input_image is None or pipe.image_encoder is None or not pipe.dit.require_clip_embedding:
+ return {}
+ pipe.load_models_to_device(self.onload_model_names)
+ image = pipe.preprocess_image(input_image.resize((width, height))).to(pipe.device)
+ clip_context = pipe.image_encoder.encode_image([image])
+ if end_image is not None:
+ end_image = pipe.preprocess_image(end_image.resize((width, height))).to(pipe.device)
+ if pipe.dit.has_image_pos_emb:
+ clip_context = torch.concat([clip_context, pipe.image_encoder.encode_image([end_image])], dim=1)
+ clip_context = clip_context.to(dtype=pipe.torch_dtype, device=pipe.device)
+ return {"clip_feature": clip_context}
+
+
+
+class WanVideoUnit_ImageEmbedderVAE(PipelineUnit):
+ def __init__(self):
+ super().__init__(
+ input_params=("input_image", "end_image", "num_frames", "height", "width", "tiled", "tile_size", "tile_stride"),
+ onload_model_names=("vae",)
+ )
+
+ def process(self, pipe: WanVideoPipeline, input_image, end_image, num_frames, height, width, tiled, tile_size, tile_stride):
+ if input_image is None or not pipe.dit.require_vae_embedding:
+ return {}
+ pipe.load_models_to_device(self.onload_model_names)
+ image = pipe.preprocess_image(input_image.resize((width, height))).to(pipe.device)
+ msk = torch.ones(1, num_frames, height//8, width//8, device=pipe.device)
+ msk[:, 1:] = 0
+ if end_image is not None:
+ end_image = pipe.preprocess_image(end_image.resize((width, height))).to(pipe.device)
+ vae_input = torch.concat([image.transpose(0,1), torch.zeros(3, num_frames-2, height, width).to(image.device), end_image.transpose(0,1)],dim=1)
+ msk[:, -1:] = 1
+ else:
+ vae_input = torch.concat([image.transpose(0, 1), torch.zeros(3, num_frames-1, height, width).to(image.device)], dim=1)
+
+ msk = torch.concat([torch.repeat_interleave(msk[:, 0:1], repeats=4, dim=1), msk[:, 1:]], dim=1)
+ msk = msk.view(1, msk.shape[1] // 4, 4, height//8, width//8)
+ msk = msk.transpose(1, 2)[0]
+
+ y = pipe.vae.encode([vae_input.to(dtype=pipe.torch_dtype, device=pipe.device)], device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride)[0]
+ y = y.to(dtype=pipe.torch_dtype, device=pipe.device)
+ y = torch.concat([msk, y])
+ y = y.unsqueeze(0)
+ y = y.to(dtype=pipe.torch_dtype, device=pipe.device)
+ return {"y": y}
+
+
+
+class WanVideoUnit_ImageEmbedderFused(PipelineUnit):
+ """
+ Encode input image to latents using VAE. This unit is for Wan-AI/Wan2.2-TI2V-5B.
+ """
+ def __init__(self):
+ super().__init__(
+ input_params=("input_image", "latents", "height", "width", "tiled", "tile_size", "tile_stride"),
+ onload_model_names=("vae",)
+ )
+
+ def process(self, pipe: WanVideoPipeline, input_image, latents, height, width, tiled, tile_size, tile_stride):
+ if input_image is None or not pipe.dit.fuse_vae_embedding_in_latents:
+ return {}
+ pipe.load_models_to_device(self.onload_model_names)
+ image = pipe.preprocess_image(input_image.resize((width, height))).transpose(0, 1)
+ z = pipe.vae.encode([image], device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride)
+ latents[:, :, 0: 1] = z
+ return {"latents": latents, "fuse_vae_embedding_in_latents": True, "first_frame_latents": z}
+
+
+
+class WanVideoUnit_FunControl(PipelineUnit):
+ def __init__(self):
+ super().__init__(
+ input_params=("control_video", "num_frames", "height", "width", "tiled", "tile_size", "tile_stride", "clip_feature", "y"),
+ onload_model_names=("vae",)
+ )
+
+ def process(self, pipe: WanVideoPipeline, control_video, num_frames, height, width, tiled, tile_size, tile_stride, clip_feature, y):
+
+ if control_video is None:
+ return {}
+ pipe.load_models_to_device(self.onload_model_names)
+        #* convert the list of PIL.Image frames to a torch.Tensor
+        #* result size: [1, 3, F, H, W]
+ control_video = pipe.preprocess_video(control_video)
+
+ control_latents = pipe.vae.encode(control_video, device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device)
+        #* size of control_latents: [1, 16, (F - 1) // 4 + 1, H / 8, W / 8]
+ control_latents = control_latents.to(dtype=pipe.torch_dtype, device=pipe.device)
+
+ if clip_feature is None or y is None:
+ #* this branch is used during training
+ clip_feature = torch.zeros((1, 257, 1280), dtype=pipe.torch_dtype, device=pipe.device)
+
+ # y = torch.zeros((1, 16, (num_frames - 1) // 4 + 1, height//8, width//8), dtype=pipe.torch_dtype, device=pipe.device)
+
+ #* [1, 16, (F/4) + 1 , H/8, W/8]
+ y = torch.zeros((1, 16, control_latents.shape[-3], height//8, width//8), dtype=pipe.torch_dtype, device=pipe.device)
+ else:
+ y = y[:, -16:]
+        #* control_latents: [1, 16, 21, 60, 80]; y: [1, 16, 21, 60, 80]
+
+        #* [1, 32, (F/4) + 1 , H/8, W/8]; the first 16 channels are control_latents, the last 16 are y (i.e. a zero vector).
+ y = torch.concat([control_latents, y], dim=1)
+ return {"clip_feature": clip_feature, "y": y}
+
+
+
+class WanVideoUnit_FunControl_Mask(PipelineUnit):
+ def __init__(self):
+ super().__init__(
+ input_params=("control_video", "mask","num_frames", "height", "width", "tiled", "tile_size", "tile_stride", "clip_feature", "y"),
+ onload_model_names=("vae",)
+ )
+
+ def process(self, pipe: WanVideoPipeline, control_video, mask, num_frames, height, width, tiled, tile_size, tile_stride, clip_feature, y):
+
+ if control_video is None:
+ return {}
+ pipe.load_models_to_device(self.onload_model_names)
+        #* convert the list of PIL.Image frames to a torch.Tensor
+        #* result size: [1, 3, F, H, W]
+
+ control_video = pipe.preprocess_video(control_video)
+
+
+ control_latents = pipe.vae.encode(control_video, device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device)
+
+        #* size of control_latents: [1, 16, (F - 1) // 4 + 1, H / 8, W / 8]
+ control_latents = control_latents.to(dtype=pipe.torch_dtype, device=pipe.device)
+
+ if mask is not None:
+ mask = pipe.preprocess_video(mask)
+ mask_latents = pipe.vae.encode(mask, device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device)
+ mask_latents = mask_latents.to(dtype=pipe.torch_dtype, device=pipe.device)
+
+
+ if clip_feature is None or y is None:
+ #* this branch is used during training
+ clip_feature = torch.zeros((1, 257, 1280), dtype=pipe.torch_dtype, device=pipe.device)
+
+ # y = torch.zeros((1, 16, (num_frames - 1) // 4 + 1, height//8, width//8), dtype=pipe.torch_dtype, device=pipe.device)
+
+ #* [1, 16, (F/4) + 1 , H/8, W/8]
+ y = torch.zeros((1, 16, control_latents.shape[-3], height//8, width//8), dtype=pipe.torch_dtype, device=pipe.device)
+ else:
+ y = y[:, -16:]
+
+        #* control_latents: [1, 16, 21, 60, 80]; y: [1, 16, 21, 60, 80]
+
+        #* [1, 32, (F/4) + 1 , H/8, W/8]; the first 16 channels are control_latents, the last 16 are mask_latents when a mask is provided, otherwise y (a zero vector).
+
+ if mask is not None:
+ y = torch.concat([control_latents, mask_latents], dim=1)
+ # logger.warning(f"mask is provided, using mask_latents instead of y")
+ else:
+ y = torch.concat([control_latents, y], dim=1)
+ # logger.warning(f"mask is not provided, using y")
+
+ return {"clip_feature": clip_feature, "y": y}
+
+
+
+class WanVideoUnit_FunReference(PipelineUnit):
+ def __init__(self):
+ super().__init__(
+            input_params=("reference_image", "height", "width"),
+ onload_model_names=("vae",)
+ )
+
+ def process(self, pipe: WanVideoPipeline, reference_image, height, width):
+ if reference_image is None:
+ return {}
+ pipe.load_models_to_device(["vae"])
+ reference_image = reference_image.resize((width, height))
+ reference_latents = pipe.preprocess_video([reference_image])
+ reference_latents = pipe.vae.encode(reference_latents, device=pipe.device)
+ clip_feature = pipe.preprocess_image(reference_image)
+ clip_feature = pipe.image_encoder.encode_image([clip_feature])
+ return {"reference_latents": reference_latents, "clip_feature": clip_feature}
+
+
+
+
+class WanVideoUnit_FunCameraControl(PipelineUnit):
+ def __init__(self):
+ super().__init__(
+ input_params=("height", "width", "num_frames", "camera_control_direction", "camera_control_speed", "camera_control_origin", "latents", "input_image"),
+ onload_model_names=("vae",)
+ )
+
+ def process(self, pipe: WanVideoPipeline, height, width, num_frames, camera_control_direction, camera_control_speed, camera_control_origin, latents, input_image):
+ if camera_control_direction is None:
+ return {}
+ camera_control_plucker_embedding = pipe.dit.control_adapter.process_camera_coordinates(
+ camera_control_direction, num_frames, height, width, camera_control_speed, camera_control_origin)
+
+ control_camera_video = camera_control_plucker_embedding[:num_frames].permute([3, 0, 1, 2]).unsqueeze(0)
+ control_camera_latents = torch.concat(
+ [
+ torch.repeat_interleave(control_camera_video[:, :, 0:1], repeats=4, dim=2),
+ control_camera_video[:, :, 1:]
+ ], dim=2
+ ).transpose(1, 2)
+ b, f, c, h, w = control_camera_latents.shape
+ control_camera_latents = control_camera_latents.contiguous().view(b, f // 4, 4, c, h, w).transpose(2, 3)
+ control_camera_latents = control_camera_latents.contiguous().view(b, f // 4, c * 4, h, w).transpose(1, 2)
+ control_camera_latents_input = control_camera_latents.to(device=pipe.device, dtype=pipe.torch_dtype)
+
+ input_image = input_image.resize((width, height))
+ input_latents = pipe.preprocess_video([input_image])
+ pipe.load_models_to_device(self.onload_model_names)
+ input_latents = pipe.vae.encode(input_latents, device=pipe.device)
+ y = torch.zeros_like(latents).to(pipe.device)
+ y[:, :, :1] = input_latents
+ y = y.to(dtype=pipe.torch_dtype, device=pipe.device)
+ return {"control_camera_latents_input": control_camera_latents_input, "y": y}
+
+
+
+class WanVideoUnit_SpeedControl(PipelineUnit):
+ def __init__(self):
+ super().__init__(input_params=("motion_bucket_id",))
+
+ def process(self, pipe: WanVideoPipeline, motion_bucket_id):
+ if motion_bucket_id is None:
+ return {}
+ motion_bucket_id = torch.Tensor((motion_bucket_id,)).to(dtype=pipe.torch_dtype, device=pipe.device)
+ return {"motion_bucket_id": motion_bucket_id}
+
+
+
+class WanVideoUnit_VACE(PipelineUnit):
+ def __init__(self):
+ super().__init__(
+ input_params=("vace_video", "vace_video_mask", "vace_reference_image", "vace_scale", "height", "width", "num_frames", "tiled", "tile_size", "tile_stride"),
+ onload_model_names=("vae",)
+ )
+
+ def process(
+ self,
+ pipe: WanVideoPipeline,
+ vace_video, vace_video_mask, vace_reference_image, vace_scale,
+ height, width, num_frames,
+ tiled, tile_size, tile_stride
+ ):
+ if vace_video is not None or vace_video_mask is not None or vace_reference_image is not None:
+ pipe.load_models_to_device(["vae"])
+ if vace_video is None:
+ vace_video = torch.zeros((1, 3, num_frames, height, width), dtype=pipe.torch_dtype, device=pipe.device)
+ else:
+ vace_video = pipe.preprocess_video(vace_video)
+
+ if vace_video_mask is None:
+ vace_video_mask = torch.ones_like(vace_video)
+ else:
+ vace_video_mask = pipe.preprocess_video(vace_video_mask, min_value=0, max_value=1)
+
+ inactive = vace_video * (1 - vace_video_mask) + 0 * vace_video_mask
+ reactive = vace_video * vace_video_mask + 0 * (1 - vace_video_mask)
+ inactive = pipe.vae.encode(inactive, device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device)
+ reactive = pipe.vae.encode(reactive, device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device)
+ vace_video_latents = torch.concat((inactive, reactive), dim=1)
+
+ vace_mask_latents = rearrange(vace_video_mask[0,0], "T (H P) (W Q) -> 1 (P Q) T H W", P=8, Q=8)
+ vace_mask_latents = torch.nn.functional.interpolate(vace_mask_latents, size=((vace_mask_latents.shape[2] + 3) // 4, vace_mask_latents.shape[3], vace_mask_latents.shape[4]), mode='nearest-exact')
+
+ if vace_reference_image is None:
+ pass
+ else:
+ vace_reference_image = pipe.preprocess_video([vace_reference_image])
+ vace_reference_latents = pipe.vae.encode(vace_reference_image, device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device)
+ vace_reference_latents = torch.concat((vace_reference_latents, torch.zeros_like(vace_reference_latents)), dim=1)
+ vace_video_latents = torch.concat((vace_reference_latents, vace_video_latents), dim=2)
+ vace_mask_latents = torch.concat((torch.zeros_like(vace_mask_latents[:, :, :1]), vace_mask_latents), dim=2)
+
+ vace_context = torch.concat((vace_video_latents, vace_mask_latents), dim=1)
+ return {"vace_context": vace_context, "vace_scale": vace_scale}
+ else:
+ return {"vace_context": None, "vace_scale": vace_scale}
+
+
+
+class WanVideoUnit_UnifiedSequenceParallel(PipelineUnit):
+ def __init__(self):
+ super().__init__(input_params=())
+
+ def process(self, pipe: WanVideoPipeline):
+ if hasattr(pipe, "use_unified_sequence_parallel"):
+ if pipe.use_unified_sequence_parallel:
+ return {"use_unified_sequence_parallel": True}
+ return {}
+
+
+
+class WanVideoUnit_TeaCache(PipelineUnit):
+ def __init__(self):
+ super().__init__(
+ seperate_cfg=True,
+ input_params_posi={"num_inference_steps": "num_inference_steps", "tea_cache_l1_thresh": "tea_cache_l1_thresh", "tea_cache_model_id": "tea_cache_model_id"},
+ input_params_nega={"num_inference_steps": "num_inference_steps", "tea_cache_l1_thresh": "tea_cache_l1_thresh", "tea_cache_model_id": "tea_cache_model_id"},
+ )
+
+ def process(self, pipe: WanVideoPipeline, num_inference_steps, tea_cache_l1_thresh, tea_cache_model_id):
+ if tea_cache_l1_thresh is None:
+ return {}
+ return {"tea_cache": TeaCache(num_inference_steps, rel_l1_thresh=tea_cache_l1_thresh, model_id=tea_cache_model_id)}
+
+
+
+class WanVideoUnit_CfgMerger(PipelineUnit):
+ def __init__(self):
+ super().__init__(take_over=True)
+ self.concat_tensor_names = ["context", "clip_feature", "y", "reference_latents"]
+
+ def process(self, pipe: WanVideoPipeline, inputs_shared, inputs_posi, inputs_nega):
+ if not inputs_shared["cfg_merge"]:
+ return inputs_shared, inputs_posi, inputs_nega
+ for name in self.concat_tensor_names:
+ tensor_posi = inputs_posi.get(name)
+ tensor_nega = inputs_nega.get(name)
+ tensor_shared = inputs_shared.get(name)
+ if tensor_posi is not None and tensor_nega is not None:
+ inputs_shared[name] = torch.concat((tensor_posi, tensor_nega), dim=0)
+ elif tensor_shared is not None:
+ inputs_shared[name] = torch.concat((tensor_shared, tensor_shared), dim=0)
+ inputs_posi.clear()
+ inputs_nega.clear()
+ return inputs_shared, inputs_posi, inputs_nega
+
+
+
+class TeaCache:
+ def __init__(self, num_inference_steps, rel_l1_thresh, model_id):
+ self.num_inference_steps = num_inference_steps
+ self.step = 0
+ self.accumulated_rel_l1_distance = 0
+ self.previous_modulated_input = None
+ self.rel_l1_thresh = rel_l1_thresh
+ self.previous_residual = None
+ self.previous_hidden_states = None
+
+ self.coefficients_dict = {
+ "Wan2.1-T2V-1.3B": [-5.21862437e+04, 9.23041404e+03, -5.28275948e+02, 1.36987616e+01, -4.99875664e-02],
+ "Wan2.1-T2V-14B": [-3.03318725e+05, 4.90537029e+04, -2.65530556e+03, 5.87365115e+01, -3.15583525e-01],
+ "Wan2.1-I2V-14B-480P": [2.57151496e+05, -3.54229917e+04, 1.40286849e+03, -1.35890334e+01, 1.32517977e-01],
+ "Wan2.1-I2V-14B-720P": [ 8.10705460e+03, 2.13393892e+03, -3.72934672e+02, 1.66203073e+01, -4.17769401e-02],
+ }
+ if model_id not in self.coefficients_dict:
+ supported_model_ids = ", ".join([i for i in self.coefficients_dict])
+ raise ValueError(f"{model_id} is not a supported TeaCache model id. Please choose a valid model id in ({supported_model_ids}).")
+ self.coefficients = self.coefficients_dict[model_id]
+
+ def check(self, dit: WanModel, x, t_mod):
+ modulated_inp = t_mod.clone()
+ if self.step == 0 or self.step == self.num_inference_steps - 1:
+ should_calc = True
+ self.accumulated_rel_l1_distance = 0
+ else:
+ coefficients = self.coefficients
+ rescale_func = np.poly1d(coefficients)
+ self.accumulated_rel_l1_distance += rescale_func(((modulated_inp-self.previous_modulated_input).abs().mean() / self.previous_modulated_input.abs().mean()).cpu().item())
+ if self.accumulated_rel_l1_distance < self.rel_l1_thresh:
+ should_calc = False
+ else:
+ should_calc = True
+ self.accumulated_rel_l1_distance = 0
+ self.previous_modulated_input = modulated_inp
+ self.step += 1
+ if self.step == self.num_inference_steps:
+ self.step = 0
+ if should_calc:
+ self.previous_hidden_states = x.clone()
+ return not should_calc
+
+ def store(self, hidden_states):
+ self.previous_residual = hidden_states - self.previous_hidden_states
+ self.previous_hidden_states = None
+
+ def update(self, hidden_states):
+ hidden_states = hidden_states + self.previous_residual
+ return hidden_states
+
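As a reference, here is a minimal sketch of how the check/store/update cycle of TeaCache above is meant to be driven (mirroring its use in model_fn_wan_video further down). The tensors are random stand-ins for the DiT activations, the class is assumed to be in scope from this file, and `dit` is unused inside `check()`, so `None` is passed.

```python
import torch

# Assumes TeaCache from this file is in scope; all shapes are toy stand-ins.
cache = TeaCache(num_inference_steps=4, rel_l1_thresh=0.05, model_id="Wan2.1-T2V-1.3B")
x = torch.randn(1, 8, 16)              # stand-in for the patchified latent tokens
for _ in range(4):
    t_mod = torch.randn(1, 6, 16)      # stand-in for the per-step modulation tensor
    if cache.check(None, x, t_mod):    # True -> reuse the cached residual
        x = cache.update(x)
    else:                              # False -> run the (here: fake) DiT block stack
        x = x + 0.1 * torch.randn_like(x)
        cache.store(x)                 # cache the residual for possible reuse next step
```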
+
+
+class TemporalTiler_BCTHW:
+ def __init__(self):
+ pass
+
+ def build_1d_mask(self, length, left_bound, right_bound, border_width):
+ x = torch.ones((length,))
+ if border_width == 0:
+ return x
+
+ shift = 0.5
+ if not left_bound:
+ x[:border_width] = (torch.arange(border_width) + shift) / border_width
+ if not right_bound:
+ x[-border_width:] = torch.flip((torch.arange(border_width) + shift) / border_width, dims=(0,))
+ return x
+
+ def build_mask(self, data, is_bound, border_width):
+ _, _, T, _, _ = data.shape
+ t = self.build_1d_mask(T, is_bound[0], is_bound[1], border_width[0])
+ mask = repeat(t, "T -> 1 1 T 1 1")
+ return mask
+
+ def run(self, model_fn, sliding_window_size, sliding_window_stride, computation_device, computation_dtype, model_kwargs, tensor_names, batch_size=None):
+ tensor_names = [tensor_name for tensor_name in tensor_names if model_kwargs.get(tensor_name) is not None]
+ tensor_dict = {tensor_name: model_kwargs[tensor_name] for tensor_name in tensor_names}
+
+ B, C, T, H, W = tensor_dict[tensor_names[0]].shape
+ if batch_size is not None:
+ B *= batch_size
+ data_device, data_dtype = tensor_dict[tensor_names[0]].device, tensor_dict[tensor_names[0]].dtype
+ value = torch.zeros((B, C, T, H, W), device=data_device, dtype=data_dtype)
+ weight = torch.zeros((1, 1, T, 1, 1), device=data_device, dtype=data_dtype)
+ for t in range(0, T, sliding_window_stride):
+            if t - sliding_window_stride >= 0 and t - sliding_window_stride + sliding_window_size >= T:  #* skip this window if the previous one already covered the last frame
+ continue
+ t_ = min(t + sliding_window_size, T)
+
+ model_kwargs.update({
+                tensor_name: tensor_dict[tensor_name][:, :, t: t_, :, :].to(device=computation_device, dtype=computation_dtype) \
+ for tensor_name in tensor_names
+ })
+ model_output = model_fn(**model_kwargs).to(device=data_device, dtype=data_dtype)
+
+ mask = self.build_mask(
+ model_output,
+ is_bound=(t == 0, t_ == T),
+ border_width=(sliding_window_size - sliding_window_stride,)
+ ).to(device=data_device, dtype=data_dtype)
+
+ # logger.info(f"t: {t}, t_: {t_}, sliding_window_size: {sliding_window_size}, sliding_window_stride: {sliding_window_stride}")
+
+ value[:, :, t: t_, :, :] += model_output * mask
+ weight[:, :, t: t_, :, :] += mask
+ value /= weight
+ model_kwargs.update(tensor_dict)
+ return value
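The linear cross-fade that `run()` applies over window overlaps can be read directly off `build_1d_mask`. A small sketch (assuming the class above is in scope) with window size 6 and stride 4: the 2-frame overlap of neighbouring windows receives complementary ramp weights that already sum to 1 before the final value/weight normalisation.

```python
import torch

tiler = TemporalTiler_BCTHW()
border = 6 - 4   # sliding_window_size - sliding_window_stride
left  = tiler.build_1d_mask(6, left_bound=True,  right_bound=False, border_width=border)
right = tiler.build_1d_mask(6, left_bound=False, right_bound=True,  border_width=border)
print(left)    # tensor([1.0000, 1.0000, 1.0000, 1.0000, 0.7500, 0.2500])
print(right)   # tensor([0.2500, 0.7500, 1.0000, 1.0000, 1.0000, 1.0000])
```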
+
+
+
+def model_fn_wan_video(
+ dit: WanModel,
+ motion_controller: WanMotionControllerModel = None,
+ vace: VaceWanModel = None,
+ latents: torch.Tensor = None,
+ timestep: torch.Tensor = None,
+ context: torch.Tensor = None,
+ clip_feature: Optional[torch.Tensor] = None,
+ y: Optional[torch.Tensor] = None,
+ reference_latents = None,
+ vace_context = None,
+ vace_scale = 1.0,
+ tea_cache: TeaCache = None,
+ use_unified_sequence_parallel: bool = False,
+ motion_bucket_id: Optional[torch.Tensor] = None,
+ sliding_window_size: Optional[int] = None,
+ sliding_window_stride: Optional[int] = None,
+ cfg_merge: bool = False,
+ use_gradient_checkpointing: bool = False,
+ use_gradient_checkpointing_offload: bool = False,
+ control_camera_latents_input = None,
+ fuse_vae_embedding_in_latents: bool = False,
+ **kwargs,
+):
+
+
+    if sliding_window_size is not None and sliding_window_stride is not None: #* skipped during training
+ model_kwargs = dict(
+ dit=dit,
+ motion_controller=motion_controller,
+ vace=vace,
+ latents=latents,
+ timestep=timestep,
+ context=context,
+ clip_feature=clip_feature,
+ y=y,
+ reference_latents=reference_latents,
+ vace_context=vace_context,
+ vace_scale=vace_scale,
+ tea_cache=tea_cache,
+ use_unified_sequence_parallel=use_unified_sequence_parallel,
+ motion_bucket_id=motion_bucket_id,
+ )
+ return TemporalTiler_BCTHW().run(
+ model_fn_wan_video,
+ sliding_window_size, sliding_window_stride,
+ latents.device, latents.dtype,
+ model_kwargs=model_kwargs,
+ tensor_names=["latents", "y"],
+ batch_size=2 if cfg_merge else 1
+ )
+
+ if use_unified_sequence_parallel:#* skip
+ import torch.distributed as dist
+ from xfuser.core.distributed import (get_sequence_parallel_rank,
+ get_sequence_parallel_world_size,
+ get_sp_group)
+
+
+ # Timestep
+ if dit.seperated_timestep and fuse_vae_embedding_in_latents:
+ timestep = torch.concat([
+ torch.zeros((1, latents.shape[3] * latents.shape[4] // 4), dtype=latents.dtype, device=latents.device),
+ torch.ones((latents.shape[2] - 1, latents.shape[3] * latents.shape[4] // 4), dtype=latents.dtype, device=latents.device) * timestep
+ ]).flatten()
+ t = dit.time_embedding(sinusoidal_embedding_1d(dit.freq_dim, timestep).unsqueeze(0))
+ if use_unified_sequence_parallel and dist.is_initialized() and dist.get_world_size() > 1:
+ t_chunks = torch.chunk(t, get_sequence_parallel_world_size(), dim=1)
+ t_chunks = [torch.nn.functional.pad(chunk, (0, 0, 0, t_chunks[0].shape[1]-chunk.shape[1]), value=0) for chunk in t_chunks]
+ t = t_chunks[get_sequence_parallel_rank()]
+ t_mod = dit.time_projection(t).unflatten(2, (6, dit.dim))
+ else:#* this branch
+ t = dit.time_embedding(sinusoidal_embedding_1d(dit.freq_dim, timestep)) #* out: torch.Size([1, 1536])
+ t_mod = dit.time_projection(t).unflatten(1, (6, dit.dim)) #* out: torch.Size([1, 6, 1536]); dit.dim: 1536
+
+
+
+
+ if motion_bucket_id is not None and motion_controller is not None: #* skip
+ t_mod = t_mod + motion_controller(motion_bucket_id).unflatten(1, (6, dit.dim))
+
+    context = dit.text_embedding(context)  #* text prompt, e.g. "depth": from torch.Size([1, 512, 4096]) to torch.Size([1, 512, 1536])
+    # todo: double-check this x
+    #* [1, 16, (F-1)/4, H/8, W/8]: pure Gaussian noise, or the noised ground truth
+ x = latents
+
+ # Merged cfg
+    #* the batch dimension must match that of context
+ if x.shape[0] != context.shape[0]:
+ x = torch.concat([x] * context.shape[0], dim=0)
+ if timestep.shape[0] != context.shape[0]:
+ timestep = torch.concat([timestep] * context.shape[0], dim=0)
+
+ # Image Embedding
+ """
+ new parameters:
+ #* require_vae_embedding
+ #* require_clip_embedding
+ """
+
+    # todo: x is the target video (i.e. the depth/normal video) after adding noise, or pure Gaussian noise; y is the input rgb video
+    # todo: double-check this y, [1, 32, (F-1)/4, H/8, W/8]
+ if y is not None and dit.require_vae_embedding:
+ x = torch.cat([x, y], dim=1)# (b, c_x + c_y, f, h, w) #* [1, 48, (F-1)/4, H/8, W/8]
+ if clip_feature is not None and dit.require_clip_embedding:
+        #* clip_feature is initialized to zeros; projected from torch.Size([1, 257, 1280]) to torch.Size([1, 257, 1536])
+        clip_embedding = dit.img_emb(clip_feature)
+        #* concat 257 and 512 tokens to form torch.Size([1, 769, 1536])
+        context = torch.cat([clip_embedding, context], dim=1)
+
+ # Add camera control
+ #* from torch.Size([1, 48, (F-1)/4, H/8, W/8]),
+    #* to [1, 1536, (F-1)/4, H/16, W/16] (via the MLP inside the function)
+ #* to [1, 1536, ( (F-1)/4 * H/16 * W/16)]
+ #* x_out: [1, 1536, ( (F-1)/4 * H/16 * W/16)]
+ x, (f, h, w) = dit.patchify(x, control_camera_latents_input)
+
+ # Reference image
+ if reference_latents is not None: #* skip
+ if len(reference_latents.shape) == 5:
+ reference_latents = reference_latents[:, :, 0]
+ reference_latents = dit.ref_conv(reference_latents).flatten(2).transpose(1, 2)
+ x = torch.concat([reference_latents, x], dim=1)
+ f += 1
+
+ #* RoPE position embedding for 3D video, [ ( (F-1)/4 * H/16 * W/16), 1, 64]
+ freqs = torch.cat([
+ dit.freqs[0][:f].view(f, 1, 1, -1).expand(f, h, w, -1),
+ dit.freqs[1][:h].view(1, h, 1, -1).expand(f, h, w, -1),
+ dit.freqs[2][:w].view(1, 1, w, -1).expand(f, h, w, -1)
+ ], dim=-1).reshape(f * h * w, 1, -1).to(x.device)
+
+ # TeaCache
+ if tea_cache is not None:#*skip
+ tea_cache_update = tea_cache.check(dit, x, t_mod)
+ else:
+ tea_cache_update = False
+
+ if vace_context is not None:#*skip
+ vace_hints = vace(x, vace_context, context, t_mod, freqs)
+
+ # blocks
+ if use_unified_sequence_parallel:#* skip
+ if dist.is_initialized() and dist.get_world_size() > 1:
+ chunks = torch.chunk(x, get_sequence_parallel_world_size(), dim=1)
+ pad_shape = chunks[0].shape[1] - chunks[-1].shape[1]
+ chunks = [torch.nn.functional.pad(chunk, (0, 0, 0, chunks[0].shape[1]-chunk.shape[1]), value=0) for chunk in chunks]
+ x = chunks[get_sequence_parallel_rank()]
+ if tea_cache_update:
+ x = tea_cache.update(x)
+ else:
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs)
+ return custom_forward
+ #* pass through dit blocks 30 times
+ for block_id, block in enumerate(dit.blocks):
+ if use_gradient_checkpointing_offload:
+ with torch.autograd.graph.save_on_cpu():
+ x = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(block),
+ x, context, t_mod, freqs,
+ use_reentrant=False,
+ )
+ elif use_gradient_checkpointing:
+ x = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(block),
+ x, context, t_mod, freqs,
+ use_reentrant=False,
+ )
+ else:
+ x = block(x, context, t_mod, freqs)#* x_in: [1, ( (F-1)/4 * H/16 * W/16), 1536], context_in: [1, 769, 1536], t_mod_in: [1, 6, 1536], freqs_in: [ ( (F-1)/4 * H/16 * W/16), 1, 64], x_out: [1, ( (F-1)/4 * H/16 * W/16), 1536]
+ if vace_context is not None and block_id in vace.vace_layers_mapping:#* skip
+ current_vace_hint = vace_hints[vace.vace_layers_mapping[block_id]]
+ if use_unified_sequence_parallel and dist.is_initialized() and dist.get_world_size() > 1:
+ current_vace_hint = torch.chunk(current_vace_hint, get_sequence_parallel_world_size(), dim=1)[get_sequence_parallel_rank()]
+ current_vace_hint = torch.nn.functional.pad(current_vace_hint, (0, 0, 0, chunks[0].shape[1] - current_vace_hint.shape[1]), value=0)
+ x = x + current_vace_hint * vace_scale
+ if tea_cache is not None:#* skip
+ tea_cache.store(x)
+
+ #* x_in: [1, ( (F-1)/4 * H/16 * W/16), 1536], t_in: [1, 1536],
+ #* x_out: [1, ( (F-1)/4 * H/16 * W/16), 64]
+ x = dit.head(x, t)
+ if use_unified_sequence_parallel:#* skip
+ if dist.is_initialized() and dist.get_world_size() > 1:
+ x = get_sp_group().all_gather(x, dim=1)
+ x = x[:, :-pad_shape] if pad_shape > 0 else x
+
+ # Remove reference latents
+ if reference_latents is not None:#* skip
+ x = x[:, reference_latents.shape[1]:]
+ f -= 1
+
+ #* unpatchify, from [1, ( (F-1)/4 * H/16 * W/16), 64] to [1, 16, (F-1)/4, H/8, W/8]
+ x = dit.unpatchify(x, (f, h, w))
+ return x
+
+
+
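For orientation, a self-contained sketch of the 3D RoPE table assembly used in model_fn_wan_video above. The per-axis tables and their channel split (16 + 24 + 24, matching only the 64-channel total noted in the comments) are made-up stand-ins for `dit.freqs`, which in the real model comes from the WanModel checkpoint.

```python
import torch

f, h, w = 4, 3, 3                        # toy latent frames / height / width
freqs = [torch.randn(8, 16),             # stand-in for dit.freqs[0] (temporal axis)
         torch.randn(8, 24),             # stand-in for dit.freqs[1] (height axis)
         torch.randn(8, 24)]             # stand-in for dit.freqs[2] (width axis)
rope = torch.cat([
    freqs[0][:f].view(f, 1, 1, -1).expand(f, h, w, -1),
    freqs[1][:h].view(1, h, 1, -1).expand(f, h, w, -1),
    freqs[2][:w].view(1, 1, w, -1).expand(f, h, w, -1),
], dim=-1).reshape(f * h * w, 1, -1)
print(rope.shape)                        # torch.Size([36, 1, 64]) -> one row per token
```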
diff --git a/dkt/prompters/__init__.py b/dkt/prompters/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..57ea725872aff356d33bb6e8e6fc5d9e5f143205
--- /dev/null
+++ b/dkt/prompters/__init__.py
@@ -0,0 +1,13 @@
+# Only wan_prompter is used by entry files
+# from .prompt_refiners import Translator, BeautifulPrompt, QwenPrompt
+# from .sd_prompter import SDPrompter
+# from .sdxl_prompter import SDXLPrompter
+# from .sd3_prompter import SD3Prompter
+# from .hunyuan_dit_prompter import HunyuanDiTPrompter
+# from .kolors_prompter import KolorsPrompter
+# from .flux_prompter import FluxPrompter
+# from .omost import OmostPromter
+# from .cog_prompter import CogPrompter
+# from .hunyuan_video_prompter import HunyuanVideoPrompter
+# from .stepvideo_prompter import StepVideoPrompter
+from .wan_prompter import WanPrompter
diff --git a/dkt/prompters/base_prompter.py b/dkt/prompters/base_prompter.py
new file mode 100644
index 0000000000000000000000000000000000000000..136abd18fabdb04e618f59801420c9ce5fb94634
--- /dev/null
+++ b/dkt/prompters/base_prompter.py
@@ -0,0 +1,70 @@
+from ..models.model_manager import ModelManager
+import torch
+
+
+
+def tokenize_long_prompt(tokenizer, prompt, max_length=None):
+ # Get model_max_length from self.tokenizer
+ length = tokenizer.model_max_length if max_length is None else max_length
+
+    # To avoid the truncation warning, temporarily set tokenizer.model_max_length to a very large value.
+ tokenizer.model_max_length = 99999999
+
+ # Tokenize it!
+ input_ids = tokenizer(prompt, return_tensors="pt").input_ids
+
+ # Determine the real length.
+ max_length = (input_ids.shape[1] + length - 1) // length * length
+
+ # Restore tokenizer.model_max_length
+ tokenizer.model_max_length = length
+
+ # Tokenize it again with fixed length.
+ input_ids = tokenizer(
+ prompt,
+ return_tensors="pt",
+ padding="max_length",
+ max_length=max_length,
+ truncation=True
+ ).input_ids
+
+ # Reshape input_ids to fit the text encoder.
+ num_sentence = input_ids.shape[1] // length
+ input_ids = input_ids.reshape((num_sentence, length))
+
+ return input_ids
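The rounding in tokenize_long_prompt is easiest to see on plain numbers: the token count is padded up to the next multiple of model_max_length and the ids are then reshaped into that many fixed-length "sentences" for the text encoder. A sketch of the arithmetic only, no tokenizer involved:

```python
length, n_tokens = 77, 130                          # model_max_length, prompt token count
max_length = (n_tokens + length - 1) // length * length
num_sentence = max_length // length
print(max_length, num_sentence)                     # 154 2 -> two rows of 77 ids each
```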
+
+
+
+class BasePrompter:
+ def __init__(self):
+ self.refiners = []
+ self.extenders = []
+
+
+ def load_prompt_refiners(self, model_manager: ModelManager, refiner_classes=[]):
+ for refiner_class in refiner_classes:
+ refiner = refiner_class.from_model_manager(model_manager)
+ self.refiners.append(refiner)
+
+ def load_prompt_extenders(self,model_manager:ModelManager,extender_classes=[]):
+ for extender_class in extender_classes:
+ extender = extender_class.from_model_manager(model_manager)
+ self.extenders.append(extender)
+
+
+ @torch.no_grad()
+ def process_prompt(self, prompt, positive=True):
+ if isinstance(prompt, list):
+ prompt = [self.process_prompt(prompt_, positive=positive) for prompt_ in prompt]
+ else:
+ for refiner in self.refiners:
+ prompt = refiner(prompt, positive=positive)
+ return prompt
+
+ @torch.no_grad()
+ def extend_prompt(self, prompt:str, positive=True):
+ extended_prompt = dict(prompt=prompt)
+ for extender in self.extenders:
+ extended_prompt = extender(extended_prompt)
+ return extended_prompt
\ No newline at end of file
diff --git a/dkt/prompters/wan_prompter.py b/dkt/prompters/wan_prompter.py
new file mode 100644
index 0000000000000000000000000000000000000000..01a765d3cb3bf2ee4d06553fd061ed7dd75443b2
--- /dev/null
+++ b/dkt/prompters/wan_prompter.py
@@ -0,0 +1,109 @@
+from .base_prompter import BasePrompter
+from ..models.wan_video_text_encoder import WanTextEncoder
+from transformers import AutoTokenizer
+import os, torch
+import ftfy
+import html
+import string
+import regex as re
+
+
+def basic_clean(text):
+ text = ftfy.fix_text(text)
+ text = html.unescape(html.unescape(text))
+ return text.strip()
+
+
+def whitespace_clean(text):
+ text = re.sub(r'\s+', ' ', text)
+ text = text.strip()
+ return text
+
+
+def canonicalize(text, keep_punctuation_exact_string=None):
+ text = text.replace('_', ' ')
+ if keep_punctuation_exact_string:
+ text = keep_punctuation_exact_string.join(
+ part.translate(str.maketrans('', '', string.punctuation))
+ for part in text.split(keep_punctuation_exact_string))
+ else:
+ text = text.translate(str.maketrans('', '', string.punctuation))
+ text = text.lower()
+ text = re.sub(r'\s+', ' ', text)
+ return text.strip()
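A quick check of the two cleaning helpers above (a sketch, assuming this module and its dependencies such as ftfy and regex are installed); WanPrompter below uses the 'whitespace' variant by default.

```python
from dkt.prompters.wan_prompter import canonicalize, whitespace_clean

text = "A  cat\tsits_on the   mat!!"
print(whitespace_clean(text))   # 'A cat sits_on the mat!!'
print(canonicalize(text))       # 'a cat sits on the mat'
```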
+
+
+class HuggingfaceTokenizer:
+
+ def __init__(self, name, seq_len=None, clean=None, **kwargs):
+ assert clean in (None, 'whitespace', 'lower', 'canonicalize')
+ self.name = name
+ self.seq_len = seq_len
+ self.clean = clean
+
+ # init tokenizer
+ self.tokenizer = AutoTokenizer.from_pretrained(name, **kwargs)
+ self.vocab_size = self.tokenizer.vocab_size
+
+ def __call__(self, sequence, **kwargs):
+ return_mask = kwargs.pop('return_mask', False)
+
+ # arguments
+ _kwargs = {'return_tensors': 'pt'}
+ if self.seq_len is not None:
+ _kwargs.update({
+ 'padding': 'max_length',
+ 'truncation': True,
+ 'max_length': self.seq_len
+ })
+ _kwargs.update(**kwargs)
+
+ # tokenization
+ if isinstance(sequence, str):
+ sequence = [sequence]
+ if self.clean:
+ sequence = [self._clean(u) for u in sequence]
+ ids = self.tokenizer(sequence, **_kwargs)
+
+ # output
+ if return_mask:
+ return ids.input_ids, ids.attention_mask
+ else:
+ return ids.input_ids
+
+ def _clean(self, text):
+ if self.clean == 'whitespace':
+ text = whitespace_clean(basic_clean(text))
+ elif self.clean == 'lower':
+ text = whitespace_clean(basic_clean(text)).lower()
+ elif self.clean == 'canonicalize':
+ text = canonicalize(basic_clean(text))
+ return text
+
+
+class WanPrompter(BasePrompter):
+
+ def __init__(self, tokenizer_path=None, text_len=512):
+ super().__init__()
+ self.text_len = text_len
+ self.text_encoder = None
+ self.fetch_tokenizer(tokenizer_path)
+
+ def fetch_tokenizer(self, tokenizer_path=None):
+ if tokenizer_path is not None:
+ self.tokenizer = HuggingfaceTokenizer(name=tokenizer_path, seq_len=self.text_len, clean='whitespace')
+
+ def fetch_models(self, text_encoder: WanTextEncoder = None):
+ self.text_encoder = text_encoder
+
+ def encode_prompt(self, prompt, positive=True, device="cuda"):
+ prompt = self.process_prompt(prompt, positive=positive)
+
+ ids, mask = self.tokenizer(prompt, return_mask=True, add_special_tokens=True)
+ ids = ids.to(device)
+ mask = mask.to(device)
+ seq_lens = mask.gt(0).sum(dim=1).long()
+ prompt_emb = self.text_encoder(ids, mask)
+ for i, v in enumerate(seq_lens):
+ prompt_emb[:, v:] = 0
+ return prompt_emb
diff --git a/dkt/schedulers/__init__.py b/dkt/schedulers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..744c8dd4bd216c137583dd6f5091d2da731fadf8
--- /dev/null
+++ b/dkt/schedulers/__init__.py
@@ -0,0 +1 @@
+from .flow_match import FlowMatchScheduler
diff --git a/dkt/schedulers/flow_match.py b/dkt/schedulers/flow_match.py
new file mode 100644
index 0000000000000000000000000000000000000000..63cb0f14b2fdf86ec10e8036e8e109cece5ffc21
--- /dev/null
+++ b/dkt/schedulers/flow_match.py
@@ -0,0 +1,126 @@
+import torch, math
+
+
+
+class FlowMatchScheduler():
+
+ def __init__(
+ self,
+ num_inference_steps=100,
+ num_train_timesteps=1000,
+ shift=3.0,
+ sigma_max=1.0,
+ sigma_min=0.003/1.002,
+ inverse_timesteps=False,
+ extra_one_step=False,
+ reverse_sigmas=False,
+ exponential_shift=False,
+ exponential_shift_mu=None,
+ shift_terminal=None,
+ ):
+ self.num_train_timesteps = num_train_timesteps
+ self.shift = shift
+ self.sigma_max = sigma_max
+ self.sigma_min = sigma_min
+ self.inverse_timesteps = inverse_timesteps
+ self.extra_one_step = extra_one_step
+ self.reverse_sigmas = reverse_sigmas
+ self.exponential_shift = exponential_shift
+ self.exponential_shift_mu = exponential_shift_mu
+ self.shift_terminal = shift_terminal
+ self.set_timesteps(num_inference_steps)
+
+
+ def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0, training=False, shift=None, dynamic_shift_len=None):
+ if shift is not None:
+ self.shift = shift
+ sigma_start = self.sigma_min + (self.sigma_max - self.sigma_min) * denoising_strength
+ if self.extra_one_step:
+ self.sigmas = torch.linspace(sigma_start, self.sigma_min, num_inference_steps + 1)[:-1]
+ else:
+ self.sigmas = torch.linspace(sigma_start, self.sigma_min, num_inference_steps)
+
+ if self.inverse_timesteps:
+ self.sigmas = torch.flip(self.sigmas, dims=[0])
+ if self.exponential_shift:
+ mu = self.calculate_shift(dynamic_shift_len) if dynamic_shift_len is not None else self.exponential_shift_mu
+ self.sigmas = math.exp(mu) / (math.exp(mu) + (1 / self.sigmas - 1))
+ else:
+ self.sigmas = self.shift * self.sigmas / (1 + (self.shift - 1) * self.sigmas)
+ if self.shift_terminal is not None:
+ one_minus_z = 1 - self.sigmas
+ scale_factor = one_minus_z[-1] / (1 - self.shift_terminal)
+ self.sigmas = 1 - (one_minus_z / scale_factor)
+ if self.reverse_sigmas:
+ self.sigmas = 1 - self.sigmas
+ self.timesteps = self.sigmas * self.num_train_timesteps
+
+ if training:
+ x = self.timesteps
+ y = torch.exp(-2 * ((x - num_inference_steps / 2) / num_inference_steps) ** 2)
+ y_shifted = y - y.min()
+ bsmntw_weighing = y_shifted * (num_inference_steps / y_shifted.sum())
+ self.linear_timesteps_weights = bsmntw_weighing
+ self.training = True
+ else:
+ self.training = False
+
+
+ def step(self, model_output, timestep, sample, to_final=False, **kwargs):
+ if isinstance(timestep, torch.Tensor):
+ timestep = timestep.cpu()
+ timestep_id = torch.argmin((self.timesteps - timestep).abs())
+
+        sigma = self.sigmas[timestep_id]  #* current sigma, i.e. the fraction of noise mixed in
+        if to_final or timestep_id + 1 >= len(self.timesteps):  #* noise fraction for the next step
+ sigma_ = 1 if (self.inverse_timesteps or self.reverse_sigmas) else 0
+ else:
+ sigma_ = self.sigmas[timestep_id + 1]
+ prev_sample = sample + model_output * (sigma_ - sigma)
+ return prev_sample
+
+
+ def return_to_timestep(self, timestep, sample, sample_stablized):
+ if isinstance(timestep, torch.Tensor):
+ timestep = timestep.cpu()
+ timestep_id = torch.argmin((self.timesteps - timestep).abs())
+ sigma = self.sigmas[timestep_id]
+ model_output = (sample - sample_stablized) / sigma
+ return model_output
+
+
+ def add_noise(self, original_samples, noise, timestep):
+ if isinstance(timestep, torch.Tensor):
+ timestep = timestep.cpu()
+ timestep_id = torch.argmin((self.timesteps - timestep).abs())
+ sigma = self.sigmas[timestep_id]
+ sample = (1 - sigma) * original_samples + sigma * noise
+ return sample
+
+
+ def training_target(self, sample, noise, timestep):
+ #* so: noise - target = sample
+ #* sample + target = noise
+ target = noise - sample
+        #* note: here "noise" is the rgb images
+ return target
+
+
+ def training_weight(self, timestep):
+ timestep_id = torch.argmin((self.timesteps - timestep.to(self.timesteps.device)).abs())
+ weights = self.linear_timesteps_weights[timestep_id]
+ return weights
+
+
+ def calculate_shift(
+ self,
+ image_seq_len,
+ base_seq_len: int = 256,
+ max_seq_len: int = 8192,
+ base_shift: float = 0.5,
+ max_shift: float = 0.9,
+ ):
+ m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+ b = base_shift - m * base_seq_len
+ mu = image_seq_len * m + b
+ return mu
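A small consistency check of the flow-matching identities implemented above (a sketch, assuming the package is importable): add_noise() mixes sample and noise as (1 - sigma) * x0 + sigma * eps, training_target() is the velocity eps - x0, and step() integrates sample + v * (sigma_next - sigma), so one Euler step with the exact velocity lands exactly on the interpolation for the next timestep.

```python
import torch
from dkt.schedulers import FlowMatchScheduler

sched = FlowMatchScheduler(num_inference_steps=10, shift=3.0)
x0, eps = torch.randn(1, 4), torch.randn(1, 4)
t = sched.timesteps[3]

x_t = sched.add_noise(x0, eps, t)                 # (1 - sigma_3) * x0 + sigma_3 * eps
v = sched.training_target(x0, eps, t)             # eps - x0
x_next = sched.step(v, t, x_t)                    # Euler step towards sigma_4

expected = (1 - sched.sigmas[4]) * x0 + sched.sigmas[4] * eps
print(torch.allclose(x_next, expected, atol=1e-5))   # True
```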
diff --git a/dkt/utils/__init__.py b/dkt/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..97f3926411718d008c84fcde792b552915c5687d
--- /dev/null
+++ b/dkt/utils/__init__.py
@@ -0,0 +1,261 @@
+import torch, warnings, glob, os
+import numpy as np
+from PIL import Image
+from einops import repeat, reduce
+from typing import Optional, Union
+from dataclasses import dataclass
+from modelscope import snapshot_download
+import numpy as np
+from PIL import Image
+from typing import Optional
+
+
+class BasePipeline(torch.nn.Module):
+
+ def __init__(
+ self,
+ device="cuda", torch_dtype=torch.float16,
+ height_division_factor=64, width_division_factor=64,
+ time_division_factor=None, time_division_remainder=None,
+ ):
+ super().__init__()
+ # The device and torch_dtype is used for the storage of intermediate variables, not models.
+ self.device = device
+ self.torch_dtype = torch_dtype
+ # The following parameters are used for shape check.
+ self.height_division_factor = height_division_factor
+ self.width_division_factor = width_division_factor
+ self.time_division_factor = time_division_factor
+ self.time_division_remainder = time_division_remainder
+ self.vram_management_enabled = False
+
+
+ def to(self, *args, **kwargs):
+ device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
+ if device is not None:
+ self.device = device
+ if dtype is not None:
+ self.torch_dtype = dtype
+ super().to(*args, **kwargs)
+ return self
+
+
+ def check_resize_height_width(self, height, width, num_frames=None):
+ # Shape check
+ if height % self.height_division_factor != 0:
+ height = (height + self.height_division_factor - 1) // self.height_division_factor * self.height_division_factor
+ print(f"height % {self.height_division_factor} != 0. We round it up to {height}.")
+ if width % self.width_division_factor != 0:
+ width = (width + self.width_division_factor - 1) // self.width_division_factor * self.width_division_factor
+ print(f"width % {self.width_division_factor} != 0. We round it up to {width}.")
+ if num_frames is None:
+ return height, width
+ else:
+ if num_frames % self.time_division_factor != self.time_division_remainder:
+ num_frames = (num_frames + self.time_division_factor - 1) // self.time_division_factor * self.time_division_factor + self.time_division_remainder
+ print(f"num_frames % {self.time_division_factor} != {self.time_division_remainder}. We round it up to {num_frames}.")
+ return height, width, num_frames
+
+
+ def preprocess_image(self, image, torch_dtype=None, device=None, pattern="B C H W", min_value=-1, max_value=1):
+ # Transform a PIL.Image to torch.Tensor
+ image = torch.Tensor(np.array(image, dtype=np.float32))
+ image = image.to(dtype=torch_dtype or self.torch_dtype, device=device or self.device)
+ image = image * ((max_value - min_value) / 255) + min_value
+ image = repeat(image, f"H W C -> {pattern}", **({"B": 1} if "B" in pattern else {}))
+ return image
+
+
+ def preprocess_video(self, video, torch_dtype=None, device=None, pattern="B C T H W", min_value=-1, max_value=1):
+ # Transform a list of PIL.Image to torch.Tensor
+ video = [self.preprocess_image(image, torch_dtype=torch_dtype, device=device, min_value=min_value, max_value=max_value) for image in video]
+ video = torch.stack(video, dim=pattern.index("T") // 2)
+ return video
+
+
+ def vae_output_to_image(self, vae_output, pattern="B C H W", min_value=-1, max_value=1):
+ # Transform a torch.Tensor to PIL.Image
+ if pattern != "H W C":
+ vae_output = reduce(vae_output, f"{pattern} -> H W C", reduction="mean")
+ image = ((vae_output - min_value) * (255 / (max_value - min_value))).clip(0, 255)
+ image = image.to(device="cpu", dtype=torch.uint8)
+ image = Image.fromarray(image.numpy())
+ return image
+
+
+ def vae_output_to_video(self, vae_output, pattern="B C T H W", min_value=-1, max_value=1):
+ # Transform a torch.Tensor to list of PIL.Image
+ if pattern != "T H W C":
+ vae_output = reduce(vae_output, f"{pattern} -> T H W C", reduction="mean")
+ video = [self.vae_output_to_image(image, pattern="H W C", min_value=min_value, max_value=max_value) for image in vae_output]
+ return video
+
+
+ def load_models_to_device(self, model_names=[]):
+ if self.vram_management_enabled:
+ # offload models
+ for name, model in self.named_children():
+ if name not in model_names:
+ if hasattr(model, "vram_management_enabled") and model.vram_management_enabled:
+ for module in model.modules():
+ if hasattr(module, "offload"):
+ module.offload()
+ else:
+ model.cpu()
+ torch.cuda.empty_cache()
+ # onload models
+ for name, model in self.named_children():
+ if name in model_names:
+ if hasattr(model, "vram_management_enabled") and model.vram_management_enabled:
+ for module in model.modules():
+ if hasattr(module, "onload"):
+ module.onload()
+ else:
+ model.to(self.device)
+
+
+ def generate_noise(self, shape, seed=None, rand_device="cpu", rand_torch_dtype=torch.float32, device=None, torch_dtype=None):
+ # Initialize Gaussian noise
+ generator = None if seed is None else torch.Generator(rand_device).manual_seed(seed)
+ noise = torch.randn(shape, generator=generator, device=rand_device, dtype=rand_torch_dtype)
+ noise = noise.to(dtype=torch_dtype or self.torch_dtype, device=device or self.device)
+ return noise
+
+
+ def enable_cpu_offload(self):
+ warnings.warn("`enable_cpu_offload` will be deprecated. Please use `enable_vram_management`.")
+ self.vram_management_enabled = True
+
+
+ def get_vram(self):
+ return torch.cuda.mem_get_info(self.device)[1] / (1024 ** 3)
+
+
+ def freeze_except(self, model_names):
+ for name, model in self.named_children():
+ if name in model_names:
+ model.train()
+ model.requires_grad_(True)
+ else:
+ model.eval()
+ model.requires_grad_(False)
+
+
+@dataclass
+class ModelConfig:
+ path: Union[str, list[str]] = None
+ model_id: str = None
+ origin_file_pattern: Union[str, list[str]] = None
+ download_resource: str = "ModelScope"
+ offload_device: Optional[Union[str, torch.device]] = None
+ offload_dtype: Optional[torch.dtype] = None
+ local_model_path: str = None
+ skip_download: bool = False
+
+ def download_if_necessary(self, use_usp=False):
+ if self.path is None:
+ # Check model_id and origin_file_pattern
+ if self.model_id is None:
+ raise ValueError(f"""No valid model files. Please use `ModelConfig(path="xxx")` or `ModelConfig(model_id="xxx/yyy", origin_file_pattern="zzz")`.""")
+
+ # Skip if not in rank 0
+ if use_usp:
+ import torch.distributed as dist
+ skip_download = self.skip_download or dist.get_rank() != 0
+ else:
+ skip_download = self.skip_download
+
+ # Check whether the origin path is a folder
+ if self.origin_file_pattern is None or self.origin_file_pattern == "":
+ self.origin_file_pattern = ""
+ allow_file_pattern = None
+ is_folder = True
+ elif isinstance(self.origin_file_pattern, str) and self.origin_file_pattern.endswith("/"):
+ allow_file_pattern = self.origin_file_pattern + "*"
+ is_folder = True
+ else:
+ allow_file_pattern = self.origin_file_pattern
+ is_folder = False
+
+ # Download
+ if self.local_model_path is None:
+ self.local_model_path = "./models"
+ if not skip_download:
+ downloaded_files = glob.glob(self.origin_file_pattern, root_dir=os.path.join(self.local_model_path, self.model_id))
+ snapshot_download(
+ self.model_id,
+ local_dir=os.path.join(self.local_model_path, self.model_id),
+ allow_file_pattern=allow_file_pattern,
+ ignore_file_pattern=downloaded_files,
+ local_files_only=False
+ )
+
+ # Let rank 1, 2, ... wait for rank 0
+ if use_usp:
+ import torch.distributed as dist
+ dist.barrier(device_ids=[dist.get_rank()])
+
+ # Return downloaded files
+ if is_folder:
+ self.path = os.path.join(self.local_model_path, self.model_id, self.origin_file_pattern)
+ else:
+ self.path = glob.glob(os.path.join(self.local_model_path, self.model_id, self.origin_file_pattern))
+ if isinstance(self.path, list) and len(self.path) == 1:
+ self.path = self.path[0]
+
+
+
+class PipelineUnit:
+ def __init__(
+ self,
+ seperate_cfg: bool = False,
+ take_over: bool = False,
+ input_params: tuple[str] = None,
+ input_params_posi: dict[str, str] = None,
+ input_params_nega: dict[str, str] = None,
+ onload_model_names: tuple[str] = None
+ ):
+ self.seperate_cfg = seperate_cfg
+ self.take_over = take_over
+ self.input_params = input_params
+ self.input_params_posi = input_params_posi
+ self.input_params_nega = input_params_nega
+ self.onload_model_names = onload_model_names
+
+
+ def process(self, pipe: BasePipeline, inputs: dict, positive=True, **kwargs) -> dict:
+ raise NotImplementedError("`process` is not implemented.")
+
+
+
+class PipelineUnitRunner:
+ def __init__(self):
+ pass
+
+ def __call__(self, unit: PipelineUnit, pipe: BasePipeline, inputs_shared: dict, inputs_posi: dict, inputs_nega: dict) -> tuple[dict, dict]:
+ if unit.take_over:
+ # Let the pipeline unit take over this function.
+ inputs_shared, inputs_posi, inputs_nega = unit.process(pipe, inputs_shared=inputs_shared, inputs_posi=inputs_posi, inputs_nega=inputs_nega)
+ elif unit.seperate_cfg:
+ # Positive side
+ processor_inputs = {name: inputs_posi.get(name_) for name, name_ in unit.input_params_posi.items()}
+ if unit.input_params is not None:
+ for name in unit.input_params:
+ processor_inputs[name] = inputs_shared.get(name)
+ processor_outputs = unit.process(pipe, **processor_inputs)
+ inputs_posi.update(processor_outputs)
+ # Negative side
+ if inputs_shared["cfg_scale"] != 1:
+ processor_inputs = {name: inputs_nega.get(name_) for name, name_ in unit.input_params_nega.items()}
+ if unit.input_params is not None:
+ for name in unit.input_params:
+ processor_inputs[name] = inputs_shared.get(name)
+ processor_outputs = unit.process(pipe, **processor_inputs)
+ inputs_nega.update(processor_outputs)
+ else:
+ inputs_nega.update(processor_outputs)
+ else:
+ processor_inputs = {name: inputs_shared.get(name) for name in unit.input_params}
+ processor_outputs = unit.process(pipe, **processor_inputs)
+ inputs_shared.update(processor_outputs)
+ return inputs_shared, inputs_posi, inputs_nega
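A minimal, hypothetical unit to illustrate the PipelineUnit / PipelineUnitRunner protocol above: with seperate_cfg=True the runner calls process() once with the positive inputs and, when cfg_scale != 1, once more with the negative inputs.

```python
from dkt.utils import PipelineUnit, PipelineUnitRunner

class UppercasePromptUnit(PipelineUnit):          # hypothetical example unit
    def __init__(self):
        super().__init__(
            seperate_cfg=True,
            input_params_posi={"prompt": "prompt"},
            input_params_nega={"prompt": "negative_prompt"},
        )

    def process(self, pipe, prompt):
        return {"prompt_upper": None if prompt is None else prompt.upper()}

runner = PipelineUnitRunner()
shared, posi, nega = {"cfg_scale": 5.0}, {"prompt": "a cat"}, {"negative_prompt": "blurry"}
shared, posi, nega = runner(UppercasePromptUnit(), pipe=None,
                            inputs_shared=shared, inputs_posi=posi, inputs_nega=nega)
print(posi["prompt_upper"], nega["prompt_upper"])   # A CAT BLURRY
```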
diff --git a/dkt/vram_management/__init__.py b/dkt/vram_management/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b075800b6fd5b1702eeb7ff9c619aef1093ed46
--- /dev/null
+++ b/dkt/vram_management/__init__.py
@@ -0,0 +1,2 @@
+from .layers import *
+from .gradient_checkpointing import *
diff --git a/dkt/vram_management/gradient_checkpointing.py b/dkt/vram_management/gradient_checkpointing.py
new file mode 100644
index 0000000000000000000000000000000000000000..b356415a004f3d74afdd45840f1fc4caf6659e16
--- /dev/null
+++ b/dkt/vram_management/gradient_checkpointing.py
@@ -0,0 +1,34 @@
+import torch
+
+
+def create_custom_forward(module):
+ def custom_forward(*inputs, **kwargs):
+ return module(*inputs, **kwargs)
+ return custom_forward
+
+
+def gradient_checkpoint_forward(
+ model,
+ use_gradient_checkpointing,
+ use_gradient_checkpointing_offload,
+ *args,
+ **kwargs,
+):
+ if use_gradient_checkpointing_offload:
+ with torch.autograd.graph.save_on_cpu():
+ model_output = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(model),
+ *args,
+ **kwargs,
+ use_reentrant=False,
+ )
+ elif use_gradient_checkpointing:
+ model_output = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(model),
+ *args,
+ **kwargs,
+ use_reentrant=False,
+ )
+ else:
+ model_output = model(*args, **kwargs)
+ return model_output
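Hypothetical usage of gradient_checkpoint_forward with a toy layer: with both flags False it is a plain forward call, with use_gradient_checkpointing=True the activations are recomputed during backward instead of being stored, and the offload variant additionally keeps the saved tensors on the CPU.

```python
import torch
from dkt.vram_management import gradient_checkpoint_forward

layer = torch.nn.Linear(16, 16)
x = torch.randn(2, 16, requires_grad=True)

y = gradient_checkpoint_forward(layer, True, False, x)   # checkpointed forward pass
y.sum().backward()
print(x.grad.shape)                                       # torch.Size([2, 16])
```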
diff --git a/dkt/vram_management/layers.py b/dkt/vram_management/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f1ad38887c628fbc9270da8c501f6c3487397a0
--- /dev/null
+++ b/dkt/vram_management/layers.py
@@ -0,0 +1,213 @@
+import torch, copy
+from typing import Union
+from ..models.utils import init_weights_on_device
+
+
+def cast_to(weight, dtype, device):
+ r = torch.empty_like(weight, dtype=dtype, device=device)
+ r.copy_(weight)
+ return r
+
+
+class AutoTorchModule(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ def check_free_vram(self):
+ gpu_mem_state = torch.cuda.mem_get_info(self.computation_device)
+ used_memory = (gpu_mem_state[1] - gpu_mem_state[0]) / (1024 ** 3)
+ return used_memory < self.vram_limit
+
+ def offload(self):
+ if self.state != 0:
+ self.to(dtype=self.offload_dtype, device=self.offload_device)
+ self.state = 0
+
+ def onload(self):
+ if self.state != 1:
+ self.to(dtype=self.onload_dtype, device=self.onload_device)
+ self.state = 1
+
+ def keep(self):
+ if self.state != 2:
+ self.to(dtype=self.computation_dtype, device=self.computation_device)
+ self.state = 2
+
+
+class AutoWrappedModule(AutoTorchModule):
+ def __init__(self, module: torch.nn.Module, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device, vram_limit, **kwargs):
+ super().__init__()
+ self.module = module.to(dtype=offload_dtype, device=offload_device)
+ self.offload_dtype = offload_dtype
+ self.offload_device = offload_device
+ self.onload_dtype = onload_dtype
+ self.onload_device = onload_device
+ self.computation_dtype = computation_dtype
+ self.computation_device = computation_device
+ self.vram_limit = vram_limit
+ self.state = 0
+
+ def forward(self, *args, **kwargs):
+ if self.state == 2:
+ module = self.module
+ else:
+ if self.onload_dtype == self.computation_dtype and self.onload_device == self.computation_device:
+ module = self.module
+ elif self.vram_limit is not None and self.check_free_vram():
+ self.keep()
+ module = self.module
+ else:
+ module = copy.deepcopy(self.module).to(dtype=self.computation_dtype, device=self.computation_device)
+ return module(*args, **kwargs)
+
+
+class WanAutoCastLayerNorm(torch.nn.LayerNorm, AutoTorchModule):
+ def __init__(self, module: torch.nn.LayerNorm, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device, vram_limit, **kwargs):
+ with init_weights_on_device(device=torch.device("meta")):
+ super().__init__(module.normalized_shape, eps=module.eps, elementwise_affine=module.elementwise_affine, bias=module.bias is not None, dtype=offload_dtype, device=offload_device)
+ self.weight = module.weight
+ self.bias = module.bias
+ self.offload_dtype = offload_dtype
+ self.offload_device = offload_device
+ self.onload_dtype = onload_dtype
+ self.onload_device = onload_device
+ self.computation_dtype = computation_dtype
+ self.computation_device = computation_device
+ self.vram_limit = vram_limit
+ self.state = 0
+
+ def forward(self, x, *args, **kwargs):
+ if self.state == 2:
+ weight, bias = self.weight, self.bias
+ else:
+ if self.onload_dtype == self.computation_dtype and self.onload_device == self.computation_device:
+ weight, bias = self.weight, self.bias
+ elif self.vram_limit is not None and self.check_free_vram():
+ self.keep()
+ weight, bias = self.weight, self.bias
+ else:
+ weight = None if self.weight is None else cast_to(self.weight, self.computation_dtype, self.computation_device)
+ bias = None if self.bias is None else cast_to(self.bias, self.computation_dtype, self.computation_device)
+ with torch.amp.autocast(device_type=x.device.type):
+ x = torch.nn.functional.layer_norm(x.float(), self.normalized_shape, weight, bias, self.eps).type_as(x)
+ return x
+
+
+class AutoWrappedLinear(torch.nn.Linear, AutoTorchModule):
+ def __init__(self, module: torch.nn.Linear, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device, vram_limit, name="", **kwargs):
+ with init_weights_on_device(device=torch.device("meta")):
+ super().__init__(in_features=module.in_features, out_features=module.out_features, bias=module.bias is not None, dtype=offload_dtype, device=offload_device)
+ self.weight = module.weight
+ self.bias = module.bias
+ self.offload_dtype = offload_dtype
+ self.offload_device = offload_device
+ self.onload_dtype = onload_dtype
+ self.onload_device = onload_device
+ self.computation_dtype = computation_dtype
+ self.computation_device = computation_device
+ self.vram_limit = vram_limit
+ self.state = 0
+ self.name = name
+ self.lora_A_weights = []
+ self.lora_B_weights = []
+ self.lora_merger = None
+ self.enable_fp8 = computation_dtype in [torch.float8_e4m3fn, torch.float8_e4m3fnuz]
+
+ def fp8_linear(
+ self,
+ input: torch.Tensor,
+ weight: torch.Tensor,
+ bias: Union[torch.Tensor, None] = None):
+ device = input.device
+ origin_dtype = input.dtype
+ origin_shape = input.shape
+ input = input.reshape(-1, origin_shape[-1])
+
+ x_max = torch.max(torch.abs(input), dim=-1, keepdim=True).values
+ fp8_max = 448.0
+ # For float8_e4m3fnuz, the maximum representable value is half of that of e4m3fn.
+ # To avoid overflow and ensure numerical compatibility during FP8 computation,
+ # we scale down the input by 2.0 in advance.
+ # This scaling will be compensated later during the final result scaling.
+ if self.computation_dtype == torch.float8_e4m3fnuz:
+ fp8_max = fp8_max / 2.0
+ scale_a = torch.clamp(x_max / fp8_max, min=1.0).float().to(device=device)
+ scale_b = torch.ones((weight.shape[0], 1)).to(device=device)
+ input = input / (scale_a + 1e-8)
+ input = input.to(self.computation_dtype)
+ weight = weight.to(self.computation_dtype)
+        bias = None if bias is None else bias.to(torch.bfloat16)
+
+ result = torch._scaled_mm(
+ input,
+ weight.T,
+ scale_a=scale_a,
+ scale_b=scale_b.T,
+ bias=bias,
+ out_dtype=origin_dtype,
+ )
+ new_shape = origin_shape[:-1] + result.shape[-1:]
+ result = result.reshape(new_shape)
+ return result
+
+ def forward(self, x, *args, **kwargs):
+ # VRAM management
+ if self.state == 2:
+ weight, bias = self.weight, self.bias
+ else:
+ if self.onload_dtype == self.computation_dtype and self.onload_device == self.computation_device:
+ weight, bias = self.weight, self.bias
+ elif self.vram_limit is not None and self.check_free_vram():
+ self.keep()
+ weight, bias = self.weight, self.bias
+ else:
+ weight = cast_to(self.weight, self.computation_dtype, self.computation_device)
+ bias = None if self.bias is None else cast_to(self.bias, self.computation_dtype, self.computation_device)
+
+ # Linear forward
+ if self.enable_fp8:
+ out = self.fp8_linear(x, weight, bias)
+ else:
+ out = torch.nn.functional.linear(x, weight, bias)
+
+ # LoRA
+ if len(self.lora_A_weights) == 0:
+ # No LoRA
+ return out
+ elif self.lora_merger is None:
+ # Native LoRA inference
+ for lora_A, lora_B in zip(self.lora_A_weights, self.lora_B_weights):
+ out = out + x @ lora_A.T @ lora_B.T
+ else:
+ # LoRA fusion
+ lora_output = []
+ for lora_A, lora_B in zip(self.lora_A_weights, self.lora_B_weights):
+ lora_output.append(x @ lora_A.T @ lora_B.T)
+ lora_output = torch.stack(lora_output)
+ out = self.lora_merger(out, lora_output)
+ return out
+
+
+def enable_vram_management_recursively(model: torch.nn.Module, module_map: dict, module_config: dict, max_num_param=None, overflow_module_config: dict = None, total_num_param=0, vram_limit=None, name_prefix=""):
+ for name, module in model.named_children():
+ layer_name = name if name_prefix == "" else name_prefix + "." + name
+ for source_module, target_module in module_map.items():
+ if isinstance(module, source_module):
+ num_param = sum(p.numel() for p in module.parameters())
+ if max_num_param is not None and total_num_param + num_param > max_num_param:
+ module_config_ = overflow_module_config
+ else:
+ module_config_ = module_config
+ module_ = target_module(module, **module_config_, vram_limit=vram_limit, name=layer_name)
+ setattr(model, name, module_)
+ total_num_param += num_param
+ break
+ else:
+ total_num_param = enable_vram_management_recursively(module, module_map, module_config, max_num_param, overflow_module_config, total_num_param, vram_limit=vram_limit, name_prefix=layer_name)
+ return total_num_param
+
+
+def enable_vram_management(model: torch.nn.Module, module_map: dict, module_config: dict, max_num_param=None, overflow_module_config: dict = None, vram_limit=None):
+ enable_vram_management_recursively(model, module_map, module_config, max_num_param, overflow_module_config, total_num_param=0, vram_limit=vram_limit)
+ model.vram_management_enabled = True
+
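A hedged, CPU-only sketch of wiring enable_vram_management onto a toy model: every torch.nn.Linear is replaced by AutoWrappedLinear, whose weights stay at the offload dtype/device and are cast on demand when the computation settings differ (here all settings are kept identical so the snippet runs anywhere).

```python
import torch
from dkt.vram_management import enable_vram_management, AutoWrappedLinear

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.GELU(), torch.nn.Linear(8, 8))
enable_vram_management(
    model,
    module_map={torch.nn.Linear: AutoWrappedLinear},
    module_config=dict(
        offload_dtype=torch.float32, offload_device="cpu",
        onload_dtype=torch.float32, onload_device="cpu",
        computation_dtype=torch.float32, computation_device="cpu",
    ),
)
y = model(torch.randn(1, 8))
print(type(model[0]).__name__, y.shape)   # AutoWrappedLinear torch.Size([1, 8])
```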
diff --git a/examples/1.mp4 b/examples/1.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..9b12148608470421ff580cd87f2f8c9780ae9fd4
--- /dev/null
+++ b/examples/1.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68ae88d1729f13d2eba6b9c3ae265af24ba563c725cd6ec7430fa9cf2a8f3584
+size 695111
diff --git a/examples/10.mp4 b/examples/10.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..77a3b345230500e5619c79b27cc5f02eefd49bea
--- /dev/null
+++ b/examples/10.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33faeefd4d21ca9ddc6386f0c7a83523632901d7f791fb5bd307b43739235c3d
+size 3886742
diff --git a/examples/178db6e89ab682bfc612a3290fec58dd.mp4 b/examples/178db6e89ab682bfc612a3290fec58dd.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..3cd92e5fe43066838879aa14c0ba95cfa23ae386
--- /dev/null
+++ b/examples/178db6e89ab682bfc612a3290fec58dd.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:803bea53274f55f02463af8855585a0e4950ec1ae498ed1e6ef261d83d38b371
+size 1552729
diff --git a/examples/1b0daeb776471c7389b36cee53049417.mp4 b/examples/1b0daeb776471c7389b36cee53049417.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..6ae4a7ff20448451c392dde76dac9f1f4ced6132
--- /dev/null
+++ b/examples/1b0daeb776471c7389b36cee53049417.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c89828ebf754c762daa916ba764293193986fde8e2ff0e70be66926fbc9a8d07
+size 1447735
diff --git a/examples/2.mp4 b/examples/2.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..cf2636229adc302b36d02b60822289c3b670f6af
--- /dev/null
+++ b/examples/2.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6dd9a4527924fa2c26fd5bcc85237951180dcc136bac88d08bcd78635de58848
+size 883548
diff --git a/examples/3.mp4 b/examples/3.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..3321ca2b4b48bb5f6f0eefd7a5b664869b9e3c56
--- /dev/null
+++ b/examples/3.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da710ac25e12ef740d0bac04a452d1da6078a7acfe35bd456ab8a659a81401ff
+size 628311
diff --git a/examples/30.mp4 b/examples/30.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..1a51381f846b317f04ce10dff24aad95287e5c62
--- /dev/null
+++ b/examples/30.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0aee9bbb44a52ffd26424b4a0d804ee236e56edddadbcec5b7b6d79b8b2464e
+size 2677102
diff --git a/examples/31.mp4 b/examples/31.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..e27d374886555dcb7498dcf41623d46d4cd6417a
--- /dev/null
+++ b/examples/31.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d0060e4564595ecc6397b7516ffc906beab7ff2a11971be2c9cd0e7807e6772
+size 569935
diff --git a/examples/32.mp4 b/examples/32.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..33d10d293c961186eb187bea444a16c1f233fe43
--- /dev/null
+++ b/examples/32.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:209d7f97bd9e5c881789d35859c06840a7321db7a1f5cfe285dadf18d84e847c
+size 1593158
diff --git a/examples/33.mp4 b/examples/33.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..c852df5ed0aeb9df961034b51a7eceb138e8d6fe
--- /dev/null
+++ b/examples/33.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54c8de91a05ceb1c01c3bcc267e527a55adffcf6dee1714629bb5b09850a38fd
+size 918682
diff --git a/examples/35.mp4 b/examples/35.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..9c93997be9ddb561199601d9a6718eb0f4de0457
--- /dev/null
+++ b/examples/35.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9fba530d137d1ebeae598f502c4b59f7693d87738038d6f726512d00b54a6e4
+size 952985
diff --git a/examples/36.mp4 b/examples/36.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..f35f45e878f8955b73f4d592c47ee8c97daae7e0
--- /dev/null
+++ b/examples/36.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eff2bff79f77dadc26bb74623b50ef6f9e6b37b68247a0779f48b57822e5f74d
+size 1008476
diff --git a/examples/39.mp4 b/examples/39.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..77ced16b1213af980e173d1dcd80226aad000d11
--- /dev/null
+++ b/examples/39.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df7ff152c9835a20d95d3ccf8bd0386ed8fd25ed393b7eb2b0f669f7866124e3
+size 740198
diff --git a/examples/40.mp4 b/examples/40.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..bcb518d490e375afbcf52a2d2169b8c266360199
--- /dev/null
+++ b/examples/40.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:deb684b15a71152cc7377957946042e9e8fa8efce619f7801de8cb91eb3c1e82
+size 1021691
diff --git a/examples/5.mp4 b/examples/5.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..785e7e804b13d6a02377fb9a7cdd03c7db764fe5
--- /dev/null
+++ b/examples/5.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:631fc39bbde6f098c4f539f87c7c43de16a0d117ec51d16976d4b4e7e7279bc0
+size 7130292
diff --git a/examples/69230f105ad8740e08d743a8ee11c651.mp4 b/examples/69230f105ad8740e08d743a8ee11c651.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..0cd9eea9dd093709c4dd09c07afa8a6d6b28bfd2
--- /dev/null
+++ b/examples/69230f105ad8740e08d743a8ee11c651.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d2920ef6ab0be19b5459a51aaacff4dbc773a18ecab150939866804d46b98ce
+size 416952
diff --git a/examples/7.mp4 b/examples/7.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..c930a21312ad3571edbb6dedac686aeffa3864ce
--- /dev/null
+++ b/examples/7.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cc7c666bcb736f6c8fca479eb49eb9257ad321db5f0737cea946f1dc831d888
+size 6361240
diff --git a/examples/8.mp4 b/examples/8.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..3c3822e1e283e287a97217461364de430880d7f5
--- /dev/null
+++ b/examples/8.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eca401fb7a79439fa8f40ed5815647575bfa0a6cdf274682bd159d4bb2b0b479
+size 5633115
diff --git a/examples/8a6dfb8cfe80634f4f77ae9aa830d075.mp4 b/examples/8a6dfb8cfe80634f4f77ae9aa830d075.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..82e6cc375f00be3b67208f5bfc7f026d91170405
--- /dev/null
+++ b/examples/8a6dfb8cfe80634f4f77ae9aa830d075.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffbd6627eb7921be3c7a69b42365e4ccf8ecae12c83543a7e0330363da949551
+size 2579203
diff --git a/examples/9.mp4 b/examples/9.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..431520e2699e25f4f6a0edf7af8d088087406e2f
--- /dev/null
+++ b/examples/9.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8551685f75e70a7558d64843c9cdc5d447309e837b92814805145d7ac301a46
+size 8999851
diff --git a/examples/DJI_20250912163642_0003_D.mp4 b/examples/DJI_20250912163642_0003_D.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..418ca443bb44c0a1fdddae1312c4ebc68039beea
--- /dev/null
+++ b/examples/DJI_20250912163642_0003_D.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90caa5c003303937601dc8f20a8e9458113aca1829d1b6e4882857c69909bb7b
+size 7752776
diff --git a/examples/DJI_20250912164311_0007_D.mp4 b/examples/DJI_20250912164311_0007_D.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..002643f3a940a09810f8adae38a0c1ee3a8b0d78
--- /dev/null
+++ b/examples/DJI_20250912164311_0007_D.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3573d054b7deec8494f9a2128a82f785d267de70a4268f7c63c25c044fe0432c
+size 5134725
diff --git a/examples/b1f1fa44f414d7731cd7d77751093c44.mp4 b/examples/b1f1fa44f414d7731cd7d77751093c44.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..12f9a702e7d95605edfbdf038480dbdff856ef50
--- /dev/null
+++ b/examples/b1f1fa44f414d7731cd7d77751093c44.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88802d219750e66b4e4393f35c5158806760307fa9d59bdf75c8bf78620c135d
+size 2169174
diff --git a/examples/b68045aa2128ab63d9c7518f8d62eafe.mp4 b/examples/b68045aa2128ab63d9c7518f8d62eafe.mp4
new file mode 100755
index 0000000000000000000000000000000000000000..00c240af0d1646f3fc36c174849705fb68784a79
--- /dev/null
+++ b/examples/b68045aa2128ab63d9c7518f8d62eafe.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2785d36b2bb50aee0cb8c76c21e9ae7da7729ea5f4e3f5c1d6dd7208c572a6c1
+size 2614636
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2df78560b0a7c7566fe1ca2a22f65f81ec3c7900
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,14 @@
+torch>=2.0.0
+torchvision
+transformers
+imageio
+imageio[ffmpeg]
+safetensors
+einops
+modelscope
+ftfy
+accelerate
+loguru
+git+https://github.com/microsoft/MoGe.git -i https://pypi.org/simple/ --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host files.pythonhosted.org
+sentencepiece
+spaces
\ No newline at end of file
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b1bf643e0bace9913179e976e4f919d9fa0a053
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,30 @@
+import os
+from setuptools import setup, find_packages
+import pkg_resources
+
+# Path to the requirements file
+requirements_path = os.path.join(os.path.dirname(__file__), "requirements.txt")
+
+# Read the requirements from the requirements file
+if os.path.exists(requirements_path):
+ with open(requirements_path, 'r') as f:
+ install_requires = [str(r) for r in pkg_resources.parse_requirements(f)]
+else:
+ install_requires = []
+
+setup(
+ name="dkt",
+ version="1.1.7",
+ description="Enjoy the magic of Diffusion models!",
+ author="Artiprocher",
+ packages=find_packages(),
+ install_requires=install_requires,
+ include_package_data=True,
+ classifiers=[
+ "Programming Language :: Python :: 3",
+ "License :: OSI Approved :: Apache Software License",
+ "Operating System :: OS Independent",
+ ],
+ package_data={"dkt": ["tokenizer_configs/**/**/*.*"]},
+ python_requires='>=3.6',
+)
diff --git a/tools/__init__.py b/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tools/common_utils.py b/tools/common_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..71dd90c95a1977c55b62339956ad7202f05b9371
--- /dev/null
+++ b/tools/common_utils.py
@@ -0,0 +1,19 @@
+
+
+
+import imageio
+import numpy as np
+from tqdm import tqdm
+
+
+def save_video(frames, save_path, fps, quality=9, ffmpeg_params=None):
+
+    if len(frames) == 1:
+ frames[0].save(save_path.replace('.mp4', '.png'))
+ return
+
+ writer = imageio.get_writer(save_path, fps=fps, quality=quality, ffmpeg_params=ffmpeg_params)
+ for frame in tqdm(frames, desc="Saving video"):
+ frame = np.array(frame)
+ writer.append_data(frame)
+ writer.close()
\ No newline at end of file
diff --git a/tools/depth2pcd.py b/tools/depth2pcd.py
new file mode 100644
index 0000000000000000000000000000000000000000..94efb3a31e07d18e38fc3a5c03c5e22e2a7b149b
--- /dev/null
+++ b/tools/depth2pcd.py
@@ -0,0 +1,57 @@
+
+
+import numpy as np
+import open3d as o3d
+
+def depth2pcd(depth, intrinsic, color=None, input_mask=None, ret_pcd=False):
+ """
+ Convert a depth map into a 3D point cloud.
+
+ Args:
+ depth (np.ndarray): (H, W) depth map in meters.
+ intrinsic (np.ndarray): (3, 3) camera intrinsic matrix.
+ color (np.ndarray, optional): (H, W, 3) RGB image aligned with the depth map.
+ input_mask (np.ndarray, optional): (H, W) boolean mask indicating valid pixels.
+ ret_pcd (bool, optional): If True, returns an Open3D PointCloud object;
+ otherwise returns NumPy arrays.
+
+ Returns:
+ - If ret_pcd=True: returns `o3d.geometry.PointCloud()`
+ - Otherwise: returns (N, 3) point coordinates and (N, 3) color arrays.
+ """
+ H, W = depth.shape
+ u, v = np.meshgrid(np.arange(W), np.arange(H))
+
+ fx, fy = intrinsic[0,0], intrinsic[1,1]
+ cx, cy = intrinsic[0,2], intrinsic[1,2]
+
+ Z = depth.reshape(-1)
+ X = ((u.reshape(-1) - cx) / fx) * Z
+ Y = ((v.reshape(-1) - cy) / fy) * Z
+
+ points = np.stack([X, Y, Z], axis=1)
+
+ # mask valid points
+ mask = np.ones_like(Z, dtype=bool)
+ if input_mask is not None:
+ mask &= input_mask.reshape(-1)
+
+ # Keep only valid points
+ points = points[mask]
+
+ # Process color information
+ if color is not None:
+ color = color.astype(np.float32) / 255.0
+ colors = color.reshape(-1, 3)[mask]
+ else:
+ colors = None
+
+ # Return Open3D point cloud or NumPy arrays
+ if ret_pcd:
+ pcd = o3d.geometry.PointCloud()
+ pcd.points = o3d.utility.Vector3dVector(points)
+ if colors is not None:
+ pcd.colors = o3d.utility.Vector3dVector(colors)
+ return pcd
+ else:
+ return points, colors
\ No newline at end of file
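A hypothetical call of depth2pcd on a synthetic 4x6 depth map with a simple pinhole intrinsic; open3d is still needed to import the module even when ret_pcd=False keeps the output as NumPy arrays.

```python
import numpy as np
from tools.depth2pcd import depth2pcd

H, W = 4, 6
depth = np.full((H, W), 2.0)                    # flat scene, 2 m everywhere
K = np.array([[100.0,   0.0, W / 2],
              [  0.0, 100.0, H / 2],
              [  0.0,   0.0,   1.0]])           # simple pinhole intrinsics
points, colors = depth2pcd(depth, K, color=None, input_mask=depth > 0)
print(points.shape, colors)                     # (24, 3) None
```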
diff --git a/tools/eval_utils.py b/tools/eval_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..969b5f3a18257b18f573f216ce1cef0dab6889ec
--- /dev/null
+++ b/tools/eval_utils.py
@@ -0,0 +1,122 @@
+import torch
+import numpy as np
+import matplotlib
+import matplotlib.pyplot as plt
+from PIL import Image
+import io
+
+
+
+
+
+def colorize_depth_map(depth, mask=None, reverse_color=False, color_map="Spectral"):
+ cm = matplotlib.colormaps[color_map]
+    #* the depth is assumed to be normalized by default
+
+ if reverse_color:
+ img_colored_np = cm(1 - depth, bytes=False)[:, :, 0:3]
+ else:
+ img_colored_np = cm(depth, bytes=False)[:, :, 0:3]
+
+ depth_colored = (img_colored_np * 255).astype(np.uint8)
+ if mask is not None:
+ masked_image = np.zeros_like(depth_colored)
+ masked_image[mask] = depth_colored[mask]
+ depth_colored_img = Image.fromarray(masked_image)
+ else:
+ depth_colored_img = Image.fromarray(depth_colored)
+
+ return depth_colored_img
+
+
+
+
+def depth2disparity(depth, return_mask=False):
+ if isinstance(depth, torch.Tensor):
+ disparity = torch.zeros_like(depth)
+ elif isinstance(depth, np.ndarray):
+ disparity = np.zeros_like(depth)
+    non_negative_mask = depth > 0
+    disparity[non_negative_mask] = 1.0 / depth[non_negative_mask]
+    if return_mask:
+        return disparity, non_negative_mask
+ else:
+ return disparity
+
+
+def disparity2depth(disparity, **kwargs):
+ return depth2disparity(disparity, **kwargs)
+
+
+
+
+def align_depth_least_square(
+ gt_arr: np.ndarray,
+ pred_arr: np.ndarray,
+ valid_mask_arr: np.ndarray,
+ return_scale_shift=True,
+ max_resolution=None,
+):
+ ori_shape = pred_arr.shape # input shape
+
+ gt = gt_arr.squeeze() # [H, W]
+ pred = pred_arr.squeeze()
+ valid_mask = valid_mask_arr.squeeze()
+
+ # Downsample
+ if max_resolution is not None:
+ scale_factor = np.min(max_resolution / np.array(ori_shape[-2:]))
+ if scale_factor < 1:
+ downscaler = torch.nn.Upsample(scale_factor=scale_factor, mode="nearest")
+ gt = downscaler(torch.as_tensor(gt).unsqueeze(0)).numpy()
+ pred = downscaler(torch.as_tensor(pred).unsqueeze(0)).numpy()
+ valid_mask = (
+ downscaler(torch.as_tensor(valid_mask).unsqueeze(0).float())
+ .bool()
+ .numpy()
+ )
+
+ assert (
+ gt.shape == pred.shape == valid_mask.shape
+ ), f"{gt.shape}, {pred.shape}, {valid_mask.shape}"
+
+ gt_masked = gt[valid_mask].reshape((-1, 1))
+ pred_masked = pred[valid_mask].reshape((-1, 1))
+
+ # numpy solver
+ _ones = np.ones_like(pred_masked)
+ A = np.concatenate([pred_masked, _ones], axis=-1)
+ X = np.linalg.lstsq(A, gt_masked, rcond=None)[0]
+ scale, shift = X
+
+ aligned_pred = pred_arr * scale + shift
+
+ # restore dimensions
+ aligned_pred = aligned_pred.reshape(ori_shape)
+
+ if return_scale_shift:
+ return aligned_pred, scale, shift
+ else:
+ return aligned_pred
+
+
+
+
+def transfer_pred_disp2depth(all_pred_disparity, all_gt_depths, all_masks):
+    gt_disparity, gt_non_neg_mask = depth2disparity(all_gt_depths, return_mask=True)
+    pred_non_neg_mask = all_pred_disparity > 0
+    valid_non_neg_mask = pred_non_neg_mask & gt_non_neg_mask & all_masks
+
+    align_disp_pred, scale, shift = align_depth_least_square(
+        gt_arr=gt_disparity,
+        pred_arr=all_pred_disparity,
+        valid_mask_arr=valid_non_neg_mask,
+        return_scale_shift=True,
+        max_resolution=None,
+    )
+
+ align_disp_pred = np.clip(
+ align_disp_pred, a_min=1e-3, a_max=None
+ ) # avoid 0 disparity
+ all_pred_depths = disparity2depth(align_disp_pred)
+ return all_pred_depths
+
+
\ No newline at end of file
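A worked check of the disparity alignment above (a sketch, assuming the module is importable): a prediction that is an exactly scaled and shifted copy of the ground truth should be recovered by the least-squares fit with scale ~2 and shift ~0.5.

```python
import numpy as np
from tools.eval_utils import align_depth_least_square

gt = np.random.rand(8, 8) + 1.0                 # ground-truth disparity
pred = (gt - 0.5) / 2.0                         # prediction off by scale 2, shift 0.5
mask = np.ones_like(gt, dtype=bool)

aligned, scale, shift = align_depth_least_square(gt, pred, mask)
print(round(scale.item(), 3), round(shift.item(), 3))   # 2.0 0.5
print(np.allclose(aligned, gt))                          # True
```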