import glob
import os
import random
import shutil
import sys
from typing import Sequence, Mapping, Any, Union

import torch
import gradio as gr
import spaces
from comfy import model_management
from huggingface_hub import hf_hub_download, snapshot_download

from utils.image_utils import remove_image_metadata, resize_and_optimize_image

# https://huggingface.co/SG161222/Realistic_Vision_V6.0_B1_noVAE/blob/main/Realistic_Vision_V6.0_NV_B1_fp16.safetensors
print("Realistic_Vision_V6.0_NV_B1_fp16.safetensors")
hf_hub_download(
    repo_id="SG161222/Realistic_Vision_V6.0_B1_noVAE",
    filename="Realistic_Vision_V6.0_NV_B1_fp16.safetensors",
    local_dir="models/checkpoints/SD1.5/",
)

# https://huggingface.co/gemasai/4x_NMKD-Superscale-SP_178000_G/blob/main/4x_NMKD-Superscale-SP_178000_G.pth
print("4x_NMKD-Superscale-SP_178000_G.pth")
hf_hub_download(
    repo_id="gemasai/4x_NMKD-Superscale-SP_178000_G",
    filename="4x_NMKD-Superscale-SP_178000_G.pth",
    local_dir="models/upscale_models/",
)

# https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_openpose.pth
print("control_v11p_sd15_openpose.pth")
hf_hub_download(
    repo_id="lllyasviel/ControlNet-v1-1",
    filename="control_v11p_sd15_openpose.pth",
    local_dir="models/controlnet/SD1.5/",
)

# https://huggingface.co/microsoft/Florence-2-base
print("microsoft/Florence-2-base")
snapshot_download(
    repo_id="microsoft/Florence-2-base",
    local_dir="models/LLM/Florence-2-base/",
    revision="ceaf371f01ef66192264811b390bccad475a4f02",
)

# https://huggingface.co/ahtoshkaa/Dreamshaper/blob/d4415d1a2644f08ab34bd7adabfbbb70571a782a/dreamshaper_8Inpainting.safetensors
print("dreamshaper_8Inpainting.safetensors")
hf_hub_download(
    repo_id="ahtoshkaa/Dreamshaper",
    filename="dreamshaper_8Inpainting.safetensors",
    local_dir="models/checkpoints/SD1.5/",
)

# https://huggingface.co/naonovn/Lora/blob/main/add_detail.safetensors
print("add_detail.safetensors")
hf_hub_download(
    repo_id="naonovn/Lora",
    filename="add_detail.safetensors",
    local_dir="models/loras/",
)

# https://huggingface.co/Dreamspire/BaldifierW2/blob/main/BaldifierW2.safetensors
print("BaldifierW2.safetensors")
hf_hub_download(
    repo_id="Dreamspire/BaldifierW2",
    filename="BaldifierW2.safetensors",
    local_dir="models/loras/",
)

# ./clip_vision/CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors
# https://huggingface.co/h94/IP-Adapter/blob/main/models/image_encoder/model.safetensors
print("CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors")
hf_hub_download(
    repo_id="h94/IP-Adapter",
    filename="models/image_encoder/model.safetensors",
    local_dir="models/clip_vision/",
)

# Rename: hf_hub_download keeps the repo-relative subpath, so move the file to
# the flat name ComfyUI expects. shutil.move replaces the original shell-out
# to `mv`, so this also works without a POSIX shell.
try:
    source_file = "models/clip_vision/models/image_encoder/model.safetensors"
    destination_file = "models/clip_vision/CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
    shutil.move(source_file, destination_file)
    print(f"Moved {source_file} -> {destination_file}")
except OSError as e:
    print(f"Move failed: {e}")
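# The download-then-move pattern above repeats for the IP-Adapter weights
# below. A minimal reusable sketch of the same idea (illustrative only;
# fetch_and_flatten is a hypothetical helper, not defined or called anywhere
# in this script):
#
#   def fetch_and_flatten(repo_id: str, filename: str, local_dir: str) -> str:
#       path = hf_hub_download(repo_id=repo_id, filename=filename, local_dir=local_dir)
#       target = os.path.join(local_dir, os.path.basename(filename))
#       if os.path.abspath(path) != os.path.abspath(target):
#           shutil.move(path, target)
#       return target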
print("CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors done")

# ./ipadapter/ip-adapter_sd15_light_v11.bin
# ./ipadapter/ip-adapter_sd15.safetensors
# ./ipadapter/ip-adapter-plus-face_sd15.safetensors
# https://huggingface.co/h94/IP-Adapter/blob/main/models/
print("ipadapter")
hf_hub_download(repo_id="h94/IP-Adapter", filename="models/ip-adapter_sd15.safetensors", local_dir="models/ipadapter/")
hf_hub_download(repo_id="h94/IP-Adapter", filename="models/ip-adapter_sd15_light_v11.bin", local_dir="models/ipadapter/")
hf_hub_download(repo_id="h94/IP-Adapter", filename="models/ip-adapter-plus_sd15.safetensors", local_dir="models/ipadapter/")
hf_hub_download(repo_id="h94/IP-Adapter", filename="models/ip-adapter-plus-face_sd15.safetensors", local_dir="models/ipadapter/")
hf_hub_download(repo_id="h94/IP-Adapter", filename="models/ip-adapter-full-face_sd15.safetensors", local_dir="models/ipadapter/")
hf_hub_download(repo_id="h94/IP-Adapter", filename="models/ip-adapter_sd15_vit-G.safetensors", local_dir="models/ipadapter/")

# Rename: move everything out of the nested models/ subfolder. The original
# ran `mv models/ipadapter/models/*` via subprocess, but subprocess does not
# expand shell globs, so the literal `*` made that command fail; glob plus
# shutil.move does the expansion in Python.
try:
    for source_file in glob.glob("models/ipadapter/models/*"):
        shutil.move(source_file, "models/ipadapter/")
except OSError as e:
    print(f"Move failed: {e}")
print("ipadapter done")

# Downloaded automatically at startup:
# ./insightface/models/buffalo_l/w600k_r50.onnx
# ./insightface/models/buffalo_l/det_10g.onnx
# ./insightface/models/buffalo_l/2d106det.onnx
# ./insightface/models/buffalo_l/1k3d68.onnx
# ./insightface/models/buffalo_l/genderage.onnx
# ./annotator/yzd-v/DWPose/yolox_l.onnx


def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Returns the value at the given index of a sequence or mapping.

    If the object is a sequence (like a list or string), returns the value
    at the given index. If the object is a mapping (like a dictionary),
    returns the value at the index-th key. Some nodes return a dictionary;
    in those cases we look under the "result" key.

    Args:
        obj (Union[Sequence, Mapping]): The object to retrieve the value from.
        index (int): The index of the value to retrieve.

    Returns:
        Any: The value at the given index.

    Raises:
        IndexError: If the index is out of bounds for the object and the
            object is not a mapping.
    """
    try:
        return obj[index]
    except KeyError:
        return obj["result"][index]
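# Example: CheckpointLoaderSimple.load_checkpoint() returns a
# (model, clip, vae) tuple, so get_value_at_index(ckpt_tuple, 2) yields the
# VAE; nodes that return {"result": (...)} are unwrapped the same way.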
""" # If no path is given, use the current working directory if path is None: path = os.getcwd() # Check if the current directory contains the name if name in os.listdir(path): path_name = os.path.join(path, name) print(f"{name} found: {path_name}") return path_name # Get the parent directory parent_directory = os.path.dirname(path) # If the parent directory is the same as the current directory, we've reached the root and stop the search if parent_directory == path: return None # Recursively call the function with the parent directory return find_path(name, parent_directory) def add_comfyui_directory_to_sys_path() -> None: """ Add 'ComfyUI' to the sys.path """ comfyui_path = find_path("ComfyUI") if comfyui_path is not None and os.path.isdir(comfyui_path): sys.path.append(comfyui_path) print(f"'{comfyui_path}' added to sys.path") def add_extra_model_paths() -> None: """ Parse the optional extra_model_paths.yaml file and add the parsed paths to the sys.path. """ try: from main import load_extra_path_config except ImportError: print( "Could not import load_extra_path_config from main.py. Looking in utils.extra_config instead." ) from utils.extra_config import load_extra_path_config extra_model_paths = find_path("extra_model_paths.yaml") if extra_model_paths is not None: load_extra_path_config(extra_model_paths) else: print("Could not find the extra_model_paths config file.") add_comfyui_directory_to_sys_path() add_extra_model_paths() def import_custom_nodes() -> None: """Find all custom nodes in the custom_nodes folder and add those node objects to NODE_CLASS_MAPPINGS This function sets up a new asyncio event loop, initializes the PromptServer, creates a PromptQueue, and initializes the custom nodes. """ import asyncio import execution from nodes import init_extra_nodes import server # Creating a new event loop and setting it as the default loop loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) # Creating an instance of PromptServer with the loop server_instance = server.PromptServer(loop) execution.PromptQueue(server_instance) # Initializing custom nodes init_extra_nodes() print("Custom nodes initialized.") from nodes import NODE_CLASS_MAPPINGS print("import_custom_nodes()") import_custom_nodes() print("import_custom_nodes() done") # NODE_CLASS_MAPPINGS 移到顶层 checkpointloadersimple = NODE_CLASS_MAPPINGS["CheckpointLoaderSimple"]() loraloader = NODE_CLASS_MAPPINGS["LoraLoader"]() cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]() controlnetloader = NODE_CLASS_MAPPINGS["ControlNetLoader"]() loadimage = NODE_CLASS_MAPPINGS["LoadImage"]() florence2modelloader = NODE_CLASS_MAPPINGS["Florence2ModelLoader"]() florence2run = NODE_CLASS_MAPPINGS["Florence2Run"]() #text_string = NODE_CLASS_MAPPINGS["Text String"]() #text_concatenate = NODE_CLASS_MAPPINGS["Text Concatenate"]() dwpreprocessor = NODE_CLASS_MAPPINGS["DWPreprocessor"]() controlnetapplyadvanced = NODE_CLASS_MAPPINGS["ControlNetApplyAdvanced"]() layerutility_imagescalebyaspectratio_v2 = NODE_CLASS_MAPPINGS[ "LayerUtility: ImageScaleByAspectRatio V2" ]() layermask_personmaskultra_v2 = NODE_CLASS_MAPPINGS[ "LayerMask: PersonMaskUltra V2" ]() growmask = NODE_CLASS_MAPPINGS["GrowMask"]() inpaintmodelconditioning = NODE_CLASS_MAPPINGS["InpaintModelConditioning"]() ksampler = NODE_CLASS_MAPPINGS["KSampler"]() vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]() vaeencode = NODE_CLASS_MAPPINGS["VAEEncode"]() faceanalysismodels = NODE_CLASS_MAPPINGS["FaceAnalysisModels"]() upscalemodelloader = NODE_CLASS_MAPPINGS["UpscaleModelLoader"]() 
# Node instances are created once at module level (moved out of the request path)
checkpointloadersimple = NODE_CLASS_MAPPINGS["CheckpointLoaderSimple"]()
loraloader = NODE_CLASS_MAPPINGS["LoraLoader"]()
cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
controlnetloader = NODE_CLASS_MAPPINGS["ControlNetLoader"]()
loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
florence2modelloader = NODE_CLASS_MAPPINGS["Florence2ModelLoader"]()
florence2run = NODE_CLASS_MAPPINGS["Florence2Run"]()
# text_string = NODE_CLASS_MAPPINGS["Text String"]()
# text_concatenate = NODE_CLASS_MAPPINGS["Text Concatenate"]()
dwpreprocessor = NODE_CLASS_MAPPINGS["DWPreprocessor"]()
controlnetapplyadvanced = NODE_CLASS_MAPPINGS["ControlNetApplyAdvanced"]()
layerutility_imagescalebyaspectratio_v2 = NODE_CLASS_MAPPINGS[
    "LayerUtility: ImageScaleByAspectRatio V2"
]()
layermask_personmaskultra_v2 = NODE_CLASS_MAPPINGS["LayerMask: PersonMaskUltra V2"]()
growmask = NODE_CLASS_MAPPINGS["GrowMask"]()
inpaintmodelconditioning = NODE_CLASS_MAPPINGS["InpaintModelConditioning"]()
ksampler = NODE_CLASS_MAPPINGS["KSampler"]()
vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
vaeencode = NODE_CLASS_MAPPINGS["VAEEncode"]()
faceanalysismodels = NODE_CLASS_MAPPINGS["FaceAnalysisModels"]()
upscalemodelloader = NODE_CLASS_MAPPINGS["UpscaleModelLoader"]()
ipadapterunifiedloader = NODE_CLASS_MAPPINGS["IPAdapterUnifiedLoader"]()
ipadapteradvanced = NODE_CLASS_MAPPINGS["IPAdapterAdvanced"]()
facesegmentation = NODE_CLASS_MAPPINGS["FaceSegmentation"]()
layerutility_imageblend_v2 = NODE_CLASS_MAPPINGS["LayerUtility: ImageBlend V2"]()
image_comparer_rgthree = NODE_CLASS_MAPPINGS["Image Comparer (rgthree)"]()
saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
imageupscalewithmodel = NODE_CLASS_MAPPINGS["ImageUpscaleWithModel"]()
# showtextpysssss = NODE_CLASS_MAPPINGS["ShowText|pysssss"]()

# 4. Manually initialize Florence2ModelLoader so its model-path list is populated
if "Florence2ModelLoader" in NODE_CLASS_MAPPINGS:
    print("Manually initializing Florence2ModelLoader.INPUT_TYPES() to populate model paths.")
    florence_class = NODE_CLASS_MAPPINGS["Florence2ModelLoader"]
    florence_class.INPUT_TYPES()

# =========================================================================
# 5. Other models that must be ready up front are loaded at the top level
checkpointloadersimple_50 = checkpointloadersimple.load_checkpoint(
    ckpt_name="SD1.5/Realistic_Vision_V6.0_NV_B1_fp16.safetensors"
)

loraloader_841 = loraloader.load_lora(
    lora_name="add_detail.safetensors",
    strength_model=1,
    strength_clip=1,
    model=get_value_at_index(checkpointloadersimple_50, 0),
    clip=get_value_at_index(checkpointloadersimple_50, 1),
)

controlnetloader_73 = controlnetloader.load_controlnet(
    control_net_name="SD1.5/control_v11p_sd15_openpose.pth"
)

checkpointloadersimple_319 = checkpointloadersimple.load_checkpoint(
    ckpt_name="SD1.5/dreamshaper_8Inpainting.safetensors"
)

loraloader_338 = loraloader.load_lora(
    lora_name="add_detail.safetensors",
    strength_model=1,
    strength_clip=1,
    model=get_value_at_index(checkpointloadersimple_319, 0),
    clip=get_value_at_index(checkpointloadersimple_319, 1),
)

loraloader_353 = loraloader.load_lora(
    lora_name="BaldifierW2.safetensors",
    strength_model=2,
    strength_clip=1,
    model=get_value_at_index(loraloader_338, 0),
    clip=get_value_at_index(loraloader_338, 1),
)

controlnetloader_389 = controlnetloader.load_controlnet(
    control_net_name="SD1.5/control_v11p_sd15_openpose.pth"
)

florence2modelloader_204 = florence2modelloader.loadmodel(
    model="Florence-2-base",
    precision="fp16",
    attention="sdpa",
    convert_to_safetensors=False,
)

faceanalysismodels_506 = faceanalysismodels.load_models(
    library="insightface", provider="CPU"
)

upscalemodelloader_835 = upscalemodelloader.load_model(
    model_name="4x_NMKD-Superscale-SP_178000_G.pth"
)

faceanalysismodels_840 = faceanalysismodels.load_models(
    library="insightface", provider="CUDA"
)

# 6. model_management.load_models_gpu: preload the two SD checkpoints onto the GPU
model_loaders = [
    checkpointloadersimple_50,
    checkpointloadersimple_319,
    # controlnetloader_73,
    # florence2modelloader_204,
    # loraloader_338,
    # loraloader_353,
    # controlnetloader_389,
    # upscalemodelloader_835
]

print("model_management.load_models_gpu(model_loaders)")
model_management.load_models_gpu([
    loader[0].patcher if hasattr(loader[0], "patcher") else loader[0]
    for loader in model_loaders
])
print("model_management.load_models_gpu(model_loaders) done")
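# load_models_gpu expects ModelPatcher objects: load_checkpoint already
# returns one as element 0, and the hasattr check also accepts wrappers that
# expose theirs via a .patcher attribute, so either shape works here.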
@spaces.GPU(duration=60)
def generate_image(model_image, hairstyle_template_image):
    with torch.inference_mode():
        cliptextencode_52 = cliptextencode.encode(
            text="multiple_hands, multiple_legs, multiple_girls\nlow quality, blurry, out of focus, distorted, unrealistic, extra limbs, missing limbs, deformed hands, deformed fingers, extra fingers, long neck, unnatural face, bad anatomy, bad proportions, poorly drawn face, poorly drawn eyes, asymmetrical eyes, extra eyes, extra head, floating objects, watermark, text, logo, jpeg artifacts, overexposed, underexposed, harsh lighting, bad posture, strange angles, unnatural expressions, oversaturated colors, messy hair, unrealistic skin texture, wrinkles inappropriately placed, incorrect shading, pixelation, complex background, busy background, detailed background, crowded scene, clutter, messy elements, unnecessary objects, overlapping objects, intricate patterns, vibrant colors, high contrast, graffiti, shadows, reflections, multiple layers, unrealistic lighting, overexposed areas,cartoon, anime, painting, illustration, 3d, cgi, render,blurry, soft focus, out of focus, noise, jpeg artifacts,plastic hair, smooth skin, unrealistic, fake",
            clip=get_value_at_index(loraloader_841, 1),
        )

        loadimage_144 = loadimage.load_image(image=hairstyle_template_image)

        florence2run_203 = florence2run.encode(
            text_input="",
            task="more_detailed_caption",
            fill_mask=True,
            keep_model_loaded=False,
            max_new_tokens=1024,
            num_beams=3,
            do_sample=True,
            output_mask_select="",
            seed=random.randint(1, 2**64 - 1),  # stay within the 64-bit seed range
            image=get_value_at_index(loadimage_144, 0),
            florence2_model=get_value_at_index(florence2modelloader_204, 0),
        )

        # text_string_845 = text_string.text_string(
        #     text="visible hair follicles,sharp focus,photorealistic, hyperrealistic, 8k uhd, professional photography, soft natural lighting",
        #     text_b="",
        #     text_c="",
        #     text_d="",
        # )

        # text_concatenate_842 = text_concatenate.text_concatenate(
        #     delimiter=", ",
        #     clean_whitespace="true",
        #     text_a=get_value_at_index(florence2run_203, 2),
        #     text_b=get_value_at_index(text_string_845, 0),
        # )

        cliptextencode_188 = cliptextencode.encode(
            text=get_value_at_index(florence2run_203, 2),
            clip=get_value_at_index(loraloader_841, 1),
        )

        cliptextencode_836 = cliptextencode.encode(
            text=" Bald, no hair, small head, small head, nothing around, no light, no highlights, no sunlight,Smooth forehead,No wrinkles",
            clip=get_value_at_index(loraloader_353, 1),
        )

        cliptextencode_321 = cliptextencode.encode(
            text="wrinkles,Big forehead, big head, big back of the head,multiple_hands, multiple_legs, multiple_girls\nlow quality, blurry, out of focus, distorted, unrealistic, extra limbs, missing limbs, deformed hands, deformed fingers, extra fingers, long neck, unnatural face, bad anatomy, bad proportions, poorly drawn face, poorly drawn eyes, asymmetrical eyes, extra eyes, extra head, floating objects, watermark, text, logo, jpeg artifacts, overexposed, underexposed, harsh lighting, bad posture, strange angles, unnatural expressions, oversaturated colors, messy hair, unrealistic skin texture, wrinkles inappropriately placed, incorrect shading, pixelation, complex background, busy background, detailed background, crowded scene, clutter, messy elements, unnecessary objects, overlapping objects, intricate patterns, vibrant colors, high contrast, graffiti, shadows, reflections, multiple layers, unrealistic lighting, overexposed areas.",
            clip=get_value_at_index(loraloader_353, 1),
        )

        loadimage_317 = loadimage.load_image(image=model_image)

        dwpreprocessor_390 = dwpreprocessor.estimate_pose(
            detect_hand="enable",
            detect_body="enable",
            detect_face="enable",
            resolution=768,
            bbox_detector="yolox_l.onnx",
            pose_estimator="dw-ll_ucoco_384_bs5.torchscript.pt",
            scale_stick_for_xinsr_cn="disable",
            image=get_value_at_index(loadimage_317, 0),
        )
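        # Stage 1 ("bald pass"): the pose skeleton above drives an OpenPose
        # ControlNet below, and the Baldifier LoRA inpaints the hair region
        # away so the new hairstyle is not contaminated by the old one.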
        controlnetapplyadvanced_388 = controlnetapplyadvanced.apply_controlnet(
            strength=1,
            start_percent=0,
            end_percent=1,
            positive=get_value_at_index(cliptextencode_836, 0),
            negative=get_value_at_index(cliptextencode_321, 0),
            control_net=get_value_at_index(controlnetloader_389, 0),
            image=get_value_at_index(dwpreprocessor_390, 0),
            vae=get_value_at_index(checkpointloadersimple_319, 2),
        )

        layerutility_imagescalebyaspectratio_v2_331 = (
            layerutility_imagescalebyaspectratio_v2.image_scale_by_aspect_ratio(
                aspect_ratio="original",
                proportional_width=1,
                proportional_height=1,
                fit="letterbox",
                method="lanczos",
                round_to_multiple="8",
                scale_to_side="longest",
                scale_to_length=768,
                background_color="#000000",
                image=get_value_at_index(loadimage_317, 0),
                mask=get_value_at_index(loadimage_317, 1),
            )
        )

        layermask_personmaskultra_v2_327 = (
            layermask_personmaskultra_v2.person_mask_ultra_v2(
                face=False,
                hair=True,
                body=False,
                clothes=False,
                accessories=False,
                background=False,
                confidence=0.4,
                detail_method="VITMatte",
                detail_erode=6,
                detail_dilate=6,
                black_point=0.01,
                white_point=0.99,
                process_detail=True,
                device="cuda",
                max_megapixels=2,
                images=get_value_at_index(
                    layerutility_imagescalebyaspectratio_v2_331, 0
                ),
            )
        )

        growmask_502 = growmask.expand_mask(
            expand=30,
            tapered_corners=True,
            mask=get_value_at_index(layermask_personmaskultra_v2_327, 1),
        )

        inpaintmodelconditioning_330 = inpaintmodelconditioning.encode(
            noise_mask=True,
            positive=get_value_at_index(controlnetapplyadvanced_388, 0),
            negative=get_value_at_index(controlnetapplyadvanced_388, 1),
            vae=get_value_at_index(checkpointloadersimple_319, 2),
            pixels=get_value_at_index(layerutility_imagescalebyaspectratio_v2_331, 0),
            mask=get_value_at_index(growmask_502, 0),
        )

        ksampler_318 = ksampler.sample(
            seed=random.randint(1, 2**64 - 1),
            steps=10,
            cfg=2.5,
            sampler_name="euler_ancestral",
            scheduler="normal",
            denoise=1,
            model=get_value_at_index(loraloader_353, 0),
            positive=get_value_at_index(inpaintmodelconditioning_330, 0),
            negative=get_value_at_index(inpaintmodelconditioning_330, 1),
            latent_image=get_value_at_index(inpaintmodelconditioning_330, 2),
        )

        vaedecode_322 = vaedecode.decode(
            samples=get_value_at_index(ksampler_318, 0),
            vae=get_value_at_index(checkpointloadersimple_319, 2),
        )

        vaeencode_191 = vaeencode.encode(
            pixels=get_value_at_index(vaedecode_322, 0),
            vae=get_value_at_index(checkpointloadersimple_50, 2),
        )

        # 7. The original `for q in range(1):` wrapper is removed, so the body
        # below runs exactly once at this indentation level.
        # for q in range(1):
        ipadapterunifiedloader_90 = ipadapterunifiedloader.load_models(
            preset="PLUS (high strength)",
            model=get_value_at_index(loraloader_841, 0),
        )

        layerutility_imagescalebyaspectratio_v2_187 = (
            layerutility_imagescalebyaspectratio_v2.image_scale_by_aspect_ratio(
                aspect_ratio="original",
                proportional_width=132,
                proportional_height=1,
                fit="letterbox",
                method="lanczos",
                round_to_multiple="8",
                scale_to_side="longest",
                scale_to_length=768,
                background_color="#000000",
                image=get_value_at_index(loadimage_144, 0),
            )
        )

        ipadapteradvanced_85 = ipadapteradvanced.apply_ipadapter(
            weight=1,
            weight_type="strong style transfer",
            combine_embeds="concat",
            start_at=0,
            end_at=1,
            embeds_scaling="V only",
            model=get_value_at_index(ipadapterunifiedloader_90, 0),
            ipadapter=get_value_at_index(ipadapterunifiedloader_90, 1),
            image=get_value_at_index(
                layerutility_imagescalebyaspectratio_v2_187, 0
            ),
        )

        dwpreprocessor_72 = dwpreprocessor.estimate_pose(
            detect_hand="enable",
            detect_body="enable",
            detect_face="enable",
            resolution=1024,
            bbox_detector="yolox_l.onnx",
            pose_estimator="dw-ll_ucoco_384_bs5.torchscript.pt",
            scale_stick_for_xinsr_cn="disable",
            image=get_value_at_index(vaedecode_322, 0),
        )
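        # Stage 2 ("hairstyle pass"): IPAdapter transfers the template
        # hairstyle as style conditioning, the Florence-2 caption supplies
        # the positive prompt, and a fresh DWPose skeleton keeps the pose.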
        controlnetapplyadvanced_189 = controlnetapplyadvanced.apply_controlnet(
            strength=1,
            start_percent=0,
            end_percent=1,
            positive=get_value_at_index(cliptextencode_188, 0),
            negative=get_value_at_index(cliptextencode_52, 0),
            control_net=get_value_at_index(controlnetloader_73, 0),
            image=get_value_at_index(dwpreprocessor_72, 0),
            vae=get_value_at_index(checkpointloadersimple_50, 2),
        )

        ksampler_45 = ksampler.sample(
            seed=random.randint(1, 2**64 - 1),
            steps=8,
            cfg=1,
            sampler_name="dpmpp_2m_sde",
            scheduler="karras",
            denoise=0.9,
            model=get_value_at_index(ipadapteradvanced_85, 0),
            positive=get_value_at_index(controlnetapplyadvanced_189, 0),
            negative=get_value_at_index(controlnetapplyadvanced_189, 1),
            latent_image=get_value_at_index(vaeencode_191, 0),
        )

        vaedecode_67 = vaedecode.decode(
            samples=get_value_at_index(ksampler_45, 0),
            vae=get_value_at_index(checkpointloadersimple_50, 2),
        )

        layermask_personmaskultra_v2_192 = (
            layermask_personmaskultra_v2.person_mask_ultra_v2(
                face=False,
                hair=True,
                body=False,
                clothes=False,
                accessories=False,
                background=False,
                confidence=0.15,
                detail_method="VITMatte",
                detail_erode=6,
                detail_dilate=6,
                black_point=0.01,
                white_point=0.99,
                process_detail=True,
                device="cuda",
                max_megapixels=2,
                images=get_value_at_index(vaedecode_67, 0),
            )
        )

        facesegmentation_505 = facesegmentation.segment(
            area="face+forehead (if available)",
            grow=-20,
            grow_tapered=False,
            blur=51,
            analysis_models=get_value_at_index(faceanalysismodels_506, 0),
            image=get_value_at_index(
                layerutility_imagescalebyaspectratio_v2_331, 0
            ),
        )

        growmask_396 = growmask.expand_mask(
            expand=0,
            tapered_corners=True,
            mask=get_value_at_index(facesegmentation_505, 0),
        )

        layerutility_imageblend_v2_399 = layerutility_imageblend_v2.image_blend_v2(
            invert_mask=True,
            blend_mode="normal",
            opacity=100,
            background_image=get_value_at_index(
                layerutility_imagescalebyaspectratio_v2_331, 0
            ),
            layer_image=get_value_at_index(vaedecode_322, 0),
            layer_mask=get_value_at_index(growmask_396, 0),
        )

        layerutility_imageblend_v2_314 = layerutility_imageblend_v2.image_blend_v2(
            invert_mask=True,
            blend_mode="normal",
            opacity=100,
            background_image=get_value_at_index(layerutility_imageblend_v2_399, 0),
            layer_image=get_value_at_index(layermask_personmaskultra_v2_192, 0),
        )

        facesegmentation_838 = facesegmentation.segment(
            area="face+forehead (if available)",
            grow=-20,
            grow_tapered=False,
            blur=51,
            analysis_models=get_value_at_index(faceanalysismodels_840, 0),
            image=get_value_at_index(layerutility_imageblend_v2_399, 0),
        )

        growmask_839 = growmask.expand_mask(
            expand=0,
            tapered_corners=True,
            mask=get_value_at_index(facesegmentation_838, 0),
        )

        layerutility_imageblend_v2_686 = layerutility_imageblend_v2.image_blend_v2(
            invert_mask=False,
            blend_mode="normal",
            opacity=100,
            background_image=get_value_at_index(layerutility_imageblend_v2_314, 0),
            layer_image=get_value_at_index(layerutility_imageblend_v2_399, 0),
            layer_mask=get_value_at_index(growmask_839, 0),
        )

        image_comparer_rgthree_486 = image_comparer_rgthree.compare_images(
            image_a=get_value_at_index(layerutility_imageblend_v2_686, 0),
            image_b=get_value_at_index(
                layerutility_imagescalebyaspectratio_v2_331, 0
            ),
        )

        # result
        saveimage_680 = saveimage.save_images(
            filename_prefix="hairstyle_filter",
            images=get_value_at_index(layerutility_imageblend_v2_686, 0),
        )
        remove_image_metadata(f"output/{saveimage_680['ui']['images'][0]['filename']}")

        image_comparer_rgthree_820 = image_comparer_rgthree.compare_images(
            image_a=get_value_at_index(layerutility_imageblend_v2_399, 0),
            image_b=get_value_at_index(
                layerutility_imagescalebyaspectratio_v2_331, 0
            ),
        )
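        # The compositing above blends the generated result back over the
        # original photo, with face-segmentation masks protecting the face
        # region; below, the 4x NMKD Superscale model produces the sharp variant.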
        imageupscalewithmodel_831 = imageupscalewithmodel.upscale(
            upscale_model=get_value_at_index(upscalemodelloader_835, 0),
            image=get_value_at_index(layerutility_imageblend_v2_686, 0),
        )

        # showtextpysssss_846 = showtextpysssss.notify(
        #     text=get_value_at_index(text_concatenate_842, 0),
        #     unique_id=907207178790687794,
        # )

        # result with ultra sharp
        saveimage_847 = saveimage.save_images(
            filename_prefix="hairstyle_filter_result",
            images=get_value_at_index(imageupscalewithmodel_831, 0),
        )
        # Shrink the 4x-upscaled image to 2x and optimize compression
        # (long edge goes from 3072 down to 1536)
        resize_and_optimize_image(
            f"output/{saveimage_847['ui']['images'][0]['filename']}",
            target_long_edge=1536,
        )

        # bald
        saveimage_848 = saveimage.save_images(
            filename_prefix="hairstyle_bald_result",
            images=get_value_at_index(layerutility_imageblend_v2_399, 0),
        )
        remove_image_metadata(f"output/{saveimage_848['ui']['images'][0]['filename']}")

        saved_path_bald = f"output/{saveimage_848['ui']['images'][0]['filename']}"
        saved_path_result = f"output/{saveimage_680['ui']['images'][0]['filename']}"
        saved_path_result_sharp = f"output/{saveimage_847['ui']['images'][0]['filename']}"

        return saved_path_result_sharp
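# generate_image_bald reuses only stage 1 of the workflow above: it removes
# the hair and returns the bald composite, skipping the IPAdapter hairstyle pass.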
@spaces.GPU(duration=60)
def generate_image_bald(model_image):
    with torch.inference_mode():
        cliptextencode_836 = cliptextencode.encode(
            text=" Bald, no hair, small head, small head, nothing around, no light, no highlights, no sunlight,Smooth forehead,No wrinkles",
            clip=get_value_at_index(loraloader_353, 1),
        )

        cliptextencode_321 = cliptextencode.encode(
            text="wrinkles,Big forehead, big head, big back of the head,multiple_hands, multiple_legs, multiple_girls\nlow quality, blurry, out of focus, distorted, unrealistic, extra limbs, missing limbs, deformed hands, deformed fingers, extra fingers, long neck, unnatural face, bad anatomy, bad proportions, poorly drawn face, poorly drawn eyes, asymmetrical eyes, extra eyes, extra head, floating objects, watermark, text, logo, jpeg artifacts, overexposed, underexposed, harsh lighting, bad posture, strange angles, unnatural expressions, oversaturated colors, messy hair, unrealistic skin texture, wrinkles inappropriately placed, incorrect shading, pixelation, complex background, busy background, detailed background, crowded scene, clutter, messy elements, unnecessary objects, overlapping objects, intricate patterns, vibrant colors, high contrast, graffiti, shadows, reflections, multiple layers, unrealistic lighting, overexposed areas.",
            clip=get_value_at_index(loraloader_353, 1),
        )

        loadimage_317 = loadimage.load_image(image=model_image)

        dwpreprocessor_390 = dwpreprocessor.estimate_pose(
            detect_hand="enable",
            detect_body="enable",
            detect_face="enable",
            resolution=768,
            bbox_detector="yolox_l.onnx",
            pose_estimator="dw-ll_ucoco_384_bs5.torchscript.pt",
            scale_stick_for_xinsr_cn="disable",
            image=get_value_at_index(loadimage_317, 0),
        )

        controlnetapplyadvanced_388 = controlnetapplyadvanced.apply_controlnet(
            strength=1,
            start_percent=0,
            end_percent=1,
            positive=get_value_at_index(cliptextencode_836, 0),
            negative=get_value_at_index(cliptextencode_321, 0),
            control_net=get_value_at_index(controlnetloader_389, 0),
            image=get_value_at_index(dwpreprocessor_390, 0),
            vae=get_value_at_index(checkpointloadersimple_319, 2),
        )

        layerutility_imagescalebyaspectratio_v2_331 = (
            layerutility_imagescalebyaspectratio_v2.image_scale_by_aspect_ratio(
                aspect_ratio="original",
                proportional_width=1,
                proportional_height=1,
                fit="letterbox",
                method="lanczos",
                round_to_multiple="8",
                scale_to_side="longest",
                scale_to_length=768,
                background_color="#000000",
                image=get_value_at_index(loadimage_317, 0),
                mask=get_value_at_index(loadimage_317, 1),
            )
        )

        layermask_personmaskultra_v2_327 = (
            layermask_personmaskultra_v2.person_mask_ultra_v2(
                face=False,
                hair=True,
                body=False,
                clothes=False,
                accessories=False,
                background=False,
                confidence=0.4,
                detail_method="VITMatte",
                detail_erode=6,
                detail_dilate=6,
                black_point=0.01,
                white_point=0.99,
                process_detail=True,
                device="cuda",
                max_megapixels=2,
                images=get_value_at_index(
                    layerutility_imagescalebyaspectratio_v2_331, 0
                ),
            )
        )

        growmask_502 = growmask.expand_mask(
            expand=30,
            tapered_corners=True,
            mask=get_value_at_index(layermask_personmaskultra_v2_327, 1),
        )

        inpaintmodelconditioning_330 = inpaintmodelconditioning.encode(
            noise_mask=True,
            positive=get_value_at_index(controlnetapplyadvanced_388, 0),
            negative=get_value_at_index(controlnetapplyadvanced_388, 1),
            vae=get_value_at_index(checkpointloadersimple_319, 2),
            pixels=get_value_at_index(layerutility_imagescalebyaspectratio_v2_331, 0),
            mask=get_value_at_index(growmask_502, 0),
        )

        ksampler_318 = ksampler.sample(
            seed=random.randint(1, 2**64 - 1),
            steps=10,
            cfg=2.5,
            sampler_name="euler_ancestral",
            scheduler="normal",
            denoise=1,
            model=get_value_at_index(loraloader_353, 0),
            positive=get_value_at_index(inpaintmodelconditioning_330, 0),
            negative=get_value_at_index(inpaintmodelconditioning_330, 1),
            latent_image=get_value_at_index(inpaintmodelconditioning_330, 2),
        )

        vaedecode_322 = vaedecode.decode(
            samples=get_value_at_index(ksampler_318, 0),
            vae=get_value_at_index(checkpointloadersimple_319, 2),
        )

        facesegmentation_505 = facesegmentation.segment(
            area="face+forehead (if available)",
            grow=-20,
            grow_tapered=False,
            blur=51,
            analysis_models=get_value_at_index(faceanalysismodels_506, 0),
            image=get_value_at_index(
                layerutility_imagescalebyaspectratio_v2_331, 0
            ),
        )

        growmask_396 = growmask.expand_mask(
            expand=0,
            tapered_corners=True,
            mask=get_value_at_index(facesegmentation_505, 0),
        )

        layerutility_imageblend_v2_399 = layerutility_imageblend_v2.image_blend_v2(
            invert_mask=True,
            blend_mode="normal",
            opacity=100,
            background_image=get_value_at_index(
                layerutility_imagescalebyaspectratio_v2_331, 0
            ),
            layer_image=get_value_at_index(vaedecode_322, 0),
            layer_mask=get_value_at_index(growmask_396, 0),
        )

        # bald
        saveimage_848 = saveimage.save_images(
            filename_prefix="hairstyle_bald_result",
            images=get_value_at_index(layerutility_imageblend_v2_399, 0),
        )
        remove_image_metadata(f"output/{saveimage_848['ui']['images'][0]['filename']}")

        saved_path_bald = f"output/{saveimage_848['ui']['images'][0]['filename']}"

        return saved_path_bald


if __name__ == "__main__":
    # main()
    with gr.Blocks() as app:
        gr.Markdown("# Swap Hairstyle")
        with gr.Row():
            # Inputs
            with gr.Column():
                with gr.Row():
                    # Photo of the person whose hairstyle will be replaced
                    with gr.Group():
                        model_image = gr.Image(label="Model Image", type="filepath")
                    # Reference image of the target hairstyle
                    with gr.Group():
                        hairstyle_template_image = gr.Image(
                            label="Hairstyle Template Image", type="filepath"
                        )
            with gr.Column():
                # Output image
                output_image = gr.Image(label="Generated Image")

        with gr.Row():
            with gr.Column():
                generate_btn_bald = gr.Button("Generate Bald")
            with gr.Column():
                generate_btn = gr.Button("Generate")

        generate_btn_bald.click(
            fn=generate_image_bald,
            inputs=[model_image],
            outputs=[output_image],
        )

        generate_btn.click(
            fn=generate_image,
            inputs=[model_image, hairstyle_template_image],
            outputs=[output_image],
        )

    app.launch(share=True)
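    # Note: share=True requests a public Gradio tunnel link; on Hugging Face
    # Spaces the app is already served publicly, so the flag mainly matters
    # for local runs.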