Commit 5127e18
sync changes to kijai's nodes
- Adding a new `fantasyportrait_model` input to support FantasyPortrait models.
- Renaming the `vace_model` input to a more generic `extra_model` to allow loading other auxiliary models like VACE or MTV Crafter.
- Correcting the node type for the `fantasytalking_model` input from `FANTASYTALKINGMODEL` to `FANTASYTALKMODEL`.
1 parent 336e236 commit 5127e18

1 file changed: 13 additions & 8 deletions

wanvideo.py
```diff
@@ -36,9 +36,10 @@ def INPUT_TYPES(s):
                 "block_swap_args": ("BLOCKSWAPARGS", ),
                 "lora": ("WANVIDLORA", {"default": None}),
                 "vram_management_args": ("VRAM_MANAGEMENTARGS", {"default": None, "tooltip": "Alternative offloading method from DiffSynth-Studio, more aggressive in reducing memory use than block swapping, but can be slower"}),
-                "vace_model": ("VACEPATH", {"default": None, "tooltip": "VACE model to use when not using model that has it included"}),
-                "fantasytalking_model": ("FANTASYTALKINGMODEL", {"default": None, "tooltip": "FantasyTalking model https://github.com/Fantasy-AMAP"}),
+                "extra_model": ("VACEPATH", {"default": None, "tooltip": "Extra model to add to the main model, ie. VACE or MTV Crafter"}),
+                "fantasytalking_model": ("FANTASYTALKMODEL", {"default": None, "tooltip": "FantasyTalking model https://github.com/Fantasy-AMAP"}),
                 "multitalk_model": ("MULTITALKMODEL", {"default": None, "tooltip": "Multitalk model"}),
+                "fantasyportrait_model": ("FANTASYPORTRAITMODEL", {"default": None, "tooltip": "FantasyPortrait model"}),
             }
         }

```
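The `FANTASYTALKINGMODEL` → `FANTASYTALKMODEL` correction matters because ComfyUI matches sockets by exact type string: an input only accepts a link from a node whose `RETURN_TYPES` entry is the identical string. A minimal sketch of that contract, using hypothetical node classes that are not from this repo:

```python
# Sketch of ComfyUI's type-string contract (node classes here are hypothetical).
# A link is only allowed when the producer's RETURN_TYPES entry and the
# consumer's declared input type are the exact same string.

class FantasyTalkingLoader:                       # hypothetical producer node
    RETURN_TYPES = ("FANTASYTALKMODEL",)          # the string its output socket advertises
    FUNCTION = "load"

    def load(self):
        return (object(),)                        # stand-in for the loaded model

class ModelConsumer:                              # hypothetical consumer node
    @classmethod
    def INPUT_TYPES(cls):
        # Declaring "FANTASYTALKINGMODEL" here would never match the producer
        # above, leaving the socket unconnectable -- hence this commit's fix.
        return {"optional": {"fantasytalking_model": ("FANTASYTALKMODEL", {"default": None})}}
```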

```diff
@@ -48,7 +49,7 @@ def INPUT_TYPES(s):
     CATEGORY = "WanVideoWrapper"

     def loadmodel(self, model, base_precision, device, quantization,
-                  compile_args=None, attention_mode="sdpa", block_swap_args=None, lora=None, vram_management_args=None, vace_model=None, fantasytalking_model=None, multitalk_model=None):
+                  compile_args=None, attention_mode="sdpa", block_swap_args=None, lora=None, vram_management_args=None, extra_model=None, fantasytalking_model=None, multitalk_model=None, fantasyportrait_model=None):
         logging.debug(f"[MultiGPU] WanVideoModelLoader: User selected device: {device}")

         selected_device = torch.device(device)
```
```diff
@@ -89,7 +90,7 @@ def loadmodel(self, model, base_precision, device, quantization,

         logging.debug(f"[MultiGPU] Calling original WanVideo loader")
         result = original_loader.loadmodel(model, base_precision, load_device, quantization,
-                                           compile_args, attention_mode, block_swap_args, lora, vram_management_args, vace_model, fantasytalking_model, multitalk_model)
+                                           compile_args, attention_mode, block_swap_args, lora, vram_management_args, extra_model=extra_model, fantasytalking_model=fantasytalking_model, multitalk_model=multitalk_model, fantasyportrait_model=fantasyportrait_model)

         if result and len(result) > 0 and hasattr(result[0], 'model'):
             model_obj = result[0]
```
```diff
@@ -107,7 +108,7 @@ def loadmodel(self, model, base_precision, device, quantization,
         else:
             logging.error(f"[MultiGPU] Could not patch WanVideo modules, falling back")
             return original_loader.loadmodel(model, base_precision, load_device, quantization,
-                                             compile_args, attention_mode, block_swap_args, lora, vram_management_args, vace_model, fantasytalking_model, multitalk_model)
+                                             compile_args, attention_mode, block_swap_args, lora, vram_management_args, extra_model=extra_model, fantasytalking_model=fantasytalking_model, multitalk_model=multitalk_model, fantasyportrait_model=fantasyportrait_model)


 class WanVideoVAELoader:
```
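Both call sites above also switch the trailing arguments from positional to keyword form, which protects this wrapper if kijai inserts or reorders optional parameters upstream. A hedged illustration of the failure mode, with made-up signatures:

```python
# Why keyword forwarding is safer across upstream signature changes
# (hypothetical functions, for illustration only).

def upstream_v1(model, vace_model=None, fantasytalking_model=None):
    return vace_model, fantasytalking_model

# Upstream later renames and extends its optional parameters:
def upstream_v2(model, extra_model=None, fantasytalking_model=None, fantasyportrait_model=None):
    return extra_model, fantasytalking_model, fantasyportrait_model

# Positional forwarding silently maps old slots onto whatever parameter now
# occupies that position; keyword forwarding either works or fails loudly.
print(upstream_v2("m", "vace.safetensors", "talk.safetensors"))   # order-dependent
print(upstream_v2("m", extra_model="vace.safetensors",
                  fantasytalking_model="talk.safetensors"))       # explicit and robust
```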
```diff
@@ -356,11 +357,11 @@ def INPUT_TYPES(s):

     def loadmodel(self, model, base_precision, device, quantization,
                   compile_args=None, attention_mode="sdpa", block_swap_args=None, lora=None,
-                  vram_management_args=None, vace_model=None, fantasytalking_model=None, multitalk_model=None):
+                  vram_management_args=None, vace_model=None, fantasytalking_model=None, multitalk_model=None, fantasyportrait_model=None):
         loader = WanVideoModelLoader()
         return loader.loadmodel(model, base_precision, device, quantization,
                                 compile_args, attention_mode, block_swap_args, lora,
                                 vram_management_args, vace_model, fantasytalking_model, multitalk_model)
+                                vram_management_args, vace_model, fantasytalking_model, multitalk_model, fantasyportrait_model)

 class WanVideoSampler:
     @classmethod
```
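Note that this legacy wrapper deliberately keeps its old `vace_model` parameter name and forwards it positionally; since the renamed `extra_model` occupies the same slot in `WanVideoModelLoader.loadmodel`, existing workflows that wire up a VACE model keep working. Reduced to a toy example (hypothetical functions):

```python
# The back-compat pattern in miniature: an old parameter name forwarded
# positionally still lands on the renamed slot (hypothetical functions).

def new_loadmodel(model, extra_model=None, fantasyportrait_model=None):
    return extra_model

def legacy_loadmodel(model, vace_model=None, fantasyportrait_model=None):
    # positional forwarding: vace_model lands on extra_model
    return new_loadmodel(model, vace_model, fantasyportrait_model)

assert legacy_loadmodel("m", vace_model="vace.safetensors") == "vace.safetensors"
```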
```diff
@@ -436,6 +437,8 @@ def INPUT_TYPES(s):
                     "tooltip": "Use non-blocking memory transfer for offloading, reserves more RAM but is faster"}),
                 "vace_blocks_to_swap": ("INT", {"default": 0, "min": 0, "max": 15, "step": 1,
                     "tooltip": "Number of VACE blocks to swap, the VACE model has 15 blocks"}),
+                "prefetch_blocks": ("INT", {"default": 0, "min": 0, "max": 40, "step": 1, "tooltip": "Number of blocks to prefetch ahead, can speed up processing but increases memory usage. 1 is usually enough to offset speed loss from block swapping, use the debug option to confirm it for your system"}),
+                "block_swap_debug": ("BOOLEAN", {"default": False, "tooltip": "Enable debug logging for block swapping"}),
             },
         }
```
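The new `prefetch_blocks` option describes transfer/compute overlap: while block i runs on the GPU, block i+1's weights are already being copied up on a side stream. A minimal sketch of the idea, assuming CUDA streams and weights offloaded to pinned host memory; this illustrates the technique, not WanVideoWrapper's actual implementation:

```python
import torch
import torch.nn as nn

# Sketch of what prefetch_blocks buys during block swapping. While block i
# computes, block i+1's host-to-device copy runs on a separate stream, hiding
# the transfer behind compute. For the copy to genuinely overlap, the
# offloaded weights should live in pinned CPU memory.

def run_blocks(blocks: nn.ModuleList, x, device="cuda", prefetch_blocks=1):
    copy_stream = torch.cuda.Stream(device=device)
    ready = {}  # per-block events marking "copy finished"

    def prefetch(i):
        if i < len(blocks) and i not in ready:
            with torch.cuda.stream(copy_stream):
                blocks[i].to(device, non_blocking=True)  # async H2D copy
                ready[i] = torch.cuda.Event()
                ready[i].record()

    prefetch(0)
    for i in range(len(blocks)):
        for j in range(1, prefetch_blocks + 1):
            prefetch(i + j)          # queue upcoming copies behind current compute
        ready[i].wait()              # compute stream waits only for block i's copy
        x = blocks[i](x)
        blocks[i].to("cpu")          # swap the finished block back out
    return x
```

With `prefetch_blocks=0` every copy serializes with compute, which is the speed loss the tooltip says a value of 1 usually offsets.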

```diff
@@ -446,7 +449,7 @@ def INPUT_TYPES(s):
     DESCRIPTION = "Block swap settings with explicit device selection for memory management across GPUs"

     def setargs(self, blocks_to_swap, swap_device, model_offload_device, offload_img_emb, offload_txt_emb,
-                use_non_blocking=False, vace_blocks_to_swap=0):
+                use_non_blocking=False, vace_blocks_to_swap=0, prefetch_blocks=0, block_swap_debug=False):
         logging.debug(f"[MultiGPU] WanVideoBlockSwap: swap_device={swap_device}, model_offload_device={model_offload_device}, blocks_to_swap={blocks_to_swap}")

         selected_swap_device = torch.device(swap_device)
```
```diff
@@ -472,6 +475,8 @@ def setargs(self, blocks_to_swap, swap_device, model_offload_device, offload_img
             "offload_txt_emb": offload_txt_emb,
             "use_non_blocking": use_non_blocking,
             "vace_blocks_to_swap": vace_blocks_to_swap,
+            "prefetch_blocks": prefetch_blocks,
+            "block_swap_debug": block_swap_debug,
             "swap_device": swap_device,
             "model_offload_device": model_offload_device,
         }
```
