Skip to content

Commit fa437d2

Browse files
pollockjj and max-solo23 committed
Updating distorch_2.py:
1. Replacing ad-hoc print() with structured logging
2. Simplifying device detection (fail-fast approach)
3. Maintaining the implemented backward compatibility for GGUF/ModelPatcher by max-solo23
4. Following the repository's logging conventions

Co-authored-by: max-solo23 <maksym.solomyanov@gmail.com>
1 parent 5912141 commit fa437d2

1 file changed

Lines changed: 5 additions & 6 deletions

File tree

distorch_2.py

Lines changed: 5 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -54,15 +54,14 @@ def patched_load_models_gpu(models, memory_required=0, force_patch_weights=False
5454
model_type = type(m).__name__
5555

5656
if ("GGUF" in model_type or "ModelPatcher" in model_type) and hasattr(m, "model_patches_to"):
57-
print(f"[MultiGPU] {type(m).__name__} missing 'model_patches_models' attribute, using 'model_patches_to' fallback.")
58-
target_device = getattr(m, "load_device",
59-
f"cuda:{torch.cuda.current_device()}" if torch.cuda.is_available() else "cpu")
60-
print(f"Target device: {target_device}")
57+
logger.info(f"[MultiGPU DisTorch V2] {type(m).__name__} missing 'model_patches_models' attribute, using 'model_patches_to' fallback.")
58+
target_device = m.load_device
59+
logger.debug(f"[MultiGPU DisTorch V2] Target device: {target_device}")
6160
patches = m.model_patches_to(target_device)
6261
if patches:
63-
print(f"[MultiGPU] Found {len(patches)} mm_patch(es) for {type(m).__name__} on device {target_device}")
62+
logger.debug(f"[MultiGPU DisTorch V2] Found {len(patches)} mm_patch(es) for {type(m).__name__} on device {target_device}")
6463
for mm_patch in patches:
65-
print(f"[MultiGPU] Registering mm_patch: {type(mm_patch).__name__}")
64+
logger.debug(f"[MultiGPU DisTorch V2] Registering mm_patch: {type(mm_patch).__name__}")
6665
models_temp.add(mm_patch)
6766
continue
6867

0 commit comments

Comments (0)