"""
Device detection and management utilities for ComfyUI-MultiGPU.
Single source of truth for all device enumeration and compatibility checks.
Handles all device types supported by ComfyUI core.
"""

import torch
import logging

logger = logging.getLogger("MultiGPU")

# Module-level cache for device list (populated once on first call)
_DEVICE_LIST_CACHE = None

def get_device_list():
    """
    Enumerate ALL physically available devices that can store torch tensors.
    This includes all device types supported by ComfyUI core.
    Results are cached after the first call since devices don't change during runtime.

    Returns a comprehensive list of all available devices across all types:
    - CPU (always available)
    - CUDA devices (NVIDIA GPUs)
    - XPU devices (Intel GPUs)
    - NPU devices (Ascend NPUs from Huawei)
    - MLU devices (Cambricon MLUs)
    - MPS device (Apple Metal)
    - DirectML devices (Windows DirectML)
    - CoreX/IXUCA devices
    """
    global _DEVICE_LIST_CACHE

    # Return cached result if already populated
    if _DEVICE_LIST_CACHE is not None:
        return _DEVICE_LIST_CACHE

    # First call: do the actual detection
    devs = []

    # CPU is always physically present and can store tensors
    devs.append("cpu")

    # CUDA devices (NVIDIA GPUs)
    try:
        if hasattr(torch, "cuda") and hasattr(torch.cuda, "is_available") and torch.cuda.is_available():
            device_count = torch.cuda.device_count()
            devs += [f"cuda:{i}" for i in range(device_count)]
            logger.debug(f"[MultiGPU] Found {device_count} CUDA device(s)")
    except Exception as e:
        logger.debug(f"[MultiGPU] CUDA detection failed: {e}")

    # XPU devices (Intel GPUs)
    try:
        # The import itself is the side effect: on some builds it registers
        # the torch.xpu backend, so no alias is needed.
        import intel_extension_for_pytorch  # noqa: F401
    except ImportError:
        pass
    try:
        if hasattr(torch, "xpu") and hasattr(torch.xpu, "is_available") and torch.xpu.is_available():
            device_count = torch.xpu.device_count()
            devs += [f"xpu:{i}" for i in range(device_count)]
            logger.debug(f"[MultiGPU] Found {device_count} XPU device(s)")
    except Exception as e:
        logger.debug(f"[MultiGPU] XPU detection failed: {e}")

    # NPU devices (Ascend NPUs from Huawei)
    try:
        import torch_npu  # noqa: F401 - the import registers the torch.npu namespace
        if hasattr(torch, "npu") and hasattr(torch.npu, "is_available") and torch.npu.is_available():
            device_count = torch.npu.device_count()
            devs += [f"npu:{i}" for i in range(device_count)]
            logger.debug(f"[MultiGPU] Found {device_count} NPU device(s)")
    except Exception as e:
        logger.debug(f"[MultiGPU] NPU detection failed: {e}")

    # MLU devices (Cambricon MLUs)
    try:
        import torch_mlu  # noqa: F401 - the import registers the torch.mlu namespace
        if hasattr(torch, "mlu") and hasattr(torch.mlu, "is_available") and torch.mlu.is_available():
            device_count = torch.mlu.device_count()
            devs += [f"mlu:{i}" for i in range(device_count)]
            logger.debug(f"[MultiGPU] Found {device_count} MLU device(s)")
    except Exception as e:
        logger.debug(f"[MultiGPU] MLU detection failed: {e}")

    # MPS device (Apple Metal - single device only)
    try:
        if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
            devs.append("mps")
            logger.debug("[MultiGPU] Found MPS device")
    except Exception as e:
        logger.debug(f"[MultiGPU] MPS detection failed: {e}")

    # DirectML devices (Windows DirectML for AMD/Intel/NVIDIA)
    # Note: "directml:N" is this module's own naming, not a valid torch device
    # string; callers must map the index through torch_directml.device(N).
    try:
        import torch_directml
        adapter_count = torch_directml.device_count()
        if adapter_count > 0:
            devs += [f"directml:{i}" for i in range(adapter_count)]
            logger.debug(f"[MultiGPU] Found {adapter_count} DirectML adapter(s)")
    except Exception as e:
        logger.debug(f"[MultiGPU] DirectML detection failed: {e}")

    # IXUCA/CoreX devices (special accelerator)
    try:
        if hasattr(torch, "corex"):
            # CoreX typically exposes a single device, but honor a count method if present
            if hasattr(torch.corex, "device_count"):
                device_count = torch.corex.device_count()
                devs += [f"corex:{i}" for i in range(device_count)]
                logger.debug(f"[MultiGPU] Found {device_count} CoreX device(s)")
            else:
                devs.append("corex:0")
                logger.debug("[MultiGPU] Found CoreX device")
    except Exception as e:
        logger.debug(f"[MultiGPU] CoreX detection failed: {e}")

    # Cache the result for future calls
    _DEVICE_LIST_CACHE = devs

    # Log only once, when the list is initially populated
    logger.info(f"[MultiGPU] Device list initialized: {devs}")

    return devs

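# A minimal usage sketch (assumes this file is importable as `device_utils`;
# the actual module path inside ComfyUI-MultiGPU may differ). On a two-GPU
# NVIDIA host this typically prints ["cpu", "cuda:0", "cuda:1"]:
#
#     from device_utils import get_device_list
#     print(get_device_list())   # cached after the first call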

def is_accelerator_available():
    """
    Check if any accelerator device is available.
    Used by patched functions to determine CPU fallback.

    Returns True if any GPU/accelerator is available, False otherwise.
    """
    # Check CUDA
    try:
        if torch.cuda.is_available():
            return True
    except Exception:
        pass

    # Check XPU (Intel GPU)
    try:
        if hasattr(torch, "xpu") and torch.xpu.is_available():
            return True
    except Exception:
        pass

    # Check NPU (Ascend)
    try:
        import torch_npu  # noqa: F401 - registers torch.npu
        if hasattr(torch, "npu") and torch.npu.is_available():
            return True
    except Exception:
        pass

    # Check MLU (Cambricon)
    try:
        import torch_mlu  # noqa: F401 - registers torch.mlu
        if hasattr(torch, "mlu") and torch.mlu.is_available():
            return True
    except Exception:
        pass

    # Check MPS (Apple Metal)
    try:
        if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
            return True
    except Exception:
        pass

    # Check DirectML
    try:
        import torch_directml
        if torch_directml.device_count() > 0:
            return True
    except Exception:
        pass

    # Check CoreX/IXUCA
    try:
        if hasattr(torch, "corex"):
            return True
    except Exception:
        pass

    return False

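# Hedged sketch of the intended call pattern (device names are illustrative):
# patched ComfyUI functions fall back to the CPU when no accelerator exists.
#
#     device = "cuda:0" if is_accelerator_available() else "cpu"
#     model.to(device)   # `model` is a stand-in for any torch.nn.Module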

def is_device_compatible(device_string):
    """
    Check if a device string represents a valid, available device.

    Args:
        device_string: Device identifier like "cuda:0", "cpu", "xpu:1", etc.

    Returns:
        True if the device is available, False otherwise.
    """
    available_devices = get_device_list()
    return device_string in available_devices

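# Example (illustrative device names; results depend on the host):
#
#     is_device_compatible("cuda:0")   # True on a machine with an NVIDIA GPU
#     is_device_compatible("cuda:7")   # False unless 8+ GPUs are present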

def get_device_type(device_string):
    """
    Extract the device type from a device string.

    Args:
        device_string: Device identifier like "cuda:0", "cpu", "xpu:1", etc.

    Returns:
        Device type string (e.g., "cuda", "cpu", "xpu", "npu", "mlu", "mps", "directml", "corex")
    """
    if ":" in device_string:
        return device_string.split(":")[0]
    return device_string

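# Example:
#
#     get_device_type("cuda:0")   # -> "cuda"
#     get_device_type("mps")      # -> "mps"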

def parse_device_string(device_string):
    """
    Parse a device string into type and index.

    Args:
        device_string: Device identifier like "cuda:0", "cpu", "xpu:1", etc.

    Returns:
        Tuple of (device_type, device_index) where index is None for non-indexed devices
    """
    if ":" in device_string:
        # maxsplit=1 guards against stray extra colons silently shifting the
        # split; the index portion must still parse as an int.
        device_type, index = device_string.split(":", 1)
        return device_type, int(index)
    return device_string, None
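

# Minimal self-test (a hedged sketch, not part of the ComfyUI node API):
# running this file directly prints what the detector found on this host.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    devices = get_device_list()
    print(f"Detected devices: {devices}")
    print(f"Accelerator available: {is_accelerator_available()}")
    for dev in devices:
        dev_type, index = parse_device_string(dev)
        print(f"  {dev}: type={dev_type}, index={index}, compatible={is_device_compatible(dev)}")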