diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index a906ff1c00d..2c2b957d0bb 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -74,6 +74,7 @@ def __call__(self, parser, namespace, values, option_string=None):
 fpvae_group.add_argument("--bf16-vae", action="store_true", help="Run the VAE in bf16.")
 
 parser.add_argument("--cpu-vae", action="store_true", help="Run the VAE on the CPU.")
+parser.add_argument("--cpu-model-sampling", action="store_true", help="Run the model sampling on the CPU.")
 
 fpte_group = parser.add_mutually_exclusive_group()
 fpte_group.add_argument("--fp8_e4m3fn-text-enc", action="store_true", help="Store text encoder weights in fp8 (e4m3fn variant).")
diff --git a/comfy/model_management.py b/comfy/model_management.py
index f3d90c66819..e74c1d0a9d5 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -764,6 +764,11 @@ def vae_device():
         return torch.device("cpu")
     return get_torch_device()
 
+def model_sampling_device():
+    if args.cpu_model_sampling:
+        return torch.device("cpu")
+    return get_torch_device()
+
 def vae_offload_device():
     if args.gpu_only:
         return get_torch_device()
diff --git a/comfy/sd.py b/comfy/sd.py
index eabf0bda05d..7379bbc1a08 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -932,6 +932,10 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c
     if inital_load_device != torch.device("cpu"):
         logging.info("loaded diffusion model directly to GPU")
         model_management.load_models_gpu([model_patcher], force_full_load=True)
+        # damcclos: move model_sampling back to the CPU; the small amount of work it does is not worth keeping it on the GPU.
+        model_sampling_device = model_management.model_sampling_device()
+        if model_sampling_device == torch.device("cpu"):
+            model_patcher.model.model_sampling.to(model_sampling_device)
 
     return (model_patcher, clip, vae, clipvision)
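
For context, a minimal usage sketch of the new flag (not part of the patch; it assumes the command line has already been parsed by comfy.cli_args with --cpu-model-sampling set, and that importing comfy.model_management is acceptable in your setup):

```python
# Hedged sketch, not the patch itself: when --cpu-model-sampling is passed,
# model_sampling_device() reports the CPU, which is what makes
# load_state_dict_guess_config() move model_sampling back off the GPU
# after a direct-to-GPU load.
import torch
from comfy import model_management

if model_management.model_sampling_device() == torch.device("cpu"):
    print("model_sampling will be kept on the CPU")
```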