Skip to content

Commit

Permalink
Add advanced device option to clip loader nodes.
Browse files — browse the repository at this point in the history.
Right click the "Load CLIP" or DualCLIPLoader node and "Show Advanced".
  • Loading branch information
comfyanonymous committed Jan 5, 2025
1 parent d45ebb6 commit 5cbf797
Showing 1 changed file with 14 additions and 4 deletions.
18 changes: 14 additions & 4 deletions nodes.py
Original file line number Diff line number Diff line change
Expand Up @@ -913,6 +913,7 @@ class CLIPLoader:
def INPUT_TYPES(s):
    """Declare this node's required inputs: encoder checkpoint, encoder type, and load device."""
    checkpoint_choices = folder_paths.get_filename_list("text_encoders")
    encoder_kinds = [
        "stable_diffusion",
        "stable_cascade",
        "sd3",
        "stable_audio",
        "mochi",
        "ltxv",
        "pixart",
    ]
    return {
        "required": {
            "clip_name": (checkpoint_choices,),
            "type": (encoder_kinds,),
            # {"advanced": True} keeps the option hidden until "Show Advanced" is used.
            "device": (["default", "cpu"], {"advanced": True}),
        }
    }
# Node emits a single CLIP model output.
RETURN_TYPES = ("CLIP",)
# Name of the method ComfyUI calls to execute this node.
FUNCTION = "load_clip"
Expand All @@ -921,7 +922,7 @@ def INPUT_TYPES(s):

# UI help text listing a recipe per supported "type" value. Fix: the node's
# type input also accepts "ltxv" and "pixart" (both T5-based text encoders),
# which were missing from the recipe list.
DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 / clip-g / clip-l\nstable_audio: t5\nmochi: t5\nltxv: t5\npixart: t5"

def load_clip(self, clip_name, type="stable_diffusion"):
def load_clip(self, clip_name, type="stable_diffusion", device="default"):
if type == "stable_cascade":
clip_type = comfy.sd.CLIPType.STABLE_CASCADE
elif type == "sd3":
Expand All @@ -937,8 +938,12 @@ def load_clip(self, clip_name, type="stable_diffusion"):
else:
clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION

model_options = {}
if device == "cpu":
model_options["load_device"] = model_options["offload_device"] = torch.device("cpu")

clip_path = folder_paths.get_full_path_or_raise("text_encoders", clip_name)
clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type)
clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type, model_options=model_options)
return (clip,)

class DualCLIPLoader:
Expand All @@ -947,6 +952,7 @@ def INPUT_TYPES(s):
return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ),
"clip_name2": (folder_paths.get_filename_list("text_encoders"), ),
"type": (["sdxl", "sd3", "flux", "hunyuan_video"], ),
"device": (["default", "cpu"], {"advanced": True}),
}}
# Node emits a single CLIP model output (built from the two loaded encoders).
RETURN_TYPES = ("CLIP",)
# Name of the method ComfyUI calls to execute this node.
FUNCTION = "load_clip"
Expand All @@ -955,7 +961,7 @@ def INPUT_TYPES(s):

# UI help text listing a recipe per supported "type" value.
# NOTE(review): the type input also accepts "hunyuan_video", which has no
# recipe entry here — confirm the intended encoder pair and add it.
DESCRIPTION = "[Recipes]\n\nsdxl: clip-l, clip-g\nsd3: clip-l, clip-g / clip-l, t5 / clip-g, t5\nflux: clip-l, t5"

def load_clip(self, clip_name1, clip_name2, type):
def load_clip(self, clip_name1, clip_name2, type, device="default"):
clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1)
clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2)
if type == "sdxl":
Expand All @@ -967,7 +973,11 @@ def load_clip(self, clip_name1, clip_name2, type):
elif type == "hunyuan_video":
clip_type = comfy.sd.CLIPType.HUNYUAN_VIDEO

clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type)
model_options = {}
if device == "cpu":
model_options["load_device"] = model_options["offload_device"] = torch.device("cpu")

clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type, model_options=model_options)
return (clip,)

class CLIPVisionLoader:
Expand Down

0 comments on commit 5cbf797

Please sign in to comment.