Commit

fix gpu error

lldacing committed Dec 1, 2024
1 parent 5af7420 commit 4bbae92
Showing 4 changed files with 21 additions and 21 deletions.
37 changes: 18 additions & 19 deletions nodes/HairNode.py
@@ -30,8 +30,8 @@ def INPUT_TYPES(cls):
         if os.path.exists(stable_hair_path):
             for root, subdir, files in os.walk(stable_hair_path, followlinks=True):
                 for file in files:
-                    file_name, ext = file.split(".")
-                    if '.{}'.format(ext) in supported_pt_extensions:
+                    file_name_ext = file.split(".")
+                    if len(file_name_ext) > 1 and '.{}'.format(file_name_ext[-1]) in supported_pt_extensions:
                         model_paths.append(file)
         return {
             "required": {
@@ -48,6 +48,7 @@ def INPUT_TYPES(cls):
     CATEGORY = "hair/transfer"
 
     def load_model(self, ckpt_name, bald_model, device):
+        model_management.soft_empty_cache()
         sd15_model_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name)
         bald_model_path = folder_paths.get_full_path_or_raise("diffusers", hair_model_path_format.format(bald_model))
         if device == "AUTO":
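The added soft_empty_cache() call asks ComfyUI to release cached VRAM before the new weights are loaded. As a rough sketch of the idea (the real helper also covers non-CUDA backends), on a CUDA device it boils down to something like:

    import torch

    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # hand cached blocks back to the allocator
        torch.cuda.ipc_collect()  # reclaim memory held by dead IPC handles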
@@ -72,10 +73,7 @@ def load_model(self, ckpt_name, bald_model, device):
         remove_hair_pipeline.register_modules(controlnet=bald_converter)
 
         remove_hair_pipeline.scheduler = UniPCMultistepScheduler.from_config(remove_hair_pipeline.scheduler.config)
-        remove_hair_pipeline = remove_hair_pipeline.to(device_type)
-
-        if model_management.XFORMERS_IS_AVAILABLE and device_type == "cuda":
-            remove_hair_pipeline.enable_xformers_memory_efficient_attention()
+        remove_hair_pipeline.to(device_type)
 
         return remove_hair_pipeline,
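Dropping the reassignment on the .to(device_type) line is harmless here because a diffusers pipeline, unlike a plain tensor, moves its registered modules in place and returns itself. A quick check, assuming a loaded pipeline object named pipe:

    moved = pipe.to("cuda")  # moves every registered module to the GPU
    assert moved is pipe     # same object back, so reassignment is optional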

@@ -90,8 +88,8 @@ def INPUT_TYPES(cls):
         if os.path.exists(stable_hair_path):
             for root, subdir, files in os.walk(stable_hair_path, followlinks=True):
                 for file in files:
-                    file_name, ext = file.split(".")
-                    if '.{}'.format(ext) in supported_pt_extensions:
+                    file_name_ext = file.split(".")
+                    if len(file_name_ext) > 1 and '.{}'.format(file_name_ext[-1]) in supported_pt_extensions:
                         model_paths.append(file)
         return {
             "required": {
@@ -110,6 +108,7 @@ def INPUT_TYPES(cls):
     CATEGORY = "hair/transfer"
 
     def load_model(self, ckpt_name, encoder_model, adapter_model, control_model, device):
+        model_management.soft_empty_cache()
         sd15_model_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name)
         encoder_model_path = folder_paths.get_full_path_or_raise("diffusers",
                                                                  hair_model_path_format.format(encoder_model))
@@ -143,17 +142,14 @@ def load_model(self, ckpt_name, encoder_model, adapter_model, control_model, device):
         hair_encoder = RefHairUnet.from_config(pipeline.unet.config)
         _state_dict = torch.load(encoder_model_path)
         hair_encoder.load_state_dict(_state_dict, strict=False)
-        hair_encoder.to(device_type, dtype=weight_dtype)
         pipeline.register_modules(reference_encoder=hair_encoder)
 
         hair_adapter = adapter_injection(pipeline.unet, device=device_type, dtype=weight_dtype, use_resampler=False)
         _state_dict = torch.load(adapter_model_path)
 
         hair_adapter.load_state_dict(_state_dict, strict=False)
 
-        # Enable xformers
-        if model_management.XFORMERS_IS_AVAILABLE and device_type == "cuda":
-            pipeline.enable_xformers_memory_efficient_attention()
 
         return pipeline,
@@ -167,7 +163,10 @@ def INPUT_TYPES(cls):
                 "images": ("IMAGE",),
                 "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                 "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
-                "strength": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 1.0, "step": 0.01}),
+                "strength": ("FLOAT", {"default": 1.5, "min": 0.0, "max": 5.0, "step": 0.01}),
+            },
+            "optional": {
+                "cfg": ("FLOAT", {"default": 1.5, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
             }
         }
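ComfyUI passes entries declared under "optional" as keyword arguments only when they are provided, so the node method pairs them with a Python default (cfg=1.5, matching the hunks below). A minimal, illustrative node showing the pattern; the class and names here are hypothetical:

    class ExampleCfgNode:
        @classmethod
        def INPUT_TYPES(cls):
            return {
                "required": {
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                },
                "optional": {
                    "cfg": ("FLOAT", {"default": 1.5, "min": 0.0, "max": 100.0}),
                },
            }

        RETURN_TYPES = ("FLOAT",)
        FUNCTION = "run"
        CATEGORY = "hair/transfer"

        def run(self, steps, cfg=1.5):
            # cfg falls back to 1.5 when the optional input is not wired up
            return (cfg,)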

@@ -176,7 +175,7 @@ def INPUT_TYPES(cls):
     FUNCTION = "apply"
     CATEGORY = "hair/transfer"
 
-    def apply(self, bald_model, images, seed, steps, strength):
+    def apply(self, bald_model, images, seed, steps, strength, cfg=1.5):
         _images = []
         _masks = []
@@ -198,7 +197,7 @@ def callback_bar(step, timestep, latents):
                 prompt="",
                 negative_prompt="",
                 num_inference_steps=steps,
-                guidance_scale=1.5,
+                guidance_scale=cfg,
                 width=W,
                 height=H,
                 image=im_tensor.unsqueeze(0),
@@ -231,8 +230,8 @@ def INPUT_TYPES(cls):
                 "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                 "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                 "cfg": ("FLOAT", {"default": 1.5, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
-                "control_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
-                "adapter_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+                "control_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.01}),
+                "adapter_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.01}),
             }
         }
@@ -261,8 +260,8 @@ def apply(self, model, images, bald_image, seed, steps, cfg, control_strength, adapter_strength):
             def callback_bar(step, timestep, latents):
                 comfy_pbar.update(1)
 
-            ref_image_np = (image.numpy() * 255).astype(numpy.uint8)
-            bald_image_np = (bald_image.squeeze(0).numpy() * 255).astype(numpy.uint8)
+            ref_image_np = (image.cpu().numpy() * 255).astype(numpy.uint8)
+            bald_image_np = (bald_image.squeeze(0).cpu().numpy() * 255).astype(numpy.uint8)
             with torch.no_grad():
                 # Sample and transfer the hairstyle
                 result_image = model(
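The added .cpu() calls matter because numpy cannot address CUDA memory: calling .numpy() on a GPU tensor raises a TypeError. A small repro sketch:

    import torch

    t = torch.rand(2, 2)
    if torch.cuda.is_available():
        t = t.cuda()
        # t.numpy()  # TypeError: can't convert cuda:0 device type tensor to numpy
    arr = t.cpu().numpy()  # copy to host first; works for CPU and GPU tensors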
2 changes: 1 addition & 1 deletion nodes/libs/ref_encoder/reference_unet.py
@@ -835,7 +835,7 @@ def forward(
         # `Timesteps` does not contain any weights and will always return f32 tensors
         # but time_embedding might actually be running in fp16. so we need to cast here.
         # there might be better ways to encapsulate this.
-        t_emb = t_emb.to(dtype=sample.dtype)
+        t_emb = t_emb.to(sample.device, dtype=sample.dtype)
 
         emb = self.time_embedding(t_emb, timestep_cond)
         aug_emb = None
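Tensor.to accepts a device and a dtype together, which is what this fix relies on: the f32 timestep embedding may be created on the CPU while sample already sits on the GPU in fp16. Illustrative shapes only:

    import torch

    device = "cuda" if torch.cuda.is_available() else "cpu"
    sample = torch.zeros(1, 4, 64, 64, dtype=torch.float16, device=device)
    t_emb = torch.zeros(1, 320)                          # f32, on the CPU
    t_emb = t_emb.to(sample.device, dtype=sample.dtype)  # one call: move + cast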
1 change: 1 addition & 0 deletions nodes/libs/utils/pipeline.py
@@ -435,6 +435,7 @@ def __call__(
 
         ref_padding_latents = torch.ones_like(ref_image_latents) * -1
         ref_image_latents = torch.cat([ref_padding_latents, ref_image_latents]) if do_classifier_free_guidance else ref_image_latents
+        ref_image_latents.to(device)
 
         # Denoising loop
         for i, t in tqdm(enumerate(timesteps), total=len(timesteps), disable=(rank != 0)):
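One caveat worth noting when reading this hunk: torch.Tensor.to is not in-place; it returns the moved tensor and leaves the original untouched, so the result normally needs to be assigned back. A sketch of the difference:

    import torch

    latents = torch.rand(1, 4, 64, 64)  # starts on the CPU
    device = "cuda" if torch.cuda.is_available() else "cpu"

    latents.to(device)            # returns a moved copy; `latents` is unchanged
    latents = latents.to(device)  # the assignment form actually keeps the move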
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,7 +1,7 @@
 [project]
 name = "comfyui_stablehair_ll"
 description = "Hair transfer"
-version = "1.0.0"
+version = "1.0.1"
 license = {file = "LICENSE"}
 dependencies = ["numpy"]