Commit

fix: downloading loras on fast systems no longer causes duplicate images (#133)

fix: downloading loras on fast systems no longer causes duplicate images
fix: bug with _default_lora_ids not being initialized in the LoraModelManager
ci: adds tests for LCM loras
feat: adds sgm_uniform and exponential to the list of supported schedulers
feat: reintroduces HordeLoraLoader
---------

Co-authored-by: tazlin <[email protected]>
db0 and tazlin authored Dec 29, 2023
1 parent 73b2bab commit 1fbce7d
Showing 10 changed files with 294 additions and 39 deletions.
40 changes: 22 additions & 18 deletions hordelib/comfy_horde.py
@@ -194,19 +194,21 @@ def do_comfy_import(force_normal_vram_mode: bool = False, extra_comfyui_args: li
 def recursive_output_delete_if_changed_hijack(prompt: dict, old_prompt, outputs, current_item):
     global _last_pipeline_settings_hash
     if current_item == "prompt":
-        pipeline_settings_hash = hashlib.md5(json.dumps(prompt).encode("utf-8")).hexdigest()
-        logger.debug(f"pipeline_settings_hash: {pipeline_settings_hash}")
-
-        if pipeline_settings_hash != _last_pipeline_settings_hash:
-            _last_pipeline_settings_hash = pipeline_settings_hash
-            logger.debug("Pipeline settings changed")
-
-        if old_prompt:
-            old_pipeline_settings_hash = hashlib.md5(json.dumps(old_prompt).encode("utf-8")).hexdigest()
-            logger.debug(f"old_pipeline_settings_hash: {old_pipeline_settings_hash}")
-            if pipeline_settings_hash != old_pipeline_settings_hash:
-                logger.debug("Pipeline settings changed from old_prompt")
-
+        try:
+            pipeline_settings_hash = hashlib.md5(json.dumps(prompt).encode("utf-8")).hexdigest()
+            logger.debug(f"pipeline_settings_hash: {pipeline_settings_hash}")
+
+            if pipeline_settings_hash != _last_pipeline_settings_hash:
+                _last_pipeline_settings_hash = pipeline_settings_hash
+                logger.debug("Pipeline settings changed")
+
+            if old_prompt:
+                old_pipeline_settings_hash = hashlib.md5(json.dumps(old_prompt).encode("utf-8")).hexdigest()
+                logger.debug(f"old_pipeline_settings_hash: {old_pipeline_settings_hash}")
+                if pipeline_settings_hash != old_pipeline_settings_hash:
+                    logger.debug("Pipeline settings changed from old_prompt")
+        except TypeError:
+            logger.debug("could not print hash due to source image in payload")
     if current_item == "prompt" or current_item == "negative_prompt":
         try:
             prompt_text = prompt[current_item]["inputs"]["text"]
@@ -292,7 +294,7 @@ class Comfy_Horde:
         "SaveImage": "HordeImageOutput",
         "LoadImage": "HordeImageLoader",
         # "DiffControlNetLoader": "HordeDiffControlNetLoader",
-        # "LoraLoader": "HordeLoraLoader",
+        "LoraLoader": "HordeLoraLoader",
     }
     """A mapping of ComfyUI node types to Horde node types."""
 
@@ -330,6 +332,8 @@ def __init__(
         self._gc_timer = time.time()
         self._counter_mutex = threading.Lock()
 
+        self.images = []
+
         # Set comfyui paths for checkpoints, loras, etc
         self._set_comfyui_paths()
 
@@ -678,10 +682,10 @@ def _run_pipeline(self, pipeline: dict, params: dict) -> list[dict] | None:
         # Which gives us these nice hardcoded list indexes, which valid[2] is the output node list
         self.client_id = str(uuid.uuid4())
         valid = _comfy_validate_prompt(pipeline)
-        inference.outputs = {}
-        inference.object_storage = {}
-        inference.outputs_ui = {}
-        inference.old_prompt = {}
+        import folder_paths
+
+        if "embeddings" in folder_paths.filename_list_cache:
+            del folder_paths.filename_list_cache["embeddings"]
         inference.execute(pipeline, self.client_id, {"client_id": self.client_id}, valid[2])
 
         stdio.replay()
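
Note on the hunk above: json.dumps cannot serialize raw image data, so hashing a payload that carries a source image raises TypeError, which the new try/except now catches. A minimal standalone sketch of the failure mode (not hordelib code; the payload contents are invented):

```python
import hashlib
import json

payload = {"prompt": "a cat", "steps": 20}
# Serializable payloads hash normally.
print(hashlib.md5(json.dumps(payload).encode("utf-8")).hexdigest())

payload["source_image"] = b"\x89PNG..."  # raw bytes are not JSON-serializable
try:
    hashlib.md5(json.dumps(payload).encode("utf-8")).hexdigest()
except TypeError:
    print("could not print hash due to source image in payload")
```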
8 changes: 4 additions & 4 deletions hordelib/horde.py
@@ -76,6 +76,7 @@ class HordeLib:
         "uni_pc": "uni_pc",
         "uni_pc_bh2": "uni_pc_bh2",
         "plms": "euler",
+        "lcm": "lcm",
     }
 
     # Horde names on the left, our node names on the right
@@ -115,7 +116,7 @@ class HordeLib:
 
     SOURCE_IMAGE_PROCESSING_OPTIONS = ["img2img", "inpainting", "outpainting"]
 
-    SCHEDULERS = ["normal", "karras", "simple", "ddim_uniform"]
+    SCHEDULERS = ["normal", "karras", "simple", "ddim_uniform", "sgm_uniform", "exponential"]
 
     # Describe a valid payload, it's types and bounds. All incoming payload data is validated against,
     # and normalised to, this schema.
@@ -558,7 +559,7 @@ def _final_pipeline_adjustments(self, payload, pipeline_data):
                     "strength_clip": lora["clip"],
                     # "model_manager": SharedModelManager,
                 },
-                "class_type": "LoraLoader",
+                "class_type": "HordeLoraLoader",
             }
         else:
             # Subsequent chained loras
@@ -571,7 +572,7 @@ def _final_pipeline_adjustments(self, payload, pipeline_data):
                     "strength_clip": lora["clip"],
                     # "model_manager": SharedModelManager,
                 },
-                "class_type": "LoraLoader",
+                "class_type": "HordeLoraLoader",
             }
 
         for lora_index in range(len(payload.get("loras"))):
@@ -657,7 +658,6 @@ def _final_pipeline_adjustments(self, payload, pipeline_data):
         # the source image instead of the latent noise generator
         if pipeline_params.get("image_loader.image"):
             self.generator.reconnect_input(pipeline_data, "sampler.latent_image", "vae_encode")
-
         return pipeline_params, faults
 
     def _get_appropriate_pipeline(self, params):
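
Note on the class_type swaps above: _final_pipeline_adjustments emits one loader node per lora in the payload, and each subsequent node consumes the previous node's MODEL/CLIP outputs. A hypothetical sketch of the resulting pipeline fragment (node ids and lora names invented; input references follow ComfyUI's [node_id, output_index] convention):

```python
pipeline_fragment = {
    "lora_0": {
        "inputs": {
            "model": ["model_loader", 0],  # MODEL output of the checkpoint loader
            "clip": ["model_loader", 1],   # CLIP output of the checkpoint loader
            "lora_name": "first_lora.safetensors",
            "strength_model": 1.0,
            "strength_clip": 1.0,
        },
        "class_type": "HordeLoraLoader",
    },
    "lora_1": {
        "inputs": {
            "model": ["lora_0", 0],  # chained off the previous lora's outputs
            "clip": ["lora_0", 1],
            "lora_name": "second_lora.safetensors",
            "strength_model": 0.5,
            "strength_clip": 0.5,
        },
        "class_type": "HordeLoraLoader",
    },
}
```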
22 changes: 18 additions & 4 deletions hordelib/model_manager/lora.py
@@ -99,6 +99,7 @@ def __init__(
         self._index_version_ids = {}  # type: ignore # FIXME: add type
         self._index_orig_names = {}  # type: ignore # FIXME: add type
         self.total_retries_attempted = 0
+        self._default_lora_ids: list = []
 
         models_db_path = LEGACY_REFERENCE_FOLDER.joinpath("lora.json").resolve()
 
Expand Down Expand Up @@ -178,6 +179,14 @@ def load_model_database(self) -> None:
}
new_model_reference[lora_key] = new_lora_entry
else:
existing_versions = {}
for version in lora["versions"]:
filepath = os.path.join(self.model_folder_path, lora["versions"][version]["filename"])
if not Path(f"{filepath}").exists():
logger.warning(f"{filepath} doesn't exist. Removing lora version from reference.")
continue
existing_versions[version] = lora["versions"][version]
lora["versions"] = existing_versions
new_model_reference[old_lora_key] = lora
for lora in new_model_reference.values():
self._index_ids[lora["id"]] = lora["name"]
@@ -206,10 +215,11 @@ def download_model_reference(self):
 
     def _get_lora_defaults(self):
         try:
-            json_ret = self._get_json(self.LORA_DEFAULTS)
-            if not json_ret:
+            self._default_lora_ids = self._get_json(self.LORA_DEFAULTS)
+            if not isinstance(self._default_lora_ids, list):
                 logger.error("Could not download default LoRas reference!")
-            self._add_lora_ids_to_download_queue(json_ret)
+                self._default_lora_ids = []
+            self._add_lora_ids_to_download_queue(self._default_lora_ids)
 
         except Exception as err:
             logger.error(f"_get_lora_defaults() raised {err}")
@@ -375,7 +385,11 @@ def _parse_civitai_lora_data(self, item, adhoc=False):
             return None
         # We don't want to start downloading GBs of a single LoRa.
         # We just ignore anything over 150Mb. Them's the breaks...
-        if lora["versions"][lora_version]["adhoc"] and lora["versions"][lora_version]["size_mb"] > 220:
+        if (
+            lora["versions"][lora_version]["adhoc"]
+            and lora["versions"][lora_version]["size_mb"] > 220
+            and lora["id"] not in self._default_lora_ids
+        ):
             logger.debug(f"Rejecting LoRa {lora.get('name')} version {lora_version} because its size is over 220Mb.")
             return None
         if lora["versions"][lora_version]["adhoc"] and lora["nsfw"] and not self.nsfw:
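
Note on the two lora.py hunks above: _default_lora_ids is filled once from LORA_DEFAULTS (and is now initialized in __init__, fixing the uninitialized-attribute bug) and doubles as an exemption list, so curated default loras are still accepted when they exceed the 220MB ad-hoc size cap. A condensed sketch of the gate (ids and sizes invented):

```python
default_lora_ids = [1234, 5678]  # hypothetical ids fetched from LORA_DEFAULTS
lora = {"id": 1234, "versions": {"v1": {"adhoc": True, "size_mb": 400}}}

version = lora["versions"]["v1"]
rejected = (
    version["adhoc"]
    and version["size_mb"] > 220
    and lora["id"] not in default_lora_ids
)
print(rejected)  # False: a default lora may exceed the ad-hoc cap
```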
57 changes: 44 additions & 13 deletions hordelib/nodes/node_lora_loader.py
@@ -1,20 +1,23 @@
 import os
 
-from comfy.sd import load_lora_for_models
+import comfy.utils
+import folder_paths
 from loguru import logger
 
 
 class HordeLoraLoader:
+    def __init__(self):
+        self.loaded_lora = None
+
     @classmethod
     def INPUT_TYPES(s):
         return {
             "required": {
                 "model": ("MODEL",),
                 "clip": ("CLIP",),
-                "lora_name": ("STRING",),
-                "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
-                "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
-                "model_manager": ("MODEL_MANAGER",),
+                "lora_name": ("STRING", {"default": ""}),
+                "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
+                "strength_clip": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
             },
         }
 
@@ -23,15 +26,43 @@ def INPUT_TYPES(s):
 
     CATEGORY = "loaders"
 
-    def load_lora(self, model, clip, lora_name, strength_model, strength_clip, model_manager):
-        if model_manager.manager is None:
-            logger.error("LoraLoader node was not passed a model manager")
-            raise RuntimeError
+    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
         if strength_model == 0 and strength_clip == 0:
             return (model, clip)
 
+        if lora_name == "":
+            logger.warning("No lora name provided, skipping lora loading")
+            return (model, clip)
+
+        if not os.path.exists(folder_paths.get_full_path("loras", lora_name)):
+            logger.warning(f"Lora file {lora_name} does not exist, skipping lora loading")
+            return (model, clip)
+
+        loras_on_disk = folder_paths.get_filename_list("loras")
+
+        if "loras" in folder_paths.filename_list_cache:
+            del folder_paths.filename_list_cache["loras"]
+
+        if lora_name not in loras_on_disk:
+            logger.warning(f"Lora file {lora_name} does not exist, skipping lora loading")
+            return (model, clip)
+
+        lora_path = folder_paths.get_full_path("loras", lora_name)
+
+        lora = None
+        if self.loaded_lora is not None:
+            if self.loaded_lora[0] == lora_path:
+                lora = self.loaded_lora[1]
+            else:
+                temp = self.loaded_lora
+                self.loaded_lora = None
+                del temp
+
+        if lora is None:
+            lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
+            self.loaded_lora = (lora_path, lora)
+
-        lora_path = model_manager.manager.get_model_directory("lora")
-        lora_path = os.path.join(lora_path, lora_name)
-        # XXX This should call back to the hordelib model manager once it has support
-        model_lora, clip_lora = load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
+        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
         return (model_lora, clip_lora)
 
 
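
Note on the cache handling above (and the matching "embeddings" invalidation in comfy_horde.py): ComfyUI memoizes directory listings in folder_paths.filename_list_cache, so a lora that finished downloading a moment earlier can be missing from get_filename_list on fast systems. Deleting the cache entry forces a re-scan on the next lookup. A sketch of the pattern, assuming the same folder_paths API used in the diff:

```python
import folder_paths  # ComfyUI module; only importable inside a ComfyUI process

def fresh_lora_names() -> list[str]:
    # Read the (possibly cached) listing, then drop the cache entry so the
    # next call re-scans the loras folder on disk.
    names = folder_paths.get_filename_list("loras")
    folder_paths.filename_list_cache.pop("loras", None)
    return names
```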
Binary file added images_expected/lcm_lora_lcm_1_5.png
Binary file added images_expected/lcm_lora_turbomix_dpmpp_sde.png
Binary file added images_expected/lcm_lora_turbomix_euler_a.png
Binary file added images_expected/lcm_lora_turbomix_lcm.png
Binary file added images_expected/sampler_30_steps_lcm.png
