Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: newest comfyui+torch+cu124 by default; fix: broken qrcode dep #349

Merged
merged 8 commits into from
Oct 3, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 26 additions & 0 deletions hordelib/comfy_horde.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,7 @@

_comfy_is_changed_cache_get: Callable
_comfy_model_patcher_load: Callable
_comfy_load_calculate_weight: Callable

_comfy_interrupt_current_processing: types.FunctionType

Expand Down Expand Up @@ -265,6 +266,12 @@ def do_comfy_import(
_comfy_model_patcher_load = ModelPatcher.load
ModelPatcher.load = _model_patcher_load_hijack # type: ignore

global _comfy_load_calculate_weight
import comfy.lora
from comfy.lora import calculate_weight as _comfy_load_calculate_weight

comfy.lora.calculate_weight = _calculate_weight_hijack # type: ignore

from hordelib.nodes.comfy_controlnet_preprocessors import (
canny as _canny,
hed as _hed,
Expand Down Expand Up @@ -352,6 +359,25 @@ def _model_patcher_load_hijack(*args, **kwargs):
_comfy_model_patcher_load(*args, **kwargs)


def _calculate_weight_hijack(*args, **kwargs):
    """Wrapper installed over ComfyUI's ``comfy.lora.calculate_weight``.

    Before delegating to the original implementation (saved in the module
    global ``_comfy_load_calculate_weight``), this scans the patch list in
    ``args[0]`` and, for each "diff"-type patch whose two-element value
    carries a list, replaces the first ``None`` entry in that list with
    ``{"pad_weight": False}`` — presumably to stop ComfyUI from padding
    weights for such patches (TODO: confirm against comfy.lora semantics).
    All positional and keyword arguments are forwarded unchanged.
    """
    for patch in args[0]:
        patch_value = patch[1]
        # Only "diff"-type patches are rewritten; everything else passes through.
        if patch_value[0] != "diff":
            continue
        if len(patch_value) == 2 and isinstance(patch_value[1], list):
            entries = patch_value[1]
            for position, entry in enumerate(entries):
                if entry is None:
                    # Mutates the patch list in place; only the first None is touched.
                    entries[position] = {"pad_weight": False}
                    break

    return _comfy_load_calculate_weight(*args, **kwargs)


_last_pipeline_settings_hash = ""

import PIL.Image
Expand Down
2 changes: 1 addition & 1 deletion hordelib/consts.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

from hordelib.config_path import get_hordelib_path

COMFYUI_VERSION = "ca08597670c180554ab494e9452e12132d9b346a"
COMFYUI_VERSION = "3bb4dec720bd3be13a6fd381f641929386476efc"
"""The exact version of ComfyUI version to load."""

REMOTE_PROXY = ""
Expand Down
2 changes: 1 addition & 1 deletion hordelib/model_manager/lora.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ class LoraModelManager(BaseModelManager):
MAX_DOWNLOAD_THREADS = 5 if not TESTS_ONGOING else 75
RETRY_DELAY = 3 if not TESTS_ONGOING else 0.2
"""The time to wait between retries in seconds"""
REQUEST_METADATA_TIMEOUT = 20 # Longer because civitai performs poorly on metadata requests for more than 5 models
REQUEST_METADATA_TIMEOUT = 35 # Longer because civitai performs poorly on metadata requests for more than 5 models
"""The maximum time for no data to be received before we give up on a metadata fetch, in seconds"""
REQUEST_DOWNLOAD_TIMEOUT = 10 if not TESTS_ONGOING else 1
"""The maximum time for no data to be received before we give up on a download, in seconds
Expand Down
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
10 changes: 5 additions & 5 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
# Add this in for tox, comment out for build
--extra-index-url https://download.pytorch.org/whl/cu121
horde_sdk>=0.14.3
horde_model_reference>=0.9.0
--extra-index-url https://download.pytorch.org/whl/cu124
horde_sdk>=0.15.0
horde_model_reference>=0.9.1
pydantic
numpy==1.26.4
torch>=2.3.1
torch>=2.4.1
# xformers>=0.0.19
torchvision
# torchaudio
Expand Down Expand Up @@ -49,7 +49,7 @@ unidecode
fuzzywuzzy
strenum
kornia
qrcode
qrcode==7.4.2
spandrel
spandrel_extra_arches
lpips
16 changes: 16 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,13 +122,17 @@ def isolated_comfy_horde_instance(init_horde) -> Comfy_Horde:
_sdxl_refined_model_name = "AlbedoBase XL (SDXL)"
_stable_cascade_base_model_name = "Stable Cascade 1.0"
_flux1_schnell_fp8_base_model_name = "Flux.1-Schnell fp8 (Compact)"
_am_pony_xl_model_name = "AMPonyXL"
_rev_animated_model_name = "Rev Animated"

_all_model_names = [
_testing_model_name,
_sdxl_1_0_model_name,
_sdxl_refined_model_name,
_stable_cascade_base_model_name,
_flux1_schnell_fp8_base_model_name,
_am_pony_xl_model_name,
_rev_animated_model_name,
]

# !!!!
Expand Down Expand Up @@ -166,6 +170,18 @@ def flux1_schnell_fp8_base_model_name(shared_model_manager: type[SharedModelMana
return _flux1_schnell_fp8_base_model_name


@pytest.fixture(scope="session")
def am_pony_xl_model_name(shared_model_manager: type[SharedModelManager]) -> str:
    """Session-scoped fixture exposing the canonical AMPonyXL model name used by tests."""
    model_name = _am_pony_xl_model_name
    return model_name


@pytest.fixture(scope="session")
def rev_animated_model_name(shared_model_manager: type[SharedModelManager]) -> str:
    """Session-scoped fixture exposing the canonical Rev Animated model name used by tests."""
    model_name = _rev_animated_model_name
    return model_name


# !!!!
# If you're adding a model name, follow the pattern and **add it to `_all_model_names`**
# !!!!
Expand Down
72 changes: 36 additions & 36 deletions tests/test_horde_inference_flux.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,42 +11,42 @@

class TestHordeInferenceFlux:

# @pytest.mark.default_flux1_model
# def test_flux_dev_fp8_text_to_image(
# self,
# hordelib_instance: HordeLib,
# flux1_dev_fp8_base_model_name: str,
# ):
# data = {
# "sampler_name": "k_euler",
# "cfg_scale": 1,
# "denoising_strength": 1.0,
# "seed": 13122,
# "height": 1024,
# "width": 1024,
# "karras": False,
# "tiling": False,
# "hires_fix": False,
# "clip_skip": 1,
# "control_type": None,
# "image_is_control": False,
# "return_control_map": False,
# "prompt": 'a steampunk text that says "Horde Engine" floating',
# "ddim_steps": 20,
# "n_iter": 1,
# "model": flux1_dev_fp8_base_model_name,
# }
# pil_image = hordelib_instance.basic_inference_single_image(data).image
# assert pil_image is not None
# assert isinstance(pil_image, Image.Image)
@pytest.mark.default_flux1_model
def test_flux_schnell_fp8_text_to_image(
self,
hordelib_instance: HordeLib,
flux1_schnell_fp8_base_model_name: str,
):
data = {
"sampler_name": "k_euler",
"cfg_scale": 1,
"denoising_strength": 1.0,
"seed": 13122,
"height": 1024,
"width": 1024,
"karras": False,
"tiling": False,
"hires_fix": False,
"clip_skip": 1,
"control_type": None,
"image_is_control": False,
"return_control_map": False,
"prompt": 'a steampunk text that says "Horde Engine" floating',
"ddim_steps": 4,
"n_iter": 1,
"model": flux1_schnell_fp8_base_model_name,
}
pil_image = hordelib_instance.basic_inference_single_image(data).image
assert pil_image is not None
assert isinstance(pil_image, Image.Image)

# img_filename = "flux_dev_fp8_text_to_image.png"
# pil_image.save(f"images/{img_filename}", quality=100)
img_filename = "flux_schnell_fp8_text_to_image.png"
pil_image.save(f"images/{img_filename}", quality=100)

# assert check_single_inference_image_similarity(
# f"images_expected/{img_filename}",
# pil_image,
# )
assert check_single_inference_image_similarity(
f"images_expected/{img_filename}",
pil_image,
)

@pytest.mark.default_flux1_model
def test_flux_schnell_fp8_text_to_image_n_iter(
Expand Down Expand Up @@ -91,8 +91,8 @@ def test_flux_schnell_fp8_text_to_image_n_iter(
img_pairs_to_check.append((f"images_expected/{img_filename}", image_result.image))

assert check_single_inference_image_similarity(
"images_expected/text_to_image.png",
"images/text_to_image_n_iter_0.png",
"images_expected/flux_schnell_fp8_text_to_image.png",
"images/flux_schnell_fp8_text_to_image_n_iter_0.png",
)

assert check_list_inference_images_similarity(img_pairs_to_check)
Expand Down
9 changes: 6 additions & 3 deletions tests/test_horde_lora.py
Original file line number Diff line number Diff line change
Expand Up @@ -390,6 +390,7 @@ def test_text_to_image_lora_character_hires_fix(
self,
shared_model_manager: type[SharedModelManager],
hordelib_instance: HordeLib,
rev_animated_model_name: str,
):
assert shared_model_manager.manager.lora

Expand Down Expand Up @@ -435,7 +436,7 @@ def test_text_to_image_lora_character_hires_fix(
],
"ddim_steps": 30,
"n_iter": 1,
"model": "Rev Animated",
"model": rev_animated_model_name,
}

pil_image = hordelib_instance.basic_inference_single_image(data).image
Expand All @@ -454,6 +455,7 @@ def test_text_to_image_lora_character_hires_fix_sdxl(
self,
shared_model_manager: type[SharedModelManager],
hordelib_instance: HordeLib,
am_pony_xl_model_name: str,
):
assert shared_model_manager.manager.lora

Expand Down Expand Up @@ -488,7 +490,7 @@ def test_text_to_image_lora_character_hires_fix_sdxl(
],
"ddim_steps": 12,
"n_iter": 1,
"model": "AMPonyXL",
"model": am_pony_xl_model_name,
}

pil_image = hordelib_instance.basic_inference_single_image(data).image
Expand Down Expand Up @@ -1010,6 +1012,7 @@ def test_login_gated_lora(
self,
shared_model_manager: type[SharedModelManager],
hordelib_instance: HordeLib,
sdxl_1_0_base_model_name: str,
):
assert shared_model_manager.manager.lora

Expand Down Expand Up @@ -1044,7 +1047,7 @@ def test_login_gated_lora(
"loras": [{"name": download_gated_lora_version_id, "model": 1.0, "clip": 1.0, "is_version": True}],
"ddim_steps": 25,
"n_iter": 1,
"model": "SDXL 1.0",
"model": sdxl_1_0_base_model_name,
}
ret = hordelib_instance.basic_inference_single_image(data)
assert isinstance(ret, ResultingImageReturn)
Expand Down
Loading