From 66f2cd3be957f68042ce7966eb760c7a8931cdc1 Mon Sep 17 00:00:00 2001 From: tazlin Date: Thu, 3 Oct 2024 13:46:08 -0400 Subject: [PATCH 01/32] fix: don't run image.save() twice This already functionally happens in hordelib. We can use the bytestream passed from it directly. --- horde_worker_regen/process_management/inference_process.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/horde_worker_regen/process_management/inference_process.py b/horde_worker_regen/process_management/inference_process.py index e3603c60..1a6b0aba 100644 --- a/horde_worker_regen/process_management/inference_process.py +++ b/horde_worker_regen/process_management/inference_process.py @@ -585,13 +585,11 @@ def send_inference_result_message( if results is not None: for result in results: - buffered_image = io.BytesIO() - if result.image is None: + if result.rawpng is None: logger.critical("Result or result image is None") continue - result.image.save(buffered_image, format="PNG") - image_base64 = base64.b64encode(buffered_image.getvalue()).decode("utf-8") + image_base64 = base64.b64encode(result.rawpng.getvalue()).decode("utf-8") all_image_results.append( HordeImageResult( image_base64=image_base64, From 10386d9094408751e505bd7aaab87369552de48d Mon Sep 17 00:00:00 2001 From: tazlin Date: Mon, 30 Sep 2024 21:03:16 -0400 Subject: [PATCH 02/32] feat: use torch 2.4.1 and cu124 by default --- .pre-commit-config.yaml | 2 +- Dockerfiles/Dockerfile.12.1.1-22.04 | 2 +- Dockerfiles/Dockerfile.12.2.2-22.04 | 2 +- Dockerfiles/Dockerfile.12.3.2-22.04 | 2 +- README_advanced.md | 2 +- requirements.rocm.txt | 2 +- requirements.txt | 2 +- tox.ini | 2 +- update-runtime.cmd | 6 +++--- update-runtime.sh | 4 ++-- 10 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b127c901..7ed436f3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -38,7 +38,7 @@ repos: - python-dotenv - aiohttp - horde_safety==0.2.3 - - torch==2.3.1 + - torch==2.4.1 - ruamel.yaml - horde_engine==2.15.3 - horde_sdk==0.14.11 diff --git a/Dockerfiles/Dockerfile.12.1.1-22.04 b/Dockerfiles/Dockerfile.12.1.1-22.04 index 70aa0d3a..9e9f1173 100644 --- a/Dockerfiles/Dockerfile.12.1.1-22.04 +++ b/Dockerfiles/Dockerfile.12.1.1-22.04 @@ -17,7 +17,7 @@ RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \ python3 -m venv venv && \ . venv/bin/activate && \ python -m pip install --upgrade pip && \ - python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu121 && \ + python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu124 && \ python -m pip cache purge RUN apt-get update && apt-get install libgl1 -y diff --git a/Dockerfiles/Dockerfile.12.2.2-22.04 b/Dockerfiles/Dockerfile.12.2.2-22.04 index aeb4a8d9..348cef3b 100644 --- a/Dockerfiles/Dockerfile.12.2.2-22.04 +++ b/Dockerfiles/Dockerfile.12.2.2-22.04 @@ -17,7 +17,7 @@ RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \ python3 -m venv venv && \ . 
venv/bin/activate && \ python -m pip install --upgrade pip && \ - python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu121 && \ + python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu124 && \ python -m pip cache purge RUN apt-get update && apt-get install libgl1 -y diff --git a/Dockerfiles/Dockerfile.12.3.2-22.04 b/Dockerfiles/Dockerfile.12.3.2-22.04 index 19fdda0f..5e7b36a3 100644 --- a/Dockerfiles/Dockerfile.12.3.2-22.04 +++ b/Dockerfiles/Dockerfile.12.3.2-22.04 @@ -17,7 +17,7 @@ RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \ python3 -m venv venv && \ . venv/bin/activate && \ python -m pip install --upgrade pip && \ - python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu121 && \ + python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu124 && \ python -m pip cache purge RUN apt-get update && apt-get install libgl1 -y diff --git a/README_advanced.md b/README_advanced.md index ef5c31fd..6b781028 100644 --- a/README_advanced.md +++ b/README_advanced.md @@ -39,7 +39,7 @@ ### Get worker files and install dependencies - `git clone https://github.com/Haidra-Org/horde-worker-reGen.git` - `cd .\horde-worker-reGen\` -- `pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu121` +- `pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu124` ### Run worker - Set your config now, copying `bridgeData_template.yaml` to `bridgeData.yaml`, being sure to set an API key and worker name at a minimum diff --git a/requirements.rocm.txt b/requirements.rocm.txt index f7dc6864..69314fb9 100644 --- a/requirements.rocm.txt +++ b/requirements.rocm.txt @@ -1,5 +1,5 @@ numpy==1.26.4 -torch==2.3.1+rocm6.0 +torch==2.4.1+rocm6.0 horde_sdk~=0.14.11 horde_safety~=0.2.3 diff --git a/requirements.txt b/requirements.txt index c039b1fe..6eeeef6d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ numpy==1.26.4 -torch==2.3.1 +torch==2.4.1 qrcode==7.4.2 horde_sdk~=0.14.11 diff --git a/tox.ini b/tox.ini index 4cabad41..76a707e6 100644 --- a/tox.ini +++ b/tox.ini @@ -28,7 +28,7 @@ deps = pytest-sugar pytest-cov requests - -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu121 + -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu124 commands = pytest tests {posargs} --cov diff --git a/update-runtime.cmd b/update-runtime.cmd index 0375811a..adc817f4 100644 --- a/update-runtime.cmd +++ b/update-runtime.cmd @@ -49,16 +49,16 @@ micromamba.exe shell hook -s cmd.exe -p %MAMBA_ROOT_PREFIX% -v call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat" call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate windows -python -s -m pip install torch==2.3.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu121 -U +python -s -m pip install torch==2.4.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu124 -U if defined hordelib ( python -s -m pip uninstall -y hordelib horde_engine horde_model_reference - python -s -m pip install horde_engine horde_model_reference --extra-index-url https://download.pytorch.org/whl/cu121 + python -s -m pip install horde_engine horde_model_reference --extra-index-url https://download.pytorch.org/whl/cu124 ) else ( if defined scribe ( python -s -m pip install -r requirements-scribe.txt ) 
else ( - python -s -m pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu121 -U + python -s -m pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu124 -U ) ) call deactivate diff --git a/update-runtime.sh b/update-runtime.sh index 30eeb060..5fa66bfa 100755 --- a/update-runtime.sh +++ b/update-runtime.sh @@ -35,7 +35,7 @@ ${SCRIPT_DIR}/bin/micromamba create --no-shortcuts -r "$SCRIPT_DIR/conda" -n lin if [ "$hordelib" = true ]; then ${SCRIPT_DIR}/bin/micromamba run -r "$SCRIPT_DIR/conda" -n linux python -s -m pip uninstall -y hordelib horde_engine horde_sdk horde_model_reference - ${SCRIPT_DIR}/bin/micromamba run -r "$SCRIPT_DIR/conda" -n linux python -s -m pip install horde_engine horde_model_reference --extra-index-url https://download.pytorch.org/whl/cu121 + ${SCRIPT_DIR}/bin/micromamba run -r "$SCRIPT_DIR/conda" -n linux python -s -m pip install horde_engine horde_model_reference --extra-index-url https://download.pytorch.org/whl/cu124 else - ${SCRIPT_DIR}/bin/micromamba run -r "$SCRIPT_DIR/conda" -n linux python -s -m pip install -r "$SCRIPT_DIR/requirements.txt" -U --extra-index-url https://download.pytorch.org/whl/cu121 + ${SCRIPT_DIR}/bin/micromamba run -r "$SCRIPT_DIR/conda" -n linux python -s -m pip install -r "$SCRIPT_DIR/requirements.txt" -U --extra-index-url https://download.pytorch.org/whl/cu124 fi From 42e2bf434188b9b51e386cde8e5789a449541cdc Mon Sep 17 00:00:00 2001 From: tazlin Date: Thu, 3 Oct 2024 13:52:53 -0400 Subject: [PATCH 03/32] feat: use latest horde deps w/ latest comfyui+fixes --- requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 6eeeef6d..f2009c5a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,10 +2,10 @@ numpy==1.26.4 torch==2.4.1 qrcode==7.4.2 -horde_sdk~=0.14.11 +horde_sdk~=0.15.0 horde_safety~=0.2.3 -horde_engine~=2.15.3 -horde_model_reference>=0.9.0 +horde_engine~=2.16.0 +horde_model_reference>=0.9.1 python-dotenv ruamel.yaml From f1b80daa5b86b1ef891d5aca7a120d816741c2b0 Mon Sep 17 00:00:00 2001 From: tazlin Date: Thu, 3 Oct 2024 14:00:57 -0400 Subject: [PATCH 04/32] build/fix: condense and update dockerfiles --- Dockerfiles/Dockerfile.12.1.1-22.04 | 4 ++-- Dockerfiles/Dockerfile.12.2.2-22.04 | 4 ++-- Dockerfiles/Dockerfile.12.3.2-22.04 | 4 ++-- Dockerfiles/Dockerfile.12.4-22.04 | 30 +++++++++++++++++++++++++++++ 4 files changed, 36 insertions(+), 6 deletions(-) create mode 100644 Dockerfiles/Dockerfile.12.4-22.04 diff --git a/Dockerfiles/Dockerfile.12.1.1-22.04 b/Dockerfiles/Dockerfile.12.1.1-22.04 index 9e9f1173..d0a0b29f 100644 --- a/Dockerfiles/Dockerfile.12.1.1-22.04 +++ b/Dockerfiles/Dockerfile.12.1.1-22.04 @@ -9,6 +9,7 @@ RUN apt-get update && \ python3.11 \ python3-pip \ python3-venv \ + libgl1 \ git RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \ @@ -18,10 +19,9 @@ RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \ . venv/bin/activate && \ python -m pip install --upgrade pip && \ python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu124 && \ + python -m pip install opencv-python-headless -U && \ python -m pip cache purge -RUN apt-get update && apt-get install libgl1 -y - CMD cd /horde-worker-reGen && \ git pull && \ . 
venv/bin/activate && \ diff --git a/Dockerfiles/Dockerfile.12.2.2-22.04 b/Dockerfiles/Dockerfile.12.2.2-22.04 index 348cef3b..819198cc 100644 --- a/Dockerfiles/Dockerfile.12.2.2-22.04 +++ b/Dockerfiles/Dockerfile.12.2.2-22.04 @@ -9,6 +9,7 @@ RUN apt-get update && \ python3.11 \ python3-pip \ python3-venv \ + libgl1 \ git RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \ @@ -18,10 +19,9 @@ RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \ . venv/bin/activate && \ python -m pip install --upgrade pip && \ python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu124 && \ + python -m pip install opencv-python-headless -U && \ python -m pip cache purge -RUN apt-get update && apt-get install libgl1 -y - CMD cd /horde-worker-reGen && \ git pull && \ . venv/bin/activate && \ diff --git a/Dockerfiles/Dockerfile.12.3.2-22.04 b/Dockerfiles/Dockerfile.12.3.2-22.04 index 5e7b36a3..06c1409d 100644 --- a/Dockerfiles/Dockerfile.12.3.2-22.04 +++ b/Dockerfiles/Dockerfile.12.3.2-22.04 @@ -9,6 +9,7 @@ RUN apt-get update && \ python3.11 \ python3-pip \ python3-venv \ + libgl1 \ git RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \ @@ -18,10 +19,9 @@ RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \ . venv/bin/activate && \ python -m pip install --upgrade pip && \ python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu124 && \ + python -m pip install opencv-python-headless -U && \ python -m pip cache purge -RUN apt-get update && apt-get install libgl1 -y - CMD cd /horde-worker-reGen && \ git pull && \ . venv/bin/activate && \ diff --git a/Dockerfiles/Dockerfile.12.4-22.04 b/Dockerfiles/Dockerfile.12.4-22.04 new file mode 100644 index 00000000..418bd690 --- /dev/null +++ b/Dockerfiles/Dockerfile.12.4-22.04 @@ -0,0 +1,30 @@ +FROM nvidia/cuda:12.4.0-runtime-ubuntu22.04 + +ARG DEBIAN_FRONTEND=noninteractive + +RUN apt-get update && \ + apt-get install -y software-properties-common && \ + add-apt-repository ppa:deadsnakes/ppa && \ + apt-get install -y --no-install-recommends \ + python3.11 \ + python3-pip \ + python3-venv \ + libgl1 \ + git + +RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \ + cd /horde-worker-reGen && \ + python3.11 -m pip install --upgrade pip && \ + python3 -m venv venv && \ + . venv/bin/activate && \ + python -m pip install --upgrade pip && \ + python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu124 && \ + python -m pip install opencv-python-headless -U && \ + python -m pip cache purge + +CMD cd /horde-worker-reGen && \ + git pull && \ + . 
venv/bin/activate && \ + python -m pip install -r requirements.txt -U && \ + python download_models.py -e && \ + python run_worker.py -e From de071ceb3f19633d33dc8b4a0be42734dcc942e0 Mon Sep 17 00:00:00 2001 From: tazlin Date: Thu, 3 Oct 2024 18:58:43 -0400 Subject: [PATCH 05/32] chore: version bump --- horde_worker_regen/__init__.py | 2 +- horde_worker_regen/_version_meta.json | 2 +- pyproject.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/horde_worker_regen/__init__.py b/horde_worker_regen/__init__.py index 0a901426..495ecd23 100644 --- a/horde_worker_regen/__init__.py +++ b/horde_worker_regen/__init__.py @@ -8,7 +8,7 @@ ASSETS_FOLDER_PATH = Path(__file__).parent / "assets" -__version__ = "9.0.7" +__version__ = "9.1.0" import pkg_resources # noqa: E402 diff --git a/horde_worker_regen/_version_meta.json b/horde_worker_regen/_version_meta.json index c508e732..01fe77f9 100644 --- a/horde_worker_regen/_version_meta.json +++ b/horde_worker_regen/_version_meta.json @@ -1,5 +1,5 @@ { - "recommended_version": "9.0.7", + "recommended_version": "9.1.0", "required_min_version": "9.0.2", "required_min_version_update_date": "2024-09-26", "required_min_version_info": { diff --git a/pyproject.toml b/pyproject.toml index 13e53cb4..2889d8d9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "horde_worker_regen" -version = "9.0.7" +version = "9.1.0" description = "Allows you to connect to the AI Horde and generate images for users." authors = [ {name = "tazlin", email = "tazlin.on.github@gmail.com"}, From ddc4d785f5da6079a246e5d088aa0ef65ee7b067 Mon Sep 17 00:00:00 2001 From: tazlin Date: Thu, 3 Oct 2024 21:19:14 -0400 Subject: [PATCH 06/32] fix: pop more often with threads>1 --- horde_worker_regen/process_management/process_manager.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py index 63022950..a5f02cf0 100644 --- a/horde_worker_regen/process_management/process_manager.py +++ b/horde_worker_regen/process_management/process_manager.py @@ -3255,6 +3255,9 @@ async def api_job_pop(self) -> None: seconds_to_wait *= 0.5 # logger.debug("Moderate performance mode is enabled, reducing the wait time by 50%") + if self.bridge_data.max_threads > 1: + seconds_to_wait = seconds_to_wait * 0.5 + if self._triggered_max_pending_megapixelsteps is False: self._triggered_max_pending_megapixelsteps = True self._triggered_max_pending_megapixelsteps_time = time.time() From 624282ce1bda0c05c4dfbbbed74a67bad94e2a1b Mon Sep 17 00:00:00 2001 From: tazlin Date: Thu, 3 Oct 2024 21:19:36 -0400 Subject: [PATCH 07/32] fix: wait less time w/ high perf. 
mode

---
 horde_worker_regen/process_management/process_manager.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py
index a5f02cf0..56afc90a 100644
--- a/horde_worker_regen/process_management/process_manager.py
+++ b/horde_worker_regen/process_management/process_manager.py
@@ -3249,7 +3249,7 @@ async def api_job_pop(self) -> None:
             seconds_to_wait = self.get_pending_megapixelsteps() * 0.9
 
         if self.bridge_data.high_performance_mode:
-            seconds_to_wait *= 0.35
+            seconds_to_wait *= 0.25
             # logger.debug("High performance mode is enabled, reducing the wait time by 70%")
         elif self.bridge_data.moderate_performance_mode:
             seconds_to_wait *= 0.5

From 1befebb99ede1c0e73b045adc653586473f9fbb7 Mon Sep 17 00:00:00 2001
From: tazlin <tazlin.on.github@gmail.com>
Date: Thu, 3 Oct 2024 21:24:35 -0400
Subject: [PATCH 08/32] fix: don't pause at all for short jobs on high perf mode

---
 .../process_management/process_manager.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py
index 56afc90a..4362f40a 100644
--- a/horde_worker_regen/process_management/process_manager.py
+++ b/horde_worker_regen/process_management/process_manager.py
@@ -3248,15 +3248,15 @@ async def api_job_pop(self) -> None:
         else:
             seconds_to_wait = self.get_pending_megapixelsteps() * 0.9
 
+        if self.bridge_data.max_threads > 1:
+            seconds_to_wait = seconds_to_wait * 0.5
+
         if self.bridge_data.high_performance_mode:
             seconds_to_wait *= 0.25
-            # logger.debug("High performance mode is enabled, reducing the wait time by 70%")
+            if seconds_to_wait < 10:
+                seconds_to_wait = 1
         elif self.bridge_data.moderate_performance_mode:
             seconds_to_wait *= 0.5
-            # logger.debug("Moderate performance mode is enabled, reducing the wait time by 50%")
-
-        if self.bridge_data.max_threads > 1:
-            seconds_to_wait = seconds_to_wait * 0.5
 
         if self._triggered_max_pending_megapixelsteps is False:
             self._triggered_max_pending_megapixelsteps = True
             self._triggered_max_pending_megapixelsteps_time = time.time()

From 6be67159a444918f839bb6456a5e8cc878c157f7 Mon Sep 17 00:00:00 2001
From: tazlin <tazlin.on.github@gmail.com>
Date: Thu, 3 Oct 2024 21:30:05 -0400
Subject: [PATCH 09/32] fix: wait even less w/ high perf mode

---
 horde_worker_regen/process_management/process_manager.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py
index 4362f40a..e188986f 100644
--- a/horde_worker_regen/process_management/process_manager.py
+++ b/horde_worker_regen/process_management/process_manager.py
@@ -3249,11 +3249,11 @@ async def api_job_pop(self) -> None:
             seconds_to_wait = self.get_pending_megapixelsteps() * 0.9
 
         if self.bridge_data.max_threads > 1:
-            seconds_to_wait = seconds_to_wait * 0.5
+            seconds_to_wait *= 0.25
 
         if self.bridge_data.high_performance_mode:
             seconds_to_wait *= 0.25
-            if seconds_to_wait < 10:
+            if seconds_to_wait < 20:
                 seconds_to_wait = 1
         elif self.bridge_data.moderate_performance_mode:
             seconds_to_wait *= 0.5

From 925e1114bdb6d9d115a8497a25154385ecf3b452 Mon Sep 17 00:00:00 2001
From: tazlin <tazlin.on.github@gmail.com>
Date: Fri, 4 Oct 2024 07:21:26 -0400
Subject: [PATCH 10/32] docs/fix: clarify certain stats/config in logs and docstrings

---
 .../process_management/process_manager.py | 79 +++++++++++++++++--
 1 file changed, 72 insertions(+), 7 deletions(-)

diff --git a/horde_worker_regen/process_management/process_manager.py
b/horde_worker_regen/process_management/process_manager.py index e188986f..105188a3 100644 --- a/horde_worker_regen/process_management/process_manager.py +++ b/horde_worker_regen/process_management/process_manager.py @@ -891,6 +891,8 @@ class HordeWorkerProcessManager: can run at once. Use `max_concurrent_inference_processes` to control that behavior.""" _max_concurrent_inference_processes: int + """The maximum number of inference processes that can run jobs concurrently. \ + This is set at initialization to prevent changing the value at runtime.""" @property def max_concurrent_inference_processes(self) -> int: @@ -932,6 +934,7 @@ def get_process_total_ram_usage(self) -> int: return total jobs_lookup: dict[ImageGenerateJobPopResponse, HordeJobInfo] + """The mapping of API responses to their corresponding worker job info.""" jobs_in_progress: list[ImageGenerateJobPopResponse] """A list of jobs that are currently in progress.""" @@ -940,27 +943,38 @@ def get_process_total_ram_usage(self) -> int: """A list of jobs that have exhibited faults and what kinds.""" jobs_pending_safety_check: list[HordeJobInfo] + """A list of jobs that were generated but have not yet been safety checked.""" _jobs_safety_check_lock: Lock_Asyncio + """The asyncio lock for the safety check queue.""" jobs_being_safety_checked: list[HordeJobInfo] + """The list of jobs that are currently being safety checked.""" _num_jobs_faulted: int = 0 + """The number of jobs which were marked as faulted. This may not include jobs which failed for unknown reasons.""" completed_jobs: list[HordeJobInfo] """A list of 3 tuples containing the job, the state, and whether or not the job was censored.""" _completed_jobs_lock: Lock_Asyncio + """The asyncio lock for the completed jobs queue.""" kudos_generated_this_session: float = 0 + """The amount of kudos generated this entire session.""" kudos_events: list[tuple[float, float]] """A deque of kudos events, each is a tuple of the time the event occurred and the amount of kudos generated.""" session_start_time: float = 0 + """The time at which the session started in epoch time.""" _aiohttp_client_session: aiohttp.ClientSession + """The aiohttp client session to use for making network calls.""" stable_diffusion_reference: StableDiffusion_ModelReference | None + """The class which contains the list of models from horde_model_reference.""" horde_client: AIHordeAPIAsyncSimpleClient + """The horde sdk client to make api calls.""" horde_client_session: AIHordeAPIAsyncClientSession + """The context manager for the horde sdk client.""" user_info: UserDetailsResponse | None = None """The user info for the user that this worker is logged in as.""" @@ -998,21 +1012,29 @@ def num_total_processes(self) -> int: job_deque: deque[ImageGenerateJobPopResponse] """A deque of jobs that are waiting to be processed.""" _job_deque_lock: Lock_Asyncio + """The asyncio lock for the job deque.""" job_pop_timestamps: dict[ImageGenerateJobPopResponse, float] + """A mapping of jobs to the time at which they were popped.""" _job_pop_timestamps_lock: Lock_Asyncio + """The asyncio lock for the job pop timestamps.""" _inference_semaphore: Semaphore """A semaphore that limits the number of inference processes that can run at once.""" _disk_lock: Lock_MultiProcessing + """A lock to prevent multiple processes from accessing the disk at once.""" _aux_model_lock: Lock_MultiProcessing + """A lock to prevent multiple processes from accessing the auxiliary models at once (such as LoRas).""" _shutting_down = False + """Whether or not the 
worker is shutting down.""" _lru: LRUCache + """A simple LRU cache. This is used to keep track of the most recently used models.""" _amd_gpu: bool + """Whether or not the GPU is an AMD GPU.""" def __init__( self, @@ -1142,7 +1164,7 @@ def __init__( self._process_message_queue = multiprocessing.Queue() - self.kudos_events = [] + self.kudos_events: list[tuple[float, float]] = [] self.stable_diffusion_reference = None @@ -1420,8 +1442,12 @@ def _end_inference_process(self, process_info: HordeProcessInfo) -> None: logger.info(f"Ended inference process {process_info.process_id}") _num_process_recoveries = 0 + """The number of times a child process crashed or was killed and recovered.""" _safety_processes_should_be_replaced: bool = False + """Whether or not the safety processes should be replaced due to a detected problem.""" _safety_processes_ending: bool = False + """Whether or not the safety processes are in the process of ending. \ + This only occurs when they are being replaced.""" def _replace_all_safety_process(self) -> None: """Replace all of the safety processes. @@ -1499,6 +1525,7 @@ def _replace_inference_process(self, process_info: HordeProcessInfo) -> None: self._num_process_recoveries += 1 total_num_completed_jobs: int = 0 + """The total number of jobs that have been completed.""" def end_safety_processes(self) -> None: """End any safety processes above the configured limit, or all of them if shutting down.""" @@ -2423,6 +2450,7 @@ def base64_image_to_stream_buffer(self, image_base64: str) -> BytesIO | None: return None _num_job_slowdowns = 0 + """The number of jobs which did not meet the minimum expected kudos/second rate.""" @logger.catch(reraise=True) async def submit_single_generation(self, new_submit: PendingSubmitJob) -> PendingSubmitJob: @@ -3173,11 +3201,19 @@ async def _get_source_images(self, job_pop_response: ImageGenerateJobPopResponse return job_pop_response _last_pop_no_jobs_available: bool = False + """Whether the last job pop attempt had a no jobs available response.""" _last_pop_no_jobs_available_time: float = 0.0 + """The time at which the last job pop attempt had a no jobs available response.""" _time_spent_no_jobs_available: float = 0.0 + """The number of seconds spent with no jobs popped or available.""" + _max_time_spent_no_jobs_available: float = 60.0 * 60.0 + """The maximum number of seconds to spend with no jobs popped or available before warning the user.""" _too_many_consecutive_failed_jobs: bool = False + """Whether too many consecutive failed jobs have occurred and job pops are paused.""" _too_many_consecutive_failed_jobs_time: float = 0.0 + """The time at which too many consecutive failed jobs occurred.""" _too_many_consecutive_failed_jobs_wait_time = 180 + """The time to wait after too many consecutive failed jobs before resuming job pops.""" @logger.catch(reraise=True) async def api_job_pop(self) -> None: @@ -3470,14 +3506,20 @@ async def api_job_pop(self) -> None: ) _user_info_failed = False + """Whether the API request to fetch user info failed.""" _user_info_failed_reason: str | None = None + """The reason the API request to fetch user info failed.""" _current_worker_id: str | None = None + """The current worker ID.""" def calculate_kudos_info(self) -> None: """Calculate and log information about the kudos generated in the current session.""" time_since_session_start = time.time() - self.session_start_time kudos_per_hour_session = self.kudos_generated_this_session / time_since_session_start * 3600 + active_kudos_per_hour = ( + 
self.kudos_generated_this_session / (time_since_session_start - self._time_spent_no_jobs_available) * 3600 + ) kudos_total_past_hour = self.calculate_kudos_totals() @@ -3485,6 +3527,7 @@ def calculate_kudos_info(self) -> None: time_since_session_start, kudos_per_hour_session, kudos_total_past_hour, + active_kudos_per_hour, ) self.log_kudos_info(kudos_info_string) @@ -3517,6 +3560,7 @@ def generate_kudos_info_string( time_since_session_start: float, kudos_per_hour_session: float, kudos_total_past_hour: float, + active_kudos_per_hour: float, ) -> str: """Generate a string with information about the kudos generated in the current session. @@ -3524,6 +3568,7 @@ def generate_kudos_info_string( time_since_session_start (float): The time since the session started. kudos_per_hour_session (float): The kudos per hour generated in the current session. kudos_total_past_hour (float): The total kudos generated in the past hour. + active_kudos_per_hour (float): The kudos per hour generated while active (jobs available). Returns: str: A string with information about the kudos generated in the current session. @@ -3555,6 +3600,11 @@ def generate_kudos_info_string( # "Last Hour: (pending) kudos", # ) + if self._time_spent_no_jobs_available > self._max_time_spent_no_jobs_available: + kudos_info_string_elements.append( + f"Active (jobs available): {active_kudos_per_hour:,.2f} kudos/hr", + ) + return " | ".join(kudos_info_string_elements) def log_kudos_info(self, kudos_info_string: str) -> None: @@ -3611,6 +3661,7 @@ async def api_get_user_info(self) -> None: await logger.complete() _job_submit_loop_interval = 0.02 + """The interval between job submit loop iterations.""" async def _job_submit_loop(self) -> None: """Run the job submit loop.""" @@ -3670,7 +3721,9 @@ async def _api_call_loop(self) -> None: await asyncio.sleep(self._api_call_loop_interval) _status_message_frequency = 20.0 + """The rate in seconds at which to print status messages with details about the current state of the worker.""" _last_status_message_time = 0.0 + """The epoch time of the last status message.""" async def _process_control_loop(self) -> None: self.start_safety_processes() @@ -3804,12 +3857,18 @@ async def _process_control_loop(self) -> None: sys.exit(0) - _last_deadlock_detected_time = 0.0 - _in_deadlock = False - _in_queue_deadlock = False - _last_queue_deadlock_detected_time = 0.0 + _last_deadlock_detected_time: float = 0.0 + """The epoch time of the last deadlock detected.""" + _in_deadlock: bool = False + """Whether the worker is in a deadlock state.""" + _in_queue_deadlock: bool = False + """Whether the worker is in a queue deadlock state.""" + _last_queue_deadlock_detected_time: float = 0.0 + """The epoch time of the last queue deadlock detected.""" _queue_deadlock_model: str | None = None + """The model causing the queue deadlock.""" _queue_deadlock_process_id: int | None = None + """The process ID causing the queue deadlock.""" def detect_deadlock(self) -> None: """Detect if there are jobs in the queue but no processes doing anything.""" @@ -3897,6 +3956,8 @@ def print_status_method(self) -> None: logger.info("Process info:") for process_info_string in process_info_strings: logger.info(process_info_string) + + max_power_dimension = int((self.bridge_data.max_power * 8 * 64 * 64) // 2) logger.info( " | ".join( [ @@ -3904,7 +3965,7 @@ def print_status_method(self) -> None: f"(v{horde_worker_regen.__version__})", f"horde user: {self.user_info.username if self.user_info is not None else 'Unknown'}", f"num_models: 
{len(self.bridge_data.image_models_to_load)}",
-                    f"max_power: {self.bridge_data.max_power}",
+                    f"max_power: {self.bridge_data.max_power} ({max_power_dimension}x{max_power_dimension})",
                     f"max_threads: {self.max_concurrent_inference_processes}",
                     f"queue_size: {self.bridge_data.queue_size}",
                     f"safety_on_gpu: {self.bridge_data.safety_on_gpu}",
@@ -4043,7 +4104,7 @@ def print_status_method(self) -> None:
             if not self.bridge_data.suppress_speed_warnings:
                 logger.warning(
                     f"Your worker spent more than {minutes_allowed_without_jobs} minutes combined throughout this "
-                    f"session ({cur_session_minutes:.2f} minutes) "
+                    f"session ({self._time_spent_no_jobs_available/60:.2f}/{cur_session_minutes:.2f} minutes) "
                     "without jobs. This may be due to low demand. However, offering more models or increasing "
                     "your max_power may help increase the number of jobs you receive and reduce downtime.",
                 )
@@ -4059,9 +4120,12 @@ def print_status_method(self) -> None:
         self._last_status_message_time = cur_time
 
     _bridge_data_loop_interval = 1.0
+    """The interval between bridge data loop iterations."""
 
     _last_bridge_data_reload_time = 0.0
+    """The epoch time of the last bridge data reload."""
 
     _bridge_data_last_modified_time = 0.0
+    """The time the bridge data file on disk was last modified."""
 
     def get_bridge_data_from_disk(self) -> None:
         """Load the bridge data from disk."""
@@ -4146,6 +4210,7 @@ async def _main_loop(self) -> None:
         await asyncio.gather(*tasks)
 
     _caught_sigints = 0
+    """The number of SIGINTs or SIGTERMs caught."""
 
     def start(self) -> None:
         """Start the process manager."""

From 15682bc3cd709427963f40817e0e11d27999100a Mon Sep 17 00:00:00 2001
From: tazlin <tazlin.on.github@gmail.com>
Date: Fri, 4 Oct 2024 08:17:23 -0400
Subject: [PATCH 11/32] fix: use sqrt as intended

---
 horde_worker_regen/process_management/process_manager.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py
index 105188a3..4bd92fbe 100644
--- a/horde_worker_regen/process_management/process_manager.py
+++ b/horde_worker_regen/process_management/process_manager.py
@@ -4,6 +4,7 @@
 import collections
 import enum
 import json
+import math
 import multiprocessing
 import os
 import queue
@@ -3957,7 +3958,7 @@ def print_status_method(self) -> None:
         for process_info_string in process_info_strings:
             logger.info(process_info_string)
 
-        max_power_dimension = int((self.bridge_data.max_power * 8 * 64 * 64) // 2)
+        max_power_dimension = int(math.sqrt(self.bridge_data.max_power * 8 * 64 * 64))

From d662404251e0c65d014eb713e2edc5b8cb5b81ea Mon Sep 17 00:00:00 2001
From: tazlin <tazlin.on.github@gmail.com>
Date: Wed, 2 Oct 2024 13:35:29 -0400
Subject: [PATCH 12/32] fix: exit(1) on compvis model dl failure

From c0b401e7f456fe66c3f80daf3013b297bdf38250 Mon Sep 17 00:00:00 2001
From: tazlin <tazlin.on.github@gmail.com>
Date: Wed, 2 Oct 2024 13:43:52 -0400
Subject: [PATCH 13/32] fix: use a `certifi` ssl context for r2 uploads

This is tied to a bug of unclear root cause, but whose practical effect is
that the root signing certificate was not being found on a relatively
fresh Windows 10 Pro machine.

`certifi` should already be pulled in as a transitive dependency, but I've
marked it as an explicit requirement, and I anticipate that there should
not be side effects on machines which were previously running fine.
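For reference, the pattern in isolation is a minimal sketch like the
following, using a placeholder URL and payload rather than the worker's
actual R2 upload call:

    import ssl

    import aiohttp
    import certifi

    # Build the SSL context from certifi's CA bundle instead of relying on
    # the OS certificate store, which was missing the root certificate on
    # the affected machine.
    sslcontext = ssl.create_default_context(cafile=certifi.where())

    async def upload_image(url: str, payload: bytes) -> int:
        # The worker passes the context the same way on its own upload call.
        async with aiohttp.ClientSession() as session:
            async with session.post(url, data=payload, ssl=sslcontext) as response:
                return response.status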
--- horde_worker_regen/process_management/process_manager.py | 5 +++++ requirements.txt | 4 +++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py index 4bd92fbe..3a4203b0 100644 --- a/horde_worker_regen/process_management/process_manager.py +++ b/horde_worker_regen/process_management/process_manager.py @@ -9,6 +9,7 @@ import os import queue import random +import ssl import sys import time from asyncio import CancelledError, Task @@ -23,6 +24,7 @@ import aiohttp import aiohttp.client_exceptions +import certifi import PIL import PIL.Image import psutil @@ -90,6 +92,8 @@ ) from horde_worker_regen.process_management.worker_entry_points import start_inference_process, start_safety_process +sslcontext = ssl.create_default_context(cafile=certifi.where()) + # This is due to Linux/Windows differences in the multiprocessing module try: from multiprocessing.connection import PipeConnection as Connection # type: ignore @@ -2498,6 +2502,7 @@ async def _do_upload(new_submit: PendingSubmitJob, image_in_buffer_bytes: bytes) data=image_in_buffer_bytes, skip_auto_headers=["content-type"], timeout=aiohttp.ClientTimeout(total=10), + ssl=sslcontext, ) as response: if response.status != 200: logger.error(f"Failed to upload image to R2: {response}") diff --git a/requirements.txt b/requirements.txt index f2009c5a..39ae8554 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,8 @@ numpy==1.26.4 torch==2.4.1 -qrcode==7.4.2 +qrcode==7.4.2 # >8 breaks horde-engine 2.15.3 via the qr code generation nodes + +certifi # Required for SSL cert resolution horde_sdk~=0.15.0 horde_safety~=0.2.3 From 7fe8794a57d01a43d42a8f3caf6fe33b1eb12d8c Mon Sep 17 00:00:00 2001 From: tazlin Date: Fri, 4 Oct 2024 09:17:18 -0400 Subject: [PATCH 14/32] fix: don't concurrently preload more than 1 model The new config option `very_fast_disk_mode` overrides this. --- bridgeData_template.yaml | 4 +++ horde_worker_regen/bridge_data/data_model.py | 3 +++ .../process_management/process_manager.py | 25 +++++++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/bridgeData_template.yaml b/bridgeData_template.yaml index 48c7fd5f..eef53623 100644 --- a/bridgeData_template.yaml +++ b/bridgeData_template.yaml @@ -114,6 +114,10 @@ unload_models_from_vram_often: true # 12GB-16GB VRAM: false # 8GB-10GB VRAM: true +# Normally only one model will load off disk at a time. Set to true to load multiple models at once. +# This requires a very fast disk. You will see a sharp increase in disk usage, especially with SDXL/Cascade/Flux/other large models. +very_fast_disk_mode: false + # List of words to reject if they appear in the prompt. 
blacklist: [] diff --git a/horde_worker_regen/bridge_data/data_model.py b/horde_worker_regen/bridge_data/data_model.py index fc26e0ee..391af8af 100644 --- a/horde_worker_regen/bridge_data/data_model.py +++ b/horde_worker_regen/bridge_data/data_model.py @@ -68,6 +68,9 @@ class reGenBridgeData(CombinedHordeBridgeData): moderate_performance_mode: bool = Field(default=False) """If you have a 3080 or better, set this to true to enable moderate performance mode.""" + very_fast_disk_mode: bool = Field(default=False) + """If you have a very fast disk, set this to true to concurrently load more models at a time from disk.""" + post_process_job_overlap: bool = Field(default=False) """High and moderate performance modes will skip post processing if this is set to true.""" diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py index 3a4203b0..5f998961 100644 --- a/horde_worker_regen/process_management/process_manager.py +++ b/horde_worker_regen/process_management/process_manager.py @@ -662,6 +662,14 @@ def num_busy_with_post_processing(self) -> int: count += 1 return count + def num_preloading_processes(self) -> int: + """Return the number of processes that are preloading models.""" + count = 0 + for p in self.values(): + if p.last_process_state == HordeProcessState.PRELOADING_MODEL: + count += 1 + return count + def __repr__(self) -> str: """Return a string representation of the process map.""" base_string = "Processes: " @@ -1941,6 +1949,23 @@ def preload_models(self) -> bool: self._replace_inference_process(available_process) return False + num_preloading_processes = self._process_map.num_preloading_processes() + + at_least_one_preloading_process = num_preloading_processes >= 1 + very_fast_disk_mode_enabled = self.bridge_data.very_fast_disk_mode + max_concurrent_inference_processes_reached = ( + num_preloading_processes >= self._max_concurrent_inference_processes + ) + + if (not very_fast_disk_mode_enabled and at_least_one_preloading_process) or ( + very_fast_disk_mode_enabled and max_concurrent_inference_processes_reached + ): + logger.info( + f"Already preloading {num_preloading_processes} models, waiting for one to finish before " + f"preloading {job.model}", + ) + return False + logger.debug(f"Preloading model {job.model} on process {available_process.process_id}") logger.debug(f"Available inference processes: {self._process_map}") only_active_models = { From 683b1cad0c7e6d2b86e52a93c6740a7568b3d52a Mon Sep 17 00:00:00 2001 From: tazlin Date: Fri, 4 Oct 2024 09:21:57 -0400 Subject: [PATCH 15/32] fix: don't spam preload delay messages --- horde_worker_regen/process_management/process_manager.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py index 5f998961..6359d45c 100644 --- a/horde_worker_regen/process_management/process_manager.py +++ b/horde_worker_regen/process_management/process_manager.py @@ -1875,6 +1875,8 @@ def receive_and_handle_process_messages(self) -> None: # logger.debug([c.generation_faults for c in completed_job_info.job_image_results]) self.completed_jobs.append(completed_job_info) + _preload_delay_notified = False + def preload_models(self) -> bool: """Preload models that are likely to be used soon. 
@@ -1964,8 +1966,10 @@ def preload_models(self) -> bool:
                 f"Already preloading {num_preloading_processes} models, waiting for one to finish before "
                 f"preloading {job.model}",
             )
+            self._preload_delay_notified = True
             return False
 
+        self._preload_delay_notified = False
         logger.debug(f"Preloading model {job.model} on process {available_process.process_id}")
         logger.debug(f"Available inference processes: {self._process_map}")
         only_active_models = {

From e73fcfb0db491a6fd8ec5e387f1e8878e7bb3dc0 Mon Sep 17 00:00:00 2001
From: tazlin <tazlin.on.github@gmail.com>
Date: Fri, 4 Oct 2024 09:26:12 -0400
Subject: [PATCH 16/32] fix: include conditional to not spam delay messages

---
 horde_worker_regen/process_management/process_manager.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py
index 6359d45c..3e5a5123 100644
--- a/horde_worker_regen/process_management/process_manager.py
+++ b/horde_worker_regen/process_management/process_manager.py
@@ -1962,10 +1962,11 @@ def preload_models(self) -> bool:
         if (not very_fast_disk_mode_enabled and at_least_one_preloading_process) or (
             very_fast_disk_mode_enabled and max_concurrent_inference_processes_reached
         ):
-            logger.info(
-                f"Already preloading {num_preloading_processes} models, waiting for one to finish before "
-                f"preloading {job.model}",
-            )
+            if not self._preload_delay_notified:
+                logger.info(
+                    f"Already preloading {num_preloading_processes} models, waiting for one to finish before "
+                    f"preloading {job.model}",
+                )
             self._preload_delay_notified = True
             return False

From bbc99c7b2fa092056de5360f4996d6cf73b1b28f Mon Sep 17 00:00:00 2001
From: tazlin <tazlin.on.github@gmail.com>
Date: Fri, 4 Oct 2024 09:28:04 -0400
Subject: [PATCH 17/32] fix: give models a chance to load before failing

The recent change that limits preloading to a single model at a time can
cause the logic here to think a model isn't loaded when it should be. If
another preload is holding up the line, we'll wait to see what happens
rather than failing immediately.
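Condensed, the preload gate introduced in the preceding patches reduces to
the following predicate (a sketch only; the parameter names are stand-ins
for the attributes used in process_manager.py):

    def preload_should_wait(
        num_preloading: int,
        very_fast_disk_mode: bool,
        max_concurrent_inference_processes: int,
    ) -> bool:
        # With a very fast disk, allow preloads up to the concurrent process
        # limit; otherwise only one model may stream off disk at a time.
        if very_fast_disk_mode:
            return num_preloading >= max_concurrent_inference_processes
        return num_preloading >= 1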
--- horde_worker_regen/process_management/process_manager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py index 3e5a5123..bfe34f75 100644 --- a/horde_worker_regen/process_management/process_manager.py +++ b/horde_worker_regen/process_management/process_manager.py @@ -2068,6 +2068,8 @@ def handle_process_missing(job: ImageGenerateJobPopResponse) -> None: if self._horde_model_map.is_model_loaded(next_job.model): if process_with_model is None: + if self._preload_delay_notified: + return None handle_process_missing(next_job) return None From 9aaf86263170089554bd81737eb93b2057d420a9 Mon Sep 17 00:00:00 2001 From: tazlin Date: Fri, 4 Oct 2024 09:48:02 -0400 Subject: [PATCH 18/32] fix: correct version pins across dep files --- .pre-commit-config.yaml | 4 ++-- horde-bridge.cmd | 2 +- requirements.rocm.txt | 4 ++-- requirements.txt | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7ed436f3..10c7d442 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,7 +40,7 @@ repos: - horde_safety==0.2.3 - torch==2.4.1 - ruamel.yaml - - horde_engine==2.15.3 - - horde_sdk==0.14.11 + - horde_engine==2.16.0 + - horde_sdk==0.15.1 - horde_model_reference==0.9.0 - semver diff --git a/horde-bridge.cmd b/horde-bridge.cmd index adce7be4..8358bf54 100644 --- a/horde-bridge.cmd +++ b/horde-bridge.cmd @@ -5,7 +5,7 @@ cd /d %~dp0 call runtime python -s -m pip -V call python -s -m pip uninstall hordelib -call python -s -m pip install horde_sdk~=0.14.11 horde_model_reference~=0.9.0 horde_engine~=2.15.3 horde_safety~=0.2.3 -U +call python -s -m pip install horde_sdk~=0.15.1 horde_model_reference~=0.9.0 horde_engine~=2.16.0 horde_safety~=0.2.3 -U if %ERRORLEVEL% NEQ 0 ( echo "Please run update-runtime.cmd." 
diff --git a/requirements.rocm.txt b/requirements.rocm.txt index 69314fb9..b68d4152 100644 --- a/requirements.rocm.txt +++ b/requirements.rocm.txt @@ -1,9 +1,9 @@ numpy==1.26.4 torch==2.4.1+rocm6.0 -horde_sdk~=0.14.11 +horde_sdk~=0.15.1 horde_safety~=0.2.3 -horde_engine~=2.15.3 +horde_engine~=2.16.0 horde_model_reference~=0.9.0 python-dotenv diff --git a/requirements.txt b/requirements.txt index 39ae8554..c28b3493 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,10 @@ numpy==1.26.4 torch==2.4.1 -qrcode==7.4.2 # >8 breaks horde-engine 2.15.3 via the qr code generation nodes +qrcode==7.4.2 # >8 breaks horde-engine 2.16.0 via the qr code generation nodes certifi # Required for SSL cert resolution -horde_sdk~=0.15.0 +horde_sdk~=0.15.1 horde_safety~=0.2.3 horde_engine~=2.16.0 horde_model_reference>=0.9.1 From 155820cfceba03bacb448c9533c9d9b94daafa31 Mon Sep 17 00:00:00 2001 From: tazlin Date: Fri, 4 Oct 2024 09:50:17 -0400 Subject: [PATCH 19/32] fix: use latest horde model reference --- .pre-commit-config.yaml | 2 +- horde-bridge.cmd | 2 +- requirements.rocm.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 10c7d442..bb3c1e47 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,5 +42,5 @@ repos: - ruamel.yaml - horde_engine==2.16.0 - horde_sdk==0.15.1 - - horde_model_reference==0.9.0 + - horde_model_reference==0.9.1 - semver diff --git a/horde-bridge.cmd b/horde-bridge.cmd index 8358bf54..a7184473 100644 --- a/horde-bridge.cmd +++ b/horde-bridge.cmd @@ -5,7 +5,7 @@ cd /d %~dp0 call runtime python -s -m pip -V call python -s -m pip uninstall hordelib -call python -s -m pip install horde_sdk~=0.15.1 horde_model_reference~=0.9.0 horde_engine~=2.16.0 horde_safety~=0.2.3 -U +call python -s -m pip install horde_sdk~=0.15.1 horde_model_reference~=0.9.1 horde_engine~=2.16.0 horde_safety~=0.2.3 -U if %ERRORLEVEL% NEQ 0 ( echo "Please run update-runtime.cmd." diff --git a/requirements.rocm.txt b/requirements.rocm.txt index b68d4152..ca423fed 100644 --- a/requirements.rocm.txt +++ b/requirements.rocm.txt @@ -4,7 +4,7 @@ torch==2.4.1+rocm6.0 horde_sdk~=0.15.1 horde_safety~=0.2.3 horde_engine~=2.16.0 -horde_model_reference~=0.9.0 +horde_model_reference~=0.9.1 python-dotenv ruamel.yaml From 17f5099e184266f5bb575ae67adacbcbf03f801f Mon Sep 17 00:00:00 2001 From: tazlin Date: Fri, 4 Oct 2024 09:56:44 -0400 Subject: [PATCH 20/32] style: fix --- horde_worker_regen/process_management/inference_process.py | 1 - 1 file changed, 1 deletion(-) diff --git a/horde_worker_regen/process_management/inference_process.py b/horde_worker_regen/process_management/inference_process.py index 1a6b0aba..ee86cb8f 100644 --- a/horde_worker_regen/process_management/inference_process.py +++ b/horde_worker_regen/process_management/inference_process.py @@ -4,7 +4,6 @@ import base64 import contextlib -import io import sys import time From cfc62eb4f47efc470afd2cdece3c007f56507627 Mon Sep 17 00:00:00 2001 From: tazlin Date: Fri, 4 Oct 2024 10:35:11 -0400 Subject: [PATCH 21/32] fix: better deadlock detection when all procs. 
aren't busy --- .../process_management/process_manager.py | 39 ++++++++++++++++--- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py index bfe34f75..d1adb28a 100644 --- a/horde_worker_regen/process_management/process_manager.py +++ b/horde_worker_regen/process_management/process_manager.py @@ -702,6 +702,10 @@ def get_process_info_strings(self) -> list[str]: return info_strings + def all_waiting_for_job(self) -> bool: + """Return true if all processes are waiting for a job.""" + return all(p.last_process_state == HordeProcessState.WAITING_FOR_JOB for p in self.values()) + class TorchDeviceInfo(BaseModel): """Contains information about a torch device.""" @@ -1577,6 +1581,7 @@ def receive_and_handle_process_messages(self) -> None: break self._in_deadlock = False + self._in_queue_deadlock = False if isinstance(message, HordeProcessHeartbeatMessage): self._process_map.on_heartbeat( @@ -1967,7 +1972,7 @@ def preload_models(self) -> bool: f"Already preloading {num_preloading_processes} models, waiting for one to finish before " f"preloading {job.model}", ) - self._preload_delay_notified = True + self._preload_delay_notified = True return False self._preload_delay_notified = False @@ -2061,6 +2066,7 @@ def handle_process_missing(job: ImageGenerateJobPopResponse) -> None: if job.model is not None: logger.debug(f"Expiring entry for model {job.model}") self._horde_model_map.expire_entry(job.model) + try: self.jobs_in_progress.remove(job) except ValueError: @@ -2112,6 +2118,8 @@ def handle_process_missing(job: ImageGenerateJobPopResponse) -> None: return None if process_with_model is None: + if self._preload_delay_notified: + return None handle_process_missing(next_job) return None @@ -3930,7 +3938,7 @@ def _print_deadlock_info() -> None: not self._in_queue_deadlock and (self._process_map.num_busy_processes() == 0 and len(self.job_deque) > 0) and len(self.jobs_in_progress) == 0 - ): + ) or (self._process_map.all_waiting_for_job() and len(self.job_deque) > 0): currently_loaded_models = set() model_process_map: dict[str, int] = {} @@ -3945,13 +3953,34 @@ def _print_deadlock_info() -> None: self._last_queue_deadlock_detected_time = time.time() self._queue_deadlock_model = job.model self._queue_deadlock_process_id = model_process_map[job.model] + break + else: + logger.debug("Queue deadlock detected without a model causing it.") + _print_deadlock_info() + self._in_queue_deadlock = True + self._last_queue_deadlock_detected_time = time.time() + # we're going to fall back to the next model in the deque + self._queue_deadlock_model = self.job_deque[0].model elif self._in_queue_deadlock and (self._last_queue_deadlock_detected_time + 10) < time.time(): logger.debug("Queue deadlock detected") _print_deadlock_info() - logger.debug(f"Model causing deadlock: {self._queue_deadlock_model}") - if self._queue_deadlock_process_id is not None: - self._replace_inference_process(self._process_map[self._queue_deadlock_process_id]) + if self._queue_deadlock_model is not None: + logger.debug(f"Model causing deadlock: {self._queue_deadlock_model}") + if self._queue_deadlock_process_id is not None: + self._horde_model_map.expire_entry(self._queue_deadlock_model) + self._replace_inference_process(self._process_map[self._queue_deadlock_process_id]) + else: + logger.warning("Queue deadlock detected but no model causing it.") + num_processes_replaced = 0 + # We're going to replace up to two process which 
aren't busy + for process in self._process_map.values(): + if not process.is_process_busy(): + self._replace_inference_process(process) + num_processes_replaced += 1 + if num_processes_replaced >= 2: + break + self._in_queue_deadlock = False self._queue_deadlock_model = None self._queue_deadlock_process_id = None From 0c54357227e5c0a2d000485a179bb33b2c54a59c Mon Sep 17 00:00:00 2001 From: tazlin Date: Fri, 4 Oct 2024 11:18:07 -0400 Subject: [PATCH 22/32] fix: be slightly less aggressive w/ pops w/ high perf/threads --- horde_worker_regen/process_management/process_manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py index d1adb28a..991ac8d1 100644 --- a/horde_worker_regen/process_management/process_manager.py +++ b/horde_worker_regen/process_management/process_manager.py @@ -3331,11 +3331,11 @@ async def api_job_pop(self) -> None: seconds_to_wait = self.get_pending_megapixelsteps() * 0.9 if self.bridge_data.max_threads > 1: - seconds_to_wait *= 0.25 + seconds_to_wait *= 0.75 if self.bridge_data.high_performance_mode: - seconds_to_wait *= 0.25 - if seconds_to_wait < 20: + seconds_to_wait *= 0.35 + if seconds_to_wait < 25: seconds_to_wait = 1 elif self.bridge_data.moderate_performance_mode: seconds_to_wait *= 0.5 From d2f839ead60a248a48a7b0b0b3ad75dc317e2716 Mon Sep 17 00:00:00 2001 From: tazlin Date: Fri, 4 Oct 2024 11:57:30 -0400 Subject: [PATCH 23/32] fix: don't give conflicting advice about `high_memory_mode` and threads --- horde_worker_regen/process_management/process_manager.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py index 991ac8d1..003cc27c 100644 --- a/horde_worker_regen/process_management/process_manager.py +++ b/horde_worker_regen/process_management/process_manager.py @@ -4141,7 +4141,11 @@ def print_status_method(self) -> None: f"Device {device.device_name} ({device.device_index}) has less than 10GB of memory. " "This may cause issues with `high_memory_mode` enabled.", ) - elif total_memory_mb > 20_000 and not self.bridge_data.high_memory_mode: + elif ( + total_memory_mb > 20_000 + and not self.bridge_data.high_memory_mode + and self.bridge_data.max_threads == 1 + ): logger.warning( f"Device {device.device_name} ({device.device_index}) has more than 20GB of memory. " "You should enable `high_memory_mode` in your config to take advantage of this.", From 6d397e607b5f4ac1ba48abc8e40097a68e1a6497 Mon Sep 17 00:00:00 2001 From: tazlin Date: Fri, 4 Oct 2024 11:57:54 -0400 Subject: [PATCH 24/32] chore: log a message to see if inf. proc. `preload_models` is called --- horde_worker_regen/process_management/inference_process.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/horde_worker_regen/process_management/inference_process.py b/horde_worker_regen/process_management/inference_process.py index ee86cb8f..a956c9e0 100644 --- a/horde_worker_regen/process_management/inference_process.py +++ b/horde_worker_regen/process_management/inference_process.py @@ -351,13 +351,14 @@ def preload_model( seamless_tiling_enabled (bool): Whether or not seamless tiling is enabled. job_info (ImageGenerateJobPopResponse): The job to preload the model for. """ + logger.debug(f"Currently active model is {self._active_model_name}. 
Requested model is {horde_model_name}")
+
         if self._active_model_name == horde_model_name:
             return
 
         if self._is_busy:
             logger.warning("Cannot preload model while busy")
 
-        logger.debug(f"Currently active model is {self._active_model_name}")
         logger.debug(f"Preloading model {horde_model_name}")
 
         if self._active_model_name is not None:

From ced9cedcd8a9986983d93cea9b79517ac45c9b8f Mon Sep 17 00:00:00 2001
From: tazlin <tazlin.on.github@gmail.com>
Date: Fri, 4 Oct 2024 13:45:53 -0400
Subject: [PATCH 25/32] fix: don't suggest `high_memory_mode` with <=32 sys ram

---
 .../process_management/process_manager.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py
index 003cc27c..80b76f81 100644
--- a/horde_worker_regen/process_management/process_manager.py
+++ b/horde_worker_regen/process_management/process_manager.py
@@ -923,6 +923,17 @@ def max_concurrent_inference_processes(self) -> int:
 
     total_ram_bytes: int
     """The total amount of RAM on the system."""
+
+    @property
+    def total_ram_megabytes(self) -> int:
+        """The total amount of RAM on the system in megabytes."""
+        return self.total_ram_bytes // 1024 // 1024
+
+    @property
+    def total_ram_gigabytes(self) -> int:
+        """The total amount of RAM on the system in gigabytes."""
+        return self.total_ram_bytes // 1024 // 1024 // 1024
+
     target_ram_overhead_bytes: int
     """The target amount of RAM to keep free."""
 
@@ -4145,6 +4156,7 @@ def print_status_method(self) -> None:
                     total_memory_mb > 20_000
                     and not self.bridge_data.high_memory_mode
                     and self.bridge_data.max_threads == 1
+                    and self.total_ram_gigabytes > 32
                 ):
                     logger.warning(
                         f"Device {device.device_name} ({device.device_index}) has more than 20GB of memory. "
                         "You should enable `high_memory_mode` in your config to take advantage of this.",
                     )

From a9a06be8477ff490157b00b984f9ffd1d526484f Mon Sep 17 00:00:00 2001
From: tazlin <tazlin.on.github@gmail.com>
Date: Fri, 4 Oct 2024 15:00:00 -0400
Subject: [PATCH 26/32] fix: avoid killing all processes before jobs are finished

This is in response to some observed behavior where exiting with control+c
would lead to all processes getting killed before all jobs were finished
(with threads=2).

---
 .../process_management/process_manager.py | 21 ++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py
index 80b76f81..1919f486 100644
--- a/horde_worker_regen/process_management/process_manager.py
+++ b/horde_worker_regen/process_management/process_manager.py
@@ -486,6 +486,18 @@ def num_inference_processes(self) -> int:
             count += 1
         return count
 
+    def num_loaded_inference_processes(self) -> int:
+        """Return the number of inference processes that haven't been ended."""
+        count = 0
+        for p in self.values():
+            if (
+                p.process_type == HordeProcessType.INFERENCE
+                and p.last_process_state != HordeProcessState.PROCESS_ENDING
+                and p.last_process_state != HordeProcessState.PROCESS_ENDED
+            ):
+                count += 1
+        return count
+
     def num_available_inference_processes(self) -> int:
         """Return the number of inference processes that are available to accept jobs."""
         count = 0
@@ -3878,7 +3890,14 @@ async def _process_control_loop(self) -> None:
 
         # self.unload_models()
 
-        if self._shutting_down:
+        is_job_and_one_inference_process = (
+            len(self.job_deque) >= 1 and self._process_map.num_loaded_inference_processes() == 1
+        )
+
+        if self._shutting_down and not self._last_pop_recently() and not is_job_and_one_inference_process:
+            # We want to avoid too aggressively killing inference
---
 .../process_management/process_manager.py | 21 ++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py
index 80b76f81..1919f486 100644
--- a/horde_worker_regen/process_management/process_manager.py
+++ b/horde_worker_regen/process_management/process_manager.py
@@ -486,6 +486,18 @@ def num_inference_processes(self) -> int:
                 count += 1
         return count
 
+    def num_loaded_inference_processes(self) -> int:
+        """Return the number of inference processes that haven't been ended."""
+        count = 0
+        for p in self.values():
+            if (
+                p.process_type == HordeProcessType.INFERENCE
+                and p.last_process_state != HordeProcessState.PROCESS_ENDING
+                and p.last_process_state != HordeProcessState.PROCESS_ENDED
+            ):
+                count += 1
+        return count
+
     def num_available_inference_processes(self) -> int:
         """Return the number of inference processes that are available to accept jobs."""
         count = 0
@@ -3878,7 +3890,14 @@ async def _process_control_loop(self) -> None:
 
             # self.unload_models()
 
-            if self._shutting_down:
+            is_job_and_one_inference_process = (
+                len(self.job_deque) >= 1 and self._process_map.num_loaded_inference_processes() == 1
+            )
+
+            if self._shutting_down and not self._last_pop_recently() and not is_job_and_one_inference_process:
+                # We want to avoid too aggressively killing inference processes
+                # while we still have jobs to process, so we'll make sure
+                # at least 1 process stays up until the job deque is drained
                 self.end_inference_processes()
 
             if self.is_time_for_shutdown():

From 6c106ca51b54e74311f81a4feb50c9bf929f00ed Mon Sep 17 00:00:00 2001
From: tazlin
Date: Fri, 4 Oct 2024 15:21:32 -0400
Subject: [PATCH 27/32] chore: version bump

---
 horde_worker_regen/__init__.py        | 2 +-
 horde_worker_regen/_version_meta.json | 2 +-
 pyproject.toml                        | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/horde_worker_regen/__init__.py b/horde_worker_regen/__init__.py
index 495ecd23..07f09bfb 100644
--- a/horde_worker_regen/__init__.py
+++ b/horde_worker_regen/__init__.py
@@ -8,7 +8,7 @@
 
 ASSETS_FOLDER_PATH = Path(__file__).parent / "assets"
 
-__version__ = "9.1.0"
+__version__ = "9.1.1"
 
 
 import pkg_resources  # noqa: E402

diff --git a/horde_worker_regen/_version_meta.json b/horde_worker_regen/_version_meta.json
index 01fe77f9..86098c1c 100644
--- a/horde_worker_regen/_version_meta.json
+++ b/horde_worker_regen/_version_meta.json
@@ -1,5 +1,5 @@
 {
-    "recommended_version": "9.1.0",
+    "recommended_version": "9.1.1",
     "required_min_version": "9.0.2",
     "required_min_version_update_date": "2024-09-26",
     "required_min_version_info": {

diff --git a/pyproject.toml b/pyproject.toml
index 2889d8d9..ceee5ce1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "horde_worker_regen"
-version = "9.1.0"
+version = "9.1.1"
 description = "Allows you to connect to the AI Horde and generate images for users."
 authors = [
     {name = "tazlin", email = "tazlin.on.github@gmail.com"},

From 7bef6fe2a6d9c27846673175095a0cf70dcb45ec Mon Sep 17 00:00:00 2001
From: tazlin
Date: Fri, 4 Oct 2024 16:00:34 -0400
Subject: [PATCH 28/32] fix: micromamba updated cli syntax for update-runtime

---
 runtime.cmd        | 4 ++--
 update-runtime.cmd | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/runtime.cmd b/runtime.cmd
index fcc980f5..2e826009 100644
--- a/runtime.cmd
+++ b/runtime.cmd
@@ -16,7 +16,7 @@ IF EXIST CONDA GOTO APP
 call update-runtime
 
 :APP
-micromamba.exe shell hook -s cmd.exe -p "%MAMBA_ROOT_PREFIX%" -v
+micromamba.exe shell hook -s cmd.exe "%MAMBA_ROOT_PREFIX%" -v
 call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat"
-call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate windows
+call "%MAMBA_ROOT_PREFIX%\condabin\mamba.bat" activate windows
 %*

diff --git a/update-runtime.cmd b/update-runtime.cmd
index adc817f4..518c4362 100644
--- a/update-runtime.cmd
+++ b/update-runtime.cmd
@@ -45,9 +45,9 @@ IF EXIST CONDA GOTO WORKAROUND_END
 
 REM Check if hordelib argument is defined
 
-micromamba.exe shell hook -s cmd.exe -p %MAMBA_ROOT_PREFIX% -v
+micromamba.exe shell hook -s cmd.exe %MAMBA_ROOT_PREFIX% -v
 call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat"
-call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate windows
+call "%MAMBA_ROOT_PREFIX%\condabin\mamba.bat" activate windows
 
 python -s -m pip install torch==2.4.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu124 -U
From c01927f26d4268c78c83f3536ba139657e1ce4a0 Mon Sep 17 00:00:00 2001
From: tazlin
Date: Fri, 4 Oct 2024 16:11:53 -0400
Subject: [PATCH 29/32] fix: conflicting torchvision dep in update runtime

---
 update-runtime.cmd | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/update-runtime.cmd b/update-runtime.cmd
index 518c4362..4abcee6f 100644
--- a/update-runtime.cmd
+++ b/update-runtime.cmd
@@ -49,7 +49,7 @@ micromamba.exe shell hook -s cmd.exe %MAMBA_ROOT_PREFIX% -v
 call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat"
 call "%MAMBA_ROOT_PREFIX%\condabin\mamba.bat" activate windows
 
-python -s -m pip install torch==2.4.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu124 -U
+python -s -m pip install torch==2.4.1 torchvision==0.19.1 --index-url https://download.pytorch.org/whl/cu124 -U
 
 if defined hordelib (
     python -s -m pip uninstall -y hordelib horde_engine horde_model_reference

From e52b354c9b969a50694459aed4cd4fbf38d20a5c Mon Sep 17 00:00:00 2001
From: tazlin
Date: Fri, 4 Oct 2024 17:02:53 -0400
Subject: [PATCH 30/32] fix: flag ending processes correctly

---
 .../process_management/process_manager.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/horde_worker_regen/process_management/process_manager.py b/horde_worker_regen/process_management/process_manager.py
index 1919f486..bf60dcf1 100644
--- a/horde_worker_regen/process_management/process_manager.py
+++ b/horde_worker_regen/process_management/process_manager.py
@@ -341,7 +341,7 @@ def on_heartbeat(self, process_id: int, heartbeat_type: HordeHeartbeatType) -> N
         else:
             self[process_id].heartbeats_inference_steps = 0
 
-    def on_process_ended(self, process_id: int) -> None:
+    def on_process_ending(self, process_id: int) -> None:
         """Update the process map when a process has ended.
 
         Args:
@@ -1467,7 +1467,7 @@ def _end_inference_process(self, process_info: HordeProcessInfo) -> None:
         :param process_info: HordeProcessInfo for the process to end
         :return: None
         """
-        self._process_map.on_process_ended(process_id=process_info.process_id)
+        self._process_map.on_process_ending(process_id=process_info.process_id)
 
         if process_info.loaded_horde_model_name is not None:
             self._horde_model_map.expire_entry(process_info.loaded_horde_model_name)
@@ -1578,7 +1578,7 @@ def end_safety_processes(self) -> None:
         process_info.safe_send_message(HordeControlMessage(control_flag=HordeControlFlag.END_PROCESS))
 
         # Update the process map
-        self._process_map.on_process_ended(process_id=process_info.process_id)
+        self._process_map.on_process_ending(process_id=process_info.process_id)
 
         logger.info(f"Ended safety process {process_info.process_id}")
 
@@ -1643,6 +1643,10 @@ def receive_and_handle_process_messages(self) -> None:
                     new_state=message.process_state,
                 )
 
+                if message.process_state == HordeProcessState.PROCESS_ENDING:
+                    logger.info(f"Process {message.process_id} is ending")
+                    self._process_map.on_process_ending(process_id=message.process_id)
+
                 if message.process_state == HordeProcessState.PROCESS_ENDED:
                     logger.info(f"Process {message.process_id} has ended with message: {message.info}")
                 else:

From 107aa962c99ebb08179123d9c5d4fb5eea0cf4d9 Mon Sep 17 00:00:00 2001
From: tazlin
Date: Sat, 5 Oct 2024 10:01:44 -0400
Subject: [PATCH 31/32] fix: correctly download via `load_large_models`

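Hypothetical usage illustration (assumes a config containing
`load_large_models: true` and that the variable is not already set in
the environment):

    import os

    from horde_worker_regen.load_env_vars import load_env_vars_from_config

    load_env_vars_from_config()

    # The config value should now be mirrored into the environment:
    assert os.environ.get("AI_HORDE_MODEL_META_LARGE_MODELS") == "1"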
---
 horde_worker_regen/load_env_vars.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/horde_worker_regen/load_env_vars.py b/horde_worker_regen/load_env_vars.py
index 4ac4f0a0..db1c7e86 100644
--- a/horde_worker_regen/load_env_vars.py
+++ b/horde_worker_regen/load_env_vars.py
@@ -72,6 +72,11 @@ def load_env_vars_from_config() -> None:  # FIXME: there is a dynamic way to do
         )
         os.environ["AI_HORDE_URL"] = custom_horde_url
 
+    if "load_large_models" in config and os.getenv("AI_HORDE_MODEL_META_LARGE_MODELS") is None:
+        config_value = config["load_large_models"]
+        if config_value is True:
+            os.environ["AI_HORDE_MODEL_META_LARGE_MODELS"] = "1"
+
 
 if __name__ == "__main__":
     load_env_vars_from_config()

From 5e203d9ae2952395d63a0aed54e421c6ecabd2c1 Mon Sep 17 00:00:00 2001
From: HPPinata <83947761+HPPinata@users.noreply.github.com>
Date: Sun, 6 Oct 2024 06:56:00 +0200
Subject: [PATCH 32/32] fix: docker installed python deps

This was incomplete and masked by 22.04 also using 3.11 as its Python
version. python3.11-dev is technically optional, but included because
it's needed on the AMD side. venv creation HAS to be called with the
full version, otherwise the dist default is used. pip only needs to be
updated inside the venv.
---
 Dockerfiles/Dockerfile.12.1.1-22.04 | 7 ++++---
 Dockerfiles/Dockerfile.12.2.2-22.04 | 7 ++++---
 Dockerfiles/Dockerfile.12.3.2-22.04 | 7 ++++---
 Dockerfiles/Dockerfile.12.4-22.04   | 7 ++++---
 4 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/Dockerfiles/Dockerfile.12.1.1-22.04 b/Dockerfiles/Dockerfile.12.1.1-22.04
index d0a0b29f..655b3e34 100644
--- a/Dockerfiles/Dockerfile.12.1.1-22.04
+++ b/Dockerfiles/Dockerfile.12.1.1-22.04
@@ -8,14 +8,15 @@ RUN apt-get update && \
     apt-get install -y --no-install-recommends \
     python3.11 \
     python3-pip \
-    python3-venv \
+    python3.11-dev \
+    python3.11-venv \
+    python3.11-distutils \
     libgl1 \
     git
 
 RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \
     cd /horde-worker-reGen && \
-    python3.11 -m pip install --upgrade pip && \
-    python3 -m venv venv && \
+    python3.11 -m venv venv && \
     . venv/bin/activate && \
     python -m pip install --upgrade pip && \
     python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu124 && \

diff --git a/Dockerfiles/Dockerfile.12.2.2-22.04 b/Dockerfiles/Dockerfile.12.2.2-22.04
index 819198cc..7a81e30f 100644
--- a/Dockerfiles/Dockerfile.12.2.2-22.04
+++ b/Dockerfiles/Dockerfile.12.2.2-22.04
@@ -8,14 +8,15 @@ RUN apt-get update && \
    apt-get install -y --no-install-recommends \
     python3.11 \
     python3-pip \
-    python3-venv \
+    python3.11-dev \
+    python3.11-venv \
+    python3.11-distutils \
     libgl1 \
     git
 
 RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \
     cd /horde-worker-reGen && \
-    python3.11 -m pip install --upgrade pip && \
-    python3 -m venv venv && \
+    python3.11 -m venv venv && \
     . venv/bin/activate && \
     python -m pip install --upgrade pip && \
     python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu124 && \

diff --git a/Dockerfiles/Dockerfile.12.3.2-22.04 b/Dockerfiles/Dockerfile.12.3.2-22.04
index 06c1409d..cc27d29b 100644
--- a/Dockerfiles/Dockerfile.12.3.2-22.04
+++ b/Dockerfiles/Dockerfile.12.3.2-22.04
@@ -8,14 +8,15 @@ RUN apt-get update && \
     apt-get install -y --no-install-recommends \
     python3.11 \
     python3-pip \
-    python3-venv \
+    python3.11-dev \
+    python3.11-venv \
+    python3.11-distutils \
     libgl1 \
     git
 
 RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \
     cd /horde-worker-reGen && \
-    python3.11 -m pip install --upgrade pip && \
-    python3 -m venv venv && \
+    python3.11 -m venv venv && \
     . venv/bin/activate && \
     python -m pip install --upgrade pip && \
     python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu124 && \

diff --git a/Dockerfiles/Dockerfile.12.4-22.04 b/Dockerfiles/Dockerfile.12.4-22.04
index 418bd690..7ae8b1e6 100644
--- a/Dockerfiles/Dockerfile.12.4-22.04
+++ b/Dockerfiles/Dockerfile.12.4-22.04
@@ -8,14 +8,15 @@ RUN apt-get update && \
     apt-get install -y --no-install-recommends \
     python3.11 \
     python3-pip \
-    python3-venv \
+    python3.11-dev \
+    python3.11-venv \
+    python3.11-distutils \
     libgl1 \
     git
 
 RUN git clone https://github.com/Haidra-Org/horde-worker-reGen.git && \
     cd /horde-worker-reGen && \
-    python3.11 -m pip install --upgrade pip && \
-    python3 -m venv venv && \
+    python3.11 -m venv venv && \
     . venv/bin/activate && \
     python -m pip install --upgrade pip && \
     python -m pip install -r /horde-worker-reGen/requirements.txt -U --extra-index-url https://download.pytorch.org/whl/cu124 && \