diff --git a/.gitignore b/.gitignore
index 51e2926..09b14c8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -139,8 +139,19 @@ out.json
 .vscode/launch.json
 .vscode/settings.json
-examples/requested_images/*.*
+requested_images/*.*
+examples/ai_horde_client/image/requested_images/*.*
+
+requested_text/*.*
+examples/ai_horde_client/text/requested_text/*.*
+
 _version.py
+# to prevent mkdocs from including this file in the documentation
+_version.md
 tests/testing_result_images/*
 !tests/testing_result_images/.results_go_here
+
+workers.txt
+
+ai_horde_codegen.py
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 4879c64..b0cdcbc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -27,5 +27,6 @@ repos:
         types-pytz,
         types-setuptools,
         types-urllib3,
+        types-aiofiles,
         StrEnum
     ]
diff --git a/codegen/ai_horde/README.md b/codegen/ai_horde/README.md
index 4dfc436..21439fc 100644
--- a/codegen/ai_horde/README.md
+++ b/codegen/ai_horde/README.md
@@ -25,25 +25,25 @@ api-spec-converter --from=swagger_2 --to=openapi_3 swagger.json > swagger_openap
 Generate the code:
 
 ```bash
-datamodel-codegen --input codegen/swagger_openapi3.json --output codegen/ai_horde_codegen.py --output-model-type pydantic_v2.BaseModel --use-union-operator --field-constraints
+datamodel-codegen --input swagger_openapi3.json --output ai_horde_codegen.py --output-model-type pydantic_v2.BaseModel --use-union-operator --field-constraints
 ```
 
 Standardize quotes with black:
 
 ```bash
-black codegen/ai_horde_codegen.py
+black ai_horde_codegen.py
 ```
 
 Clean up issues with datamodel-code-generator v0.21.1
 
 ```bash
-python codegen/codegen_regex_fixes.py codegen/ai_horde_codegen.py
+python codegen_regex_fixes.py ai_horde_codegen.py
 ```
 
 Format again, this time truncating the lines with `--preview`, and auto-fix lint problems
 
 ```bash
-black codegen/ai_horde_codegen.py --unstable --enable-unstable-feature string_processing
-ruff codegen/ai_horde_codegen.py --fix
-black codegen/ai_horde_codegen.py --unstable --enable-unstable-feature string_processing # for good measure
-ruff codegen/ai_horde_codegen.py --fix # for good measure
+black ai_horde_codegen.py --unstable --enable-unstable-feature string_processing
+ruff ai_horde_codegen.py --fix
+black ai_horde_codegen.py --unstable --enable-unstable-feature string_processing # for good measure
+ruff ai_horde_codegen.py --fix # for good measure
 ```
 
 * Fix Enum classes
diff --git a/docs/api_to_sdk_map.md b/docs/api_to_sdk_map.md
index e2348c8..2bb9a35 100644
--- a/docs/api_to_sdk_map.md
+++ b/docs/api_to_sdk_map.md
@@ -21,6 +21,7 @@ This is a mapping of the AI-Horde API models (defined at [https://stablehorde.ne
 | /v2/interrogate/status/{id} | DELETE | [AlchemyDeleteRequest][horde_sdk.ai_horde_api.apimodels.alchemy._status.AlchemyDeleteRequest] |
 | /v2/interrogate/status/{id} | GET | [AlchemyStatusRequest][horde_sdk.ai_horde_api.apimodels.alchemy._status.AlchemyStatusRequest] |
 | /v2/interrogate/submit | POST | [AlchemyJobSubmitRequest][horde_sdk.ai_horde_api.apimodels.alchemy._submit.AlchemyJobSubmitRequest] |
+| /v2/kudos/transfer | POST | [KudosTransferRequest][horde_sdk.ai_horde_api.apimodels._kudos.KudosTransferRequest] |
 | /v2/stats/img/models | GET | [ImageStatsModelsRequest][horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsRequest] |
 | /v2/stats/img/totals | GET | [ImageStatsModelsTotalRequest][horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsTotalRequest] |
 | /v2/stats/text/models | GET | [TextStatsModelsRequest][horde_sdk.ai_horde_api.apimodels._stats.TextStatsModelsRequest] |
@@ -30,14 +31,19 @@ This is a mapping of the AI-Horde API models (defined at [https://stablehorde.ne
 | /v2/status/models/{model_name} | GET | [HordeStatusModelsSingleRequest][horde_sdk.ai_horde_api.apimodels._status.HordeStatusModelsSingleRequest] |
 | /v2/status/news | GET | [NewsRequest][horde_sdk.ai_horde_api.apimodels._status.NewsRequest] |
 | /v2/status/performance | GET | [HordePerformanceRequest][horde_sdk.ai_horde_api.apimodels._status.HordePerformanceRequest] |
+| /v2/users | GET | [ListUsersDetailsRequest][horde_sdk.ai_horde_api.apimodels._users.ListUsersDetailsRequest] |
+| /v2/users/{user_id} | PUT | [ModifyUserRequest][horde_sdk.ai_horde_api.apimodels._users.ModifyUserRequest] |
+| /v2/users/{user_id} | GET | [SingleUserDetailsRequest][horde_sdk.ai_horde_api.apimodels._users.SingleUserDetailsRequest] |
 | /v2/workers | GET | [AllWorkersDetailsRequest][horde_sdk.ai_horde_api.apimodels.workers._workers.AllWorkersDetailsRequest] |
+| /v2/workers/{worker_id} | DELETE | [DeleteWorkerRequest][horde_sdk.ai_horde_api.apimodels.workers._workers.DeleteWorkerRequest] |
+| /v2/workers/{worker_id} | PUT | [ModifyWorkerRequest][horde_sdk.ai_horde_api.apimodels.workers._workers.ModifyWorkerRequest] |
 | /v2/workers/{worker_id} | GET | [SingleWorkerDetailsRequest][horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerDetailsRequest] |
 
 ## Responses
 
 | API Endpoint | HTTP Status Code | SDK Response Type |
 | ------------ | ----------- | ----------------- |
-| /v2/find_user | 200 | [FindUserResponse][horde_sdk.ai_horde_api.apimodels._find_user.FindUserResponse] |
+| /v2/find_user | 200 | [UserDetailsResponse][horde_sdk.ai_horde_api.apimodels._users.UserDetailsResponse] |
 | /v2/generate/async | 200 | [ImageGenerateAsyncDryRunResponse][horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerateAsyncDryRunResponse] |
 | /v2/generate/async | 202 | [ImageGenerateAsyncResponse][horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerateAsyncResponse] |
 | /v2/generate/check/{id} | 200 | [ImageGenerateCheckResponse][horde_sdk.ai_horde_api.apimodels.generate._check.ImageGenerateCheckResponse] |
@@ -53,14 +59,17 @@ This is a mapping of the AI-Horde API models (defined at [https://stablehorde.ne
 | /v2/interrogate/pop | 200 | [AlchemyPopResponse][horde_sdk.ai_horde_api.apimodels.alchemy._pop.AlchemyPopResponse] |
 | /v2/interrogate/status/{id} | 200 | [AlchemyStatusResponse][horde_sdk.ai_horde_api.apimodels.alchemy._status.AlchemyStatusResponse] |
 | /v2/interrogate/submit | 200 | [AlchemyJobSubmitResponse][horde_sdk.ai_horde_api.apimodels.alchemy._submit.AlchemyJobSubmitResponse] |
-| /v2/stats/img/models | 200 | [ImageModelStatsResponse][horde_sdk.ai_horde_api.apimodels._stats.ImageModelStatsResponse] |
+| /v2/kudos/transfer | 200 | [KudosTransferResponse][horde_sdk.ai_horde_api.apimodels._kudos.KudosTransferResponse] |
+| /v2/stats/img/models | 200 | [ImageStatsModelsResponse][horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsResponse] |
 | /v2/stats/img/totals | 200 | [ImageStatsModelsTotalResponse][horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsTotalResponse] |
-| /v2/stats/text/models | 200 | [TextModelStatsResponse][horde_sdk.ai_horde_api.apimodels._stats.TextModelStatsResponse] |
+| /v2/stats/text/models | 200 | [TextStatsModelResponse][horde_sdk.ai_horde_api.apimodels._stats.TextStatsModelResponse] |
 | /v2/stats/text/totals | 200 | [TextStatsModelsTotalResponse][horde_sdk.ai_horde_api.apimodels._stats.TextStatsModelsTotalResponse] |
 | /v2/status/heartbeat | 200 | [AIHordeHeartbeatResponse][horde_sdk.ai_horde_api.apimodels._status.AIHordeHeartbeatResponse] |
 | /v2/status/models | 200 | [HordeStatusModelsAllResponse][horde_sdk.ai_horde_api.apimodels._status.HordeStatusModelsAllResponse] |
 | /v2/status/models/{model_name} | 200 | [HordeStatusModelsSingleResponse][horde_sdk.ai_horde_api.apimodels._status.HordeStatusModelsSingleResponse] |
 | /v2/status/news | 200 | [NewsResponse][horde_sdk.ai_horde_api.apimodels._status.NewsResponse] |
 | /v2/status/performance | 200 | [HordePerformanceResponse][horde_sdk.ai_horde_api.apimodels._status.HordePerformanceResponse] |
+| /v2/users | 200 | [ListUsersDetailsResponse][horde_sdk.ai_horde_api.apimodels._users.ListUsersDetailsResponse] |
+| /v2/users/{user_id} | 200 | [UserDetailsResponse][horde_sdk.ai_horde_api.apimodels._users.UserDetailsResponse] |
 | /v2/workers | 200 | [AllWorkersDetailsResponse][horde_sdk.ai_horde_api.apimodels.workers._workers.AllWorkersDetailsResponse] |
 | /v2/workers/{worker_id} | 200 | [SingleWorkerDetailsResponse][horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerDetailsResponse] |
diff --git a/docs/api_to_sdk_payload_map.json b/docs/api_to_sdk_payload_map.json
index 5c46cec..d0a0b40 100644
--- a/docs/api_to_sdk_payload_map.json
+++ b/docs/api_to_sdk_payload_map.json
@@ -26,6 +26,11 @@
         "DELETE": "horde_sdk.ai_horde_api.apimodels.generate.text._status.DeleteTextGenerateRequest",
         "GET": "horde_sdk.ai_horde_api.apimodels.generate.text._status.TextGenerateStatusRequest"
     },
+    "/v2/workers/{worker_id}": {
+        "DELETE": "horde_sdk.ai_horde_api.apimodels.workers._workers.DeleteWorkerRequest",
+        "PUT": "horde_sdk.ai_horde_api.apimodels.workers._workers.ModifyWorkerRequest",
+        "GET": "horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerDetailsRequest"
+    },
     "/v2/find_user": {
         "GET": "horde_sdk.ai_horde_api.apimodels._find_user.FindUserRequest"
     },
@@ -56,12 +61,19 @@
     "/v2/stats/img/totals": {
         "GET": "horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsTotalRequest"
     },
+    "/v2/kudos/transfer": {
+        "POST": "horde_sdk.ai_horde_api.apimodels._kudos.KudosTransferRequest"
+    },
+    "/v2/users": {
+        "GET": "horde_sdk.ai_horde_api.apimodels._users.ListUsersDetailsRequest"
+    },
+    "/v2/users/{user_id}": {
+        "PUT": "horde_sdk.ai_horde_api.apimodels._users.ModifyUserRequest",
+        "GET": "horde_sdk.ai_horde_api.apimodels._users.SingleUserDetailsRequest"
+    },
     "/v2/status/news": {
         "GET": "horde_sdk.ai_horde_api.apimodels._status.NewsRequest"
     },
-    "/v2/workers/{worker_id}": {
-        "GET": "horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerDetailsRequest"
-    },
     "/v2/generate/text/async": {
         "POST": "horde_sdk.ai_horde_api.apimodels.generate.text._async.TextGenerateAsyncRequest"
     },
diff --git a/docs/api_to_sdk_response_map.json b/docs/api_to_sdk_response_map.json
index b97b198..6cec4ce 100644
--- a/docs/api_to_sdk_response_map.json
+++ b/docs/api_to_sdk_response_map.json
@@ -23,8 +23,11 @@
     "/v2/generate/text/status/{id}": {
         "200": "horde_sdk.ai_horde_api.apimodels.generate.text._status.TextGenerateStatusResponse"
     },
+    "/v2/workers/{worker_id}": {
+        "200": "horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerDetailsResponse"
+    },
     "/v2/find_user": {
-        "200": "horde_sdk.ai_horde_api.apimodels._find_user.FindUserResponse"
+        "200": "horde_sdk.ai_horde_api.apimodels._users.UserDetailsResponse"
     },
     "/v2/status/performance": {
         "200": "horde_sdk.ai_horde_api.apimodels._status.HordePerformanceResponse"
@@ -49,17 +52,23 @@
         "200": "horde_sdk.ai_horde_api.apimodels.base.JobSubmitResponse"
     },
     "/v2/stats/img/models": {
-        "200": "horde_sdk.ai_horde_api.apimodels._stats.ImageModelStatsResponse"
+        "200": "horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsResponse"
     },
     "/v2/stats/img/totals": {
         "200": "horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsTotalResponse"
     },
+    "/v2/kudos/transfer": {
+        "200": "horde_sdk.ai_horde_api.apimodels._kudos.KudosTransferResponse"
+    },
+    "/v2/users": {
+        "200": "horde_sdk.ai_horde_api.apimodels._users.ListUsersDetailsResponse"
+    },
+    "/v2/users/{user_id}": {
+        "200": "horde_sdk.ai_horde_api.apimodels._users.UserDetailsResponse"
+    },
     "/v2/status/news": {
         "200": "horde_sdk.ai_horde_api.apimodels._status.NewsResponse"
     },
-    "/v2/workers/{worker_id}": {
-        "200": "horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerDetailsResponse"
-    },
     "/v2/generate/text/async": {
         "200": "horde_sdk.ai_horde_api.apimodels.generate.text._async.TextGenerateAsyncDryRunResponse",
         "202": "horde_sdk.ai_horde_api.apimodels.generate.text._async.TextGenerateAsyncResponse"
@@ -71,7 +80,7 @@
         "200": "horde_sdk.ai_horde_api.apimodels.base.JobSubmitResponse"
     },
     "/v2/stats/text/models": {
-        "200": "horde_sdk.ai_horde_api.apimodels._stats.TextModelStatsResponse"
+        "200": "horde_sdk.ai_horde_api.apimodels._stats.TextStatsModelResponse"
     },
     "/v2/stats/text/totals": {
         "200": "horde_sdk.ai_horde_api.apimodels._stats.TextStatsModelsTotalResponse"
diff --git a/docs/faq.md b/docs/faq.md
index bf3f52b..8dbd989 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -4,7 +4,7 @@ title: Frequently Asked Questions
 
 > The objects returned by horde_sdk are immutable. If you need to change
 > something, you'll need to create a new object with the changes you
-> want. See the [section in getting started](../getting_started/#faux-immutability-or-why-cant-i-change-this-attribute) for more info.
+> want. See the [section in getting started](getting_started.md#faux-immutability) for more info.
 
 # I don't like types. Why is this library so focused on them?
diff --git a/docs/getting_started.md b/docs/getting_started.md
index 3a761ad..b999ac4 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -21,7 +21,7 @@ pip install horde_sdk
   - [AIHordeAPIManualClient][horde_sdk.ai_horde_api.ai_horde_clients.AIHordeAPIManualClient] (more control, manual cleanup required)
 
-2. Find the `*Request` object type appropriate to what you want to do. (see also: [naming](../getting_started/#naming))
+2. Find the `*Request` object type appropriate to what you want to do. (see also: [naming](getting_started.md#naming))
   - These objects types are always found in the `apimodels` namespace of the `*_api` sub package.
   - e.g., [ImageGenerateAsyncRequest][horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerateAsyncRequest]
   - **Note** that there is always one or more response types mapped to a request. You can get the default success response `type` like so:
@@ -147,7 +147,8 @@ A few endpoints, such as `/v2/generate/async` ([ImageGenerateAsyncRequest][horde
 trailing underscore, as in `id_`. Ingested json still will work with the field `id` (its a alias).
 
-### Faux Immutability (or 'Why can't I change this attribute?!')
+### Faux Immutability
+> 'Why can't I change this attribute?!
 
 - All of the \*Request and \*Response class, and many other classes, implement faux immutability, and their attributes are **read only**.
diff --git a/docs/horde_sdk/_version.md b/docs/horde_sdk/_version.md deleted file mode 100644 index f17be70..0000000 --- a/docs/horde_sdk/_version.md +++ /dev/null @@ -1,2 +0,0 @@ -# _version -::: horde_sdk._version diff --git a/docs/horde_sdk/ai_horde_api/apimodels/_kudos.md b/docs/horde_sdk/ai_horde_api/apimodels/_kudos.md new file mode 100644 index 0000000..57ee0e8 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/_kudos.md @@ -0,0 +1,2 @@ +# _kudos +::: horde_sdk.ai_horde_api.apimodels._kudos diff --git a/docs/horde_sdk/ai_horde_api/apimodels/_users.md b/docs/horde_sdk/ai_horde_api/apimodels/_users.md new file mode 100644 index 0000000..8d4f8e4 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/_users.md @@ -0,0 +1,2 @@ +# _users +::: horde_sdk.ai_horde_api.apimodels._users diff --git a/docs/request_field_names_and_descriptions.json b/docs/request_field_names_and_descriptions.json index e33e39f..7f0bbbc 100644 --- a/docs/request_field_names_and_descriptions.json +++ b/docs/request_field_names_and_descriptions.json @@ -1,710 +1,1422 @@ { - "AIHordeHeartbeatRequest": [ - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "AlchemyAsyncRequest": [ - [ - "apikey", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ], - [ - "forms", - null - ], - [ - "source_image", - null - ], - [ - "slow_workers", - null - ] - ], - "AlchemyDeleteRequest": [ - [ - "id_", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "AlchemyJobSubmitRequest": [ - [ - "apikey", - null - ], - [ - "id_", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ], - [ - "result", - null - ], - [ - "state", - null - ] - ], - "AlchemyPopRequest": [ - [ - "apikey", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ], - [ - "name", - null - ], - [ - "priority_usernames", - null - ], - [ - "forms", - null - ] - ], - "AlchemyStatusRequest": [ - [ - "apikey", - null - ], - [ - "id_", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "AllWorkersDetailsRequest": [ - [ - "apikey", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ], - [ - "type_", - null - ] - ], - "DeleteImageGenerateRequest": [ - [ - "id_", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "DeleteTextGenerateRequest": [ - [ - "id_", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "FindUserRequest": [ - [ - "apikey", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "HordePerformanceRequest": [ - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "HordeStatusModelsAllRequest": [ - [ - "accept", - null - ], - [ - "client_agent", - null - ], - [ - "type_", - "The type of model to filter by." 
- ], - [ - "min_count", - null - ], - [ - "max_count", - null - ], - [ - "model_state", - null - ] - ], - "HordeStatusModelsSingleRequest": [ - [ - "accept", - null - ], - [ - "client_agent", - null - ], - [ - "model_name", - null - ] - ], - "ImageGenerateAsyncRequest": [ - [ - "trusted_workers", - null - ], - [ - "slow_workers", - null - ], - [ - "workers", - null - ], - [ - "worker_blacklist", - null - ], - [ - "models", - null - ], - [ - "dry_run", - null - ], - [ - "apikey", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ], - [ - "prompt", - null - ], - [ - "params", - null - ], - [ - "nsfw", - null - ], - [ - "censor_nsfw", - null - ], - [ - "r2", - null - ], - [ - "shared", - null - ], - [ - "replacement_filter", - null - ], - [ - "source_image", - null - ], - [ - "source_processing", - null - ], - [ - "source_mask", - null - ], - [ - "extra_source_images", - null - ] - ], - "ImageGenerateCheckRequest": [ - [ - "id_", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "ImageGenerateJobPopRequest": [ - [ - "amount", - "How many jobvs to pop at the same time" - ], - [ - "bridge_agent", - "The worker name, version and website." - ], - [ - "models", - null - ], - [ - "name", - "The Name of the Worker." - ], - [ - "nsfw", - "Whether this worker can generate NSFW requests or not." - ], - [ - "priority_usernames", - null - ], - [ - "require_upfront_kudos", - "If True, this worker will only pick up requests where the owner has the required kudos to consume already available." - ], - [ - "threads", - "How many threads this worker is running. This is used to accurately the current power available in the horde." - ], - [ - "apikey", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ], - [ - "bridge_version", - null - ], - [ - "max_pixels", - null - ], - [ - "blacklist", - null - ], - [ - "allow_img2img", - null - ], - [ - "allow_painting", - null - ], - [ - "allow_unsafe_ipaddr", - null - ], - [ - "allow_post_processing", - null - ], - [ - "allow_controlnet", - null - ], - [ - "allow_sdxl_controlnet", - null - ], - [ - "allow_lora", - null - ] - ], - "ImageGenerateStatusRequest": [ - [ - "id_", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "ImageGenerationJobSubmitRequest": [ - [ - "apikey", - null - ], - [ - "id_", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ], - [ - "generation", - null - ], - [ - "state", - null - ], - [ - "seed", - null - ], - [ - "censored", - null - ], - [ - "gen_metadata", - null - ] - ], - "ImageStatsModelsRequest": [ - [ - "accept", - null - ], - [ - "client_agent", - null - ], - [ - "model_state", - "The state of the models to get stats for. Known models are models that are known to the system." 
- ] - ], - "ImageStatsModelsTotalRequest": [ - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "NewsRequest": [ - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "SingleWorkerDetailsRequest": [ - [ - "apikey", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ], - [ - "worker_id", - null - ] - ], - "TextGenerateAsyncRequest": [ - [ - "trusted_workers", - null - ], - [ - "slow_workers", - null - ], - [ - "workers", - null - ], - [ - "worker_blacklist", - null - ], - [ - "models", - null - ], - [ - "dry_run", - null - ], - [ - "apikey", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ], - [ - "params", - null - ], - [ - "prompt", - "The prompt which will be sent to KoboldAI to generate text." - ], - [ - "allow_downgrade", - "When true and the request requires upfront kudos and the account does not have enough The request will be downgraded in max context and max tokens so that it does not need upfront kudos." - ], - [ - "disable_batching", - "When true, This request will not use batching. This will allow you to retrieve accurate seeds. Feature is restricted to Trusted users and Patreons." - ], - [ - "extra_source_images", - null - ], - [ - "proxied_account", - "If using a service account as a proxy, provide this value to identify the actual account from which this request is coming from." - ], - [ - "softprompt", - "Specify which softprompt needs to be used to service this request." - ], - [ - "webhook", - "Provide a URL where the AI Horde will send a POST call after each delivered generation. The request will include the details of the job as well as the request ID." - ] - ], - "TextGenerateJobPopRequest": [ - [ - "amount", - "How many jobvs to pop at the same time" - ], - [ - "bridge_agent", - "The worker name, version and website." - ], - [ - "models", - null - ], - [ - "name", - "The Name of the Worker." - ], - [ - "nsfw", - "Whether this worker can generate NSFW requests or not." - ], - [ - "priority_usernames", - null - ], - [ - "require_upfront_kudos", - "If True, this worker will only pick up requests where the owner has the required kudos to consume already available." - ], - [ - "threads", - "How many threads this worker is running. This is used to accurately the current power available in the horde." - ], - [ - "max_length", - "The maximum amount of tokens this worker can generate." - ], - [ - "max_context_length", - "The max amount of context to submit to this AI for sampling." - ], - [ - "softprompts", - "The available softprompt files on this worker for the currently running model." - ], - [ - "apikey", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "TextGenerateStatusRequest": [ - [ - "id_", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "TextGenerationJobSubmitRequest": [ - [ - "apikey", - null - ], - [ - "id_", - null - ], - [ - "accept", - null - ], - [ - "client_agent", - null - ], - [ - "generation", - null - ], - [ - "state", - null - ], - [ - "gen_metadata", - null - ] - ], - "TextStatsModelsRequest": [ - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ], - "TextStatsModelsTotalRequest": [ - [ - "accept", - null - ], - [ - "client_agent", - null - ] - ] + "AIHordeHeartbeatRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "AlchemyAsyncRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "forms": { + "description": "The list of forms (types of post-processing/interrogation/captioning/etc) to request.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.alchemy._async.AlchemyAsyncRequestFormItem]" + ] + }, + "source_image": { + "description": "The public URL of the source image or a base64 string to use.", + "types": [ + "str" + ] + }, + "slow_workers": { + "description": "Whether to use the slower workers. Costs additional kudos if `False`.", + "types": [ + "bool" + ] + } + }, + "AlchemyDeleteRequest": { + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "AlchemyJobSubmitRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "result": { + "description": "The result of the alchemy job.", + "types": [ + "str" + ] + }, + "state": { + "description": "The state of this generation. See `GENERATION_STATE` for more information.", + "types": [ + "horde_sdk.ai_horde_api.consts.GENERATION_STATE" + ] + } + }, + "AlchemyPopRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "name": { + "description": "The name of the request. 
This is used to identify the request in the logs.", + "types": [ + "str" + ] + }, + "priority_usernames": { + "description": "The usernames that should be prioritized for this request.", + "types": [ + "list[str]" + ] + }, + "forms": { + "description": "The types of alchemy that should be generated.", + "types": [ + "list[horde_sdk.ai_horde_api.consts.KNOWN_ALCHEMY_TYPES]" + ] + } + }, + "AlchemyStatusRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "AllWorkersDetailsRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "type_": { + "description": "Filter workers by type. Default is 'all' which returns all workers.", + "types": [ + "horde_sdk.ai_horde_api.consts.WORKER_TYPE" + ] + } + }, + "DeleteImageGenerateRequest": { + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "DeleteTextGenerateRequest": { + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "DeleteWorkerRequest": { + "worker_id": { + "description": "The UUID of the worker in question for this request.", + "types": [ + "str", + "horde_sdk.ai_horde_api.fields.WorkerID" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "FindUserRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. 
See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "HordePerformanceRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "HordeStatusModelsAllRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "type_": { + "description": "The type of model to filter by.", + "types": [ + "horde_sdk.ai_horde_api.consts.MODEL_TYPE" + ] + }, + "min_count": { + "description": "Filter only models that have at least this amount of threads serving.", + "types": [ + "int", + "None" + ] + }, + "max_count": { + "description": "Filter only models that have at most this amount of threads serving.", + "types": [ + "int", + "None" + ] + }, + "model_state": { + "description": "If 'known', only show stats for known models in the model reference. If 'custom' only show stats for custom\nmodels. If 'all' shows stats for all models.", + "types": [ + "horde_sdk.ai_horde_api.consts.MODEL_STATE" + ] + } + }, + "HordeStatusModelsSingleRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "model_name": { + "description": "The name of the model to request.", + "types": [ + "str" + ] + } + }, + "ImageGenerateAsyncRequest": { + "trusted_workers": { + "description": "When true, only trusted workers will serve this request. When False, Evaluating workers will also be used\nwhich can increase speed but adds more risk!", + "types": [ + "bool" + ] + }, + "slow_workers": { + "description": "When True, allows slower workers to pick up this request. Disabling this incurs an extra kudos cost.", + "types": [ + "bool" + ] + }, + "workers": { + "description": "A list of worker IDs to use for this request. If empty, any worker can pick up the request. Using this incurs\nand extra kudos cost.", + "types": [ + "list[str]" + ] + }, + "worker_blacklist": { + "description": "If true, the worker list will be treated as a blacklist instead of a whitelist.", + "types": [ + "list[str]" + ] + }, + "models": { + "description": "The generative models to use for this request.", + "types": [ + "list[str]" + ] + }, + "dry_run": { + "description": "If true, the request will not be processed, but will return a response with the estimated kudos cost.", + "types": [ + "bool" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. 
See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "prompt": { + "description": "The prompt which will be sent to Stable Diffusion to generate an image.", + "types": [ + "str" + ] + }, + "params": { + "description": "The parameters for the image generation.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerationInputPayload", + "None" + ] + }, + "nsfw": { + "description": "Set to true if this request is NSFW. This will skip workers which censor images.", + "types": [ + "bool", + "None" + ] + }, + "censor_nsfw": { + "description": "If the request is SFW, and the worker accidentally generates NSFW, it will send back a censored image.", + "types": [ + "bool" + ] + }, + "r2": { + "description": "If True, the image will be sent via cloudflare r2 download link.", + "types": [ + "bool" + ] + }, + "shared": { + "description": "If True, The image will be shared with LAION for improving their dataset. This will also reduce your\nkudos consumption by 2. For anonymous users, this is always True.", + "types": [ + "bool" + ] + }, + "replacement_filter": { + "description": "If enabled, suspicious prompts are sanitized through a string replacement filter instead.", + "types": [ + "bool" + ] + }, + "source_image": { + "description": "The public URL of the source image or a base64 string to use.", + "types": [ + "str", + "None" + ] + }, + "source_processing": { + "description": "If source_image is provided, specifies how to process it.", + "types": [ + "horde_sdk.ai_horde_api.consts.KNOWN_SOURCE_PROCESSING" + ] + }, + "source_mask": { + "description": "If source_processing is set to 'inpainting' or 'outpainting', this parameter can be optionally provided as the\nBase64-encoded webp mask of the areas to inpaint. If this arg is not passed, the inpainting/outpainting mask has to\nbe embedded as alpha channel.", + "types": [ + "str", + "None" + ] + }, + "extra_source_images": { + "description": "Additional uploaded images which can be used for further operations.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.base.ExtraSourceImageEntry]", + "None" + ] + } + }, + "ImageGenerateCheckRequest": { + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "ImageGenerateJobPopRequest": { + "amount": { + "description": "The number of jobs to pop at the same time.", + "types": [ + "int", + "None" + ] + }, + "bridge_agent": { + "description": "The worker name, version and website.", + "types": [ + "str", + "None" + ] + }, + "models": { + "description": "The models this worker can generate.", + "types": [ + "list[str]", + "None" + ] + }, + "name": { + "description": "The Name of the Worker.", + "types": [ + "str", + "None" + ] + }, + "nsfw": { + "description": "Whether this worker can generate NSFW requests or not.", + "types": [ + "bool", + "None" + ] + }, + "priority_usernames": { + "description": "The usernames that should be prioritized by this worker.", + "types": [ + "list[str]", + "None" + ] + }, + "require_upfront_kudos": { + "description": "If True, this worker will only pick up requests where the owner has the required kudos to consume already available.", + "types": [ + "bool", + "None" + ] + }, + "threads": { + "description": "How many threads this worker is running. This is used to accurately the current power available in the horde.", + "types": [ + "int", + "None" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "bridge_version": { + "description": "The version of the bridge this worker is running.", + "types": [ + "int", + "None" + ] + }, + "max_pixels": { + "description": "The maximum number of pixels this worker can generate.", + "types": [ + "int" + ] + }, + "blacklist": { + "description": "The list of words this worker will not accept in a prompt.", + "types": [ + "list[str]" + ] + }, + "allow_img2img": { + "description": "Whether this worker can generate img2img.", + "types": [ + "bool" + ] + }, + "allow_painting": { + "description": "Whether this worker can generate inpainting/outpainting.", + "types": [ + "bool" + ] + }, + "allow_unsafe_ipaddr": { + "description": "Whether this worker will generate from unsafe/VPN IP addresses.", + "types": [ + "bool" + ] + }, + "allow_post_processing": { + "description": "Whether this worker can do post-processing.", + "types": [ + "bool" + ] + }, + "allow_controlnet": { + "description": "Whether this worker can generate using controlnets.", + "types": [ + "bool" + ] + }, + "allow_sdxl_controlnet": { + "description": "Whether this worker can generate using SDXL controlnets.", + "types": [ + "bool" + ] + }, + "allow_lora": { + "description": "Whether this worker can generate using Loras.", + "types": [ + "bool" + ] + } + }, + "ImageGenerateStatusRequest": { + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "ImageGenerationJobSubmitRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "generation": { + "description": "R2 result was uploaded to R2, else the string of the result.", + "types": [ + "str" + ] + }, + "state": { + "description": "The state of this generation.", + "types": [ + "horde_sdk.ai_horde_api.consts.GENERATION_STATE" + ] + }, + "seed": { + "description": "The seed for this generation.", + "types": [ + "int" + ] + }, + "censored": { + "description": "If True, this resulting image has been censored.", + "types": [ + "bool" + ] + }, + "gen_metadata": { + "description": "Extra metadata about faulted or defaulted components of the generation", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.base.GenMetadataEntry]", + "None" + ] + } + }, + "ImageStatsModelsRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "model_state": { + "description": "The state of the models to get stats for. Known models are models that are known to the system.", + "types": [ + "horde_sdk.ai_horde_api.consts.MODEL_STATE" + ] + } + }, + "ImageStatsModelsTotalRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "KudosTransferRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "username": { + "description": "The username of the user to transfer Kudos to.", + "types": [ + "str" + ] + }, + "amount": { + "description": "The amount of Kudos to transfer.", + "types": [ + "float" + ] + } + }, + "ListUsersDetailsRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "page": { + "description": "The page number to request. 
There are up to 25 users per page.", + "types": [ + "int" + ] + }, + "sort": { + "description": "The field to sort the users by. The default is by kudos.", + "types": [ + "str" + ] + } + }, + "ModifyUserRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "admin_comment": { + "description": "Add further information about this user for the other admins.", + "types": [ + "str", + "None" + ] + }, + "concurrency": { + "description": "The amount of concurrent request this user can have.", + "types": [ + "int", + "None" + ] + }, + "contact": { + "description": "Contact details for the horde admins to reach the user in case of emergency. This is only visible to horde\nmoderators.", + "types": [ + "str", + "None" + ] + }, + "customizer": { + "description": "When set to true, the user will be able to serve custom Stable Diffusion models which do not exist in the\nOfficial AI Horde Model Reference.", + "types": [ + "bool", + "None" + ] + }, + "education": { + "description": "When set to true, the user is considered an education account and some options become more restrictive.", + "types": [ + "bool", + "None" + ] + }, + "filtered": { + "description": "When set to true, the replacement filter will always be applied against this user", + "types": [ + "bool", + "None" + ] + }, + "flagged": { + "description": "When set to true, the user cannot transfer kudos and all their workers are put into permanent maintenance.", + "types": [ + "bool", + "None" + ] + }, + "moderator": { + "description": "Set to true to make this user a horde moderator.", + "types": [ + "bool", + "None" + ] + }, + "monthly_kudos": { + "description": "When specified, will start assigning the user monthly kudos, starting now!", + "types": [ + "int", + "None" + ] + }, + "public_workers": { + "description": "Set to true to make this user display their worker IDs.", + "types": [ + "bool", + "None" + ] + }, + "service": { + "description": "When set to true, the user is considered a service account proxying the requests for other users.", + "types": [ + "bool", + "None" + ] + }, + "special": { + "description": "When set to true, The user can send special payloads.", + "types": [ + "bool", + "None" + ] + }, + "trusted": { + "description": "When set to true,the user and their servers will not be affected by suspicion.", + "types": [ + "bool", + "None" + ] + }, + "usage_multiplier": { + "description": "The amount by which to multiply the users kudos consumption.", + "types": [ + "float", + "None" + ] + }, + "username": { + "description": "When specified, will change the username. No profanity allowed!", + "types": [ + "str", + "None" + ] + }, + "vpn": { + "description": "When set to true, the user will be able to onboard workers behind a VPN. 
This should be used as a temporary\nsolution until the user is trusted.", + "types": [ + "bool", + "None" + ] + }, + "worker_invited": { + "description": "Set to the amount of workers this user is allowed to join to the horde when in worker invite-only mode.", + "types": [ + "int", + "None" + ] + }, + "kudos": { + "description": "The amount of kudos to modify (can be negative).", + "types": [ + "float", + "None" + ] + }, + "reset_suspicion": { + "description": "Set the user's suspicion back to 0.", + "types": [ + "bool", + "None" + ] + }, + "user_id": { + "description": "The user's ID, as a `str`, but only containing numeric values.", + "types": [ + "str" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "ModifyWorkerRequest": { + "worker_id": { + "description": "The UUID of the worker in question for this request.", + "types": [ + "str", + "horde_sdk.ai_horde_api.fields.WorkerID" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "info": { + "description": "You can optionally provide a server note which will be seen in the server details. No profanity allowed!", + "types": [ + "str", + "None" + ] + }, + "maintenance": { + "description": "Set to true to put this worker into maintenance.", + "types": [ + "bool", + "None" + ] + }, + "maintenance_msg": { + "description": "If maintenance is True, you can optionally provide a message to be used instead of the default maintenance\nmessage, so that the owner is informed.", + "types": [ + "str", + "None" + ] + }, + "name": { + "description": "When this is set, it will change the worker's name. No profanity allowed!", + "types": [ + "str", + "None" + ] + }, + "paused": { + "description": "(Mods only) Set to true to pause this worker.", + "types": [ + "bool", + "None" + ] + }, + "team": { + "description": "The team towards which this worker contributes kudos. It an empty string ('') is passed, it will leave the", + "types": [ + "str", + "None" + ] + } + }, + "NewsRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "SingleUserDetailsRequest": { + "user_id": { + "description": "The user's ID, as a `str`, but only containing numeric values.", + "types": [ + "str" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "SingleWorkerDetailsRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "worker_id": { + "description": "The UUID of the worker in question for this request.", + "types": [ + "str", + "horde_sdk.ai_horde_api.fields.WorkerID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "TextGenerateAsyncRequest": { + "trusted_workers": { + "description": "When true, only trusted workers will serve this request. When False, Evaluating workers will also be used\nwhich can increase speed but adds more risk!", + "types": [ + "bool" + ] + }, + "slow_workers": { + "description": "When True, allows slower workers to pick up this request. Disabling this incurs an extra kudos cost.", + "types": [ + "bool" + ] + }, + "workers": { + "description": "A list of worker IDs to use for this request. If empty, any worker can pick up the request. Using this incurs\nand extra kudos cost.", + "types": [ + "list[str]" + ] + }, + "worker_blacklist": { + "description": "If true, the worker list will be treated as a blacklist instead of a whitelist.", + "types": [ + "list[str]" + ] + }, + "models": { + "description": "The generative models to use for this request.", + "types": [ + "list[str]" + ] + }, + "dry_run": { + "description": "If true, the request will not be processed, but will return a response with the estimated kudos cost.", + "types": [ + "bool" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "params": { + "description": "The parameters to use for the generation.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.generate.text._async.ModelGenerationInputKobold", + "None" + ] + }, + "prompt": { + "description": "The prompt which will be sent to KoboldAI to generate text.", + "types": [ + "str", + "None" + ] + }, + "allow_downgrade": { + "description": "When true and the request requires upfront kudos and the account does not have enough The request will be\ndowngraded in max context and max tokens so that it does not need upfront kudos.", + "types": [ + "bool", + "None" + ] + }, + "disable_batching": { + "description": "When true, This request will not use batching. 
This will allow you to retrieve accurate seeds.\nFeature is restricted to Trusted users and Patreons.", + "types": [ + "bool", + "None" + ] + }, + "extra_source_images": { + "description": "Any extra source images that should be used for this request; e.g., for multi-modal models.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.base.ExtraSourceImageEntry]", + "None" + ] + }, + "proxied_account": { + "description": "If using a service account as a proxy, provide this value to identify the actual account from which this\nrequest is coming from.", + "types": [ + "str", + "None" + ] + }, + "softprompt": { + "description": "Specify which softprompt needs to be used to service this request.", + "types": [ + "str", + "None" + ] + }, + "webhook": { + "description": "Provide a URL where the AI Horde will send a POST call after each delivered generation.\nThe request will include the details of the job as well as the request ID.", + "types": [ + "str", + "None" + ] + } + }, + "TextGenerateJobPopRequest": { + "amount": { + "description": "The number of jobs to pop at the same time.", + "types": [ + "int", + "None" + ] + }, + "bridge_agent": { + "description": "The worker name, version and website.", + "types": [ + "str", + "None" + ] + }, + "models": { + "description": "The models this worker can generate.", + "types": [ + "list[str]", + "None" + ] + }, + "name": { + "description": "The Name of the Worker.", + "types": [ + "str", + "None" + ] + }, + "nsfw": { + "description": "Whether this worker can generate NSFW requests or not.", + "types": [ + "bool", + "None" + ] + }, + "priority_usernames": { + "description": "The usernames that should be prioritized by this worker.", + "types": [ + "list[str]", + "None" + ] + }, + "require_upfront_kudos": { + "description": "If True, this worker will only pick up requests where the owner has the required kudos to consume already available.", + "types": [ + "bool", + "None" + ] + }, + "threads": { + "description": "How many threads this worker is running. This is used to accurately the current power available in the horde.", + "types": [ + "int", + "None" + ] + }, + "max_length": { + "description": "The maximum amount of tokens this worker can generate.", + "types": [ + "int" + ] + }, + "max_context_length": { + "description": "The max amount of context to submit to this AI for sampling.", + "types": [ + "int" + ] + }, + "softprompts": { + "description": "The available softprompt files on this worker for the currently running model.", + "types": [ + "list[str]", + "None" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "TextGenerateStatusRequest": { + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "TextGenerationJobSubmitRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "generation": { + "description": "R2 result was uploaded to R2, else the string of the result.", + "types": [ + "str" + ] + }, + "state": { + "description": "The state of this generation.", + "types": [ + "horde_sdk.ai_horde_api.consts.GENERATION_STATE" + ] + }, + "gen_metadata": { + "description": "Extra metadata about faulted or defaulted components of the generation", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.base.GenMetadataEntry]", + "None" + ] + } + }, + "TextStatsModelsRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "TextStatsModelsTotalRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + } } diff --git a/docs/response_field_names_and_descriptions.json b/docs/response_field_names_and_descriptions.json index f997245..a559a9c 100644 --- a/docs/response_field_names_and_descriptions.json +++ b/docs/response_field_names_and_descriptions.json @@ -1,818 +1,1342 @@ { - "AIHordeHeartbeatResponse": [ - [ - "message", - null - ], - [ - "version", - null - ] - ], - "AlchemyAsyncResponse": [ - [ - "message", - null - ], - [ - "id_", - null - ] - ], - "AlchemyStatusResponse": [ - [ - "state", - null - ], - [ - "forms", - null - ], - [ - "state", - null - ], - [ - "forms", - null - ] - ], - "AlchemyJobSubmitResponse": [ - [ - "reward", - null - ] - ], - "AlchemyPopResponse": [ - [ - "forms", - null - ], - [ - "skipped", - null - ] - ], - "AllWorkersDetailsResponse": [ - [ - "root", - null - ] - ], - "ImageGenerateStatusResponse": [ - [ - "finished", - null - ], - [ - "processing", - null - ], - [ - "restarted", - null - ], - [ - "waiting", - null - ], - [ - "done", - null - ], - [ - "faulted", - null - ], - [ - "wait_time", - null - ], - [ - "queue_position", - null - ], - [ - "kudos", - null - ], - [ - "is_possible", - null - ], - [ - "generations", - null - ], - [ - "shared", - null - ], - [ - "finished", - null - ], - [ - "processing", - null - ], - [ - "restarted", - null - ], - [ - "waiting", - null - ], - [ - "done", - null - ], - [ - "faulted", - null - ], - [ - "wait_time", - null - ], - [ - "queue_position", - null - ], - [ - "kudos", - null - ], - [ - "is_possible", - null - ], - [ - "generations", - null - ], - [ - "shared", - null - ] - ], - "TextGenerateStatusResponse": [ - [ - "finished", - null - ], - [ - "processing", - null - ], - [ - "restarted", - null - ], - [ - "waiting", - null - ], - [ - "done", - null - ], - [ - "faulted", - null - ], - [ - "wait_time", - null - ], - [ - "queue_position", - null - ], - [ - "kudos", - null - ], - [ - "is_possible", - null - ], - [ - "generations", - "The generations that have been completed in this request." - ], - [ - "finished", - null - ], - [ - "processing", - null - ], - [ - "restarted", - null - ], - [ - "waiting", - null - ], - [ - "done", - null - ], - [ - "faulted", - null - ], - [ - "wait_time", - null - ], - [ - "queue_position", - null - ], - [ - "kudos", - null - ], - [ - "is_possible", - null - ], - [ - "generations", - "The generations that have been completed in this request." - ] - ], - "FindUserResponse": [ - [ - "admin_comment", - "(Privileged) Comments from the horde admins about this user." - ], - [ - "account_age", - "How many seconds since this account was created." - ], - [ - "concurrency", - "How many concurrent generations this user may request." - ], - [ - "contact", - "(Privileged) Contact details for the horde admins to reach the user in case of emergency." - ], - [ - "contributions", - null - ], - [ - "customizer", - "If this user can run custom models." - ], - [ - "evaluating_kudos", - "(Privileged) The amount of Evaluating Kudos this untrusted user has from generations and uptime. When this number reaches a pre-specified threshold, they automatically become trusted." - ], - [ - "flagged", - "This user has been flagged for suspicious activity." - ], - [ - "id_", - "The user unique ID. It is always an integer." - ], - [ - "kudos", - "The amount of Kudos this user has. The amount of Kudos determines the priority when requesting image generations." 
- ], - [ - "kudos_details", - null - ], - [ - "moderator", - "This user is a Horde moderator." - ], - [ - "monthly_kudos", - null - ], - [ - "pseudonymous", - "If true, this user has not registered using an oauth service." - ], - [ - "records", - null - ], - [ - "sharedkey_ids", - null - ], - [ - "service", - "This user is a Horde service account and can provide the `proxied_user` field." - ], - [ - "special", - "(Privileged) This user has been given the Special role." - ], - [ - "suspicious", - "(Privileged) How much suspicion this user has accumulated." - ], - [ - "trusted", - "This user is a trusted member of the Horde." - ], - [ - "usage", - null - ], - [ - "username", - "The user's unique Username. It is a combination of their chosen alias plus their ID." - ], - [ - "vpn", - "(Privileged) This user has been given the VPN role." - ], - [ - "education", - "This user has been given education VPN role." - ], - [ - "worker_count", - "How many workers this user has created (active or inactive)." - ], - [ - "worker_ids", - null - ], - [ - "worker_invited", - "Whether this user has been invited to join a worker to the horde and how many of them. When 0, this user cannot add (new) workers to the horde." - ] - ], - "HordePerformanceResponse": [ - [ - "interrogator_count", - "How many workers are actively processing image interrogations in this {horde_noun} in the past 5 minutes." - ], - [ - "interrogator_thread_count", - "How many worker threads are actively processing image interrogation in this {horde_noun} in the past 5 minutes." - ], - [ - "past_minute_megapixelsteps", - "How many megapixelsteps this horde generated in the last minute." - ], - [ - "past_minute_tokens", - "How many tokens this horde generated in the last minute." - ], - [ - "queued_forms", - "The amount of image interrogations waiting and processing currently in this horde." - ], - [ - "queued_megapixelsteps", - "The amount of megapixelsteps in waiting and processing requests currently in this horde." - ], - [ - "queued_requests", - "The amount of waiting and processing image requests currently in this horde." - ], - [ - "queued_text_requests", - "The amount of waiting and processing text requests currently in this horde." - ], - [ - "queued_tokens", - "The amount of tokens in waiting and processing requests currently in this horde." - ], - [ - "text_thread_count", - "How many worker threads are actively processing prompt generations in this {horde_noun} in the past 5 minutes." - ], - [ - "text_worker_count", - "How many workers are actively processing prompt generations in this horde in the past 5 minutes." - ], - [ - "thread_count", - "How many worker threads are actively processing prompt generations in this {horde_noun} in the past 5 minutes." - ], - [ - "worker_count", - "How many workers are actively processing prompt generations in this horde in the past 5 minutes." 
- ] - ], - "HordeStatusModelsAllResponse": [ - [ - "root", - null - ] - ], - "HordeStatusModelsSingleResponse": [ - [ - "root", - null - ] - ], - "ImageGenerateAsyncDryRunResponse": [ - [ - "kudos", - null - ] - ], - "ImageGenerateAsyncResponse": [ - [ - "message", - null - ], - [ - "id_", - null - ], - [ - "kudos", - null - ], - [ - "warnings", - null - ] - ], - "ImageGenerateCheckResponse": [ - [ - "finished", - null - ], - [ - "processing", - null - ], - [ - "restarted", - null - ], - [ - "waiting", - null - ], - [ - "done", - null - ], - [ - "faulted", - null - ], - [ - "wait_time", - null - ], - [ - "queue_position", - null - ], - [ - "kudos", - null - ], - [ - "is_possible", - null - ] - ], - "ImageGenerateJobPopResponse": [ - [ - "extra_source_images", - null - ], - [ - "id_", - null - ], - [ - "ids", - null - ], - [ - "payload", - null - ], - [ - "skipped", - null - ], - [ - "model", - null - ], - [ - "source_image", - null - ], - [ - "source_processing", - null - ], - [ - "source_mask", - null - ], - [ - "r2_upload", - null - ], - [ - "r2_uploads", - null - ] - ], - "JobSubmitResponse": [ - [ - "reward", - null - ], - [ - "reward", - null - ] - ], - "ImageModelStatsResponse": [ - [ - "day", - null - ], - [ - "month", - null - ], - [ - "total", - null - ] - ], - "ImageStatsModelsTotalResponse": [ - [ - "day", - null - ], - [ - "hour", - null - ], - [ - "minute", - null - ], - [ - "month", - null - ], - [ - "total", - null - ] - ], - "NewsResponse": [ - [ - "root", - null - ] - ], - "SingleWorkerDetailsResponse": [ - [ - "type_", - null - ], - [ - "name", - null - ], - [ - "id_", - null - ], - [ - "online", - null - ], - [ - "requests_fulfilled", - null - ], - [ - "kudos_rewards", - null - ], - [ - "kudos_details", - null - ], - [ - "performance", - null - ], - [ - "threads", - null - ], - [ - "uptime", - null - ], - [ - "maintenance_mode", - null - ], - [ - "paused", - null - ], - [ - "info", - null - ], - [ - "nsfw", - null - ], - [ - "owner", - null - ], - [ - "ipaddr", - null - ], - [ - "trusted", - null - ], - [ - "flagged", - null - ], - [ - "suspicious", - null - ], - [ - "uncompleted_jobs", - null - ], - [ - "models", - null - ], - [ - "forms", - null - ], - [ - "team", - null - ], - [ - "contact", - null - ], - [ - "bridge_agent", - null - ], - [ - "max_pixels", - null - ], - [ - "megapixelsteps_generated", - null - ], - [ - "img2img", - null - ], - [ - "painting", - null - ], - [ - "post_processing", - null - ], - [ - "lora", - null - ], - [ - "max_length", - null - ], - [ - "max_context_length", - null - ], - [ - "tokens_generated", - null - ] - ], - "TextGenerateAsyncDryRunResponse": [ - [ - "kudos", - null - ] - ], - "TextGenerateAsyncResponse": [ - [ - "message", - null - ], - [ - "id_", - null - ], - [ - "kudos", - "The expected kudos consumption for this request." - ], - [ - "warnings", - null - ] - ], - "TextGenerateJobPopResponse": [ - [ - "extra_source_images", - null - ], - [ - "payload", - "The settings for this text generation." - ], - [ - "id_", - "The UUID for this text generation." - ], - [ - "ids", - "The UUIDs for this text generations." - ], - [ - "skipped", - "The skipped requests that were not valid for this worker." - ], - [ - "softprompt", - "The soft prompt requested for this generation." - ], - [ - "model", - "The model requested for this generation." 
- ] - ], - "TextModelStatsResponse": [ - [ - "day", - null - ], - [ - "month", - null - ], - [ - "total", - null - ] - ], - "TextStatsModelsTotalResponse": [ - [ - "minute", - null - ], - [ - "hour", - null - ], - [ - "day", - null - ], - [ - "month", - null - ], - [ - "total", - null - ] - ] + "AIHordeHeartbeatResponse": { + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + }, + "version": { + "description": "The version of the AI Horde API that this node is running.", + "types": [ + "str" + ] + } + }, + "AlchemyAsyncResponse": { + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + }, + "id_": { + "description": "The UUID for this job.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID" + ] + } + }, + "AlchemyStatusResponse": { + "state": { + "description": "The state of the job. See `GENERATION_STATE` for possible values.", + "types": [ + "horde_sdk.ai_horde_api.consts.GENERATION_STATE" + ] + }, + "forms": { + "description": "The status of each form in the job.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.alchemy._status.AlchemyFormStatus]" + ] + } + }, + "AlchemyJobSubmitResponse": { + "reward": { + "description": "The kudos reward for this job.", + "types": [ + "float" + ] + } + }, + "AlchemyPopResponse": { + "forms": { + "description": "The forms that to be generated", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.alchemy._pop.AlchemyPopFormPayload]", + "None" + ] + }, + "skipped": { + "description": "The requests that were skipped because this worker were not eligible for them.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.alchemy._pop.NoValidAlchemyFound", + "None" + ] + } + }, + "AllWorkersDetailsResponse": { + "root": { + "description": "The underlying list of worker details.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.workers._workers.WorkerDetailItem]" + ] + } + }, + "ImageGenerateStatusResponse": { + "finished": { + "description": "The amount of finished jobs in this request.", + "types": [ + "int" + ] + }, + "processing": { + "description": "The amount of still processing jobs in this request.", + "types": [ + "int" + ] + }, + "restarted": { + "description": "The amount of jobs that timed out and had to be restarted or were reported as failed by a worker.", + "types": [ + "int" + ] + }, + "waiting": { + "description": "The amount of jobs waiting to be picked up by a worker.", + "types": [ + "int" + ] + }, + "done": { + "description": "True when all jobs in this request are done. Else False.", + "types": [ + "bool" + ] + }, + "faulted": { + "description": "True when this request caused an internal server error and could not be completed.", + "types": [ + "bool" + ] + }, + "wait_time": { + "description": "The expected amount to wait (in seconds) to generate all jobs in this request.", + "types": [ + "int" + ] + }, + "queue_position": { + "description": "The position in the requests queue. 
This position is determined by relative Kudos amounts.", + "types": [ + "int" + ] + }, + "kudos": { + "description": "The amount of total Kudos this request has consumed until now.", + "types": [ + "float" + ] + }, + "is_possible": { + "description": "If False, this request will not be able to be completed with the pool of workers currently available.", + "types": [ + "bool" + ] + }, + "generations": { + "description": "The individual image generation responses in this request.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.generate._status.ImageGeneration]" + ] + }, + "shared": { + "description": "If True, These images have been shared with LAION.", + "types": [ + "bool", + "None" + ] + } + }, + "TextGenerateStatusResponse": { + "finished": { + "description": "The amount of finished jobs in this request.", + "types": [ + "int" + ] + }, + "processing": { + "description": "The amount of still processing jobs in this request.", + "types": [ + "int" + ] + }, + "restarted": { + "description": "The amount of jobs that timed out and had to be restarted or were reported as failed by a worker.", + "types": [ + "int" + ] + }, + "waiting": { + "description": "The amount of jobs waiting to be picked up by a worker.", + "types": [ + "int" + ] + }, + "done": { + "description": "True when all jobs in this request are done. Else False.", + "types": [ + "bool" + ] + }, + "faulted": { + "description": "True when this request caused an internal server error and could not be completed.", + "types": [ + "bool" + ] + }, + "wait_time": { + "description": "The expected amount to wait (in seconds) to generate all jobs in this request.", + "types": [ + "int" + ] + }, + "queue_position": { + "description": "The position in the requests queue. This position is determined by relative Kudos amounts.", + "types": [ + "int" + ] + }, + "kudos": { + "description": "The amount of total Kudos this request has consumed until now.", + "types": [ + "float" + ] + }, + "is_possible": { + "description": "If False, this request will not be able to be completed with the pool of workers currently available.", + "types": [ + "bool" + ] + }, + "generations": { + "description": "The generations that have been completed in this request.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.generate.text._status.GenerationKobold]" + ] + } + }, + "DeleteWorkerResponse": { + "deleted_id_": { + "description": "The ID of the deleted worker.", + "types": [ + "str", + "None" + ] + }, + "deleted_name": { + "description": "The Name of the deleted worker.", + "types": [ + "str", + "None" + ] + } + }, + "UserDetailsResponse": { + "admin_comment": { + "description": "(Privileged) Comments from the horde admins about this user.", + "types": [ + "str", + "None" + ] + }, + "account_age": { + "description": "How many seconds since this account was created.", + "types": [ + "int", + "None" + ] + }, + "concurrency": { + "description": "How many concurrent generations this user may request.", + "types": [ + "int", + "None" + ] + }, + "contact": { + "description": "(Privileged) Contact details for the horde admins to reach the user in case of emergency.", + "types": [ + "str", + "None" + ] + }, + "contributions": { + "description": "How many images and megapixelsteps this user has generated.", + "types": [ + "horde_sdk.ai_horde_api.apimodels._users.ContributionsDetails", + "None" + ] + }, + "customizer": { + "description": "If this user can run custom models.", + "types": [ + "bool", + "None" + ] + }, + "evaluating_kudos": { + "description": 
"(Privileged) The amount of Evaluating Kudos this untrusted user has from generations and uptime. When this number reaches a pre-specified threshold, they automatically become trusted.", + "types": [ + "float", + "None" + ] + }, + "flagged": { + "description": "This user has been flagged for suspicious activity.", + "types": [ + "bool", + "None" + ] + }, + "id_": { + "description": "The user unique ID. It is always an integer.", + "types": [ + "int", + "None" + ] + }, + "kudos": { + "description": "The amount of Kudos this user has. The amount of Kudos determines the priority when requesting image generations.", + "types": [ + "float", + "None" + ] + }, + "kudos_details": { + "description": "How much Kudos this user has accumulated or used for generating images.", + "types": [ + "horde_sdk.ai_horde_api.apimodels._users.UserKudosDetails", + "None" + ] + }, + "moderator": { + "description": "This user is a Horde moderator.", + "types": [ + "bool", + "None" + ] + }, + "monthly_kudos": { + "description": "How much recurring Kudos this user receives monthly.", + "types": [ + "horde_sdk.ai_horde_api.apimodels._users.MonthlyKudos", + "None" + ] + }, + "pseudonymous": { + "description": "If true, this user has not registered using an oauth service.", + "types": [ + "bool", + "None" + ] + }, + "records": { + "description": "How many images, texts, megapixelsteps and tokens this user has generated or requested.", + "types": [ + "horde_sdk.ai_horde_api.apimodels._users.UserRecords", + "None" + ] + }, + "sharedkey_ids": { + "description": "The IDs of the shared keys this user has access to.", + "types": [ + "list[str]", + "None" + ] + }, + "service": { + "description": "This user is a Horde service account and can provide the `proxied_user` field.", + "types": [ + "bool", + "None" + ] + }, + "special": { + "description": "(Privileged) This user has been given the Special role.", + "types": [ + "bool", + "None" + ] + }, + "suspicious": { + "description": "(Privileged) How much suspicion this user has accumulated.", + "types": [ + "int", + "None" + ] + }, + "trusted": { + "description": "This user is a trusted member of the Horde.", + "types": [ + "bool", + "None" + ] + }, + "usage": { + "description": "How many images and megapixelsteps this user has requested.", + "types": [ + "horde_sdk.ai_horde_api.apimodels._users.UsageDetails", + "None" + ] + }, + "username": { + "description": "The user's unique Username. It is a combination of their chosen alias plus their ID.", + "types": [ + "str", + "None" + ] + }, + "vpn": { + "description": "(Privileged) This user has been given the VPN role.", + "types": [ + "bool", + "None" + ] + }, + "education": { + "description": "(This user has been given the education role.", + "types": [ + "bool", + "None" + ] + }, + "worker_count": { + "description": "How many workers this user has created (active or inactive).", + "types": [ + "int", + "None" + ] + }, + "worker_ids": { + "description": "The IDs of the workers this user has created (active or inactive).", + "types": [ + "list[str]", + "None" + ] + }, + "worker_invited": { + "description": "Whether this user has been invited to join a worker to the horde and how many of them. 
When 0, this user cannot add (new) workers to the horde.", + "types": [ + "int", + "None" + ] + } + }, + "HordePerformanceResponse": { + "interrogator_count": { + "description": "How many workers are actively processing image interrogations in this {horde_noun} in the past 5 minutes.", + "types": [ + "int", + "None" + ] + }, + "interrogator_thread_count": { + "description": "How many worker threads are actively processing image interrogation in this {horde_noun} in the past 5 minutes.", + "types": [ + "int", + "None" + ] + }, + "past_minute_megapixelsteps": { + "description": "How many megapixelsteps this horde generated in the last minute.", + "types": [ + "float", + "None" + ] + }, + "past_minute_tokens": { + "description": "How many tokens this horde generated in the last minute.", + "types": [ + "float", + "None" + ] + }, + "queued_forms": { + "description": "The amount of image interrogations waiting and processing currently in this horde.", + "types": [ + "float", + "None" + ] + }, + "queued_megapixelsteps": { + "description": "The amount of megapixelsteps in waiting and processing requests currently in this horde.", + "types": [ + "float", + "None" + ] + }, + "queued_requests": { + "description": "The amount of waiting and processing image requests currently in this horde.", + "types": [ + "int", + "None" + ] + }, + "queued_text_requests": { + "description": "The amount of waiting and processing text requests currently in this horde.", + "types": [ + "int", + "None" + ] + }, + "queued_tokens": { + "description": "The amount of tokens in waiting and processing requests currently in this horde.", + "types": [ + "float", + "None" + ] + }, + "text_thread_count": { + "description": "How many worker threads are actively processing prompt generations in this {horde_noun} in the past 5 minutes.", + "types": [ + "int", + "None" + ] + }, + "text_worker_count": { + "description": "How many workers are actively processing prompt generations in this horde in the past 5 minutes.", + "types": [ + "int", + "None" + ] + }, + "thread_count": { + "description": "How many worker threads are actively processing prompt generations in this {horde_noun} in the past 5 minutes.", + "types": [ + "int", + "None" + ] + }, + "worker_count": { + "description": "How many workers are actively processing prompt generations in this horde in the past 5 minutes.", + "types": [ + "int", + "None" + ] + } + }, + "HordeStatusModelsAllResponse": { + "root": { + "description": "The underlying list of models.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels._status.ActiveModel]" + ] + } + }, + "HordeStatusModelsSingleResponse": { + "root": { + "description": "The underlying list of models.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels._status.ActiveModel]" + ] + } + }, + "ImageGenerateAsyncDryRunResponse": { + "kudos": { + "description": "The expected kudos consumption for this request.", + "types": [ + "float" + ] + } + }, + "ImageGenerateAsyncResponse": { + "message": { + "description": "A message from the API. 
This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + }, + "id_": { + "description": "The UUID for this job.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID" + ] + }, + "kudos": { + "description": "The expected kudos consumption for this request.", + "types": [ + "float" + ] + }, + "warnings": { + "description": "Any warnings that were generated by the server or a serving worker.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.base.SingleWarningEntry]", + "None" + ] + } + }, + "ImageGenerateCheckResponse": { + "finished": { + "description": "The amount of finished jobs in this request.", + "types": [ + "int" + ] + }, + "processing": { + "description": "The amount of still processing jobs in this request.", + "types": [ + "int" + ] + }, + "restarted": { + "description": "The amount of jobs that timed out and had to be restarted or were reported as failed by a worker.", + "types": [ + "int" + ] + }, + "waiting": { + "description": "The amount of jobs waiting to be picked up by a worker.", + "types": [ + "int" + ] + }, + "done": { + "description": "True when all jobs in this request are done. Else False.", + "types": [ + "bool" + ] + }, + "faulted": { + "description": "True when this request caused an internal server error and could not be completed.", + "types": [ + "bool" + ] + }, + "wait_time": { + "description": "The expected amount to wait (in seconds) to generate all jobs in this request.", + "types": [ + "int" + ] + }, + "queue_position": { + "description": "The position in the requests queue. This position is determined by relative Kudos amounts.", + "types": [ + "int" + ] + }, + "kudos": { + "description": "The amount of total Kudos this request has consumed until now.", + "types": [ + "float" + ] + }, + "is_possible": { + "description": "If False, this request will not be able to be completed with the pool of workers currently available.", + "types": [ + "bool" + ] + } + }, + "ImageGenerateJobPopResponse": { + "extra_source_images": { + "description": "Additional uploaded images (as base64) which can be used for further operations.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.base.ExtraSourceImageEntry]", + "None" + ] + }, + "id_": { + "description": "(Obsolete) The UUID for this image generation.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID", + "None" + ] + }, + "ids": { + "description": "A list of UUIDs for image generation.", + "types": [ + "list[horde_sdk.ai_horde_api.fields.JobID]" + ] + }, + "payload": { + "description": "The parameters used to generate this image.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.generate._pop.ImageGenerateJobPopPayload" + ] + }, + "skipped": { + "description": "The reasons this worker was not issued certain jobs, and the number of jobs for each reason.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.generate._pop.ImageGenerateJobPopSkippedStatus" + ] + }, + "model": { + "description": "Which of the available models to use for this request.", + "types": [ + "str", + "None" + ] + }, + "source_image": { + "description": "The URL or Base64-encoded webp to use for img2img.", + "types": [ + "str", + "None" + ] + }, + "source_processing": { + "description": "If source_image is provided, specifies how to process it.", + "types": [ + "str", + "horde_sdk.ai_horde_api.consts.KNOWN_SOURCE_PROCESSING" + ] + }, + "source_mask": { + "description": "If img_processing is set to 'inpainting' or 'outpainting', this parameter can be optionally provided as the\nmask of the 
areas to inpaint. If this arg is not passed, the inpainting/outpainting mask has to be embedded as\nalpha channel.", + "types": [ + "str", + "None" + ] + }, + "r2_upload": { + "description": "(Obsolete) The r2 upload link to use to upload this image.", + "types": [ + "str", + "None" + ] + }, + "r2_uploads": { + "description": "The r2 upload links for each this image. Each index matches the ID in self.ids", + "types": [ + "list[str]", + "None" + ] + } + }, + "JobSubmitResponse": { + "reward": { + "description": "The amount of kudos gained for submitting this request.", + "types": [ + "float" + ] + } + }, + "ImageStatsModelsResponse": { + "day": { + "description": "The stats for the past day.", + "types": [ + "dict[str, int]" + ] + }, + "month": { + "description": "The stats for the past month.", + "types": [ + "dict[str, int]" + ] + }, + "total": { + "description": "The total stats for all time.", + "types": [ + "dict[str, int]" + ] + } + }, + "ImageStatsModelsTotalResponse": { + "day": { + "description": "The total stats for the past day.", + "types": [ + "horde_sdk.ai_horde_api.apimodels._stats.SinglePeriodImgStat", + "None" + ] + }, + "hour": { + "description": "The total stats for the past hour.", + "types": [ + "horde_sdk.ai_horde_api.apimodels._stats.SinglePeriodImgStat", + "None" + ] + }, + "minute": { + "description": "The total stats for the past minute.", + "types": [ + "horde_sdk.ai_horde_api.apimodels._stats.SinglePeriodImgStat", + "None" + ] + }, + "month": { + "description": "The total stats for the past month.", + "types": [ + "horde_sdk.ai_horde_api.apimodels._stats.SinglePeriodImgStat", + "None" + ] + }, + "total": { + "description": "The total stats for all time.", + "types": [ + "horde_sdk.ai_horde_api.apimodels._stats.SinglePeriodImgStat", + "None" + ] + } + }, + "KudosTransferResponse": { + "transferred": { + "description": "The amount of Kudos transferred.", + "types": [ + "float", + "None" + ] + } + }, + "ListUsersDetailsResponse": { + "root": { + "description": "The underlying list of user details.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels._users.UserDetailsResponse]" + ] + } + }, + "ModifyUserResponse": { + "admin_comment": { + "description": "Add further information about this user for the other admins.", + "types": [ + "str", + "None" + ] + }, + "concurrency": { + "description": "The amount of concurrent request this user can have.", + "types": [ + "int", + "None" + ] + }, + "contact": { + "description": "Contact details for the horde admins to reach the user in case of emergency. 
This is only visible to horde\nmoderators.", + "types": [ + "str", + "None" + ] + }, + "customizer": { + "description": "When set to true, the user will be able to serve custom Stable Diffusion models which do not exist in the\nOfficial AI Horde Model Reference.", + "types": [ + "bool", + "None" + ] + }, + "education": { + "description": "When set to true, the user is considered an education account and some options become more restrictive.", + "types": [ + "bool", + "None" + ] + }, + "filtered": { + "description": "When set to true, the replacement filter will always be applied against this user", + "types": [ + "bool", + "None" + ] + }, + "flagged": { + "description": "When set to true, the user cannot transfer kudos and all their workers are put into permanent maintenance.", + "types": [ + "bool", + "None" + ] + }, + "moderator": { + "description": "Set to true to make this user a horde moderator.", + "types": [ + "bool", + "None" + ] + }, + "monthly_kudos": { + "description": "When specified, will start assigning the user monthly kudos, starting now!", + "types": [ + "int", + "None" + ] + }, + "public_workers": { + "description": "Set to true to make this user display their worker IDs.", + "types": [ + "bool", + "None" + ] + }, + "service": { + "description": "When set to true, the user is considered a service account proxying the requests for other users.", + "types": [ + "bool", + "None" + ] + }, + "special": { + "description": "When set to true, The user can send special payloads.", + "types": [ + "bool", + "None" + ] + }, + "trusted": { + "description": "When set to true,the user and their servers will not be affected by suspicion.", + "types": [ + "bool", + "None" + ] + }, + "usage_multiplier": { + "description": "The amount by which to multiply the users kudos consumption.", + "types": [ + "float", + "None" + ] + }, + "username": { + "description": "When specified, will change the username. No profanity allowed!", + "types": [ + "str", + "None" + ] + }, + "vpn": { + "description": "When set to true, the user will be able to onboard workers behind a VPN. This should be used as a temporary\nsolution until the user is trusted.", + "types": [ + "bool", + "None" + ] + }, + "worker_invited": { + "description": "Set to the amount of workers this user is allowed to join to the horde when in worker invite-only mode.", + "types": [ + "int", + "None" + ] + }, + "new_kudos": { + "description": "The new amount of kudos this user has.", + "types": [ + "float", + "None" + ] + }, + "new_suspicion": { + "description": "The new amount of suspicion this user has.", + "types": [ + "int", + "None" + ] + } + }, + "ModifyWorkerResponse": { + "info": { + "description": "The new state of the 'info' var for this worker.", + "types": [ + "str", + "None" + ] + }, + "maintenance": { + "description": "The new state of the 'maintenance' var for this worker. When True, this worker will not pick up any new\nrequests.", + "types": [ + "bool", + "None" + ] + }, + "name": { + "description": "The new name for this this worker. No profanity allowed!", + "types": [ + "str", + "None" + ] + }, + "paused": { + "description": "The new state of the 'paused' var for this worker. 
When True, this worker will not be given any new requests.", + "types": [ + "bool", + "None" + ] + }, + "team": { + "description": "The new team of this worker.", + "types": [ + "str", + "None" + ] + } + }, + "NewsResponse": { + "root": { + "description": "The underlying list of newspieces.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels._status.Newspiece]" + ] + } + }, + "SingleWorkerDetailsResponse": { + "type_": { + "description": "The type of worker.", + "types": [ + "horde_sdk.ai_horde_api.consts.WORKER_TYPE" + ] + }, + "name": { + "description": "The Name given to this worker.", + "types": [ + "str" + ] + }, + "id_": { + "description": "The UUID of this worker.", + "types": [ + "str", + "horde_sdk.ai_horde_api.fields.WorkerID" + ] + }, + "online": { + "description": "True if the worker has checked-in the past 5 minutes.", + "types": [ + "bool", + "None" + ] + }, + "requests_fulfilled": { + "description": "How many images this worker has generated.", + "types": [ + "int", + "None" + ] + }, + "kudos_rewards": { + "description": "How many Kudos this worker has been rewarded in total.", + "types": [ + "float", + "None" + ] + }, + "kudos_details": { + "description": "How much Kudos this worker has accumulated or used for generating images.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.workers._workers.WorkerKudosDetails", + "None" + ] + }, + "performance": { + "description": "The average performance of this worker in human readable form.", + "types": [ + "str", + "None" + ] + }, + "threads": { + "description": "How many threads this worker is running.", + "types": [ + "int", + "None" + ] + }, + "uptime": { + "description": "The amount of seconds this worker has been online for this AI Horde.", + "types": [ + "int", + "None" + ] + }, + "maintenance_mode": { + "description": "When True, this worker will not pick up any new requests.", + "types": [ + "bool" + ] + }, + "paused": { + "description": "When True, this worker not be given any new requests.", + "types": [ + "bool", + "None" + ] + }, + "info": { + "description": "Extra information or comments about this worker provided by its owner.", + "types": [ + "str", + "None" + ] + }, + "nsfw": { + "description": "Whether this worker can generate NSFW requests or not.", + "types": [ + "bool", + "None" + ] + }, + "owner": { + "description": "Privileged or public if the owner has allowed it. The alias of the owner of this worker.", + "types": [ + "str", + "None" + ] + }, + "ipaddr": { + "description": "Privileged. 
The last known IP this worker has connected from.", + "types": [ + "str", + "None" + ] + }, + "trusted": { + "description": "The worker is trusted to return valid generations.", + "types": [ + "bool", + "None" + ] + }, + "flagged": { + "description": "The worker's owner has been flagged for suspicious activity.\nThis worker will not be given any jobs to process.", + "types": [ + "bool", + "None" + ] + }, + "suspicious": { + "description": "(Privileged) How much suspicion this worker has accumulated.", + "types": [ + "int", + "None" + ] + }, + "uncompleted_jobs": { + "description": "How many jobs this worker has left uncompleted after it started them.", + "types": [ + "int", + "None" + ] + }, + "models": { + "description": "The models this worker supports.", + "types": [ + "list[str]", + "None" + ] + }, + "forms": { + "description": "The forms this worker supports.", + "types": [ + "list[str]", + "None" + ] + }, + "team": { + "description": "The team this worker belongs to.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.workers._workers.TeamDetailsLite", + "None" + ] + }, + "contact": { + "description": "(Privileged) Contact details for the horde admins to reach the owner of this worker in emergencies.", + "types": [ + "str", + "None" + ] + }, + "bridge_agent": { + "description": "The bridge agent name, version and website. Example: AI Horde Worker reGen:4.1.0:", + "types": [ + "str" + ] + }, + "max_pixels": { + "description": "The maximum pixels in resolution this worker can generate. Example: 262144", + "types": [ + "int", + "None" + ] + }, + "megapixelsteps_generated": { + "description": "How many megapixelsteps this worker has generated until now.", + "types": [ + "int", + "None" + ] + }, + "img2img": { + "description": "If True, this worker supports and allows img2img requests.", + "types": [ + "bool", + "None" + ] + }, + "painting": { + "description": "If True, this worker supports and allows inpainting requests.", + "types": [ + "bool", + "None" + ] + }, + "post_processing": { + "description": "If True, this worker supports and allows post-processing requests.", + "types": [ + "bool", + "None" + ] + }, + "lora": { + "description": "If True, this worker supports and allows lora requests.", + "types": [ + "bool", + "None" + ] + }, + "max_length": { + "description": "The maximum tokens this worker can generate.", + "types": [ + "int", + "None" + ] + }, + "max_context_length": { + "description": "The maximum tokens this worker can read.", + "types": [ + "int", + "None" + ] + }, + "tokens_generated": { + "description": "How many tokens this worker has generated until now. ", + "types": [ + "int", + "None" + ] + } + }, + "TextGenerateAsyncDryRunResponse": { + "kudos": { + "description": "The expected kudos consumption for this request.", + "types": [ + "float" + ] + } + }, + "TextGenerateAsyncResponse": { + "message": { + "description": "A message from the API. 
This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + }, + "id_": { + "description": "The UUID for this job.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID" + ] + }, + "kudos": { + "description": "The expected kudos consumption for this request.", + "types": [ + "float", + "None" + ] + }, + "warnings": { + "description": "Any warnings that were generated by the server or a serving worker.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.base.SingleWarningEntry]", + "None" + ] + } + }, + "TextGenerateJobPopResponse": { + "extra_source_images": { + "description": "Additional uploaded images (as base64) which can be used for further operations.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.base.ExtraSourceImageEntry]", + "None" + ] + }, + "payload": { + "description": "The settings for this text generation.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.generate.text._pop.ModelPayloadKobold" + ] + }, + "id_": { + "description": "The UUID for this text generation.", + "types": [ + "horde_sdk.ai_horde_api.fields.JobID", + "None" + ] + }, + "ids": { + "description": "The UUIDs for this text generations.", + "types": [ + "list[horde_sdk.ai_horde_api.fields.JobID]" + ] + }, + "skipped": { + "description": "The skipped requests that were not valid for this worker.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.generate.text._pop.NoValidRequestFoundKobold" + ] + }, + "softprompt": { + "description": "The soft prompt requested for this generation.", + "types": [ + "str", + "None" + ] + }, + "model": { + "description": "The model requested for this generation.", + "types": [ + "str", + "None" + ] + } + }, + "TextStatsModelResponse": { + "day": { + "description": "The stats for the past day.", + "types": [ + "dict[str, int]" + ] + }, + "month": { + "description": "The stats for the past month.", + "types": [ + "dict[str, int]" + ] + }, + "total": { + "description": "The total stats for all time.", + "types": [ + "dict[str, int]" + ] + } + }, + "TextStatsModelsTotalResponse": { + "minute": { + "description": "The total stats for the past minute.", + "types": [ + "dict[str, int]" + ] + }, + "hour": { + "description": "The total stats for the past hour.", + "types": [ + "dict[str, int]" + ] + }, + "day": { + "description": "The total stats for the past day.", + "types": [ + "dict[str, int]" + ] + }, + "month": { + "description": "The total stats for the past month.", + "types": [ + "dict[str, int]" + ] + }, + "total": { + "description": "The total stats for all time.", + "types": [ + "dict[str, int]" + ] + } + } } diff --git a/examples/ai_horde_client/async_aihorde_simple_client_example.py b/examples/ai_horde_client/async_aihorde_simple_client_example.py deleted file mode 100644 index a08cb79..0000000 --- a/examples/ai_horde_client/async_aihorde_simple_client_example.py +++ /dev/null @@ -1,119 +0,0 @@ -import argparse -import asyncio -from collections.abc import Coroutine -from pathlib import Path - -import aiohttp -from PIL.Image import Image - -from horde_sdk import ANON_API_KEY, RequestErrorResponse -from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIAsyncSimpleClient -from horde_sdk.ai_horde_api.apimodels import ( - ImageGenerateAsyncRequest, - ImageGenerateStatusResponse, - ImageGenerationInputPayload, - TIPayloadEntry, -) -from horde_sdk.ai_horde_api.fields import JobID - - -async def async_one_image_generate_example( - simple_client: AIHordeAPIAsyncSimpleClient, - apikey: str = ANON_API_KEY, -) -> None: 
- single_generation_response: ImageGenerateStatusResponse - job_id: JobID - - single_generation_response, job_id = await simple_client.image_generate_request( - ImageGenerateAsyncRequest( - apikey=apikey, - prompt="A cat in a hat", - models=["Deliberate"], - params=ImageGenerationInputPayload( - height=512, - width=512, - tis=[ - TIPayloadEntry( - name="72437", - inject_ti="negprompt", - strength=1, - ), - ], - ), - ), - ) - - if isinstance(single_generation_response, RequestErrorResponse): - print(f"Error: {single_generation_response.message}") - else: - single_image, _ = await simple_client.download_image_from_generation(single_generation_response.generations[0]) - - example_path = Path("examples/requested_images") - example_path.mkdir(exist_ok=True, parents=True) - - single_image.save(example_path / f"{job_id}_simple_async_example.webp") - - -async def async_multi_image_generate_example( - simple_client: AIHordeAPIAsyncSimpleClient, - apikey: str = ANON_API_KEY, -) -> None: - multi_generation_responses: tuple[ - tuple[ImageGenerateStatusResponse, JobID], - tuple[ImageGenerateStatusResponse, JobID], - ] - multi_generation_responses = await asyncio.gather( - simple_client.image_generate_request( - ImageGenerateAsyncRequest( - apikey=apikey, - prompt="A cat in a blue hat", - models=["SDXL 1.0"], - params=ImageGenerationInputPayload(height=1024, width=1024), - ), - ), - simple_client.image_generate_request( - ImageGenerateAsyncRequest( - apikey=apikey, - prompt="A cat in a red hat", - models=["SDXL 1.0"], - params=ImageGenerationInputPayload(height=1024, width=1024), - ), - ), - ) - - download_image_from_generation_calls: list[Coroutine[None, None, tuple[Image, JobID]]] = [] - - for status_response, _ in multi_generation_responses: - download_image_from_generation_calls.append( - simple_client.download_image_from_generation(status_response.generations[0]), - ) - - downloaded_images: list[tuple[Image, JobID]] = await asyncio.gather(*download_image_from_generation_calls) - - example_path = Path("examples/requested_images") - example_path.mkdir(exist_ok=True, parents=True) - - for image, job_id in downloaded_images: - image.save(example_path / f"{job_id}_simple_async_example.webp") - - -async def async_simple_generate_example(apikey: str = ANON_API_KEY) -> None: - async with aiohttp.ClientSession() as aiohttp_session: - simple_client = AIHordeAPIAsyncSimpleClient(aiohttp_session) - - await async_one_image_generate_example(simple_client, apikey) - # await async_multi_image_generate_example(simple_client, apikey) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="AI Horde API Manual Client Example") - parser.add_argument( - "--apikey", - type=str, - default=ANON_API_KEY, - help="The API key to use. Defaults to the anon key.", - ) - args = parser.parse_args() - - # Run the example. 
- asyncio.run(async_simple_generate_example(args.apikey)) diff --git a/examples/ai_horde_client/find_user.py b/examples/ai_horde_client/find_user.py index 53fad5c..84cc011 100644 --- a/examples/ai_horde_client/find_user.py +++ b/examples/ai_horde_client/find_user.py @@ -2,7 +2,8 @@ from horde_sdk import RequestErrorResponse from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIManualClient -from horde_sdk.ai_horde_api.apimodels import FindUserRequest, FindUserResponse +from horde_sdk.ai_horde_api.apimodels import FindUserRequest +from horde_sdk.ai_horde_api.apimodels._users import UserDetailsResponse def find_user_example( @@ -13,8 +14,8 @@ def find_user_example( apikey=api_key, ) - find_user_response: FindUserResponse | RequestErrorResponse - find_user_response = client.submit_request(find_user_request, expected_response_type=FindUserResponse) + find_user_response: UserDetailsResponse | RequestErrorResponse + find_user_response = client.submit_request(find_user_request, expected_response_type=UserDetailsResponse) if isinstance(find_user_response, RequestErrorResponse): print(f"Error: {find_user_response.message}") @@ -28,7 +29,14 @@ def find_user_example( if __name__ == "__main__": # Add the api key argument with argparse parser = argparse.ArgumentParser() - parser.add_argument("--api_key", type=str, required=True) + parser.add_argument( + "--api-key", + "--api_key", + "--apikey", + "-k", + type=str, + required=True, + ) args = parser.parse_args() # Create the client diff --git a/examples/ai_horde_client/alchemy_example.py b/examples/ai_horde_client/image/alchemy_example.py similarity index 66% rename from examples/ai_horde_client/alchemy_example.py rename to examples/ai_horde_client/image/alchemy_example.py index 766e547..b74a86b 100644 --- a/examples/ai_horde_client/alchemy_example.py +++ b/examples/ai_horde_client/image/alchemy_example.py @@ -4,10 +4,11 @@ from pathlib import Path import aiohttp +from loguru import logger from PIL.Image import Image from horde_sdk import ANON_API_KEY, RequestErrorResponse -from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIAsyncSimpleClient +from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIAsyncClientSession, AIHordeAPIAsyncSimpleClient from horde_sdk.ai_horde_api.apimodels import ( KNOWN_ALCHEMY_TYPES, AlchemyAsyncRequest, @@ -20,16 +21,18 @@ async def async_alchemy_example( apikey: str = ANON_API_KEY, source_image_file_path: str = "examples/cat_in_a_hat.webp", ) -> None: - async with aiohttp.ClientSession() as aiohttp_session: - simple_client = AIHordeAPIAsyncSimpleClient(aiohttp_session) + aiohttp_session: aiohttp.ClientSession = aiohttp.ClientSession() + horde_client_session: AIHordeAPIAsyncClientSession = AIHordeAPIAsyncClientSession(aiohttp_session) + async with aiohttp_session, horde_client_session: + simple_client = AIHordeAPIAsyncSimpleClient(horde_client_session=horde_client_session) input_image_path = Path(source_image_file_path) if not input_image_path.exists(): - print(f"Error: {input_image_path} does not exist.") + logger.error(f"Input image file not found: {input_image_path}") return - base64_image: str = base64.b64encode(input_image_path.read_bytes()).decode() + source_image_base64: str = base64.b64encode(input_image_path.read_bytes()).decode() status_response: AlchemyStatusResponse | RequestErrorResponse status_response, job_id = await simple_client.alchemy_request( @@ -39,35 +42,37 @@ async def async_alchemy_example( AlchemyAsyncRequestFormItem(name=KNOWN_ALCHEMY_TYPES.caption), 
AlchemyAsyncRequestFormItem(name=KNOWN_ALCHEMY_TYPES.RealESRGAN_x2plus), ], - source_image=base64_image, + source_image=source_image_base64, ), ) - print(f"Status: {status_response.state}") + logger.info(f"Status: {status_response.state}") for caption_result in status_response.all_caption_results: - print(f"Caption: {caption_result.caption}") + logger.info(f"Caption: {caption_result.caption}") for upscale_result in status_response.all_upscale_results: upscale_result_image: Image = await simple_client.download_image_from_url(upscale_result.url) - example_path = Path("examples/requested_images") + example_path = Path("requested_images") example_path.mkdir(exist_ok=True, parents=True) upscale_result_image.save(example_path / f"{job_id}_{upscale_result.upscaler_used}.webp") - print(f"Upscale result saved to {example_path / f'{job_id}_{upscale_result.upscaler_used}.webp'}") + logger.info(f"Upscaled image saved to {example_path / f'{job_id}_{upscale_result.upscaler_used}.webp'}") if __name__ == "__main__": parser = argparse.ArgumentParser(description="AI Horde API Manual Client Example") parser.add_argument( + "-k", "--apikey", + "--api-key", + "--api_key", type=str, default=ANON_API_KEY, help="The API key to use. Defaults to the anon key.", ) args = parser.parse_args() - # Run the example. asyncio.run(async_alchemy_example(args.apikey)) diff --git a/examples/ai_horde_client/async_aihorde_manual_client_example.py b/examples/ai_horde_client/image/async_manual_client_example.py similarity index 61% rename from examples/ai_horde_client/async_aihorde_manual_client_example.py rename to examples/ai_horde_client/image/async_manual_client_example.py index 1ec77d7..5dbf620 100644 --- a/examples/ai_horde_client/async_aihorde_manual_client_example.py +++ b/examples/ai_horde_client/image/async_manual_client_example.py @@ -1,8 +1,11 @@ +import argparse import asyncio import time from pathlib import Path +import aiofiles import aiohttp +from loguru import logger from horde_sdk import ANON_API_KEY from horde_sdk.ai_horde_api import AIHordeAPIAsyncManualClient @@ -10,28 +13,27 @@ from horde_sdk.generic_api.apimodels import RequestErrorResponse -async def main() -> None: - print("Starting...") - +async def main(apikey: str = ANON_API_KEY) -> None: + logger.info("Starting...") async with aiohttp.ClientSession() as aiohttp_session: manual_client = AIHordeAPIAsyncManualClient(aiohttp_session=aiohttp_session) image_generate_async_request = ImageGenerateAsyncRequest( - apikey=ANON_API_KEY, + apikey=apikey, prompt="A cat in a hat", models=["Deliberate"], ) - print("Submitting image generation request...") + logger.info("Submitting image generation request...") response = await manual_client.submit_request( image_generate_async_request, image_generate_async_request.get_default_success_response_type(), ) if isinstance(response, RequestErrorResponse): - print(f"Error: {response.message}") + logger.error(f"Error: {response.message}") return - print("Image generation request submitted!") + logger.info("Image generation request submitted!") image_done = False start_time = time.time() @@ -42,7 +44,7 @@ async def main() -> None: while not image_done: current_time = time.time() if current_time - cycle_time > 20 or check_counter == 0: - print(f"{current_time - start_time} seconds elapsed ({check_counter} checks made)...") + logger.info(f"{current_time - start_time} seconds elapsed ({check_counter} checks made)...") cycle_time = current_time check_counter += 1 @@ -51,12 +53,13 @@ async def main() -> None: ) if 
isinstance(check_response, RequestErrorResponse): - print(f"Error: {check_response.message}") + logger.error(f"Error: {check_response.message}") return if check_response.done: - print("Image is done!") - print(f"{time.time() - cycle_time} seconds elapsed ({check_counter} checks made)...") + logger.info("Image generation done!") + logger.info(f"{check_response}") + logger.info(f"{time.time() - cycle_time} seconds elapsed ({check_counter} checks made)...") image_done = True break @@ -74,15 +77,15 @@ async def main() -> None: ) if isinstance(status_response, RequestErrorResponse): - print(f"Error: {status_response.message}") + logger.error(f"Error: {status_response.message}") return for image_gen in status_response.generations: - print("Image generation:") - print(f"ID: {image_gen.id_}") - print(f"URL: {image_gen.img}") - # debug(image_gen) - print("Downloading image...") + logger.info("Image generation:") + logger.info(f"ID: {image_gen.id_}") + logger.info(f"URL: {image_gen.img}") + + logger.info("Downloading image...") image_bytes = None # image_gen.img is a url, download it using aiohttp. @@ -90,19 +93,30 @@ async def main() -> None: image_bytes = await resp.read() if image_bytes is None: - print("Error: Could not download image.") + logger.error("Failed to download image.") return - example_path = Path("examples/requested_images") + example_path = Path("requested_images") example_path.mkdir(exist_ok=True, parents=True) filepath_to_write_to = example_path / f"{image_gen.id_}_man_async_example.webp" - with open(filepath_to_write_to, "wb") as image_file: - image_file.write(image_bytes) + async with aiofiles.open(filepath_to_write_to, "wb") as file: + await file.write(image_bytes) - print(f"Image downloaded to {filepath_to_write_to}!") + logger.info(f"Image saved to {filepath_to_write_to}") if __name__ == "__main__": - asyncio.run(main()) + parser = argparse.ArgumentParser(description="AI Horde API Manual Client Example") + parser.add_argument( + "--apikey", + "--api-key", + "-k", + type=str, + default=ANON_API_KEY, + help="The API key to use. 
Defaults to the anon key.", + ) + args = parser.parse_args() + + asyncio.run(main(args.apikey)) diff --git a/examples/ai_horde_client/image/async_simple_client_example.py b/examples/ai_horde_client/image/async_simple_client_example.py new file mode 100644 index 0000000..aa1dd05 --- /dev/null +++ b/examples/ai_horde_client/image/async_simple_client_example.py @@ -0,0 +1,165 @@ +import argparse +import asyncio +from pathlib import Path + +import aiohttp +from loguru import logger +from PIL.Image import Image + +from horde_sdk import ANON_API_KEY, RequestErrorResponse +from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIAsyncClientSession, AIHordeAPIAsyncSimpleClient +from horde_sdk.ai_horde_api.apimodels import ( + # ExtraTextEntry, + ImageGenerateAsyncRequest, + ImageGenerateStatusResponse, + ImageGeneration, + ImageGenerationInputPayload, + TIPayloadEntry, +) +from horde_sdk.ai_horde_api.fields import JobID + + +def save_image_and_json( + image: Image, + generation: ImageGeneration, + example_path: Path, + filename_base: str, +) -> None: + image.save(example_path / f"{filename_base}.webp") + logger.info(f"Image saved to {example_path / f'{filename_base}.webp'}") + + with open(example_path / f"{filename_base}.json", "w") as f: + f.write(generation.model_dump_json(indent=4)) + + logger.info(f"Response JSON saved to {example_path / f'{filename_base}.json'}") + + +async def async_one_image_generate_example( + simple_client: AIHordeAPIAsyncSimpleClient, + apikey: str = ANON_API_KEY, +) -> None: + single_generation_response: ImageGenerateStatusResponse + job_id: JobID + + single_generation_response, job_id = await simple_client.image_generate_request( + ImageGenerateAsyncRequest( + apikey=apikey, + prompt="A cat in a hat", + models=["Deliberate"], + params=ImageGenerationInputPayload( + height=512, + width=512, + tis=[ + TIPayloadEntry( + name="72437", + inject_ti="negprompt", + strength=1, + ), + ], + n=2, + ), + ), + ) + + if isinstance(single_generation_response, RequestErrorResponse): + logger.error(f"Error: {single_generation_response.message}") + else: + example_path = Path("requested_images") + example_path.mkdir(exist_ok=True, parents=True) + + download_image_tasks: list[asyncio.Task[tuple[Image, JobID]]] = [] + + for generation in single_generation_response.generations: + download_image_tasks.append(asyncio.create_task(simple_client.download_image_from_generation(generation))) + + downloaded_images: list[tuple[Image, JobID]] = await asyncio.gather(*download_image_tasks) + + for image, job_id in downloaded_images: + filename_base = f"{job_id}_simple_async_example" + save_image_and_json(image, generation, example_path, filename_base) + + +async def async_multi_image_generate_example( + simple_client: AIHordeAPIAsyncSimpleClient, + apikey: str = ANON_API_KEY, +) -> None: + multi_generation_responses: tuple[ + tuple[ImageGenerateStatusResponse, JobID], + tuple[ImageGenerateStatusResponse, JobID], + ] + multi_generation_responses = await asyncio.gather( + simple_client.image_generate_request( + ImageGenerateAsyncRequest( + apikey=apikey, + prompt="a blue stylized brain", + models=["Anything Diffusion"], + params=ImageGenerationInputPayload( + height=1024, + width=1024, + n=2, + ), + ), + ), + simple_client.image_generate_request( + ImageGenerateAsyncRequest( + apikey=apikey, + prompt="a red stylized brain", + models=["AlbedoBase XL (SDXL)"], + params=ImageGenerationInputPayload( + height=1024, + width=1024, + n=2, + ), + ), + ), + ) + + download_image_tasks: 
list[asyncio.Task[tuple[Image, JobID]]] = [] + + for generation_response, _job_id in multi_generation_responses: + if isinstance(generation_response, RequestErrorResponse): + logger.error(f"Error: {generation_response.message}") + else: + example_path = Path("requested_images") + example_path.mkdir(exist_ok=True, parents=True) + + for generation in generation_response.generations: + download_image_tasks.append( + asyncio.create_task(simple_client.download_image_from_generation(generation)), + ) + + downloaded_images: list[tuple[Image, JobID]] = await asyncio.gather(*download_image_tasks) + + for image, job_id in downloaded_images: + filename_base = f"{job_id}_simple_async_example" + save_image_and_json(image, generation, example_path, filename_base) + + +async def async_simple_generate_example(apikey: str = ANON_API_KEY) -> None: + aiohttp_session = aiohttp.ClientSession() + horde_client_session = AIHordeAPIAsyncClientSession(aiohttp_session) + + async with aiohttp_session, horde_client_session: + simple_client = AIHordeAPIAsyncSimpleClient( + aiohttp_session=aiohttp_session, + horde_client_session=horde_client_session, + ) + + # await async_one_image_generate_example(simple_client, apikey) + await async_multi_image_generate_example(simple_client, apikey) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="AI Horde API Manual Client Example") + parser.add_argument( + "--apikey", + "--api-key", + "-k", + type=str, + default=ANON_API_KEY, + help="The API key to use. Defaults to the anon key.", + ) + args = parser.parse_args() + + # Run the example. + asyncio.run(async_simple_generate_example(args.apikey)) diff --git a/examples/ai_horde_client/aihorde_dry_run_example.py b/examples/ai_horde_client/image/dry_run_example.py similarity index 100% rename from examples/ai_horde_client/aihorde_dry_run_example.py rename to examples/ai_horde_client/image/dry_run_example.py diff --git a/examples/ai_horde_client/aihorde_manual_client_example.py b/examples/ai_horde_client/image/manual_client_example.py similarity index 56% rename from examples/ai_horde_client/aihorde_manual_client_example.py rename to examples/ai_horde_client/image/manual_client_example.py index 9114cc0..2367a07 100644 --- a/examples/ai_horde_client/aihorde_manual_client_example.py +++ b/examples/ai_horde_client/image/manual_client_example.py @@ -1,6 +1,9 @@ +import argparse import time from pathlib import Path +from loguru import logger + from horde_sdk import ANON_API_KEY, RequestErrorResponse from horde_sdk.ai_horde_api import AIHordeAPIManualClient, download_image_from_generation from horde_sdk.ai_horde_api.apimodels import ( @@ -12,8 +15,13 @@ ) -def main(apikey: str = ANON_API_KEY) -> None: - print("Starting...") +def manual_image_generation(apikey: str = ANON_API_KEY) -> None: + # Please see the documentation for important information about the potential + # pitfalls of manually managing requests and responses. + # + # See the simple client examples for a more user-friendly way to interact with the API. 
+ + logger.info("Starting...") manual_client = AIHordeAPIManualClient() @@ -23,7 +31,7 @@ def main(apikey: str = ANON_API_KEY) -> None: models=["Deliberate"], ) - print("Submitting image generation request...") + logger.info("Submitting image generation request...") response: ImageGenerateAsyncResponse | RequestErrorResponse response = manual_client.submit_request( @@ -32,10 +40,10 @@ def main(apikey: str = ANON_API_KEY) -> None: ) if isinstance(response, RequestErrorResponse): - print(f"Error: {response.message}") + logger.error(f"Error: {response.message}") return - print("Image generation request submitted!") + logger.info("Image generation request submitted!") image_done = False start_time = time.time() cycle_time = start_time @@ -45,7 +53,7 @@ def main(apikey: str = ANON_API_KEY) -> None: while not image_done: current_time = time.time() if current_time - cycle_time > 20 or check_counter == 0: - print(f"{current_time - start_time} seconds elapsed ({check_counter} checks made)...") + logger.info(f"{current_time - start_time} seconds elapsed ({check_counter} checks made)...") cycle_time = current_time check_counter += 1 @@ -61,12 +69,12 @@ def main(apikey: str = ANON_API_KEY) -> None: # ) if isinstance(check_response, RequestErrorResponse): - print(f"Error: {check_response.message}") + logger.error(f"Error: {check_response.message}") return if check_response.done: - print("Image is done!") - print(f"{time.time() - cycle_time} seconds elapsed ({check_counter} checks made)...") + logger.info("Image generation done!") + logger.info(f"{time.time() - cycle_time} seconds elapsed ({check_counter} checks made)...") image_done = True break @@ -85,26 +93,48 @@ def main(apikey: str = ANON_API_KEY) -> None: ) if isinstance(status_response, RequestErrorResponse): - print(f"Error: {status_response.message}") + logger.error(f"Error: {status_response.message}") return for image_gen in status_response.generations: - print("Image generation:") - print(f"ID: {image_gen.id_}") - print(f"URL: {image_gen.img}") + logger.info("Image generation:") + logger.info(f"ID: {image_gen.id_}") + logger.info(f"URL: {image_gen.img}") - print("Downloading image...") + logger.info("Downloading image...") image_pil = download_image_from_generation(image_gen) - example_path = Path("examples/requested_images") + example_path = Path("requested_images") example_path.mkdir(exist_ok=True, parents=True) - filepath_to_write_to = example_path / f"{image_gen.id_}_man_sync_example.webp" - image_pil.save(filepath_to_write_to) + filename_base = f"{image_gen.id_}_man_sync_example" + + image_file_path = example_path / f"{filename_base}.webp" + image_pil.save(image_file_path) + + logger.info(f"Image saved to {image_file_path}") - print(f"Image downloaded to {filepath_to_write_to}!") + with open(example_path / f"{filename_base}.json", "w") as f: + f.write(image_gen.model_dump_json(indent=4)) + + logger.info(f"Response JSON saved to {example_path / f'{filename_base}.json'}") if __name__ == "__main__": - main() + argParser = argparse.ArgumentParser() + + argParser.add_argument( + "-k", + "--apikey", + "--api-key", + "--api_key", + required=False, + default=ANON_API_KEY, + help="Your horde API key.", + ) + args = argParser.parse_args() + + api_key = args.apikey + + manual_image_generation(api_key) diff --git a/examples/ai_horde_client/aihorde_simple_client_example.py b/examples/ai_horde_client/image/simple_client_example.py similarity index 51% rename from examples/ai_horde_client/aihorde_simple_client_example.py rename to 
examples/ai_horde_client/image/simple_client_example.py index a95be2d..f952615 100644 --- a/examples/ai_horde_client/aihorde_simple_client_example.py +++ b/examples/ai_horde_client/image/simple_client_example.py @@ -2,6 +2,7 @@ from pathlib import Path from loguru import logger +from PIL.Image import Image from horde_sdk import ANON_API_KEY from horde_sdk.ai_horde_api import KNOWN_SAMPLERS @@ -11,8 +12,9 @@ from horde_sdk.ai_horde_api.apimodels import ( ImageGenerateAsyncRequest, ImageGenerationInputPayload, + # ExtraTextEntry, LorasPayloadEntry, - # TIPayloadEntry, + ImageGeneration, ) # isort: on @@ -27,12 +29,19 @@ def simple_generate_example(api_key: str = ANON_API_KEY) -> None: params=ImageGenerationInputPayload( sampler_name=KNOWN_SAMPLERS.k_euler, cfg_scale=4, - width=768, + width=512, height=512, - karras=False, + # karras=False, hires_fix=False, clip_skip=1, steps=30, + # workflow="qr_code", + # extra_texts=[ + # ExtraTextEntry( + # text="stablehorde.net", + # reference="qr_code", + # ), + # ], loras=[ LorasPayloadEntry( name="GlowingRunesAI", @@ -41,7 +50,7 @@ def simple_generate_example(api_key: str = ANON_API_KEY) -> None: inject_trigger="any", # Get a random color trigger ), ], - n=3, + # n=3, # Number of images to generate via batch generation ), prompt="a dark magical crystal, GlowingRunes_paleblue, 8K resolution###blurry, out of focus", models=["Deliberate"], @@ -51,30 +60,55 @@ def simple_generate_example(api_key: str = ANON_API_KEY) -> None: if len(status_response.generations) == 0: raise ValueError("No generations returned in the response.") - example_path = Path("examples/requested_images") + example_path = Path("requested_images") example_path.mkdir(exist_ok=True, parents=True) logger.info( f"{status_response.kudos} kudos were spent on this request for {len(status_response.generations)} images.", ) - if len(status_response.generations) == 1: - image = simple_client.download_image_from_generation(status_response.generations[0]) - image.save(example_path / f"{job_id}_simple_sync_example.webp") - else: - for i, generation in enumerate(status_response.generations): - image = simple_client.download_image_from_generation(generation) - image.save(example_path / f"{job_id}_{i}_simple_sync_example.webp") + def save_image_and_json( + image: Image, + generation: ImageGeneration, + example_path: Path, + filename_base: str, + ) -> None: + image.save(example_path / f"{filename_base}.webp") + logger.info(f"Image saved to {example_path / f'{filename_base}.webp'}") + + with open(example_path / f"{filename_base}.json", "w") as f: + f.write(generation.model_dump_json(indent=4)) + + logger.info(f"Response JSON saved to {example_path / f'{filename_base}.json'}") + + filename_base = f"{job_id}_simple_sync_example" + + for generation in status_response.generations: + logger.info("Image generation:") + logger.info(f"ID: {generation.id_}") + logger.info(f"URL: {generation.img}") + + logger.info("Downloading image...") + + image_pil = simple_client.download_image_from_generation(generation) + + save_image_and_json(image_pil, generation, example_path, filename_base) if __name__ == "__main__": - # Use arg parser to get the API key argParser = argparse.ArgumentParser() - argParser.add_argument("-k", "--apikey", required=False, default=ANON_API_KEY, help="Your horde API key.") + argParser.add_argument( + "-k", + "--apikey", + "--api-key", + "--api_key", + required=False, + default=ANON_API_KEY, + help="Your horde API key.", + ) args = argParser.parse_args() - api_key = args.api_key + api_key = 
args.apikey - while True: - simple_generate_example(api_key) + simple_generate_example(api_key) diff --git a/examples/ai_horde_client/text/async_text_generate.py b/examples/ai_horde_client/text/async_text_generate.py new file mode 100644 index 0000000..f551bcd --- /dev/null +++ b/examples/ai_horde_client/text/async_text_generate.py @@ -0,0 +1,118 @@ +import argparse +import asyncio +from pathlib import Path + +import aiofiles +import aiohttp +from loguru import logger + +from horde_sdk import ANON_API_KEY +from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIAsyncClientSession, AIHordeAPIAsyncSimpleClient +from horde_sdk.ai_horde_api.apimodels import ( + ModelGenerationInputKobold, + TextGenerateAsyncRequest, + TextGenerateStatusResponse, +) +from horde_sdk.ai_horde_api.exceptions import AIHordeRequestError +from horde_sdk.ai_horde_api.fields import JobID + + +def check_callback(response: TextGenerateStatusResponse) -> None: + """Callback function that can be passed to the text_generate_request method to get progress updates on the + request.""" + logger.info(f"Response: {response}") + + +async def async_text_generate_example( + simple_client: AIHordeAPIAsyncSimpleClient, + apikey: str = ANON_API_KEY, +) -> None: + + status_response: TextGenerateStatusResponse + job_id: JobID + + try: + status_response, job_id = await simple_client.text_generate_request( + TextGenerateAsyncRequest( + apikey=apikey, + prompt="Hello, world!", + models=["koboldcpp/LLaMA2-13B-Psyfighter2"], + params=ModelGenerationInputKobold( + # dynatemp_exponent=1.0, + # dynatemp_range=0.0, + # frmtadsnsp=False, + # frmtrmblln=False, + # frmtrmspch=False, + # frmttriminc=False, + max_context_length=1024, + max_length=80, + # min_p=0.0, + # n=1, + # rep_pen=1.0, + # rep_pen_range=0, + # rep_pen_slope=0.0, + # sampler_order=[1, 2, 3], + # singleline=False, + # smoothing_factor=0.0, + # stop_sequence=["stop1", "stop2"], + # temperature=0.0, + # tfs=0.0, + # top_a=0.0, + # top_k=0, + # top_p=0.001, + # typical=0.0, + # use_default_badwordsids=True, + ), + ), + # timeout=60*60*20, # time before cancelling the request; times out server side at 20 minutes by default + check_callback=check_callback, + ) + except AIHordeRequestError as e: + logger.error(f"Server error: {e}") + return + except RuntimeError as e: + logger.error(f"Runtime error: {e}") + return + + if len(status_response.generations) == 0: + raise ValueError("No generations returned in the response.") + + logger.debug(f"Job ID: {job_id}") + logger.debug(f"Response: {status_response}") + + text_generated = status_response.generations[0].text + + logger.debug(f"Generated Text: {text_generated}") + + example_path = Path("requested_text") + example_path.mkdir(exist_ok=True, parents=True) + + async with aiofiles.open(example_path / f"{job_id}_async_example.txt", "w") as f: + await f.write(status_response.model_dump_json(indent=4)) + + logger.info(f"Wrote full response JSON to {example_path / f'{job_id}_async_example.txt'}") + + +async def main(apikey: str) -> None: + aiohttp_session = aiohttp.ClientSession() + horde_client_session = AIHordeAPIAsyncClientSession( + aiohttp_session=aiohttp_session, + ) + async_client = AIHordeAPIAsyncSimpleClient(horde_client_session=horde_client_session) + + async with aiohttp_session, horde_client_session: + await async_text_generate_example(async_client, apikey) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--api-key", + "--apikey", + "-k", + type=str, + required=True, + ) + args = 
parser.parse_args() + + asyncio.run(main(args.api_key)) diff --git a/examples/ai_horde_client/text/text_generate.py b/examples/ai_horde_client/text/text_generate.py new file mode 100644 index 0000000..1391960 --- /dev/null +++ b/examples/ai_horde_client/text/text_generate.py @@ -0,0 +1,96 @@ +import argparse +from pathlib import Path + +from loguru import logger + +from horde_sdk import ANON_API_KEY +from horde_sdk.ai_horde_api import JobID +from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPISimpleClient +from horde_sdk.ai_horde_api.apimodels import ( + ModelGenerationInputKobold, + TextGenerateAsyncRequest, + TextGenerateStatusResponse, +) + + +def check_callback(response: TextGenerateStatusResponse) -> None: + """Callback function that can be passed to the text_generate_request method to get progress updates on the + request.""" + logger.info(f"Response: {response}") + + +def simple_generate_example(api_key: str = ANON_API_KEY) -> None: + simple_client = AIHordeAPISimpleClient() + + status_response: TextGenerateStatusResponse + job_id: JobID + + status_response, job_id = simple_client.text_generate_request( + TextGenerateAsyncRequest( + apikey=api_key, + prompt="Hello, world!", + models=[ + "koboldcpp/LLaMA2-13B-Psyfighter2", + ], + params=ModelGenerationInputKobold( + # dynatemp_exponent=1.0, + # dynatemp_range=0.0, + # frmtadsnsp=False, + # frmtrmblln=False, + # frmtrmspch=False, + # frmttriminc=False, + max_context_length=1024, + max_length=80, + # min_p=0.0, + # n=1, + # rep_pen=1.0, + # rep_pen_range=0, + # rep_pen_slope=0.0, + # sampler_order=[1, 2, 3], + # singleline=False, + # smoothing_factor=0.0, + # stop_sequence=["stop1", "stop2"], + # temperature=0.0, + # tfs=0.0, + # top_a=0.0, + # top_k=0, + # top_p=0.001, + # typical=0.0, + # use_default_badwordsids=True, + ), + ), + # timeout=60*60*20, # time before cancelling the request; times out server side at 20 minutes by default + check_callback=check_callback, + ) + + if len(status_response.generations) == 0: + raise ValueError("No generations returned in the response.") + + logger.debug(f"Job ID: {job_id}") + logger.debug(f"Response: {status_response}") + + text_generated = status_response.generations[0].text + + logger.debug(f"Generated Text: {text_generated}") + + example_path = Path("requested_text") + example_path.mkdir(exist_ok=True, parents=True) + + with open(example_path / f"{job_id}_simple_sync_example.txt", "w") as f: + f.write(status_response.model_dump_json(indent=4)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Simple text generation example.") + parser.add_argument( + "--apikey", + "--api-key", + "-k", + type=str, + default=ANON_API_KEY, + help="The API key to use for the request.", + ) + + args = parser.parse_args() + + simple_generate_example(args.apikey) diff --git a/examples/ai_horde_client/workers.py b/examples/ai_horde_client/workers.py new file mode 100644 index 0000000..5548dba --- /dev/null +++ b/examples/ai_horde_client/workers.py @@ -0,0 +1,153 @@ +import argparse + +from loguru import logger + +from horde_sdk import ANON_API_KEY +from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPISimpleClient +from horde_sdk.ai_horde_api.apimodels import ( + AllWorkersDetailsResponse, + ModifyWorkerRequest, + ModifyWorkerResponse, + SingleWorkerDetailsResponse, +) + + +def all_workers(api_key: str, simple_client: AIHordeAPISimpleClient, filename: str) -> None: + all_workers_response: AllWorkersDetailsResponse + + all_workers_response = 
simple_client.workers_all_details() + + if all_workers_response is None: + raise ValueError("No workers returned in the response.") + + logger.info(f"Number of workers: {len(all_workers_response)}") + + with open(filename, "w") as f: + f.write(all_workers_response.model_dump_json(indent=4)) + + logger.info(f"Workers written to {filename}") + + +def single_worker(api_key: str, simple_client: AIHordeAPISimpleClient, worker_id: str, filename: str) -> None: + single_worker_response: SingleWorkerDetailsResponse + + single_worker_response = simple_client.worker_details(worker_id=worker_id) + + if single_worker_response is None: + raise ValueError("No worker returned in the response.") + + logger.info(f"Worker: {single_worker_response}") + + with open(filename, "w") as f: + f.write(f"{single_worker_response}\n") + + logger.info(f"Worker details written to {filename}") + + +def set_maintenance_mode( + api_key: str, + simple_client: AIHordeAPISimpleClient, + worker_id: str, + maintenance_mode: bool, +) -> None: + modify_worker_request = ModifyWorkerRequest( + apikey=api_key, + worker_id=worker_id, + maintenance=maintenance_mode, + ) + + modify_worker_response: ModifyWorkerResponse + + modify_worker_response = simple_client.worker_modify(modify_worker_request) + + if modify_worker_response is None: + raise ValueError("No worker returned in the response.") + + logger.info(f"Worker: {modify_worker_response}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Simple text generation example.") + parser.add_argument( + "--apikey", + "--api-key", + "--api_key", + "-k", + type=str, + default=ANON_API_KEY, + help="The API key to use for the request.", + ) + parser.add_argument( + "--filename", + "-f", + type=str, + default="workers.txt", + help="The filename to write the workers to.", + ) + + # Either all or worker_id must be specified. + group = parser.add_mutually_exclusive_group(required=True) + + group.add_argument( + "--all", + action="store_true", + help="Get details for all workers.", + ) + + group.add_argument( + "--worker_id", + "-w", + type=str, + help="The worker ID to get details for.", + ) + + group2 = parser.add_mutually_exclusive_group() + group2.add_argument( + "--maintenance-mode-on", + "-m", + action="store_true", + help="Turn on maintenance mode.", + ) + group2.add_argument( + "--maintenance-mode-off", + "-M", + action="store_true", + help="Turn off maintenance mode.", + ) + + args = parser.parse_args() + + # If `all` is specified and a maintenance mode flag is specified, raise an error. 
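The helpers above can also be driven directly from Python rather than through the argparse wiring that follows. A hedged sketch, assuming it is pasted below the function definitions in workers.py; the worker ID and API key are placeholders, and modifying a worker presumably requires the key of the account that owns it:

```python
# Sketch: calling the workers.py helpers without the CLI. Placeholder values only.
simple_client = AIHordeAPISimpleClient()

# Dump details for every worker, then fetch a single worker by ID.
all_workers(api_key=ANON_API_KEY, simple_client=simple_client, filename="workers.txt")
single_worker(
    api_key=ANON_API_KEY,
    simple_client=simple_client,
    worker_id="00000000-0000-0000-0000-000000000000",  # placeholder worker ID
    filename="worker.txt",
)

# Maintenance mode changes presumably need the owning account's API key,
# not ANON_API_KEY.
set_maintenance_mode(
    api_key="your-owner-api-key",  # placeholder
    simple_client=simple_client,
    worker_id="00000000-0000-0000-0000-000000000000",  # placeholder worker ID
    maintenance_mode=True,
)
```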
+ if args.all and (args.maintenance_mode_on or args.maintenance_mode_off): + raise ValueError("Cannot specify maintenance mode with `all`.") + + simple_client = AIHordeAPISimpleClient() + + if args.all: + all_workers( + api_key=args.apikey, + simple_client=simple_client, + filename=args.filename, + ) + elif args.worker_id: + if args.maintenance_mode_on: + set_maintenance_mode( + api_key=args.apikey, + simple_client=simple_client, + worker_id=args.worker_id, + maintenance_mode=True, + ) + elif args.maintenance_mode_off: + set_maintenance_mode( + api_key=args.apikey, + simple_client=simple_client, + worker_id=args.worker_id, + maintenance_mode=False, + ) + else: + single_worker( + api_key=args.apikey, + simple_client=simple_client, + worker_id=args.worker_id, + filename=args.filename, + ) diff --git a/horde_sdk/ai_horde_api/ai_horde_clients.py b/horde_sdk/ai_horde_api/ai_horde_clients.py index 0975bcf..4babb34 100644 --- a/horde_sdk/ai_horde_api/ai_horde_clients.py +++ b/horde_sdk/ai_horde_api/ai_horde_clients.py @@ -4,27 +4,34 @@ import asyncio import base64 -import contextlib import inspect import io import time import urllib.parse from abc import ABC, abstractmethod from collections.abc import Callable, Coroutine -from enum import auto from typing import cast import aiohttp import PIL.Image import requests from loguru import logger -from strenum import StrEnum from horde_sdk import COMPLETE_LOGGER_LABEL, PROGRESS_LOGGER_LABEL from horde_sdk.ai_horde_api.apimodels import ( + AIHordeHeartbeatRequest, + AIHordeHeartbeatResponse, AlchemyAsyncRequest, AlchemyStatusResponse, + AllWorkersDetailsRequest, + AllWorkersDetailsResponse, DeleteImageGenerateRequest, + DeleteWorkerRequest, + DeleteWorkerResponse, + HordeStatusModelsAllRequest, + HordeStatusModelsAllResponse, + HordeStatusModelsSingleRequest, + HordeStatusModelsSingleResponse, ImageGenerateAsyncDryRunResponse, ImageGenerateAsyncRequest, ImageGenerateCheckRequest, @@ -32,13 +39,30 @@ ImageGenerateStatusRequest, ImageGenerateStatusResponse, ImageGeneration, + ImageStatsModelsRequest, + ImageStatsModelsResponse, + ImageStatsModelsTotalRequest, + ImageStatsModelsTotalResponse, + ModifyWorkerRequest, + ModifyWorkerResponse, + NewsRequest, + NewsResponse, ResponseGenerationProgressCombinedMixin, + SingleWorkerDetailsRequest, + SingleWorkerDetailsResponse, + TextGenerateAsyncDryRunResponse, + TextGenerateAsyncRequest, + TextGenerateStatusResponse, + TextStatsModelResponse, + TextStatsModelsRequest, + TextStatsModelsTotalRequest, + TextStatsModelsTotalResponse, ) from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest, JobRequestMixin -from horde_sdk.ai_horde_api.consts import GENERATION_MAX_LIFE +from horde_sdk.ai_horde_api.consts import GENERATION_MAX_LIFE, PROGRESS_STATE from horde_sdk.ai_horde_api.endpoints import AI_HORDE_BASE_URL from horde_sdk.ai_horde_api.exceptions import AIHordeImageValidationError, AIHordeRequestError -from horde_sdk.ai_horde_api.fields import JobID +from horde_sdk.ai_horde_api.fields import JobID, WorkerID from horde_sdk.ai_horde_api.metadata import AIHordePathData, AIHordeQueryData from horde_sdk.generic_api.apimodels import ( ContainsMessageResponseMixin, @@ -203,7 +227,6 @@ def get_generate_check( Not to be confused with `get_generate_status` which returns the images too. Args: - apikey (str): The API key to use for authentication. job_id (JobID | str): The ID of the request to check. 
Returns: @@ -228,7 +251,6 @@ def get_generate_status( Use `get_generate_check` instead to check the status of a pending image request. Args: - apikey (str): The API key to use for authentication. job_id (JobID): The ID of the request to check. Returns: @@ -285,7 +307,6 @@ async def get_generate_check( Not to be confused with `get_generate_status` which returns the images too. Args: - apikey (str): The API key to use for authentication. job_id (JobID | str): The ID of the request to check. Returns: @@ -310,7 +331,6 @@ async def get_generate_status( Use `get_generate_check` instead to check the status of a pending image request. Args: - apikey (str): The API key to use for authentication. job_id (JobID): The ID of the request to check. Returns: @@ -383,22 +403,21 @@ def __init__( ) -class PROGRESS_STATE(StrEnum): - waiting = auto() - finished = auto() - timed_out = auto() - - class BaseAIHordeSimpleClient(ABC): """The base class for the most straightforward clients which interact with the AI-Horde API.""" reasonable_minimum_timeout = 20 - def validate_timeout(self, timeout: int, log_message: bool = False) -> int: + def validate_timeout( + self, + timeout: int, + log_message: bool = False, + ) -> int: """Check if a timeout is reasonable. Args: timeout (int): The timeout to check. + log_message (bool, optional): Whether to log a message if the timeout is too short. Defaults to False. Returns: bool: True if the timeout is reasonable, False otherwise. @@ -514,7 +533,6 @@ def _handle_progress_response( Typically, this is a response from a `check` or `status` request. """ - # Check for error responses if isinstance(check_response, RequestErrorResponse): raise AIHordeRequestError(check_response) @@ -565,7 +583,7 @@ class AIHordeAPISimpleClient(BaseAIHordeSimpleClient): """A simple client for the AI-Horde API. This is the easiest way to get started.""" def download_image_from_generation(self, generation: ImageGeneration) -> PIL.Image.Image: - """Synchronously convert from base64 or download an image from a response. + """Convert from base64 or download an image from a response synchronously. Args: generation (ImageGeneration): The image generation to convert. @@ -582,7 +600,7 @@ def download_image_from_generation(self, generation: ImageGeneration) -> PIL.Ima return download_image_from_generation(generation) def download_image_from_url(self, url: str) -> PIL.Image.Image: - """Synchronously download an image from a URL. + """Download an image from a URL synchronously. Args: url (str): The URL to download the image from. @@ -614,11 +632,13 @@ def _do_request_with_check( number_of_responses (int, optional): The number of responses to expect. Defaults to 1. timeout (int, optional): The number of seconds to wait before aborting. returns any completed images at the end of the timeout. Defaults to DEFAULT_GENERATION_TIMEOUT. + check_callback (Callable[[HordeResponse], None], optional): A callback to call with the check response. + check_callback_type (type[ResponseWithProgressMixin | ResponseGenerationProgressCombinedMixin], optional): + The type of response expected by the callback. Returns: tuple[HordeResponse, JobID]: The final response and the corresponding job ID. 
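The check_callback hook documented in this hunk is also exposed on the public generate methods; a minimal sketch of wiring it up for an image request. The import path for ImageGenerateCheckResponse is assumed to match the other apimodels imports, and the callback simply logs each poll, mirroring the text examples:

```python
# Sketch: receiving progress updates while the sync simple client polls.
from loguru import logger

from horde_sdk import ANON_API_KEY
from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPISimpleClient
from horde_sdk.ai_horde_api.apimodels import ImageGenerateAsyncRequest, ImageGenerateCheckResponse


def log_progress(check_response: ImageGenerateCheckResponse) -> None:
    # Called with every intermediate check response; just log it.
    logger.info(f"Check response: {check_response}")


simple_client = AIHordeAPISimpleClient()

status_response, job_id = simple_client.image_generate_request(
    ImageGenerateAsyncRequest(
        apikey=ANON_API_KEY,
        prompt="a dark magical crystal",
        models=["Deliberate"],
    ),
    check_callback=log_progress,
)
```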
""" - if check_callback is not None and len(inspect.getfullargspec(check_callback).args) == 0: raise ValueError("Callback must take at least one argument") @@ -707,6 +727,26 @@ def _do_request_with_check( logger.error(f"Request: {api_request.log_safe_model_dump()}") raise RuntimeError("Something went wrong with the request") + def heartbeat_request( + self, + ) -> AIHordeHeartbeatResponse: + """Submit a heartbeat request to the AI-Horde API. + + Returns: + AIHordeHeartbeatResponse: The response from the API. + """ + api_request = AIHordeHeartbeatRequest() + + with AIHordeAPIClientSession() as horde_session: + api_response = horde_session.submit_request(api_request, api_request.get_default_success_response_type()) + + if isinstance(api_response, RequestErrorResponse): + raise AIHordeRequestError(api_response) + + return api_response + + raise RuntimeError("Something went wrong with the request") + def image_generate_request( self, image_gen_request: ImageGenerateAsyncRequest, @@ -718,7 +758,9 @@ def image_generate_request( Args: image_gen_request (ImageGenerateAsyncRequest): The request to submit. timeout (int, optional): The number of seconds to wait before aborting. - returns any completed images at the end of the timeout. Defaults to -1. + returns any completed images at the end of the timeout. Defaults to -1. + check_callback (Callable[[ImageGenerateCheckResponse], None], optional): A callback to call with the check + response. Returns: list[ImageGeneration]: The completed images. @@ -728,7 +770,6 @@ def image_generate_request( binascii.Error: If the image couldn't be parsed from base 64. RuntimeError: If the image couldn't be downloaded or parsed for any other reason. """ - # `cast()` returns the value unchanged but tells coerces the type for mypy's benefit # Static type checkers can't see that `_do_request_with_check` is reliably passing an object of the correct # type, but we are guaranteed that it is due to the `ImageGenerateCheckResponse` type being passed as an arg. @@ -759,6 +800,14 @@ def image_generate_request_dry_run( self, image_gen_request: ImageGenerateAsyncRequest, ) -> ImageGenerateAsyncDryRunResponse: + """Submit a dry run image generation, which will return the kudos cost without actually generating images. + + Args: + image_gen_request (ImageGenerateAsyncRequest): The request to submit. + + Returns: + ImageGenerateAsyncDryRunResponse: The response from the API. + """ if not image_gen_request.dry_run: raise RuntimeError("Dry run request must have dry_run set to True") @@ -786,6 +835,10 @@ def alchemy_request( Args: alchemy_request (AlchemyAsyncRequest): The request to submit. + timeout (int, optional): The number of seconds to wait before aborting. + returns any completed images at the end of the timeout. Defaults to -1. + check_callback (Callable[[AlchemyStatusResponse], None], optional): A callback to call with the check + response. Returns: AlchemyStatusResponse: The completed alchemy request(s). @@ -819,23 +872,342 @@ def alchemy_request( return (response, job_id) + def text_generate_request( + self, + text_gen_request: TextGenerateAsyncRequest, + timeout: int = GENERATION_MAX_LIFE, + check_callback: Callable[[TextGenerateStatusResponse], None] | None = None, + ) -> tuple[TextGenerateStatusResponse, JobID]: + """Submit a text generation request to the AI-Horde API, and wait for it to complete. + + Args: + text_gen_request (TextGenerateAsyncRequest): The request to submit. + timeout (int, optional): The number of seconds to wait before aborting. 
+ returns any completed images at the end of the timeout. Defaults to -1. + check_callback (Callable[[TextGenerateStatusResponse], None], optional): A callback to call with the check + response. + + Returns: + TextGenerateStatusResponse: The completed text generation request. + + Raises: + AIHordeRequestError: If the request failed. The error response is included in the exception. + """ + # `cast()` returns the value unchanged but tells coerces the type for mypy's benefit + # Static type checkers can't see that `_do_request_with_check` is reliably passing an object of the correct + # type, but we are guaranteed that it is due to the `ImageGenerateCheckResponse` type being passed as an arg. + generic_callback = cast(Callable[[HordeResponse], None], check_callback) + + timeout = self.validate_timeout(timeout, log_message=True) + + num_gens_requested = 1 + + if text_gen_request.params and text_gen_request.params.n: + num_gens_requested = text_gen_request.params.n + + logger.log(PROGRESS_LOGGER_LABEL, f"Requesting {num_gens_requested} text generation.") + logger.debug(f"Request: {text_gen_request}") + + response, job_id = self._do_request_with_check( + text_gen_request, + number_of_responses=1, + timeout=timeout, + check_callback=generic_callback, + check_callback_type=TextGenerateStatusResponse, + ) + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + if not isinstance(response, TextGenerateStatusResponse): # pragma: no cover + raise RuntimeError("Response was not a TextGenerateStatusResponse") + + return (response, job_id) + + def text_generate_request_dry_run( + self, + text_gen_request: TextGenerateAsyncRequest, + ) -> TextGenerateAsyncDryRunResponse: + """Submit a dry run text generation, which will return the kudos cost without actually generating text. + + Args: + text_gen_request (TextGenerateAsyncRequest): The request to submit. + + Returns: + TextGenerateAsyncDryRunResponse: The response from the API. + """ + if not text_gen_request.dry_run: + raise RuntimeError("Dry run request must have dry_run set to True") + + logger.log(PROGRESS_LOGGER_LABEL, "Requesting dry run text generation.") + logger.debug(f"Request: {text_gen_request}") + + with AIHordeAPIClientSession() as horde_session: + dry_run_response = horde_session.submit_request(text_gen_request, TextGenerateAsyncDryRunResponse) + + if isinstance(dry_run_response, RequestErrorResponse): # pragma: no cover + logger.error(f"Error response received: {dry_run_response.message}") + raise AIHordeRequestError(dry_run_response) + + return dry_run_response + + raise RuntimeError("Something went wrong with the request") + + def workers_all_details( + self, + ) -> AllWorkersDetailsResponse: + """Get all the details for all workers. + + Returns: + WorkersAllDetailsResponse: The response from the API. + """ + with AIHordeAPIClientSession() as horde_session: + response = horde_session.submit_request(AllWorkersDetailsRequest(), AllWorkersDetailsResponse) + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + raise RuntimeError("Something went wrong with the request") + + def worker_details( + self, + worker_id: WorkerID | str, + ) -> SingleWorkerDetailsResponse: + """Get the details for a worker. + + Args: + worker_id (WorkerID): The ID of the worker to get the details for. + + Returns: + SingleWorkerDetailsResponse: The response from the API. 
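A minimal sketch of the dry-run helper added just above: the request must be constructed with dry_run=True, and the response reports the kudos cost instead of generated text (no field names are assumed on the response; it is just logged whole):

```python
# Sketch: estimating the kudos cost of a text generation without running it.
from loguru import logger

from horde_sdk import ANON_API_KEY
from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPISimpleClient
from horde_sdk.ai_horde_api.apimodels import ModelGenerationInputKobold, TextGenerateAsyncRequest

simple_client = AIHordeAPISimpleClient()

dry_run_response = simple_client.text_generate_request_dry_run(
    TextGenerateAsyncRequest(
        apikey=ANON_API_KEY,
        prompt="Hello, world!",
        models=["koboldcpp/LLaMA2-13B-Psyfighter2"],
        params=ModelGenerationInputKobold(max_context_length=1024, max_length=80),
        dry_run=True,  # required, otherwise the client raises RuntimeError
    ),
)

logger.info(f"Dry run response: {dry_run_response}")
```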
+ """ + with AIHordeAPIClientSession() as horde_session: + response = horde_session.submit_request( + SingleWorkerDetailsRequest(worker_id=worker_id), + SingleWorkerDetailsResponse, + ) + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + raise RuntimeError("Something went wrong with the request") + + def worker_modify( + self, + modify_worker_request: ModifyWorkerRequest, + ) -> ModifyWorkerResponse: + """Update a worker. + + Args: + worker_id (WorkerID): The ID of the worker to update. + modify_worker_request (ModifyWorkerRequest): The request to update the worker. + + Returns: + ModifyWorkerResponse: The response from the API. + """ + with AIHordeAPIClientSession() as horde_session: + response = horde_session.submit_request( + modify_worker_request, + ModifyWorkerResponse, + ) + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + raise RuntimeError("Something went wrong with the request") + + def worker_delete( + self, + worker_id: WorkerID | str, + ) -> DeleteWorkerResponse: + """Delete a worker. + + Args: + worker_id (WorkerID): The ID of the worker to delete. + + Returns: + DeleteWorkerResponse: The response from the API. + """ + with AIHordeAPIClientSession() as horde_session: + response = horde_session.submit_request(DeleteWorkerRequest(worker_id=worker_id), DeleteWorkerResponse) + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + raise RuntimeError("Something went wrong with the request") + + def image_stats_totals( + self, + ) -> ImageStatsModelsTotalResponse: + """Get the total stats for images. + + Returns: + ImageStatsTotalsResponse: The response from the API. + """ + with AIHordeAPIClientSession() as horde_session: + response = horde_session.submit_request(ImageStatsModelsTotalRequest(), ImageStatsModelsTotalResponse) + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + raise RuntimeError("Something went wrong with the request") + + def image_stats_models( + self, + ) -> ImageStatsModelsResponse: + """Get the stats for images by model. + + Returns: + ImageStatsModelsResponse: The response from the API. + """ + with AIHordeAPIClientSession() as horde_session: + response = horde_session.submit_request(ImageStatsModelsRequest(), ImageStatsModelsResponse) + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + raise RuntimeError("Something went wrong with the request") + + def text_stats_totals( + self, + ) -> TextStatsModelsTotalResponse: + """Get the total stats for text. + + Returns: + TextStatsTotalsResponse: The response from the API. + """ + with AIHordeAPIClientSession() as horde_session: + response = horde_session.submit_request(TextStatsModelsTotalRequest(), TextStatsModelsTotalResponse) + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + raise RuntimeError("Something went wrong with the request") + + def text_stats_models( + self, + ) -> TextStatsModelResponse: + """Get the stats for text by model. + + Returns: + TextModelStatsResponse: The response from the API. 
+ """ + with AIHordeAPIClientSession() as horde_session: + response = horde_session.submit_request(TextStatsModelsRequest(), TextStatsModelResponse) + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + raise RuntimeError("Something went wrong with the request") + + def image_status_models_all( + self, + ) -> HordeStatusModelsAllResponse: + """Get the status of all image models. + + Returns: + ImageStatusModelsAllResponse: The response from the API. + """ + with AIHordeAPIClientSession() as horde_session: + response = horde_session.submit_request(HordeStatusModelsAllRequest(), HordeStatusModelsAllResponse) + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + raise RuntimeError("Something went wrong with the request") + + def image_status_models_single( + self, + model_name: str, + ) -> HordeStatusModelsSingleResponse: + """Get the status of a single image model. + + Args: + model_name (str): The name of the model to get the status of. + + Returns: + ImageStatusModelsSingleResponse: The response from the API. + """ + with AIHordeAPIClientSession() as horde_session: + response = horde_session.submit_request( + HordeStatusModelsSingleRequest(model_name=model_name), + HordeStatusModelsSingleResponse, + ) + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + raise RuntimeError("Something went wrong with the request") + + def get_news( + self, + ) -> NewsResponse: + """Get the latest news from the AI-Horde API. + + Returns: + NewsResponse: The response from the API. + """ + with AIHordeAPIClientSession() as horde_session: + response = horde_session.submit_request(NewsRequest(), NewsResponse) + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + raise RuntimeError("Something went wrong with the request") + class AIHordeAPIAsyncSimpleClient(BaseAIHordeSimpleClient): """An asyncio based simple client for the AI-Horde API. 
Start with this class if you want asyncio capabilities..""" - _horde_client_session: AIHordeAPIAsyncClientSession | None + _horde_client_session: AIHordeAPIAsyncClientSession def __init__( self, - aiohttp_session: aiohttp.ClientSession | None, + aiohttp_session: aiohttp.ClientSession | None = None, horde_client_session: AIHordeAPIAsyncClientSession | None = None, ) -> None: """Create a new instance of the AIHordeAPISimpleClient.""" - if aiohttp_session is not None and horde_client_session is not None: - raise ValueError("Only one of aiohttp_session or horde_client_session can be provided") + super().__init__() + + if aiohttp_session is None and horde_client_session is None: + raise RuntimeError("No aiohttp session provided but an async request was made.") - self._aiohttp_session = aiohttp_session - self._horde_client_session = horde_client_session + if ( + aiohttp_session is not None + and horde_client_session is not None + and horde_client_session._aiohttp_session != aiohttp_session + ): + raise RuntimeError("The aiohttp session provided does not match the session in the client session.") + + if aiohttp_session is not None and horde_client_session is None: + logger.info("Creating a new AIHordeAPIAsyncClientSession with the provided aiohttp session.") + self._aiohttp_session = aiohttp_session + self._horde_client_session = AIHordeAPIAsyncClientSession(aiohttp_session) + elif horde_client_session is not None: + self._horde_client_session = horde_client_session + self._aiohttp_session = horde_client_session._aiohttp_session async def download_image_from_generation(self, generation: ImageGeneration) -> tuple[PIL.Image.Image, JobID]: """Asynchronously convert from base64 or download an image from a response. @@ -924,6 +1296,9 @@ async def _do_request_with_check( number_of_responses (int, optional): The number of responses to expect. Defaults to 1. timeout (int, optional): The number of seconds to wait before aborting. returns any completed images at the end of the timeout. Defaults to GENERATION_MAX_LIFE. + check_callback (Callable[[HordeResponse], None], optional): A callback to call with the check response. + check_callback_type (type[ResponseWithProgressMixin | ResponseGenerationProgressCombinedMixin], optional): + The type of response expected by the callback. Returns: tuple[HordeResponse, JobID]: The final response and the corresponding job ID. @@ -931,113 +1306,116 @@ async def _do_request_with_check( Raises: AIHordeRequestError: If the request failed. The error response is included in the exception. 
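The reworked constructor in this hunk accepts either a bare aiohttp session (a client session is created around it) or an existing AIHordeAPIAsyncClientSession (the aiohttp session is taken from it); passing both is only valid when they refer to the same session. A sketch of both patterns:

```python
# Sketch: the two construction patterns the reworked __init__ supports.
import aiohttp

from loguru import logger

from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIAsyncClientSession, AIHordeAPIAsyncSimpleClient


async def construct_clients() -> None:
    # 1. Pass only an aiohttp session; the simple client builds its own
    #    AIHordeAPIAsyncClientSession around it.
    async with aiohttp.ClientSession() as aiohttp_session:
        client_a = AIHordeAPIAsyncSimpleClient(aiohttp_session=aiohttp_session)
        logger.info(await client_a.heartbeat_request())

    # 2. Pass only an existing horde client session; the aiohttp session is
    #    taken from it.
    aiohttp_session = aiohttp.ClientSession()
    horde_client_session = AIHordeAPIAsyncClientSession(aiohttp_session=aiohttp_session)
    async with aiohttp_session, horde_client_session:
        client_b = AIHordeAPIAsyncSimpleClient(horde_client_session=horde_client_session)
        logger.info(await client_b.heartbeat_request())
```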
""" - if check_callback is not None and len(inspect.getfullargspec(check_callback).args) == 0: raise ValueError("Callback must take at least one argument") - context: contextlib.nullcontext[None] | AIHordeAPIAsyncClientSession - ai_horde_session: AIHordeAPIAsyncClientSession + # This session class will cleanup incomplete requests in the event of an exception - if self._horde_client_session is not None: - # Use a dummy context manager to keep the type checker happy - context = contextlib.nullcontext() - ai_horde_session = self._horde_client_session - elif self._aiohttp_session is not None: - ai_horde_session = AIHordeAPIAsyncClientSession(self._aiohttp_session) - context = ai_horde_session - else: - raise RuntimeError("No aiohttp session or AIHordeAPIAsyncClientSession provided") + # Submit the initial request + logger.debug( + f"Submitting request: {api_request.log_safe_model_dump()} with timeout {timeout}", + ) + initial_response = await self._horde_client_session.submit_request( + api_request=api_request, + expected_response_type=api_request.get_default_success_response_type(), + ) - # This session class will cleanup incomplete requests in the event of an exception - async with context: - # Submit the initial request - logger.debug( - f"Submitting request: {api_request.log_safe_model_dump()} with timeout {timeout}", + # Handle the initial response to get the check request, job ID, and follow-up data + check_request, job_id, follow_up_data = self._handle_initial_response(initial_response) + + # There is a rate limit, so we start a clock to keep track of how long we've been waiting + start_time = time.time() + check_count = 0 + check_response: HordeResponse + + # Wait for the image generation to complete, checking every 4 seconds + while True: + check_count += 1 + + # Submit the check request + check_response = await self._horde_client_session.submit_request( + api_request=check_request, + expected_response_type=check_request.get_default_success_response_type(), ) - initial_response = await ai_horde_session.submit_request( - api_request=api_request, - expected_response_type=api_request.get_default_success_response_type(), + + # Handle the progress response to determine if the job is finished or timed out + progress_state = self._handle_progress_response( + check_request, + check_response, + job_id, + check_count=check_count, + number_of_responses=number_of_responses, + start_time=start_time, + timeout=timeout, + check_callback=check_callback, + check_callback_type=check_callback_type, ) - # Handle the initial response to get the check request, job ID, and follow-up data - check_request, job_id, follow_up_data = self._handle_initial_response(initial_response) + if progress_state == PROGRESS_STATE.finished or progress_state == PROGRESS_STATE.timed_out: + break - # There is a rate limit, so we start a clock to keep track of how long we've been waiting - start_time = time.time() - check_count = 0 - check_response: HordeResponse + # Wait for 4 seconds before checking again + await asyncio.sleep(4) - # Wait for the image generation to complete, checking every 4 seconds - while True: - check_count += 1 + # This is for type safety, but should never happen in production + if not isinstance(check_response, ResponseWithProgressMixin): # pragma: no cover + raise RuntimeError(f"Response did not have progress: {check_response}") - # Submit the check request - check_response = await ai_horde_session.submit_request( - api_request=check_request, - 
expected_response_type=check_request.get_default_success_response_type(), - ) + # Get the finalize request type from the check response + finalize_request_type = check_response.get_finalize_success_request_type() - # Handle the progress response to determine if the job is finished or timed out - progress_state = self._handle_progress_response( - check_request, - check_response, - job_id, - check_count=check_count, - number_of_responses=number_of_responses, - start_time=start_time, - timeout=timeout, - check_callback=check_callback, - check_callback_type=check_callback_type, + # Set the final response to the check response by default + final_response: HordeResponse = check_response + + # If there is a finalize request type, submit the finalize request + if finalize_request_type: + finalize_request = finalize_request_type.model_validate(follow_up_data[0]) + + # This is for type safety, but should never happen in production + if not isinstance(finalize_request, JobRequestMixin): # pragma: no cover + logger.error( + f"Finalize request type is not a JobRequestMixin: {finalize_request.log_safe_model_dump()}", + ) + raise RuntimeError( + f"Finalize request type is not a JobRequestMixin: {finalize_request.log_safe_model_dump()}", ) - if progress_state == PROGRESS_STATE.finished or progress_state == PROGRESS_STATE.timed_out: - break + final_response = await self._horde_client_session.submit_request( + api_request=finalize_request, + expected_response_type=finalize_request.get_default_success_response_type(), + ) - # Wait for 4 seconds before checking again - await asyncio.sleep(4) + if isinstance(final_response, RequestErrorResponse): + raise AIHordeRequestError(final_response) - # This is for type safety, but should never happen in production - if not isinstance(check_response, ResponseWithProgressMixin): # pragma: no cover - raise RuntimeError(f"Response did not have progress: {check_response}") + # Log a message indicating that the request is complete + logger.log(COMPLETE_LOGGER_LABEL, f"Request complete: {job_id}") - # Get the finalize request type from the check response - finalize_request_type = check_response.get_finalize_success_request_type() + # Return the final response and job ID + return (final_response, job_id) - # Set the final response to the check response by default - final_response: HordeResponse = check_response + async def heartbeat_request( + self, + ) -> AIHordeHeartbeatResponse: + """Submit a heartbeat request to the AI-Horde API. - # If there is a finalize request type, submit the finalize request - if finalize_request_type: - finalize_request = finalize_request_type.model_validate(follow_up_data[0]) - - # This is for type safety, but should never happen in production - if not isinstance(finalize_request, JobRequestMixin): # pragma: no cover - logger.error( - f"Finalize request type is not a JobRequestMixin: {finalize_request.log_safe_model_dump()}", - ) - raise RuntimeError( - f"Finalize request type is not a JobRequestMixin: {finalize_request.log_safe_model_dump()}", - ) - - final_response = await ai_horde_session.submit_request( - api_request=finalize_request, - expected_response_type=finalize_request.get_default_success_response_type(), - ) + Returns: + AIHordeHeartbeatResponse: The response from the API. 
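The heartbeat helper introduced here takes no arguments and exists on both the sync and async simple clients; a one-liner sketch with the sync client:

```python
# Sketch: a quick reachability check against the AI-Horde API.
from loguru import logger

from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPISimpleClient

simple_client = AIHordeAPISimpleClient()
heartbeat_response = simple_client.heartbeat_request()
logger.info(f"Heartbeat: {heartbeat_response}")
```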
+ """ + api_request = AIHordeHeartbeatRequest() - if isinstance(final_response, RequestErrorResponse): - raise AIHordeRequestError(final_response) + if self._horde_client_session is not None: + api_response = await self._horde_client_session.submit_request( + api_request, + api_request.get_default_success_response_type(), + ) - # Log a message indicating that the request is complete - logger.log(COMPLETE_LOGGER_LABEL, f"Request complete: {job_id}") + if isinstance(api_response, RequestErrorResponse): + raise AIHordeRequestError(api_response) - # Return the final response and job ID - return (final_response, job_id) + return api_response - # If there we get to this point, something catastrophic has happened - # Log an error and raise a CancelledError to kill the coroutine task - logger.error("Something went wrong with the request (was it cancelled?):") - logger.error(f"Request: {api_request.log_safe_model_dump()}") - raise asyncio.CancelledError("Something went wrong with the request") + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") async def image_generate_request( self, @@ -1054,8 +1432,12 @@ async def image_generate_request( Args: image_gen_request (ImageGenerateAsyncRequest): The request to submit. timeout (int, optional): The number of seconds to wait before aborting. - returns any completed images at the end of the timeout. Any value 0 or less will wait indefinitely. - Defaults to -1. + returns any completed images at the end of the timeout. Any value 0 or less will wait indefinitely. + Defaults to -1. + check_callback (Callable[[ImageGenerateCheckResponse], None], optional): A callback to call with the check + response. + delay (float, optional): The number of seconds to wait before checking the status. Defaults to 0.0. + Returns: tuple[ImageGenerateStatusResponse, JobID]: The final status response and the corresponding job ID. @@ -1089,6 +1471,38 @@ async def image_generate_request( return (final_response, job_id) + async def image_generate_request_dry_run( + self, + image_gen_request: ImageGenerateAsyncRequest, + ) -> ImageGenerateAsyncDryRunResponse: + """Submit a dry run image generation, which will return the kudos cost without actually generating images. + + Args: + image_gen_request (ImageGenerateAsyncRequest): The request to submit. + + Returns: + ImageGenerateAsyncDryRunResponse: The response from the API. + """ + if not image_gen_request.dry_run: + raise RuntimeError("Dry run request must have dry_run set to True") + + n = image_gen_request.params.n if image_gen_request.params and image_gen_request.params.n else 1 + logger.log(PROGRESS_LOGGER_LABEL, f"Requesting dry run for {n} images.") + + if self._horde_client_session is not None: + dry_run_response = await self._horde_client_session.submit_request( + image_gen_request, + ImageGenerateAsyncDryRunResponse, + ) + else: + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") + + if isinstance(dry_run_response, RequestErrorResponse): + logger.error(f"Error response received: {dry_run_response.message}") + raise AIHordeRequestError(dry_run_response) + + return dry_run_response + async def alchemy_request( self, alchemy_request: AlchemyAsyncRequest, @@ -1102,6 +1516,10 @@ async def alchemy_request( Args: alchemy_request (AlchemyAsyncRequest): The request to submit. + timeout (int, optional): The number of seconds to wait before aborting. + returns any completed images at the end of the timeout. Defaults to -1. 
+ check_callback (Callable[[AlchemyStatusResponse], None], optional): A callback to call with the check + response. Returns: tuple[ImageGenerateStatusResponse, JobID]: The final status response and the corresponding job ID. @@ -1130,3 +1548,343 @@ async def alchemy_request( raise RuntimeError("Response was not an AlchemyAsyncResponse") return (response, job_id) + + async def text_generate_request( + self, + text_gen_request: TextGenerateAsyncRequest, + timeout: int = GENERATION_MAX_LIFE, + check_callback: Callable[[TextGenerateStatusResponse], None] | None = None, + delay: float = 0.0, + ) -> tuple[TextGenerateStatusResponse, JobID]: + """Submit a text generation request to the AI-Horde API, and wait for it to complete. + + *Be warned* that using this method too frequently could trigger a rate limit from the AI-Horde API. + Space concurrent requests apart slightly to allow them to be less than 10/second. + + Args: + text_gen_request (TextGenerateAsyncRequest): The request to submit. + timeout (int, optional): The number of seconds to wait before aborting. + returns any completed images at the end of the timeout. Any value 0 or less will wait indefinitely. + Defaults to -1. + check_callback (Callable[[TextGenerateStatusResponse], None], optional): A callback to call with the check + response. + delay (float, optional): The number of seconds to wait before checking the status. Defaults to 0.0. + + Returns: + tuple[TextGenerateStatusResponse, JobID]: The final status response and the corresponding job ID. + + Raises: + AIHordeRequestError: If the request failed. The error response is included in the exception. + """ + # `cast()` returns the value unchanged but tells coerces the type for mypy's benefit + # Static type checkers can't see that `_do_request_with_check` is reliably passing an object of the correct + # type, but we are guaranteed that it is due to the `ImageGenerateCheckResponse` type being passed as an arg. + generic_callback = cast(Callable[[HordeResponse], None], check_callback) + + await asyncio.sleep(delay) + + timeout = self.validate_timeout(timeout, log_message=True) + + num_gens_requested = 1 + + if text_gen_request.params and text_gen_request.params.n: + num_gens_requested = text_gen_request.params.n + + logger.log(PROGRESS_LOGGER_LABEL, f"Requesting {num_gens_requested} text generation.") + logger.debug(f"Request: {text_gen_request}") + + response, job_id = await self._do_request_with_check( + text_gen_request, + number_of_responses=1, + timeout=timeout, + check_callback=generic_callback, + check_callback_type=TextGenerateStatusResponse, + ) + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + if not isinstance(response, TextGenerateStatusResponse): # pragma: no cover + raise RuntimeError("Response was not a TextGenerateStatusResponse") + + return (response, job_id) + + async def text_generate_request_dry_run( + self, + text_gen_request: TextGenerateAsyncRequest, + ) -> TextGenerateAsyncDryRunResponse: + """Submit a dry run text generation, which will return the kudos cost without actually generating text. + + Args: + text_gen_request (TextGenerateAsyncRequest): The request to submit. + + Returns: + TextGenerateAsyncDryRunResponse: The response from the API. 
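The docstring above warns about the roughly 10 requests/second rate limit; the new delay parameter is one way to stagger requests that are launched together with asyncio.gather. A sketch under that assumption (prompts, spacing, and model name are illustrative):

```python
# Sketch: staggering concurrent text requests with the `delay` parameter so
# the submissions stay under the AI-Horde rate limit.
import asyncio

import aiohttp
from loguru import logger

from horde_sdk import ANON_API_KEY
from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIAsyncClientSession, AIHordeAPIAsyncSimpleClient
from horde_sdk.ai_horde_api.apimodels import ModelGenerationInputKobold, TextGenerateAsyncRequest


async def staggered_text_requests(prompts: list[str], apikey: str = ANON_API_KEY) -> None:
    aiohttp_session = aiohttp.ClientSession()
    horde_client_session = AIHordeAPIAsyncClientSession(aiohttp_session=aiohttp_session)
    simple_client = AIHordeAPIAsyncSimpleClient(horde_client_session=horde_client_session)

    async with aiohttp_session, horde_client_session:
        results = await asyncio.gather(
            *(
                simple_client.text_generate_request(
                    TextGenerateAsyncRequest(
                        apikey=apikey,
                        prompt=prompt,
                        models=["koboldcpp/LLaMA2-13B-Psyfighter2"],
                        params=ModelGenerationInputKobold(max_length=80),
                    ),
                    delay=i * 0.2,  # space the submissions ~200 ms apart
                )
                for i, prompt in enumerate(prompts)
            ),
        )

    for status_response, job_id in results:
        logger.info(f"{job_id}: {status_response.generations[0].text}")
```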
+ """ + if not text_gen_request.dry_run: + raise RuntimeError("Dry run request must have dry_run set to True") + + logger.log(PROGRESS_LOGGER_LABEL, "Requesting dry run text generation.") + logger.debug(f"Request: {text_gen_request}") + + if self._horde_client_session is not None: + dry_run_response = await self._horde_client_session.submit_request( + text_gen_request, + TextGenerateAsyncDryRunResponse, + ) + else: + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") + + if isinstance(dry_run_response, RequestErrorResponse): + logger.error(f"Error response received: {dry_run_response.message}") + raise AIHordeRequestError(dry_run_response) + + return dry_run_response + + async def workers_all_details( + self, + ) -> AllWorkersDetailsResponse: + """Get all the details for all workers. + + Returns: + WorkersAllDetailsResponse: The response from the API. + """ + if self._horde_client_session is not None: + response = await self._horde_client_session.submit_request( + AllWorkersDetailsRequest(), + AllWorkersDetailsResponse, + ) + else: + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + async def worker_details( + self, + worker_id: WorkerID | str, + ) -> SingleWorkerDetailsResponse: + """Get the details for a worker. + + Args: + worker_id (WorkerID): The ID of the worker to get the details for. + + Returns: + SingleWorkerDetailsResponse: The response from the API. + """ + if self._horde_client_session is not None: + response = await self._horde_client_session.submit_request( + SingleWorkerDetailsRequest(worker_id=worker_id), + SingleWorkerDetailsResponse, + ) + else: + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + async def worker_modify( + self, + modify_worker_request: ModifyWorkerRequest, + ) -> ModifyWorkerResponse: + """Update a worker. + + Args: + worker_id (WorkerID): The ID of the worker to update. + modify_worker_request (ModifyWorkerRequest): The request to update the worker. + + Returns: + ModifyWorkerResponse: The response from the API. + """ + if self._horde_client_session is not None: + response = await self._horde_client_session.submit_request( + modify_worker_request, + ModifyWorkerResponse, + ) + else: + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + async def worker_delete( + self, + worker_id: WorkerID | str, + ) -> DeleteWorkerResponse: + """Delete a worker. + + Args: + worker_id (WorkerID): The ID of the worker to delete. + + Returns: + DeleteWorkerResponse: The response from the API. + """ + if self._horde_client_session is not None: + response = await self._horde_client_session.submit_request( + DeleteWorkerRequest(worker_id=worker_id), + DeleteWorkerResponse, + ) + else: + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + async def image_stats_totals( + self, + ) -> ImageStatsModelsTotalResponse: + """Get the total stats for images. + + Returns: + ImageStatsTotalsResponse: The response from the API. 
+ """ + if self._horde_client_session is not None: + response = await self._horde_client_session.submit_request( + ImageStatsModelsTotalRequest(), + ImageStatsModelsTotalResponse, + ) + else: + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + async def image_stats_models( + self, + ) -> ImageStatsModelsResponse: + """Get the stats for images by model. + + Returns: + ImageStatsModelsResponse: The response from the API. + """ + if self._horde_client_session is not None: + response = await self._horde_client_session.submit_request( + ImageStatsModelsRequest(), + ImageStatsModelsResponse, + ) + else: + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + async def text_stats_totals( + self, + ) -> TextStatsModelsTotalResponse: + """Get the total stats for text. + + Returns: + TextStatsTotalsResponse: The response from the API. + """ + if self._horde_client_session is not None: + response = await self._horde_client_session.submit_request( + TextStatsModelsTotalRequest(), + TextStatsModelsTotalResponse, + ) + else: + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + async def text_stats_models( + self, + ) -> TextStatsModelResponse: + """Get the stats for text by model. + + Returns: + TextModelStatsResponse: The response from the API. + """ + if self._horde_client_session is not None: + response = await self._horde_client_session.submit_request( + TextStatsModelsRequest(), + TextStatsModelResponse, + ) + else: + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + async def image_status_models_all( + self, + ) -> HordeStatusModelsAllResponse: + """Get the status of all image models. + + Returns: + ImageStatusModelsAllResponse: The response from the API. + """ + if self._horde_client_session is not None: + response = await self._horde_client_session.submit_request( + HordeStatusModelsAllRequest(), + HordeStatusModelsAllResponse, + ) + else: + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + async def image_status_models_single( + self, + model_name: str, + ) -> HordeStatusModelsSingleResponse: + """Get the status of a single image model. + + Args: + model_name (str): The name of the model to get the status of. + + Returns: + ImageStatusModelsSingleResponse: The response from the API. + """ + if self._horde_client_session is not None: + response = await self._horde_client_session.submit_request( + HordeStatusModelsSingleRequest(model_name=model_name), + HordeStatusModelsSingleResponse, + ) + else: + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response + + async def get_news( + self, + ) -> NewsResponse: + """Get the latest news from the AI-Horde API. + + Returns: + NewsResponse: The response from the API. 
+ """ + if self._horde_client_session is not None: + response = await self._horde_client_session.submit_request( + NewsRequest(), + NewsResponse, + ) + else: + raise RuntimeError("No AIHordeAPIAsyncClientSession provided") + + if isinstance(response, RequestErrorResponse): + raise AIHordeRequestError(response) + + return response diff --git a/horde_sdk/ai_horde_api/apimodels/__init__.py b/horde_sdk/ai_horde_api/apimodels/__init__.py index c0bfec2..226a027 100644 --- a/horde_sdk/ai_horde_api/apimodels/__init__.py +++ b/horde_sdk/ai_horde_api/apimodels/__init__.py @@ -1,25 +1,21 @@ """All requests, responses and API models defined for the AI Horde API.""" from horde_sdk.ai_horde_api.apimodels._find_user import ( - ContributionsDetails, FindUserRequest, - FindUserResponse, - MonthlyKudos, - UsageDetails, - UserAmountRecords, - UserKudosDetails, - UserRecords, - UserThingRecords, +) +from horde_sdk.ai_horde_api.apimodels._kudos import ( + KudosTransferRequest, + KudosTransferResponse, ) from horde_sdk.ai_horde_api.apimodels._stats import ( - ImageModelStatsResponse, ImageStatsModelsRequest, + ImageStatsModelsResponse, ImageStatsModelsTotalRequest, ImageStatsModelsTotalResponse, SinglePeriodImgStat, SinglePeriodTxtStat, StatsModelsTimeframe, - TextModelStatsResponse, + TextStatsModelResponse, TextStatsModelsRequest, TextStatsModelsTotalRequest, TextStatsModelsTotalResponse, @@ -40,6 +36,24 @@ NewsRequest, NewsResponse, ) +from horde_sdk.ai_horde_api.apimodels._users import ( + ContributionsDetails, + ListUsersDetailsRequest, + ListUsersDetailsResponse, + ModifyUser, + ModifyUserReply, + ModifyUserRequest, + ModifyUserResponse, + MonthlyKudos, + SingleUserDetailsRequest, + UsageDetails, + UserAmountRecords, + UserDetailsResponse, + UserKudosDetails, + UserRecords, + UserThingRecords, + _ModifyUserBase, +) from horde_sdk.ai_horde_api.apimodels.alchemy._async import ( AlchemyAsyncRequest, AlchemyAsyncRequestFormItem, @@ -131,6 +145,10 @@ from horde_sdk.ai_horde_api.apimodels.workers._workers import ( AllWorkersDetailsRequest, AllWorkersDetailsResponse, + DeleteWorkerRequest, + DeleteWorkerResponse, + ModifyWorkerRequest, + ModifyWorkerResponse, SingleWorkerDetailsRequest, SingleWorkerDetailsResponse, TeamDetailsLite, @@ -138,17 +156,29 @@ WorkerKudosDetails, ) from horde_sdk.ai_horde_api.consts import KNOWN_ALCHEMY_TYPES +from horde_sdk.generic_api.apimodels import ( + APIKeyAllowedInRequestMixin, + ContainsMessageResponseMixin, + RequestSpecifiesUserIDMixin, + RequestUsesWorkerMixin, + ResponseRequiringDownloadMixin, + ResponseRequiringFollowUpMixin, + ResponseWithProgressMixin, +) __all__ = [ "ContributionsDetails", "FindUserRequest", - "FindUserResponse", + "KudosTransferRequest", + "KudosTransferResponse", + "UserDetailsResponse", "MonthlyKudos", "UsageDetails", "UserAmountRecords", "UserKudosDetails", "UserRecords", "UserThingRecords", + "_ModifyUserBase", "ImageStatsModelsRequest", "ImageStatsModelsTotalRequest", "ImageStatsModelsTotalResponse", @@ -162,13 +192,20 @@ "Newspiece", "NewsRequest", "NewsResponse", + "ListUsersDetailsRequest", + "ListUsersDetailsResponse", + "ModifyUser", + "ModifyUserReply", + "SingleUserDetailsRequest", + "ModifyUserRequest", + "ModifyUserResponse", "ActiveModel", "ActiveModelLite", "SinglePeriodImgStat", "SinglePeriodTxtStat", - "ImageModelStatsResponse", + "ImageStatsModelsResponse", "StatsModelsTimeframe", - "TextModelStatsResponse", + "TextStatsModelResponse", "TextStatsModelsRequest", "TextStatsModelsTotalRequest", "TextStatsModelsTotalResponse", @@ -241,9 
+278,20 @@ "TextGenerationJobSubmitRequest", "AllWorkersDetailsRequest", "AllWorkersDetailsResponse", + "DeleteWorkerRequest", + "DeleteWorkerResponse", + "ModifyWorkerResponse", + "ModifyWorkerRequest", "SingleWorkerDetailsRequest", "SingleWorkerDetailsResponse", "TeamDetailsLite", "WorkerDetailItem", "WorkerKudosDetails", + "APIKeyAllowedInRequestMixin", + "ContainsMessageResponseMixin", + "RequestSpecifiesUserIDMixin", + "RequestUsesWorkerMixin", + "ResponseRequiringDownloadMixin", + "ResponseRequiringFollowUpMixin", + "ResponseWithProgressMixin", ] diff --git a/horde_sdk/ai_horde_api/apimodels/_find_user.py b/horde_sdk/ai_horde_api/apimodels/_find_user.py index a1c9e79..6b3f2d7 100644 --- a/horde_sdk/ai_horde_api/apimodels/_find_user.py +++ b/horde_sdk/ai_horde_api/apimodels/_find_user.py @@ -1,206 +1,10 @@ -from datetime import datetime - -from pydantic import Field from typing_extensions import override +from horde_sdk.ai_horde_api.apimodels._users import UserDetailsResponse from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH from horde_sdk.consts import HTTPMethod -from horde_sdk.generic_api.apimodels import APIKeyAllowedInRequestMixin, HordeAPIDataObject, HordeResponseBaseModel -from horde_sdk.generic_api.decoration import Unequatable, Unhashable - - -class ContributionsDetails(HordeAPIDataObject): - fulfillments: int | None = Field(default=None, description="How many images this user has generated.") - megapixelsteps: float | None = Field(default=None, description="How many megapixelsteps this user has generated.") - - -class UserKudosDetails(HordeAPIDataObject): - accumulated: float | None = Field(0, description="The amount of Kudos accumulated or used for generating images.") - admin: float | None = Field(0, description="The amount of Kudos this user has been given by the Horde admins.") - awarded: float | None = Field( - 0, - description="The amount of Kudos this user has been awarded from things like rating images.", - ) - gifted: float | None = Field(0, description="The amount of Kudos this user has given to other users.") - received: float | None = Field(0, description="The amount of Kudos this user has been given by other users.") - donated: float | None = Field( - 0, - description="The amount of Kudos this user has donated to support education accounts.", - ) - recurring: float | None = Field( - 0, - description="The amount of Kudos this user has received from recurring rewards.", - ) - - -class MonthlyKudos(HordeAPIDataObject): - amount: int | None = Field(default=None, description="How much recurring Kudos this user receives monthly.") - last_received: datetime | None = Field(default=None, description="Last date this user received monthly Kudos.") - - -class UserThingRecords(HordeAPIDataObject): - megapixelsteps: float | None = Field( - 0, - description="How many megapixelsteps this user has generated or requested.", - ) - tokens: int | None = Field(0, description="How many token this user has generated or requested.") - - -class UserAmountRecords(HordeAPIDataObject): - image: int | None = Field(0, description="How many images this user has generated or requested.") - interrogation: int | None = Field(0, description="How many texts this user has generated or requested.") - text: int | None = Field(0, description="How many texts this user has generated or requested.") - - -class UserRecords(HordeAPIDataObject): - contribution: UserThingRecords | None = None - fulfillment: 
UserAmountRecords | None = None - request: UserAmountRecords | None = None - usage: UserThingRecords | None = None - - -class UsageDetails(HordeAPIDataObject): - megapixelsteps: float | None = Field(default=None, description="How many megapixelsteps this user has requested.") - requests: int | None = Field(default=None, description="How many images this user has requested.") - - -@Unhashable -@Unequatable -class FindUserResponse(HordeResponseBaseModel): - @override - @classmethod - def get_api_model_name(cls) -> str | None: - return "UserDetails" - - admin_comment: str | None = Field( - default=None, - description="(Privileged) Comments from the horde admins about this user.", - ) - account_age: int | None = Field( - default=None, - description="How many seconds since this account was created.", - examples=[60], - ) - """How many seconds since this account was created.""" - concurrency: int | None = Field(default=None, description="How many concurrent generations this user may request.") - """How many concurrent generations this user may request.""" - - contact: str | None = Field( - default=None, - description="(Privileged) Contact details for the horde admins to reach the user in case of emergency.", - examples=["email@examples.com"], - ) - """(Privileged) Contact details for the horde admins to reach the user in case of emergency.""" - contributions: ContributionsDetails | None = None - """How many images and megapixelsteps this user has generated.""" - - customizer: bool | None = Field( - default=None, - description="If this user can run custom models.", - examples=[False], - ) - - evaluating_kudos: float | None = Field( - default=None, - description=( - "(Privileged) The amount of Evaluating Kudos this untrusted user has from generations and uptime. When" - " this number reaches a pre-specified threshold, they automatically become trusted." - ), - ) - """(Privileged) The amount of Evaluating Kudos this untrusted user has from generations and uptime. - When this number reaches a pre-specified threshold, they automatically become trusted.""" - flagged: bool | None = Field( - default=None, - description="This user has been flagged for suspicious activity.", - examples=[False], - ) - """This user has been flagged for suspicious activity.""" - id_: int | None = Field(default=None, description="The user unique ID. It is always an integer.", alias="id") - """The user unique ID. It is always an integer.""" - kudos: float | None = Field( - default=None, - description=( - "The amount of Kudos this user has. The amount of Kudos determines the priority when requesting image" - " generations." - ), - ) - """The amount of Kudos this user has. 
The amount of Kudos determines the priority when requesting image - generations.""" - kudos_details: UserKudosDetails | None = None - """How much Kudos this user has accumulated or used for generating images.""" - moderator: bool | None = Field(default=None, description="This user is a Horde moderator.", examples=[False]) - """This user is a Horde moderator.""" - monthly_kudos: MonthlyKudos | None = None - """How much recurring Kudos this user receives monthly.""" - pseudonymous: bool | None = Field( - default=None, - description="If true, this user has not registered using an oauth service.", - examples=[False], - ) - """If true, this user has not registered using an oauth service.""" - records: UserRecords | None = None - """How many images, texts, megapixelsteps and tokens this user has generated or requested.""" - sharedkey_ids: list[str] | None = None - """The IDs of the shared keys this user has access to.""" - service: bool | None = Field( - default=None, - description="This user is a Horde service account and can provide the `proxied_user` field.", - examples=[False], - ) - special: bool | None = Field( - default=None, - description="(Privileged) This user has been given the Special role.", - examples=[False], - ) - """(Privileged) This user has been given the Special role.""" - suspicious: int | None = Field( - default=None, - description="(Privileged) How much suspicion this user has accumulated.", - examples=[0], - ) - """(Privileged) How much suspicion this user has accumulated.""" - trusted: bool | None = Field( - default=None, - description="This user is a trusted member of the Horde.", - examples=[False], - ) - """This user is a trusted member of the Horde.""" - usage: UsageDetails | None = None - """How many images and megapixelsteps this user has requested.""" - username: str | None = Field( - default=None, - description="The user's unique Username. It is a combination of their chosen alias plus their ID.", - ) - """The user's unique Username. It is a combination of their chosen alias plus their ID.""" - vpn: bool | None = Field( - default=None, - description="(Privileged) This user has been given the VPN role.", - examples=[False], - ) - """(Privileged) This user has been given the VPN role.""" - education: bool | None = Field( - default=None, - description="This user has been given education VPN role.", - examples=[False], - ) - """(This user has been given the education role.""" - worker_count: int | None = Field( - default=None, - description="How many workers this user has created (active or inactive).", - ) - """How many workers this user has created (active or inactive).""" - worker_ids: list[str] | None = None - """The IDs of the workers this user has created (active or inactive).""" - worker_invited: int | None = Field( - default=None, - description=( - "Whether this user has been invited to join a worker to the horde and how many of them. When 0, this user" - " cannot add (new) workers to the horde." - ), - ) - """Whether this user has been invited to join a worker to the horde and how many of them. 
- When 0, this user cannot add (new) workers to the horde.""" +from horde_sdk.generic_api.apimodels import APIKeyAllowedInRequestMixin class FindUserRequest(BaseAIHordeRequest, APIKeyAllowedInRequestMixin): @@ -221,5 +25,5 @@ def get_http_method(cls) -> HTTPMethod: @override @classmethod - def get_default_success_response_type(cls) -> type[FindUserResponse]: - return FindUserResponse + def get_default_success_response_type(cls) -> type[UserDetailsResponse]: + return UserDetailsResponse diff --git a/horde_sdk/ai_horde_api/apimodels/_kudos.py b/horde_sdk/ai_horde_api/apimodels/_kudos.py new file mode 100644 index 0000000..386f05b --- /dev/null +++ b/horde_sdk/ai_horde_api/apimodels/_kudos.py @@ -0,0 +1,51 @@ +from typing_extensions import override + +from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest +from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH +from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod +from horde_sdk.generic_api.apimodels import APIKeyAllowedInRequestMixin, HordeResponse + + +class KudosTransferResponse(HordeResponse): + transferred: float | None = None + """The amount of Kudos transferred.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "KudosTransferred" + + +class KudosTransferRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + username: str + """The username of the user to transfer Kudos to.""" + amount: float + """The amount of Kudos to transfer.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return _ANONYMOUS_MODEL + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.POST + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_kudos_transfer + + @override + @classmethod + def get_default_success_response_type(cls) -> type[KudosTransferResponse]: + return KudosTransferResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True diff --git a/horde_sdk/ai_horde_api/apimodels/_stats.py b/horde_sdk/ai_horde_api/apimodels/_stats.py index ae82340..7b35e4e 100644 --- a/horde_sdk/ai_horde_api/apimodels/_stats.py +++ b/horde_sdk/ai_horde_api/apimodels/_stats.py @@ -1,5 +1,6 @@ from enum import auto +from loguru import logger from pydantic import ConfigDict, Field, field_validator from strenum import StrEnum from typing_extensions import override @@ -20,20 +21,23 @@ class StatsModelsTimeframe(StrEnum): @Unequatable @Unhashable -class ImageModelStatsResponse(HordeResponseBaseModel): +class ImageStatsModelsResponse(HordeResponseBaseModel): """Represents the data returned from the `/v2/stats/img/models` endpoint. v2 API Model: `ImgModelStats` """ day: dict[str, int] + """The stats for the past day.""" month: dict[str, int] + """The stats for the past month.""" total: dict[str, int] + """The total stats for all time.""" @field_validator("day", "month", "total", mode="before") @classmethod def validate_timeframe_data(cls, v: dict[str, int | None]) -> dict[str, int]: - """Validates the data for a timeframe. + """Validate the data for a timeframe. Args: v (dict[str, int | None]): The data for a timeframe. @@ -47,6 +51,10 @@ def validate_timeframe_data(cls, v: dict[str, int | None]) -> dict[str, int]: if v is None: return {} + if "additionalProp1" in v: + logger.warning("Found `additionalProp` in stats data, this is a dummy result. 
Ignoring.") + return {} + return_v = {} # Replace all `None` values with 0 for key, value in v.items(): @@ -63,7 +71,7 @@ def get_api_model_name(cls) -> str | None: return "ImgModelStats" def get_timeframe(self, timeframe: StatsModelsTimeframe) -> dict[str, int]: - """Returns the data for the given timeframe. + """Return the data for the given timeframe. Args: timeframe (StatsModelsTimeframe): The timeframe to get the data for. @@ -90,8 +98,8 @@ class ImageStatsModelsRequest(BaseAIHordeRequest): model_state: MODEL_STATE = Field( MODEL_STATE.all, - description="The state of the models to get stats for. Known models are models that are known to the system.", ) + """The state of the models to get stats for. Known models are models that are known to the system.""" @override @classmethod @@ -110,13 +118,19 @@ def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: @override @classmethod - def get_default_success_response_type(cls) -> type[ImageModelStatsResponse]: - return ImageModelStatsResponse + def get_default_success_response_type(cls) -> type[ImageStatsModelsResponse]: + return ImageStatsModelsResponse class SinglePeriodImgStat(HordeAPIDataObject): - images: int | None = Field(None, description="The amount of images generated during this period.") - ps: int | None = Field(None, description="The amount of pixelsteps generated during this period.") + images: int | None = Field( + None, + ) + """The amount of images generated during this period.""" + ps: int | None = Field( + None, + ) + """The amount of pixelsteps generated during this period.""" @property def mps(self) -> int | None: @@ -131,10 +145,15 @@ class ImageStatsModelsTotalResponse(HordeResponseBaseModel): """Represents the data returned from the `/v2/stats/img/totals` endpoint.""" day: SinglePeriodImgStat | None = None + """The total stats for the past day.""" hour: SinglePeriodImgStat | None = None + """The total stats for the past hour.""" minute: SinglePeriodImgStat | None = None + """The total stats for the past minute.""" month: SinglePeriodImgStat | None = None + """The total stats for the past month.""" total: SinglePeriodImgStat | None = None + """The total stats for all time.""" @override @classmethod @@ -167,16 +186,20 @@ def get_default_success_response_type(cls) -> type[ImageStatsModelsTotalResponse @Unhashable -class TextModelStatsResponse(HordeResponseBaseModel): +class TextStatsModelResponse(HordeResponseBaseModel): + """Represents the data returned from the `/v2/stats/text/models` endpoint.""" day: dict[str, int] + """The stats for the past day.""" month: dict[str, int] + """The stats for the past month.""" total: dict[str, int] + """The total stats for all time.""" @field_validator("day", "month", "total", mode="before") @classmethod def validate_timeframe_data(cls, v: dict[str, int | None]) -> dict[str, int]: - """Validates the data for a timeframe. + """Validate the data for a timeframe. Args: v (dict[str, int | None]): The data for a timeframe. 
@@ -226,13 +249,19 @@ def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: @override @classmethod - def get_default_success_response_type(cls) -> type[TextModelStatsResponse]: - return TextModelStatsResponse + def get_default_success_response_type(cls) -> type[TextStatsModelResponse]: + return TextStatsModelResponse class SinglePeriodTxtStat(HordeAPIDataObject): - requests: int | None = Field(None, description="The number of requests made during this period.") - tokens: int | None = Field(None, description="The number of tokens generated during this period.") + requests: int | None = Field( + None, + ) + """The number of requests made during this period.""" + tokens: int | None = Field( + None, + ) + """The number of tokens generated during this period.""" @Unhashable @@ -240,10 +269,15 @@ class TextStatsModelsTotalResponse(HordeResponseBaseModel): """Represents the data returned from the `/v2/stats/text/totals` endpoint.""" minute: dict[str, int] + """The total stats for the past minute.""" hour: dict[str, int] + """The total stats for the past hour.""" day: dict[str, int] + """The total stats for the past day.""" month: dict[str, int] + """The total stats for the past month.""" total: dict[str, int] + """The total stats for all time.""" @override @classmethod diff --git a/horde_sdk/ai_horde_api/apimodels/_status.py b/horde_sdk/ai_horde_api/apimodels/_status.py index fcdb124..1159401 100644 --- a/horde_sdk/ai_horde_api/apimodels/_status.py +++ b/horde_sdk/ai_horde_api/apimodels/_status.py @@ -18,6 +18,7 @@ class AIHordeHeartbeatResponse(HordeResponseBaseModel, ContainsMessageResponseMixin): version: str + """The version of the AI Horde API that this node is running.""" @override @classmethod @@ -54,6 +55,7 @@ class HordePerformanceResponse(HordeResponseBaseModel): "How many workers are actively processing image interrogations in this {horde_noun} in the past 5 minutes." ), ) + """How many workers are actively processing image interrogations in this {horde_noun} in the past 5 minutes.""" interrogator_thread_count: int | None = Field( None, description=( @@ -61,34 +63,36 @@ class HordePerformanceResponse(HordeResponseBaseModel): " minutes." 
), ) + """How many worker threads are actively processing image interrogation in this {horde_noun} in the past 5 + minutes.""" past_minute_megapixelsteps: float | None = Field( None, - description="How many megapixelsteps this horde generated in the last minute.", ) + """How many megapixelsteps this horde generated in the last minute.""" past_minute_tokens: float | None = Field( None, - description="How many tokens this horde generated in the last minute.", ) + """How many tokens this horde generated in the last minute.""" queued_forms: float | None = Field( None, - description="The amount of image interrogations waiting and processing currently in this horde.", ) + """The amount of image interrogations waiting and processing currently in this horde.""" queued_megapixelsteps: float | None = Field( None, - description="The amount of megapixelsteps in waiting and processing requests currently in this horde.", ) + """The amount of megapixelsteps in waiting and processing requests currently in this horde.""" queued_requests: int | None = Field( None, - description="The amount of waiting and processing image requests currently in this horde.", ) + """The amount of waiting and processing image requests currently in this horde.""" queued_text_requests: int | None = Field( None, - description="The amount of waiting and processing text requests currently in this horde.", ) + """The amount of waiting and processing text requests currently in this horde.""" queued_tokens: float | None = Field( None, - description="The amount of tokens in waiting and processing requests currently in this horde.", ) + """The amount of tokens in waiting and processing requests currently in this horde.""" text_thread_count: int | None = Field( None, description=( @@ -96,10 +100,12 @@ class HordePerformanceResponse(HordeResponseBaseModel): " minutes." ), ) + """How many worker threads are actively processing prompt generations in this {horde_noun} in the past 5 + minutes.""" text_worker_count: int | None = Field( None, - description="How many workers are actively processing prompt generations in this horde in the past 5 minutes.", ) + """How many workers are actively processing prompt generations in this horde in the past 5 minutes.""" thread_count: int | None = Field( None, description=( @@ -107,10 +113,12 @@ class HordePerformanceResponse(HordeResponseBaseModel): " minutes." 
), ) + """How many worker threads are actively processing prompt generations in this {horde_noun} in the past 5 + minutes.""" worker_count: int | None = Field( None, - description="How many workers are actively processing prompt generations in this horde in the past 5 minutes.", ) + """How many workers are actively processing prompt generations in this horde in the past 5 minutes.""" @override @classmethod @@ -141,9 +149,16 @@ def get_default_success_response_type(cls) -> type[HordePerformanceResponse]: class Newspiece(HordeAPIObject): - date_published: str | None = Field(None, description="The date this newspiece was published.") - importance: str | None = Field(None, description="How critical this piece of news is.", examples=["Information"]) - newspiece: str | None = Field(None, description="The actual piece of news.") + date_published: str | None = Field( + None, + ) + """The date this newspiece was published.""" + importance: str | None = Field(None, examples=["Information"]) + """How critical this piece of news is.""" + newspiece: str | None = Field( + None, + ) + """The actual piece of news.""" @override @classmethod @@ -153,6 +168,9 @@ def get_api_model_name(cls) -> str | None: @Unhashable class NewsResponse(HordeResponse, RootModel[list[Newspiece]]): + root: list[Newspiece] + """The underlying list of newspieces.""" + def __iter__(self) -> Iterator[Newspiece]: # type: ignore return iter(self.root) @@ -193,8 +211,14 @@ def get_default_success_response_type(cls) -> type[NewsResponse]: class ActiveModelLite(HordeAPIObject): - count: int | None = Field(None, description="How many of workers in this horde are running this model.") - name: str | None = Field(None, description="The Name of a model available by workers in this horde.") + count: int | None = Field( + None, + ) + """How many of workers in this horde are running this model.""" + name: str | None = Field( + None, + ) + """The Name of a model available by workers in this horde.""" @override @classmethod @@ -203,15 +227,27 @@ def get_api_model_name(cls) -> str | None: class ActiveModel(ActiveModelLite): - eta: int | None = Field(None, description="Estimated time in seconds for this model's queue to be cleared.") - jobs: float | None = Field(None, description="The job count waiting to be generated by this model.") - performance: float | None = Field(None, description="The average speed of generation for this model.") - queued: float | None = Field(None, description="The amount waiting to be generated by this model.") + eta: int | None = Field( + None, + ) + """Estimated time in seconds for this model's queue to be cleared.""" + jobs: float | None = Field( + None, + ) + """The job count waiting to be generated by this model.""" + performance: float | None = Field( + None, + ) + """The average speed of generation for this model.""" + queued: float | None = Field( + None, + ) + """The amount waiting to be generated by this model.""" type_: MODEL_TYPE | None = Field( - description="The model type (text or image).", examples=[MODEL_TYPE.image, MODEL_TYPE.text], alias="type", ) + """The model type (text or image).""" @override @classmethod @@ -221,6 +257,9 @@ def get_api_model_name(cls) -> str | None: @Unhashable class HordeStatusModelsAllResponse(HordeResponse, RootModel[list[ActiveModel]]): + root: list[ActiveModel] + """The underlying list of models.""" + def __iter__(self) -> Iterator[ActiveModel]: # type: ignore return iter(self.root) @@ -245,15 +284,19 @@ class HordeStatusModelsAllRequest(BaseAIHordeRequest): type_: MODEL_TYPE = 
Field( MODEL_TYPE.image, - description="The type of model to filter by.", examples=[MODEL_TYPE.image, MODEL_TYPE.text], alias="type", ) + """The type of model to filter by.""" min_count: int | None = None + """Filter only models that have at least this amount of threads serving.""" max_count: int | None = None + """Filter only models that have at most this amount of threads serving.""" model_state: MODEL_STATE = MODEL_STATE.all + """If 'known', only show stats for known models in the model reference. If 'custom' only show stats for custom + models. If 'all' shows stats for all models.""" @override @classmethod @@ -284,6 +327,10 @@ def get_query_fields(cls) -> list[str]: @Unhashable class HordeStatusModelsSingleResponse(HordeResponse, RootModel[list[ActiveModel]]): # This is a list because of an oversight in the structure of the API response. # FIXME + + root: list[ActiveModel] + """The underlying list of models.""" + def __iter__(self) -> Iterator[ActiveModel]: # type: ignore return iter(self.root) @@ -307,6 +354,7 @@ class HordeStatusModelsSingleRequest(BaseAIHordeRequest): ) model_name: str + """The name of the model to request.""" @override @classmethod @@ -332,18 +380,18 @@ def get_default_success_response_type(cls) -> type[HordeStatusModelsSingleRespon class HordeModes(HordeAPIObject): maintenance_mode: bool = Field( False, - description="Whether the horde is in maintenance mode.", ) + """Whether the horde is in maintenance mode.""" invite_only_mode: bool = Field( False, - description="Whether the horde is in invite-only mode.", ) + """Whether the horde is in invite-only mode.""" raid_mode: bool = Field( False, - description="Whether the horde is in raid mode.", ) + """Whether the horde is in raid mode.""" @override @classmethod diff --git a/horde_sdk/ai_horde_api/apimodels/_users.py b/horde_sdk/ai_horde_api/apimodels/_users.py new file mode 100644 index 0000000..ca72139 --- /dev/null +++ b/horde_sdk/ai_horde_api/apimodels/_users.py @@ -0,0 +1,469 @@ +from datetime import datetime + +from pydantic import Field, RootModel +from typing_extensions import override + +from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest +from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH +from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod +from horde_sdk.generic_api.apimodels import ( + APIKeyAllowedInRequestMixin, + HordeAPIDataObject, + HordeResponse, + HordeResponseBaseModel, + RequestSpecifiesUserIDMixin, +) +from horde_sdk.generic_api.decoration import Unequatable, Unhashable + + +class ContributionsDetails(HordeAPIDataObject): + """How many images and megapixelsteps this user has generated.""" + + fulfillments: int | None = Field( + default=None, + ) + megapixelsteps: float | None = Field( + default=None, + ) + + +class UserKudosDetails(HordeAPIDataObject): + """The details of the kudos this user has accumulated, used, sent and received.""" + + accumulated: float | None = Field(0) + """The amount of Kudos accumulated or used for generating images.""" + + admin: float | None = Field(0) + """The amount of Kudos this user has been given by the Horde admins.""" + + awarded: float | None = Field(0) + """The amount of Kudos this user has been awarded from things like rating images.""" + + gifted: float | None = Field(0) + """The amount of Kudos this user has given to other users.""" + + received: float | None = Field(0) + """The amount of Kudos this user has been given by other users.""" + + donated: float | None = Field(0) + """The amount of Kudos this user 
has donated to support education accounts.""" + + recurring: float | None = Field(0) + """The amount of Kudos this user has received from recurring rewards.""" + + +class MonthlyKudos(HordeAPIDataObject): + amount: int | None = Field(default=None) + """How much recurring Kudos this user receives monthly.""" + + last_received: datetime | None = Field(default=None) + """Last date this user received monthly Kudos.""" + + +class UserThingRecords(HordeAPIDataObject): + megapixelsteps: float | None = Field(0) + """How many megapixelsteps this user has generated or requested.""" + + tokens: int | None = Field(0) + """How many token this user has generated or requested.""" + + +class UserAmountRecords(HordeAPIDataObject): + image: int | None = Field(0) + """How many images this user has generated or requested.""" + + interrogation: int | None = Field(0) + """How many texts this user has generated or requested.""" + + text: int | None = Field(0) + """How many texts this user has generated or requested.""" + + +class UserRecords(HordeAPIDataObject): + contribution: UserThingRecords | None = None + fulfillment: UserAmountRecords | None = None + request: UserAmountRecords | None = None + usage: UserThingRecords | None = None + + +class UsageDetails(HordeAPIDataObject): + megapixelsteps: float | None = Field(default=None) + """How many megapixelsteps this user has requested.""" + + requests: int | None = Field(default=None) + """How many images this user has requested.""" + + +@Unhashable +@Unequatable +class UserDetailsResponse(HordeResponseBaseModel): + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "UserDetails" + + admin_comment: str | None = Field( + default=None, + ) + """(Privileged) Comments from the horde admins about this user.""" + + account_age: int | None = Field( + default=None, + examples=[60], + ) + """How many seconds since this account was created.""" + + concurrency: int | None = Field( + default=None, + ) + """How many concurrent generations this user may request.""" + + contact: str | None = Field( + default=None, + examples=["email@examples.com"], + ) + """(Privileged) Contact details for the horde admins to reach the user in case of emergency.""" + + contributions: ContributionsDetails | None = None + """How many images and megapixelsteps this user has generated.""" + + customizer: bool | None = Field( + default=None, + examples=[False], + ) + """If this user can run custom models.""" + + evaluating_kudos: float | None = Field( + default=None, + description=( + "(Privileged) The amount of Evaluating Kudos this untrusted user has from generations and uptime. When" + " this number reaches a pre-specified threshold, they automatically become trusted." + ), + ) + """(Privileged) The amount of Evaluating Kudos this untrusted user has from generations and uptime. + When this number reaches a pre-specified threshold, they automatically become trusted.""" + + flagged: bool | None = Field( + default=None, + examples=[False], + ) + """This user has been flagged for suspicious activity.""" + + id_: int | None = Field(default=None, alias="id") + """The user unique ID. It is always an integer.""" + + kudos: float | None = Field( + default=None, + description=( + "The amount of Kudos this user has. The amount of Kudos determines the priority when requesting image" + " generations." + ), + ) + """The amount of Kudos this user has. 
The amount of Kudos determines the priority when requesting image + generations.""" + + kudos_details: UserKudosDetails | None = None + """How much Kudos this user has accumulated or used for generating images.""" + + moderator: bool | None = Field(default=None, examples=[False]) + """This user is a Horde moderator.""" + + monthly_kudos: MonthlyKudos | None = None + """How much recurring Kudos this user receives monthly.""" + + pseudonymous: bool | None = Field( + default=None, + examples=[False], + ) + """If true, this user has not registered using an oauth service.""" + + records: UserRecords | None = None + """How many images, texts, megapixelsteps and tokens this user has generated or requested.""" + + sharedkey_ids: list[str] | None = None + """The IDs of the shared keys this user has access to.""" + + service: bool | None = Field( + default=None, + examples=[False], + ) + """This user is a Horde service account and can provide the `proxied_user` field.""" + + special: bool | None = Field( + default=None, + examples=[False], + ) + """(Privileged) This user has been given the Special role.""" + + suspicious: int | None = Field( + default=None, + examples=[0], + ) + """(Privileged) How much suspicion this user has accumulated.""" + + trusted: bool | None = Field( + default=None, + examples=[False], + ) + """This user is a trusted member of the Horde.""" + + usage: UsageDetails | None = None + """How many images and megapixelsteps this user has requested.""" + + username: str | None = Field( + default=None, + ) + """The user's unique Username. It is a combination of their chosen alias plus their ID.""" + + vpn: bool | None = Field( + default=None, + examples=[False], + ) + """(Privileged) This user has been given the VPN role.""" + + education: bool | None = Field( + default=None, + examples=[False], + ) + """(This user has been given the education role.""" + + worker_count: int | None = Field( + default=None, + ) + """How many workers this user has created (active or inactive).""" + + worker_ids: list[str] | None = None + """The IDs of the workers this user has created (active or inactive).""" + + worker_invited: int | None = Field( + default=None, + description=( + "Whether this user has been invited to join a worker to the horde and how many of them. When 0, this user" + " cannot add (new) workers to the horde." + ), + ) + """Whether this user has been invited to join a worker to the horde and how many of them. + When 0, this user cannot add (new) workers to the horde.""" + + +@Unhashable +@Unequatable +class ListUsersDetailsResponse(HordeResponse, RootModel[list[UserDetailsResponse]]): + root: list[UserDetailsResponse] + """The underlying list of user details.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return _ANONYMOUS_MODEL + + +class ListUsersDetailsRequest(BaseAIHordeRequest): + page: int + """The page number to request. There are up to 25 users per page.""" + + sort: str = "kudos" + """The field to sort the users by. 
The default is by kudos.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_users_all + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ListUsersDetailsResponse]: + return ListUsersDetailsResponse + + @override + @classmethod + def get_query_fields(cls) -> list[str]: + return ["page", "sort"] + + +class SingleUserDetailsRequest(BaseAIHordeRequest, RequestSpecifiesUserIDMixin): + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_users_single + + @override + @classmethod + def get_default_success_response_type(cls) -> type[UserDetailsResponse]: + return UserDetailsResponse + + +class _ModifyUserBase(HordeAPIDataObject): + admin_comment: str | None = Field( + None, + max_length=500, + min_length=5, + ) + """Add further information about this user for the other admins.""" + + concurrency: int | None = Field( + None, + ge=0, + le=500, + ) + """The amount of concurrent request this user can have.""" + + contact: str | None = Field( + None, + examples=["email@example.com"], + max_length=500, + min_length=5, + ) + """Contact details for the horde admins to reach the user in case of emergency. This is only visible to horde + moderators.""" + + customizer: bool | None = Field( + None, + ) + """When set to true, the user will be able to serve custom Stable Diffusion models which do not exist in the + Official AI Horde Model Reference.""" + + education: bool | None = Field( + None, + ) + """When set to true, the user is considered an education account and some options become more restrictive.""" + + filtered: bool | None = Field( + None, + ) + """When set to true, the replacement filter will always be applied against this user""" + + flagged: bool | None = Field( + None, + ) + """When set to true, the user cannot transfer kudos and all their workers are put into permanent maintenance.""" + + moderator: bool | None = Field( + None, + ) + """Set to true to make this user a horde moderator.""" + + monthly_kudos: int | None = Field( + None, + ) + """When specified, will start assigning the user monthly kudos, starting now!""" + + public_workers: bool | None = Field( + None, + ) + """Set to true to make this user display their worker IDs.""" + + service: bool | None = Field( + None, + ) + """When set to true, the user is considered a service account proxying the requests for other users.""" + + special: bool | None = Field( + None, + ) + """When set to true, The user can send special payloads.""" + + trusted: bool | None = Field( + None, + ) + """When set to true,the user and their servers will not be affected by suspicion.""" + + usage_multiplier: float | None = Field( + None, + ge=0.1, + le=10.0, + ) + """The amount by which to multiply the users kudos consumption.""" + + username: str | None = Field( + None, + max_length=100, + min_length=3, + ) + """When specified, will change the username. No profanity allowed!""" + + vpn: bool | None = Field( + None, + ) + """When set to true, the user will be able to onboard workers behind a VPN. 
This should be used as a temporary + solution until the user is trusted.""" + + worker_invited: int | None = Field( + None, + ) + """Set to the amount of workers this user is allowed to join to the horde when in worker invite-only mode.""" + + +class ModifyUser(_ModifyUserBase): + kudos: float | None = Field(None) + """The amount of kudos to modify (can be negative).""" + + reset_suspicion: bool | None = Field(None) + """Set the user's suspicion back to 0.""" + + +class ModifyUserReply(_ModifyUserBase): + new_kudos: float | None = Field(None) + """The new amount of kudos this user has.""" + new_suspicion: int | None = Field(None) + """The new amount of suspicion this user has.""" + + +class ModifyUserResponse(HordeResponse, ModifyUserReply): + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ModifyUser" + + +class ModifyUserRequest( + BaseAIHordeRequest, + RequestSpecifiesUserIDMixin, + ModifyUser, + APIKeyAllowedInRequestMixin, +): + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ModifyUserInput" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.PUT + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_users_single + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ModifyUserResponse]: + return ModifyUserResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True diff --git a/horde_sdk/ai_horde_api/apimodels/alchemy/_async.py b/horde_sdk/ai_horde_api/apimodels/alchemy/_async.py index 86d3c67..f2973ed 100644 --- a/horde_sdk/ai_horde_api/apimodels/alchemy/_async.py +++ b/horde_sdk/ai_horde_api/apimodels/alchemy/_async.py @@ -81,6 +81,7 @@ class AlchemyAsyncRequest( APIKeyAllowedInRequestMixin, ): forms: list[AlchemyAsyncRequestFormItem] + """The list of forms (types of post-processing/interrogation/captioning/etc) to request.""" source_image: str """The public URL of the source image or a base64 string to use.""" slow_workers: bool = True diff --git a/horde_sdk/ai_horde_api/apimodels/alchemy/_pop.py b/horde_sdk/ai_horde_api/apimodels/alchemy/_pop.py index 38538b3..3f540b2 100644 --- a/horde_sdk/ai_horde_api/apimodels/alchemy/_pop.py +++ b/horde_sdk/ai_horde_api/apimodels/alchemy/_pop.py @@ -32,9 +32,18 @@ class AlchemyFormPayloadStable(HordeAPIObject): def get_api_model_name(cls) -> str | None: return "ModelInterrogationFormPayloadStable" - additionalProp1: str = Field(validation_alias="additionalProp1", description="Currently unsupported") - additionalProp2: str = Field(validation_alias="additionalProp2", description="Currently unsupported") - additionalProp3: str = Field(validation_alias="additionalProp3", description="Currently unsupported") + additionalProp1: str = Field( + validation_alias="additionalProp1", + ) + """Currently unsupported.""" + additionalProp2: str = Field( + validation_alias="additionalProp2", + ) + """Currently unsupported.""" + additionalProp3: str = Field( + validation_alias="additionalProp3", + ) + """Currently unsupported.""" class AlchemyPopFormPayload(HordeAPIObject, JobRequestMixin): @@ -46,9 +55,9 @@ def get_api_model_name(cls) -> str | None: return "InterrogationPopFormPayload" form: KNOWN_ALCHEMY_TYPES | str = Field( - description="The name of this interrogation form", examples=["caption"], ) + """The name of this interrogation form.""" @field_validator("form", mode="before") def validate_form(cls, v: str | 
KNOWN_ALCHEMY_TYPES) -> KNOWN_ALCHEMY_TYPES | str: @@ -59,8 +68,15 @@ def validate_form(cls, v: str | KNOWN_ALCHEMY_TYPES) -> KNOWN_ALCHEMY_TYPES | st return v payload: AlchemyFormPayloadStable | None = None - r2_upload: str | None = Field(None, description="The URL in which the post-processed image can be uploaded.") - source_image: str | None = Field(None, description="The URL From which the source image can be downloaded.") + """The setting for this interrogation form.""" + r2_upload: str | None = Field( + None, + ) + """The URL in which the post-processed image can be uploaded.""" + source_image: str | None = Field( + None, + ) + """The URL from which the source image can be downloaded.""" class NoValidAlchemyFound(HordeAPIObject): @@ -80,6 +96,8 @@ def get_api_model_name(cls) -> str | None: examples=[0], ge=0, ) + """How many waiting requests were skipped because they require a higher version of the bridge than this worker is + running (upgrade if you see this in your skipped list).""" untrusted: int | None = Field( None, description=( @@ -87,11 +105,12 @@ def get_api_model_name(cls) -> str | None: ), ge=0, ) + """How many waiting requests were skipped because they demanded a trusted worker which this worker is not.""" worker_id: int | None = Field( None, - description="How many waiting requests were skipped because they demanded a specific worker.", ge=0, ) + """How many waiting requests were skipped because they demanded a specific worker.""" class AlchemyPopResponse(HordeResponseBaseModel, ResponseRequiringFollowUpMixin): @@ -99,7 +118,9 @@ class AlchemyPopResponse(HordeResponseBaseModel, ResponseRequiringFollowUpMixin) # and not actually specifying a schema forms: list[AlchemyPopFormPayload] | None = None + """The forms to be generated.""" skipped: NoValidAlchemyFound | None = None + """The requests that were skipped because this worker was not eligible for them.""" @override @classmethod @@ -176,8 +197,11 @@ class AlchemyPopRequest(BaseAIHordeRequest, APIKeyAllowedInRequestMixin): """ name: str + """The name of the request. This is used to identify the request in the logs.""" priority_usernames: list[str] + """The usernames that should be prioritized for this request.""" forms: list[KNOWN_ALCHEMY_TYPES] + """The types of alchemy that should be generated.""" @override @classmethod diff --git a/horde_sdk/ai_horde_api/apimodels/alchemy/_status.py b/horde_sdk/ai_horde_api/apimodels/alchemy/_status.py index 71d433c..5b2a6e7 100644 --- a/horde_sdk/ai_horde_api/apimodels/alchemy/_status.py +++ b/horde_sdk/ai_horde_api/apimodels/alchemy/_status.py @@ -107,7 +107,9 @@ class AlchemyStatusResponse(HordeResponseBaseModel, ResponseWithProgressMixin): """ state: GENERATION_STATE + """The state of the job.
See `GENERATION_STATE` for possible values.""" forms: list[AlchemyFormStatus] + """The status of each form in the job.""" @property def all_interrogation_results(self) -> list[AlchemyInterrogationDetails]: diff --git a/horde_sdk/ai_horde_api/apimodels/alchemy/_submit.py b/horde_sdk/ai_horde_api/apimodels/alchemy/_submit.py index 1f5d2aa..bd4ba0d 100644 --- a/horde_sdk/ai_horde_api/apimodels/alchemy/_submit.py +++ b/horde_sdk/ai_horde_api/apimodels/alchemy/_submit.py @@ -14,11 +14,14 @@ def get_api_model_name(cls) -> str | None: return "GenerationSubmitted" reward: float + """The kudos reward for this job.""" class AlchemyJobSubmitRequest(BaseAIHordeRequest, JobRequestMixin, APIKeyAllowedInRequestMixin): result: str # FIXME + """The result of the alchemy job.""" state: GENERATION_STATE + """The state of this generation. See `GENERATION_STATE` for more information.""" @override @classmethod diff --git a/horde_sdk/ai_horde_api/apimodels/base.py b/horde_sdk/ai_horde_api/apimodels/base.py index 853510f..e6dce87 100644 --- a/horde_sdk/ai_horde_api/apimodels/base.py +++ b/horde_sdk/ai_horde_api/apimodels/base.py @@ -46,6 +46,7 @@ class JobRequestMixin(HordeAPIDataObject): @field_validator("id_", mode="before") def validate_id(cls, v: str | JobID) -> JobID | str: + """Ensure that the job ID is not empty.""" if isinstance(v, str) and v == "": logger.warning("Job ID is empty") return JobID(root=uuid.uuid4()) @@ -58,7 +59,7 @@ def __eq__(self, __value: object) -> bool: return False def __hash__(self) -> int: - return hash(self.id_) + return hash(JobRequestMixin.__name__) + hash(self.id_) class JobResponseMixin(HordeAPIDataObject): @@ -69,6 +70,7 @@ class JobResponseMixin(HordeAPIDataObject): @field_validator("id_", mode="before") def validate_id(cls, v: str | JobID) -> JobID | str: + """Ensure that the job ID is not empty.""" if isinstance(v, str) and v == "": logger.warning("Job ID is empty") return JobID(root=uuid.uuid4()) @@ -102,12 +104,15 @@ class LorasPayloadEntry(HordeAPIDataObject): class TIPayloadEntry(HordeAPIDataObject): + """Represents a single textual inversion (embedding) parameter.""" + name: str = Field(min_length=1, max_length=255) inject_ti: str | None = None strength: float = Field(default=1, ge=-5, le=5) @field_validator("inject_ti") def validate_inject_ti(cls, v: str | None) -> str | None: + """Ensure that the inject_ti is either 'prompt' or 'negprompt'.""" if v is None: return None if v not in ["prompt", "negprompt"]: @@ -116,6 +121,7 @@ def validate_inject_ti(cls, v: str | None) -> str | None: @field_validator("strength") def validate_strength(cls, v: float) -> float: + """Ensure that the strength is non-zero.""" if v == 0: raise ValueError("strength must be non-zero") @@ -123,6 +129,7 @@ def validate_strength(cls, v: float) -> float: @model_validator(mode="after") def strength_only_if_inject_ti(self) -> TIPayloadEntry: + """Ensure that the strength is only set if the inject_ti is set.""" if self.strength and self.inject_ti is None: logger.debug("strength is only valid when inject_ti is set") return self @@ -235,6 +242,8 @@ class ImageGenerateParamMixin(HordeAPIDataObject): """The specific comfyUI workflow to use.""" special: dict[Any, Any] = Field(default_factory=dict) """Reserved for future use.""" + use_nsfw_censor: bool = False + """If the request is SFW, and the worker accidentally generates NSFW, it will send back a censored image.""" @field_validator("width", "height", mode="before") def width_divisible_by_64(cls, value: int) -> int: @@ -243,8 +252,6 @@ def 
width_divisible_by_64(cls, value: int) -> int: raise ValueError("width must be divisible by 64") return value - use_nsfw_censor: bool = False - @field_validator("sampler_name") def sampler_name_must_be_known(cls, v: str | KNOWN_SAMPLERS) -> str | KNOWN_SAMPLERS: """Ensure that the sampler name is in this list of supported samplers.""" @@ -280,7 +287,6 @@ def post_processors_must_be_known( v: list[str | KNOWN_UPSCALERS | KNOWN_FACEFIXERS | KNOWN_MISC_POST_PROCESSORS], ) -> list[str | KNOWN_UPSCALERS | KNOWN_FACEFIXERS | KNOWN_MISC_POST_PROCESSORS]: """Ensure that the post processors are in this list of supported post processors.""" - _valid_types: list[type] = [str, KNOWN_UPSCALERS, KNOWN_FACEFIXERS, KNOWN_MISC_POST_PROCESSORS] for post_processor in v: if post_processor not in _all_valid_post_processors_names_and_values or ( diff --git a/horde_sdk/ai_horde_api/apimodels/generate/_async.py b/horde_sdk/ai_horde_api/apimodels/generate/_async.py index 2510496..014e9e7 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/_async.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/_async.py @@ -15,7 +15,7 @@ from horde_sdk.ai_horde_api.apimodels.generate._status import DeleteImageGenerateRequest, ImageGenerateStatusRequest from horde_sdk.ai_horde_api.consts import KNOWN_SOURCE_PROCESSING from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH -from horde_sdk.consts import HTTPMethod, HTTPStatusCode +from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod, HTTPStatusCode from horde_sdk.generic_api.apimodels import ( APIKeyAllowedInRequestMixin, ContainsMessageResponseMixin, @@ -40,7 +40,9 @@ class ImageGenerateAsyncResponse( """The UUID for this image generation.""" kudos: float + """The expected kudos consumption for this request.""" warnings: list[SingleWarningEntry] | None = None + """Any warnings that were generated by the server or a serving worker.""" @model_validator(mode="after") def validate_warnings(self) -> ImageGenerateAsyncResponse: @@ -80,7 +82,7 @@ def get_api_model_name(cls) -> str | None: return "RequestAsync" def __hash__(self) -> int: - return hash(self.id_) + return hash(ImageGenerateAsyncResponse.__name__) + hash(self.id_) def __eq__(self, __value: object) -> bool: return isinstance(__value, ImageGenerateAsyncResponse) and self.id_ == __value.id_ @@ -88,11 +90,12 @@ def __eq__(self, __value: object) -> bool: class ImageGenerateAsyncDryRunResponse(HordeResponseBaseModel): kudos: float + """The expected kudos consumption for this request.""" @override @classmethod def get_api_model_name(cls) -> str | None: - return "UNDOCUMENTED" + return _ANONYMOUS_MODEL class ImageGenerationInputPayload(HordeAPIObject, ImageGenerateParamMixin): @@ -132,20 +135,34 @@ class ImageGenerateAsyncRequest( """ prompt: str + """The prompt which will be sent to Stable Diffusion to generate an image.""" + params: ImageGenerationInputPayload | None = None + """The parameters for the image generation.""" nsfw: bool | None = True + """Set to true if this request is NSFW. This will skip workers which censor images.""" censor_nsfw: bool = False + """If the request is SFW, and the worker accidentally generates NSFW, it will send back a censored image.""" r2: bool = True + """If True, the image will be sent via cloudflare r2 download link.""" shared: bool = False + """If True, The image will be shared with LAION for improving their dataset. This will also reduce your + kudos consumption by 2. 
For anonymous users, this is always True.""" replacement_filter: bool = True + """If enabled, suspicious prompts are sanitized through a string replacement filter instead.""" source_image: str | None = None + """The public URL of the source image or a base64 string to use.""" source_processing: KNOWN_SOURCE_PROCESSING = KNOWN_SOURCE_PROCESSING.txt2img + """If source_image is provided, specifies how to process it.""" source_mask: str | None = None + """If source_processing is set to 'inpainting' or 'outpainting', this parameter can be optionally provided as the + Base64-encoded webp mask of the areas to inpaint. If this arg is not passed, the inpainting/outpainting mask has to + be embedded as alpha channel.""" extra_source_images: list[ExtraSourceImageEntry] | None = None """Additional uploaded images which can be used for further operations.""" diff --git a/horde_sdk/ai_horde_api/apimodels/generate/_pop.py b/horde_sdk/ai_horde_api/apimodels/generate/_pop.py index 92db2d7..03c5514 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/_pop.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/_pop.py @@ -34,64 +34,32 @@ class NoValidRequestFound(HordeAPIObject): - blacklist: int | None = Field( - None, - description=( - "How many waiting requests were skipped because they demanded a generation with a word that this worker" - " does not accept." - ), - ge=0, - ) - bridge_version: int | None = Field( - None, - description=( - "How many waiting requests were skipped because they require a higher version of the bridge than this" - " worker is running (upgrade if you see this in your skipped list)." - ), - examples=[0], - ge=0, - ) - kudos: int | None = Field( - None, - description=( - "How many waiting requests were skipped because the user didn't have enough kudos when this worker" - " requires upfront kudos." - ), - ) - models: int | None = Field( - None, - description=( - "How many waiting requests were skipped because they demanded a different model than what this worker" - " provides." - ), - examples=[0], - ge=0, - ) - nsfw: int | None = Field( - None, - description=( - "How many waiting requests were skipped because they demanded a nsfw generation which this worker does not" - " provide." - ), - ge=0, - ) + blacklist: int | None = Field(None, ge=0) + """How many waiting requests were skipped because they demanded a generation with a word that this worker does + not accept.""" + bridge_version: int | None = Field(None, examples=[0], ge=0) + """How many waiting requests were skipped because they require a higher version of the bridge than this worker + is running (upgrade if you see this in your skipped list).""" + kudos: int | None = Field(None) + """How many waiting requests were skipped because the user didn't have enough kudos when this worker requires""" + models: int | None = Field(None, examples=[0], ge=0) + """How many waiting requests were skipped because they demanded a different model than what this worker + provides.""" + nsfw: int | None = Field(None, ge=0) + """How many waiting requests were skipped because they demanded a nsfw generation which this worker does not + provide.""" performance: int | None = Field( None, - description="How many waiting requests were skipped because they required higher performance.", - ge=0, - ) - untrusted: int | None = Field( - None, - description=( - "How many waiting requests were skipped because they demanded a trusted worker which this worker is not." 
- ), ge=0, ) + """How many waiting requests were skipped because they demanded a higher performance than this worker provides.""" + untrusted: int | None = Field(None, ge=0) + """How many waiting requests were skipped because they demanded a trusted worker which this worker is not.""" worker_id: int | None = Field( None, - description="How many waiting requests were skipped because they demanded a specific worker.", ge=0, ) + """How many waiting requests were skipped because they demanded a specific worker.""" def is_empty(self) -> bool: """Whether or not this object has any non-zero values.""" @@ -215,7 +183,6 @@ async def _download_image_if_needed( def _sort_downloaded_images(self) -> None: """Sort the downloaded extra source images in the order they were requested.""" - if self.extra_source_images is None or self._downloaded_extra_source_images is None: return @@ -374,7 +341,6 @@ def get_downloaded_source_mask(self) -> str | None: def async_download_source_image(self, client_session: aiohttp.ClientSession) -> asyncio.Task[None]: """Download the source image concurrently.""" - # If the source image is not set, there is nothing to download. if self.source_image is None: return asyncio.create_task(asyncio.sleep(0)) @@ -390,7 +356,6 @@ def async_download_source_image(self, client_session: aiohttp.ClientSession) -> def async_download_source_mask(self, client_session: aiohttp.ClientSession) -> asyncio.Task[None]: """Download the source mask concurrently.""" - # If the source mask is not set, there is nothing to download. if self.source_mask is None: return asyncio.create_task(asyncio.sleep(0)) @@ -435,27 +400,36 @@ def __eq__(self, other: object) -> bool: def __hash__(self) -> int: if self.id_ is not None: - return hash(self.id_) + return hash(ImageGenerateJobPopResponse.__name__) + hash(self.id_) if len(self.ids) > 0: - return hash(tuple(self.ids)) + return hash(ImageGenerateJobPopResponse.__name__) + hash(tuple(self.ids)) logger.warning("No ID or IDs found in response. This is a bug.") return hash(0) class PopInput(HordeAPIObject): - amount: int | None = Field(1, description="How many jobvs to pop at the same time", ge=1, le=20) + amount: int | None = Field(1, ge=1, le=20) + """The number of jobs to pop at the same time.""" bridge_agent: str | None = Field( "unknown:0:unknown", - description="The worker name, version and website.", examples=["AI Horde Worker reGen:4.1.0:https://github.com/Haidra-Org/horde-worker-reGen"], max_length=1000, ) + """The worker name, version and website.""" models: list[str] | None = None - name: str | None = Field(None, description="The Name of the Worker.") - nsfw: bool | None = Field(False, description="Whether this worker can generate NSFW requests or not.") + """The models this worker can generate.""" + name: str | None = Field( + None, + ) + """The Name of the Worker.""" + nsfw: bool | None = Field( + False, + ) + """Whether this worker can generate NSFW requests or not.""" priority_usernames: list[str] | None = None + """The usernames that should be prioritized by this worker.""" require_upfront_kudos: bool | None = Field( False, description=( @@ -466,6 +440,8 @@ class PopInput(HordeAPIObject): False, ], ) + """If True, this worker will only pick up requests where the owner has the required kudos to consume already + available.""" threads: int | None = Field( 1, description=( @@ -475,6 +451,7 @@ class PopInput(HordeAPIObject): ge=1, le=50, ) + """How many threads this worker is running. 
This is used to accurately estimate the current power available in the horde.""" @override @classmethod @@ -489,15 +466,25 @@ class ImageGenerateJobPopRequest(BaseAIHordeRequest, APIKeyAllowedInRequestMixin """ bridge_version: int | None = None - max_pixels: int + """The version of the bridge this worker is running.""" + max_pixels: int = Field(examples=[262144]) + """The maximum number of pixels this worker can generate.""" blacklist: list[str] = Field(default_factory=list) + """The list of words this worker will not accept in a prompt.""" allow_img2img: bool = True + """Whether this worker can generate img2img.""" allow_painting: bool = False + """Whether this worker can generate inpainting/outpainting.""" allow_unsafe_ipaddr: bool = True + """Whether this worker will generate from unsafe/VPN IP addresses.""" allow_post_processing: bool = True + """Whether this worker can do post-processing.""" allow_controlnet: bool = False + """Whether this worker can generate using controlnets.""" allow_sdxl_controlnet: bool = False + """Whether this worker can generate using SDXL controlnets.""" allow_lora: bool = False + """Whether this worker can generate using Loras.""" @override @classmethod diff --git a/horde_sdk/ai_horde_api/apimodels/generate/_status.py b/horde_sdk/ai_horde_api/apimodels/generate/_status.py index 7e92575..f168efe 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/_status.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/_status.py @@ -14,21 +14,22 @@ class Generation(HordeAPIObject): - model: str = Field(description="The model which generated this image.", title="Generation Model") + model: str = Field(title="Generation Model") + """The model which generated this image.""" state: GENERATION_STATE = Field( ..., - description="OBSOLETE (Use the gen_metadata field). The state of this generation.", examples=["ok"], title="Generation State", ) + """OBSOLETE (Use the gen_metadata field).
The state of this generation.""" worker_id: str | WorkerID = Field( - description="The UUID of the worker which generated this image.", title="Worker ID", ) + """The UUID of the worker which generated this image.""" worker_name: str = Field( - description="The name of the worker which generated this image.", title="Worker Name", ) + """The name of the worker which generated this image.""" class ImageGeneration(Generation): @@ -68,7 +69,7 @@ def __eq__(self, other: object) -> bool: return self.id_ == other.id_ def __hash__(self) -> int: - return hash(self.id_) + return hash(ImageGeneration.__name__) + hash(self.id_) class ImageGenerateStatusResponse( @@ -144,6 +145,17 @@ def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: def get_default_success_response_type(cls) -> type[ImageGenerateStatusResponse]: return ImageGenerateStatusResponse + @override + def __eq__(self, value: object) -> bool: + if not isinstance(value, DeleteImageGenerateRequest): + return False + + return self.id_ == value.id_ + + @override + def __hash__(self) -> int: + return hash(DeleteImageGenerateRequest.__name__) + hash(self.id_) + class ImageGenerateStatusRequest(BaseAIHordeRequest, JobRequestMixin): """Represents a GET request to the `/v2/generate/status/{id}` endpoint.""" @@ -167,3 +179,14 @@ def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: @classmethod def get_default_success_response_type(cls) -> type[ImageGenerateStatusResponse]: return ImageGenerateStatusResponse + + @override + def __eq__(self, value: object) -> bool: + if not isinstance(value, ImageGenerateStatusRequest): + return False + + return self.id_ == value.id_ + + @override + def __hash__(self) -> int: + return hash(ImageGenerateStatusRequest.__name__) + hash(self.id_) diff --git a/horde_sdk/ai_horde_api/apimodels/generate/text/__init__.py b/horde_sdk/ai_horde_api/apimodels/generate/text/__init__.py index e69de29..13c7bc0 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/text/__init__.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/text/__init__.py @@ -0,0 +1 @@ +"""Text generation API models.""" diff --git a/horde_sdk/ai_horde_api/apimodels/generate/text/_async.py b/horde_sdk/ai_horde_api/apimodels/generate/text/_async.py index 39b0802..f8d34ff 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/text/_async.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/text/_async.py @@ -12,7 +12,7 @@ ) from horde_sdk.ai_horde_api.apimodels.generate.text._status import DeleteTextGenerateRequest, TextGenerateStatusRequest from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH -from horde_sdk.consts import HTTPMethod, HTTPStatusCode +from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod, HTTPStatusCode from horde_sdk.generic_api.apimodels import ( APIKeyAllowedInRequestMixin, ContainsMessageResponseMixin, @@ -31,8 +31,12 @@ class TextGenerateAsyncResponse( ResponseRequiringFollowUpMixin, ContainsMessageResponseMixin, ): - kudos: float | None = Field(None, description="The expected kudos consumption for this request.") + kudos: float | None = Field( + None, + ) + """The expected kudos consumption for this request.""" warnings: list[SingleWarningEntry] | None = None + """Any warnings that were generated by the server or a serving worker.""" @model_validator(mode="after") def validate_warnings(self) -> TextGenerateAsyncResponse: @@ -72,7 +76,7 @@ def get_api_model_name(cls) -> str | None: return "RequestAsync" def __hash__(self) -> int: - return hash(self.id_) + return 
hash(TextGenerateAsyncResponse.__name__) + hash(self.id_) def __eq__(self, __value: object) -> bool: return isinstance(__value, TextGenerateAsyncResponse) and self.id_ == __value.id_ @@ -80,8 +84,10 @@ def __eq__(self, __value: object) -> bool: @Unhashable class ModelPayloadRootKobold(HordeAPIDataObject): - dynatemp_exponent: float | None = Field(1, description="Dynamic temperature exponent value.", ge=0.0, le=5.0) - dynatemp_range: float | None = Field(0, description="Dynamic temperature range value.", ge=0.0, le=5.0) + dynatemp_exponent: float | None = Field(1, ge=0.0, le=5.0) + """Dynamic temperature exponent value.""" + dynatemp_range: float | None = Field(0, ge=0.0, le=5.0) + """Dynamic temperature range value.""" frmtadsnsp: bool | None = Field( None, description=( @@ -92,6 +98,8 @@ class ModelPayloadRootKobold(HordeAPIDataObject): False, ], ) + """Input formatting option. When enabled, adds a leading space to your input if there is no trailing whitespace at + the end of the previous action.""" frmtrmblln: bool | None = Field( None, description=( @@ -102,13 +110,15 @@ class ModelPayloadRootKobold(HordeAPIDataObject): False, ], ) + """Output formatting option. When enabled, replaces all occurrences of two or more consecutive newlines in the + output with one newline.""" frmtrmspch: bool | None = Field( None, - description="Output formatting option. When enabled, removes #/@%}{+=~|\\^<> from the output.", examples=[ False, ], ) + """Output formatting option. When enabled, removes #/@%}{+=~|\\^<> from the output.""" frmttriminc: bool | None = Field( None, description=( @@ -120,19 +130,28 @@ class ModelPayloadRootKobold(HordeAPIDataObject): False, ], ) + """Output formatting option. When enabled, removes some characters from the end of the output such that the output + doesn't end in the middle of a sentence. If the output is less than one sentence long, does nothing.""" max_context_length: int | None = Field( 1024, - description="Maximum number of tokens to send to the model.", ge=80, le=32000, ) - max_length: int | None = Field(80, description="Number of tokens to generate.", ge=16, le=1024) - min_p: float | None = Field(0, description="Min-p sampling value.", ge=0.0, le=1.0) + """Maximum number of tokens to send to the model.""" + max_length: int | None = Field(80, ge=16, le=1024) + """Number of tokens to generate.""" + min_p: float | None = Field(0, ge=0.0, le=1.0) + """Min-p sampling value.""" n: int | None = Field(None, examples=[1], ge=1, le=20) - rep_pen: float | None = Field(None, description="Base repetition penalty value.", ge=1.0, le=3.0) - rep_pen_range: int | None = Field(None, description="Repetition penalty range.", ge=0, le=4096) - rep_pen_slope: float | None = Field(None, description="Repetition penalty slope.", ge=0.0, le=10.0) + """The number of generations to produce.""" + rep_pen: float | None = Field(None, ge=1.0, le=3.0) + """Base repetition penalty value.""" + rep_pen_range: int | None = Field(None, ge=0, le=4096) + """Repetition penalty range.""" + rep_pen_slope: float | None = Field(None, ge=0.0, le=10.0) + """Repetition penalty slope.""" sampler_order: list[int] | None = None + """The sampler order to use for the generation.""" singleline: bool | None = Field( None, description=( @@ -143,33 +162,42 @@ class ModelPayloadRootKobold(HordeAPIDataObject): False, ], ) - smoothing_factor: float | None = Field(0, description="Quadratic sampling value.", ge=0.0, le=10.0) + """Output formatting option. 
When enabled, removes everything after the first line of the output, including the + newline.""" + smoothing_factor: float | None = Field(0, ge=0.0, le=10.0) + """Quadratic sampling value.""" stop_sequence: list[str] | None = None - temperature: float | None = Field(None, description="Temperature value.", ge=0.0, le=5.0) - tfs: float | None = Field(None, description="Tail free sampling value.", ge=0.0, le=1.0) - top_a: float | None = Field(None, description="Top-a sampling value.", ge=0.0, le=1.0) - top_k: int | None = Field(None, description="Top-k sampling value.", ge=0, le=100) - top_p: float | None = Field(None, description="Top-p sampling value.", ge=0.001, le=1.0) - typical: float | None = Field(None, description="Typical sampling value.", ge=0.0, le=1.0) - use_default_badwordsids: bool | None = Field( - None, - description="When True, uses the default KoboldAI bad word IDs.", - examples=[True], - ) + """The stop sequences to use for the generation.""" + temperature: float | None = Field(None, ge=0.0, le=5.0) + """Temperature value.""" + tfs: float | None = Field(None, ge=0.0, le=1.0) + """Tail free sampling value.""" + top_a: float | None = Field(None, ge=0.0, le=1.0) + """Top-a sampling value.""" + top_k: int | None = Field(None, ge=0, le=100) + """Top-k sampling value.""" + top_p: float | None = Field(None, ge=0.001, le=1.0) + """Top-p sampling value.""" + typical: float | None = Field(None, ge=0.0, le=1.0) + """Typical sampling value.""" + use_default_badwordsids: bool | None = None + """When True, uses the default KoboldAI bad word IDs.""" @Unhashable class ModelGenerationInputKobold(ModelPayloadRootKobold): + pass class TextGenerateAsyncDryRunResponse(HordeResponseBaseModel): kudos: float + """The expected kudos consumption for this request.""" @override @classmethod def get_api_model_name(cls) -> str | None: - return "UNDOCUMENTED" + return _ANONYMOUS_MODEL @Unhashable @@ -184,43 +212,29 @@ class TextGenerateAsyncRequest( """ params: ModelGenerationInputKobold | None = None - prompt: str | None = Field(None, description="The prompt which will be sent to KoboldAI to generate text.") - - allow_downgrade: bool | None = Field( - False, - description=( - "When true and the request requires upfront kudos and the account does not have enough The request will be" - " downgraded in max context and max tokens so that it does not need upfront kudos." - ), - ) - disable_batching: bool | None = Field( - False, - description=( - "When true, This request will not use batching. This will allow you to retrieve accurate seeds. Feature is" - " restricted to Trusted users and Patreons." - ), - ) + """The parameters to use for the generation.""" + prompt: str | None = None + """The prompt which will be sent to KoboldAI to generate text.""" + + allow_downgrade: bool | None = Field(False) + """When true, and the request requires upfront kudos but the account does not have enough, the request will be + downgraded in max context and max tokens so that it does not need upfront kudos.""" + disable_batching: bool | None = Field(False) + """When true, this request will not use batching. This will allow you to retrieve accurate seeds. + This feature is restricted to Trusted users and Patreons.""" extra_source_images: list[ExtraSourceImageEntry] | None = None - - proxied_account: str | None = Field( - None, - description=( - "If using a service account as a proxy, provide this value to identify the actual account from which this" " request is coming from."
- ), - ) + """Any extra source images that should be used for this request; e.g., for multi-modal models.""" + proxied_account: str | None = Field(None) + """If using a service account as a proxy, provide this value to identify the actual account from which this + request is coming from.""" softprompt: str | None = Field( None, - description="Specify which softprompt needs to be used to service this request.", min_length=1, ) - webhook: str | None = Field( - None, - description=( - "Provide a URL where the AI Horde will send a POST call after each delivered generation. The request will" - " include the details of the job as well as the request ID." - ), - ) + """Specify which softprompt needs to be used to service this request.""" + webhook: str | None = Field(None) + """Provide a URL where the AI Horde will send a POST call after each delivered generation. + The request will include the details of the job as well as the request ID.""" @override @classmethod diff --git a/horde_sdk/ai_horde_api/apimodels/generate/text/_pop.py b/horde_sdk/ai_horde_api/apimodels/generate/text/_pop.py index 93f51fe..663e4cc 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/text/_pop.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/text/_pop.py @@ -22,27 +22,20 @@ class ModelPayloadKobold(ModelPayloadRootKobold): - prompt: str | None = Field(None, description="The prompt for the text generation.") + prompt: str | None = None + """The prompt for the text generation.""" class NoValidRequestFoundKobold(NoValidRequestFound): - max_context_length: int | None = Field( - None, - description="How many waiting requests were skipped because they demanded a higher max_context_length than " - "what this worker provides.", - ) + max_context_length: int | None = Field(None) """How many waiting requests were skipped because they demanded a higher max_context_length than what this worker provides.""" - max_length: int | None = Field( - None, - description="How many waiting requests were skipped because they demanded a higher max_length than what this " - "worker provides.", - ) - matching_softprompt: int | None = Field( - None, - description="How many waiting requests were skipped because they demanded an available soft-prompt which this " - "worker does not have.", - ) + max_length: int | None = Field(None) + """How many waiting requests were skipped because they demanded a higher max_length than what this + worker provides.""" + matching_softprompt: int | None = Field(None) + """How many waiting requests were skipped because they demanded an available soft-prompt which this worker does not + have.""" @override @classmethod @@ -55,18 +48,17 @@ class TextGenerateJobPopResponse( ResponseRequiringFollowUpMixin, ExtraSourceImageMixin, ): - payload: ModelPayloadKobold = Field(..., description="The settings for this text generation.") - id_: JobID | None = Field(None, alias="id", description="The UUID for this text generation.") + payload: ModelPayloadKobold + """The settings for this text generation.""" + id_: JobID | None = Field(None, alias="id") """The UUID for this text generation.""" - ids: list[JobID] = Field(description="The UUIDs for this text generations.") + ids: list[JobID] """The UUIDs for this text generations.""" - skipped: NoValidRequestFoundKobold = Field( - NoValidRequestFoundKobold(), - description="The skipped requests that were not valid for this worker.", - ) - softprompt: str | None = Field(None, description="The soft prompt requested for this generation.") + skipped: NoValidRequestFoundKobold = 
Field(NoValidRequestFoundKobold()) + """The skipped requests that were not valid for this worker.""" + softprompt: str | None = Field(None) """The soft prompt requested for this generation.""" - model: str | None = Field(None, description="The model requested for this generation.") + model: str | None = Field(None) """The model requested for this generation.""" @field_validator("id_", mode="before") @@ -142,14 +134,11 @@ def __hash__(self) -> int: class _PopInputKobold(PopInput): - max_length: int = Field(512, description="The maximum amount of tokens this worker can generate.") + max_length: int = Field(512) """The maximum amount of tokens this worker can generate.""" - max_context_length: int = Field(2048, description="The max amount of context to submit to this AI for sampling.") + max_context_length: int = Field(2048) """The max amount of context to submit to this AI for sampling.""" - softprompts: list[str] | None = Field( - None, - description="The available softprompt files on this worker for the currently running model.", - ) + softprompts: list[str] | None = Field(None) """The available softprompt files on this worker for the currently running model.""" diff --git a/horde_sdk/ai_horde_api/apimodels/generate/text/_status.py b/horde_sdk/ai_horde_api/apimodels/generate/text/_status.py index 3fe846c..061760a 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/text/_status.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/text/_status.py @@ -14,10 +14,14 @@ class GenerationKobold(Generation): - id_: str | None = Field(None, description="The ID for this image.", title="Generation ID") + id_: str | None = Field(None, title="Generation ID") + """The ID for this generation.""" gen_metadata: list[GenMetadataEntry] | None = None # FIXME: API declares a `GenerationMetadataKobold` here - seed: int | None = Field(0, description="The seed which generated this text.", title="Generation Seed") - text: str | None = Field(None, description="The generated text.", min_length=0, title="Generated Text") + """Extra metadata about faulted or defaulted components of the generation.""" + seed: int | None = Field(0, title="Generation Seed") + """The seed which generated this text.""" + text: str | None = Field(None, min_length=0, title="Generated Text") + """The generated text.""" @override @classmethod @@ -38,7 +42,7 @@ def __eq__(self, other: object) -> bool: return self.id_ == other.id_ def __hash__(self) -> int: - return hash(self.id_) + return hash(GenerationKobold.__name__) + hash(self.id_) class TextGenerateStatusResponse( @@ -48,9 +52,9 @@ class TextGenerateStatusResponse( ): generations: list[GenerationKobold] = Field( default_factory=list, - description="The generations that have been completed in this request.", title="Generations", ) + """The generations that have been completed in this request.""" @override @classmethod @@ -110,6 +114,17 @@ def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: def get_default_success_response_type(cls) -> type[TextGenerateStatusResponse]: return TextGenerateStatusResponse + @override + def __eq__(self, value: object) -> bool: + if not isinstance(value, DeleteTextGenerateRequest): + return False + + return self.id_ == value.id_ + + @override + def __hash__(self) -> int: + return hash(DeleteTextGenerateRequest.__name__) + hash(self.id_) + class TextGenerateStatusRequest(BaseAIHordeRequest, JobRequestMixin): """Represents a GET request to the `/v2/generate/status/{id}` endpoint.""" @@ -133,3 +148,14 @@ def get_api_endpoint_subpath(cls) -> 
AI_HORDE_API_ENDPOINT_SUBPATH: @classmethod def get_default_success_response_type(cls) -> type[TextGenerateStatusResponse]: return TextGenerateStatusResponse + + @override + def __eq__(self, value: object) -> bool: + if not isinstance(value, TextGenerateStatusRequest): + return False + + return self.id_ == value.id_ + + @override + def __hash__(self) -> int: + return hash(TextGenerateStatusRequest.__name__) + hash(self.id_) diff --git a/horde_sdk/ai_horde_api/apimodels/workers/_workers.py b/horde_sdk/ai_horde_api/apimodels/workers/_workers.py index ef3f5a2..6d18cb1 100644 --- a/horde_sdk/ai_horde_api/apimodels/workers/_workers.py +++ b/horde_sdk/ai_horde_api/apimodels/workers/_workers.py @@ -3,7 +3,7 @@ from pydantic import AliasChoices, Field, RootModel from typing_extensions import override -from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest +from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest, WorkerRequestMixin from horde_sdk.ai_horde_api.consts import WORKER_TYPE from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH from horde_sdk.ai_horde_api.fields import TeamID, WorkerID @@ -43,43 +43,78 @@ def get_api_model_name(cls) -> str | None: @Unhashable class WorkerDetailItem(HordeAPIObject): type_: WORKER_TYPE = Field(alias="type") + """The type of worker.""" name: str + """The Name given to this worker.""" id_: str | WorkerID = Field(alias="id") + """The UUID of this worker.""" online: bool | None = None + """True if the worker has checked in within the past 5 minutes.""" requests_fulfilled: int | None = None + """How many images this worker has generated.""" kudos_rewards: float | None = None + """How many Kudos this worker has been rewarded in total.""" kudos_details: WorkerKudosDetails | None = None + """How much Kudos this worker has accumulated or used for generating images.""" performance: str | None = None + """The average performance of this worker in human readable form.""" threads: int | None = None + """How many threads this worker is running.""" uptime: int | None = None + """The number of seconds this worker has been online for this AI Horde.""" maintenance_mode: bool + """When True, this worker will not pick up any new requests.""" paused: bool | None = None + """When True, this worker will not be given any new requests.""" info: str | None = None + """Extra information or comments about this worker provided by its owner.""" nsfw: bool | None = None + """Whether this worker can generate NSFW requests or not.""" owner: str | None = None + """Privileged or public if the owner has allowed it. The alias of the owner of this worker.""" ipaddr: str | None = None + """Privileged. The last known IP this worker has connected from.""" trusted: bool | None = None + """The worker is trusted to return valid generations.""" flagged: bool | None = None + """The worker's owner has been flagged for suspicious activity.
+ This worker will not be given any jobs to process.""" suspicious: int | None = None + """(Privileged) How much suspicion this worker has accumulated.""" uncompleted_jobs: int | None = None + """How many jobs this worker has left uncompleted after it started them.""" models: list[str] | None = None + """The models this worker supports.""" forms: list[str] | None = None + """The forms this worker supports.""" team: TeamDetailsLite | None = None + """The team this worker belongs to.""" contact: str | None = Field(None, min_length=4, max_length=500) - bridge_agent: str = Field(max_length=1000) - max_pixels: int | None = None + """(Privileged) Contact details for the horde admins to reach the owner of this worker in emergencies.""" + bridge_agent: str = Field(max_length=1000, examples=["AI Horde Worker reGen:4.1.0:"]) + """The bridge agent name, version and website. Example: AI Horde Worker reGen:4.1.0:""" + max_pixels: int | None = Field(None, examples=[262144]) + """The maximum pixels in resolution this worker can generate. Example: 262144""" megapixelsteps_generated: int | None = None + """How many megapixelsteps this worker has generated until now.""" img2img: bool | None = None + """If True, this worker supports and allows img2img requests.""" painting: bool | None = None + """If True, this worker supports and allows inpainting requests.""" post_processing: bool | None = Field( None, validation_alias=AliasChoices("post_processing", "post-processing"), serialization_alias="post-processing", ) + """If True, this worker supports and allows post-processing requests.""" lora: bool | None = None - max_length: int | None = None - max_context_length: int | None = None - tokens_generated: int | None = None + """If True, this worker supports and allows lora requests.""" + max_length: int | None = Field(None, examples=[80]) + """The maximum tokens this worker can generate.""" + max_context_length: int | None = Field(None, examples=[80]) + """The maximum tokens this worker can read.""" + tokens_generated: int | None = Field(None, examples=[0]) + """How many tokens this worker has generated until now. """ @override @classmethod @@ -134,12 +169,19 @@ class AllWorkersDetailsResponse(HordeResponse, RootModel[list[WorkerDetailItem]] # without a `type: ignore``, mypy feels that this is a bad override. This is probably a sub-optimal solution # on my part with me hoping to come up with a more elegant path in the future. # TODO: fix this? + + root: list[WorkerDetailItem] + """The underlying list of worker details.""" + def __iter__(self) -> Iterator[WorkerDetailItem]: # type: ignore return iter(self.root) def __getitem__(self, item: int) -> WorkerDetailItem: return self.root[item] + def __len__(self) -> int: + return len(self.root) + @override @classmethod def get_api_model_name(cls) -> str | None: @@ -150,6 +192,7 @@ class AllWorkersDetailsRequest(BaseAIHordeRequest, APIKeyAllowedInRequestMixin): """Returns information on all works. If a moderator API key is specified, it will return additional information.""" type_: WORKER_TYPE = Field(WORKER_TYPE.all, alias="type") + """Filter workers by type. Default is 'all' which returns all workers.""" @override @classmethod @@ -191,12 +234,11 @@ def get_api_model_name(cls) -> str | None: return "WorkerDetails" -class SingleWorkerDetailsRequest(BaseAIHordeRequest, APIKeyAllowedInRequestMixin): +class SingleWorkerDetailsRequest(BaseAIHordeRequest, WorkerRequestMixin, APIKeyAllowedInRequestMixin): """Returns information on a single worker. 
- If a moderator API key is specified, additional information is returned.""" - - worker_id: str | WorkerID = Field(alias="id") + If a moderator API key is specified, additional information is returned. + """ @override @classmethod @@ -222,3 +264,105 @@ def get_default_success_response_type(cls) -> type[SingleWorkerDetailsResponse]: def is_api_key_required(cls) -> bool: """Return whether this endpoint requires an API key.""" return False + + +class ModifyWorkerResponse(HordeResponse): + info: str | None = Field(None) + """The new state of the 'info' var for this worker.""" + maintenance: bool | None = Field(None) + """The new state of the 'maintenance' var for this worker. When True, this worker will not pick up any new + requests.""" + name: str | None = Field(None) + """The new name for this worker. No profanity allowed!""" + paused: bool | None = Field(None) + """The new state of the 'paused' var for this worker. When True, this worker will not be given any new requests.""" + team: str | None = Field(None, examples=["Direct Action"]) + """The new team of this worker.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "ModifyWorker" + + +class ModifyWorkerRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, + WorkerRequestMixin, +): + info: str | None = Field(None, max_length=1000) + """You can optionally provide a server note which will be seen in the server details. No profanity allowed!""" + maintenance: bool | None = Field(None) + """Set to true to put this worker into maintenance.""" + maintenance_msg: str | None = Field(None) + """If maintenance is True, you can optionally provide a message to be used instead of the default maintenance + message, so that the owner is informed.""" + name: str | None = Field(None, max_length=100, min_length=5) + """When this is set, it will change the worker's name. No profanity allowed!""" + paused: bool | None = Field(None) + """(Mods only) Set to true to pause this worker.""" + team: str | None = Field(None, examples=["0bed257b-e57c-4327-ac64-40cdfb1ac5e6"], max_length=36) + """The team towards which this worker contributes kudos.
If an empty string ('') is passed, it will leave the worker without a team.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "ModifyWorkerInput" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.PUT + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_workers_single + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ModifyWorkerResponse]: + return ModifyWorkerResponse + + +class DeleteWorkerResponse(HordeResponse): + deleted_id_: str | None = None + """The ID of the deleted worker.""" + deleted_name: str | None = None + """The Name of the deleted worker.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "DeletedWorker" + + +class DeleteWorkerRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, + WorkerRequestMixin, +): + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.DELETE + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_workers_single + + @override + @classmethod + def get_default_success_response_type(cls) -> type[DeleteWorkerResponse]: + return DeleteWorkerResponse + + @classmethod + def is_api_key_required(cls) -> bool: + """Return whether this endpoint requires an API key.""" + return True diff --git a/horde_sdk/ai_horde_api/consts.py b/horde_sdk/ai_horde_api/consts.py index 3d770a7..bc3c712 100644 --- a/horde_sdk/ai_horde_api/consts.py +++ b/horde_sdk/ai_horde_api/consts.py @@ -20,13 +20,21 @@ class GENERATION_STATE(StrEnum): _NONE = "" # FIXME ok = auto() + """The generation was successful. It still may have encountered warnings.""" censored = auto() + """The generation was censored.""" faulted = auto() + """The generation encountered an error and was cancelled. It usually is retried automatically.""" csam = auto() + """The generation was flagged as CSAM and automatically censored.""" waiting = auto() + """The generation is waiting for a worker to be assigned.""" processing = auto() + """The generation is being processed by a worker.""" partial = auto() + """The generation was returned partially complete.""" cancelled = auto() + """The generation was cancelled by the user.""" done = auto() @@ -36,11 +44,16 @@ class WORKER_TYPE(StrEnum): (alchemy, image, text, etc...)
""" - all = auto() + all = "" + """All worker types.""" image = auto() + """Image generation worker.""" text = auto() + """Text generation worker.""" interrogation = auto() + """Alchemy/Interrogation worker.""" alchemist = "interrogation" + """Alchemy/Interrogation worker.""" class ALCHEMY_FORMS(StrEnum): @@ -51,8 +64,11 @@ class ALCHEMY_FORMS(StrEnum): nsfw = auto() caption = auto() + """Captioning (i.e., BLIP).""" interrogation = auto() + """Interrogation (i.e., CLIP).""" post_process = auto() + """Upscaling, facefixing, etc.""" class KNOWN_SAMPLERS(StrEnum): @@ -108,6 +124,7 @@ class KNOWN_SOURCE_PROCESSING(StrEnum): inpainting = auto() outpainting = auto() remix = auto() + """Stable Cascade Remix""" class KNOWN_UPSCALERS(StrEnum): @@ -162,20 +179,31 @@ class POST_PROCESSOR_ORDER_TYPE(StrEnum): """ facefixers_first = auto() + """The facefixers are processed first.""" upscalers_first = auto() + """The upscalers are processed first.""" custom = auto() + """User specified post processor order.""" DEFAULT_POST_PROCESSOR_ORDER = POST_PROCESSOR_ORDER_TYPE.facefixers_first +"""The default post processor order.""" class KNOWN_CLIP_BLIP_TYPES(StrEnum): + """The CLIP and BLIP models that are known to the API.""" + caption = auto() + """The caption (BLIP) model.""" interrogation = auto() + """The interrogation (CLIP) model.""" nsfw = auto() + """The NSFW model.""" class KNOWN_INTERROGATORS(StrEnum): + """The interrogators that are known to the API.""" + vit_l_14 = "ViT-L/14" @@ -210,12 +238,19 @@ class METADATA_TYPE(StrEnum): """ lora = auto() + """This refers to a LORA metadata type.""" ti = auto() + """This refers to a Textual Inversion metadata type.""" censorship = auto() + """The censorship metadata type.""" source_image = auto() + """The source image for img2img, inpainting, outpainting, or other source image processing.""" source_mask = auto() + """The mask for img2img, inpainting, outpainting, or other source image processing.""" extra_source_images = auto() + """Extra source images for the request.""" batch_index = auto() + """The index of the batch in a batch request.""" class METADATA_VALUE(StrEnum): @@ -225,66 +260,116 @@ class METADATA_VALUE(StrEnum): """ download_failed = auto() + """Something in the request couldn't be downloaded.""" parse_failed = auto() + """Something in the request couldn't be parsed.""" baseline_mismatch = auto() + """The model targeted wasn't the correct baseline (e.g., SD15 when the request required SDXL).""" csam = auto() + """The generation was flagged as CSAM and automatically censored.""" nsfw = auto() + """The generation is not safe for work.""" see_ref = auto() + """See the `ref` field for more information.""" class MODEL_STATE(StrEnum): + """The model states that are known to the API.""" + all = auto() + """Both known and custom models.""" known = auto() + """Known models that appear in the model reference""" custom = auto() + """Custom models.""" class MODEL_TYPE(StrEnum): + """The model types that are known to the API.""" + text = auto() + """Text generation models.""" image = auto() + """Image generation models.""" class WarningCode(StrEnum): + """The warning codes that are known to the API.""" + NoAvailableWorker = auto() + """There are no available workers for the request.""" ClipSkipMismatch = auto() + """The clip skip value doesn't match the model's preferred value.""" StepsTooFew = auto() + """The number of steps are lower than recommended.""" StepsTooMany = auto() + """The number of steps are higher than recommended.""" CfgScaleMismatch = 
auto() + """The scale in the CFG doesn't match the model's preferred scale.""" CfgScaleTooSmall = auto() + """The scale in the CFG is too small for the model to handle.""" CfgScaleTooLarge = auto() + """The scale in the CFG is too large for the model to handle.""" SamplerMismatch = auto() + """The sampler specified doesn't match the model's preferred sampler.""" SchedulerMismatch = auto() + """The scheduler specified doesn't match the model's preferred scheduler.""" class RC(StrEnum): + """The return codes (typically errors, sometimes warnings) that are known to the API.""" + MissingPrompt = auto() + """The prompt is missing but is required.""" CorruptPrompt = auto() + """The prompt couldn't be parsed.""" KudosValidationError = auto() + """The number of kudos for the requesting user is too low.""" NoValidActions = auto() InvalidSize = auto() InvalidPromptSize = auto() + """The prompt is too short or too long.""" TooManySteps = auto() + """The number of steps too high to be reasonable.""" Profanity = auto() ProfaneWorkerName = auto() + """The worker name contains profanity or rude language.""" ProfaneBridgeAgent = auto() + """The bridge agent contains profanity or rude language.""" ProfaneWorkerInfo = auto() + """The worker info contains profanity or rude language.""" ProfaneUserName = auto() + """The user name contains profanity or rude language.""" ProfaneUserContact = auto() + """The user contact contains profanity or rude language.""" ProfaneAdminComment = auto() + """The admin comment contains profanity or rude language.""" ProfaneTeamName = auto() + """The team name contains profanity or rude language.""" ProfaneTeamInfo = auto() + """The team info contains profanity or rude language.""" TooLong = auto() TooLongWorkerName = auto() + """The worker name is too long.""" TooLongUserName = auto() + """The user name is too long.""" NameAlreadyExists = auto() + """The name is already in use.""" WorkerNameAlreadyExists = auto() + """The worker name is already in use.""" TeamNameAlreadyExists = auto() + """The team name is already in use.""" PolymorphicNameConflict = auto() + """The name conflicts with a polymorphic name in the database.""" ImageValidationFailed = auto() + """The image couldn't be parsed. 
This may be due to a corrupt image.""" SourceImageResolutionExceeded = auto() SourceImageSizeExceeded = auto() SourceImageUrlInvalid = auto() + """The source image URL is invalid or is not accessible.""" SourceImageUnreadable = auto() InpaintingMissingMask = auto() + """Inpainting was selected but no mask was provided.""" SourceMaskUnnecessary = auto() UnsupportedSampler = auto() UnsupportedModel = auto() @@ -292,8 +377,11 @@ class RC(StrEnum): ControlNetSourceMissing = auto() ControlNetInvalidPayload = auto() SourceImageRequiredForModel = auto() + """The model requires a source image.""" UnexpectedModelName = auto() + """The model name is unexpected or unknown.""" TooManyUpscalers = auto() + """The number of upscalers in the request is too high.""" ProcGenNotFound = auto() InvalidAestheticAttempt = auto() AestheticsNotCompleted = auto() @@ -308,15 +396,20 @@ class RC(StrEnum): AestheticsServerDown = auto() AestheticsServerTimeout = auto() InvalidAPIKey = auto() + """The API key specified is invalid.""" WrongCredentials = auto() + """The API key specified doesn't match the target action.""" NotAdmin = auto() + """Only admins can perform this action.""" NotModerator = auto() + """Only moderators can perform this action.""" NotOwner = auto() NotPrivileged = auto() AnonForbidden = auto() AnonForbiddenWorker = auto() AnonForbiddenUserMod = auto() NotTrusted = auto() + """Only trusted users can perform this action.""" UntrustedTeamCreation = auto() UntrustedUnsafeIP = auto() WorkerMaintenance = auto() @@ -327,15 +420,25 @@ class RC(StrEnum): TimeoutIP = auto() TooManyNewIPs = auto() KudosUpfront = auto() + """The user must pay kudos upfront. This is typically only for anonymous users surpassing a certain kudos cost for + their request.""" SharedKeyEmpty = auto() SharedKeyExpired = auto() + """The shared key has expired.""" SharedKeyInsufficientKudos = auto() + """The shared key doesn't have enough kudos to perform this action.""" InvalidJobID = auto() + """The job ID was not found, has timed out or has been deleted.""" RequestNotFound = auto() + """The request was not found, has timed out or has been deleted.""" WorkerNotFound = auto() + """The worker was not found.""" TeamNotFound = auto() + """The team was not found.""" FilterNotFound = auto() + """The filter was not found.""" UserNotFound = auto() + """The user was not found.""" DuplicateGen = auto() AbortedGen = auto() RequestExpired = auto() @@ -348,9 +451,13 @@ class RC(StrEnum): FaultWhenKudosSending = auto() TooFastKudosTransfers = auto() KudosTransferToAnon = auto() + """The user is trying to transfer kudos to the anonymous user.""" KudosTransferToSelf = auto() + """The user is trying to transfer kudos to themselves.""" KudosTransferNotEnough = auto() + """The user doesn't have enough kudos to transfer.""" NegativeKudosTransfer = auto() + """The user is trying to transfer a negative amount of kudos.""" KudosTransferFromAnon = auto() InvalidAwardUsername = auto() KudosAwardToAnon = auto() @@ -367,13 +474,19 @@ class RC(StrEnum): ControlNetMismatch = auto() HiResFixMismatch = auto() TooManyLoras = auto() + """The number of LORAs in the request is too high.""" BadLoraVersion = auto() + """The LORA version specified is not valid.""" TooManyTIs = auto() + """The number of TIs in the request is too high.""" BetaAnonForbidden = auto() BetaComparisonFault = auto() BadCFGDecimals = auto() + """The number of decimals in the CFG is invalid.""" BadCFGNumber = auto() + """The number in the CFG is invalid.""" BadClientAgent = auto() + """The client
agent is invalid.""" SpecialMissingPayload = auto() SpecialForbidden = auto() SpecialMissingUsername = auto() @@ -382,14 +495,26 @@ class RC(StrEnum): Img2ImgMismatch = auto() TilingMismatch = auto() EducationCannotSendKudos = auto() + """The account is an education account and cannot send kudos.""" InvalidPriorityUsername = auto() OnlyServiceAccountProxy = auto() + """"Only accounts marked as service accounts can use this field.""" RequiresTrust = auto() InvalidRemixModel = auto() InvalidExtraSourceImages = auto() TooManyExtraSourceImages = auto() MissingFullSamplerOrder = auto() TooManyStopSequences = auto() + """The text request has too many stop sequences.""" ExcessiveStopSequence = auto() + """The text request has an excessive stop sequence.""" TokenOverflow = auto() MoreThanMinExtraSourceImage = auto() + + +class PROGRESS_STATE(StrEnum): + """The state of a request as seen on the server.""" + + waiting = auto() + finished = auto() + timed_out = auto() diff --git a/horde_sdk/ai_horde_api/endpoints.py b/horde_sdk/ai_horde_api/endpoints.py index 367aa8d..b84a9da 100644 --- a/horde_sdk/ai_horde_api/endpoints.py +++ b/horde_sdk/ai_horde_api/endpoints.py @@ -79,7 +79,7 @@ class AI_HORDE_API_ENDPOINT_SUBPATH(GENERIC_API_ENDPOINT_SUBPATH): """Note that this is an API key lookup, not a user ID lookup.""" v2_users_all = "/v2/users" - v2_users = "/v2/users/{user_id}" + v2_users_single = "/v2/users/{user_id}" v2_workers_all = "/v2/workers" v2_workers_single = "/v2/workers/{worker_id}" diff --git a/horde_sdk/ai_horde_api/exceptions.py b/horde_sdk/ai_horde_api/exceptions.py index 1882f61..24be6c4 100644 --- a/horde_sdk/ai_horde_api/exceptions.py +++ b/horde_sdk/ai_horde_api/exceptions.py @@ -8,7 +8,9 @@ class AIHordeRequestError(HordeException): - def __init__(self, error_response: RequestErrorResponse) -> None: + """Exception for when the AI Horde API returns an error response.""" + + def __init__(self, error_response: RequestErrorResponse) -> None: # noqa: D107 logger.error(f"The AI Horde API returned an error response. Response: {error_response.message}") super().__init__(error_response.message) try: @@ -20,6 +22,8 @@ def __init__(self, error_response: RequestErrorResponse) -> None: class AIHordePayloadValidationError(HordeException): + """Exception for when the AI Horde API cannot parse a request payload.""" + def __init__(self, errors: dict[str, Any], message: str) -> None: """Exception for when the AI Horde API cannot parse a request payload.""" logger.error(f"The AI Horde API returned an error response. Response: {message}. Errors: {errors}") @@ -33,7 +37,7 @@ class AIHordeImageValidationError(AIHordeRequestError): class AIHordeGenerationTimedOutError(HordeException): """Exception for when the time limit for a generation request is reached.""" - def __init__(self, error_response: RequestErrorResponse) -> None: + def __init__(self, error_response: RequestErrorResponse) -> None: # noqa: D107 logger.error( f"The AI Horde API returned an error response. Response: {error_response.message}. " "This is likely because the generation timed out. 
" diff --git a/horde_sdk/ai_horde_api/fields.py b/horde_sdk/ai_horde_api/fields.py index 1a087e7..7ecaf0f 100644 --- a/horde_sdk/ai_horde_api/fields.py +++ b/horde_sdk/ai_horde_api/fields.py @@ -17,9 +17,11 @@ class UUID_Identifier(RootModel[uuid.UUID]): model_config: ClassVar[ConfigDict] = {"frozen": True} root: uuid.UUID + """The underlying UUID object.""" @model_serializer def ser_model(self) -> str: + """Serialize the model to a string.""" return str(self.root) @field_validator("root", mode="after") @@ -59,7 +61,7 @@ def __eq__(self, other: Any) -> bool: @override def __hash__(self) -> int: - return self.root.__hash__() + return hash(UUID_Identifier.__name__) + self.root.__hash__() def __lt__(self, other: object) -> bool: if isinstance(other, UUID_Identifier): diff --git a/horde_sdk/ai_horde_worker/__init__.py b/horde_sdk/ai_horde_worker/__init__.py index e69de29..ac53ec7 100644 --- a/horde_sdk/ai_horde_worker/__init__.py +++ b/horde_sdk/ai_horde_worker/__init__.py @@ -0,0 +1 @@ +"""Helper methods for creating a worker for the AI Horde.""" diff --git a/horde_sdk/ai_horde_worker/bridge_data.py b/horde_sdk/ai_horde_worker/bridge_data.py index d5ce06b..3b8771a 100644 --- a/horde_sdk/ai_horde_worker/bridge_data.py +++ b/horde_sdk/ai_horde_worker/bridge_data.py @@ -19,6 +19,8 @@ class MetaInstruction(StrEnum): + """Model load instructions which requiring further processing to resolve.""" + ALL_REGEX = r"all$|all models?$" ALL_SDXL_REGEX = r"all sdxl$|all sdxl models?$" @@ -283,6 +285,7 @@ class ImageWorkerBridgeData(SharedHordeBridgeData): @field_validator("forms", mode="before") def default_forms(cls, v: list[str]) -> list[str]: + """Set the default forms if none are specified.""" if v is None or len(v) == 0: logger.info("Using the default alchemy forms as none were specified.") return ["caption", "nsfw", "interrogation", "post-process"] @@ -324,6 +327,7 @@ def meta_load_instructions(self) -> list[str] | None: @model_validator(mode="after") def handle_meta_instructions(self) -> ImageWorkerBridgeData: + """Handle the meta instructions by resolving and applying them.""" # See if any entries are meta instructions, and if so, remove them and place them in _meta_load_instructions for instruction_regex in MetaInstruction.__members__.values(): for i, model in enumerate(self.image_models_to_load): @@ -342,6 +346,7 @@ def meta_skip_instructions(self) -> list[str] | None: @model_validator(mode="after") def handle_meta_skip_instructions(self) -> ImageWorkerBridgeData: + """Handle the meta skip instructions by resolving and applying them.""" # See if any entries are meta instructions, and if so, remove them and place them in _meta_skip_instructions for instruction_regex in MetaInstruction.__members__.values(): for i, model in enumerate(self.image_models_to_skip): diff --git a/horde_sdk/ai_horde_worker/model_meta.py b/horde_sdk/ai_horde_worker/model_meta.py index 1624e81..adaaeb6 100644 --- a/horde_sdk/ai_horde_worker/model_meta.py +++ b/horde_sdk/ai_horde_worker/model_meta.py @@ -6,7 +6,7 @@ from loguru import logger from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIManualClient -from horde_sdk.ai_horde_api.apimodels import ImageModelStatsResponse, ImageStatsModelsRequest, StatsModelsTimeframe +from horde_sdk.ai_horde_api.apimodels import ImageStatsModelsRequest, ImageStatsModelsResponse, StatsModelsTimeframe from horde_sdk.ai_horde_worker.bridge_data import MetaInstruction from horde_sdk.generic_api.apimodels import RequestErrorResponse @@ -19,7 +19,7 @@ class ImageModelLoadResolver: 
_model_reference_manager: ModelReferenceManager - def __init__(self, model_reference_manager: ModelReferenceManager) -> None: + def __init__(self, model_reference_manager: ModelReferenceManager) -> None: # noqa: D107 if not isinstance(model_reference_manager, ModelReferenceManager): raise TypeError("model_reference_manager must be of type ModelReferenceManager") self._model_reference_manager = model_reference_manager @@ -29,8 +29,17 @@ def resolve_meta_instructions( possible_meta_instructions: list[str], client: AIHordeAPIManualClient, ) -> set[str]: + """Return a set of model names based on the given meta instructions. + + Args: + possible_meta_instructions: A list of strings representing meta instructions. + client: An AIHordeAPIManualClient object to use for making requests. + + Returns: + A set of strings representing the names of models to load. + """ # Get model stats from the API - stats_response = client.submit_request(ImageStatsModelsRequest(), ImageModelStatsResponse) + stats_response = client.submit_request(ImageStatsModelsRequest(), ImageStatsModelsResponse) if isinstance(stats_response, RequestErrorResponse): raise Exception(f"Error getting stats for models: {stats_response.message}") @@ -253,7 +262,7 @@ def resolve_all_models_of_baseline(self, baseline: str) -> set[str]: @staticmethod def resolve_top_n_model_names( number_of_top_models: int, - response: ImageModelStatsResponse, + response: ImageStatsModelsResponse, timeframe: StatsModelsTimeframe, ) -> list[str]: """Get the names of the top N models based on usage statistics. @@ -283,7 +292,7 @@ def resolve_top_n_model_names( @staticmethod def resolve_bottom_n_model_names( number_of_bottom_models: int, - response: ImageModelStatsResponse, + response: ImageStatsModelsResponse, timeframe: StatsModelsTimeframe, ) -> list[str]: """Get the names of the bottom N models based on usage statistics. diff --git a/horde_sdk/consts.py b/horde_sdk/consts.py index 6f999fb..2b98de2 100644 --- a/horde_sdk/consts.py +++ b/horde_sdk/consts.py @@ -4,8 +4,8 @@ from strenum import StrEnum -_UNDEFINED_MODEL = "_UNDEFINED_MODEL" -"""This model is logically present on the API, but not defined in the swagger.""" +_ANONYMOUS_MODEL = "_ANONYMOUS_MODEL" +"""This model is on the API but does not have a name.""" class HTTPMethod(StrEnum): diff --git a/horde_sdk/generic_api/apimodels.py b/horde_sdk/generic_api/apimodels.py index bf5b0c3..79b5243 100644 --- a/horde_sdk/generic_api/apimodels.py +++ b/horde_sdk/generic_api/apimodels.py @@ -39,7 +39,10 @@ def get_api_model_name(cls) -> str | None: If none, there is no payload, such as for a GET request. 
""" - model_config = ConfigDict(frozen=True) + model_config = ConfigDict( + frozen=True, + use_attribute_docstrings=True, + ) class HordeAPIDataObject(BaseModel): @@ -51,7 +54,16 @@ class HordeAPIDataObject(BaseModel): """ model_config = ( - ConfigDict(frozen=True) if not os.getenv("TESTS_ONGOING") else ConfigDict(frozen=True, extra="forbid") + ConfigDict( + frozen=True, + use_attribute_docstrings=True, + ) + if not os.getenv("TESTS_ONGOING") + else ConfigDict( + frozen=True, + use_attribute_docstrings=True, + extra="forbid", + ) ) @@ -60,9 +72,11 @@ class HordeAPIMessage(HordeAPIObject): @classmethod def get_sensitive_fields(cls) -> set[str]: + """Return a set of fields which should be redacted from logs.""" return {"apikey"} def get_extra_fields_to_exclude_from_log(self) -> set[str]: + """Return an additional set of fields to exclude from the log_safe_model_dump method.""" return set() def log_safe_model_dump(self) -> dict[Any, Any]: @@ -185,7 +199,6 @@ def does_target_request_follow_up(self, target_request: HordeRequest) -> bool: Returns: bool: Whether the `target_request` would follow up on this request. """ - follow_up_returned_params = self.get_follow_up_returned_params(as_python_field_name=True) if len(follow_up_returned_params) == 0: @@ -200,7 +213,7 @@ def does_target_request_follow_up(self, target_request: HordeRequest) -> bool: return all_match -class ResponseWithProgressMixin(BaseModel): +class ResponseWithProgressMixin(HordeAPIDataObject): """Represents any response from any Horde API which contains progress information.""" @abc.abstractmethod @@ -233,7 +246,7 @@ def get_finalize_success_request_type(cls) -> type[HordeRequest] | None: """Return the request type for this response to finalize the job on success, or `None` if not needed.""" -class ResponseRequiringDownloadMixin(BaseModel): +class ResponseRequiringDownloadMixin(HordeAPIDataObject): """Represents any response which may require downloading additional data.""" async def download_file_as_base64(self, client_session: aiohttp.ClientSession, url: str) -> str: @@ -268,10 +281,11 @@ def download_additional_data(self) -> None: """Download any additional data required for this response.""" -class ContainsMessageResponseMixin(BaseModel): +class ContainsMessageResponseMixin(HordeAPIDataObject): """Represents any response from any Horde API which contains a message.""" message: str = "" + """A message from the API. This is typically an error or warning message, but may also be informational.""" class RequestErrorResponse(HordeResponseBaseModel, ContainsMessageResponseMixin): @@ -309,6 +323,8 @@ def get_http_method(cls) -> HTTPMethod: default=f"horde_sdk:{__version__}:https://githib.com/haidra-org/horde-sdk", alias="Client-Agent", ) + """The requesting client's agent. You should set this to reflect the name, version and contact information + for your client.""" @classmethod def get_api_endpoint_url(cls) -> str: @@ -369,6 +385,7 @@ def get_number_of_results_expected(self) -> int: def get_requires_follow_up(self) -> bool: """Return whether this request requires a follow up request(s). + Returns: bool: Whether this request requires a follow up request to close the job on the server. 
""" @@ -383,7 +400,7 @@ def get_sensitive_fields(cls) -> set[str]: return {"apikey"} -class APIKeyAllowedInRequestMixin(BaseModel): +class APIKeyAllowedInRequestMixin(HordeAPIDataObject): """Mix-in class to describe an endpoint which may require authentication.""" apikey: str | None = None @@ -413,7 +430,7 @@ def validate_api_key_length(cls, v: str) -> str: return v -class RequestSpecifiesUserIDMixin(BaseModel): +class RequestSpecifiesUserIDMixin(HordeAPIDataObject): """Mix-in class to describe an endpoint for which you can specify a user.""" user_id: str @@ -430,17 +447,24 @@ def user_id_is_numeric(cls, value: str) -> str: return value -class RequestUsesWorkerMixin(BaseModel): +class RequestUsesWorkerMixin(HordeAPIDataObject): """Mix-in class to describe an endpoint for which you can specify workers.""" trusted_workers: bool = False + """When true, only trusted workers will serve this request. When False, Evaluating workers will also be used + which can increase speed but adds more risk!""" slow_workers: bool = True + """When True, allows slower workers to pick up this request. Disabling this incurs an extra kudos cost.""" workers: list[str] = Field(default_factory=list) + """A list of worker IDs to use for this request. If empty, any worker can pick up the request. Using this incurs + and extra kudos cost.""" worker_blacklist: list[str] = Field(default_factory=list) - + """If true, the worker list will be treated as a blacklist instead of a whitelist.""" models: list[str] + """The generative models to use for this request.""" dry_run: bool = False + """If true, the request will not be processed, but will return a response with the estimated kudos cost.""" __all__ = [ diff --git a/horde_sdk/generic_api/decoration.py b/horde_sdk/generic_api/decoration.py index 2dc7dd3..fdeaf12 100644 --- a/horde_sdk/generic_api/decoration.py +++ b/horde_sdk/generic_api/decoration.py @@ -4,7 +4,7 @@ def Unhashable(cls: type[T]) -> type[T]: - """A decorator that makes a class unhashable. + """Make a class unhashable. Args: cls (Any): The class to make unhashable. @@ -12,7 +12,6 @@ def Unhashable(cls: type[T]) -> type[T]: Returns: Any: The unhashable class. """ - cls._unhashable = True # type: ignore cls.__hash__ = None # type: ignore @@ -35,7 +34,7 @@ def is_unhashable(obj: type | Any) -> bool: # noqa: ANN401 def Unequatable(cls: type[T]) -> type[T]: - """A decorator that makes a class unequatable + """Mark a class as unequatable. Args: cls (type[T]): The class to make unequatable @@ -43,7 +42,6 @@ def Unequatable(cls: type[T]) -> type[T]: Returns: type[T]: The unequatable class """ - cls._unequatable = True # type: ignore def __eq__(self, other: Any) -> bool: # type: ignore # noqa: ANN001, ANN401 diff --git a/horde_sdk/generic_api/generic_clients.py b/horde_sdk/generic_api/generic_clients.py index 0466ea2..4a7a6d9 100644 --- a/horde_sdk/generic_api/generic_clients.py +++ b/horde_sdk/generic_api/generic_clients.py @@ -88,20 +88,15 @@ def __init__( Args: apikey (str, optional): The API key to use for authenticated requests. Defaults to None, which will use the - anonymous API key. - + anonymous API key. header_fields (type[GenericHeaderFields], optional): Pass this to define the API's Header fields. - Defaults to GenericHeaderFields. - + Defaults to GenericHeaderFields. path_fields (type[GenericPathFields], optional): Pass this to define the API's URL path fields. - Defaults to GenericPathFields. - + Defaults to GenericPathFields. 
query_fields (type[GenericQueryFields], optional): Pass this to define the API's URL query fields. - Defaults to GenericQueryFields. - + Defaults to GenericQueryFields. accept_types (type[GenericAcceptTypes], optional): Pass this to define the API's accept types. - Defaults to GenericAcceptTypes. - + Defaults to GenericAcceptTypes. kwargs: Any additional keyword arguments are ignored. Raises: @@ -141,6 +136,7 @@ def _validate_and_prepare_request(self, api_request: HordeRequest) -> ParsedRawR Args: api_request (HordeRequest): The `HordeRequest` instance to be validated and prepared. expected_response_type (type[HordeResponse]): The expected response type. + Returns: _ParsedRequest: A `_ParsedRequest` instance with the extracted data to be sent in the request. @@ -389,7 +385,7 @@ class GenericAsyncHordeAPIManualClient(BaseHordeAPIClient): _aiohttp_session: aiohttp.ClientSession - def __init__( + def __init__( # noqa: D107 self, *, apikey: str | None = None, @@ -493,7 +489,7 @@ def __init__( ) self._pending_follow_ups = [] - def submit_request( + def submit_request( # noqa: D102 self, api_request: HordeRequest, expected_response_type: type[HordeResponseTypeVar], @@ -559,8 +555,12 @@ def __exit__(self, exc_type: type[BaseException], exc_val: Exception, exc_tb: ob if exc_type is None: return True - # Log the error. - logger.error(f"Error: {exc_val}, Type: {exc_type}, Traceback: {exc_tb}") + # Log the error + logger.error(f"Error: {exc_val}, Type: {exc_type}") + + # Show the traceback if there is one + if exc_tb and hasattr(exc_tb, "print_exc"): + exc_tb.print_exc() # If there are no pending follow-up requests, return True if the exception was a CancelledError. if not self._pending_follow_ups: @@ -649,7 +649,7 @@ class GenericAsyncHordeAPISession(GenericAsyncHordeAPIManualClient): it.""" _pending_follow_ups_lock: asyncio.Lock = asyncio.Lock() - def __init__( + def __init__( # noqa: D107 self, aiohttp_session: aiohttp.ClientSession, *, @@ -758,9 +758,13 @@ async def __aexit__(self, exc_type: type[BaseException], exc_val: Exception, exc for request in self._awaiting_requests: logger.warning(f"Request Unhandled: {request.log_safe_model_dump()}") - # If there was an exception, log it. - if exc_type is not None: - logger.debug(f"Error: {exc_val}, Type: {exc_type}, Traceback: {exc_tb}") + # Log the error if there was one. + if exc_type: + logger.error(f"Error: {exc_val}, Type: {exc_type}") + + # Show the traceback if there is one + if exc_tb and hasattr(exc_tb, "print_exc"): + exc_tb.print_exc() # If there are no pending follow-up requests, return True if the exception was a CancelledError. if not self._pending_follow_ups: diff --git a/horde_sdk/horde_logging.py b/horde_sdk/horde_logging.py index c1a3eae..4fc88e8 100644 --- a/horde_sdk/horde_logging.py +++ b/horde_sdk/horde_logging.py @@ -11,6 +11,7 @@ def set_logger_verbosity(count: int) -> None: + """Set the verbosity of the logger.""" global verbosity # The count comes reversed. 
So count = 0 means minimum verbosity # While count 5 means maximum verbosity @@ -19,24 +20,28 @@ def set_logger_verbosity(count: int) -> None: def is_stdout_log(record: dict[str, Any]) -> bool: + """Filter for stdout log levels.""" if record["level"].no < verbosity: return False return True def is_msg_log(record: dict[str, Any]) -> bool: + """Filter for message log levels.""" if record["level"].no < verbosity: return False return True def is_stderr_log(record: dict[str, Any]) -> bool: + """Filter for stderr log levels.""" if record["level"].name not in error_levels: return False return True def is_trace_log(record: dict[str, Any]) -> bool: + """Filter for trace log levels.""" if record["level"].name not in error_levels: return False return True diff --git a/horde_sdk/meta.py b/horde_sdk/meta.py index b3a4ff1..d25cb46 100644 --- a/horde_sdk/meta.py +++ b/horde_sdk/meta.py @@ -17,6 +17,15 @@ @cache def find_subclasses(module_or_package: types.ModuleType, super_type: type) -> list[type]: + """Find all subclasses of a given type in a module or package. + + Args: + module_or_package (types.ModuleType): The module or package to search in. + super_type (type): The super type of the classes to search for. + + Returns: + list[type]: A list of all the subclasses of the super type in the module or package. + """ subclasses: list[type] = [] if hasattr(module_or_package, "__package__") and module_or_package.__package__ is not None: @@ -64,7 +73,6 @@ def any_unimported_classes(module: types.ModuleType, super_type: type) -> tuple[ def all_undefined_classes(module: types.ModuleType) -> dict[str, str]: """Return all of the models defined on the API but not in the SDK.""" - module_found_classes = find_subclasses(module, HordeAPIObject) defined_api_object_names: set[str] = set() @@ -125,3 +133,39 @@ def all_unaddressed_endpoints_ai_horde() -> set[AI_HORDE_API_ENDPOINT_SUBPATH]: unaddressed_paths.add(path) return unaddressed_paths + + +def all_models_missing_docstrings() -> set[type]: + """Return all of the models that do not have docstrings.""" + all_classes = find_subclasses(horde_sdk.ai_horde_api.apimodels, HordeAPIObject) + + missing_docstrings = set() + + for class_type in all_classes: + if not class_type.__doc__: + missing_docstrings.add(class_type) + + return missing_docstrings + + +def all_model_and_fields_missing_docstrings() -> dict[type, set[str]]: + """Return all of the models' fields that do not have docstrings.""" + all_classes = find_subclasses(horde_sdk.ai_horde_api.apimodels, HordeAPIObject) + + missing_docstrings: dict[type, set[str]] = {} + + from pydantic import BaseModel + + for class_type in all_classes: + if not issubclass(class_type, BaseModel): + continue + + missing_fields = set() + for field_name, field_info in class_type.model_fields.items(): + if not field_info.description: + missing_fields.add(field_name) + + if missing_fields: + missing_docstrings[class_type] = missing_fields + + return missing_docstrings diff --git a/horde_sdk/ratings_api/apimodels.py b/horde_sdk/ratings_api/apimodels.py index f1bd127..5e6e57e 100644 --- a/horde_sdk/ratings_api/apimodels.py +++ b/horde_sdk/ratings_api/apimodels.py @@ -7,7 +7,7 @@ from strenum import StrEnum from typing_extensions import override -from horde_sdk.consts import _UNDEFINED_MODEL, HTTPMethod +from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod from horde_sdk.generic_api.apimodels import ( APIKeyAllowedInRequestMixin, HordeRequest, @@ -63,7 +63,7 @@ class ImageRatingsResponse(HordeResponseBaseModel): @override @classmethod
def get_api_model_name(cls) -> str | None: - return _UNDEFINED_MODEL + return _ANONYMOUS_MODEL class UserRatingsResponseSubRecord(BaseImageRatingRecord): @@ -84,7 +84,7 @@ class UserRatingsResponse(HordeResponseBaseModel): @override @classmethod def get_api_model_name(cls) -> str | None: - return _UNDEFINED_MODEL + return _ANONYMOUS_MODEL class UserValidateResponseRecord(BaseImageRatingRecord): @@ -102,7 +102,7 @@ class UserValidateResponse(HordeResponseBaseModel): @override @classmethod def get_api_model_name(cls) -> str | None: - return _UNDEFINED_MODEL + return _ANONYMOUS_MODEL class UserCheckResponse(HordeResponseBaseModel): @@ -124,7 +124,7 @@ class UserCheckResponse(HordeResponseBaseModel): @override @classmethod def get_api_model_name(cls) -> str | None: - return _UNDEFINED_MODEL + return _ANONYMOUS_MODEL # endregion diff --git a/pyproject.toml b/pyproject.toml index 00067cc..a9d3523 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,28 +55,37 @@ select = [ "RET", "SIM", "COM", - # "D", + "D", + "D417", # Missing argument descriptions in the docstring "ANN", - "RUF"] + "RUF", + "ASYNC", + ] ignore = [ - "ANN101", - "ANN102", + "ANN101", # Missing type annotation for self in method + "ANN102", # Missing type annotation for cls in classmethod + "D105", # Missing docstring in magic method + "D100", # Missing docstring in public module # Ignore D rules for non-google docstring standard - "D203", - "D204", - "D213", - "D215", - "D400", - "D404", - "D406", - "D407", - "D408", - "D409", - "D413",] + "D203", # 1 blank line required before class docstring + "D204", # 1 blank line required after class docstring + "D213", # Multi-line docstring summary should start at the second line + "D215", # Section underline is over-indented + "D400", # First line should end with a period + "D404", # First word of the docstring should not be This + "D406", # Section name should end with a newline + "D407", # Missing dashed underline after section + "D408", # Section underline should be in the line following the section's name + "D409", # Section underline should match the length of its name + "D413",] # Missing blank line after last section [tool.ruff.lint.per-file-ignores] "__init__.py" = ["E402"] "conftest.py" = ["E402"] +"tests/*" = ["D"] # Ignore D rules for tests +"examples/*" = ["D"] # Ignore D rules for examples +"docs/*" = ["D"] # Ignore D rules for docs +"codegen/*" = ["D"] # Ignore D rules for codegen [tool.black] line-length = 119 diff --git a/requirements.dev.txt b/requirements.dev.txt index 32ff436..1a13cff 100644 --- a/requirements.dev.txt +++ b/requirements.dev.txt @@ -18,3 +18,4 @@ types-pytz types-requests types-setuptools types-urllib3 +types-aiofiles diff --git a/requirements.txt b/requirements.txt index 20fd6bc..da68173 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,7 @@ requests StrEnum loguru aiohttp +aiofiles aiodns pillow python-dotenv diff --git a/tests/ai_horde_api/test_ai_horde_api_models.py b/tests/ai_horde_api/test_ai_horde_api_models.py index df5e37c..83b9c3b 100644 --- a/tests/ai_horde_api/test_ai_horde_api_models.py +++ b/tests/ai_horde_api/test_ai_horde_api_models.py @@ -16,11 +16,9 @@ ImageGenerateAsyncResponse, ) from horde_sdk.ai_horde_api.apimodels._find_user import ( - ContributionsDetails, FindUserRequest, - FindUserResponse, - UsageDetails, ) +from horde_sdk.ai_horde_api.apimodels._users import ContributionsDetails, UsageDetails, UserDetailsResponse from horde_sdk.ai_horde_api.apimodels.base import GenMetadataEntry from 
horde_sdk.ai_horde_api.apimodels.generate._async import ( ImageGenerateAsyncRequest, @@ -287,7 +285,7 @@ def test_FindUserRequest(ai_horde_api_key: str) -> None: def test_FindUserResponse() -> None: - FindUserResponse( + UserDetailsResponse( account_age=1, concurrency=1, contributions=ContributionsDetails( diff --git a/tests/ai_horde_api/test_ai_horde_generate_api_calls.py b/tests/ai_horde_api/test_ai_horde_generate_api_calls.py index e252b5f..1daaa70 100644 --- a/tests/ai_horde_api/test_ai_horde_generate_api_calls.py +++ b/tests/ai_horde_api/test_ai_horde_generate_api_calls.py @@ -3,6 +3,7 @@ import aiohttp import pytest +from loguru import logger from horde_sdk.ai_horde_api.ai_horde_clients import ( AIHordeAPIAsyncClientSession, @@ -481,6 +482,24 @@ def example_callback(generation: ImageGenerateCheckResponse) -> None: assert image is not None + @pytest.mark.asyncio + async def test_check_image_gen_callback_keyboard_interrupt( + self, + simple_image_gen_request: ImageGenerateAsyncRequest, + ) -> None: + with pytest.raises(KeyboardInterrupt, match="Test KeyboardInterrupt"): + async with aiohttp.ClientSession() as aiohttp_session: + simple_client = AIHordeAPIAsyncSimpleClient(aiohttp_session) + + def check_callback(response: ImageGenerateCheckResponse) -> None: + logger.debug(f"Response: {response}") + raise KeyboardInterrupt("Test KeyboardInterrupt") + + image_generate_status_response, job_id = await simple_client.image_generate_request( + simple_image_gen_request, + check_callback=check_callback, + ) + @pytest.mark.asyncio async def test_check_alchemy_callback( self, diff --git a/tests/ai_horde_api/test_ai_horde_generate_text_api_calls.py b/tests/ai_horde_api/test_ai_horde_generate_text_api_calls.py new file mode 100644 index 0000000..5877dd8 --- /dev/null +++ b/tests/ai_horde_api/test_ai_horde_generate_text_api_calls.py @@ -0,0 +1,96 @@ +import aiohttp +import pytest +from loguru import logger + +from horde_sdk.ai_horde_api.ai_horde_clients import ( + AIHordeAPIAsyncClientSession, + AIHordeAPIAsyncSimpleClient, + AIHordeAPISimpleClient, +) +from horde_sdk.ai_horde_api.apimodels import ( + TextGenerateAsyncRequest, + TextGenerateStatusResponse, +) +from horde_sdk.ai_horde_api.fields import JobID + + +class TestAIHordeTextGenerate: + def test_text_generate(self) -> None: + simple_client = AIHordeAPISimpleClient() + request = TextGenerateAsyncRequest( + prompt="Hello, world!", + models=[ + "koboldcpp/LLaMA2-13B-Psyfighter2", + ], + ) + + response, job_id = simple_client.text_generate_request(request) + + logger.debug(f"{job_id}: {response}") + + assert isinstance(job_id, JobID) + assert isinstance(response, TextGenerateStatusResponse) + + assert len(response.generations) == 1 + assert response.generations[0].model == "koboldcpp/LLaMA2-13B-Psyfighter2" + text_response = response.generations[0].text + assert text_response is not None + assert len(text_response) > 0 + + +class TestAIHordeTextGenerateAsync: + @pytest.mark.asyncio + async def test_text_generate_async(self) -> None: + aiohttp_session = aiohttp.ClientSession() + horde_client_session = AIHordeAPIAsyncClientSession( + aiohttp_session=aiohttp_session, + ) + async with aiohttp_session, horde_client_session: + simple_client = AIHordeAPIAsyncSimpleClient(horde_client_session=horde_client_session) + request = TextGenerateAsyncRequest( + prompt="Hello, world!", + models=[ + "koboldcpp/LLaMA2-13B-Psyfighter2", + ], + ) + + response, job_id = await simple_client.text_generate_request( + request, + check_callback=lambda response: 
logger.debug(f"Response: {response}"), + ) + + logger.debug(f"{job_id}: {response}") + + assert isinstance(job_id, JobID) + assert isinstance(response, TextGenerateStatusResponse) + + assert len(response.generations) == 1 + assert response.generations[0].model == "koboldcpp/LLaMA2-13B-Psyfighter2" + text_response = response.generations[0].text + assert text_response is not None + assert len(text_response) > 0 + + @pytest.mark.asyncio + async def test_text_generate_async_keyboard_interrupt(self) -> None: + aiohttp_session = aiohttp.ClientSession() + horde_client_session = AIHordeAPIAsyncClientSession( + aiohttp_session=aiohttp_session, + ) + async with aiohttp_session, horde_client_session: + simple_client = AIHordeAPIAsyncSimpleClient(horde_client_session=horde_client_session) + request = TextGenerateAsyncRequest( + prompt="Hello, world!", + models=[ + "koboldcpp/LLaMA2-13B-Psyfighter2", + ], + ) + + def check_callback(response: TextGenerateStatusResponse) -> None: + logger.debug(f"Response: {response}") + raise KeyboardInterrupt("Test KeyboardInterrupt") + + with pytest.raises(KeyboardInterrupt): + await simple_client.text_generate_request( + request, + check_callback=check_callback, + ) diff --git a/tests/ai_horde_api/test_ai_horde_stats_api_calls.py b/tests/ai_horde_api/test_ai_horde_stats_api_calls.py index 1f4e0a0..9ec557a 100644 --- a/tests/ai_horde_api/test_ai_horde_stats_api_calls.py +++ b/tests/ai_horde_api/test_ai_horde_stats_api_calls.py @@ -6,12 +6,12 @@ AIHordeAPIAsyncClientSession, ) from horde_sdk.ai_horde_api.apimodels._stats import ( - ImageModelStatsResponse, ImageStatsModelsRequest, + ImageStatsModelsResponse, ImageStatsModelsTotalRequest, ImageStatsModelsTotalResponse, SinglePeriodImgStat, - TextModelStatsResponse, + TextStatsModelResponse, TextStatsModelsRequest, TextStatsModelsTotalRequest, TextStatsModelsTotalResponse, @@ -29,14 +29,14 @@ async def test_get_image_stats_models(self) -> None: request = ImageStatsModelsRequest() response = await client.submit_request( request, - expected_response_type=ImageModelStatsResponse, + expected_response_type=ImageStatsModelsResponse, ) if isinstance(response, RequestErrorResponse): raise AssertionError(f"Request failed: {response}") assert response is not None - assert isinstance(response, ImageModelStatsResponse) + assert isinstance(response, ImageStatsModelsResponse) assert isinstance(response.day, dict) assert isinstance(response.month, dict) assert isinstance(response.total, dict) @@ -44,14 +44,14 @@ async def test_get_image_stats_models(self) -> None: request_known = ImageStatsModelsRequest(model_state="known") response_known = await client.submit_request( request_known, - expected_response_type=ImageModelStatsResponse, + expected_response_type=ImageStatsModelsResponse, ) if isinstance(response_known, RequestErrorResponse): raise AssertionError(f"Request failed: {response_known}") assert response_known is not None - assert isinstance(response_known, ImageModelStatsResponse) + assert isinstance(response_known, ImageStatsModelsResponse) assert isinstance(response_known.day, dict) assert isinstance(response_known.month, dict) assert isinstance(response_known.total, dict) @@ -59,20 +59,20 @@ async def test_get_image_stats_models(self) -> None: request_custom = ImageStatsModelsRequest(model_state="custom") response_custom = await client.submit_request( request_custom, - expected_response_type=ImageModelStatsResponse, + expected_response_type=ImageStatsModelsResponse, ) if isinstance(response_custom, RequestErrorResponse): raise 
AssertionError(f"Request failed: {response_custom}") assert response_custom is not None - assert isinstance(response_custom, ImageModelStatsResponse) + assert isinstance(response_custom, ImageStatsModelsResponse) assert isinstance(response_custom.day, dict) assert isinstance(response_custom.month, dict) assert isinstance(response_custom.total, dict) - if (not isinstance(response, ImageModelStatsResponse) or response.month is None) or ( - not isinstance(response_custom, ImageModelStatsResponse) or response_custom.month is None + if (not isinstance(response, ImageStatsModelsResponse) or response.month is None) or ( + not isinstance(response_custom, ImageStatsModelsResponse) or response_custom.month is None ): pytest.skip("No data to compare. Is this a development environment?") else: @@ -113,14 +113,14 @@ async def test_get_text_stats_models(self) -> None: request = TextStatsModelsRequest() response = await client.submit_request( request, - expected_response_type=TextModelStatsResponse, + expected_response_type=TextStatsModelResponse, ) if isinstance(response, RequestErrorResponse): raise AssertionError(f"Request failed: {response}") assert response is not None - assert isinstance(response, TextModelStatsResponse) + assert isinstance(response, TextStatsModelResponse) assert isinstance(response.day, dict) assert isinstance(response.month, dict) assert isinstance(response.total, dict) diff --git a/tests/ai_horde_api/test_dynamically_validate_against_swagger.py b/tests/ai_horde_api/test_dynamically_validate_against_swagger.py index 762b546..d6d84d0 100644 --- a/tests/ai_horde_api/test_dynamically_validate_against_swagger.py +++ b/tests/ai_horde_api/test_dynamically_validate_against_swagger.py @@ -1,11 +1,13 @@ import json +from types import NoneType, UnionType from typing import Any import pytest +from pydantic import BaseModel import horde_sdk.ai_horde_api.apimodels from horde_sdk.ai_horde_api.endpoints import get_ai_horde_swagger_url -from horde_sdk.consts import HTTPMethod, HTTPStatusCode, get_all_success_status_codes +from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod, HTTPStatusCode, get_all_success_status_codes from horde_sdk.generic_api._reflection import get_all_request_types from horde_sdk.generic_api.apimodels import HordeRequest, HordeResponse from horde_sdk.generic_api.endpoints import GENERIC_API_ENDPOINT_SUBPATH @@ -16,6 +18,43 @@ ) +def get_fields_descriptions_and_types(class_type: type[BaseModel]) -> dict[str, dict[str, str | list[str] | None]]: + field_names_and_descriptions: dict[str, dict[str, str | list[str] | None]] = {} + for field_name, field_info in class_type.model_fields.items(): + if field_info.description is not None: + field_names_and_descriptions[field_name] = {"description": field_info.description} + else: + field_names_and_descriptions[field_name] = {"description": None} + + if field_info.annotation is not None: + # Builtin-types should use their simple name while horde_sdk classes should use their fully qualified name + # dict and list types should use their string representation + types_list = [] + if isinstance(field_info.annotation, UnionType): + for anno_type in field_info.annotation.__args__: + if "horde_sdk" in anno_type.__module__: + types_list.append(anno_type.__module__ + "." 
+ anno_type.__name__) + elif hasattr(anno_type, "__origin__") and ( + anno_type.__origin__ is dict or anno_type.__origin__ is list + ): + types_list.append(str(anno_type)) + else: + types_list.append(anno_type.__name__ if anno_type is not NoneType else "None") + else: + if "horde_sdk" in field_info.annotation.__module__: + types_list.append(field_info.annotation.__module__ + "." + field_info.annotation.__name__) + elif hasattr(field_info.annotation, "__origin__") and ( + field_info.annotation.__origin__ is dict or field_info.annotation.__origin__ is list + ): + types_list.append(str(field_info.annotation)) + else: + types_list.append(field_info.annotation.__name__) + + field_names_and_descriptions[field_name]["types"] = types_list + + return field_names_and_descriptions + + @pytest.mark.object_verify def all_ai_horde_model_defs_in_swagger(swagger_doc: SwaggerDoc) -> None: """Ensure all models defined in ai_horde_api are defined in the swagger doc.""" @@ -35,8 +74,8 @@ def all_ai_horde_model_defs_in_swagger(swagger_doc: SwaggerDoc) -> None: api_to_sdk_payload_model_map: dict[str, dict[HTTPMethod, type[HordeRequest]]] = {} api_to_sdk_response_model_map: dict[str, dict[HTTPStatusCode, type[HordeResponse]]] = {} - request_field_names_and_descriptions: dict[str, list[tuple[str, str | None]]] = {} - response_field_names_and_descriptions: dict[str, list[tuple[str, str | None]]] = {} + request_field_names_and_descriptions: dict[str, dict[str, dict[str, str | list[str] | None]]] = {} + response_field_names_and_descriptions: dict[str, dict[str, dict[str, str | list[str] | None]]] = {} default_num_request_fields = len(HordeRequest.model_fields) @@ -66,9 +105,17 @@ def all_ai_horde_model_defs_in_swagger(swagger_doc: SwaggerDoc) -> None: # Otherwise, the request type has a payload, and is (probably) supposed to be a POST, PUT, or PATCH with # a payload else: - assert ( - request_type.get_api_model_name() in swagger_defined_models - ), f"Model is defined in horde_sdk, but not in swagger: {request_type.get_api_model_name()}" + if request_type.get_api_model_name() == _ANONYMOUS_MODEL: + print( + f"Request type {request_type.__name__} has an anonymous model name. " + "This is probably not what you want. 
" + "Consider giving it a unique name on the API.", + ) + else: + + assert ( + request_type.get_api_model_name() in swagger_defined_models + ), f"Model is defined in horde_sdk, but not in swagger: {request_type.get_api_model_name()}" assert endpoint_subpath in swagger_doc.paths, f"Missing {request_type.__name__} in swagger" @@ -92,11 +139,8 @@ def all_ai_horde_model_defs_in_swagger(swagger_doc: SwaggerDoc) -> None: api_to_sdk_payload_model_map[endpoint_subpath][request_type.get_http_method()] = request_type - for field_name, field_info in request_type.model_fields.items(): - if request_type.__name__ not in request_field_names_and_descriptions: - request_field_names_and_descriptions[request_type.__name__] = [] - - request_field_names_and_descriptions[request_type.__name__].append((field_name, field_info.description)) + request_field_dict = get_fields_descriptions_and_types(request_type) + request_field_names_and_descriptions[request_type.__name__] = request_field_dict endpoint_success_http_status_codes: list[HTTPStatusCode] = [] @@ -126,16 +170,8 @@ def all_ai_horde_model_defs_in_swagger(swagger_doc: SwaggerDoc) -> None: print(f"Response type {response_type.__name__} has no fields") continue - for field_name, field_info in response_type.model_fields.items(): - if response_type.__name__ not in response_field_names_and_descriptions: - response_field_names_and_descriptions[response_type.__name__] = [] - - if field_info.description is not None: - response_field_names_and_descriptions[response_type.__name__].append( - (field_name, field_info.description), - ) - else: - response_field_names_and_descriptions[response_type.__name__].append((field_name, None)) + response_field_dict = get_fields_descriptions_and_types(response_type) + response_field_names_and_descriptions[response_type.__name__] = response_field_dict def json_serializer(obj: object) -> object: if isinstance(obj, str): diff --git a/tests/ai_horde_worker/test_model_meta_api_calls.py b/tests/ai_horde_worker/test_model_meta_api_calls.py index 9c0fd81..85d4b8e 100644 --- a/tests/ai_horde_worker/test_model_meta_api_calls.py +++ b/tests/ai_horde_worker/test_model_meta_api_calls.py @@ -2,16 +2,16 @@ from horde_model_reference.model_reference_manager import ModelReferenceManager from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIManualClient -from horde_sdk.ai_horde_api.apimodels import ImageModelStatsResponse, ImageStatsModelsRequest, StatsModelsTimeframe +from horde_sdk.ai_horde_api.apimodels import ImageStatsModelsRequest, ImageStatsModelsResponse, StatsModelsTimeframe from horde_sdk.ai_horde_worker.model_meta import ImageModelLoadResolver from horde_sdk.generic_api.apimodels import RequestErrorResponse @pytest.fixture(scope="session") -def stats_response() -> ImageModelStatsResponse: +def stats_response() -> ImageStatsModelsResponse: client = AIHordeAPIManualClient() - stats_response = client.submit_request(ImageStatsModelsRequest(), ImageModelStatsResponse) + stats_response = client.submit_request(ImageStatsModelsRequest(), ImageStatsModelsResponse) if isinstance(stats_response, RequestErrorResponse): raise Exception(f"Request error: {stats_response.message}. 
object_data: {stats_response.object_data}") @@ -32,7 +32,7 @@ def test_image_model_load_resolver_all(image_model_load_resolver: ImageModelLoad def test_image_model_load_resolver_top_n( image_model_load_resolver: ImageModelLoadResolver, - stats_response: ImageModelStatsResponse, + stats_response: ImageStatsModelsResponse, ) -> None: resolved_model_names = image_model_load_resolver.resolve_top_n_model_names( 1, @@ -45,7 +45,7 @@ def test_image_model_load_resolver_top_n( def test_image_model_top_10( image_model_load_resolver: ImageModelLoadResolver, - stats_response: ImageModelStatsResponse, + stats_response: ImageStatsModelsResponse, ) -> None: resolved_model_names = image_model_load_resolver.resolve_top_n_model_names( 10, @@ -58,7 +58,7 @@ def test_image_model_top_10( def test_image_model_load_resolver_bottom_n( image_model_load_resolver: ImageModelLoadResolver, - stats_response: ImageModelStatsResponse, + stats_response: ImageStatsModelsResponse, ) -> None: resolved_model_names = image_model_load_resolver.resolve_bottom_n_model_names( 1, @@ -71,7 +71,7 @@ def test_image_model_load_resolver_bottom_n( def test_image_model_load_resolver_bottom_10( image_model_load_resolver: ImageModelLoadResolver, - stats_response: ImageModelStatsResponse, + stats_response: ImageStatsModelsResponse, ) -> None: resolved_model_names = image_model_load_resolver.resolve_bottom_n_model_names( 10, diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_users_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_users_get_200.json index 704497b..f2ac293 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_users_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_users_get_200.json @@ -1,69 +1,71 @@ -{ - "username": "", - "id": 0, - "kudos": 0.0, - "evaluating_kudos": 0.0, - "concurrency": 0, - "worker_invited": 0, - "moderator": false, - "kudos_details": { - "accumulated": 0, - "gifted": 0, - "donated": 0, - "admin": 0, - "received": 0, - "recurring": 0, - "awarded": 0 - }, - "worker_count": 0, - "worker_ids": [ - "00000000-0000-0000-0000-000000000000" - ], - "sharedkey_ids": [ - "00000000-0000-0000-0000-000000000000" - ], - "monthly_kudos": { - "amount": 0, - "last_received": "2021-01-01T00:00:00Z" - }, - "trusted": false, - "flagged": false, - "vpn": false, - "service": false, - "education": false, - "customizer": false, - "special": false, - "suspicious": 0, - "pseudonymous": false, - "contact": "email@example.com", - "admin_comment": "User is sus", - "account_age": 60, - "usage": { - "megapixelsteps": 0.0, - "requests": 0 - }, - "contributions": { - "megapixelsteps": 0.0, - "fulfillments": 0 - }, - "records": { - "usage": { - "megapixelsteps": 0, - "tokens": 0 +[ + { + "username": "", + "id": 0, + "kudos": 0.0, + "evaluating_kudos": 0.0, + "concurrency": 0, + "worker_invited": 0, + "moderator": false, + "kudos_details": { + "accumulated": 0, + "gifted": 0, + "donated": 0, + "admin": 0, + "received": 0, + "recurring": 0, + "awarded": 0 + }, + "worker_count": 0, + "worker_ids": [ + "00000000-0000-0000-0000-000000000000" + ], + "sharedkey_ids": [ + "00000000-0000-0000-0000-000000000000" + ], + "monthly_kudos": { + "amount": 0, + "last_received": "2021-01-01T00:00:00Z" }, - "contribution": { - "megapixelsteps": 0, - "tokens": 0 + "trusted": false, + "flagged": false, + "vpn": false, + "service": false, + "education": false, + "customizer": false, + "special": false, + "suspicious": 0, + "pseudonymous": false, + "contact": "email@example.com", + "admin_comment": "User 
is sus", + "account_age": 60, + "usage": { + "megapixelsteps": 0.0, + "requests": 0 }, - "fulfillment": { - "image": 0, - "text": 0, - "interrogation": 0 + "contributions": { + "megapixelsteps": 0.0, + "fulfillments": 0 }, - "request": { - "image": 0, - "text": 0, - "interrogation": 0 + "records": { + "usage": { + "megapixelsteps": 0, + "tokens": 0 + }, + "contribution": { + "megapixelsteps": 0, + "tokens": 0 + }, + "fulfillment": { + "image": 0, + "text": 0, + "interrogation": 0 + }, + "request": { + "image": 0, + "text": 0, + "interrogation": 0 + } } } -} +] diff --git a/tests/test_verify_api_surface.py b/tests/test_verify_api_surface.py index e567cb3..63ce7f6 100644 --- a/tests/test_verify_api_surface.py +++ b/tests/test_verify_api_surface.py @@ -54,6 +54,16 @@ def test_all_ai_horde_api_models_defined() -> None: # Pretty print the undefined classes sorted by dict values, NOT by keys import json + error_responses = { + "RequestError", + "RequestValidationError", + } + + for error_response in error_responses: + if error_response in undefined_classes: + print(f"Warning: {error_response} is an error response which may not be handled.") + undefined_classes.pop(error_response) + undefined_classes_sorted = dict(sorted(undefined_classes.items(), key=lambda x: x[1])) print(json.dumps(undefined_classes_sorted, indent=4)) @@ -101,3 +111,17 @@ def test_all_ratings_api_models_imported() -> None: f"namespace: : {missing_imports}" f"\n\nMissing import names: {missing_import_names}" ) + + +@pytest.mark.object_verify +def test_all_models_have_docstrings() -> None: + import horde_sdk.meta + + missing_docstrings = horde_sdk.meta.all_model_and_fields_missing_docstrings() + + import json + + stringified_missing_docstrings = {k.__name__: list(v) for k, v in missing_docstrings.items()} + jsonified_missing_docstrings = json.dumps(stringified_missing_docstrings, indent=4) + + assert not missing_docstrings, "The following models are missing docstrings: " f"{jsonified_missing_docstrings}"