From 191efadd7eb6dd7bb960c31f1bf6352d6bba288c Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Thu, 12 Sep 2024 22:59:23 +0000 Subject: [PATCH] SDK regeneration --- .mock/definition/api.yml | 10 +- .../definition/empathic-voice/__package__.yml | 557 +++++++---- .mock/definition/empathic-voice/chat.yml | 4 +- .../{chat-groups.yml => chatGroups.yml} | 6 +- .mock/definition/empathic-voice/chats.yml | 6 +- .mock/definition/empathic-voice/configs.yml | 138 ++- .../empathic-voice/customVoices.yml | 197 ++++ .mock/definition/empathic-voice/prompts.yml | 26 +- .mock/definition/empathic-voice/tools.yml | 26 +- .../{batch.yml => batch/__package__.yml} | 228 ++--- .../{stream.yml => stream/__package__.yml} | 52 +- .mock/fern.config.json | 2 +- poetry.lock | 28 +- pyproject.toml | 99 +- reference.md | 469 ++++++++- src/hume/__init__.py | 4 +- src/hume/empathic_voice/__init__.py | 23 +- src/hume/empathic_voice/chat_groups/client.py | 42 + src/hume/empathic_voice/chats/client.py | 42 + src/hume/empathic_voice/client.py | 4 + src/hume/empathic_voice/configs/client.py | 264 +++++- .../empathic_voice/custom_voices/__init__.py | 2 + .../empathic_voice/custom_voices/client.py | 886 ++++++++++++++++++ src/hume/empathic_voice/errors/__init__.py | 5 + .../errors/bad_request_error.py | 9 + src/hume/empathic_voice/prompts/client.py | 194 +++- src/hume/empathic_voice/tools/client.py | 194 +++- src/hume/empathic_voice/types/__init__.py | 18 +- ...custom_voice_name.py => error_response.py} | 16 +- .../types/posted_config_prompt_spec.py | 36 + .../types/posted_custom_voice.py | 29 +- .../types/posted_custom_voice_base_voice.py | 7 + .../types/posted_custom_voice_parameters.py | 51 + .../types/posted_prompt_spec.py | 16 +- .../types/posted_user_defined_tool_spec.py | 2 +- src/hume/empathic_voice/types/posted_voice.py | 12 +- .../empathic_voice/types/posted_voice_name.py | 5 - .../empathic_voice/types/return_config.py | 7 +- .../types/return_config_spec.py | 2 +- .../types/return_custom_voice.py | 33 +- .../types/return_custom_voice_base_voice.py | 7 + .../types/return_custom_voice_parameters.py | 49 + .../types/return_paged_custom_voices.py | 12 +- .../empathic_voice/types/return_prompt.py | 2 +- .../types/return_user_defined_tool.py | 2 +- src/hume/empathic_voice/types/return_voice.py | 10 +- .../empathic_voice/types/return_voice_name.py | 5 - src/hume/expression_measurement/__init__.py | 309 +----- tests/empathic_voice/test_custom_voices.py | 164 ++++ .../expression_measurement/batch/__init__.py | 2 + .../{test_batch.py => batch/test_root.py} | 2 +- 51 files changed, 3412 insertions(+), 903 deletions(-) rename .mock/definition/empathic-voice/{chat-groups.yml => chatGroups.yml} (99%) create mode 100644 .mock/definition/empathic-voice/customVoices.yml rename .mock/definition/expression-measurement/{batch.yml => batch/__package__.yml} (88%) rename .mock/definition/expression-measurement/{stream.yml => stream/__package__.yml} (90%) create mode 100644 src/hume/empathic_voice/custom_voices/__init__.py create mode 100644 src/hume/empathic_voice/custom_voices/client.py create mode 100644 src/hume/empathic_voice/errors/__init__.py create mode 100644 src/hume/empathic_voice/errors/bad_request_error.py rename src/hume/empathic_voice/types/{posted_custom_voice_name.py => error_response.py} (63%) create mode 100644 src/hume/empathic_voice/types/posted_config_prompt_spec.py create mode 100644 src/hume/empathic_voice/types/posted_custom_voice_base_voice.py create mode 100644 
src/hume/empathic_voice/types/posted_custom_voice_parameters.py delete mode 100644 src/hume/empathic_voice/types/posted_voice_name.py create mode 100644 src/hume/empathic_voice/types/return_custom_voice_base_voice.py create mode 100644 src/hume/empathic_voice/types/return_custom_voice_parameters.py delete mode 100644 src/hume/empathic_voice/types/return_voice_name.py create mode 100644 tests/empathic_voice/test_custom_voices.py create mode 100644 tests/expression_measurement/batch/__init__.py rename tests/expression_measurement/{test_batch.py => batch/test_root.py} (99%) diff --git a/.mock/definition/api.yml b/.mock/definition/api.yml index 517bc76e..a7923d94 100644 --- a/.mock/definition/api.yml +++ b/.mock/definition/api.yml @@ -1,12 +1,12 @@ name: api +error-discrimination: + strategy: status-code +default-environment: Production +environments: + Production: https://api.hume.ai auth: HeaderAuthScheme auth-schemes: HeaderAuthScheme: name: apiKey header: X-Hume-Api-Key type: optional -default-environment: Production -environments: - Production: https://api.hume.ai -error-discrimination: - strategy: status-code diff --git a/.mock/definition/empathic-voice/__package__.yml b/.mock/definition/empathic-voice/__package__.yml index 5c1854b5..cb0abcfc 100644 --- a/.mock/definition/empathic-voice/__package__.yml +++ b/.mock/definition/empathic-voice/__package__.yml @@ -1,4 +1,15 @@ +errors: + BadRequestError: + status-code: 400 + type: ErrorResponse + docs: Bad Request types: + ErrorResponse: + properties: + error: optional + message: optional + source: + openapi: stenographer-openapi.json ReturnUserDefinedToolToolType: enum: - BUILTIN @@ -7,7 +18,7 @@ types: Type of Tool. Either `BUILTIN` for natively implemented tools, like web search, or `FUNCTION` for user-defined tools. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnUserDefinedToolVersionType: enum: - FIXED @@ -16,7 +27,7 @@ types: Versioning method for a Tool. Either `FIXED` for using a fixed version number or `LATEST` for auto-updating to the latest version. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnUserDefinedTool: docs: A specific tool version returned from the server properties: @@ -34,9 +45,9 @@ types: Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine tools and revert to previous versions if needed. + Tools, Configs, Custom Voices, and Prompts are versioned. This + versioning system supports iterative development, allowing you to + progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations @@ -85,7 +96,7 @@ types: Structured as a stringified JSON schema, this format ensures the tool receives data in the expected format. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnPromptVersionType: enum: - FIXED @@ -94,7 +105,7 @@ types: Versioning method for a Prompt. Either `FIXED` for using a fixed version number or `LATEST` for auto-updating to the latest version. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnPrompt: docs: A Prompt associated with this Config. properties: @@ -107,9 +118,10 @@ types: Version number for a Prompt. 
- Prompts, as well as Configs and Tools, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine prompts and revert to previous versions if needed. + Prompts, Configs, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine prompts and revert to previous versions if + needed. Version numbers are integer values representing different iterations @@ -152,33 +164,159 @@ types: For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json + PostedCustomVoiceBaseVoice: + enum: + - ITO + - KORA + - DACHER + - AURA + - FINN + - STELLA + - WHIMSY + docs: Specifies the base voice used to create the Custom Voice. + source: + openapi: stenographer-openapi.json + PostedCustomVoiceParameters: + docs: >- + The specified attributes of a Custom Voice. + + + If no parameters are specified then all attributes will be set to their + defaults, meaning no modfications will be made to the base voice. + properties: + gender: + type: optional + docs: >- + The vocalization of gender, ranging between masculine and feminine. + + + The default value is `0`, with a minimum of `-100` (more masculine) + and a maximum of `100` (more feminine). A value of `0` leaves this + parameter unchanged from the base voice. + huskiness: + type: optional + docs: >- + The texture of the voice, ranging between bright and husky. + + + The default value is `0`, with a minimum of `-100` (brighter) and a + maximum of `100` (huskier). A value of `0` leaves this parameter + unchanged from the base voice. + nasality: + type: optional + docs: >- + The openness of the voice, ranging between resonant and nasal. + + + The default value is `0`, with a minimum of `-100` (more resonant) and + a maximum of `100` (more nasal). A value of `0` leaves this parameter + unchanged from the base voice. + pitch: + type: optional + docs: >- + The frequency of the voice, ranging between low and high. + + + The default value is `0`, with a minimum of `-100` (lower) and a + maximum of `100` (higher). A value of `0` leaves this parameter + unchanged from the base voice. + source: + openapi: stenographer-openapi.json PostedCustomVoice: - docs: A custom voice specifications posted to the server + docs: >- + A Custom Voice specification to be associated with this Config. + + + If a Custom Voice specification is not provided then the + [name](/reference/empathic-voice-interface-evi/configs/create-config#request.body.voice.name) + of a base voice or previously created Custom Voice must be provided. + + See our [Voices guide](/docs/empathic-voice-interface-evi/voices) for a tutorial on how to craft a Custom Voice. properties: name: type: string docs: >- - String with the name of the voice to use. Maximum length of 75 - characters. Will be converted to all-uppercase. + The name of the Custom Voice. Maximum length of 75 characters. Will be + converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE + VOICE") base_voice: - type: string - docs: The voice the custom voice is based off of. - speech_rate_multiplier: - type: optional - docs: The speech rate multiplier for this custom voice. + type: PostedCustomVoiceBaseVoice + docs: Specifies the base voice used to create the Custom Voice. 
parameter_model: - type: string + type: literal<"20240715-4parameter"> docs: >- The name of the parameter model used to define which attributes are - used by `parameters`. + used by the `parameters` field. Currently, only `20240715-4parameter` + is supported as the parameter model. parameters: - type: optional>> - docs: Voice specification for a Config. + type: optional + docs: >- + The specified attributes of a Custom Voice. + + + If no parameters are specified then all attributes will be set to + their defaults, meaning no modfications will be made to the base + voice. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json + ReturnCustomVoiceBaseVoice: + enum: + - ITO + - KORA + - DACHER + - AURA + - FINN + - STELLA + - WHIMSY + docs: The base voice used to create the Custom Voice. + source: + openapi: stenographer-openapi.json + ReturnCustomVoiceParameters: + docs: >- + The specified attributes of a Custom Voice. If a parameter's value is `0` + (default), it will not be included in the response. + properties: + gender: + type: optional + docs: >- + The vocalization of gender, ranging between masculine and feminine. + + + The default value is `0`, with a minimum of `-100` (more masculine) + and a maximum of `100` (more feminine). A value of `0` leaves this + parameter unchanged from the base voice. + huskiness: + type: optional + docs: >- + The texture of the voice, ranging between bright and husky. + + + The default value is `0`, with a minimum of `-100` (brighter) and a + maximum of `100` (huskier). A value of `0` leaves this parameter + unchanged from the base voice. + nasality: + type: optional + docs: >- + The openness of the voice, ranging between resonant and nasal. + + + The default value is `0`, with a minimum of `-100` (more resonant) and + a maximum of `100` (more nasal). A value of `0` leaves this parameter + unchanged from the base voice. + pitch: + type: optional + docs: >- + The frequency of the voice, ranging between low and high. + + + The default value is `0`, with a minimum of `-100` (lower) and a + maximum of `100` (higher). A value of `0` leaves this parameter + unchanged from the base voice. + source: + openapi: stenographer-openapi.json ReturnCustomVoice: - docs: A custom voice specification returned from the server + docs: A Custom Voice specification associated with this Config. properties: id: type: string @@ -186,35 +324,47 @@ types: version: type: integer docs: >- - Version number for a Custom Voice. Version numbers should be integers. - The combination of custom_voice_id and version number is unique. + Version number for a Custom Voice. + + + Custom Voices, Prompts, Configs, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Custom Voice. Each update to the Custom Voice increments its + version number. name: type: string - docs: >- - String with the name of the voice to use. Maximum length of 75 - characters. Will be converted to all-uppercase. + docs: The name of the Custom Voice. Maximum length of 75 characters. created_on: type: long - docs: The timestamp when the first version of this prompt was created. + docs: >- + Time at which the Custom Voice was created. Measured in seconds since + the Unix epoch. 
modified_on: type: long - docs: The timestamp when this version of the prompt was created. + docs: >- + Time at which the Custom Voice was last modified. Measured in seconds + since the Unix epoch. base_voice: - type: string - docs: The voice the custom voice is based off of. - speech_rate_multiplier: - type: optional - docs: The speech rate multiplier for this custom voice. + type: ReturnCustomVoiceBaseVoice + docs: The base voice used to create the Custom Voice. parameter_model: - type: string + type: literal<"20240715-4parameter"> docs: >- The name of the parameter model used to define which attributes are - used by `parameters`. + used by the `parameters` field. Currently, only `20240715-4parameter` + is supported as the parameter model. parameters: - type: map - docs: Voice specification for a Config. + type: ReturnCustomVoiceParameters + docs: >- + The specified attributes of a Custom Voice. If a parameter's value is + `0` (default), it will not be included in the response. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json PostedBuiltinToolName: enum: - web_search @@ -234,7 +384,7 @@ types: For more information, see our guide on [using built-in tools](/docs/empathic-voice-interface-evi/tool-use#using-built-in-tools). source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json PostedBuiltinTool: docs: A configuration of a built-in tool to be posted to the server properties: @@ -261,7 +411,25 @@ types: result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json + PostedConfigPromptSpec: + docs: >- + Identifies which prompt to use in a a config OR how to create a new prompt + to use in the config + properties: + id: + type: optional + docs: Identifier for a Prompt. Formatted as a UUID. + version: + type: optional + docs: >- + Version number for a Prompt. Version numbers should be integers. The + combination of configId and version number is unique. + text: + type: optional + docs: Text used to create a new prompt for a particular config. + source: + openapi: stenographer-openapi.json PostedEllmModel: docs: A eLLM model configuration to be posted to the server properties: @@ -272,7 +440,7 @@ types: If omitted, short responses from the eLLM are enabled by default. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json PostedEventMessageSpec: docs: Settings for a specific event_message to be posted to the server properties: @@ -291,7 +459,7 @@ types: If no text is specified, EVI will generate an appropriate message based on its current context and the system prompt. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json PostedEventMessageSpecs: docs: >- Collection of event messages returned by the server. @@ -334,7 +502,7 @@ types: to reaching the maximum chat duration, such as a message mentioning the time limit for the chat has been reached. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json PostedLanguageModelModelProvider: enum: - OPEN_AI @@ -345,7 +513,7 @@ types: - GOOGLE docs: The provider of the supplemental language model. 
source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json PostedLanguageModel: docs: A LanguageModel to be posted to the server properties: @@ -365,29 +533,7 @@ types: yielding focused, deterministic responses and values closer to 1 producing more creative, diverse responses. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedPromptSpec: - docs: A Prompt associated with this Config. - properties: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: optional - docs: >- - Version number for a Prompt. - - - Prompts, as well as Configs and Tools, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine prompts and revert to previous versions if needed. - - - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json PostedTimeoutSpec: docs: Settings for a specific timeout to be posted to the server properties: @@ -398,7 +544,7 @@ types: type: optional docs: Duration in seconds for the timeout. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json PostedTimeoutSpecsInactivity: docs: >- Specifies the duration of user inactivity (in seconds) after which the EVI @@ -424,7 +570,7 @@ types: Duration in seconds for the timeout (e.g. 600 seconds represents 10 minutes). source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json PostedTimeoutSpecsMaxDuration: docs: >- Specifies the maximum allowed duration (in seconds) for an EVI WebSocket @@ -450,7 +596,7 @@ types: Duration in seconds for the timeout (e.g. 600 seconds represents 10 minutes). source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json PostedTimeoutSpecs: docs: >- Collection of timeout specifications returned by the server. @@ -482,7 +628,7 @@ types: Accepts a minimum value of 1 second and a maximum value of 1,800 seconds. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json PostedUserDefinedToolSpec: docs: A specific tool identifier to be posted to the server properties: @@ -495,25 +641,15 @@ types: Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine tools and revert to previous versions if needed. + Tools, Configs, Custom Voices, and Prompts are versioned. This + versioning system supports iterative development, allowing you to + progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedVoiceName: - enum: - - ITO - - DACHER - - KORA - docs: >- - String with the name of the voice to use. Maximum length of 75 characters. - Will be converted to all-uppercase. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json PostedVoice: docs: A Voice specification posted to the server properties: @@ -523,12 +659,23 @@ types: The provider of the voice to use. 
Currently, only `HUME_AI` is supported as the voice provider. name: - type: optional + type: optional docs: >- - String with the name of the voice to use. Maximum length of 75 - characters. Will be converted to all-uppercase. + Specifies the name of the voice to use. + + + This can be either the name of a previously created Custom Voice or + one of our 7 base voices: `ITO`, `KORA`, `DACHER`, `AURA`, `FINN`, + `WHIMSY`, or `STELLA`. + + + The name will be automatically converted to uppercase (e.g., "Ito" + becomes "ITO"). If a name is not specified, then a [Custom + Voice](/reference/empathic-voice-interface-evi/configs/create-config#request.body.voice.custom_voice) + specification must be provided. + custom_voice: optional source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnBuiltinToolToolType: enum: - BUILTIN @@ -537,7 +684,7 @@ types: Type of Tool. Either `BUILTIN` for natively implemented tools, like web search, or `FUNCTION` for user-defined tools. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnBuiltinTool: docs: A specific builtin tool version returned from the server properties: @@ -556,7 +703,7 @@ types: result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnConfig: docs: A specific config version returned from the server properties: @@ -569,14 +716,22 @@ types: Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine configurations and revert to previous versions if needed. + Configs, Prompts, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions if + needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. + evi_version: + type: optional + docs: >- + Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` + for the latest enhanced version. For a detailed comparison of the two + versions, refer to our + [guide](/docs/empathic-voice-interface-evi/evi-2). version_description: type: optional docs: An optional description of the Config version. @@ -625,7 +780,7 @@ types: event_messages: optional timeouts: optional source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnEllmModel: docs: A specific eLLM Model configuration properties: @@ -636,7 +791,7 @@ types: If omitted, short responses from the eLLM are enabled by default. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnEventMessageSpec: docs: A specific event message configuration to be returned from the server properties: @@ -655,7 +810,7 @@ types: If no text is specified, EVI will generate an appropriate message based on its current context and the system prompt. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnEventMessageSpecs: docs: >- Collection of event messages returned by the server. 
@@ -698,7 +853,7 @@ types: to reaching the maximum chat duration, such as a message mentioning the time limit for the chat has been reached. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnLanguageModelModelProvider: enum: - OPEN_AI @@ -709,7 +864,7 @@ types: - GOOGLE docs: The provider of the supplemental language model. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnLanguageModel: docs: A specific LanguageModel properties: @@ -729,7 +884,7 @@ types: yielding focused, deterministic responses and values closer to 1 producing more creative, diverse responses. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnTimeoutSpec: docs: A specific timeout configuration to be returned from the server properties: @@ -749,7 +904,7 @@ types: Duration in seconds for the timeout (e.g. 600 seconds represents 10 minutes). source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnTimeoutSpecs: docs: >- Collection of timeout specifications returned by the server. @@ -781,17 +936,7 @@ types: Accepts a minimum value of 1 second and a maximum value of 1,800 seconds. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnVoiceName: - enum: - - ITO - - DACHER - - KORA - docs: >- - String with the name of the voice to use. Maximum length of 75 characters. - Will be converted to all-uppercase. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnVoice: docs: A specific voice specification properties: @@ -801,22 +946,17 @@ types: The provider of the voice to use. Currently, only `HUME_AI` is supported as the voice provider. name: - type: optional - docs: >- - String with the name of the voice to use. Maximum length of 75 - characters. Will be converted to all-uppercase. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedCustomVoiceName: - docs: A custom voice name change to be posted to the server - properties: - name: - type: string + type: optional docs: >- - String with the name of the voice to use. Maximum length of 75 - characters. Will be converted to all-uppercase. + The name of the specified voice. + + + This will either be the name of a previously created Custom Voice or + one of our 7 base voices: `ITO`, `KORA`, `DACHER`, `AURA`, `FINN`, + `WHIMSY`, or `STELLA`. + custom_voice: ReturnCustomVoice source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnPagedUserDefinedTools: docs: A paginated list of user defined tool versions returned from the server properties: @@ -845,7 +985,7 @@ types: `page_size`. type: list> source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnPagedPrompts: docs: A paginated list of prompt versions returned from the server properties: @@ -874,26 +1014,34 @@ types: `page_size`. type: list> source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnPagedCustomVoices: docs: A paginated list of custom voices returned from the server properties: page_number: type: integer - docs: The page number of the returned results. + docs: >- + The page number of the returned list. 
+ + + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. page_size: type: integer - docs: The number of results returned per page. + docs: >- + The maximum number of items returned per page. + + + This value corresponds to the `page_size` parameter specified in the + request. total_pages: type: integer - docs: The total number of pages in the collection + docs: The total number of pages in the collection. custom_voices_page: - docs: >- - List of custom voices returned for the specified page number and page - size. + docs: List of Custom Voices for the specified `page_number` and `page_size`. type: list source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnPagedConfigs: docs: A paginated list of config versions returned from the server properties: @@ -922,7 +1070,7 @@ types: List of configs returned for the specified `page_number` and `page_size`. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnChatStatus: enum: - ACTIVE @@ -953,7 +1101,7 @@ types: - `ERROR`: The chat ended unexpectedly due to an error. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnChat: docs: A description of chat and its status properties: @@ -1007,7 +1155,7 @@ types: docs: Stringified JSON with additional metadata about the chat. config: optional source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnConfigSpec: docs: The Config associated with this Chat. properties: @@ -1020,16 +1168,17 @@ types: Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine configurations and revert to previous versions if needed. + Configs, Prompts, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions if + needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnPagedChatsPaginationDirection: enum: - ASC @@ -1044,7 +1193,7 @@ types: newest records first). This value corresponds to the `ascending_order` query parameter used in the request. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnPagedChats: docs: A paginated list of chats returned from the server properties: @@ -1084,7 +1233,7 @@ types: `page_number` and `page_size`. type: list source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnChatEventRole: enum: - USER @@ -1107,7 +1256,7 @@ types: - `TOOL`: The function calling mechanism. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnChatEventType: enum: - SYSTEM_PROMPT @@ -1139,7 +1288,7 @@ types: - `FUNCTION_CALL_RESPONSE`: Contains the tool response. 
source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnChatEvent: docs: A description of a single event in a chat returned from the server properties: @@ -1216,7 +1365,7 @@ types: type: optional docs: Stringified JSON with additional metadata about the chat event. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnChatPagedEventsStatus: enum: - ACTIVE @@ -1247,7 +1396,7 @@ types: - `ERROR`: The chat ended unexpectedly due to an error. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnChatPagedEventsPaginationDirection: enum: - ASC @@ -1262,7 +1411,7 @@ types: newest records first). This value corresponds to the `ascending_order` query parameter used in the request. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnChatPagedEvents: docs: >- A description of chat status with a paginated list of chat events returned @@ -1348,7 +1497,7 @@ types: docs: The total number of pages in the collection. config: optional source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnActiveChatCount: docs: A description of current chat chat sessions for a user properties: @@ -1370,7 +1519,7 @@ types: type: optional>> docs: Optional List of chat counts per tag. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnActiveChatCountPerTag: docs: A description of current chat chat sessions per tag properties: @@ -1381,7 +1530,7 @@ types: type: integer docs: The total number of active chats for this user with the specified tag. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnChatGroup: docs: A description of chat_group and its status properties: @@ -1410,7 +1559,7 @@ types: docs: The total number of Chats in this Chat Group. active: optional source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnPagedChatGroupsPaginationDirection: enum: - ASC @@ -1425,7 +1574,7 @@ types: newest records first). This value corresponds to the `ascending_order` query parameter used in the request. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnPagedChatGroups: docs: A paginated list of chat_groups returned from the server properties: @@ -1465,7 +1614,7 @@ types: `page_number` and `page_size`. type: list source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnChatGroupPagedChats: docs: >- A description of chat_group and its status with a paginated list of each @@ -1508,7 +1657,7 @@ types: type: list active: optional source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnChatGroupPagedEventsPaginationDirection: enum: - ASC @@ -1523,7 +1672,7 @@ types: newest records first). This value corresponds to the `ascending_order` query parameter used in the request. 
source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json ReturnChatGroupPagedEvents: docs: >- A paginated list of chat events that occurred across chats in this @@ -1568,7 +1717,13 @@ types: docs: List of Chat Events for the specified `page_number` and `page_size`. type: list source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json + PostedPromptSpec: + docs: A Prompt associated with this Config. + properties: + version: optional + source: + openapi: stenographer-openapi.json AssistantInput: docs: When provided, the input is spoken by EVI. properties: @@ -1597,7 +1752,7 @@ types: back to the user as an [Assistant Message](/reference/empathic-voice-interface-evi/chat/chat#receive.Assistant%20Message.type). source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json AudioConfiguration: properties: encoding: @@ -1612,7 +1767,7 @@ types: Audio sample rate. Number of samples per second in the audio input, measured in Hertz. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json AudioInput: docs: When provided, the input is audio. properties: @@ -1650,7 +1805,7 @@ types: Hume recommends streaming audio with a buffer window of 20 milliseconds (ms), or 100 milliseconds (ms) for web applications. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json BuiltInTool: type: literal<"web_search"> docs: >- @@ -1666,7 +1821,7 @@ types: The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json Context: properties: type: @@ -1703,14 +1858,14 @@ types: be appended to the end of user messages as `{Context: You are a helpful weather assistant}`. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json ContextType: enum: - editable - persistent - temporary source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json Encoding: literal<"linear16"> ErrorLevel: literal<"warn"> PauseAssistantMessage: @@ -1737,7 +1892,7 @@ types: Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json ResumeAssistantMessage: docs: >- Resume responses from EVI. Chat history sent while paused will now be @@ -1763,7 +1918,7 @@ types: Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json SessionSettings: docs: Settings for this chat session. properties: @@ -1892,7 +2047,7 @@ types: type: optional> docs: Dynamic values that can be used to populate EVI prompts. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json Tool: properties: type: @@ -1923,7 +2078,7 @@ types: The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json ToolErrorMessage: docs: When provided, the output is a function call error. 
properties: @@ -1977,7 +2132,7 @@ types: Indicates the severity of an error; for a Tool Error message, this must be `warn` to signal an unexpected event. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json ToolResponseMessage: docs: When provided, the output is a function call response. properties: @@ -2029,13 +2184,13 @@ types: Type of tool called. Either `builtin` for natively implemented tools, like web search, or `function` for user-defined tools. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json ToolType: enum: - builtin - function source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json UserInput: docs: User text to insert into the conversation. properties: @@ -2062,7 +2217,7 @@ types: messages, as the prosody model relies on audio input and cannot process text alone. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json AssistantEnd: docs: When provided, the output is an assistant end message. properties: @@ -2082,7 +2237,7 @@ types: Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json AssistantMessage: docs: When provided, the output is an assistant message. properties: @@ -2119,7 +2274,7 @@ types: from an [Assistant Input message](/reference/empathic-voice-interface-evi/chat/chat#send.Assistant%20Input.text). source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json AudioOutput: docs: When provided, the output is audio. properties: @@ -2145,7 +2300,7 @@ types: client, where it can be decoded and played back as part of the user interaction. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json ChatMessageToolResult: discriminated: false docs: Function call response from client. @@ -2153,7 +2308,7 @@ types: - ToolResponseMessage - ToolErrorMessage source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json ChatMessage: properties: role: @@ -2169,7 +2324,7 @@ types: type: optional docs: Function call response from client. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json ChatMetadata: docs: When provided, the output is a chat metadata message. properties: @@ -2213,7 +2368,7 @@ types: type: optional docs: ID of the initiating request. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json EmotionScores: properties: Admiration: double @@ -2265,7 +2420,7 @@ types: Tiredness: double Triumph: double source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json WebSocketError: docs: When provided, the output is an error message. properties: @@ -2297,7 +2452,7 @@ types: type: string docs: Detailed description of the error. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json Inference: properties: prosody: @@ -2309,7 +2464,7 @@ types: EVI uses the prosody model to measure 48 emotions related to speech and vocal characteristics within a given expression. 
source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json MillisecondInterval: properties: begin: @@ -2319,7 +2474,7 @@ types: type: integer docs: End time of the interval in milliseconds. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json ProsodyInference: properties: scores: @@ -2337,7 +2492,7 @@ types: results](/docs/expression-measurement/faq#how-do-i-interpret-my-results) to learn more. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json Role: enum: - assistant @@ -2346,7 +2501,7 @@ types: - all - tool source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json ToolCallMessage: docs: When provided, the output is a tool call. properties: @@ -2400,7 +2555,7 @@ types: or a [Tool Error message](/reference/empathic-voice-interface-evi/chat/chat#send.Tool%20Error%20Message.type). source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json UserInterruption: docs: When provided, the output is an interruption. properties: @@ -2425,7 +2580,7 @@ types: type: integer docs: Unix timestamp of the detected user interruption. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json UserMessage: docs: When provided, the output is a user message. properties: @@ -2466,7 +2621,7 @@ types: Input](/reference/empathic-voice-interface-evi/chat/chat#send.User%20Input.text) message. source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json JsonMessage: discriminated: false union: @@ -2480,47 +2635,47 @@ types: - ToolResponseMessage - ToolErrorMessage source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json TtsInput: properties: type: optional> source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json TextInput: properties: type: optional> source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json FunctionCallResponseInput: properties: type: optional> source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json ExtendedVoiceArgs: properties: text: string voice_args: VoiceArgs source: - openapi: ../empathic-voice-interface/assistant-openapi.json + openapi: assistant-openapi.json HTTPValidationError: properties: detail: optional> source: - openapi: ../empathic-voice-interface/assistant-openapi.json + openapi: assistant-openapi.json ValidationErrorLocItem: discriminated: false union: - string - integer source: - openapi: ../empathic-voice-interface/assistant-openapi.json + openapi: assistant-openapi.json ValidationError: properties: loc: list msg: string type: string source: - openapi: ../empathic-voice-interface/assistant-openapi.json + openapi: assistant-openapi.json VoiceArgs: properties: voice: optional @@ -2531,11 +2686,11 @@ types: type: optional default: false source: - openapi: ../empathic-voice-interface/assistant-openapi.json + openapi: assistant-openapi.json VoiceNameEnum: enum: - ITO - KORA - DACHER source: - openapi: ../empathic-voice-interface/assistant-openapi.json + openapi: assistant-openapi.json diff --git a/.mock/definition/empathic-voice/chat.yml b/.mock/definition/empathic-voice/chat.yml index d29bb889..c99729ae 100644 --- a/.mock/definition/empathic-voice/chat.yml +++ 
b/.mock/definition/empathic-voice/chat.yml @@ -130,7 +130,7 @@ types: - root.ToolResponseMessage - root.ToolErrorMessage source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json PublishEvent: discriminated: false union: @@ -143,4 +143,4 @@ types: - root.PauseAssistantMessage - root.ResumeAssistantMessage source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + openapi: assistant-asyncapi.json diff --git a/.mock/definition/empathic-voice/chat-groups.yml b/.mock/definition/empathic-voice/chatGroups.yml similarity index 99% rename from .mock/definition/empathic-voice/chat-groups.yml rename to .mock/definition/empathic-voice/chatGroups.yml index 2579288e..7d15a6e3 100644 --- a/.mock/definition/empathic-voice/chat-groups.yml +++ b/.mock/definition/empathic-voice/chatGroups.yml @@ -52,6 +52,8 @@ service: response: docs: Success type: root.ReturnPagedChatGroups + errors: + - root.BadRequestError examples: - query-parameters: page_number: 0 @@ -115,6 +117,8 @@ service: response: docs: Success type: root.ReturnChatGroupPagedEvents + errors: + - root.BadRequestError examples: - path-parameters: id: 697056f0-6c7e-487d-9bd8-9c19df79f05f @@ -441,4 +445,4 @@ service: "Triumph": 0.04107666015625} metadata: '' source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/chats.yml b/.mock/definition/empathic-voice/chats.yml index 5346b7f3..f484d9c9 100644 --- a/.mock/definition/empathic-voice/chats.yml +++ b/.mock/definition/empathic-voice/chats.yml @@ -47,6 +47,8 @@ service: response: docs: Success type: root.ReturnPagedChats + errors: + - root.BadRequestError examples: - query-parameters: page_number: 0 @@ -116,6 +118,8 @@ service: response: docs: Success type: root.ReturnChatPagedEvents + errors: + - root.BadRequestError examples: - path-parameters: id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 @@ -450,4 +454,4 @@ service: id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 version: 0 source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/configs.yml b/.mock/definition/empathic-voice/configs.yml index 50ba0489..95a6c081 100644 --- a/.mock/definition/empathic-voice/configs.yml +++ b/.mock/definition/empathic-voice/configs.yml @@ -45,6 +45,8 @@ service: response: docs: Success type: root.ReturnPagedConfigs + errors: + - root.BadRequestError examples: - query-parameters: page_number: 0 @@ -61,6 +63,7 @@ service: name: Weather Assistant Config created_on: 1715267200693 modified_on: 1715267200693 + evi_version: '2' prompt: id: af699d45-2985-42cc-91b9-af9e5da3bac5 version: 0 @@ -78,7 +81,20 @@ service: Include helpful tips if severe weather is expected. voice: provider: HUME_AI - name: KORA + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 language_model: model_provider: ANTHROPIC model_resource: claude-3-5-sonnet-20240620 @@ -113,13 +129,20 @@ service: name: PostedConfig body: properties: + evi_version: + type: string + docs: >- + Specifies the EVI version to use. Use `"1"` for version 1, or + `"2"` for the latest enhanced version. 
For a detailed comparison + of the two versions, refer to our + [guide](/docs/empathic-voice-interface-evi/evi-2). name: type: string docs: Name applied to all versions of a particular Config. version_description: type: optional docs: An optional description of the Config version. - prompt: optional + prompt: optional voice: type: optional docs: A voice specification associated with this Config. @@ -154,15 +177,18 @@ service: response: docs: Created type: root.ReturnConfig + errors: + - root.BadRequestError examples: - request: name: Weather Assistant Config prompt: id: af699d45-2985-42cc-91b9-af9e5da3bac5 version: 0 + evi_version: '2' voice: provider: HUME_AI - name: KORA + name: SAMPLE VOICE language_model: model_provider: ANTHROPIC model_resource: claude-3-5-sonnet-20240620 @@ -185,6 +211,7 @@ service: name: Weather Assistant Config created_on: 1715275452390 modified_on: 1715275452390 + evi_version: '2' prompt: id: af699d45-2985-42cc-91b9-af9e5da3bac5 version: 0 @@ -202,7 +229,20 @@ service: severe weather is expected. voice: provider: HUME_AI - name: KORA + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 language_model: model_provider: ANTHROPIC model_resource: claude-3-5-sonnet-20240620 @@ -270,6 +310,8 @@ service: response: docs: Success type: root.ReturnPagedConfigs + errors: + - root.BadRequestError examples: - path-parameters: id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 @@ -285,6 +327,7 @@ service: name: Weather Assistant Config created_on: 1715275452390 modified_on: 1715275452390 + evi_version: '2' prompt: id: af699d45-2985-42cc-91b9-af9e5da3bac5 version: 0 @@ -302,7 +345,20 @@ service: Include helpful tips if severe weather is expected. voice: provider: HUME_AI - name: KORA + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 language_model: model_provider: ANTHROPIC model_resource: claude-3-5-sonnet-20240620 @@ -341,10 +397,13 @@ service: name: PostedConfigVersion body: properties: + evi_version: + type: string + docs: The version of the EVI used with this config. version_description: type: optional docs: An optional description of the Config version. - prompt: optional + prompt: optional voice: type: optional docs: A voice specification associated with this Config version. @@ -380,11 +439,14 @@ service: response: docs: Created type: root.ReturnConfig + errors: + - root.BadRequestError examples: - path-parameters: id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 request: version_description: This is an updated version of the Weather Assistant Config. 
+ evi_version: '2' prompt: id: af699d45-2985-42cc-91b9-af9e5da3bac5 version: 0 @@ -415,6 +477,7 @@ service: name: Weather Assistant Config created_on: 1715275452390 modified_on: 1722642242998 + evi_version: '2' prompt: id: af699d45-2985-42cc-91b9-af9e5da3bac5 version: 0 @@ -433,6 +496,19 @@ service: voice: provider: HUME_AI name: ITO + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 language_model: model_provider: ANTHROPIC model_resource: claude-3-5-sonnet-20240620 @@ -467,6 +543,8 @@ service: type: string docs: Identifier for a Config. Formatted as a UUID. display-name: Delete config + errors: + - root.BadRequestError examples: - path-parameters: id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 @@ -489,6 +567,8 @@ service: response: docs: Success type: text + errors: + - root.BadRequestError examples: - path-parameters: id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 @@ -508,7 +588,7 @@ service: Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This + Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. @@ -521,6 +601,8 @@ service: response: docs: Success type: root.ReturnConfig + errors: + - root.BadRequestError examples: - path-parameters: id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 @@ -533,6 +615,7 @@ service: name: Weather Assistant Config created_on: 1715275452390 modified_on: 1715275452390 + evi_version: '2' prompt: id: af699d45-2985-42cc-91b9-af9e5da3bac5 version: 0 @@ -550,7 +633,20 @@ service: severe weather is expected. voice: provider: HUME_AI - name: KORA + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 language_model: model_provider: ANTHROPIC model_resource: claude-3-5-sonnet-20240620 @@ -590,7 +686,7 @@ service: Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This + Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. @@ -600,6 +696,8 @@ service: of the Config. Each update to the Config increments its version number. display-name: Delete config version + errors: + - root.BadRequestError examples: - path-parameters: id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 @@ -618,7 +716,7 @@ service: Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This + Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. 
@@ -638,6 +736,8 @@ service: response: docs: Success type: root.ReturnConfig + errors: + - root.BadRequestError examples: - path-parameters: id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 @@ -652,6 +752,7 @@ service: name: Weather Assistant Config created_on: 1715275452390 modified_on: 1715275452390 + evi_version: '2' prompt: id: af699d45-2985-42cc-91b9-af9e5da3bac5 version: 0 @@ -669,7 +770,20 @@ service: severe weather is expected. voice: provider: HUME_AI - name: KORA + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 language_model: model_provider: ANTHROPIC model_resource: claude-3-5-sonnet-20240620 @@ -696,4 +810,4 @@ service: enabled: true duration_secs: 1800 source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/customVoices.yml b/.mock/definition/empathic-voice/customVoices.yml new file mode 100644 index 00000000..6fa448ad --- /dev/null +++ b/.mock/definition/empathic-voice/customVoices.yml @@ -0,0 +1,197 @@ +imports: + root: __package__.yml +service: + auth: false + base-path: '' + endpoints: + getReturnCustomVoicesForUser: + path: /v0/evi/custom_voices + method: GET + auth: true + display-name: List custom voices + request: + name: GetReturnCustomVoicesForUserRequest + query-parameters: + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. + + + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. + + + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + name: + type: optional + docs: Filter to only include custom voices with this name. + response: + docs: Success + type: root.ReturnPagedCustomVoices + errors: + - root.BadRequestError + examples: + - response: + body: + page_number: 1 + page_size: 1 + total_pages: 1 + custom_voices_page: + - id: id + version: 1 + name: name + created_on: 1000000 + modified_on: 1000000 + base_voice: ITO + parameter_model: 20240715-4parameter + parameters: {} + createNewCustomVoice: + path: /v0/evi/custom_voices + method: POST + auth: true + display-name: Create custom voice + request: + body: root.PostedCustomVoice + response: + docs: Created + type: root.ReturnCustomVoice + errors: + - root.BadRequestError + examples: + - request: + name: name + base_voice: ITO + parameter_model: 20240715-4parameter + response: + body: + id: id + version: 1 + name: name + created_on: 1000000 + modified_on: 1000000 + base_voice: ITO + parameter_model: 20240715-4parameter + parameters: + gender: 1 + huskiness: 1 + nasality: 1 + pitch: 1 + getReturnCustomVoiceByCustomVoiceId: + path: /v0/evi/custom_voices/{id} + method: GET + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. 
+ display-name: Get specific custom voice by ID + response: + docs: Success + type: root.ReturnCustomVoice + errors: + - root.BadRequestError + examples: + - path-parameters: + id: id + response: + body: + id: id + version: 1 + name: name + created_on: 1000000 + modified_on: 1000000 + base_voice: ITO + parameter_model: 20240715-4parameter + parameters: + gender: 1 + huskiness: 1 + nasality: 1 + pitch: 1 + addNewCustomVoiceVersion: + path: /v0/evi/custom_voices/{id} + method: POST + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. + display-name: Create new version of existing custom voice + request: + body: root.PostedCustomVoice + response: + docs: Created + type: root.ReturnCustomVoice + errors: + - root.BadRequestError + examples: + - path-parameters: + id: id + request: + name: name + base_voice: ITO + parameter_model: 20240715-4parameter + response: + body: + id: id + version: 1 + name: name + created_on: 1000000 + modified_on: 1000000 + base_voice: ITO + parameter_model: 20240715-4parameter + parameters: + gender: 1 + huskiness: 1 + nasality: 1 + pitch: 1 + deleteCustomVoice: + path: /v0/evi/custom_voices/{id} + method: DELETE + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. + display-name: Delete a custom voice + errors: + - root.BadRequestError + examples: + - path-parameters: + id: id + updateCustomVoiceName: + path: /v0/evi/custom_voices/{id} + method: PATCH + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. + display-name: Update custom voice name + request: + name: PostedCustomVoiceName + body: + properties: + name: + type: string + docs: >- + The name of the Custom Voice. Maximum length of 75 characters. + Will be converted to all-uppercase. (e.g., "sample voice" + becomes "SAMPLE VOICE") + response: + docs: Success + type: text + errors: + - root.BadRequestError + source: + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/prompts.yml b/.mock/definition/empathic-voice/prompts.yml index 36cd5fbd..3eb701ea 100644 --- a/.mock/definition/empathic-voice/prompts.yml +++ b/.mock/definition/empathic-voice/prompts.yml @@ -48,6 +48,8 @@ service: response: docs: Success type: root.ReturnPagedPrompts + errors: + - root.BadRequestError examples: - query-parameters: page_number: 0 @@ -122,6 +124,8 @@ service: response: docs: Created type: optional + errors: + - root.BadRequestError examples: - request: name: Weather Assistant Prompt @@ -189,6 +193,8 @@ service: response: docs: Success type: root.ReturnPagedPrompts + errors: + - root.BadRequestError examples: - path-parameters: id: af699d45-2985-42cc-91b9-af9e5da3bac5 @@ -248,6 +254,8 @@ service: response: docs: Created type: optional + errors: + - root.BadRequestError examples: - path-parameters: id: af699d45-2985-42cc-91b9-af9e5da3bac5 @@ -285,6 +293,8 @@ service: type: string docs: Identifier for a Prompt. Formatted as a UUID. display-name: Delete prompt + errors: + - root.BadRequestError examples: - path-parameters: id: af699d45-2985-42cc-91b9-af9e5da3bac5 @@ -307,6 +317,8 @@ service: response: docs: Success type: text + errors: + - root.BadRequestError examples: - path-parameters: id: af699d45-2985-42cc-91b9-af9e5da3bac5 @@ -326,7 +338,7 @@ service: Version number for a Prompt. - Prompts, as well as Configs and Tools, are versioned. This + Prompts, Configs, Custom Voices, and Tools are versioned. 
This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. @@ -339,6 +351,8 @@ service: response: docs: Success type: optional + errors: + - root.BadRequestError examples: - path-parameters: id: af699d45-2985-42cc-91b9-af9e5da3bac5 @@ -373,7 +387,7 @@ service: Version number for a Prompt. - Prompts, as well as Configs and Tools, are versioned. This + Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. @@ -383,6 +397,8 @@ service: of the Prompt. Each update to the Prompt increments its version number. display-name: Delete prompt version + errors: + - root.BadRequestError examples: - path-parameters: id: af699d45-2985-42cc-91b9-af9e5da3bac5 @@ -401,7 +417,7 @@ service: Version number for a Prompt. - Prompts, as well as Configs and Tools, are versioned. This + Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. @@ -421,6 +437,8 @@ service: response: docs: Success type: optional + errors: + - root.BadRequestError examples: - path-parameters: id: af699d45-2985-42cc-91b9-af9e5da3bac5 @@ -444,4 +462,4 @@ service: conditions, and any weather alerts. Include helpful tips if severe weather is expected. source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/tools.yml b/.mock/definition/empathic-voice/tools.yml index caf08abd..a7fd95aa 100644 --- a/.mock/definition/empathic-voice/tools.yml +++ b/.mock/definition/empathic-voice/tools.yml @@ -48,6 +48,8 @@ service: response: docs: Success type: root.ReturnPagedUserDefinedTools + errors: + - root.BadRequestError examples: - query-parameters: page_number: 0 @@ -134,6 +136,8 @@ service: response: docs: Created type: optional + errors: + - root.BadRequestError examples: - request: name: get_current_weather @@ -212,6 +216,8 @@ service: response: docs: Success type: root.ReturnPagedUserDefinedTools + errors: + - root.BadRequestError examples: - path-parameters: id: 00183a3f-79ba-413d-9f3b-609864268bea @@ -282,6 +288,8 @@ service: response: docs: Created type: optional + errors: + - root.BadRequestError examples: - path-parameters: id: 00183a3f-79ba-413d-9f3b-609864268bea @@ -328,6 +336,8 @@ service: type: string docs: Identifier for a Tool. Formatted as a UUID. display-name: Delete tool + errors: + - root.BadRequestError examples: - path-parameters: id: 00183a3f-79ba-413d-9f3b-609864268bea @@ -350,6 +360,8 @@ service: response: docs: Success type: text + errors: + - root.BadRequestError examples: - path-parameters: id: 00183a3f-79ba-413d-9f3b-609864268bea @@ -369,7 +381,7 @@ service: Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This + Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. @@ -381,6 +393,8 @@ service: response: docs: Success type: optional + errors: + - root.BadRequestError examples: - path-parameters: id: 00183a3f-79ba-413d-9f3b-609864268bea @@ -420,7 +434,7 @@ service: Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. 
This + Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. @@ -429,6 +443,8 @@ service: Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. display-name: Delete tool version + errors: + - root.BadRequestError examples: - path-parameters: id: 00183a3f-79ba-413d-9f3b-609864268bea @@ -447,7 +463,7 @@ service: Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This + Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. @@ -466,6 +482,8 @@ service: response: docs: Success type: optional + errors: + - root.BadRequestError examples: - path-parameters: id: 00183a3f-79ba-413d-9f3b-609864268bea @@ -498,4 +516,4 @@ service: temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] } source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + openapi: stenographer-openapi.json diff --git a/.mock/definition/expression-measurement/batch.yml b/.mock/definition/expression-measurement/batch/__package__.yml similarity index 88% rename from .mock/definition/expression-measurement/batch.yml rename to .mock/definition/expression-measurement/batch/__package__.yml index 5b39f389..8fb80472 100644 --- a/.mock/definition/expression-measurement/batch.yml +++ b/.mock/definition/expression-measurement/batch/__package__.yml @@ -361,7 +361,7 @@ service: body: job_id: job_id source: - openapi: ../expression-measurement/batch-files-openapi.yml + openapi: batch-files-openapi.yml types: Alternative: literal<"language_only"> Bcp47Tag: @@ -405,7 +405,7 @@ types: - tr - uk source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json BoundingBox: docs: A bounding box around a face. properties: @@ -422,7 +422,7 @@ types: type: double docs: Bounding box height. source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json BurstPrediction: properties: time: TimeInterval @@ -433,7 +433,7 @@ types: docs: Modality-specific descriptive features and their scores. type: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Classification: map CompletedEmbeddingGeneration: properties: @@ -447,7 +447,7 @@ types: type: long docs: When this job ended (Unix timestamp in milliseconds). source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json CompletedInference: properties: created_timestamp_ms: @@ -466,7 +466,7 @@ types: type: uint64 docs: The number of errors that occurred while running this job. source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json CompletedTlInference: properties: created_timestamp_ms: @@ -485,7 +485,7 @@ types: type: uint64 docs: The number of errors that occurred while running this job. 
source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json CompletedTraining: properties: created_timestamp_ms: @@ -500,28 +500,28 @@ types: custom_model: TrainingCustomModel alternatives: optional> source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json CustomModelPrediction: properties: output: map error: string task_type: string source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json CustomModelRequest: properties: name: string description: optional tags: optional> source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Dataset: discriminated: false union: - DatasetId - DatasetVersionId source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json DatasetId: properties: id: @@ -529,7 +529,7 @@ types: validation: format: uuid source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json DatasetVersionId: properties: version_id: @@ -537,7 +537,7 @@ types: validation: format: uuid source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json DescriptionsScore: properties: name: @@ -547,20 +547,20 @@ types: type: float docs: Embedding value for the descriptive feature being expressed. source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Direction: enum: - asc - desc source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json EmbeddingGenerationBaseRequest: properties: registry_file_details: type: optional> docs: File ID and File URL pairs for an asset registry file source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json EmotionScore: properties: name: @@ -570,7 +570,7 @@ types: type: float docs: Embedding value for the emotion being expressed. source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Error: properties: message: @@ -580,12 +580,12 @@ types: type: string docs: A file path relative to the top level source URL or file. source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json EvaluationArgs: properties: validation: optional source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Face: docs: >- The Facial Emotional Expression model analyzes human facial expressions in @@ -633,7 +633,7 @@ types: created by each job. default: false source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json FacePrediction: properties: frame: @@ -656,14 +656,14 @@ types: type: optional> docs: Modality-specific descriptive features and their scores. source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json FacemeshPrediction: properties: emotions: docs: A high-dimensional embedding in emotion space. type: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json FacsScore: properties: name: @@ -673,7 +673,7 @@ types: type: float docs: Embedding value for the FACS 2.0 feature being expressed. source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Failed: properties: created_timestamp_ms: @@ -689,7 +689,7 @@ types: type: string docs: An error message. 
source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json File: docs: The list of files submitted for analysis. properties: @@ -703,7 +703,7 @@ types: type: string docs: The MD5 checksum of the file. source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Granularity: enum: - word @@ -741,7 +741,7 @@ types: turn-level granularity for our Emotional Language model will produce results for the entire passage. source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json GroupedPredictionsBurstPrediction: properties: id: @@ -752,7 +752,7 @@ types: or if the model is unable to distinguish between individuals. predictions: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json GroupedPredictionsFacePrediction: properties: id: @@ -763,7 +763,7 @@ types: or if the model is unable to distinguish between individuals. predictions: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json GroupedPredictionsFacemeshPrediction: properties: id: @@ -774,7 +774,7 @@ types: or if the model is unable to distinguish between individuals. predictions: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json GroupedPredictionsLanguagePrediction: properties: id: @@ -785,7 +785,7 @@ types: or if the model is unable to distinguish between individuals. predictions: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json GroupedPredictionsNerPrediction: properties: id: @@ -796,7 +796,7 @@ types: or if the model is unable to distinguish between individuals. predictions: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json GroupedPredictionsProsodyPrediction: properties: id: @@ -807,7 +807,7 @@ types: or if the model is unable to distinguish between individuals. predictions: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json InProgress: properties: created_timestamp_ms: @@ -817,7 +817,7 @@ types: type: long docs: When this job started (Unix timestamp in milliseconds). source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json InferenceBaseRequest: properties: models: @@ -856,7 +856,7 @@ types: completion/failure. default: false source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json InferencePrediction: properties: file: @@ -864,7 +864,7 @@ types: docs: A file path relative to the top level source URL or file. models: ModelsPredictions source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json InferenceRequest: properties: models: optional @@ -895,13 +895,13 @@ types: default: false files: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json InferenceResults: properties: predictions: list errors: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json InferenceSourcePredictResult: properties: source: Source @@ -910,7 +910,7 @@ types: type: optional docs: An error message. 
source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json JobEmbeddingGeneration: properties: job_id: @@ -925,7 +925,7 @@ types: request: EmbeddingGenerationBaseRequest state: StateEmbeddingGeneration source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json JobInference: properties: job_id: @@ -940,7 +940,7 @@ types: type: StateInference docs: The current state of the job. source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json JobTlInference: properties: job_id: @@ -955,7 +955,7 @@ types: request: TlInferenceBaseRequest state: StateTlInference source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json JobTraining: properties: job_id: @@ -970,7 +970,7 @@ types: request: TrainingBaseRequest state: StateTraining source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json JobId: properties: job_id: @@ -979,7 +979,7 @@ types: validation: format: uuid source: - openapi: ../expression-measurement/batch-files-openapi.yml + openapi: batch-files-openapi.yml Language: docs: >- The Emotional Language model analyzes passages of text. This also supports @@ -1001,7 +1001,7 @@ types: `unknown` ID. default: false source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json LanguagePrediction: properties: text: @@ -1044,7 +1044,7 @@ types: classified into the following categories: `toxic`, `severe_toxic`, `obscene`, `threat`, `insult`, and `identity_hate`. source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Models: docs: The models used for inference. properties: @@ -1055,7 +1055,7 @@ types: ner: optional facemesh: optional source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json ModelsPredictions: properties: face: optional @@ -1065,7 +1065,7 @@ types: ner: optional facemesh: optional source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Ner: docs: >- The NER (Named-entity Recognition) model identifies real-world objects and @@ -1084,7 +1084,7 @@ types: `unknown` ID. default: false source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json NerPrediction: properties: entity: @@ -1120,7 +1120,7 @@ types: docs: A high-dimensional embedding in emotion space. type: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json 'Null': type: map docs: No associated metadata for this model. Value will be `null`. @@ -1137,43 +1137,43 @@ types: type: uint64 docs: The index of the last character in the text segment, exclusive. 
source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json PredictionsOptionalNullBurstPrediction: properties: metadata: optional grouped_predictions: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json PredictionsOptionalNullFacePrediction: properties: metadata: optional grouped_predictions: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json PredictionsOptionalNullFacemeshPrediction: properties: metadata: optional grouped_predictions: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json PredictionsOptionalTranscriptionMetadataLanguagePrediction: properties: metadata: optional grouped_predictions: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json PredictionsOptionalTranscriptionMetadataNerPrediction: properties: metadata: optional grouped_predictions: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json PredictionsOptionalTranscriptionMetadataProsodyPrediction: properties: metadata: optional grouped_predictions: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Prosody: docs: >- The Speech Prosody model analyzes the intonation, stress, and rhythm of @@ -1193,7 +1193,7 @@ types: `unknown` ID. default: false source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json ProsodyPrediction: properties: text: @@ -1214,14 +1214,14 @@ types: docs: A high-dimensional embedding in emotion space. type: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Queued: properties: created_timestamp_ms: type: long docs: When this job was created (Unix timestamp in milliseconds). 
source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json RegistryFileDetail: properties: file_id: @@ -1231,7 +1231,7 @@ types: type: string docs: URL to the file in the Asset Registry source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Regression: map SentimentScore: properties: @@ -1242,14 +1242,14 @@ types: type: float docs: Prediction for this level of sentiment source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json SortBy: enum: - created - started - ended source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Source: discriminated: false union: @@ -1257,26 +1257,26 @@ types: - SourceFile - SourceTextSource source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json SourceFile: properties: type: literal<"file"> extends: - File source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json SourceTextSource: properties: type: literal<"text"> source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json SourceUrl: properties: type: literal<"url"> extends: - Url source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateEmbeddingGeneration: discriminated: false union: @@ -1285,35 +1285,35 @@ types: - StateEmbeddingGenerationCompletedEmbeddingGeneration - StateEmbeddingGenerationFailed source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateEmbeddingGenerationCompletedEmbeddingGeneration: properties: status: literal<"COMPLETED"> extends: - CompletedEmbeddingGeneration source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateEmbeddingGenerationFailed: properties: status: literal<"FAILED"> extends: - Failed source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateEmbeddingGenerationInProgress: properties: status: literal<"IN_PROGRESS"> extends: - InProgress source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateEmbeddingGenerationQueued: properties: status: literal<"QUEUED"> extends: - Queued source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateInference: discriminated: false union: @@ -1322,35 +1322,35 @@ types: - CompletedState - FailedState source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json CompletedState: properties: status: literal<"COMPLETED"> extends: - CompletedInference source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json FailedState: properties: status: literal<"FAILED"> extends: - Failed source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json InProgressState: properties: status: literal<"IN_PROGRESS"> extends: - InProgress source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json QueuedState: properties: status: literal<"QUEUED"> extends: - Queued source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateTlInference: discriminated: false union: @@ -1359,35 +1359,35 @@ types: - StateTlInferenceCompletedTlInference - StateTlInferenceFailed source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateTlInferenceCompletedTlInference: properties: 
status: literal<"COMPLETED"> extends: - CompletedTlInference source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateTlInferenceFailed: properties: status: literal<"FAILED"> extends: - Failed source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateTlInferenceInProgress: properties: status: literal<"IN_PROGRESS"> extends: - InProgress source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateTlInferenceQueued: properties: status: literal<"QUEUED"> extends: - Queued source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateTraining: discriminated: false union: @@ -1396,35 +1396,35 @@ types: - StateTrainingCompletedTraining - StateTrainingFailed source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateTrainingCompletedTraining: properties: status: literal<"COMPLETED"> extends: - CompletedTraining source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateTrainingFailed: properties: status: literal<"FAILED"> extends: - Failed source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateTrainingInProgress: properties: status: literal<"IN_PROGRESS"> extends: - InProgress source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json StateTrainingQueued: properties: status: literal<"QUEUED"> extends: - Queued source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Status: enum: - QUEUED @@ -1432,7 +1432,7 @@ types: - COMPLETED - FAILED source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json TlInferencePrediction: properties: file: @@ -1441,13 +1441,13 @@ types: file_type: string custom_models: map source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json TlInferenceResults: properties: predictions: list errors: list source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json TlInferenceSourcePredictResult: properties: source: Source @@ -1456,13 +1456,13 @@ types: type: optional docs: An error message. source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Tag: properties: key: string value: string source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Target: discriminated: false union: @@ -1470,24 +1470,24 @@ types: - double - string source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Task: discriminated: false union: - TaskClassification - TaskRegression source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json TaskClassification: properties: type: literal<"classification"> source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json TaskRegression: properties: type: literal<"regression"> source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json TextSource: map TimeInterval: docs: A time range with a beginning and end, measured in seconds. @@ -1499,7 +1499,7 @@ types: type: double docs: End of time range in seconds. 
source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json TlInferenceBaseRequest: properties: custom_model: CustomModel @@ -1525,24 +1525,24 @@ types: completion/failure. default: false source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json CustomModel: discriminated: false union: - CustomModelId - CustomModelVersionId source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json CustomModelId: properties: id: string source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json CustomModelVersionId: properties: version_id: string source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json ToxicityScore: properties: name: @@ -1552,7 +1552,7 @@ types: type: float docs: Prediction for this category of toxicity source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json TrainingBaseRequest: properties: custom_model: CustomModelRequest @@ -1568,13 +1568,13 @@ types: type: optional default: false source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json TrainingCustomModel: properties: id: string version_id: optional source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Transcription: docs: |- Transcription-related configuration options. @@ -1668,7 +1668,7 @@ types: min: 0 max: 1 source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json TranscriptionMetadata: docs: Transcription metadata for your media file. properties: @@ -1679,7 +1679,7 @@ types: relative confidence in the transcription of your media file. detected_language: optional source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Type: enum: - EMBEDDING_GENERATION @@ -1687,7 +1687,7 @@ types: - TL_INFERENCE - TRAINING source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Unconfigurable: type: map docs: >- @@ -1700,7 +1700,7 @@ types: extends: - JobEmbeddingGeneration source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json InferenceJob: properties: type: @@ -1714,21 +1714,21 @@ types: extends: - JobInference source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json CustomModelsInferenceJob: properties: type: string extends: - JobTlInference source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json CustomModelsTrainingJob: properties: type: string extends: - JobTraining source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json UnionPredictResult: InferenceSourcePredictResult Url: properties: @@ -1736,18 +1736,18 @@ types: type: string docs: The URL of the source media file. source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json ValidationArgs: properties: positive_label: optional source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json When: enum: - created_before - created_after source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json Window: docs: >- Generate predictions based on time. 
@@ -1771,4 +1771,4 @@ types: validation: min: 0.5 source: - openapi: ../expression-measurement/batch-openapi.json + openapi: batch-openapi.json diff --git a/.mock/definition/expression-measurement/stream.yml b/.mock/definition/expression-measurement/stream/__package__.yml similarity index 90% rename from .mock/definition/expression-measurement/stream.yml rename to .mock/definition/expression-measurement/stream/__package__.yml index 6e37c8ed..6a7fe07f 100644 --- a/.mock/definition/expression-measurement/stream.yml +++ b/.mock/definition/expression-measurement/stream/__package__.yml @@ -30,19 +30,19 @@ types: type: optional docs: ID of the current streaming job. source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamModelPredictionsBurstPredictionsItem: properties: time: optional emotions: optional source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamModelPredictionsBurst: docs: Response for the vocal burst emotion model. properties: predictions: optional> source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamModelPredictionsFacePredictionsItem: properties: frame: @@ -64,24 +64,24 @@ types: facs: optional descriptions: optional source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamModelPredictionsFace: docs: Response for the facial expression emotion model. properties: predictions: optional> source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamModelPredictionsFacemeshPredictionsItem: properties: emotions: optional source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamModelPredictionsFacemesh: docs: Response for the facemesh emotion model. properties: predictions: optional> source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamModelPredictionsLanguagePredictionsItem: properties: text: @@ -92,25 +92,25 @@ types: sentiment: optional toxicity: optional source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamModelPredictionsLanguage: docs: Response for the language emotion model. properties: predictions: optional> source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamModelPredictionsProsodyPredictionsItem: properties: time: optional emotions: optional source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamModelPredictionsProsody: docs: Response for the speech prosody emotion model. properties: predictions: optional> source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml Config: docs: > Configuration used to specify which models should be used and with what @@ -153,7 +153,7 @@ types: Please use the default configuration by passing an empty object `{}`. source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml JobDetails: docs: > If the job_details flag was set in the request, details about the current @@ -163,7 +163,7 @@ types: type: optional docs: ID of the current streaming job. 
source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamErrorMessage: docs: Error message properties: @@ -184,7 +184,7 @@ types: If the job_details flag was set in the request, details about the current streaming job will be returned in the response body. source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamWarningMessageJobDetails: docs: > If the job_details flag was set in the request, details about the current @@ -194,7 +194,7 @@ types: type: optional docs: ID of the current streaming job. source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamWarningMessage: docs: Warning message properties: @@ -215,7 +215,7 @@ types: If the job_details flag was set in the request, details about the current streaming job will be returned in the response body. source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml SubscribeEvent: discriminated: false union: @@ -226,7 +226,7 @@ types: - type: StreamWarningMessage docs: Warning message source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamModelsEndpointPayloadModelsFace: docs: > Configuration for the facial expression emotion model. @@ -275,7 +275,7 @@ types: threshold will be omitted from the response. default: 3 source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamModelsEndpointPayloadModelsLanguage: docs: Configuration for the language emotion model. properties: @@ -297,7 +297,7 @@ types: the entire text of your streaming payload use `passage`. Default value is `word`. source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml StreamModelsEndpointPayload: docs: Models endpoint payload properties: @@ -384,7 +384,7 @@ types: face: optional language: optional source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml EmotionEmbeddingItem: properties: name: @@ -394,7 +394,7 @@ types: type: optional docs: Embedding value for the emotion being expressed. source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml EmotionEmbedding: docs: A high-dimensional embedding in emotion space. type: list @@ -422,7 +422,7 @@ types: validation: min: 0 source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml TimeRange: docs: A time range with a beginning and end, measured in seconds. properties: @@ -437,7 +437,7 @@ types: validation: min: 0 source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml TextPosition: docs: > Position of a segment of text within a larger document, measured in @@ -455,7 +455,7 @@ types: validation: min: 0 source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml SentimentItem: properties: name: @@ -465,7 +465,7 @@ types: type: optional docs: Prediction for this level of sentiment source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml Sentiment: docs: >- Sentiment predictions returned as a distribution. 
This model predicts the @@ -490,7 +490,7 @@ types: type: optional docs: Prediction for this category of toxicity source: - openapi: ../expression-measurement/streaming-asyncapi.yml + openapi: streaming-asyncapi.yml Toxicity: docs: >- Toxicity predictions returned as probabilities that the text can be diff --git a/.mock/fern.config.json b/.mock/fern.config.json index d9a9a190..ba052cad 100644 --- a/.mock/fern.config.json +++ b/.mock/fern.config.json @@ -1,4 +1,4 @@ { "organization" : "hume", - "version" : "0.40.2" + "version" : "0.41.9" } \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 027754f8..fed8992d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -146,8 +146,8 @@ files = [ lazy-object-proxy = ">=1.4.0" typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} wrapt = [ - {version = ">=1.11,<2", markers = "python_version < \"3.11\""}, {version = ">=1.14,<2", markers = "python_version >= \"3.11\""}, + {version = ">=1.11,<2", markers = "python_version < \"3.11\""}, ] [[package]] @@ -482,9 +482,6 @@ files = [ {file = "covcheck-0.4.3.tar.gz", hash = "sha256:2c7bbb7e6a5f6992b63cae75a319fa8c883161401ac7035f577b5ec0f2ad0b90"}, ] -[package.dependencies] -toml = {version = ">=0.10.2,<0.11.0", optional = true, markers = "extra == \"toml\""} - [package.extras] toml = ["toml (>=0.10.2,<0.11.0)"] @@ -1837,8 +1834,8 @@ files = [ annotated-types = ">=0.6.0" pydantic-core = "2.23.3" typing-extensions = [ - {version = ">=4.6.1", markers = "python_version < \"3.13\""}, {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, ] [package.extras] @@ -2017,8 +2014,8 @@ files = [ astroid = ">=2.15.8,<=2.17.0-dev0" colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} dill = [ - {version = ">=0.2", markers = "python_version < \"3.11\""}, {version = ">=0.3.6", markers = "python_version >= \"3.11\""}, + {version = ">=0.2", markers = "python_version < \"3.11\""}, ] isort = ">=4.2.5,<6" mccabe = ">=0.6,<0.8" @@ -2739,17 +2736,6 @@ webencodings = ">=0.4" doc = ["sphinx", "sphinx_rtd_theme"] test = ["pytest", "ruff"] -[[package]] -name = "toml" -version = "0.10.2" -description = "Python Library for Tom's Obvious, Minimal Language" -optional = false -python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, - {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, -] - [[package]] name = "tomli" version = "2.0.1" @@ -2856,13 +2842,13 @@ dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake [[package]] name = "urllib3" -version = "2.2.2" +version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = true python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, - {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, ] [package.extras] @@ -3122,4 +3108,4 @@ microphone = ["pydub", "simpleaudio", "sounddevice"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "6c05f984cb5e08248fe440717430eba30a9ceff1f453583f6f7576cf65ca0979" +content-hash = "916a3fb288d746e26c066c8a9c4aad97c8db7e679db41379634ba3538f9c98e0" diff --git a/pyproject.toml b/pyproject.toml index 4e0886f6..d9a5a49b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,24 +1,9 @@ [tool.poetry] -authors = [] -classifiers = [ - "Intended Audience :: Developers", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Operating System :: OS Independent", - "Operating System :: POSIX", - "Operating System :: MacOS", - "Operating System :: POSIX :: Linux", - "Operating System :: Microsoft :: Windows", - "Topic :: Software Development :: Libraries :: Python Modules", - "Typing :: Typed", - "License :: OSI Approved :: MIT License", -] +name = "hume" +version = "0.6.1" description = "A Python SDK for Hume AI" +readme = "README.md" +authors = [] keywords = [ "hume", "ai", @@ -39,13 +24,30 @@ keywords = [ "expressive", "embeddings", "communication", - "learning", + "learning" ] license = "MIT" -name = "hume" -packages = [{ include = "hume", from = "src" }] -readme = "README.md" -version = "0.7.0-rc2" +classifiers = [ + "Intended Audience :: Developers", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Operating System :: OS Independent", + "Operating System :: POSIX", + "Operating System :: MacOS", + "Operating System :: POSIX :: Linux", + "Operating System :: Microsoft :: Windows", + "Topic :: Software Development :: Libraries :: Python Modules", + "Typing :: Typed", + "License :: OSI Approved :: MIT License" +] +packages = [ + { include = "hume", from = "src"} +] [project.urls] Documentation = 'hhttps://dev.hume.ai/reference' @@ -53,58 +55,51 @@ Homepage = 'https://www.hume.ai/' Repository = 'https://github.com/HumeAI/hume-python-sdk' [tool.poetry.dependencies] +python = ">=3.9,<4" aiofiles = "^24.1.0" eval-type-backport = "^0.2.0" httpx = ">=0.21.2" -jupyter = { version = "^1.0.0", optional = true } +jupyter = { version="^1.0.0", optional = true} pydantic = ">= 1.9.2" pydantic-core = "^2.18.2" -pydub = { version = "^0.25.1", optional = true } -python = ">=3.9,<4" -simpleaudio = { version = "^1.0.4", optional = true } -sounddevice = { version = "^0.4.6", optional = true } +pydub = { version="^0.25.1", optional = true} +simpleaudio = { version="^1.0.4", optional = true} +sounddevice = { version="^0.4.6", optional = 
true} typing_extensions = ">= 4.0.0" websockets = "12.0" [tool.poetry.dev-dependencies] -covcheck = { version = "^0.4.3", extras = ["toml"] } mypy = "1.0.1" +pytest = "^7.4.0" +pytest-asyncio = "^0.23.5" +python-dateutil = "^2.9.0" +types-python-dateutil = "^2.9.0.20240316" +covcheck = "^0.4.3" pydocstyle = "^6.1.1" pydub-stubs = "^0.25.1" pylint = "^2.16.2" -pytest = "^7.4.0" -pytest-asyncio = "^0.23.5" pytest-cov = "^4.0.0" -python-dateutil = "^2.9.0" ruff = "^0.5.6" semver = "^2.13.0" testbook = "^0.4.2" types-aiofiles = "^24.1.0.20240626" -types-python-dateutil = "^2.9.0.20240316" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry.extras] -examples=["jupyter"] -microphone=["pydub", "simpleaudio", "sounddevice"] -legacy=["pydub"] - -[tool.covcheck.group.unit.coverage] -branch = 26.0 -line = 62.0 - -[tool.covcheck.group.service.coverage] -branch = 30.0 -line = 67.0 [tool.pytest.ini_options] +testpaths = [ "tests" ] asyncio_mode = "auto" -testpaths = ["tests"] [tool.mypy] plugins = ["pydantic.mypy"] [tool.ruff] line-length = 120 + + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry.extras] +examples=["jupyter"] +microphone=["pydub", "simpleaudio", "sounddevice"] +legacy=["pydub"] diff --git a/reference.md b/reference.md index dd44f8f9..17e7c182 100644 --- a/reference.md +++ b/reference.md @@ -1088,7 +1088,7 @@ client.empathic_voice.tools.get_tool_version( Version number for a Tool. -Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. +Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. @@ -1159,7 +1159,7 @@ client.empathic_voice.tools.delete_tool_version( Version number for a Tool. -Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. +Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. @@ -1231,7 +1231,7 @@ client.empathic_voice.tools.update_tool_description( Version number for a Tool. -Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. +Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. @@ -1773,7 +1773,7 @@ client.empathic_voice.prompts.get_prompt_version( Version number for a Prompt. -Prompts, as well as Configs and Tools, are versioned. 
This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. @@ -1844,7 +1844,7 @@ client.empathic_voice.prompts.delete_prompt_version( Version number for a Prompt. -Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. @@ -1916,7 +1916,7 @@ client.empathic_voice.prompts.update_prompt_description( Version number for a Prompt. -Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. @@ -1942,6 +1942,425 @@ Version numbers are integer values representing different iterations of the Prom + + + + +## EmpathicVoice CustomVoices +
client.empathic_voice.custom_voices.get_return_custom_voices_for_user(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from hume import HumeClient + +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.custom_voices.get_return_custom_voices_for_user() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
+ +
+
+ +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Filter to only include custom voices with this name. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
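When an account holds more Custom Voices than fit on one page, the `page_number` and `page_size` parameters above can drive a simple loop. The sketch below is illustrative only and assumes the generated `ReturnPagedCustomVoices` model exposes `total_pages` and `custom_voices_page` as attributes, matching the response example in the API definition earlier in this patch.

```python
from hume import HumeClient

client = HumeClient(
    api_key="YOUR_API_KEY",
)

# Walk every page of Custom Voices (attribute names assumed from the
# ReturnPagedCustomVoices example: total_pages, custom_voices_page).
page_number = 0
while True:
    page = client.empathic_voice.custom_voices.get_return_custom_voices_for_user(
        page_number=page_number,
        page_size=10,
    )
    for voice in page.custom_voices_page:
        print(voice.id, voice.name, voice.base_voice)
    page_number += 1
    if page_number >= page.total_pages:
        break
```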
client.empathic_voice.custom_voices.create_new_custom_voice(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from hume import HumeClient + +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.custom_voices.create_new_custom_voice( + name="name", + base_voice="ITO", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") + +
+
+ +
+
+ +**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. + +
+
+ +
+
+ +**parameters:** `typing.Optional[PostedCustomVoiceParameters]` + +The specified attributes of a Custom Voice. + +If no parameters are specified then all attributes will be set to their defaults, meaning no modifications will be made to the base voice. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
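To create a voice with explicit attribute adjustments rather than the defaults, a `parameters` value can be supplied. This is a sketch only: the `gender`, `huskiness`, `nasality`, and `pitch` keyword arguments are assumed from the `20240715-4parameter` examples in this patch and are not confirmed against the generated `PostedCustomVoiceParameters` signature.

```python
from hume import HumeClient
from hume.empathic_voice import PostedCustomVoiceParameters

client = HumeClient(
    api_key="YOUR_API_KEY",
)
created = client.empathic_voice.custom_voices.create_new_custom_voice(
    name="SAMPLE VOICE",
    base_voice="ITO",
    # Field names assumed from the 20240715-4parameter examples above.
    parameters=PostedCustomVoiceParameters(
        gender=-7,
        huskiness=-2,
        nasality=-8,
        pitch=-9,
    ),
)
print(created.id, created.version)
```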
client.empathic_voice.custom_voices.get_return_custom_voice_by_custom_voice_id(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from hume import HumeClient + +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.custom_voices.get_return_custom_voice_by_custom_voice_id( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.empathic_voice.custom_voices.add_new_custom_voice_version(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from hume import HumeClient + +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.custom_voices.add_new_custom_voice_version( + id="id", + name="name", + base_voice="ITO", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. + +
+
+ +
+
+ +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") + +
+
+ +
+
+ +**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. + +
+
+ +
+
+ +**parameters:** `typing.Optional[PostedCustomVoiceParameters]` + +The specified attributes of a Custom Voice. + +If no parameters are specified then all attributes will be set to their defaults, meaning no modifications will be made to the base voice. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
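Since a POST to an existing Custom Voice ID adds a new version rather than creating a new voice, the full `PostedCustomVoice` body is re-submitted each time. A rough sketch, assuming omitted parameter fields fall back to their defaults as described above:

```python
from hume import HumeClient
from hume.empathic_voice import PostedCustomVoiceParameters

client = HumeClient(
    api_key="YOUR_API_KEY",
)
# Re-post the voice definition to mint a new version under the same ID.
new_version = client.empathic_voice.custom_voices.add_new_custom_voice_version(
    id="00aa8ee9-c50e-4ea1-9af0-7b08ad451704",
    name="SAMPLE VOICE",
    base_voice="ITO",
    parameters=PostedCustomVoiceParameters(pitch=4),  # assumes the other fields are optional
)
print(new_version.version)
```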
client.empathic_voice.custom_voices.delete_custom_voice(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from hume import HumeClient + +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.custom_voices.delete_custom_voice( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
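Every Custom Voices endpoint in this definition declares a 400 `BadRequestError`, and the `empathic_voice` package re-exports that error class elsewhere in this patch, so a defensive delete might be sketched as follows (illustrative only):

```python
from hume import HumeClient
from hume.empathic_voice import BadRequestError

client = HumeClient(
    api_key="YOUR_API_KEY",
)
try:
    client.empathic_voice.custom_voices.delete_custom_voice(
        id="00aa8ee9-c50e-4ea1-9af0-7b08ad451704",
    )
except BadRequestError as exc:
    # HTTP 400 responses map to BadRequestError per the API definition above.
    print(f"Delete failed: {exc}")
```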
client.empathic_voice.custom_voices.update_custom_voice_name(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from hume import HumeClient + +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.custom_voices.update_custom_voice_name( + id="string", + name="string", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. + +
+
+ +
+
+ +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ +
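For async applications, the same calls are expected to be available through `AsyncHumeClient`, which this patch re-exports from `src/hume/__init__.py`. The sketch below assumes the asynchronous client mirrors the synchronous method names under the same `custom_voices` namespace; treat the exact surface as unverified.

```python
import asyncio

from hume import AsyncHumeClient


async def main() -> None:
    client = AsyncHumeClient(
        api_key="YOUR_API_KEY",
    )
    # Assumed to mirror the synchronous surface, awaited.
    voices = await client.empathic_voice.custom_voices.get_return_custom_voices_for_user(
        page_number=0,
        page_size=10,
    )
    for voice in voices.custom_voices_page:
        print(voice.name)


asyncio.run(main())
```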
@@ -2051,10 +2470,10 @@ For example, if `page_size` is set to 10, each page will include up to 10 items. ```python from hume import HumeClient from hume.empathic_voice import ( + PostedConfigPromptSpec, PostedEventMessageSpec, PostedEventMessageSpecs, PostedLanguageModel, - PostedPromptSpec, PostedVoice, ) @@ -2063,12 +2482,13 @@ client = HumeClient( ) client.empathic_voice.configs.create_config( name="Weather Assistant Config", - prompt=PostedPromptSpec( + prompt=PostedConfigPromptSpec( id="af699d45-2985-42cc-91b9-af9e5da3bac5", version=0, ), + evi_version="2", voice=PostedVoice( - name="KORA", + name="SAMPLE VOICE", ), language_model=PostedLanguageModel( model_provider="ANTHROPIC", @@ -2105,6 +2525,14 @@ client.empathic_voice.configs.create_config(
+**evi_version:** `str` — Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2). + +
+
+ +
+
+ **name:** `str` — Name applied to all versions of a particular Config.
@@ -2121,7 +2549,7 @@ client.empathic_voice.configs.create_config(
-**prompt:** `typing.Optional[PostedPromptSpec]` +**prompt:** `typing.Optional[PostedConfigPromptSpec]`
@@ -2308,11 +2736,11 @@ For example, if `page_size` is set to 10, each page will include up to 10 items. ```python from hume import HumeClient from hume.empathic_voice import ( + PostedConfigPromptSpec, PostedEllmModel, PostedEventMessageSpec, PostedEventMessageSpecs, PostedLanguageModel, - PostedPromptSpec, PostedVoice, ) @@ -2322,7 +2750,8 @@ client = HumeClient( client.empathic_voice.configs.create_config_version( id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", version_description="This is an updated version of the Weather Assistant Config.", - prompt=PostedPromptSpec( + evi_version="2", + prompt=PostedConfigPromptSpec( id="af699d45-2985-42cc-91b9-af9e5da3bac5", version=0, ), @@ -2375,6 +2804,14 @@ client.empathic_voice.configs.create_config_version(
+**evi_version:** `str` — The version of the EVI used with this config. + +
+
+ +
+
+ **version_description:** `typing.Optional[str]` — An optional description of the Config version.
@@ -2383,7 +2820,7 @@ client.empathic_voice.configs.create_config_version(
-**prompt:** `typing.Optional[PostedPromptSpec]` +**prompt:** `typing.Optional[PostedConfigPromptSpec]`
@@ -2637,7 +3074,7 @@ client.empathic_voice.configs.get_config_version( Version number for a Config. -Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. @@ -2708,7 +3145,7 @@ client.empathic_voice.configs.delete_config_version( Version number for a Config. -Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. @@ -2780,7 +3217,7 @@ client.empathic_voice.configs.update_config_description( Version number for a Config. -Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. diff --git a/src/hume/__init__.py b/src/hume/__init__.py index 13db0f50..fca09232 100644 --- a/src/hume/__init__.py +++ b/src/hume/__init__.py @@ -2,8 +2,9 @@ from . 
import empathic_voice, expression_measurement from .client import AsyncHumeClient, HumeClient +from .empathic_voice.chat.audio.asyncio_utilities import Stream +from .empathic_voice.chat.audio.microphone_interface import MicrophoneInterface from .environment import HumeClientEnvironment -from .legacy._voice.microphone.microphone_interface import MicrophoneInterface from .version import __version__ __all__ = [ @@ -11,6 +12,7 @@ "HumeClient", "HumeClientEnvironment", "MicrophoneInterface", + "Stream", "__version__", "empathic_voice", "expression_measurement", diff --git a/src/hume/empathic_voice/__init__.py b/src/hume/empathic_voice/__init__.py index 2707a5f5..bbfa7115 100644 --- a/src/hume/empathic_voice/__init__.py +++ b/src/hume/empathic_voice/__init__.py @@ -17,6 +17,7 @@ EmotionScores, Encoding, ErrorLevel, + ErrorResponse, ExtendedVoiceArgs, FunctionCallResponseInput, HttpValidationError, @@ -26,8 +27,10 @@ PauseAssistantMessage, PostedBuiltinTool, PostedBuiltinToolName, + PostedConfigPromptSpec, PostedCustomVoice, - PostedCustomVoiceName, + PostedCustomVoiceBaseVoice, + PostedCustomVoiceParameters, PostedEllmModel, PostedEventMessageSpec, PostedEventMessageSpecs, @@ -40,7 +43,6 @@ PostedTimeoutSpecsMaxDuration, PostedUserDefinedToolSpec, PostedVoice, - PostedVoiceName, ProsodyInference, ResumeAssistantMessage, ReturnActiveChatCount, @@ -62,6 +64,8 @@ ReturnConfig, ReturnConfigSpec, ReturnCustomVoice, + ReturnCustomVoiceBaseVoice, + ReturnCustomVoiceParameters, ReturnEllmModel, ReturnEventMessageSpec, ReturnEventMessageSpecs, @@ -83,7 +87,6 @@ ReturnUserDefinedToolToolType, ReturnUserDefinedToolVersionType, ReturnVoice, - ReturnVoiceName, Role, SessionSettings, TextInput, @@ -102,7 +105,8 @@ VoiceNameEnum, WebSocketError, ) -from . import chat, chat_groups, chats, configs, prompts, tools +from .errors import BadRequestError +from . 
import chat, chat_groups, chats, configs, custom_voices, prompts, tools from .chat import PublishEvent, SubscribeEvent __all__ = [ @@ -112,6 +116,7 @@ "AudioConfiguration", "AudioInput", "AudioOutput", + "BadRequestError", "BuiltInTool", "BuiltinToolConfig", "ChatMessage", @@ -122,6 +127,7 @@ "EmotionScores", "Encoding", "ErrorLevel", + "ErrorResponse", "ExtendedVoiceArgs", "FunctionCallResponseInput", "HttpValidationError", @@ -131,8 +137,10 @@ "PauseAssistantMessage", "PostedBuiltinTool", "PostedBuiltinToolName", + "PostedConfigPromptSpec", "PostedCustomVoice", - "PostedCustomVoiceName", + "PostedCustomVoiceBaseVoice", + "PostedCustomVoiceParameters", "PostedEllmModel", "PostedEventMessageSpec", "PostedEventMessageSpecs", @@ -145,7 +153,6 @@ "PostedTimeoutSpecsMaxDuration", "PostedUserDefinedToolSpec", "PostedVoice", - "PostedVoiceName", "ProsodyInference", "PublishEvent", "ResumeAssistantMessage", @@ -168,6 +175,8 @@ "ReturnConfig", "ReturnConfigSpec", "ReturnCustomVoice", + "ReturnCustomVoiceBaseVoice", + "ReturnCustomVoiceParameters", "ReturnEllmModel", "ReturnEventMessageSpec", "ReturnEventMessageSpecs", @@ -189,7 +198,6 @@ "ReturnUserDefinedToolToolType", "ReturnUserDefinedToolVersionType", "ReturnVoice", - "ReturnVoiceName", "Role", "SessionSettings", "SubscribeEvent", @@ -212,6 +220,7 @@ "chat_groups", "chats", "configs", + "custom_voices", "prompts", "tools", ] diff --git a/src/hume/empathic_voice/chat_groups/client.py b/src/hume/empathic_voice/chat_groups/client.py index c4452f71..70d24596 100644 --- a/src/hume/empathic_voice/chat_groups/client.py +++ b/src/hume/empathic_voice/chat_groups/client.py @@ -5,6 +5,8 @@ from ...core.request_options import RequestOptions from ..types.return_paged_chat_groups import ReturnPagedChatGroups from ...core.pydantic_utilities import parse_obj_as +from ..errors.bad_request_error import BadRequestError +from ..types.error_response import ErrorResponse from json.decoder import JSONDecodeError from ...core.api_error import ApiError from ..types.return_chat_group_paged_events import ReturnChatGroupPagedEvents @@ -88,6 +90,16 @@ def list_chat_groups( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -162,6 +174,16 @@ def list_chat_group_events( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -252,6 +274,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -334,6 +366,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = 
_response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/src/hume/empathic_voice/chats/client.py b/src/hume/empathic_voice/chats/client.py index f8ba3b76..c365c6a3 100644 --- a/src/hume/empathic_voice/chats/client.py +++ b/src/hume/empathic_voice/chats/client.py @@ -7,6 +7,8 @@ from ..types.return_chat import ReturnChat from ..types.return_paged_chats import ReturnPagedChats from ...core.pydantic_utilities import parse_obj_as +from ..errors.bad_request_error import BadRequestError +from ..types.error_response import ErrorResponse from json.decoder import JSONDecodeError from ...core.api_error import ApiError from ..types.return_chat_event import ReturnChatEvent @@ -99,6 +101,16 @@ def list_chats( ) _items = _parsed_response.chats_page return SyncPager(has_next=_has_next, items=_items, get_next=_get_next) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -189,6 +201,16 @@ def list_chat_events( ) _items = _parsed_response.events_page return SyncPager(has_next=_has_next, items=_items, get_next=_get_next) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -286,6 +308,16 @@ async def main() -> None: ) _items = _parsed_response.chats_page return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -384,6 +416,16 @@ async def main() -> None: ) _items = _parsed_response.events_page return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/src/hume/empathic_voice/client.py b/src/hume/empathic_voice/client.py index 027d16ee..d0817bf8 100644 --- a/src/hume/empathic_voice/client.py +++ b/src/hume/empathic_voice/client.py @@ -3,12 +3,14 @@ from ..core.client_wrapper import SyncClientWrapper from .tools.client import ToolsClient from .prompts.client import PromptsClient +from .custom_voices.client import CustomVoicesClient from .configs.client import ConfigsClient from .chats.client import ChatsClient from .chat_groups.client import ChatGroupsClient from ..core.client_wrapper import AsyncClientWrapper from .tools.client import AsyncToolsClient from .prompts.client import AsyncPromptsClient +from .custom_voices.client import AsyncCustomVoicesClient from .configs.client import AsyncConfigsClient from .chats.client import AsyncChatsClient from .chat_groups.client import AsyncChatGroupsClient @@ -19,6 +21,7 @@ def __init__(self, *, client_wrapper: 
SyncClientWrapper): self._client_wrapper = client_wrapper self.tools = ToolsClient(client_wrapper=self._client_wrapper) self.prompts = PromptsClient(client_wrapper=self._client_wrapper) + self.custom_voices = CustomVoicesClient(client_wrapper=self._client_wrapper) self.configs = ConfigsClient(client_wrapper=self._client_wrapper) self.chats = ChatsClient(client_wrapper=self._client_wrapper) self.chat_groups = ChatGroupsClient(client_wrapper=self._client_wrapper) @@ -29,6 +32,7 @@ def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper self.tools = AsyncToolsClient(client_wrapper=self._client_wrapper) self.prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper) + self.custom_voices = AsyncCustomVoicesClient(client_wrapper=self._client_wrapper) self.configs = AsyncConfigsClient(client_wrapper=self._client_wrapper) self.chats = AsyncChatsClient(client_wrapper=self._client_wrapper) self.chat_groups = AsyncChatGroupsClient(client_wrapper=self._client_wrapper) diff --git a/src/hume/empathic_voice/configs/client.py b/src/hume/empathic_voice/configs/client.py index 40ab1131..06d95c69 100644 --- a/src/hume/empathic_voice/configs/client.py +++ b/src/hume/empathic_voice/configs/client.py @@ -5,9 +5,11 @@ from ...core.request_options import RequestOptions from ..types.return_paged_configs import ReturnPagedConfigs from ...core.pydantic_utilities import parse_obj_as +from ..errors.bad_request_error import BadRequestError +from ..types.error_response import ErrorResponse from json.decoder import JSONDecodeError from ...core.api_error import ApiError -from ..types.posted_prompt_spec import PostedPromptSpec +from ..types.posted_config_prompt_spec import PostedConfigPromptSpec from ..types.posted_voice import PostedVoice from ..types.posted_language_model import PostedLanguageModel from ..types.posted_ellm_model import PostedEllmModel @@ -96,6 +98,16 @@ def list_configs( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -104,9 +116,10 @@ def list_configs( def create_config( self, *, + evi_version: str, name: str, version_description: typing.Optional[str] = OMIT, - prompt: typing.Optional[PostedPromptSpec] = OMIT, + prompt: typing.Optional[PostedConfigPromptSpec] = OMIT, voice: typing.Optional[PostedVoice] = OMIT, language_model: typing.Optional[PostedLanguageModel] = OMIT, ellm_model: typing.Optional[PostedEllmModel] = OMIT, @@ -119,13 +132,16 @@ def create_config( """ Parameters ---------- + evi_version : str + Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2). + name : str Name applied to all versions of a particular Config. version_description : typing.Optional[str] An optional description of the Config version. - prompt : typing.Optional[PostedPromptSpec] + prompt : typing.Optional[PostedConfigPromptSpec] voice : typing.Optional[PostedVoice] A voice specification associated with this Config. 
@@ -162,10 +178,10 @@ def create_config( -------- from hume import HumeClient from hume.empathic_voice import ( + PostedConfigPromptSpec, PostedEventMessageSpec, PostedEventMessageSpecs, PostedLanguageModel, - PostedPromptSpec, PostedVoice, ) @@ -174,12 +190,13 @@ def create_config( ) client.empathic_voice.configs.create_config( name="Weather Assistant Config", - prompt=PostedPromptSpec( + prompt=PostedConfigPromptSpec( id="af699d45-2985-42cc-91b9-af9e5da3bac5", version=0, ), + evi_version="2", voice=PostedVoice( - name="KORA", + name="SAMPLE VOICE", ), language_model=PostedLanguageModel( model_provider="ANTHROPIC", @@ -206,10 +223,11 @@ def create_config( "v0/evi/configs", method="POST", json={ + "evi_version": evi_version, "name": name, "version_description": version_description, "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PostedPromptSpec, direction="write" + object_=prompt, annotation=PostedConfigPromptSpec, direction="write" ), "voice": convert_and_respect_annotation_metadata( object_=voice, annotation=PostedVoice, direction="write" @@ -249,6 +267,16 @@ def create_config( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -320,6 +348,16 @@ def list_config_versions( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -329,8 +367,9 @@ def create_config_version( self, id: str, *, + evi_version: str, version_description: typing.Optional[str] = OMIT, - prompt: typing.Optional[PostedPromptSpec] = OMIT, + prompt: typing.Optional[PostedConfigPromptSpec] = OMIT, voice: typing.Optional[PostedVoice] = OMIT, language_model: typing.Optional[PostedLanguageModel] = OMIT, ellm_model: typing.Optional[PostedEllmModel] = OMIT, @@ -346,10 +385,13 @@ def create_config_version( id : str Identifier for a Config. Formatted as a UUID. + evi_version : str + The version of the EVI used with this config. + version_description : typing.Optional[str] An optional description of the Config version. - prompt : typing.Optional[PostedPromptSpec] + prompt : typing.Optional[PostedConfigPromptSpec] voice : typing.Optional[PostedVoice] A voice specification associated with this Config version. 
@@ -386,11 +428,11 @@ def create_config_version( -------- from hume import HumeClient from hume.empathic_voice import ( + PostedConfigPromptSpec, PostedEllmModel, PostedEventMessageSpec, PostedEventMessageSpecs, PostedLanguageModel, - PostedPromptSpec, PostedVoice, ) @@ -400,7 +442,8 @@ def create_config_version( client.empathic_voice.configs.create_config_version( id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", version_description="This is an updated version of the Weather Assistant Config.", - prompt=PostedPromptSpec( + evi_version="2", + prompt=PostedConfigPromptSpec( id="af699d45-2985-42cc-91b9-af9e5da3bac5", version=0, ), @@ -435,9 +478,10 @@ def create_config_version( f"v0/evi/configs/{jsonable_encoder(id)}", method="POST", json={ + "evi_version": evi_version, "version_description": version_description, "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PostedPromptSpec, direction="write" + object_=prompt, annotation=PostedConfigPromptSpec, direction="write" ), "voice": convert_and_respect_annotation_metadata( object_=voice, annotation=PostedVoice, direction="write" @@ -477,6 +521,16 @@ def create_config_version( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -515,6 +569,16 @@ def delete_config(self, id: str, *, request_options: typing.Optional[RequestOpti try: if 200 <= _response.status_code < 300: return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -562,6 +626,16 @@ def update_config_name(self, id: str, *, name: str, request_options: typing.Opti try: if 200 <= _response.status_code < 300: return _response.text # type: ignore + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -579,7 +653,7 @@ def get_config_version( version : int Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. 
@@ -617,6 +691,16 @@ def get_config_version( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -634,7 +718,7 @@ def delete_config_version( version : int Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. @@ -665,6 +749,16 @@ def delete_config_version( try: if 200 <= _response.status_code < 300: return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -687,7 +781,7 @@ def update_config_description( version : int Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. @@ -733,6 +827,16 @@ def update_config_description( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -819,6 +923,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -827,9 +941,10 @@ async def main() -> None: async def create_config( self, *, + evi_version: str, name: str, version_description: typing.Optional[str] = OMIT, - prompt: typing.Optional[PostedPromptSpec] = OMIT, + prompt: typing.Optional[PostedConfigPromptSpec] = OMIT, voice: typing.Optional[PostedVoice] = OMIT, language_model: typing.Optional[PostedLanguageModel] = OMIT, ellm_model: typing.Optional[PostedEllmModel] = OMIT, @@ -842,13 +957,16 @@ async def create_config( """ Parameters ---------- + evi_version : str + Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. 
For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2). + name : str Name applied to all versions of a particular Config. version_description : typing.Optional[str] An optional description of the Config version. - prompt : typing.Optional[PostedPromptSpec] + prompt : typing.Optional[PostedConfigPromptSpec] voice : typing.Optional[PostedVoice] A voice specification associated with this Config. @@ -887,10 +1005,10 @@ async def create_config( from hume import AsyncHumeClient from hume.empathic_voice import ( + PostedConfigPromptSpec, PostedEventMessageSpec, PostedEventMessageSpecs, PostedLanguageModel, - PostedPromptSpec, PostedVoice, ) @@ -902,12 +1020,13 @@ async def create_config( async def main() -> None: await client.empathic_voice.configs.create_config( name="Weather Assistant Config", - prompt=PostedPromptSpec( + prompt=PostedConfigPromptSpec( id="af699d45-2985-42cc-91b9-af9e5da3bac5", version=0, ), + evi_version="2", voice=PostedVoice( - name="KORA", + name="SAMPLE VOICE", ), language_model=PostedLanguageModel( model_provider="ANTHROPIC", @@ -937,10 +1056,11 @@ async def main() -> None: "v0/evi/configs", method="POST", json={ + "evi_version": evi_version, "name": name, "version_description": version_description, "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PostedPromptSpec, direction="write" + object_=prompt, annotation=PostedConfigPromptSpec, direction="write" ), "voice": convert_and_respect_annotation_metadata( object_=voice, annotation=PostedVoice, direction="write" @@ -980,6 +1100,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1059,6 +1189,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1068,8 +1208,9 @@ async def create_config_version( self, id: str, *, + evi_version: str, version_description: typing.Optional[str] = OMIT, - prompt: typing.Optional[PostedPromptSpec] = OMIT, + prompt: typing.Optional[PostedConfigPromptSpec] = OMIT, voice: typing.Optional[PostedVoice] = OMIT, language_model: typing.Optional[PostedLanguageModel] = OMIT, ellm_model: typing.Optional[PostedEllmModel] = OMIT, @@ -1085,10 +1226,13 @@ async def create_config_version( id : str Identifier for a Config. Formatted as a UUID. + evi_version : str + The version of the EVI used with this config. + version_description : typing.Optional[str] An optional description of the Config version. - prompt : typing.Optional[PostedPromptSpec] + prompt : typing.Optional[PostedConfigPromptSpec] voice : typing.Optional[PostedVoice] A voice specification associated with this Config version. 
@@ -1127,11 +1271,11 @@ async def create_config_version( from hume import AsyncHumeClient from hume.empathic_voice import ( + PostedConfigPromptSpec, PostedEllmModel, PostedEventMessageSpec, PostedEventMessageSpecs, PostedLanguageModel, - PostedPromptSpec, PostedVoice, ) @@ -1144,7 +1288,8 @@ async def main() -> None: await client.empathic_voice.configs.create_config_version( id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", version_description="This is an updated version of the Weather Assistant Config.", - prompt=PostedPromptSpec( + evi_version="2", + prompt=PostedConfigPromptSpec( id="af699d45-2985-42cc-91b9-af9e5da3bac5", version=0, ), @@ -1182,9 +1327,10 @@ async def main() -> None: f"v0/evi/configs/{jsonable_encoder(id)}", method="POST", json={ + "evi_version": evi_version, "version_description": version_description, "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PostedPromptSpec, direction="write" + object_=prompt, annotation=PostedConfigPromptSpec, direction="write" ), "voice": convert_and_respect_annotation_metadata( object_=voice, annotation=PostedVoice, direction="write" @@ -1224,6 +1370,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1270,6 +1426,16 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1327,6 +1493,16 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return _response.text # type: ignore + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1344,7 +1520,7 @@ async def get_config_version( version : int Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. @@ -1390,6 +1566,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1407,7 +1593,7 @@ async def delete_config_version( version : int Version number for a Config. 
- Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. @@ -1446,6 +1632,16 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1468,7 +1664,7 @@ async def update_config_description( version : int Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. @@ -1522,6 +1718,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/src/hume/empathic_voice/custom_voices/__init__.py b/src/hume/empathic_voice/custom_voices/__init__.py new file mode 100644 index 00000000..f3ea2659 --- /dev/null +++ b/src/hume/empathic_voice/custom_voices/__init__.py @@ -0,0 +1,2 @@ +# This file was auto-generated by Fern from our API Definition. + diff --git a/src/hume/empathic_voice/custom_voices/client.py b/src/hume/empathic_voice/custom_voices/client.py new file mode 100644 index 00000000..8a472028 --- /dev/null +++ b/src/hume/empathic_voice/custom_voices/client.py @@ -0,0 +1,886 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ...core.client_wrapper import SyncClientWrapper +from ...core.request_options import RequestOptions +from ..types.return_paged_custom_voices import ReturnPagedCustomVoices +from ...core.pydantic_utilities import parse_obj_as +from ..errors.bad_request_error import BadRequestError +from ..types.error_response import ErrorResponse +from json.decoder import JSONDecodeError +from ...core.api_error import ApiError +from ..types.posted_custom_voice_base_voice import PostedCustomVoiceBaseVoice +from ..types.posted_custom_voice_parameters import PostedCustomVoiceParameters +from ..types.return_custom_voice import ReturnCustomVoice +from ...core.serialization import convert_and_respect_annotation_metadata +from ...core.jsonable_encoder import jsonable_encoder +from ...core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class CustomVoicesClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def get_return_custom_voices_for_user( + self, + *, + page_number: typing.Optional[int] = None, + page_size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ReturnPagedCustomVoices: + """ + Parameters + ---------- + page_number : typing.Optional[int] + Specifies the page number to retrieve, enabling pagination. + + This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + + page_size : typing.Optional[int] + Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + + For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + + name : typing.Optional[str] + Filter to only include custom voices with this name. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ReturnPagedCustomVoices + Success + + Examples + -------- + from hume import HumeClient + + client = HumeClient( + api_key="YOUR_API_KEY", + ) + client.empathic_voice.custom_voices.get_return_custom_voices_for_user() + """ + _response = self._client_wrapper.httpx_client.request( + "v0/evi/custom_voices", + method="GET", + params={ + "page_number": page_number, + "page_size": page_size, + "name": name, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ReturnPagedCustomVoices, + parse_obj_as( + type_=ReturnPagedCustomVoices, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_new_custom_voice( + self, + *, + name: str, + base_voice: PostedCustomVoiceBaseVoice, + parameters: typing.Optional[PostedCustomVoiceParameters] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ReturnCustomVoice: + """ + Parameters + ---------- + name : str + The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") + + base_voice : PostedCustomVoiceBaseVoice + Specifies the base voice used to create the Custom Voice. + + parameters : typing.Optional[PostedCustomVoiceParameters] + The specified attributes of a Custom Voice. + + If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ReturnCustomVoice + Created + + Examples + -------- + from hume import HumeClient + + client = HumeClient( + api_key="YOUR_API_KEY", + ) + client.empathic_voice.custom_voices.create_new_custom_voice( + name="name", + base_voice="ITO", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v0/evi/custom_voices", + method="POST", + json={ + "name": name, + "base_voice": base_voice, + "parameters": convert_and_respect_annotation_metadata( + object_=parameters, annotation=PostedCustomVoiceParameters, direction="write" + ), + "parameter_model": "20240715-4parameter", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ReturnCustomVoice, + parse_obj_as( + type_=ReturnCustomVoice, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_return_custom_voice_by_custom_voice_id( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> ReturnCustomVoice: + """ + Parameters + ---------- + id : str + Identifier for a Custom Voice. Formatted as a UUID. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ReturnCustomVoice + Success + + Examples + -------- + from hume import HumeClient + + client = HumeClient( + api_key="YOUR_API_KEY", + ) + client.empathic_voice.custom_voices.get_return_custom_voice_by_custom_voice_id( + id="id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v0/evi/custom_voices/{jsonable_encoder(id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ReturnCustomVoice, + parse_obj_as( + type_=ReturnCustomVoice, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def add_new_custom_voice_version( + self, + id: str, + *, + name: str, + base_voice: PostedCustomVoiceBaseVoice, + parameters: typing.Optional[PostedCustomVoiceParameters] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ReturnCustomVoice: + """ + Parameters + ---------- + id : str + Identifier for a Custom Voice. Formatted as a UUID. + + name : str + The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") + + base_voice : PostedCustomVoiceBaseVoice + Specifies the base voice used to create the Custom Voice. + + parameters : typing.Optional[PostedCustomVoiceParameters] + The specified attributes of a Custom Voice. + + If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ReturnCustomVoice + Created + + Examples + -------- + from hume import HumeClient + + client = HumeClient( + api_key="YOUR_API_KEY", + ) + client.empathic_voice.custom_voices.add_new_custom_voice_version( + id="id", + name="name", + base_voice="ITO", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v0/evi/custom_voices/{jsonable_encoder(id)}", + method="POST", + json={ + "name": name, + "base_voice": base_voice, + "parameters": convert_and_respect_annotation_metadata( + object_=parameters, annotation=PostedCustomVoiceParameters, direction="write" + ), + "parameter_model": "20240715-4parameter", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ReturnCustomVoice, + parse_obj_as( + type_=ReturnCustomVoice, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_custom_voice(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Parameters + ---------- + id : str + Identifier for a Custom Voice. Formatted as a UUID. 
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from hume import HumeClient + + client = HumeClient( + api_key="YOUR_API_KEY", + ) + client.empathic_voice.custom_voices.delete_custom_voice( + id="id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v0/evi/custom_voices/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_custom_voice_name( + self, id: str, *, name: str, request_options: typing.Optional[RequestOptions] = None + ) -> str: + """ + Parameters + ---------- + id : str + Identifier for a Custom Voice. Formatted as a UUID. + + name : str + The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + str + Success + + Examples + -------- + from hume import HumeClient + + client = HumeClient( + api_key="YOUR_API_KEY", + ) + client.empathic_voice.custom_voices.update_custom_voice_name( + id="string", + name="string", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v0/evi/custom_voices/{jsonable_encoder(id)}", + method="PATCH", + json={ + "name": name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return _response.text # type: ignore + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncCustomVoicesClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def get_return_custom_voices_for_user( + self, + *, + page_number: typing.Optional[int] = None, + page_size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ReturnPagedCustomVoices: + """ + Parameters + ---------- + page_number : typing.Optional[int] + Specifies the page number to retrieve, enabling pagination. + + This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + + page_size : typing.Optional[int] + Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + + For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + + name : typing.Optional[str] + Filter to only include custom voices with this name. 
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ReturnPagedCustomVoices + Success + + Examples + -------- + import asyncio + + from hume import AsyncHumeClient + + client = AsyncHumeClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.empathic_voice.custom_voices.get_return_custom_voices_for_user() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v0/evi/custom_voices", + method="GET", + params={ + "page_number": page_number, + "page_size": page_size, + "name": name, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ReturnPagedCustomVoices, + parse_obj_as( + type_=ReturnPagedCustomVoices, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_new_custom_voice( + self, + *, + name: str, + base_voice: PostedCustomVoiceBaseVoice, + parameters: typing.Optional[PostedCustomVoiceParameters] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ReturnCustomVoice: + """ + Parameters + ---------- + name : str + The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") + + base_voice : PostedCustomVoiceBaseVoice + Specifies the base voice used to create the Custom Voice. + + parameters : typing.Optional[PostedCustomVoiceParameters] + The specified attributes of a Custom Voice. + + If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ReturnCustomVoice + Created + + Examples + -------- + import asyncio + + from hume import AsyncHumeClient + + client = AsyncHumeClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.empathic_voice.custom_voices.create_new_custom_voice( + name="name", + base_voice="ITO", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v0/evi/custom_voices", + method="POST", + json={ + "name": name, + "base_voice": base_voice, + "parameters": convert_and_respect_annotation_metadata( + object_=parameters, annotation=PostedCustomVoiceParameters, direction="write" + ), + "parameter_model": "20240715-4parameter", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ReturnCustomVoice, + parse_obj_as( + type_=ReturnCustomVoice, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_return_custom_voice_by_custom_voice_id( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> ReturnCustomVoice: + """ + Parameters + ---------- + id : str + Identifier for a Custom Voice. Formatted as a UUID. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ReturnCustomVoice + Success + + Examples + -------- + import asyncio + + from hume import AsyncHumeClient + + client = AsyncHumeClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.empathic_voice.custom_voices.get_return_custom_voice_by_custom_voice_id( + id="id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v0/evi/custom_voices/{jsonable_encoder(id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ReturnCustomVoice, + parse_obj_as( + type_=ReturnCustomVoice, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def add_new_custom_voice_version( + self, + id: str, + *, + name: str, + base_voice: PostedCustomVoiceBaseVoice, + parameters: typing.Optional[PostedCustomVoiceParameters] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ReturnCustomVoice: + """ + Parameters + ---------- + id : str + Identifier for a Custom Voice. Formatted as a UUID. + + name : str + The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") + + base_voice : PostedCustomVoiceBaseVoice + Specifies the base voice used to create the Custom Voice. + + parameters : typing.Optional[PostedCustomVoiceParameters] + The specified attributes of a Custom Voice. 
+ + If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ReturnCustomVoice + Created + + Examples + -------- + import asyncio + + from hume import AsyncHumeClient + + client = AsyncHumeClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.empathic_voice.custom_voices.add_new_custom_voice_version( + id="id", + name="name", + base_voice="ITO", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v0/evi/custom_voices/{jsonable_encoder(id)}", + method="POST", + json={ + "name": name, + "base_voice": base_voice, + "parameters": convert_and_respect_annotation_metadata( + object_=parameters, annotation=PostedCustomVoiceParameters, direction="write" + ), + "parameter_model": "20240715-4parameter", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ReturnCustomVoice, + parse_obj_as( + type_=ReturnCustomVoice, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_custom_voice(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Parameters + ---------- + id : str + Identifier for a Custom Voice. Formatted as a UUID. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + import asyncio + + from hume import AsyncHumeClient + + client = AsyncHumeClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.empathic_voice.custom_voices.delete_custom_voice( + id="id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v0/evi/custom_voices/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_custom_voice_name( + self, id: str, *, name: str, request_options: typing.Optional[RequestOptions] = None + ) -> str: + """ + Parameters + ---------- + id : str + Identifier for a Custom Voice. Formatted as a UUID. + + name : str + The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + str + Success + + Examples + -------- + import asyncio + + from hume import AsyncHumeClient + + client = AsyncHumeClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.empathic_voice.custom_voices.update_custom_voice_name( + id="string", + name="string", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v0/evi/custom_voices/{jsonable_encoder(id)}", + method="PATCH", + json={ + "name": name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return _response.text # type: ignore + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/hume/empathic_voice/errors/__init__.py b/src/hume/empathic_voice/errors/__init__.py new file mode 100644 index 00000000..14350df6 --- /dev/null +++ b/src/hume/empathic_voice/errors/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +from .bad_request_error import BadRequestError + +__all__ = ["BadRequestError"] diff --git a/src/hume/empathic_voice/errors/bad_request_error.py b/src/hume/empathic_voice/errors/bad_request_error.py new file mode 100644 index 00000000..0df4d6bc --- /dev/null +++ b/src/hume/empathic_voice/errors/bad_request_error.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.api_error import ApiError +from ..types.error_response import ErrorResponse + + +class BadRequestError(ApiError): + def __init__(self, body: ErrorResponse): + super().__init__(status_code=400, body=body) diff --git a/src/hume/empathic_voice/prompts/client.py b/src/hume/empathic_voice/prompts/client.py index 52c7a59e..b222b5c6 100644 --- a/src/hume/empathic_voice/prompts/client.py +++ b/src/hume/empathic_voice/prompts/client.py @@ -7,6 +7,8 @@ from ..types.return_prompt import ReturnPrompt from ..types.return_paged_prompts import ReturnPagedPrompts from ...core.pydantic_utilities import parse_obj_as +from ..errors.bad_request_error import BadRequestError +from ..types.error_response import ErrorResponse from json.decoder import JSONDecodeError from ...core.api_error import ApiError from ...core.jsonable_encoder import jsonable_encoder @@ -105,6 +107,16 @@ def list_prompts( ) _items = _parsed_response.prompts_page return SyncPager(has_next=_has_next, items=_items, get_next=_get_next) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -174,6 +186,16 @@ def create_prompt( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -245,6 +267,16 @@ def list_prompt_versions( object_=_response.json(), ), ) + if _response.status_code 
== 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -314,6 +346,16 @@ def create_prompt_verison( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -352,6 +394,16 @@ def delete_prompt(self, id: str, *, request_options: typing.Optional[RequestOpti try: if 200 <= _response.status_code < 300: return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -399,6 +451,16 @@ def update_prompt_name(self, id: str, *, name: str, request_options: typing.Opti try: if 200 <= _response.status_code < 300: return _response.text # type: ignore + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -416,7 +478,7 @@ def get_prompt_version( version : int Version number for a Prompt. - Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. @@ -454,6 +516,16 @@ def get_prompt_version( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -471,7 +543,7 @@ def delete_prompt_version( version : int Version number for a Prompt. - Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. 
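The hunks above give every synchronous Prompts endpoint an explicit 400 branch that raises the new BadRequestError with a parsed ErrorResponse body instead of falling through to the generic ApiError. A minimal sketch of what that looks like from the caller's side, assuming the import paths added in this patch; the request arguments are illustrative:

from hume import HumeClient
from hume.empathic_voice.errors import BadRequestError

client = HumeClient(api_key="YOUR_API_KEY")

try:
    # Arguments are illustrative; see reference.md for the full create_prompt signature.
    client.empathic_voice.prompts.create_prompt(
        name="Weather Assistant Prompt",
        text="<role>You are an AI weather assistant.</role>",
    )
except BadRequestError as exc:
    # BadRequestError extends ApiError(status_code=400, body=ErrorResponse), as defined above.
    print(exc.status_code, exc.body)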
@@ -502,6 +574,16 @@ def delete_prompt_version( try: if 200 <= _response.status_code < 300: return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -524,7 +606,7 @@ def update_prompt_description( version : int Version number for a Prompt. - Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. @@ -570,6 +652,16 @@ def update_prompt_description( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -672,6 +764,16 @@ async def main() -> None: ) _items = _parsed_response.prompts_page return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -749,6 +851,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -828,6 +940,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -905,6 +1027,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -951,6 +1083,16 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1008,6 +1150,16 @@ async 
def main() -> None: try: if 200 <= _response.status_code < 300: return _response.text # type: ignore + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1025,7 +1177,7 @@ async def get_prompt_version( version : int Version number for a Prompt. - Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. @@ -1071,6 +1223,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1088,7 +1250,7 @@ async def delete_prompt_version( version : int Version number for a Prompt. - Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. @@ -1127,6 +1289,16 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1149,7 +1321,7 @@ async def update_prompt_description( version : int Version number for a Prompt. - Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. 
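The async client mirrors these changes. Below is a rough sketch of fetching one pinned Prompt version with AsyncHumeClient, in line with the updated versioning docstrings; the UUID is illustrative and nothing beyond the documented `id`/`version` parameters is assumed about the returned object:

import asyncio

from hume import AsyncHumeClient
from hume.empathic_voice.errors import BadRequestError


async def main() -> None:
    client = AsyncHumeClient(api_key="YOUR_API_KEY")
    try:
        prompt = await client.empathic_voice.prompts.get_prompt_version(
            id="af699d45-2985-42cc-91b9-af9e5da3bac5",  # illustrative Prompt UUID
            version=0,
        )
        print(prompt)
    except BadRequestError as exc:
        print(exc.body)


asyncio.run(main())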
@@ -1203,6 +1375,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/src/hume/empathic_voice/tools/client.py b/src/hume/empathic_voice/tools/client.py index accfa4e6..83c50ff1 100644 --- a/src/hume/empathic_voice/tools/client.py +++ b/src/hume/empathic_voice/tools/client.py @@ -7,6 +7,8 @@ from ..types.return_user_defined_tool import ReturnUserDefinedTool from ..types.return_paged_user_defined_tools import ReturnPagedUserDefinedTools from ...core.pydantic_utilities import parse_obj_as +from ..errors.bad_request_error import BadRequestError +from ..types.error_response import ErrorResponse from json.decoder import JSONDecodeError from ...core.api_error import ApiError from ...core.jsonable_encoder import jsonable_encoder @@ -105,6 +107,16 @@ def list_tools( ) _items = _parsed_response.tools_page return SyncPager(has_next=_has_next, items=_items, get_next=_get_next) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -185,6 +197,16 @@ def create_tool( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -256,6 +278,16 @@ def list_tool_versions( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -335,6 +367,16 @@ def create_tool_version( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -373,6 +415,16 @@ def delete_tool(self, id: str, *, request_options: typing.Optional[RequestOption try: if 200 <= _response.status_code < 300: return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -420,6 +472,16 @@ def update_tool_name(self, id: str, *, name: str, request_options: typing.Option try: if 200 <= _response.status_code < 300: return _response.text # type: ignore + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) 
_response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -437,7 +499,7 @@ def get_tool_version( version : int Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. @@ -475,6 +537,16 @@ def get_tool_version( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -492,7 +564,7 @@ def delete_tool_version( version : int Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. @@ -523,6 +595,16 @@ def delete_tool_version( try: if 200 <= _response.status_code < 300: return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -545,7 +627,7 @@ def update_tool_description( version : int Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. 
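The Tools client follows the same pattern, and its versioning docstrings now list Custom Voices alongside Configs and Prompts. A short sketch of pinning a Tool to one version; the identifier is illustrative, and the returned ReturnUserDefinedTool is printed as-is rather than assuming field names:

from hume import HumeClient

client = HumeClient(api_key="YOUR_API_KEY")

# `id` and `version` are the parameters documented above; the UUID is illustrative.
tool = client.empathic_voice.tools.get_tool_version(
    id="00183a3f-79ba-413d-9f3b-609864e52437",
    version=1,
)
print(tool)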
@@ -591,6 +673,16 @@ def update_tool_description( object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -693,6 +785,16 @@ async def main() -> None: ) _items = _parsed_response.tools_page return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -781,6 +883,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -860,6 +972,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -947,6 +1069,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -993,6 +1125,16 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1050,6 +1192,16 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return _response.text # type: ignore + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1067,7 +1219,7 @@ async def get_tool_version( version : int Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. 
@@ -1113,6 +1265,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1130,7 +1292,7 @@ async def delete_tool_version( version : int Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. @@ -1169,6 +1331,16 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -1191,7 +1363,7 @@ async def update_tool_description( version : int Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. 
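Beyond the client changes, this patch swaps the fixed PostedVoiceName/ReturnVoiceName literals for full Custom Voice models (see the type diffs that follow). As a rough sketch, a Config voice might be specified under the new types as below; the call that ultimately accepts the PostedVoice is not shown here, so treat the wiring as an assumption and the parameter values as illustrative:

from hume.empathic_voice.types import (
    PostedCustomVoice,
    PostedCustomVoiceParameters,
    PostedVoice,
)

# Parameter values range from -100 to 100 with a default of 0, per the docstrings below.
voice = PostedVoice(
    provider="HUME_AI",  # field name assumed from the "provider of the voice" docstring
    custom_voice=PostedCustomVoice(
        name="SAMPLE VOICE",
        base_voice="KORA",
        parameters=PostedCustomVoiceParameters(pitch=30, nasality=-20),
    ),
)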
@@ -1245,6 +1417,16 @@ async def main() -> None: object_=_response.json(), ), ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/src/hume/empathic_voice/types/__init__.py b/src/hume/empathic_voice/types/__init__.py index 70d20965..67704b0c 100644 --- a/src/hume/empathic_voice/types/__init__.py +++ b/src/hume/empathic_voice/types/__init__.py @@ -16,6 +16,7 @@ from .emotion_scores import EmotionScores from .encoding import Encoding from .error_level import ErrorLevel +from .error_response import ErrorResponse from .extended_voice_args import ExtendedVoiceArgs from .function_call_response_input import FunctionCallResponseInput from .http_validation_error import HttpValidationError @@ -25,8 +26,10 @@ from .pause_assistant_message import PauseAssistantMessage from .posted_builtin_tool import PostedBuiltinTool from .posted_builtin_tool_name import PostedBuiltinToolName +from .posted_config_prompt_spec import PostedConfigPromptSpec from .posted_custom_voice import PostedCustomVoice -from .posted_custom_voice_name import PostedCustomVoiceName +from .posted_custom_voice_base_voice import PostedCustomVoiceBaseVoice +from .posted_custom_voice_parameters import PostedCustomVoiceParameters from .posted_ellm_model import PostedEllmModel from .posted_event_message_spec import PostedEventMessageSpec from .posted_event_message_specs import PostedEventMessageSpecs @@ -39,7 +42,6 @@ from .posted_timeout_specs_max_duration import PostedTimeoutSpecsMaxDuration from .posted_user_defined_tool_spec import PostedUserDefinedToolSpec from .posted_voice import PostedVoice -from .posted_voice_name import PostedVoiceName from .prosody_inference import ProsodyInference from .resume_assistant_message import ResumeAssistantMessage from .return_active_chat_count import ReturnActiveChatCount @@ -61,6 +63,8 @@ from .return_config import ReturnConfig from .return_config_spec import ReturnConfigSpec from .return_custom_voice import ReturnCustomVoice +from .return_custom_voice_base_voice import ReturnCustomVoiceBaseVoice +from .return_custom_voice_parameters import ReturnCustomVoiceParameters from .return_ellm_model import ReturnEllmModel from .return_event_message_spec import ReturnEventMessageSpec from .return_event_message_specs import ReturnEventMessageSpecs @@ -82,7 +86,6 @@ from .return_user_defined_tool_tool_type import ReturnUserDefinedToolToolType from .return_user_defined_tool_version_type import ReturnUserDefinedToolVersionType from .return_voice import ReturnVoice -from .return_voice_name import ReturnVoiceName from .role import Role from .session_settings import SessionSettings from .text_input import TextInput @@ -118,6 +121,7 @@ "EmotionScores", "Encoding", "ErrorLevel", + "ErrorResponse", "ExtendedVoiceArgs", "FunctionCallResponseInput", "HttpValidationError", @@ -127,8 +131,10 @@ "PauseAssistantMessage", "PostedBuiltinTool", "PostedBuiltinToolName", + "PostedConfigPromptSpec", "PostedCustomVoice", - "PostedCustomVoiceName", + "PostedCustomVoiceBaseVoice", + "PostedCustomVoiceParameters", "PostedEllmModel", "PostedEventMessageSpec", "PostedEventMessageSpecs", @@ -141,7 +147,6 @@ "PostedTimeoutSpecsMaxDuration", "PostedUserDefinedToolSpec", "PostedVoice", - "PostedVoiceName", "ProsodyInference", "ResumeAssistantMessage", 
"ReturnActiveChatCount", @@ -163,6 +168,8 @@ "ReturnConfig", "ReturnConfigSpec", "ReturnCustomVoice", + "ReturnCustomVoiceBaseVoice", + "ReturnCustomVoiceParameters", "ReturnEllmModel", "ReturnEventMessageSpec", "ReturnEventMessageSpecs", @@ -184,7 +191,6 @@ "ReturnUserDefinedToolToolType", "ReturnUserDefinedToolVersionType", "ReturnVoice", - "ReturnVoiceName", "Role", "SessionSettings", "TextInput", diff --git a/src/hume/empathic_voice/types/posted_custom_voice_name.py b/src/hume/empathic_voice/types/error_response.py similarity index 63% rename from src/hume/empathic_voice/types/posted_custom_voice_name.py rename to src/hume/empathic_voice/types/error_response.py index 10c0f3c8..2a5aab74 100644 --- a/src/hume/empathic_voice/types/posted_custom_voice_name.py +++ b/src/hume/empathic_voice/types/error_response.py @@ -1,20 +1,14 @@ # This file was auto-generated by Fern from our API Definition. from ...core.pydantic_utilities import UniversalBaseModel -import pydantic -from ...core.pydantic_utilities import IS_PYDANTIC_V2 import typing +from ...core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic -class PostedCustomVoiceName(UniversalBaseModel): - """ - A custom voice name change to be posted to the server - """ - - name: str = pydantic.Field() - """ - String with the name of the voice to use. Maximum length of 75 characters. Will be converted to all-uppercase. - """ +class ErrorResponse(UniversalBaseModel): + error: typing.Optional[str] = None + message: typing.Optional[str] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/hume/empathic_voice/types/posted_config_prompt_spec.py b/src/hume/empathic_voice/types/posted_config_prompt_spec.py new file mode 100644 index 00000000..9aca0430 --- /dev/null +++ b/src/hume/empathic_voice/types/posted_config_prompt_spec.py @@ -0,0 +1,36 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.pydantic_utilities import UniversalBaseModel +import typing +import pydantic +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class PostedConfigPromptSpec(UniversalBaseModel): + """ + Identifies which prompt to use in a a config OR how to create a new prompt to use in the config + """ + + id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for a Prompt. Formatted as a UUID. + """ + + version: typing.Optional[int] = pydantic.Field(default=None) + """ + Version number for a Prompt. Version numbers should be integers. The combination of configId and version number is unique. + """ + + text: typing.Optional[str] = pydantic.Field(default=None) + """ + Text used to create a new prompt for a particular config. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/hume/empathic_voice/types/posted_custom_voice.py b/src/hume/empathic_voice/types/posted_custom_voice.py index fba80c5c..f690183b 100644 --- a/src/hume/empathic_voice/types/posted_custom_voice.py +++ b/src/hume/empathic_voice/types/posted_custom_voice.py @@ -2,38 +2,41 @@ from ...core.pydantic_utilities import UniversalBaseModel import pydantic +from .posted_custom_voice_base_voice import PostedCustomVoiceBaseVoice import typing +from .posted_custom_voice_parameters import PostedCustomVoiceParameters from ...core.pydantic_utilities import IS_PYDANTIC_V2 class PostedCustomVoice(UniversalBaseModel): """ - A custom voice specifications posted to the server - """ + A Custom Voice specification to be associated with this Config. - name: str = pydantic.Field() - """ - String with the name of the voice to use. Maximum length of 75 characters. Will be converted to all-uppercase. + If a Custom Voice specification is not provided then the [name](/reference/empathic-voice-interface-evi/configs/create-config#request.body.voice.name) of a base voice or previously created Custom Voice must be provided. + + See our [Voices guide](/docs/empathic-voice-interface-evi/voices) for a tutorial on how to craft a Custom Voice. """ - base_voice: str = pydantic.Field() + name: str = pydantic.Field() """ - The voice the custom voice is based off of. + The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") """ - speech_rate_multiplier: typing.Optional[float] = pydantic.Field(default=None) + base_voice: PostedCustomVoiceBaseVoice = pydantic.Field() """ - The speech rate multiplier for this custom voice. + Specifies the base voice used to create the Custom Voice. """ - parameter_model: str = pydantic.Field() + parameter_model: typing.Literal["20240715-4parameter"] = pydantic.Field(default="20240715-4parameter") """ - The name of the parameter model used to define which attributes are used by `parameters`. + The name of the parameter model used to define which attributes are used by the `parameters` field. Currently, only `20240715-4parameter` is supported as the parameter model. """ - parameters: typing.Optional[typing.Dict[str, typing.Optional[float]]] = pydantic.Field(default=None) + parameters: typing.Optional[PostedCustomVoiceParameters] = pydantic.Field(default=None) """ - Voice specification for a Config. + The specified attributes of a Custom Voice. + + If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. """ if IS_PYDANTIC_V2: diff --git a/src/hume/empathic_voice/types/posted_custom_voice_base_voice.py b/src/hume/empathic_voice/types/posted_custom_voice_base_voice.py new file mode 100644 index 00000000..e2fd2477 --- /dev/null +++ b/src/hume/empathic_voice/types/posted_custom_voice_base_voice.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostedCustomVoiceBaseVoice = typing.Union[ + typing.Literal["ITO", "KORA", "DACHER", "AURA", "FINN", "STELLA", "WHIMSY"], typing.Any +] diff --git a/src/hume/empathic_voice/types/posted_custom_voice_parameters.py b/src/hume/empathic_voice/types/posted_custom_voice_parameters.py new file mode 100644 index 00000000..ad8533d4 --- /dev/null +++ b/src/hume/empathic_voice/types/posted_custom_voice_parameters.py @@ -0,0 +1,51 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.pydantic_utilities import UniversalBaseModel +import typing +import pydantic +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class PostedCustomVoiceParameters(UniversalBaseModel): + """ + The specified attributes of a Custom Voice. + + If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. + """ + + gender: typing.Optional[int] = pydantic.Field(default=None) + """ + The vocalization of gender, ranging between masculine and feminine. + + The default value is `0`, with a minimum of `-100` (more masculine) and a maximum of `100` (more feminine). A value of `0` leaves this parameter unchanged from the base voice. + """ + + huskiness: typing.Optional[int] = pydantic.Field(default=None) + """ + The texture of the voice, ranging between bright and husky. + + The default value is `0`, with a minimum of `-100` (brighter) and a maximum of `100` (huskier). A value of `0` leaves this parameter unchanged from the base voice. + """ + + nasality: typing.Optional[int] = pydantic.Field(default=None) + """ + The openness of the voice, ranging between resonant and nasal. + + The default value is `0`, with a minimum of `-100` (more resonant) and a maximum of `100` (more nasal). A value of `0` leaves this parameter unchanged from the base voice. + """ + + pitch: typing.Optional[int] = pydantic.Field(default=None) + """ + The frequency of the voice, ranging between low and high. + + The default value is `0`, with a minimum of `-100` (lower) and a maximum of `100` (higher). A value of `0` leaves this parameter unchanged from the base voice. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/hume/empathic_voice/types/posted_prompt_spec.py b/src/hume/empathic_voice/types/posted_prompt_spec.py index 93428a28..73bfea83 100644 --- a/src/hume/empathic_voice/types/posted_prompt_spec.py +++ b/src/hume/empathic_voice/types/posted_prompt_spec.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. from ...core.pydantic_utilities import UniversalBaseModel -import pydantic import typing from ...core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class PostedPromptSpec(UniversalBaseModel): @@ -11,19 +11,7 @@ class PostedPromptSpec(UniversalBaseModel): A Prompt associated with this Config. """ - id: str = pydantic.Field() - """ - Identifier for a Prompt. Formatted as a UUID. - """ - - version: typing.Optional[int] = pydantic.Field(default=None) - """ - Version number for a Prompt. - - Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. 
- - Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. - """ + version: typing.Optional[typing.Optional[typing.Any]] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/hume/empathic_voice/types/posted_user_defined_tool_spec.py b/src/hume/empathic_voice/types/posted_user_defined_tool_spec.py index 779152bf..790a1089 100644 --- a/src/hume/empathic_voice/types/posted_user_defined_tool_spec.py +++ b/src/hume/empathic_voice/types/posted_user_defined_tool_spec.py @@ -20,7 +20,7 @@ class PostedUserDefinedToolSpec(UniversalBaseModel): """ Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. """ diff --git a/src/hume/empathic_voice/types/posted_voice.py b/src/hume/empathic_voice/types/posted_voice.py index 26f49977..58f22575 100644 --- a/src/hume/empathic_voice/types/posted_voice.py +++ b/src/hume/empathic_voice/types/posted_voice.py @@ -3,7 +3,7 @@ from ...core.pydantic_utilities import UniversalBaseModel import typing import pydantic -from .posted_voice_name import PostedVoiceName +from .posted_custom_voice import PostedCustomVoice from ...core.pydantic_utilities import IS_PYDANTIC_V2 @@ -17,11 +17,17 @@ class PostedVoice(UniversalBaseModel): The provider of the voice to use. Currently, only `HUME_AI` is supported as the voice provider. """ - name: typing.Optional[PostedVoiceName] = pydantic.Field(default=None) + name: typing.Optional[str] = pydantic.Field(default=None) """ - String with the name of the voice to use. Maximum length of 75 characters. Will be converted to all-uppercase. + Specifies the name of the voice to use. + + This can be either the name of a previously created Custom Voice or one of our 7 base voices: `ITO`, `KORA`, `DACHER`, `AURA`, `FINN`, `WHIMSY`, or `STELLA`. + + The name will be automatically converted to uppercase (e.g., "Ito" becomes "ITO"). If a name is not specified, then a [Custom Voice](/reference/empathic-voice-interface-evi/configs/create-config#request.body.voice.custom_voice) specification must be provided. """ + custom_voice: typing.Optional[PostedCustomVoice] = None + if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: diff --git a/src/hume/empathic_voice/types/posted_voice_name.py b/src/hume/empathic_voice/types/posted_voice_name.py deleted file mode 100644 index 2e64d1a3..00000000 --- a/src/hume/empathic_voice/types/posted_voice_name.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PostedVoiceName = typing.Union[typing.Literal["ITO", "DACHER", "KORA"], typing.Any] diff --git a/src/hume/empathic_voice/types/return_config.py b/src/hume/empathic_voice/types/return_config.py index df5299a0..1a28c95d 100644 --- a/src/hume/empathic_voice/types/return_config.py +++ b/src/hume/empathic_voice/types/return_config.py @@ -28,11 +28,16 @@ class ReturnConfig(UniversalBaseModel): """ Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. """ + evi_version: typing.Optional[str] = pydantic.Field(default=None) + """ + Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2). + """ + version_description: typing.Optional[str] = pydantic.Field(default=None) """ An optional description of the Config version. diff --git a/src/hume/empathic_voice/types/return_config_spec.py b/src/hume/empathic_voice/types/return_config_spec.py index c6f62fc8..0d204dc6 100644 --- a/src/hume/empathic_voice/types/return_config_spec.py +++ b/src/hume/empathic_voice/types/return_config_spec.py @@ -20,7 +20,7 @@ class ReturnConfigSpec(UniversalBaseModel): """ Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. """ diff --git a/src/hume/empathic_voice/types/return_custom_voice.py b/src/hume/empathic_voice/types/return_custom_voice.py index c264ea6d..752ebc6f 100644 --- a/src/hume/empathic_voice/types/return_custom_voice.py +++ b/src/hume/empathic_voice/types/return_custom_voice.py @@ -2,13 +2,15 @@ from ...core.pydantic_utilities import UniversalBaseModel import pydantic +from .return_custom_voice_base_voice import ReturnCustomVoiceBaseVoice import typing +from .return_custom_voice_parameters import ReturnCustomVoiceParameters from ...core.pydantic_utilities import IS_PYDANTIC_V2 class ReturnCustomVoice(UniversalBaseModel): """ - A custom voice specification returned from the server + A Custom Voice specification associated with this Config. """ id: str = pydantic.Field() @@ -18,42 +20,41 @@ class ReturnCustomVoice(UniversalBaseModel): version: int = pydantic.Field() """ - Version number for a Custom Voice. Version numbers should be integers. The combination of custom_voice_id and version number is unique. + Version number for a Custom Voice. + + Custom Voices, Prompts, Configs, and Tools are versioned. 
This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + + Version numbers are integer values representing different iterations of the Custom Voice. Each update to the Custom Voice increments its version number. """ name: str = pydantic.Field() """ - String with the name of the voice to use. Maximum length of 75 characters. Will be converted to all-uppercase. + The name of the Custom Voice. Maximum length of 75 characters. """ created_on: int = pydantic.Field() """ - The timestamp when the first version of this prompt was created. + Time at which the Custom Voice was created. Measured in seconds since the Unix epoch. """ modified_on: int = pydantic.Field() """ - The timestamp when this version of the prompt was created. + Time at which the Custom Voice was last modified. Measured in seconds since the Unix epoch. """ - base_voice: str = pydantic.Field() + base_voice: ReturnCustomVoiceBaseVoice = pydantic.Field() """ - The voice the custom voice is based off of. + The base voice used to create the Custom Voice. """ - speech_rate_multiplier: typing.Optional[float] = pydantic.Field(default=None) + parameter_model: typing.Literal["20240715-4parameter"] = pydantic.Field(default="20240715-4parameter") """ - The speech rate multiplier for this custom voice. + The name of the parameter model used to define which attributes are used by the `parameters` field. Currently, only `20240715-4parameter` is supported as the parameter model. """ - parameter_model: str = pydantic.Field() + parameters: ReturnCustomVoiceParameters = pydantic.Field() """ - The name of the parameter model used to define which attributes are used by `parameters`. - """ - - parameters: typing.Dict[str, float] = pydantic.Field() - """ - Voice specification for a Config. + The specified attributes of a Custom Voice. If a parameter's value is `0` (default), it will not be included in the response. """ if IS_PYDANTIC_V2: diff --git a/src/hume/empathic_voice/types/return_custom_voice_base_voice.py b/src/hume/empathic_voice/types/return_custom_voice_base_voice.py new file mode 100644 index 00000000..7f451828 --- /dev/null +++ b/src/hume/empathic_voice/types/return_custom_voice_base_voice.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ReturnCustomVoiceBaseVoice = typing.Union[ + typing.Literal["ITO", "KORA", "DACHER", "AURA", "FINN", "STELLA", "WHIMSY"], typing.Any +] diff --git a/src/hume/empathic_voice/types/return_custom_voice_parameters.py b/src/hume/empathic_voice/types/return_custom_voice_parameters.py new file mode 100644 index 00000000..b5f98a98 --- /dev/null +++ b/src/hume/empathic_voice/types/return_custom_voice_parameters.py @@ -0,0 +1,49 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.pydantic_utilities import UniversalBaseModel +import typing +import pydantic +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class ReturnCustomVoiceParameters(UniversalBaseModel): + """ + The specified attributes of a Custom Voice. If a parameter's value is `0` (default), it will not be included in the response. + """ + + gender: typing.Optional[int] = pydantic.Field(default=None) + """ + The vocalization of gender, ranging between masculine and feminine. + + The default value is `0`, with a minimum of `-100` (more masculine) and a maximum of `100` (more feminine). A value of `0` leaves this parameter unchanged from the base voice. 
+ """ + + huskiness: typing.Optional[int] = pydantic.Field(default=None) + """ + The texture of the voice, ranging between bright and husky. + + The default value is `0`, with a minimum of `-100` (brighter) and a maximum of `100` (huskier). A value of `0` leaves this parameter unchanged from the base voice. + """ + + nasality: typing.Optional[int] = pydantic.Field(default=None) + """ + The openness of the voice, ranging between resonant and nasal. + + The default value is `0`, with a minimum of `-100` (more resonant) and a maximum of `100` (more nasal). A value of `0` leaves this parameter unchanged from the base voice. + """ + + pitch: typing.Optional[int] = pydantic.Field(default=None) + """ + The frequency of the voice, ranging between low and high. + + The default value is `0`, with a minimum of `-100` (lower) and a maximum of `100` (higher). A value of `0` leaves this parameter unchanged from the base voice. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/hume/empathic_voice/types/return_paged_custom_voices.py b/src/hume/empathic_voice/types/return_paged_custom_voices.py index 8649922c..734fe862 100644 --- a/src/hume/empathic_voice/types/return_paged_custom_voices.py +++ b/src/hume/empathic_voice/types/return_paged_custom_voices.py @@ -14,22 +14,26 @@ class ReturnPagedCustomVoices(UniversalBaseModel): page_number: int = pydantic.Field() """ - The page number of the returned results. + The page number of the returned list. + + This value corresponds to the `page_number` parameter specified in the request. Pagination uses zero-based indexing. """ page_size: int = pydantic.Field() """ - The number of results returned per page. + The maximum number of items returned per page. + + This value corresponds to the `page_size` parameter specified in the request. """ total_pages: int = pydantic.Field() """ - The total number of pages in the collection + The total number of pages in the collection. """ custom_voices_page: typing.List[ReturnCustomVoice] = pydantic.Field() """ - List of custom voices returned for the specified page number and page size. + List of Custom Voices for the specified `page_number` and `page_size`. """ if IS_PYDANTIC_V2: diff --git a/src/hume/empathic_voice/types/return_prompt.py b/src/hume/empathic_voice/types/return_prompt.py index 92919952..840dea9c 100644 --- a/src/hume/empathic_voice/types/return_prompt.py +++ b/src/hume/empathic_voice/types/return_prompt.py @@ -21,7 +21,7 @@ class ReturnPrompt(UniversalBaseModel): """ Version number for a Prompt. - Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. 
""" diff --git a/src/hume/empathic_voice/types/return_user_defined_tool.py b/src/hume/empathic_voice/types/return_user_defined_tool.py index df2711dc..9af9d000 100644 --- a/src/hume/empathic_voice/types/return_user_defined_tool.py +++ b/src/hume/empathic_voice/types/return_user_defined_tool.py @@ -27,7 +27,7 @@ class ReturnUserDefinedTool(UniversalBaseModel): """ Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. """ diff --git a/src/hume/empathic_voice/types/return_voice.py b/src/hume/empathic_voice/types/return_voice.py index 24b3f554..c78378e9 100644 --- a/src/hume/empathic_voice/types/return_voice.py +++ b/src/hume/empathic_voice/types/return_voice.py @@ -3,7 +3,7 @@ from ...core.pydantic_utilities import UniversalBaseModel import typing import pydantic -from .return_voice_name import ReturnVoiceName +from .return_custom_voice import ReturnCustomVoice from ...core.pydantic_utilities import IS_PYDANTIC_V2 @@ -17,11 +17,15 @@ class ReturnVoice(UniversalBaseModel): The provider of the voice to use. Currently, only `HUME_AI` is supported as the voice provider. """ - name: typing.Optional[ReturnVoiceName] = pydantic.Field(default=None) + name: typing.Optional[str] = pydantic.Field(default=None) """ - String with the name of the voice to use. Maximum length of 75 characters. Will be converted to all-uppercase. + The name of the specified voice. + + This will either be the name of a previously created Custom Voice or one of our 7 base voices: `ITO`, `KORA`, `DACHER`, `AURA`, `FINN`, `WHIMSY`, or `STELLA`. """ + custom_voice: ReturnCustomVoice + if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: diff --git a/src/hume/empathic_voice/types/return_voice_name.py b/src/hume/empathic_voice/types/return_voice_name.py deleted file mode 100644 index d84f4d40..00000000 --- a/src/hume/empathic_voice/types/return_voice_name.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ReturnVoiceName = typing.Union[typing.Literal["ITO", "DACHER", "KORA"], typing.Any] diff --git a/src/hume/expression_measurement/__init__.py b/src/hume/expression_measurement/__init__.py index 08e71c56..008b73ad 100644 --- a/src/hume/expression_measurement/__init__.py +++ b/src/hume/expression_measurement/__init__.py @@ -1,312 +1,5 @@ # This file was auto-generated by Fern from our API Definition. from . 
import batch, stream -from .batch import ( - Alternative, - Bcp47Tag, - BoundingBox, - BurstPrediction, - Classification, - CompletedEmbeddingGeneration, - CompletedInference, - CompletedState, - CompletedTlInference, - CompletedTraining, - CustomModel, - CustomModelId, - CustomModelPrediction, - CustomModelRequest, - CustomModelVersionId, - CustomModelsInferenceJob, - CustomModelsTrainingJob, - Dataset, - DatasetId, - DatasetVersionId, - DescriptionsScore, - Direction, - EmbeddingGenerationBaseRequest, - EmbeddingGenerationJob, - EmotionScore, - Error, - EvaluationArgs, - Face, - FacePrediction, - FacemeshPrediction, - FacsScore, - Failed, - FailedState, - File, - Granularity, - GroupedPredictionsBurstPrediction, - GroupedPredictionsFacePrediction, - GroupedPredictionsFacemeshPrediction, - GroupedPredictionsLanguagePrediction, - GroupedPredictionsNerPrediction, - GroupedPredictionsProsodyPrediction, - InProgress, - InProgressState, - InferenceBaseRequest, - InferenceJob, - InferencePrediction, - InferenceRequest, - InferenceResults, - InferenceSourcePredictResult, - JobEmbeddingGeneration, - JobId, - JobInference, - JobTlInference, - JobTraining, - Language, - LanguagePrediction, - Models, - ModelsPredictions, - Ner, - NerPrediction, - Null, - PositionInterval, - PredictionsOptionalNullBurstPrediction, - PredictionsOptionalNullFacePrediction, - PredictionsOptionalNullFacemeshPrediction, - PredictionsOptionalTranscriptionMetadataLanguagePrediction, - PredictionsOptionalTranscriptionMetadataNerPrediction, - PredictionsOptionalTranscriptionMetadataProsodyPrediction, - Prosody, - ProsodyPrediction, - Queued, - QueuedState, - RegistryFileDetail, - Regression, - SentimentScore, - SortBy, - Source, - SourceFile, - SourceTextSource, - SourceUrl, - StateEmbeddingGeneration, - StateEmbeddingGenerationCompletedEmbeddingGeneration, - StateEmbeddingGenerationFailed, - StateEmbeddingGenerationInProgress, - StateEmbeddingGenerationQueued, - StateInference, - StateTlInference, - StateTlInferenceCompletedTlInference, - StateTlInferenceFailed, - StateTlInferenceInProgress, - StateTlInferenceQueued, - StateTraining, - StateTrainingCompletedTraining, - StateTrainingFailed, - StateTrainingInProgress, - StateTrainingQueued, - Status, - Tag, - Target, - Task, - TaskClassification, - TaskRegression, - TextSource, - TimeInterval, - TlInferenceBaseRequest, - TlInferencePrediction, - TlInferenceResults, - TlInferenceSourcePredictResult, - ToxicityScore, - TrainingBaseRequest, - TrainingCustomModel, - Transcription, - TranscriptionMetadata, - Type, - Unconfigurable, - UnionJob, - UnionPredictResult, - Url, - ValidationArgs, - When, - Window, -) -from .stream import ( - Config, - EmotionEmbedding, - EmotionEmbeddingItem, - JobDetails, - Sentiment, - SentimentItem, - StreamBoundingBox, - StreamErrorMessage, - StreamModelPredictionsBurst, - StreamModelPredictionsBurstPredictionsItem, - StreamModelPredictionsFace, - StreamModelPredictionsFacePredictionsItem, - StreamModelPredictionsFacemesh, - StreamModelPredictionsFacemeshPredictionsItem, - StreamModelPredictionsJobDetails, - StreamModelPredictionsLanguage, - StreamModelPredictionsLanguagePredictionsItem, - StreamModelPredictionsProsody, - StreamModelPredictionsProsodyPredictionsItem, - StreamModelsEndpointPayload, - StreamModelsEndpointPayloadModelsFace, - StreamModelsEndpointPayloadModelsLanguage, - StreamWarningMessage, - StreamWarningMessageJobDetails, - SubscribeEvent, - TextPosition, - TimeRange, - Toxicity, - ToxicityItem, -) -__all__ = [ - "Alternative", - 
"Bcp47Tag", - "BoundingBox", - "BurstPrediction", - "Classification", - "CompletedEmbeddingGeneration", - "CompletedInference", - "CompletedState", - "CompletedTlInference", - "CompletedTraining", - "Config", - "CustomModel", - "CustomModelId", - "CustomModelPrediction", - "CustomModelRequest", - "CustomModelVersionId", - "CustomModelsInferenceJob", - "CustomModelsTrainingJob", - "Dataset", - "DatasetId", - "DatasetVersionId", - "DescriptionsScore", - "Direction", - "EmbeddingGenerationBaseRequest", - "EmbeddingGenerationJob", - "EmotionEmbedding", - "EmotionEmbeddingItem", - "EmotionScore", - "Error", - "EvaluationArgs", - "Face", - "FacePrediction", - "FacemeshPrediction", - "FacsScore", - "Failed", - "FailedState", - "File", - "Granularity", - "GroupedPredictionsBurstPrediction", - "GroupedPredictionsFacePrediction", - "GroupedPredictionsFacemeshPrediction", - "GroupedPredictionsLanguagePrediction", - "GroupedPredictionsNerPrediction", - "GroupedPredictionsProsodyPrediction", - "InProgress", - "InProgressState", - "InferenceBaseRequest", - "InferenceJob", - "InferencePrediction", - "InferenceRequest", - "InferenceResults", - "InferenceSourcePredictResult", - "JobDetails", - "JobEmbeddingGeneration", - "JobId", - "JobInference", - "JobTlInference", - "JobTraining", - "Language", - "LanguagePrediction", - "Models", - "ModelsPredictions", - "Ner", - "NerPrediction", - "Null", - "PositionInterval", - "PredictionsOptionalNullBurstPrediction", - "PredictionsOptionalNullFacePrediction", - "PredictionsOptionalNullFacemeshPrediction", - "PredictionsOptionalTranscriptionMetadataLanguagePrediction", - "PredictionsOptionalTranscriptionMetadataNerPrediction", - "PredictionsOptionalTranscriptionMetadataProsodyPrediction", - "Prosody", - "ProsodyPrediction", - "Queued", - "QueuedState", - "RegistryFileDetail", - "Regression", - "Sentiment", - "SentimentItem", - "SentimentScore", - "SortBy", - "Source", - "SourceFile", - "SourceTextSource", - "SourceUrl", - "StateEmbeddingGeneration", - "StateEmbeddingGenerationCompletedEmbeddingGeneration", - "StateEmbeddingGenerationFailed", - "StateEmbeddingGenerationInProgress", - "StateEmbeddingGenerationQueued", - "StateInference", - "StateTlInference", - "StateTlInferenceCompletedTlInference", - "StateTlInferenceFailed", - "StateTlInferenceInProgress", - "StateTlInferenceQueued", - "StateTraining", - "StateTrainingCompletedTraining", - "StateTrainingFailed", - "StateTrainingInProgress", - "StateTrainingQueued", - "Status", - "StreamBoundingBox", - "StreamErrorMessage", - "StreamModelPredictionsBurst", - "StreamModelPredictionsBurstPredictionsItem", - "StreamModelPredictionsFace", - "StreamModelPredictionsFacePredictionsItem", - "StreamModelPredictionsFacemesh", - "StreamModelPredictionsFacemeshPredictionsItem", - "StreamModelPredictionsJobDetails", - "StreamModelPredictionsLanguage", - "StreamModelPredictionsLanguagePredictionsItem", - "StreamModelPredictionsProsody", - "StreamModelPredictionsProsodyPredictionsItem", - "StreamModelsEndpointPayload", - "StreamModelsEndpointPayloadModelsFace", - "StreamModelsEndpointPayloadModelsLanguage", - "StreamWarningMessage", - "StreamWarningMessageJobDetails", - "SubscribeEvent", - "Tag", - "Target", - "Task", - "TaskClassification", - "TaskRegression", - "TextPosition", - "TextSource", - "TimeInterval", - "TimeRange", - "TlInferenceBaseRequest", - "TlInferencePrediction", - "TlInferenceResults", - "TlInferenceSourcePredictResult", - "Toxicity", - "ToxicityItem", - "ToxicityScore", - "TrainingBaseRequest", - 
"TrainingCustomModel", - "Transcription", - "TranscriptionMetadata", - "Type", - "Unconfigurable", - "UnionJob", - "UnionPredictResult", - "Url", - "ValidationArgs", - "When", - "Window", - "batch", - "stream", -] +__all__ = ["batch", "stream"] diff --git a/tests/empathic_voice/test_custom_voices.py b/tests/empathic_voice/test_custom_voices.py new file mode 100644 index 00000000..73004c09 --- /dev/null +++ b/tests/empathic_voice/test_custom_voices.py @@ -0,0 +1,164 @@ +# This file was auto-generated by Fern from our API Definition. + +from hume import HumeClient +from hume import AsyncHumeClient +import typing +from ..utilities import validate_response + + +async def test_get_return_custom_voices_for_user(client: HumeClient, async_client: AsyncHumeClient) -> None: + expected_response: typing.Any = { + "page_number": 1, + "page_size": 1, + "total_pages": 1, + "custom_voices_page": [ + { + "id": "id", + "version": 1, + "name": "name", + "created_on": 1000000, + "modified_on": 1000000, + "base_voice": "ITO", + "parameter_model": "20240715-4parameter", + "parameters": {}, + } + ], + } + expected_types: typing.Any = { + "page_number": "integer", + "page_size": "integer", + "total_pages": "integer", + "custom_voices_page": ( + "list", + { + 0: { + "id": None, + "version": "integer", + "name": None, + "created_on": None, + "modified_on": None, + "base_voice": None, + "parameter_model": None, + "parameters": {}, + } + }, + ), + } + response = client.empathic_voice.custom_voices.get_return_custom_voices_for_user() + validate_response(response, expected_response, expected_types) + + async_response = await async_client.empathic_voice.custom_voices.get_return_custom_voices_for_user() + validate_response(async_response, expected_response, expected_types) + + +async def test_create_new_custom_voice(client: HumeClient, async_client: AsyncHumeClient) -> None: + expected_response: typing.Any = { + "id": "id", + "version": 1, + "name": "name", + "created_on": 1000000, + "modified_on": 1000000, + "base_voice": "ITO", + "parameter_model": "20240715-4parameter", + "parameters": {"gender": 1, "huskiness": 1, "nasality": 1, "pitch": 1}, + } + expected_types: typing.Any = { + "id": None, + "version": "integer", + "name": None, + "created_on": None, + "modified_on": None, + "base_voice": None, + "parameter_model": None, + "parameters": {"gender": "integer", "huskiness": "integer", "nasality": "integer", "pitch": "integer"}, + } + response = client.empathic_voice.custom_voices.create_new_custom_voice(name="name", base_voice="ITO") + validate_response(response, expected_response, expected_types) + + async_response = await async_client.empathic_voice.custom_voices.create_new_custom_voice( + name="name", base_voice="ITO" + ) + validate_response(async_response, expected_response, expected_types) + + +async def test_get_return_custom_voice_by_custom_voice_id(client: HumeClient, async_client: AsyncHumeClient) -> None: + expected_response: typing.Any = { + "id": "id", + "version": 1, + "name": "name", + "created_on": 1000000, + "modified_on": 1000000, + "base_voice": "ITO", + "parameter_model": "20240715-4parameter", + "parameters": {"gender": 1, "huskiness": 1, "nasality": 1, "pitch": 1}, + } + expected_types: typing.Any = { + "id": None, + "version": "integer", + "name": None, + "created_on": None, + "modified_on": None, + "base_voice": None, + "parameter_model": None, + "parameters": {"gender": "integer", "huskiness": "integer", "nasality": "integer", "pitch": "integer"}, + } + response = 
client.empathic_voice.custom_voices.get_return_custom_voice_by_custom_voice_id(id="id") + validate_response(response, expected_response, expected_types) + + async_response = await async_client.empathic_voice.custom_voices.get_return_custom_voice_by_custom_voice_id(id="id") + validate_response(async_response, expected_response, expected_types) + + +async def test_add_new_custom_voice_version(client: HumeClient, async_client: AsyncHumeClient) -> None: + expected_response: typing.Any = { + "id": "id", + "version": 1, + "name": "name", + "created_on": 1000000, + "modified_on": 1000000, + "base_voice": "ITO", + "parameter_model": "20240715-4parameter", + "parameters": {"gender": 1, "huskiness": 1, "nasality": 1, "pitch": 1}, + } + expected_types: typing.Any = { + "id": None, + "version": "integer", + "name": None, + "created_on": None, + "modified_on": None, + "base_voice": None, + "parameter_model": None, + "parameters": {"gender": "integer", "huskiness": "integer", "nasality": "integer", "pitch": "integer"}, + } + response = client.empathic_voice.custom_voices.add_new_custom_voice_version(id="id", name="name", base_voice="ITO") + validate_response(response, expected_response, expected_types) + + async_response = await async_client.empathic_voice.custom_voices.add_new_custom_voice_version( + id="id", name="name", base_voice="ITO" + ) + validate_response(async_response, expected_response, expected_types) + + +async def test_delete_custom_voice(client: HumeClient, async_client: AsyncHumeClient) -> None: + # Type ignore to avoid mypy complaining about the function not being meant to return a value + assert ( + client.empathic_voice.custom_voices.delete_custom_voice(id="id") # type: ignore[func-returns-value] + is None + ) + + assert ( + await async_client.empathic_voice.custom_voices.delete_custom_voice(id="id") # type: ignore[func-returns-value] + is None + ) + + +async def test_update_custom_voice_name(client: HumeClient, async_client: AsyncHumeClient) -> None: + expected_response: typing.Any = "string" + expected_types: typing.Any = None + response = client.empathic_voice.custom_voices.update_custom_voice_name(id="string", name="string") + validate_response(response, expected_response, expected_types) + + async_response = await async_client.empathic_voice.custom_voices.update_custom_voice_name( + id="string", name="string" + ) + validate_response(async_response, expected_response, expected_types) diff --git a/tests/expression_measurement/batch/__init__.py b/tests/expression_measurement/batch/__init__.py new file mode 100644 index 00000000..f3ea2659 --- /dev/null +++ b/tests/expression_measurement/batch/__init__.py @@ -0,0 +1,2 @@ +# This file was auto-generated by Fern from our API Definition. + diff --git a/tests/expression_measurement/test_batch.py b/tests/expression_measurement/batch/test_root.py similarity index 99% rename from tests/expression_measurement/test_batch.py rename to tests/expression_measurement/batch/test_root.py index f250e8f8..921f2372 100644 --- a/tests/expression_measurement/test_batch.py +++ b/tests/expression_measurement/batch/test_root.py @@ -3,7 +3,7 @@ from hume import HumeClient from hume import AsyncHumeClient import typing -from ..utilities import validate_response +from ...utilities import validate_response async def test_list_jobs(client: HumeClient, async_client: AsyncHumeClient) -> None:
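
Taken together, the generated tests above sketch the new custom_voices surface added by this regeneration. The snippet below is a minimal usage sketch derived only from those tests: the api_key keyword on the client constructor and all literal values (voice names, the id attribute access on the returned model) are placeholders/assumptions, not something this patch itself demonstrates.

    # Minimal usage sketch based on the generated tests in this patch.
    # The api_key keyword and all literal values are assumptions/placeholders.
    from hume import HumeClient

    client = HumeClient(api_key="YOUR_HUME_API_KEY")

    # Create a Custom Voice derived from the ITO base voice.
    voice = client.empathic_voice.custom_voices.create_new_custom_voice(
        name="my-custom-voice",
        base_voice="ITO",
    )

    # List the Custom Voices belonging to the authenticated user.
    voices_page = client.empathic_voice.custom_voices.get_return_custom_voices_for_user()

    # Fetch, rename, and finally delete the voice created above
    # (the .id attribute is assumed from the expected response shape in the tests).
    fetched = client.empathic_voice.custom_voices.get_return_custom_voice_by_custom_voice_id(id=voice.id)
    client.empathic_voice.custom_voices.update_custom_voice_name(id=voice.id, name="renamed-voice")
    client.empathic_voice.custom_voices.delete_custom_voice(id=voice.id)

Note also that, with __all__ in src/hume/expression_measurement/__init__.py narrowed to ["batch", "stream"], the batch and stream types are presumably no longer re-exported from the package root and would instead be imported from the submodules directly, e.g. from hume.expression_measurement.batch import InferenceBaseRequest.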