release: 4.22.0 #568

Merged
6 changes: 0 additions & 6 deletions .prettierrc

This file was deleted.

7 changes: 7 additions & 0 deletions .prettierrc.json
@@ -0,0 +1,7 @@
{
"arrowParens": "always",
"experimentalTernaries": true,
"printWidth": 110,
"singleQuote": true,
"trailingComma": "all"
}
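For context, here is a minimal sketch (not part of this diff) of the new config applied through Prettier's Node API; note that `experimentalTernaries` ships in Prettier 3.1+, which the `^3.0.0` pin below in package.json permits:

```ts
import * as prettier from 'prettier';

// A one-liner with nested ternaries; the new config reflows it into the
// "postfix" ternary style that `experimentalTernaries` enables.
const source = `const sign = (x: number) => x > 0 ? 'pos' : x < 0 ? 'neg' : 'zero';`;

const formatted = await prettier.format(source, {
  parser: 'typescript',
  arrowParens: 'always',
  experimentalTernaries: true,
  printWidth: 110,
  singleQuote: true,
  trailingComma: 'all',
});
console.log(formatted);
```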
2 changes: 1 addition & 1 deletion .release-please-manifest.json
@@ -1,3 +1,3 @@
{
".": "4.21.0"
".": "4.22.0"
}
13 changes: 13 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,18 @@
# Changelog

## 4.22.0 (2023-12-15)

Full Changelog: [v4.21.0...v4.22.0](https://github.com/openai/openai-node/compare/v4.21.0...v4.22.0)

### Features

* **api:** add optional `name` argument + improve docs ([#569](https://github.com/openai/openai-node/issues/569)) ([3b68ace](https://github.com/openai/openai-node/commit/3b68ace533976aedbf642d9b018d0de8d9a8bb88))


### Chores

* update prettier ([#567](https://github.com/openai/openai-node/issues/567)) ([83dec2a](https://github.com/openai/openai-node/commit/83dec2af62c481d7de16d8a3644aa239ded9e30c))

## 4.21.0 (2023-12-11)

Full Changelog: [v4.20.1...v4.21.0](https://github.com/openai/openai-node/compare/v4.20.1...v4.21.0)
2 changes: 1 addition & 1 deletion README.md
@@ -21,7 +21,7 @@ You can import in Deno via:
<!-- x-release-please-start-version -->

```ts
- import OpenAI from 'https://deno.land/x/openai@v4.21.0/mod.ts';
+ import OpenAI from 'https://deno.land/x/openai@v4.22.0/mod.ts';
```

<!-- x-release-please-end -->
2 changes: 1 addition & 1 deletion build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:

\`\`\`ts
import OpenAI from "https://deno.land/x/openai@v4.21.0/mod.ts";
import OpenAI from "https://deno.land/x/openai@v4.22.0/mod.ts";

const client = new OpenAI();
\`\`\`
8 changes: 4 additions & 4 deletions package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
"version": "4.21.0",
"version": "4.22.0",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI <support@openai.com>",
"types": "dist/index.d.ts",
@@ -15,7 +15,7 @@
"build": "bash ./build",
"prepack": "echo 'to pack, run yarn build && (cd dist; yarn pack)' && exit 1",
"prepublishOnly": "echo 'to publish, run yarn build && (cd dist; yarn publish)' && exit 1",
"format": "prettier --write --cache --cache-strategy metadata . !dist",
"format": "prettier --write --cache --cache-strategy metadata . !dist",
"prepare": "if [ $(basename $(dirname $PWD)) = 'node_modules' ]; then npm run build; fi",
"tsn": "ts-node -r tsconfig-paths/register",
"lint": "eslint --ext ts,js .",
@@ -37,10 +37,10 @@
"@typescript-eslint/eslint-plugin": "^6.7.0",
"@typescript-eslint/parser": "^6.7.0",
"eslint": "^8.49.0",
"eslint-plugin-prettier": "^4.0.0",
"eslint-plugin-prettier": "^5.0.0",
"eslint-plugin-unused-imports": "^2.0.0",
"jest": "^29.4.0",
"prettier": "rattrayalex/prettier#postfix-ternaries",
"prettier": "^3.0.0",
"ts-jest": "^29.1.0",
"ts-morph": "^19.0.0",
"ts-node": "^10.5.0",
3 changes: 2 additions & 1 deletion src/error.ts
@@ -33,7 +33,8 @@ export class APIError extends OpenAIError {
private static makeMessage(status: number | undefined, error: any, message: string | undefined) {
const msg =
error?.message ?
- typeof error.message === 'string' ? error.message
+ typeof error.message === 'string' ?
+ error.message
: JSON.stringify(error.message)
: error ? JSON.stringify(error)
: message;
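For readers parsing the new `experimentalTernaries` layout above, here is a restatement (not in the diff) of the same `makeMessage` branching as plain if/else:

```ts
// Equivalent control flow to the nested ternary in `makeMessage`:
function messageFrom(error: any, message: string | undefined): string | undefined {
  if (error?.message) {
    // `error.message` may be a plain string or a structured object.
    return typeof error.message === 'string' ? error.message : JSON.stringify(error.message);
  }
  if (error) return JSON.stringify(error);
  return message;
}
```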
16 changes: 7 additions & 9 deletions src/lib/AbstractChatCompletionRunner.ts
@@ -6,7 +6,6 @@ import {
type ChatCompletionMessage,
type ChatCompletionMessageParam,
type ChatCompletionCreateParams,
- type ChatCompletionAssistantMessageParam,
type ChatCompletionTool,
} from 'openai/resources/chat/completions';
import { APIUserAbortError, OpenAIError } from 'openai/error';
@@ -90,7 +89,6 @@ export abstract class AbstractChatCompletionRunner<
}

protected _addMessage(message: ChatCompletionMessageParam, emit = true) {
- // @ts-expect-error this works around a bug in the Azure OpenAI API in which `content` is missing instead of null.
if (!('content' in message)) message.content = null;

this.messages.push(message);
@@ -217,7 +215,7 @@
}

#getFinalContent(): string | null {
- return this.#getFinalMessage().content;
+ return this.#getFinalMessage().content ?? null;
}

/**
@@ -229,12 +227,12 @@
return this.#getFinalContent();
}

- #getFinalMessage(): ChatCompletionAssistantMessageParam {
+ #getFinalMessage(): ChatCompletionMessage {
let i = this.messages.length;
while (i-- > 0) {
const message = this.messages[i];
if (isAssistantMessage(message)) {
- return message;
+ return { ...message, content: message.content ?? null };
}
}
throw new OpenAIError('stream ended without producing a ChatCompletionMessage with role=assistant');
@@ -652,10 +650,10 @@ type CustomEvents<Event extends string> = {
: (...args: any[]) => void;
};

- type ListenerForEvent<
-   Events extends CustomEvents<any>,
-   Event extends keyof Events,
- > = Event extends keyof AbstractChatCompletionRunnerEvents ? AbstractChatCompletionRunnerEvents[Event]
+ type ListenerForEvent<Events extends CustomEvents<any>, Event extends keyof Events> = Event extends (
+   keyof AbstractChatCompletionRunnerEvents
+ ) ?
+   AbstractChatCompletionRunnerEvents[Event]
: Events[Event];

type ListenersForEvent<Events extends CustomEvents<any>, Event extends keyof Events> = Array<{
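The practical effect of the `#getFinalMessage()` changes above is that the runner helpers keep returning `string | null` even when an assistant message omits `content`. A minimal consumer sketch, assuming a configured `OPENAI_API_KEY` (the model and prompt are illustrative):

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

// Stream a completion and read the final assistant content once the run ends.
const runner = openai.beta.chat.completions
  .stream({ model: 'gpt-3.5-turbo', messages: [{ role: 'user', content: 'Say hello.' }] })
  .on('message', (message) => console.log('message:', message));

const content = await runner.finalContent(); // string | null
console.log('final:', content);
```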
4 changes: 3 additions & 1 deletion src/resources/audio/speech.ts
@@ -28,7 +28,9 @@ export interface SpeechCreateParams {

/**
* The voice to use when generating the audio. Supported voices are `alloy`,
- * `echo`, `fable`, `onyx`, `nova`, and `shimmer`.
+ * `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
+ * available in the
+ * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
*/
voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';

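A short usage sketch for the endpoint documented above (assumes `OPENAI_API_KEY` is set; the output file name and input text are invented):

```ts
import fs from 'node:fs';
import OpenAI from 'openai';

const openai = new OpenAI();

// Generate speech with one of the voices enumerated in `SpeechCreateParams`.
const response = await openai.audio.speech.create({
  model: 'tts-1',
  voice: 'alloy',
  input: 'The quick brown fox jumped over the lazy dog.',
});

// The result is a fetch `Response`; buffer it to write an mp3 to disk.
fs.writeFileSync('speech.mp3', Buffer.from(await response.arrayBuffer()));
```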
91 changes: 56 additions & 35 deletions src/resources/chat/completions.ts
@@ -105,21 +105,28 @@ export namespace ChatCompletion {

export interface ChatCompletionAssistantMessageParam {
/**
- * The contents of the assistant message.
+ * The role of the messages author, in this case `assistant`.
*/
- content: string | null;
+ role: 'assistant';

/**
- * The role of the messages author, in this case `assistant`.
+ * The contents of the assistant message. Required unless `tool_calls` or
+ * `function_call` is specified.
*/
- role: 'assistant';
+ content?: string | null;

/**
* Deprecated and replaced by `tool_calls`. The name and arguments of a function
* that should be called, as generated by the model.
*/
function_call?: ChatCompletionAssistantMessageParam.FunctionCall;

+ /**
+ * An optional name for the participant. Provides the model information to
+ * differentiate between participants of the same role.
+ */
+ name?: string;

/**
* The tool calls generated by the model, such as function calls.
*/
@@ -309,7 +316,8 @@ export namespace ChatCompletionContentPartImage {
url: string;

/**
- * Specifies the detail level of the image.
+ * Specifies the detail level of the image. Learn more in the
+ * [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding).
*/
detail?: 'auto' | 'low' | 'high';
}
@@ -340,9 +348,9 @@ export interface ChatCompletionFunctionCallOption {

export interface ChatCompletionFunctionMessageParam {
/**
- * The return value from the function call, to return to the model.
+ * The contents of the function message.
*/
- content: string | null;
+ content: string;

/**
* The name of the function to call.
@@ -451,12 +459,12 @@ export namespace ChatCompletionMessageToolCall {
* function.
*/
export interface ChatCompletionNamedToolChoice {
- function?: ChatCompletionNamedToolChoice.Function;
+ function: ChatCompletionNamedToolChoice.Function;

/**
* The type of the tool. Currently, only `function` is supported.
*/
- type?: 'function';
+ type: 'function';
}
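Since `function` and `type` are now required on this interface, forcing a specific tool needs both fields spelled out. A sketch (the function name is invented):

```ts
import { type ChatCompletionNamedToolChoice } from 'openai/resources/chat/completions';

// Force the model to call a particular tool; both fields are now mandatory.
const toolChoice: ChatCompletionNamedToolChoice = {
  type: 'function',
  function: { name: 'get_weather' },
};
```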

export namespace ChatCompletionNamedToolChoice {
@@ -477,12 +485,18 @@ export interface ChatCompletionSystemMessageParam {
/**
* The contents of the system message.
*/
- content: string | null;
+ content: string;

/**
* The role of the messages author, in this case `system`.
*/
role: 'system';

+ /**
+ * An optional name for the participant. Provides the model information to
+ * differentiate between participants of the same role.
+ */
+ name?: string;
}

export interface ChatCompletionTool {
@@ -511,7 +525,7 @@ export interface ChatCompletionToolMessageParam {
/**
* The contents of the tool message.
*/
- content: string | null;
+ content: string;

/**
* The role of the messages author, in this case `tool`.
@@ -528,12 +542,18 @@ export interface ChatCompletionUserMessageParam {
/**
* The contents of the user message.
*/
- content: string | Array<ChatCompletionContentPart> | null;
+ content: string | Array<ChatCompletionContentPart>;

/**
* The role of the messages author, in this case `user`.
*/
role: 'user';

+ /**
+ * An optional name for the participant. Provides the model information to
+ * differentiate between participants of the same role.
+ */
+ name?: string;
}
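These `name` fields are the optional `name` argument called out in the changelog entry for [#569](https://github.com/openai/openai-node/issues/569). A sketch of how a caller might use them (participant names and prompts are invented):

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

// `name` distinguishes multiple participants that share the `user` role.
const completion = await openai.chat.completions.create({
  model: 'gpt-3.5-turbo',
  messages: [
    { role: 'system', content: 'You are moderating a two-person debate.', name: 'moderator' },
    { role: 'user', content: 'Opening statement: cats are better than dogs.', name: 'alice' },
    { role: 'user', content: 'Rebuttal: dogs are more loyal.', name: 'bob' },
  ],
});
console.log(completion.choices[0].message.content);
```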

/**
@@ -567,19 +587,19 @@
| 'gpt-4-32k'
| 'gpt-4-32k-0314'
| 'gpt-4-32k-0613'
+ | 'gpt-3.5-turbo-1106'
| 'gpt-3.5-turbo'
| 'gpt-3.5-turbo-16k'
| 'gpt-3.5-turbo-0301'
| 'gpt-3.5-turbo-0613'
- | 'gpt-3.5-turbo-1106'
| 'gpt-3.5-turbo-16k-0613';

/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their
* existing frequency in the text so far, decreasing the model's likelihood to
* repeat the same line verbatim.
*
- * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
+ * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
*/
frequency_penalty?: number | null;

@@ -627,7 +647,9 @@
max_tokens?: number | null;

/**
- * How many chat completion choices to generate for each input message.
+ * How many chat completion choices to generate for each input message. Note that
+ * you will be charged based on the number of generated tokens across all of the
+ * choices. Keep `n` as `1` to minimize costs.
*/
n?: number | null;

@@ -636,7 +658,7 @@
* whether they appear in the text so far, increasing the model's likelihood to
* talk about new topics.
*
- * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
+ * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
*/
presence_penalty?: number | null;

@@ -649,10 +671,10 @@
* **Important:** when using JSON mode, you **must** also instruct the model to
* produce JSON yourself via a system or user message. Without this, the model may
* generate an unending stream of whitespace until the generation reaches the token
- * limit, resulting in increased latency and appearance of a "stuck" request. Also
- * note that the message content may be partially cut off if
- * `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
- * or the conversation exceeded the max context length.
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
*/
response_format?: ChatCompletionCreateParams.ResponseFormat;

@@ -734,23 +756,22 @@ export namespace ChatCompletionCreateParams {
*/
name: string;

+ /**
+ * A description of what the function does, used by the model to choose when and
+ * how to call the function.
+ */
+ description?: string;

/**
* The parameters the functions accepts, described as a JSON Schema object. See the
- * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
- * examples, and the
+ * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
+ * for examples, and the
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
* documentation about the format.
*
- * To describe a function that accepts no parameters, provide the value
- * `{"type": "object", "properties": {}}`.
- */
- parameters: Shared.FunctionParameters;

- /**
- * A description of what the function does, used by the model to choose when and
- * how to call the function.
+ * Omitting `parameters` defines a function with an empty parameter list.
*/
- description?: string;
+ parameters?: Shared.FunctionParameters;
}

/**
@@ -762,10 +783,10 @@
* **Important:** when using JSON mode, you **must** also instruct the model to
* produce JSON yourself via a system or user message. Without this, the model may
* generate an unending stream of whitespace until the generation reaches the token
- * limit, resulting in increased latency and appearance of a "stuck" request. Also
- * note that the message content may be partially cut off if
- * `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
- * or the conversation exceeded the max context length.
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
*/
export interface ResponseFormat {
/**
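Putting the JSON-mode warning above into practice — a sketch assuming a JSON-mode-capable model such as `gpt-3.5-turbo-1106` (the prompts are illustrative):

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

// JSON mode: set `response_format` AND instruct the model to emit JSON.
const completion = await openai.chat.completions.create({
  model: 'gpt-3.5-turbo-1106',
  response_format: { type: 'json_object' },
  messages: [
    { role: 'system', content: 'Reply with a JSON object: {"summary": string}.' },
    { role: 'user', content: 'Summarize: the quick brown fox jumps over the lazy dog.' },
  ],
});

console.log(JSON.parse(completion.choices[0].message.content ?? '{}'));
```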