From be1b13a724b0194bd62fae3c5945e0782bef5df8 Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Mon, 5 Feb 2024 22:27:03 -0800 Subject: [PATCH 01/18] Update TypeSpec --- audio/main.tsp | 1 - audio/models.tsp | 154 +- audio/operations.tsp | 67 +- {edits => chat}/main.tsp | 0 completions/chat-meta.tsp => chat/meta.tsp | 15 +- chat/models.tsp | 593 ++ chat/operations.tsp | 22 + common/models.tsp | 55 +- completions/models.tsp | 454 +- completions/operations.tsp | 16 +- edits/models.tsp | 69 - edits/operations.tsp | 19 - embeddings/models.tsp | 81 +- embeddings/operations.tsp | 4 +- files/models.tsp | 97 +- files/operations.tsp | 46 +- fine-tuning/models.tsp | 5 +- fine-tuning/operations.tsp | 39 +- images/models.tsp | 149 +- images/operations.tsp | 4 +- main.tsp | 5 +- models/main.tsp | 1 + models/models.tsp | 38 + models/operations.tsp | 47 + {moderation => moderations}/main.tsp | 0 {moderation => moderations}/models.tsp | 24 +- {moderation => moderations}/operations.tsp | 1 + openapi.yaml | 9334 +++++++++++++++----- package-lock.json | 632 +- package.json | 13 +- 30 files changed, 8745 insertions(+), 3240 deletions(-) rename {edits => chat}/main.tsp (100%) rename completions/chat-meta.tsp => chat/meta.tsp (94%) create mode 100644 chat/models.tsp create mode 100644 chat/operations.tsp delete mode 100644 edits/models.tsp delete mode 100644 edits/operations.tsp create mode 100644 models/main.tsp create mode 100644 models/models.tsp create mode 100644 models/operations.tsp rename {moderation => moderations}/main.tsp (100%) rename {moderation => moderations}/models.tsp (89%) rename {moderation => moderations}/operations.tsp (98%) diff --git a/audio/main.tsp b/audio/main.tsp index c6458821f..144c4aeaf 100644 --- a/audio/main.tsp +++ b/audio/main.tsp @@ -1,2 +1 @@ import "./operations.tsp"; -import "./models.tsp"; diff --git a/audio/models.tsp b/audio/models.tsp index a2a440a90..ba931af7e 100644 --- a/audio/models.tsp +++ b/audio/models.tsp @@ -1,6 +1,42 @@ 
-namespace OpenAI; +import "../common/models.tsp"; + +using TypeSpec.Http; using TypeSpec.OpenAPI; +namespace OpenAI; + +model CreateSpeechRequest { + /** One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` */ + @extension("x-oaiTypeLabel", "string") + `model`: string | TEXT_TO_SPEECH_MODELS; + + /** + * The text to generate audio for. The maximum length is 4096 characters. + */ + @maxLength(4096) + input: string; + + /** + * The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, + * `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the + * [Text to speech guide](/docs/guides/text-to-speech/voice-options). + */ + voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer"; + + /** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + * vtt. + */ + response_format?: "json" | "text" | "srt" | "verbose_json" | "vtt" = "json"; + + /** + * The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. + */ + @minValue(0.25) + @maxValue(4.0) + speed?: float64 = 1.0; +} + model CreateTranscriptionRequest { /** * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, @@ -12,7 +48,14 @@ model CreateTranscriptionRequest { /** ID of the model to use. Only `whisper-1` is currently available. */ @extension("x-oaiTypeLabel", "string") - `model`: string | "whisper-1"; + `model`: string | SPEECH_TO_TEXT_MODELS; + + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy + * and latency. + */ + language?: string; /** * An optional text to guide the model's style or continue a previous audio segment. The @@ -35,18 +78,6 @@ model CreateTranscriptionRequest { @minValue(0) @maxValue(1) temperature?: float64 = 0; - - /** - * The language of the input audio. 
Supplying the input language in - * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy - * and latency. - */ - language?: string; -} - -// Note: This does not currently support the non-default response format types. -model CreateTranscriptionResponse { - text: string; } model CreateTranslationRequest { @@ -60,7 +91,7 @@ model CreateTranslationRequest { /** ID of the model to use. Only `whisper-1` is currently available. */ @extension("x-oaiTypeLabel", "string") - `model`: string | "whisper-1"; + `model`: string | SPEECH_TO_TEXT_MODELS; /** * An optional text to guide the model's style or continue a previous audio segment. The @@ -86,7 +117,98 @@ model CreateTranslationRequest { temperature?: float64 = 0; } -// Note: This does not currently support the non-default response format types. +// TODO: This model is not defined in the OpenAI API spec. +model CreateTranscriptionResponse { + /** The transcribed text for the provided audio data. */ + text: string; + + /** The label that describes which operation type generated the accompanying response data. */ + task?: "transcribe"; + + /** The spoken language that was detected in the audio data. */ + language?: string; + + /** + * The total duration of the audio processed to produce accompanying transcription information. + */ + @encode("seconds", float64) + duration?: duration; + + /** + * A collection of information about the timing, probabilities, and other detail of each processed + * audio segment. + */ + segments?: AudioSegment[]; +} + +// TODO: This model is not defined in the OpenAI API spec. model CreateTranslationResponse { + /** The translated text for the provided audio data. */ text: string; + + /** The label that describes which operation type generated the accompanying response data. */ + task?: "translate"; + + /** The spoken language that was detected in the audio data. 
*/ + language?: string; + + /** The total duration of the audio processed to produce accompanying translation information. */ + @encode("seconds", float64) + duration?: duration; + + /** + * A collection of information about the timing, probabilities, and other detail of each processed + * audio segment. + */ + segments?: AudioSegment[]; } + +alias TEXT_TO_SPEECH_MODELS = + | "tts-1" + | "tts-1-hd"; + +alias SPEECH_TO_TEXT_MODELS = + | "whisper-1"; + +// TODO: This model is not defined in the OpenAI API spec. +model AudioSegment { + /** The zero-based index of this segment. */ + id: safeint; + + /** + * The seek position associated with the processing of this audio segment. Seek positions are + * expressed as hundredths of seconds. The model may process several segments from a single seek + * position, so while the seek position will never represent a later time than the segment's + * start, the segment's start may represent a significantly later time than the segment's + * associated seek position. + */ + seek: safeint; + + /** The time at which this segment started relative to the beginning of the audio. */ + @encode("seconds", float64) + start: duration; + + /** The time at which this segment ended relative to the beginning of the audio. */ + @encode("seconds", float64) + end: duration; + + /** The text that was part of this audio segment. */ + text: string; + + /** The token IDs matching the text in this audio segment. */ + tokens: TokenArray; + + /** The temperature score associated with this audio segment. */ + @minValue(0) + @maxValue(1) + temperature: float64; + + /** The average log probability associated with this audio segment. */ + avg_logprob: float64; + + /** The compression ratio of this audio segment. */ + compression_ratio: float64; + + /** The probability of no speech detection within this audio segment. 
*/ + no_speech_prob: float64; +} \ No newline at end of file diff --git a/audio/operations.tsp b/audio/operations.tsp index 636fb941a..a598297bf 100644 --- a/audio/operations.tsp +++ b/audio/operations.tsp @@ -2,34 +2,57 @@ import "@typespec/http"; import "@typespec/openapi"; import "../common/errors.tsp"; +import "./models.tsp"; using TypeSpec.Http; using TypeSpec.OpenAPI; namespace OpenAI; + @route("/audio") -namespace Audio { +interface Audio { + @route("speech") + @post + @operationId("createSpeech") + @tag("OpenAI") + @summary("Generates audio from the input text.") + createSpeech( + @body speech: CreateSpeechRequest, + ): { + @header contentType: "application/octet-stream"; + @header("Transfer-Encoding") transferEncoding: "chunked"; + @body @encode("binary") audio: bytes; + }; + @route("transcriptions") - interface Transcriptions { - @post - @operationId("createTranscription") - @tag("OpenAI") - @summary("Transcribes audio into the input language.") - createTranscription( - @header contentType: "multipart/form-data", - @body audio: CreateTranscriptionRequest, - ): CreateTranscriptionResponse | ErrorResponse; - } + @post + @operationId("createTranscription") + @tag("OpenAI") + @summary("Transcribes audio into the input language.") + createTranscription( + @header contentType: "multipart/form-data", + @body audio: CreateTranscriptionRequest, + ): + | CreateTranscriptionResponse + | { + @header contentType: "text/plain"; + @body text: string; + } + | ErrorResponse; @route("translations") - interface Translations { - @post - @operationId("createTranslation") - @tag("OpenAI") - @summary("Transcribes audio into the input language.") - createTranslation( - @header contentType: "multipart/form-data", - @body audio: CreateTranslationRequest, - ): CreateTranslationResponse | ErrorResponse; - } -} + @post + @operationId("createTranslation") + @tag("OpenAI") + @summary("Transcribes audio into the input language.") + createTranslation( + @header contentType: 
"multipart/form-data", + @body audio: CreateTranslationRequest, + ): + | CreateTranslationResponse + | { + @header contentType: "text/plain"; + @body text: string; + } + | ErrorResponse; +} \ No newline at end of file diff --git a/edits/main.tsp b/chat/main.tsp similarity index 100% rename from edits/main.tsp rename to chat/main.tsp diff --git a/completions/chat-meta.tsp b/chat/meta.tsp similarity index 94% rename from completions/chat-meta.tsp rename to chat/meta.tsp index 7823da41d..3296a6c42 100644 --- a/completions/chat-meta.tsp +++ b/chat/meta.tsp @@ -1,6 +1,9 @@ +import "./operations.tsp"; + +namespace OpenAI; using TypeSpec.OpenAPI; -@@extension(OpenAI.Completions.createCompletion, +@@extension(OpenAI.Chat.createChatCompletion, "x-oaiMeta", { name: "Create chat completion", @@ -166,3 +169,13 @@ using TypeSpec.OpenAPI; ], } ); + +// TODO: Fill in example here. +@@extension(OpenAI.CreateChatCompletionResponse, + "x-oaiMeta", + { + name: "The chat completion object", + group: "chat", + example: "", + } +); \ No newline at end of file diff --git a/chat/models.tsp b/chat/models.tsp new file mode 100644 index 000000000..a80178e99 --- /dev/null +++ b/chat/models.tsp @@ -0,0 +1,593 @@ +import "../common/models.tsp"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +model CreateChatCompletionRequest { + /** + * A list of messages comprising the conversation so far. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). + */ + @minItems(1) + messages: ChatCompletionRequestMessage[]; + + /** + * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) + * table for details on which models work with the Chat API. + */ + @extension("x-oaiTypeLabel", "string") + `model`: string | CHAT_COMPLETION_MODELS; + + /** + * Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing + * frequency in the text so far, decreasing the model's likelihood to repeat the same line + * verbatim. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + frequency_penalty?: Penalty | null = 0; + + /** + * Modify the likelihood of specified tokens appearing in the completion. + * + * Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an + * associated bias value from -100 to 100. Mathematically, the bias is added to the logits + * generated by the model prior to sampling. The exact effect will vary per model, but values + * between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + * should result in a ban or exclusive selection of the relevant token. + */ + @extension("x-oaiTypeLabel", "map") + logit_bias?: Record | null = null; + + /** + * Whether to return log probabilities of the output tokens or not. If true, returns the log + * probabilities of each output token returned in the `content` of `message`. This option is + * currently not available on the `gpt-4-vision-preview` model. + */ + logprobs?: boolean | null = false; + + /** + * An integer between 0 and 5 specifying the number of most likely tokens to return at each token + * position, each with an associated log probability. `logprobs` must be set to `true` if this + * parameter is used. + */ + top_logprobs?: TopLogprobs | null; + + /** + * The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + * + * The total length of input tokens and generated tokens is limited by the model's context length. + * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + * for counting tokens. + */ + max_tokens?: MaxTokens | null = 16; + + /** + * How many chat completion choices to generate for each input message. 
Note that you will be + * charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to + * minimize costs. + */ + n?: N | null = 1; + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + * in the text so far, increasing the model's likelihood to talk about new topics. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + presence_penalty?: Penalty | null = 0; + + /** + * An object specifying the format that the model must output. Compatible with + * [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the + * model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + * yourself via a system or user message. Without this, the model may generate an unending stream + * of whitespace until the generation reaches the token limit, resulting in a long-running and + * seemingly "stuck" request. Also note that the message content may be partially cut off if + * `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the + * conversation exceeded the max context length. + */ + response_format?: { + /** Must be one of `text` or `json_object`. */ + type?: "text" | "json_object" = "text"; + }; + + /** + * This feature is in Beta. + * + * If specified, our system will make a best effort to sample deterministically, such that + * repeated requests with the same `seed` and parameters should return the same result. + * + * Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + * parameter to monitor changes in the backend. 
+ */ + @extension( + "x-oaiMeta", + { + beta: true + } + ) + seed?: Seed | null; + + // TODO: Consider inlining when https://github.com/microsoft/typespec/issues/2356 is resolved + // https://github.com/microsoft/typespec/issues/2355 + /** Up to 4 sequences where the API will stop generating further tokens. */ + stop?: Stop | null = null; + + /** + * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available, with the stream terminated by a `data: [DONE]` message. + * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + */ + stream?: boolean | null = false; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + temperature?: Temperature | null = 1; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p?: TopP | null = 1; + + /** + * A list of tools the model may call. Currently, only functions are supported as a tool. Use this + * to provide a list of functions the model may generate JSON inputs for. */ + tools?: ChatCompletionTool[]; + + tool_choice?: ChatCompletionToolChoiceOption; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ + user?: User; + + /** + * Deprecated in favor of `tool_choice`. 
+ * + * Controls which (if any) function is called by the model. `none` means the model will not call a + * function and instead generates a message. `auto` means the model can pick between generating a + * message or calling a function. Specifying a particular function via `{"name": "my_function"}` + * forces the model to call that function. + * + * `none` is the default when no functions are present. `auto` is the default if functions are + * present. + */ + #deprecated "deprecated" + @extension("x-oaiExpandable", true) + function_call?: "none" | "auto" | ChatCompletionFunctionCallOption; + + /** + * Deprecated in favor of `tools`. + * + * A list of functions the model may generate JSON inputs for. + */ + #deprecated "deprecated" + @minItems(1) + @maxItems(128) + functions?: ChatCompletionFunctions[]; +} + +/** Represents a chat completion response returned by model, based on the provided input. */ +model CreateChatCompletionResponse { + /** A unique identifier for the chat completion. */ + id: string; + + /** A list of chat completion choices. Can be more than one if `n` is greater than 1. */ + choices: { + /** + * The reason the model stopped generating tokens. This will be `stop` if the model hit a + * natural stop point or a provided stop sequence, `length` if the maximum number of tokens + * specified in the request was reached, `content_filter` if content was omitted due to a flag + * from our content filters, `tool_calls` if the model called a tool, or `function_call` + * (deprecated) if the model called a function. + */ + finish_reason: "stop" | "length" | "tool_calls" | "content_filter" | "function_call"; + + /** The index of the choice in the list of choices. */ + index: safeint; + + message: ChatCompletionResponseMessage; + + /** Log probability information for the choice. */ + logprobs: { + content: ChatCompletionTokenLogprob[] | null; + } | null; + }[]; + + /** The Unix timestamp (in seconds) of when the chat completion was created. 
*/ + @encode("unixTimestamp", int32) + created: utcDateTime; + + /** The model used for the chat completion. */ + `model`: string; + + /** + * This fingerprint represents the backend configuration that the model runs with. + * + * Can be used in conjunction with the `seed` request parameter to understand when backend changes + * have been made that might impact determinism. + */ + system_fingerprint?: string; + + /** The object type, which is always `chat.completion`. */ + object: "chat.completion"; + + usage?: CompletionUsage; +} + +alias CHAT_COMPLETION_MODELS = + | "gpt-4-0125-preview" + | "gpt-4-turbo-preview" + | "gpt-4-1106-preview" + | "gpt-4-vision-preview" + | "gpt-4" + | "gpt-4-0314" + | "gpt-4-0613" + | "gpt-4-32k" + | "gpt-4-32k-0314" + | "gpt-4-32k-0613" + | "gpt-3.5-turbo" + | "gpt-3.5-turbo-16k" + | "gpt-3.5-turbo-0301" + | "gpt-3.5-turbo-0613" + | "gpt-3.5-turbo-1106" + | "gpt-3.5-turbo-16k-0613"; + +@minValue(-2) +@maxValue(2) +scalar Penalty extends float64; + +@minValue(0) +@maxValue(2) +scalar Temperature extends float64; + +@minValue(0) +@maxValue(1) +scalar TopP extends float64; + +@minValue(0) +@maxValue(5) +scalar TopLogprobs extends safeint; + +@minValue(1) +@maxValue(128) +scalar N extends safeint; + +@minValue(0) +scalar MaxTokens extends safeint; + +// TODO: Min and max exceed the limits of safeint. +@minValue(-9223372036854775808) +@maxValue(9223372036854775807) +scalar Seed extends safeint; + +@oneOf +union Stop { + string, + StopSequences, +} + +@minItems(1) +@maxItems(4) +model StopSequences is string[]; + +/** Usage statistics for the completion request. */ +model CompletionUsage { + /** Number of tokens in the prompt. */ + prompt_tokens: safeint; + + /** Number of tokens in the generated completion */ + completion_tokens: safeint; + + /** Total number of tokens used in the request (prompt + completion). */ + total_tokens: safeint; +} + +model ChatCompletionTool { + /** The type of the tool. Currently, only `function` is supported. 
*/ + type: "function"; + + function: FunctionObject; +} + +/** + * Controls which (if any) function is called by the model. `none` means the model will not call a + * function and instead generates a message. `auto` means the model can pick between generating a + * message or calling a function. Specifying a particular function via + * `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that + * function. + * + * `none` is the default when no functions are present. `auto` is the default if functions are + * present. + */ +@oneOf +@extension("x-oaiExpandable", true) +union ChatCompletionToolChoiceOption { + "none", + "auto", + ChatCompletionNamedToolChoice, +} + +/** Specifies a tool the model should use. Use to force the model to call a specific function. */ +model ChatCompletionNamedToolChoice { + /** The type of the tool. Currently, only `function` is supported. */ + type: "function"; + + function: { + /** The name of the function to call. */ + name: string; + } +} + +@oneOf +union ChatCompletionRequestUserMessageContent { + /** The text contents of the message. */ + string, + + /** + * An array of content parts with a defined type, each can be of type `text` or `image_url` when + * passing in images. You can pass multiple images by adding multiple `image_url` content parts. + * Image input is only supported when using the `gpt-4-visual-preview` model. + */ + ChatCompletionRequestMessageContentParts, +}; + +@minItems(1) +model ChatCompletionRequestMessageContentParts is ChatCompletionRequestMessageContentPart[]; + +@oneOf +@extension("x-oaiExpandable", true) +union ChatCompletionRequestMessageContentPart { + ChatCompletionRequestMessageContentPartText, + ChatCompletionRequestMessageContentPartImage, +} + +model ChatCompletionRequestMessageContentPartText { + /** The type of the content part. */ + type: "text"; + + /** The text content. 
*/ + text: string; +} + +model ChatCompletionRequestMessageContentPartImage { + /** The type of the content part. */ + type: "image_url"; + + image_url: { + /** Either a URL of the image or the base64 encoded image data. */ + url: url | string; + + /** + * Specifies the detail level of the image. Learn more in the + * [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). + */ + detail?: "auto" | "low" | "high" = "auto"; + } +} + +/** The tool calls generated by the model, such as function calls. */ +model ChatCompletionMessageToolCalls is ChatCompletionMessageToolCall[]; + +model ChatCompletionMessageToolCall { + // TODO: index included when streaming + /** The ID of the tool call. */ + id: string; + + /** The type of the tool. Currently, only `function` is supported. */ + type: "function"; + + /** The function that the model called. */ + function: { + /** The name of the function to call. */ + name: string; + + /** + * The arguments to call the function with, as generated by the model in JSON format. Note that + * the model does not always generate valid JSON, and may hallucinate parameters not defined by + * your function schema. Validate the arguments in your code before calling your function. + */ + arguments: string; + } +}; + +@oneOf +@extension("x-oaiExpandable", true) +union ChatCompletionRequestMessage { + ChatCompletionRequestSystemMessage, + ChatCompletionRequestUserMessage, + ChatCompletionRequestAssistantMessage, + ChatCompletionRequestToolMessage, + ChatCompletionRequestFunctionMessage, +} + +model ChatCompletionRequestSystemMessage { + /** The contents of the system message. */ + @extension("x-oaiExpandable", true) + content: string , + + /** The role of the messages author, in this case `system`. */ + role: "system", + + /** + * An optional name for the participant. Provides the model information to differentiate between + * participants of the same role. 
+ */ + name?: string; +} + +model ChatCompletionRequestUserMessage { + /** The contents of the system message. */ + @extension("x-oaiExpandable", true) + content: ChatCompletionRequestUserMessageContent, + + /** The role of the messages author, in this case `user`. */ + role: "user", + + /** + * An optional name for the participant. Provides the model information to differentiate between + * participants of the same role. + */ + name?: string; +} + +model ChatCompletionRequestAssistantMessage { + /** + * The contents of the assistant message. Required unless `tool_calls` or `function_call` is' + * specified. + */ + content?: string | null, + + /** The role of the messages author, in this case `assistant`. */ + role: "assistant", + + /** + * An optional name for the participant. Provides the model information to differentiate between + * participants of the same role. + */ + name?: string; + + tool_calls?: ChatCompletionMessageToolCalls; + + /** + * Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be + * called, as generated by the model. + */ + #deprecated "deprecated" + function_call?: { + /** + * The arguments to call the function with, as generated by the model in JSON format. Note that + * the model does not always generate valid JSON, and may hallucinate parameters not defined by + * your function schema. Validate the arguments in your code before calling your function. + */ + arguments: string; + + /** The name of the function to call. */ + name: string; + + } +} + +model ChatCompletionRequestToolMessage { + /** The role of the messages author, in this case `tool`. */ + role: "tool", + + /** The contents of the tool message. */ + content: string; + + /** Tool call that this message is responding to. */ + tool_call_id: string; +} + +model ChatCompletionRequestFunctionMessage { + /** The role of the messages author, in this case `function`. */ + role: "function", + + /** The contents of the function message. 
*/ + content: string | null; + + /** The name of the function to call. */ + name: string; +} + +model ChatCompletionResponseMessage { + /** The contents of the message. */ + content: string | null; + + tool_calls?: ChatCompletionMessageToolCalls; + + /** The role of the author of this message. */ + role: "assistant"; + + /** Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. */ + #deprecated "deprecated" + function_call?: { + /** + * The arguments to call the function with, as generated by the model in JSON format. Note that + * the model does not always generate valid JSON, and may hallucinate parameters not defined by + * your function schema. Validate the arguments in your code before calling your function. + */ + arguments: string; + + /** The name of the function to call. */ + name: string; + }; +} + +model ChatCompletionTokenLogprob { + /** The token. */ + token: string; + + /** The log probability of this token. */ + logprob: float64; + + /** + * A list of integers representing the UTF-8 bytes representation of the token. Useful in + * instances where characters are represented by multiple tokens and their byte representations + * must be combined to generate the correct text representation. Can be `null` if there is no + * bytes representation for the token. + */ + bytes: safeint[] | null; + + /** + * List of the most likely tokens and their log probability, at this token position. In rare + * cases, there may be fewer than the number of requested `top_logprobs` returned. + */ + top_logprobs: { + /** The token. */ + token: string; + + /** The log probability of this token. */ + logprob: float64; + + /** + * A list of integers representing the UTF-8 bytes representation of the token. Useful in + * instances where characters are represented by multiple tokens and their byte representations + * must be combined to generate the correct text representation. 
Can be `null` if there is no + * bytes representation for the token. + */ + bytes: safeint[] | null; + }[]; +} + +/** + * Specifying a particular function via `{"name": "my_function"}` forces the model to call that + * function. + */ +model ChatCompletionFunctionCallOption { + /** The name of the function to call. */ + name: string; +} + +#deprecated "deprecated" +model ChatCompletionFunctions { + /** + * A description of what the function does, used by the model to choose when and how to call the + * function. + */ + description?: string; + + /** + * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + * dashes, with a maximum length of 64. + */ + name: string; + + parameters?: FunctionParameters; +} \ No newline at end of file diff --git a/chat/operations.tsp b/chat/operations.tsp new file mode 100644 index 000000000..c2c5aa364 --- /dev/null +++ b/chat/operations.tsp @@ -0,0 +1,22 @@ +import "@typespec/http"; +import "@typespec/openapi"; + +import "../common/errors.tsp"; +import "./models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@route("/chat") +interface Chat { + @route("completions") + @post + @operationId("createChatCompletion") + @tag("OpenAI") + @summary("Creates a model response for the given chat conversation.") + createChatCompletion( + ...CreateChatCompletionRequest, + ): CreateChatCompletionResponse | ErrorResponse; +} \ No newline at end of file diff --git a/common/models.tsp b/common/models.tsp index d6d0d4f91..b5bfcc9b3 100644 --- a/common/models.tsp +++ b/common/models.tsp @@ -1,34 +1,6 @@ -namespace OpenAI; using TypeSpec.OpenAPI; -model ListModelsResponse { - object: string; - data: Model[]; -} - -/** Describes an OpenAI model offering that can be used with the API. */ -model Model { - /** The model identifier, which can be referenced in the API endpoints. */ - id: string; - - /** The object type, which is always "model". 
*/ - object: "model"; - - /** The Unix timestamp (in seconds) when the model was created. */ - @encode("unixTimestamp", int32) - created: utcDateTime; - - /** The organization that owns the model. */ - owned_by: string; -} - -model DeleteModelResponse { - id: string; - object: string; - deleted: boolean; -} - -// this is using yaml refs instead of a def in the openapi, not sure if that's required? +namespace OpenAI; scalar User extends string; @@ -37,3 +9,28 @@ model TokenArray is safeint[]; @minItems(1) model TokenArrayArray is TokenArray[]; + +model FunctionObject { + /** + * A description of what the function does, used by the model to choose when and how to call the + * function. + */ + description?: string; + + /** + * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + * dashes, with a maximum length of 64. + */ + name: string; + + parameters?: FunctionParameters; +} + +/** + * The parameters the functions accepts, described as a JSON Schema object. See the + * [guide](/docs/guides/gpt/function-calling) for examples, and the + * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation + * about the format.\n\nTo describe a function that accepts no parameters, provide the value + * `{\"type\": \"object\", \"properties\": {}}`. 
+ */ +model FunctionParameters is Record; \ No newline at end of file diff --git a/completions/models.tsp b/completions/models.tsp index 5aa332b32..bfe7bf03c 100644 --- a/completions/models.tsp +++ b/completions/models.tsp @@ -1,80 +1,46 @@ -namespace OpenAI; -using TypeSpec.OpenAPI; +import "../common/models.tsp"; +import "../chat/models.tsp"; -alias CHAT_COMPLETION_MODELS = - | "gpt4" - | "gpt-4-0314" - | "gpt-4-0613" - | "gpt-4-32k" - | "gpt-4-32k-0314" - | "gpt-4-32k-0613" - | "gpt-3.5-turbo" - | "gpt-3.5-turbo-16k" - | "gpt-3.5-turbo-0301" - | "gpt-3.5-turbo-0613" - | "gpt-3.5-turbo-16k-0613"; +using TypeSpec.OpenAPI; -alias COMPLETION_MODELS = - | "babbage-002" - | "davinci-002" - | "text-davinci-003" - | "text-davinci-002" - | "text-davinci-001" - | "code-davinci-002" - | "text-curie-001" - | "text-babbage-001" - | "text-ada-001"; +namespace OpenAI; -alias SharedCompletionProperties = { +model CreateCompletionRequest { /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - * more random, while lower values like 0.2 will make it more focused and deterministic. - * - * We generally recommend altering this or `top_p` but not both. + * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + * see all of your available models, or see our [Model overview](/docs/models/overview) for + * descriptions of them. */ - temperature?: Temperature | null = 1; + @extension("x-oaiTypeLabel", "string") + `model`: string | COMPLETION_MODELS; /** - * An alternative to sampling with temperature, called nucleus sampling, where the model considers - * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - * the top 10% probability mass are considered. + * The prompt(s) to generate completions for, encoded as a string, array of strings, array of + * tokens, or array of token arrays. 
* - * We generally recommend altering this or `temperature` but not both. + * Note that <|endoftext|> is the document separator that the model sees during training, so if a + * prompt is not specified the model will generate as if from the beginning of a new document. */ - top_p?: TopP | null = 1; + // TODO: consider inlining when https://github.com/microsoft/typespec/issues/2356 fixed + prompt: Prompt = "<|endoftext|>"; /** - * How many completions to generate for each prompt. + * Generates `best_of` completions server-side and returns the "best" (the one with the highest + * log probability per token). Results cannot be streamed. + * + * When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + * how many to return – `best_of` must be greater than `n`. + * * **Note:** Because this parameter generates many completions, it can quickly consume your token * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. */ - n?: N | null = 1; - - /** - * The maximum number of [tokens](/tokenizer) to generate in the completion. - * - * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - * for counting tokens. - */ - max_tokens?: MaxTokens | null = 16; - - // todo: consider inlining when https://github.com/microsoft/typespec/issues/2356 is resolved - // https://github.com/microsoft/typespec/issues/2355 - /** Up to 4 sequences where the API will stop generating further tokens. */ - stop?: Stop = null; + @minValue(0) + @maxValue(20) + best_of?: safeint | null = 1; - // needs default - // https://github.com/microsoft/typespec/issues/1646 - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear - * in the text so far, increasing the model's likelihood to talk about new topics. 
- * - * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - */ - presence_penalty?: Penalty | null; + /** Echo back the prompt in addition to the completion */ + echo?: boolean | null = false; - // needs default /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing * frequency in the text so far, decreasing the model's likelihood to repeat the same line @@ -82,293 +48,116 @@ alias SharedCompletionProperties = { * * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ - frequency_penalty?: Penalty | null; + frequency_penalty?: Penalty | null = 0; - // needs default of null /** * Modify the likelihood of specified tokens appearing in the completion. - * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an - * associated bias value from -100 to 100. Mathematically, the bias is added to the logits - * generated by the model prior to sampling. The exact effect will vary per model, but values - * between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 - * should result in a ban or exclusive selection of the relevant token. + * + * Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an + * associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) + * to convert text to token IDs. Mathematically, the bias is added to the logits generated by the + * model prior to sampling. The exact effect will vary per model, but values between -1 and 1 + * should decrease or increase likelihood of selection; values like -100 or 100 should result in a + * ban or exclusive selection of the relevant token. + * + * As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being + * generated. 
*/ @extension("x-oaiTypeLabel", "map") - logit_bias?: Record | null; - - /** - * A unique identifier representing your end-user, which can help OpenAI to monitor and detect - * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - */ - user?: User; + logit_bias?: Record | null = null; /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` message. - * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). - */ - stream?: boolean | null = true; -}; - -@oneOf -union Stop { - string, - StopSequences, - null, -} - -@minValue(-2) -@maxValue(2) -scalar Penalty extends float64; - -@minItems(1) -@maxItems(4) -model StopSequences is string[]; - -@minValue(0) -@maxValue(2) -scalar Temperature extends float64; - -@minValue(0) -@maxValue(1) -scalar TopP extends float64; - -@minValue(1) -@maxValue(128) -scalar N extends safeint; - -@minValue(0) -scalar MaxTokens extends safeint; - -model CreateChatCompletionRequest { - /** - * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) - * table for details on which models work with the Chat API. - */ - @extension("x-oaiTypeLabel", "string") - `model`: string | CHAT_COMPLETION_MODELS; - - /** - * A list of messages comprising the conversation so far. - * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). + * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. + * For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. 
The + * API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + * elements in the response. + * + * The maximum value for `logprobs` is 5. */ - @minItems(1) - messages: ChatCompletionRequestMessage[]; - - /** A list of functions the model may generate JSON inputs for. */ - @minItems(1) - @maxItems(128) - functions?: ChatCompletionFunctions[]; + @minValue(0) + @maxValue(5) + logprobs?: safeint | null = null; /** - * Controls how the model responds to function calls. `none` means the model does not call a - * function, and responds to the end-user. `auto` means the model can pick between an end-user or - * calling a function. Specifying a particular function via `{\"name":\ \"my_function\"}` forces the - * model to call that function. `none` is the default when no functions are present. `auto` is the - * default if functions are present. + * The maximum number of [tokens](/tokenizer) to generate in the completion. + * + * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + * for counting tokens. */ - function_call?: "none" | "auto" | ChatCompletionFunctionCallOption; - - ...SharedCompletionProperties; -} - -model ChatCompletionFunctionCallOption { - /** The name of the function to call. */ - name: string; -} + max_tokens?: MaxTokens | null = 16; -model ChatCompletionFunctions { /** - * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and - * dashes, with a maximum length of 64. + * How many completions to generate for each prompt. + * + * **Note:** Because this parameter generates many completions, it can quickly consume your token + * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
*/ - name: string; + n?: N | null = 1; /** - * A description of what the function does, used by the model to choose when and how to call the - * function. + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + * in the text so far, increasing the model's likelihood to talk about new topics. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ - description?: string; + presence_penalty?: Penalty | null = 0; - /** - * The parameters the functions accepts, described as a JSON Schema object. See the - * [guide](/docs/guides/gpt/function-calling) for examples, and the - * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation - * about the format.\n\nTo describe a function that accepts no parameters, provide the value - * `{\"type\": \"object\", \"properties\": {}}`. + /** + * If specified, our system will make a best effort to sample deterministically, such that + * repeated requests with the same `seed` and parameters should return the same result. + * + * Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + * parameter to monitor changes in the backend. */ - parameters: ChatCompletionFunctionParameters; -} - -model ChatCompletionFunctionParameters is Record; - -model ChatCompletionRequestMessage { - /** The role of the messages author. One of `system`, `user`, `assistant`, or `function`. */ - role: "system" | "user" | "assistant" | "function"; + @extension( + "x-oaiMeta", + { + beta: true + } + ) + seed?: Seed | null; - /** - * The contents of the message. `content` is required for all messages, and may be null for - * assistant messages with function calls. 
- */ - content: string | null; + // todo: consider inlining when https://github.com/microsoft/typespec/issues/2356 is resolved + // https://github.com/microsoft/typespec/issues/2355 + /** Up to 4 sequences where the API will stop generating further tokens. */ + stop?: Stop | null = null; - // TODO: the constraints are not specified in the API /** - * The name of the author of this message. `name` is required if role is `function`, and it - * should be the name of the function whose response is in the `content`. May contain a-z, - * A-Z, 0-9, and underscores, with a maximum length of 64 characters. + * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available, with the stream terminated by a `data: [DONE]` message. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). */ - name?: string; - - /** The name and arguments of a function that should be called, as generated by the model. */ - function_call?: { - /** The name of the function to call. */ - name: string; + stream?: boolean | null = false; - /** - * The arguments to call the function with, as generated by the model in JSON format. Note that - * the model does not always generate valid JSON, and may hallucinate parameters not defined by - * your function schema. Validate the arguments in your code before calling your function. - */ - arguments: string; - }; -} - -/** Represents a chat completion response returned by model, based on the provided input. */ -// TODO: Fill in example here. -@extension( - "x-oaiMeta", - { - name: "The chat completion object", - group: "chat", - example: "", - } -) -model CreateChatCompletionResponse { - /** A unique identifier for the chat completion. 
*/ - id: string; - - /** The object type, which is always `chat.completion`. */ - object: string; - - /** The Unix timestamp (in seconds) of when the chat completion was created. */ - @encode("unixTimestamp", int32) - created: utcDateTime; - - /** The model used for the chat completion. */ - `model`: string; - - /** A list of chat completion choices. Can be more than one if `n` is greater than 1. */ - choices: { - /** The index of the choice in the list of choices. */ - index: safeint; - - message: ChatCompletionResponseMessage; - - /** - * The reason the model stopped generating tokens. This will be `stop` if the model hit a - * natural stop point or a provided stop sequence, `length` if the maximum number of tokens - * specified in the request was reached, `content_filter` if the content was omitted due to - * a flag from our content filters, or `function_call` if the model called a function. - */ - finish_reason: "stop" | "length" | "function_call" | "content_filter"; - }[]; - - usage?: CompletionUsage; -} - -/** Usage statistics for the completion request. */ -model CompletionUsage { - /** Number of tokens in the prompt. */ - prompt_tokens: safeint; - - /** Number of tokens in the generated completion */ - completion_tokens: safeint; - - /** Total number of tokens used in the request (prompt + completion). */ - total_tokens: safeint; -} - -model ChatCompletionResponseMessage { - /** The role of the author of this message. */ - role: "system" | "user" | "assistant" | "function"; - - /** The contents of the message. */ - content: string | null; - - /** The name and arguments of a function that should be called, as generated by the model. */ - function_call?: { - /** The name of the function to call. */ - name: string; - - /** - * The arguments to call the function with, as generated by the model in JSON format. Note that - * the model does not always generate valid JSON, and may hallucinate parameters not defined by - * your function schema. 
Validate the arguments in your code before calling your function. - */ - arguments: string; - }; -} - -model CreateCompletionRequest { - /** - * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to - * see all of your available models, or see our [Model overview](/docs/models/overview) for - * descriptions of them. - */ - @extension("x-oaiTypeLabel", "string") - `model`: string | COMPLETION_MODELS; + /** The suffix that comes after a completion of inserted text. */ + suffix?: string | null = null; /** - * The prompt(s) to generate completions for, encoded as a string, array of strings, array of - * tokens, or array of token arrays. + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. * - * Note that <|endoftext|> is the document separator that the model sees during training, so if a - * prompt is not specified the model will generate as if from the beginning of a new document. + * We generally recommend altering this or `top_p` but not both. */ - // TODO: consider inlining when https://github.com/microsoft/typespec/issues/2356 fixed - prompt: Prompt = "<|endoftext|>"; - - /** The suffix that comes after a completion of inserted text. */ - suffix?: string | null = null; - - ...SharedCompletionProperties; + temperature?: Temperature | null = 1; /** - * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. - * For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The - * API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` - * elements in the response. + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. * - * The maximum value for `logprobs` is 5. + * We generally recommend altering this or `temperature` but not both. */ - logprobs?: safeint | null = null; - - /** Echo back the prompt in addition to the completion */ - echo?: boolean | null = false; + top_p?: TopP | null = 1; /** - * Generates `best_of` completions server-side and returns the "best" (the one with the highest - * log probability per token). Results cannot be streamed. - * - * When used with `n`, `best_of` controls the number of candidate completions and `n` specifies - * how many to return – `best_of` must be greater than `n`. - * - * **Note:** Because this parameter generates many completions, it can quickly consume your token - * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). */ - best_of?: safeint | null = 1; + user?: User; } -@oneOf -union Prompt { - string, - string[], - TokenArray, - TokenArrayArray, - null, -} /** * Represents a completion response from the API. Note: both the streamed and non-streamed response * objects share the same shape (unlike the chat endpoint). @@ -385,16 +174,6 @@ model CreateCompletionResponse { /** A unique identifier for the completion. */ id: string; - /** The object type, which is always `text_completion`. */ - object: string; - - /** The Unix timestamp (in seconds) of when the completion was created. */ - @encode("unixTimestamp", int32) - created: utcDateTime; - - /** The model used for the completion. */ - `model`: string; - /** The list of completion choices the model generated for the input. 
*/ choices: { index: safeint; @@ -416,5 +195,38 @@ model CreateCompletionResponse { finish_reason: "stop" | "length" | "content_filter"; }[]; + /** The Unix timestamp (in seconds) of when the completion was created. */ + @encode("unixTimestamp", int32) + created: utcDateTime; + + /** The model used for the completion. */ + `model`: string; + + /** + * This fingerprint represents the backend configuration that the model runs with. + * + * Can be used in conjunction with the `seed` request parameter to understand when backend changes + * have been made that might impact determinism. + */ + system_fingerprint?: string; + + /** The object type, which is always `text_completion`. */ + object: "text_completion"; + + /** Usage statistics for the completion request. */ usage?: CompletionUsage; } + +alias COMPLETION_MODELS = + | "gpt-3.5-turbo-instruct" + | "davinci-002" + | "babbage-002"; + +@oneOf +union Prompt { + string, + string[], + TokenArray, + TokenArrayArray, + null, +} \ No newline at end of file diff --git a/completions/operations.tsp b/completions/operations.tsp index d53245f7c..e84d3d44c 100644 --- a/completions/operations.tsp +++ b/completions/operations.tsp @@ -3,30 +3,18 @@ import "@typespec/openapi"; import "../common/errors.tsp"; import "./models.tsp"; -import "./chat-meta.tsp"; using TypeSpec.Http; using TypeSpec.OpenAPI; namespace OpenAI; -@route("/chat") -namespace Chat { - @route("/completions") - interface Completions { - @tag("OpenAI") - @post - @operationId("createChatCompletion") - createChatCompletion( - ...CreateChatCompletionRequest, - ): CreateChatCompletionResponse | ErrorResponse; - } -} @route("/completions") interface Completions { - @tag("OpenAI") @post @operationId("createCompletion") + @tag("OpenAI") + @summary("Creates a completion for the provided prompt and parameters.") createCompletion( ...CreateCompletionRequest, ): CreateCompletionResponse | ErrorResponse; diff --git a/edits/models.tsp b/edits/models.tsp deleted file mode 100644 
index d76372649..000000000 --- a/edits/models.tsp +++ /dev/null @@ -1,69 +0,0 @@ -namespace OpenAI; -using TypeSpec.OpenAPI; - -model CreateEditRequest { - /** - * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` - * model with this endpoint. - */ - @extension("x-oaiTypeLabel", "string") - `model`: string | "text-davinci-edit-001" | "code-davinci-edit-001"; - - /** The input text to use as a starting point for the edit. */ - input?: string | null = ""; - - /** The instruction that tells the model how to edit the prompt. */ - instruction: string; - - /** How many edits to generate for the input and instruction. */ - n?: EditN | null = 1; - - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - * more random, while lower values like 0.2 will make it more focused and deterministic. - * - * We generally recommend altering this or `top_p` but not both. - */ - temperature?: Temperature | null = 1; - - /** - * An alternative to sampling with temperature, called nucleus sampling, where the model considers - * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - * the top 10% probability mass are considered. - * - * We generally recommend altering this or `temperature` but not both. - */ - top_p?: TopP | null = 1; -} - -#deprecated "deprecated" -model CreateEditResponse { - /** The object type, which is always `edit`. */ - object: "edit"; - - /** The Unix timestamp (in seconds) of when the edit was created. */ - @encode("unixTimestamp", int32) - created: utcDateTime; - - /** description: A list of edit choices. Can be more than one if `n` is greater than 1. */ - choices: { - /** The edited result. */ - text: string; - - /** The index of the choice in the list of choices. */ - index: safeint; - - /** - * The reason the model stopped generating tokens. 
This will be `stop` if the model hit a - * natural stop point or a provided stop sequence, or `length` if the maximum number of tokens - * specified in the request was reached. - */ - finish_reason: "stop" | "length"; - }[]; - - usage: CompletionUsage; -} - -@minValue(0) -@maxValue(20) -scalar EditN extends safeint; diff --git a/edits/operations.tsp b/edits/operations.tsp deleted file mode 100644 index 08497364e..000000000 --- a/edits/operations.tsp +++ /dev/null @@ -1,19 +0,0 @@ -import "@typespec/http"; -import "@typespec/openapi"; - -import "../common/errors.tsp"; -import "./models.tsp"; - -using TypeSpec.Http; -using TypeSpec.OpenAPI; - -namespace OpenAI; - -@route("/edits") -interface Edits { - #deprecated "deprecated" - @post - @tag("OpenAI") - @operationId("createEdit") - createEdit(@body edit: CreateEditRequest): CreateEditResponse | ErrorResponse; -} diff --git a/embeddings/models.tsp b/embeddings/models.tsp index ab46275b2..fa80cac4b 100644 --- a/embeddings/models.tsp +++ b/embeddings/models.tsp @@ -1,33 +1,57 @@ import "../common/models.tsp"; -namespace OpenAI; using TypeSpec.OpenAPI; -model CreateEmbeddingRequest { - /** ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. */ - @extension("x-oaiTypeLabel", "string") - `model`: string | "text-embedding-ada-002"; +namespace OpenAI; +model CreateEmbeddingRequest { /** * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a * single request, pass an array of strings or array of token arrays. Each input must not exceed - * the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. + * the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + * empty string. 
   * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb)
   * for counting tokens.
   */
+  @extension("x-oaiExpandable", true)
  input: string | string[] | TokenArray | TokenArrayArray;
 
+  /**
+   * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to
+   * see all of your available models, or see our [Model overview](/docs/models/overview) for
+   * descriptions of them.
+   */
+  @extension("x-oaiTypeLabel", "string")
+  `model`: string | EMBEDDINGS_MODELS;
+
+  /**
+   * The format to return the embeddings in. Can be either `float` or
+   * [`base64`](https://pypi.org/project/pybase64/).
+   */
+  encoding_format?: "float" | "base64" | null = "float";
+
+  /**
+   * The number of dimensions the resulting output embeddings should have. Only supported in
+   * `text-embedding-3` and later models.
+   */
+  dimensions?: safeint;
+
+  /**
+   * A unique identifier representing your end-user, which can help OpenAI to monitor and detect
+   * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+   */
   user?: User;
 }
 
 model CreateEmbeddingResponse {
-  /** The object type, which is always "embedding". */
-  object: "embedding";
-
-  /** The name of the model used to generate the embedding. */
-  `model`: string;
-
   /** The list of embeddings generated by the model. */
   data: Embedding[];
+
+  /** The name of the model used to generate the embedding. */
+  `model`: string;
+
+  /** The object type, which is always "list". */
+  object: "list";
 
   /** The usage information for the request. */
   usage: {
@@ -39,17 +63,40 @@
   };
 }
 
+alias EMBEDDINGS_MODELS =
+  | "text-embedding-ada-002"
+  | "text-embedding-3-small"
+  | "text-embedding-3-large";
+
 /** Represents an embedding vector returned by embedding endpoint.
 */
+@extension(
+  "x-oaiMeta",
+  {
+    name: "The embedding object",
+    example: """
+      {
+        "object": "embedding",
+        "embedding": [
+          0.0023064255,
+          -0.009327292,
+          .... (1536 floats total for ada-002)
+          -0.0028842222,
+        ],
+        "index": 0
+      }
+      """
+  }
+)
 model Embedding {
   /** The index of the embedding in the list of embeddings. */
   index: safeint;
 
-  /** The object type, which is always "embedding". */
-  object: "embedding";
-
   /**
-   * The embedding vector, which is a list of floats. The length of vector depends on the model as\
+   * The embedding vector, which is a list of floats. The length of vector depends on the model as
    * listed in the [embedding guide](/docs/guides/embeddings).
    */
-  embedding: float64[];
+  embedding: float64[] | string;
+
+  /** The object type, which is always "embedding". */
+  object: "embedding";
 }
diff --git a/embeddings/operations.tsp b/embeddings/operations.tsp
index 012d97c58..c53347e6f 100644
--- a/embeddings/operations.tsp
+++ b/embeddings/operations.tsp
@@ -11,10 +11,10 @@ namespace OpenAI;
 
 @route("/embeddings")
 interface Embeddings {
-  @tag("OpenAI")
-  @summary("Creates an embedding vector representing the input text.")
   @post
   @operationId("createEmbedding")
+  @tag("OpenAI")
+  @summary("Creates an embedding vector representing the input text.")
   createEmbedding(
     @body embedding: CreateEmbeddingRequest,
   ): CreateEmbeddingResponse | ErrorResponse;
diff --git a/files/models.tsp b/files/models.tsp
index 990c1ea11..72a263413 100644
--- a/files/models.tsp
+++ b/files/models.tsp
@@ -1,70 +1,91 @@
-namespace OpenAI;
 using TypeSpec.OpenAPI;
 
-model ListFilesResponse {
-  object: string; // presumably this is always some constant, but not defined.
-  data: OpenAIFile[];
-}
+namespace OpenAI;
 
 model CreateFileRequest {
   /**
-   * Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded.
-   *
-   * If the `purpose` is set to "fine-tune", the file will be used for fine-tuning.
+   * The file object (not file name) to be uploaded.
   */
   @encode("binary")
   file: bytes;
 
   /**
-   * The intended purpose of the uploaded documents. Use "fine-tune" for
-   * [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the
-   * uploaded file.
+   * The intended purpose of the uploaded file. Use "fine-tune" for
+   * [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for
+   * [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This
+   * allows us to validate the format of the uploaded file is correct for fine-tuning.
    */
-  purpose: string;
+  purpose: "fine-tune" | "assistants";
+}
+
+model ListFilesResponse {
+  data: OpenAIFile[];
+  object: "list";
+}
+
+model DeleteFileResponse {
+  id: string;
+  object: "file";
+  deleted: boolean;
 }
 
+alias FILE_PURPOSE =
+  | "fine-tune"
+  | "fine-tune-results"
+  | "assistants"
+  | "assistants_output";
+
 /** The `File` object represents a document that has been uploaded to OpenAI. */
+@extension(
+  "x-oaiMeta",
+  {
+    name: "The file object",
+    example: """
+      {
+        "id": "file-abc123",
+        "object": "file",
+        "bytes": 120000,
+        "created_at": 1677610602,
+        "filename": "salesOverview.pdf",
+        "purpose": "assistants",
+      }
+      """
+  }
+)
 model OpenAIFile {
   /** The file identifier, which can be referenced in the API endpoints. */
   id: string;
 
-  /** The object type, which is always "file". */
-  object: "file";
-
-  /** The size of the file in bytes. */
+  /** The size of the file, in bytes. */
   bytes: safeint;
 
   /** The Unix timestamp (in seconds) for when the file was created. */
   @encode("unixTimestamp", int32)
-  createdAt: utcDateTime;
+  created_at: utcDateTime;
 
   /** The name of the file. */
   filename: string;
 
-  /** The intended purpose of the file. Currently, only "fine-tune" is supported. */
-  purpose: string;
+  /** The object type, which is always "file". */
+  object: "file";
 
-  /**
-   * The current status of the file, which can be either `uploaded`, `processed`, `pending`,
-   * `error`, `deleting` or `deleted`.
+ /** + * The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, + * `assistants`, and `assistants_output`. */ - status: - | "uploaded" - | "processed" - | "pending" - | "error" - | "deleting" - | "deleted"; + purpose: FILE_PURPOSE; /** - * Additional details about the status of the file. If the file is in the `error` state, this will - * include a message describing the error. + * Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or + * `error`. */ - status_details?: string | null; -} + #deprecated "deprecated" + status: "uploaded" | "processed" | "error"; -model DeleteFileResponse { - id: string; - object: string; - deleted: boolean; -} + /** + * Deprecated. For details on why a fine-tuning training file failed validation, see the `error` + * field on `fine_tuning.job`. + */ + #deprecated "deprecated" + status_details?: string; +} \ No newline at end of file diff --git a/files/operations.tsp b/files/operations.tsp index 2e601ae03..eec58470a 100644 --- a/files/operations.tsp +++ b/files/operations.tsp @@ -11,46 +11,58 @@ namespace OpenAI; @route("/files") interface Files { - @tag("OpenAI") - @get - @summary("Returns a list of files that belong to the user's organization.") - @operationId("listFiles") - listFiles(): ListFilesResponse | ErrorResponse; - - @tag("OpenAI") @post - @summary("Returns a list of files that belong to the user's organization.") @operationId("createFile") + @tag("OpenAI") + @summary(""" + Upload a file that can be used across various endpoints. The size of all the files uploaded by + one organization can be up to 100 GB. + + The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + supported. The Fine-tuning API only supports `.jsonl` files. + + Please [contact us](https://help.openai.com/) if you need to increase these storage limits. 
+ """) createFile( @header contentType: "multipart/form-data", @body file: CreateFileRequest, ): OpenAIFile | ErrorResponse; + @get + @operationId("listFiles") @tag("OpenAI") - @post - @summary("Returns information about a specific file.") + @summary("Returns a list of files that belong to the user's organization.") + listFiles( + /** Only return files with the given purpose. */ + @query purpose?: FILE_PURPOSE, + ): ListFilesResponse | ErrorResponse; + + @route("{file_id}") + @get @operationId("retrieveFile") - @route("/files/{file_id}") + @tag("OpenAI") + @summary("Returns information about a specific file.") retrieveFile( /** The ID of the file to use for this request. */ @path file_id: string, ): OpenAIFile | ErrorResponse; - @tag("OpenAI") + @route("{file_id}") @delete - @summary("Delete a file") @operationId("deleteFile") - @route("/files/{file_id}") + @tag("OpenAI") + @summary("Delete a file") deleteFile( /** The ID of the file to use for this request. */ @path file_id: string, ): DeleteFileResponse | ErrorResponse; - @route("/files/{file_id}/content") - @tag("OpenAI") + @route("{file_id}/content") @get - @summary("Returns the contents of the specified file.") @operationId("downloadFile") + @tag("OpenAI") + @summary("Returns the contents of the specified file.") downloadFile( /** The ID of the file to use for this request. */ @path file_id: string, diff --git a/fine-tuning/models.tsp b/fine-tuning/models.tsp index bf846072b..9afce6d02 100644 --- a/fine-tuning/models.tsp +++ b/fine-tuning/models.tsp @@ -1,6 +1,9 @@ -namespace OpenAI; +import "../files/models.tsp"; + using TypeSpec.OpenAPI; +namespace OpenAI; + model FineTuningJob { /** The object identifier, which can be referenced in the API endpoints. 
*/ id: string; diff --git a/fine-tuning/operations.tsp b/fine-tuning/operations.tsp index 15491f62e..258382397 100644 --- a/fine-tuning/operations.tsp +++ b/fine-tuning/operations.tsp @@ -151,41 +151,4 @@ interface FineTunes { /** The ID of the fine-tune job to cancel */ @path fine_tune_id: string, ): FineTune | ErrorResponse; -} - -@route("/models") -interface Models { - @get - @tag("OpenAI") - @summary(""" - Lists the currently available models, and provides basic information about each one such as the - owner and availability. - """) - @operationId("listModels") - listModels(): ListModelsResponse | ErrorResponse; - - @get - @route("{model}") - @operationId("retrieveModel") - @tag("OpenAI") - @summary(""" - Retrieves a model instance, providing basic information about the model such as the owner and - permissioning. - """) - retrieve( - /** The ID of the model to use for this request. */ - @path `model`: string, - ): Model | ErrorResponse; - - @delete - @route("{model}") - @operationId("deleteModel") - @tag("OpenAI") - @summary(""" - Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. - """) - delete( - /** The model to delete */ - @path `model`: string, - ): DeleteModelResponse | ErrorResponse; -} +} \ No newline at end of file diff --git a/images/models.tsp b/images/models.tsp index 3d7020b51..42f14f067 100644 --- a/images/models.tsp +++ b/images/models.tsp @@ -1,51 +1,58 @@ import "../common/models.tsp"; -namespace OpenAI; using TypeSpec.OpenAPI; -alias SharedImageProperties = { - /** The number of images to generate. Must be between 1 and 10. */ - n?: ImagesN | null = 1; - - /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ - size?: IMAGE_SIZES | null = "1024x1024"; - - /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
*/ - response_format?: "url" | "b64_json" | null = "url"; - - user?: User; -}; +namespace OpenAI; model CreateImageRequest { - /** A text description of the desired image(s). The maximum length is 1000 characters. */ + /** + * A text description of the desired image(s). The maximum length is 1000 characters for + * `dall-e-2` and 4000 characters for `dall-e-3`. + */ prompt: string; - ...SharedImageProperties; -} + /** The model to use for image generation. */ + @extension("x-oaiTypeLabel", "string") + `model`?: string | "dall-e-2" | "dall-e-3" = "dall-e-2"; -model ImagesResponse { - @encode("unixTimestamp", int32) - created: utcDateTime; + /** + * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is + * supported. + */ + n?: ImagesN | null = 1; - data: Image[]; -} + /** + * The quality of the image that will be generated. `hd` creates images with finer details and + * greater consistency across the image. This param is only supported for `dall-e-3`. + * + * TODO: Confirm that this is actually nullable. + */ + quality?: "standard" | "hd" | null = "standard"; -alias IMAGE_SIZES = "256x256" | "512x512" | "1024x1024"; + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + response_format?: "url" | "b64_json" | null = "url"; -/** Represents the url or the content of an image generated by the OpenAI API. */ -model Image { - /** The URL of the generated image, if `response_format` is `url` (default). */ - url?: url; + /** + * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for + * `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + */ + size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792" | null = "1024x1024"; - /** The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. */ - @encode("base64", string) - b64_json?: bytes; + /** + * The style of the generated images. 
Must be one of `vivid` or `natural`. Vivid causes the model + * to lean towards generating hyper-real and dramatic images. Natural causes the model to produce + * more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + */ + style?: "vivid" | "natural" | null = "vivid"; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ + user?: User; } model CreateImageEditRequest { - /** A text description of the desired image(s). The maximum length is 1000 characters. */ - prompt: string; - /** * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not * provided, image must have transparency, which will be used as the mask. @@ -53,6 +60,9 @@ model CreateImageEditRequest { @encode("binary") image: bytes; + /** A text description of the desired image(s). The maximum length is 1000 characters. */ + prompt: string; + /** * An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where * `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions @@ -61,7 +71,26 @@ model CreateImageEditRequest { @encode("binary") mask?: bytes; - ...SharedImageProperties; + /** The model to use for image generation. Only `dall-e-2` is supported at this time. */ + @extension("x-oaiTypeLabel", "string") + `model`?: string | "dall-e-2" = "dall-e-2"; + + /** + * The number of images to generate. Must be between 1 and 10. + */ + n?: ImagesN | null = 1; + + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024" | null = "1024x1024"; + + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
*/ + response_format?: "url" | "b64_json" | null = "url"; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ + user?: User; } model CreateImageVariationRequest { @@ -72,9 +101,61 @@ model CreateImageVariationRequest { @encode("binary") image: bytes; - ...SharedImageProperties; + /** The model to use for image generation. Only `dall-e-2` is supported at this time. */ + @extension("x-oaiTypeLabel", "string") + `model`?: string | "dall-e-2" = "dall-e-2"; + + /** + * The number of images to generate. Must be between 1 and 10. + */ + n?: ImagesN | null = 1; + + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + response_format?: "url" | "b64_json" | null = "url"; + + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024" | null = "1024x1024"; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ + user?: User; +} + +model ImagesResponse { + @encode("unixTimestamp", int32) + created: utcDateTime; + + data: Image[]; } @minValue(1) @maxValue(10) scalar ImagesN extends safeint; + +/** Represents the url or the content of an image generated by the OpenAI API. */ +@extension( + "x-oaiMeta", + { + name: "The image object", + example: | """ + { + "url": "...", + "revised_prompt": "..." + } + """ + } +) +model Image { + /** The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. */ + @encode("base64", string) + b64_json?: bytes; + + /** The URL of the generated image, if `response_format` is `url` (default). */ + url?: url; + + /** The prompt that was used to generate the image, if there was any revision to the prompt. 
*/ + revised_prompt?: string; +} + diff --git a/images/operations.tsp b/images/operations.tsp index 09203262b..59928894f 100644 --- a/images/operations.tsp +++ b/images/operations.tsp @@ -16,7 +16,9 @@ interface Images { @operationId("createImage") @tag("OpenAI") @summary("Creates an image given a prompt") - createImage(@body image: CreateImageRequest): ImagesResponse | ErrorResponse; + createImage( + @body image: CreateImageRequest + ): ImagesResponse | ErrorResponse; @route("edits") @post diff --git a/main.tsp b/main.tsp index 2ea8cbbc3..fe7b08d3a 100644 --- a/main.tsp +++ b/main.tsp @@ -3,13 +3,14 @@ import "@typespec/openapi3"; import "@typespec/openapi"; import "./audio"; +import "./chat"; import "./completions"; -import "./edits"; import "./embeddings"; import "./files"; import "./fine-tuning"; import "./images"; -import "./moderation"; +import "./models"; +import "./moderations"; using TypeSpec.Http; diff --git a/models/main.tsp b/models/main.tsp new file mode 100644 index 000000000..6a754bcb5 --- /dev/null +++ b/models/main.tsp @@ -0,0 +1 @@ +import "./operations.tsp"; \ No newline at end of file diff --git a/models/models.tsp b/models/models.tsp new file mode 100644 index 000000000..68774d259 --- /dev/null +++ b/models/models.tsp @@ -0,0 +1,38 @@ +using TypeSpec.OpenAPI; + +namespace OpenAI; + +model ListModelsResponse { + object: "list"; + data: Model[]; +} + +model DeleteModelResponse { + id: string; + deleted: boolean; + object: "model"; +} + +/** Describes an OpenAI model offering that can be used with the API. */ +// TODO: Fill in example here. +@extension( + "x-oaiMeta", + { + name: "The model object", + example: "*retrieve_model_response" + } +) +model Model { + /** The model identifier, which can be referenced in the API endpoints. */ + id: string; + + /** The Unix timestamp (in seconds) when the model was created. */ + @encode("unixTimestamp", int32) + created: utcDateTime; + + /** The object type, which is always "model". 
*/ + object: "model"; + + /** The organization that owns the model. */ + owned_by: string; +} \ No newline at end of file diff --git a/models/operations.tsp b/models/operations.tsp new file mode 100644 index 000000000..f017e1585 --- /dev/null +++ b/models/operations.tsp @@ -0,0 +1,47 @@ +import "@typespec/http"; +import "@typespec/openapi"; + +import "../common/errors.tsp"; +import "./models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@route("/models") +interface Models { + @get + @operationId("listModels") + @tag("OpenAI") + @summary(""" + Lists the currently available models, and provides basic information about each one such as the + owner and availability. + """) + listModels(): ListModelsResponse | ErrorResponse; + + @route("{model}") + @get + @operationId("retrieveModel") + @tag("OpenAI") + @summary(""" + Retrieves a model instance, providing basic information about the model such as the owner and + permissioning. + """) + retrieve( + /** The ID of the model to use for this request. */ + @path `model`: string, + ): Model | ErrorResponse; + + @route("{model}") + @delete + @operationId("deleteModel") + @tag("OpenAI") + @summary(""" + Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. 
+ """) + delete( + /** The model to delete */ + @path `model`: string, + ): DeleteModelResponse | ErrorResponse; +} diff --git a/moderation/main.tsp b/moderations/main.tsp similarity index 100% rename from moderation/main.tsp rename to moderations/main.tsp diff --git a/moderation/models.tsp b/moderations/models.tsp similarity index 89% rename from moderation/models.tsp rename to moderations/models.tsp index f47b21be1..a572a6f2e 100644 --- a/moderation/models.tsp +++ b/moderations/models.tsp @@ -1,6 +1,7 @@ -namespace OpenAI; using TypeSpec.OpenAPI; +namespace OpenAI; + model CreateModerationRequest { /** The input text to classify */ input: string | string[]; @@ -13,9 +14,20 @@ model CreateModerationRequest { * of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. */ @extension("x-oaiTypeLabel", "string") - `model`?: string | "text-moderation-latest" | "text-moderation-stable" = "text-moderation-latest"; + `model`?: string | MODERATION_MODELS = "text-moderation-latest"; } +/** + * Represents policy compliance report by OpenAI's content moderation model against a given input. + */ +// TODO: Fill in example here. +@extension( + "x-oaiMeta", + { + name: "The moderation object", + example: "*moderation_example" + } +) model CreateModerationResponse { /** The unique identifier for the moderation request. */ id: string; @@ -66,7 +78,7 @@ model CreateModerationResponse { * Content that encourages performing acts of self-harm, such as suicide, cutting, and eating * disorders, or that gives instructions or advice on how to commit such acts. */ - `self-harm/instructive`: boolean; + `self-harm/instructions`: boolean; /** * Content meant to arouse sexual excitement, such as the description of sexual activity, or @@ -105,7 +117,7 @@ model CreateModerationResponse { `self-harm/intent`: float64; /** The score for the category 'self-harm/instructive'. 
*/ - `self-harm/instructive`: float64; + `self-harm/instructions`: float64; /** The score for the category 'sexual'. */ sexual: float64; @@ -121,3 +133,7 @@ model CreateModerationResponse { }; }[]; } + +alias MODERATION_MODELS = + | "text-moderation-latest" + | "text-moderation-stable"; \ No newline at end of file diff --git a/moderation/operations.tsp b/moderations/operations.tsp similarity index 98% rename from moderation/operations.tsp rename to moderations/operations.tsp index 5f29bc3be..8efad4cdb 100644 --- a/moderation/operations.tsp +++ b/moderations/operations.tsp @@ -11,6 +11,7 @@ namespace OpenAI; @route("/moderations") interface Moderations { + @post @operationId("createModeration") @tag("OpenAI") @summary("Classifies if text violates OpenAI's Content Policy") diff --git a/openapi.yaml b/openapi.yaml index 011ccf375..a6e16ee12 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -13,8 +13,26 @@ info: servers: - url: https://api.openai.com/v1 tags: - - name: OpenAI - description: The OpenAI REST API + - name: Assistants + description: Build Assistants that can call models and use tools. + - name: Audio + description: Learn how to turn audio into text or text into audio. + - name: Chat + description: Given a list of messages comprising a conversation, the model will return a response. + - name: Completions + description: Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. + - name: Embeddings + description: Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. + - name: Fine-tuning + description: Manage fine-tuning jobs to tailor a model to your specific training data. + - name: Files + description: Files are used to upload documents that can be used with features like Assistants and Fine-tuning. + - name: Images + description: Given a prompt and/or an input image, the model will generate a new image. 
+ - name: Models + description: List and describe the various models available in the API. + - name: Moderations + description: Given a input text, outputs if the model classifies it as violating OpenAI's content policy. paths: # Note: When adding an endpoint, make sure you also add it in the `groups` section, in the end of this file, # under the appropriate group @@ -22,7 +40,7 @@ paths: post: operationId: createChatCompletion tags: - - OpenAI + - Chat summary: Creates a model response for the given chat conversation. requestBody: required: true @@ -45,7 +63,7 @@ paths: Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed. path: create examples: - - title: No Streaming + - title: Default request: curl: | curl https://api.openai.com/v1/chat/completions \ @@ -65,11 +83,10 @@ paths: ] }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") + from openai import OpenAI + client = OpenAI() - completion = openai.ChatCompletion.create( + completion = client.chat.completions.create( model="VAR_model_id", messages=[ {"role": "system", "content": "You are a helpful assistant."}, @@ -99,12 +116,111 @@ paths: "object": "chat.completion", "created": 1677652288, "model": "gpt-3.5-turbo-0613", + "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, "message": { "role": "assistant", "content": "\n\nHello there, how may I assist you today?", }, + "logprobs": null, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21 + } + } + - title: Image input + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "gpt-4-vision-preview", + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What’s in this 
image?" + }, + { + "type": "image_url", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + } + } + ] + } + ], + "max_tokens": 300 + }' + python: | + from openai import OpenAI + + client = OpenAI() + + response = client.chat.completions.create( + model="gpt-4-vision-preview", + messages=[ + { + "role": "user", + "content": [ + {"type": "text", "text": "What’s in this image?"}, + { + "type": "image_url", + "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + }, + ], + } + ], + max_tokens=300, + ) + + print(response.choices[0]) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const response = await openai.chat.completions.create({ + model: "gpt-4-vision-preview", + messages: [ + { + role: "user", + content: [ + { type: "text", text: "What’s in this image?" 
}, + { + type: "image_url", + image_url: + "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + }, + ], + }, + ], + }); + console.log(response.choices[0]); + } + main(); + response: &chat_completion_image_example | + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": "fp_44709d6fcb", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "\n\nHello there, how may I assist you today?", + }, + "logprobs": null, "finish_reason": "stop" }], "usage": { @@ -134,11 +250,10 @@ paths: "stream": true }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") + from openai import OpenAI + client = OpenAI() - completion = openai.ChatCompletion.create( + completion = client.chat.completions.create( model="VAR_model_id", messages=[ {"role": "system", "content": "You are a helpful assistant."}, @@ -172,24 +287,403 @@ paths: main(); response: &chat_completion_chunk_example | + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]} + + .... 
+ + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":" today"},"logprobs":null,"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + - title: Functions + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "gpt-3.5-turbo", + "messages": [ + { + "role": "user", + "content": "What is the weather like in Boston?" + } + ], + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "tool_choice": "auto" + }' + python: | + from openai import OpenAI + client = OpenAI() + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ] + messages = [{"role": "user", "content": "What's the weather like in Boston today?"}] + completion = client.chat.completions.create( + model="VAR_model_id", + messages=messages, + tools=tools, + tool_choice="auto" + ) + + print(completion) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]; + const tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ]; + + const response = await openai.chat.completions.create({ + model: "gpt-3.5-turbo", + messages: messages, + tools: tools, + tool_choice: "auto", + }); + + console.log(response); + } + + main(); + response: &chat_completion_function_example | + { + "id": "chatcmpl-abc123", + "object": "chat.completion", + "created": 1699896916, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": null, + "tool_calls": [ + { + "id": "call_abc123", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\n\"location\": \"Boston, MA\"\n}" + } + } + ] + }, + "logprobs": null, + "finish_reason": "tool_calls" + } + ], + "usage": { + "prompt_tokens": 82, + "completion_tokens": 17, + "total_tokens": 99 + } + } + - title: Logprobs + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + 
"model": "VAR_model_id", + "messages": [ + { + "role": "user", + "content": "Hello!" + } + ], + "logprobs": true, + "top_logprobs": 2 + }' + python: | + from openai import OpenAI + client = OpenAI() + + completion = client.chat.completions.create( + model="VAR_model_id", + messages=[ + {"role": "user", "content": "Hello!"} + ], + logprobs=True, + top_logprobs=2 + ) + + print(completion.choices[0].message) + print(completion.choices[0].logprobs) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + messages: [{ role: "user", content: "Hello!" }], + model: "VAR_model_id", + logprobs: true, + top_logprobs: 2, + }); + + console.log(completion.choices[0]); + } + + main(); + response: | { "id": "chatcmpl-123", - "object": "chat.completion.chunk", - "created": 1677652288, - "model": "gpt-3.5-turbo", - "choices": [{ - "index": 0, - "delta": { - "content": "Hello", - }, - "finish_reason": "stop" - }] + "object": "chat.completion", + "created": 1702685778, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Hello! How can I assist you today?" 
+ }, + "logprobs": { + "content": [ + { + "token": "Hello", + "logprob": -0.31725305, + "bytes": [72, 101, 108, 108, 111], + "top_logprobs": [ + { + "token": "Hello", + "logprob": -0.31725305, + "bytes": [72, 101, 108, 108, 111] + }, + { + "token": "Hi", + "logprob": -1.3190403, + "bytes": [72, 105] + } + ] + }, + { + "token": "!", + "logprob": -0.02380986, + "bytes": [ + 33 + ], + "top_logprobs": [ + { + "token": "!", + "logprob": -0.02380986, + "bytes": [33] + }, + { + "token": " there", + "logprob": -3.787621, + "bytes": [32, 116, 104, 101, 114, 101] + } + ] + }, + { + "token": " How", + "logprob": -0.000054669687, + "bytes": [32, 72, 111, 119], + "top_logprobs": [ + { + "token": " How", + "logprob": -0.000054669687, + "bytes": [32, 72, 111, 119] + }, + { + "token": "<|end|>", + "logprob": -10.953937, + "bytes": null + } + ] + }, + { + "token": " can", + "logprob": -0.015801601, + "bytes": [32, 99, 97, 110], + "top_logprobs": [ + { + "token": " can", + "logprob": -0.015801601, + "bytes": [32, 99, 97, 110] + }, + { + "token": " may", + "logprob": -4.161023, + "bytes": [32, 109, 97, 121] + } + ] + }, + { + "token": " I", + "logprob": -3.7697225e-6, + "bytes": [ + 32, + 73 + ], + "top_logprobs": [ + { + "token": " I", + "logprob": -3.7697225e-6, + "bytes": [32, 73] + }, + { + "token": " assist", + "logprob": -13.596657, + "bytes": [32, 97, 115, 115, 105, 115, 116] + } + ] + }, + { + "token": " assist", + "logprob": -0.04571125, + "bytes": [32, 97, 115, 115, 105, 115, 116], + "top_logprobs": [ + { + "token": " assist", + "logprob": -0.04571125, + "bytes": [32, 97, 115, 115, 105, 115, 116] + }, + { + "token": " help", + "logprob": -3.1089056, + "bytes": [32, 104, 101, 108, 112] + } + ] + }, + { + "token": " you", + "logprob": -5.4385737e-6, + "bytes": [32, 121, 111, 117], + "top_logprobs": [ + { + "token": " you", + "logprob": -5.4385737e-6, + "bytes": [32, 121, 111, 117] + }, + { + "token": " today", + "logprob": -12.807695, + "bytes": [32, 116, 111, 100, 97, 121] + 
} + ] + }, + { + "token": " today", + "logprob": -0.0040071653, + "bytes": [32, 116, 111, 100, 97, 121], + "top_logprobs": [ + { + "token": " today", + "logprob": -0.0040071653, + "bytes": [32, 116, 111, 100, 97, 121] + }, + { + "token": "?", + "logprob": -5.5247097, + "bytes": [63] + } + ] + }, + { + "token": "?", + "logprob": -0.0008108172, + "bytes": [63], + "top_logprobs": [ + { + "token": "?", + "logprob": -0.0008108172, + "bytes": [63] + }, + { + "token": "?\n", + "logprob": -7.184561, + "bytes": [63, 10] + } + ] + } + ] + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 9, + "total_tokens": 18 + }, + "system_fingerprint": null } + /completions: post: operationId: createCompletion tags: - - OpenAI + - Completions summary: Creates a completion for the provided prompt and parameters. requestBody: required: true @@ -206,6 +700,7 @@ paths: $ref: "#/components/schemas/CreateCompletionResponse" x-oaiMeta: name: Create completion + group: completions returns: | Returns a [completion](/docs/api-reference/completions/object) object, or a sequence of completion objects if the request is streamed. 
legacy: true @@ -223,10 +718,10 @@ paths: "temperature": 0 }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Completion.create( + from openai import OpenAI + client = OpenAI() + + client.completions.create( model="VAR_model_id", prompt="Say this is a test", max_tokens=7, @@ -254,6 +749,7 @@ paths: "object": "text_completion", "created": 1589478378, "model": "VAR_model_id", + "system_fingerprint": "fp_44709d6fcb", "choices": [ { "text": "\n\nThis is indeed a test", @@ -282,17 +778,17 @@ paths: "stream": true }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - for chunk in openai.Completion.create( + from openai import OpenAI + client = OpenAI() + + for chunk in client.completions.create( model="VAR_model_id", prompt="Say this is a test", max_tokens=7, temperature=0, stream=True ): - print(chunk['choices'][0]['text']) + print(chunk.choices[0].text) node.js: |- import OpenAI from "openai"; @@ -324,90 +820,14 @@ paths: } ], "model": "gpt-3.5-turbo-instruct" + "system_fingerprint": "fp_44709d6fcb", } - /edits: - post: - operationId: createEdit - deprecated: true - tags: - - OpenAI - summary: Creates a new edit for the provided input, instruction, and parameters. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CreateEditRequest" - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: "#/components/schemas/CreateEditResponse" - x-oaiMeta: - name: Create edit - returns: | - Returns an [edit](/docs/api-reference/edits/object) object. 
- group: edits - examples: - request: - curl: | - curl https://api.openai.com/v1/edits \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -d '{ - "model": "VAR_model_id", - "input": "What day of the wek is it?", - "instruction": "Fix the spelling mistakes" - }' - python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Edit.create( - model="VAR_model_id", - input="What day of the wek is it?", - instruction="Fix the spelling mistakes" - ) - node.js: |- - import OpenAI from "openai"; - - const openai = new OpenAI(); - - async function main() { - const edit = await openai.edits.create({ - model: "VAR_model_id", - input: "What day of the wek is it?", - instruction: "Fix the spelling mistakes.", - }); - - console.log(edit); - } - - main(); - response: &edit_example | - { - "object": "edit", - "created": 1589478378, - "choices": [ - { - "text": "What day of the week is it?", - "index": 0, - } - ], - "usage": { - "prompt_tokens": 25, - "completion_tokens": 32, - "total_tokens": 57 - } - } /images/generations: post: operationId: createImage tags: - - OpenAI + - Images summary: Creates an image given a prompt. requestBody: required: true @@ -424,6 +844,7 @@ paths: $ref: "#/components/schemas/ImagesResponse" x-oaiMeta: name: Create image + group: images returns: Returns a list of [image](/docs/api-reference/images/object) objects. 
examples: request: @@ -432,17 +853,19 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ + "model": "dall-e-3", "prompt": "A cute baby sea otter", - "n": 2, + "n": 1, "size": "1024x1024" }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Image.create( + from openai import OpenAI + client = OpenAI() + + client.images.generate( + model="dall-e-3", prompt="A cute baby sea otter", - n=2, + n=1, size="1024x1024" ) node.js: |- @@ -451,7 +874,7 @@ paths: const openai = new OpenAI(); async function main() { - const image = await openai.images.generate({ prompt: "A cute baby sea otter" }); + const image = await openai.images.generate({ model: "dall-e-3", prompt: "A cute baby sea otter" }); console.log(image.data); } @@ -468,12 +891,11 @@ paths: } ] } - /images/edits: post: operationId: createImageEdit tags: - - OpenAI + - Images summary: Creates an edited or extended image given an original image and a prompt. requestBody: required: true @@ -490,6 +912,7 @@ paths: $ref: "#/components/schemas/ImagesResponse" x-oaiMeta: name: Create image edit + group: images returns: Returns a list of [image](/docs/api-reference/images/object) objects. examples: request: @@ -502,10 +925,10 @@ paths: -F n=2 \ -F size="1024x1024" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Image.create_edit( + from openai import OpenAI + client = OpenAI() + + client.images.edit( image=open("otter.png", "rb"), mask=open("mask.png", "rb"), prompt="A cute baby sea otter wearing a beret", @@ -540,12 +963,11 @@ paths: } ] } - /images/variations: post: operationId: createImageVariation tags: - - OpenAI + - Images summary: Creates a variation of a given image. 
requestBody: required: true @@ -562,6 +984,7 @@ paths: $ref: "#/components/schemas/ImagesResponse" x-oaiMeta: name: Create image variation + group: images returns: Returns a list of [image](/docs/api-reference/images/object) objects. examples: request: @@ -572,11 +995,11 @@ paths: -F n=2 \ -F size="1024x1024" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Image.create_variation( - image=open("otter.png", "rb"), + from openai import OpenAI + client = OpenAI() + + response = client.images.create_variation( + image=open("image_edit_original.png", "rb"), n=2, size="1024x1024" ) @@ -611,7 +1034,7 @@ paths: post: operationId: createEmbedding tags: - - OpenAI + - Embeddings summary: Creates an embedding vector representing the input text. requestBody: required: true @@ -628,6 +1051,7 @@ paths: $ref: "#/components/schemas/CreateEmbeddingResponse" x-oaiMeta: name: Create embeddings + group: embeddings returns: A list of [embedding](/docs/api-reference/embeddings/object) objects. examples: request: @@ -637,15 +1061,17 @@ paths: -H "Content-Type: application/json" \ -d '{ "input": "The food was delicious and the waiter...", - "model": "text-embedding-ada-002" + "model": "text-embedding-ada-002", + "encoding_format": "float" }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Embedding.create( + from openai import OpenAI + client = OpenAI() + + client.embeddings.create( model="text-embedding-ada-002", - input="The food was delicious and the waiter..." 
+ input="The food was delicious and the waiter...", + encoding_format="float" ) node.js: |- import OpenAI from "openai"; @@ -656,6 +1082,7 @@ paths: const embedding = await openai.embeddings.create({ model: "text-embedding-ada-002", input: "The quick brown fox jumped over the lazy dog", + encoding_format: "float", }); console.log(embedding); @@ -684,74 +1111,149 @@ paths: } } - /audio/transcriptions: + /audio/speech: post: - operationId: createTranscription + operationId: createSpeech tags: - - OpenAI - summary: Transcribes audio into the input language. + - Audio + summary: Generates audio from the input text. requestBody: required: true content: - multipart/form-data: + application/json: schema: - $ref: "#/components/schemas/CreateTranscriptionRequest" + $ref: "#/components/schemas/CreateSpeechRequest" responses: "200": description: OK + headers: + Transfer-Encoding: + schema: + type: string + description: chunked content: - application/json: + application/octet-stream: schema: - $ref: "#/components/schemas/CreateTranscriptionResponse" + type: string + format: binary x-oaiMeta: - name: Create transcription - returns: The transcriped text. + name: Create speech + group: audio + returns: The audio file content. 
examples: request: curl: | - curl https://api.openai.com/v1/audio/transcriptions \ + curl https://api.openai.com/v1/audio/speech \ -H "Authorization: Bearer $OPENAI_API_KEY" \ - -H "Content-Type: multipart/form-data" \ - -F file="@/path/to/file/audio.mp3" \ - -F model="whisper-1" + -H "Content-Type: application/json" \ + -d '{ + "model": "tts-1", + "input": "The quick brown fox jumped over the lazy dog.", + "voice": "alloy" + }' \ + --output speech.mp3 python: | - import os + from pathlib import Path import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - audio_file = open("audio.mp3", "rb") - transcript = openai.Audio.transcribe("whisper-1", audio_file) - node: |- + + speech_file_path = Path(__file__).parent / "speech.mp3" + response = openai.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." + ) + response.stream_to_file(speech_file_path) + node: | import fs from "fs"; + import path from "path"; import OpenAI from "openai"; const openai = new OpenAI(); + const speechFile = path.resolve("./speech.mp3"); + async function main() { - const transcription = await openai.audio.transcriptions.create({ - file: fs.createReadStream("audio.mp3"), - model: "whisper-1", + const mp3 = await openai.audio.speech.create({ + model: "tts-1", + voice: "alloy", + input: "Today is a wonderful day to build something people love!", }); - - console.log(transcription.text); + console.log(speechFile); + const buffer = Buffer.from(await mp3.arrayBuffer()); + await fs.promises.writeFile(speechFile, buffer); } main(); - response: | - { - "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that." - } - - /audio/translations: + /audio/transcriptions: post: - operationId: createTranslation + operationId: createTranscription tags: - - OpenAI - summary: Translates audio into English. 
+ - Audio + summary: Transcribes audio into the input language. requestBody: required: true content: multipart/form-data: schema: - $ref: "#/components/schemas/CreateTranslationRequest" + $ref: "#/components/schemas/CreateTranscriptionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateTranscriptionResponse" + x-oaiMeta: + name: Create transcription + group: audio + returns: The transcribed text. + examples: + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F model="whisper-1" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + node: | + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + }); + + console.log(transcription.text); + } + main(); + response: | + { + "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that." + } + /audio/translations: + post: + operationId: createTranslation + tags: + - Audio + summary: Translates audio into English. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateTranslationRequest" responses: "200": description: OK @@ -761,6 +1263,7 @@ paths: $ref: "#/components/schemas/CreateTranslationResponse" x-oaiMeta: name: Create translation + group: audio returns: The translated text. 
examples: request: @@ -771,21 +1274,29 @@ paths: -F file="@/path/to/file/german.m4a" \ -F model="whisper-1" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - audio_file = open("german.m4a", "rb") - transcript = openai.Audio.translate("whisper-1", audio_file) + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.translations.create( + model="whisper-1", + file=audio_file + ) node: | - const { Configuration, OpenAIApi } = require("openai"); - const configuration = new Configuration({ - apiKey: process.env.OPENAI_API_KEY, - }); - const openai = new OpenAIApi(configuration); - const resp = await openai.createTranslation( - fs.createReadStream("audio.mp3"), - "whisper-1" - ); + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const translation = await openai.audio.translations.create({ + file: fs.createReadStream("speech.mp3"), + model: "whisper-1", + }); + + console.log(translation.text); + } + main(); response: | { "text": "Hello, my name is Wolfgang and I come from Germany. Where are you heading today?" @@ -795,8 +1306,15 @@ paths: get: operationId: listFiles tags: - - OpenAI + - Files summary: Returns a list of files that belong to the user's organization. + parameters: + - in: query + name: purpose + required: false + schema: + type: string + description: Only return files with the given purpose. responses: "200": description: OK @@ -806,17 +1324,18 @@ paths: $ref: "#/components/schemas/ListFilesResponse" x-oaiMeta: name: List files - returns: A list of [file](/docs/api-reference/files/object) objects. + group: files + returns: A list of [File](/docs/api-reference/files/object) objects. 
examples: request: curl: | curl https://api.openai.com/v1/files \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.File.list() + from openai import OpenAI + client = OpenAI() + + client.files.list() node.js: |- import OpenAI from "openai"; @@ -839,8 +1358,8 @@ paths: "object": "file", "bytes": 175, "created_at": 1613677385, - "filename": "train.jsonl", - "purpose": "search" + "filename": "salesOverview.pdf", + "purpose": "assistants", }, { "id": "file-abc123", @@ -848,7 +1367,7 @@ paths: "bytes": 140, "created_at": 1613779121, "filename": "puppy.jsonl", - "purpose": "search" + "purpose": "fine-tune", } ], "object": "list" @@ -856,10 +1375,13 @@ paths: post: operationId: createFile tags: - - OpenAI + - Files summary: | - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. + Upload a file that can be used across various endpoints. The size of all the files uploaded by one organization can be up to 100 GB. + + The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files. + Please [contact us](https://help.openai.com/) if you need to increase these storage limits. requestBody: required: true content: @@ -875,7 +1397,8 @@ paths: $ref: "#/components/schemas/OpenAIFile" x-oaiMeta: name: Upload file - returns: The uploaded [file](/docs/api-reference/files/object) object. + group: files + returns: The uploaded [File](/docs/api-reference/files/object) object. 
examples: request: curl: | @@ -884,12 +1407,12 @@ paths: -F purpose="fine-tune" \ -F file="@mydata.jsonl" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.File.create( + from openai import OpenAI + client = OpenAI() + + client.files.create( file=open("mydata.jsonl", "rb"), - purpose='fine-tune' + purpose="fine-tune" ) node.js: |- import fs from "fs"; @@ -911,18 +1434,16 @@ paths: { "id": "file-abc123", "object": "file", - "bytes": 140, - "created_at": 1613779121, + "bytes": 120000, + "created_at": 1677610602, "filename": "mydata.jsonl", "purpose": "fine-tune", - "status": "uploaded" | "processed" | "pending" | "error" } - /files/{file_id}: delete: operationId: deleteFile tags: - - OpenAI + - Files summary: Delete a file. parameters: - in: path @@ -940,6 +1461,7 @@ paths: $ref: "#/components/schemas/DeleteFileResponse" x-oaiMeta: name: Delete file + group: files returns: Deletion status. examples: request: @@ -948,10 +1470,10 @@ paths: -X DELETE \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.File.delete("file-abc123") + from openai import OpenAI + client = OpenAI() + + client.files.delete("file-abc123") node.js: |- import OpenAI from "openai"; @@ -973,7 +1495,7 @@ paths: get: operationId: retrieveFile tags: - - OpenAI + - Files summary: Returns information about a specific file. parameters: - in: path @@ -991,17 +1513,18 @@ paths: $ref: "#/components/schemas/OpenAIFile" x-oaiMeta: name: Retrieve file - returns: The [file](/docs/api-reference/files/object) object matching the specified ID. + group: files + returns: The [File](/docs/api-reference/files/object) object matching the specified ID. 
examples: request: curl: | curl https://api.openai.com/v1/files/file-abc123 \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.File.retrieve("file-abc123") + from openai import OpenAI + client = OpenAI() + + client.files.retrieve("file-abc123") node.js: |- import OpenAI from "openai"; @@ -1018,17 +1541,16 @@ paths: { "id": "file-abc123", "object": "file", - "bytes": 140, - "created_at": 1613779657, + "bytes": 120000, + "created_at": 1677610602, "filename": "mydata.jsonl", - "purpose": "fine-tune" + "purpose": "fine-tune", } - /files/{file_id}/content: get: operationId: downloadFile tags: - - OpenAI + - Files summary: Returns the contents of the specified file. parameters: - in: path @@ -1046,6 +1568,7 @@ paths: type: string x-oaiMeta: name: Retrieve file content + group: files returns: The file content. examples: request: @@ -1053,10 +1576,10 @@ paths: curl https://api.openai.com/v1/files/file-abc123/content \ -H "Authorization: Bearer $OPENAI_API_KEY" > file.jsonl python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - content = openai.File.download("file-abc123") + from openai import OpenAI + client = OpenAI() + + content = client.files.retrieve_content("file-abc123") node.js: | import OpenAI from "openai"; @@ -1074,9 +1597,9 @@ paths: post: operationId: createFineTuningJob tags: - - OpenAI + - Fine-tuning summary: | - Creates a job that fine-tunes a specified model from a given dataset. + Creates a fine-tuning job which begins the process of creating a new model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. @@ -1096,23 +1619,27 @@ paths: $ref: "#/components/schemas/FineTuningJob" x-oaiMeta: name: Create fine-tuning job + group: fine-tuning returns: A [fine-tuning.job](/docs/api-reference/fine-tuning/object) object. 
examples: - - title: No hyperparameters + - title: Default request: curl: | curl https://api.openai.com/v1/fine_tuning/jobs \ -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "training_file": "file-abc123" - "model": "gpt-3.5-turbo", + "training_file": "file-BK7bzQj3FfZFXr7DbL6xJwfo", + "model": "gpt-3.5-turbo" }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTuningJob.create(training_file="file-abc123", model="gpt-3.5-turbo") + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + model="gpt-3.5-turbo" + ) node.js: | import OpenAI from "openai"; @@ -1130,7 +1657,7 @@ paths: response: | { "object": "fine_tuning.job", - "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + "id": "ftjob-abc123", "model": "gpt-3.5-turbo-0613", "created_at": 1614807352, "fine_tuned_model": null, @@ -1140,24 +1667,30 @@ paths: "validation_file": null, "training_file": "file-abc123", } - - title: Hyperparameters + - title: Epochs request: curl: | curl https://api.openai.com/v1/fine_tuning/jobs \ -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "training_file": "file-abc123" + "training_file": "file-abc123", "model": "gpt-3.5-turbo", "hyperparameters": { "n_epochs": 2 } }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTuningJob.create(training_file="file-abc123", model="gpt-3.5-turbo", hyperparameters={"n_epochs":2}) + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + model="gpt-3.5-turbo", + hyperparameters={ + "n_epochs":2 + } + ) node.js: | import OpenAI from "openai"; @@ -1167,7 +1700,7 @@ paths: const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", model: "gpt-3.5-turbo", - hyperparameters: { n_epochs: 2 }, + hyperparameters: { n_epochs: 2 } }); 
console.log(fineTune);
@@ -1177,7 +1710,7 @@ paths:
       response: |
         {
           "object": "fine_tuning.job",
-          "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+          "id": "ftjob-abc123",
           "model": "gpt-3.5-turbo-0613",
           "created_at": 1614807352,
           "fine_tuned_model": null,
@@ -1186,12 +1719,60 @@ paths:
           "status": "queued",
           "validation_file": null,
           "training_file": "file-abc123",
-          "hyperparameters":{"n_epochs":2},
+          "hyperparameters": {"n_epochs": 2},
+        }
+      - title: Validation file
+        request:
+          curl: |
+            curl https://api.openai.com/v1/fine_tuning/jobs \
+            -H "Content-Type: application/json" \
+            -H "Authorization: Bearer $OPENAI_API_KEY" \
+            -d '{
+            "training_file": "file-abc123",
+            "validation_file": "file-abc123",
+            "model": "gpt-3.5-turbo"
+          }'
+          python: |
+            from openai import OpenAI
+            client = OpenAI()
+
+            client.fine_tuning.jobs.create(
+              training_file="file-abc123",
+              validation_file="file-abc123",
+              model="gpt-3.5-turbo"
+            )
+          node.js: |
+            import OpenAI from "openai";
+
+            const openai = new OpenAI();
+
+            async function main() {
+              const fineTune = await openai.fineTuning.jobs.create({
+                training_file: "file-abc123",
+                validation_file: "file-abc123"
+              });
+
+              console.log(fineTune);
+            }
+
+            main();
+        response: |
+          {
+            "object": "fine_tuning.job",
+            "id": "ftjob-abc123",
+            "model": "gpt-3.5-turbo-0613",
+            "created_at": 1614807352,
+            "fine_tuned_model": null,
+            "organization_id": "org-123",
+            "result_files": [],
+            "status": "queued",
+            "validation_file": "file-abc123",
+            "training_file": "file-abc123",
+          }
    get:
      operationId: listPaginatedFineTuningJobs
      tags:
-        - OpenAI
+        - Fine-tuning
      summary: |
        List your organization's fine-tuning jobs
      parameters:
@@ -1217,6 +1798,7 @@ paths:
            $ref: "#/components/schemas/ListPaginatedFineTuningJobsResponse"
      x-oaiMeta:
        name: List fine-tuning jobs
+        group: fine-tuning
        returns: A list of paginated [fine-tuning job](/docs/api-reference/fine-tuning/object) objects.
examples: request: @@ -1224,10 +1806,10 @@ paths: curl https://api.openai.com/v1/fine_tuning/jobs?limit=2 \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTuningJob.list() + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.list() node.js: |- import OpenAI from "openai"; @@ -1263,7 +1845,7 @@ paths: get: operationId: retrieveFineTuningJob tags: - - OpenAI + - Fine-tuning summary: | Get info about a fine-tuning job. @@ -1286,25 +1868,26 @@ paths: $ref: "#/components/schemas/FineTuningJob" x-oaiMeta: name: Retrieve fine-tuning job - returns: The [fine-tuning](/docs/api-reference/fine-tunes/object) object with the given ID. + group: fine-tuning + returns: The [fine-tuning](/docs/api-reference/fine-tuning/object) object with the given ID. examples: request: curl: | curl https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTuningJob.retrieve("ft-anaKUAgnnBkNGB3QcSr4pImR") + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.retrieve("ftjob-abc123") node.js: | import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const fineTune = await openai.fineTuning.jobs.retrieve("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); - + const fineTune = await openai.fineTuning.jobs.retrieve("ftjob-abc123"); + console.log(fineTune); } @@ -1312,7 +1895,7 @@ paths: response: &fine_tuning_example | { "object": "fine_tuning.job", - "id": "ft-zRdUkP4QeZqeYjDcQL0wwam1", + "id": "ftjob-abc123", "model": "davinci-002", "created_at": 1692661014, "finished_at": 1692661190, @@ -1329,12 +1912,11 @@ paths: }, "trained_tokens": 5768 } - /fine_tuning/jobs/{fine_tuning_job_id}/events: get: operationId: listFineTuningEvents tags: - - OpenAI + - Fine-tuning summary: | Get status updates for a 
fine-tuning job.
      parameters:
@@ -1368,24 +1950,28 @@ paths:
            $ref: "#/components/schemas/ListFineTuningJobEventsResponse"
      x-oaiMeta:
        name: List fine-tuning events
+        group: fine-tuning
        returns: A list of fine-tuning event objects.
      examples:
        request:
          curl: |
-            curl https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F/events \
+            curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/events \
            -H "Authorization: Bearer $OPENAI_API_KEY"
          python: |
-            import os
-            import openai
-            openai.api_key = os.getenv("OPENAI_API_KEY")
-            openai.FineTuningJob.list_events(id="ft-w9WJrnTe9vcVopaTy9LrlGQv", limit=2)
+            from openai import OpenAI
+            client = OpenAI()
+
+            client.fine_tuning.jobs.list_events(
+              fine_tuning_job_id="ftjob-abc123",
+              limit=2
+            )
          node.js: |-
            import OpenAI from "openai";

            const openai = new OpenAI();

            async function main() {
-              const list = await openai.fineTuning.list_events(id="ft-w9WJrnTe9vcVopaTy9LrlGQv", limit=2);
+              const list = await openai.fineTuning.jobs.listEvents("ftjob-abc123", { limit: 2 });

              for await (const fineTune of list) {
                console.log(fineTune);
@@ -1418,12 +2004,11 @@ paths:
        ],
        "has_more": true
      }
-
  /fine_tuning/jobs/{fine_tuning_job_id}/cancel:
    post:
      operationId: cancelFineTuningJob
      tags:
-        - OpenAI
+        - Fine-tuning
      summary: |
        Immediately cancel a fine-tune job.
      parameters:
@@ -1444,24 +2029,25 @@ paths:
            $ref: "#/components/schemas/FineTuningJob"
      x-oaiMeta:
        name: Cancel fine-tuning
+        group: fine-tuning
        returns: The cancelled [fine-tuning](/docs/api-reference/fine-tuning/object) object.
examples: request: curl: | - curl -X POST https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F/cancel \ + curl -X POST https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/cancel \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTuningJob.cancel("ft-anaKUAgnnBkNGB3QcSr4pImR") + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.cancel("ftjob-abc123") node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const fineTune = await openai.fineTuning.jobs.cancel("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + const fineTune = await openai.fineTuning.jobs.cancel("ftjob-abc123"); console.log(fineTune); } @@ -1469,7 +2055,7 @@ paths: response: | { "object": "fine_tuning.job", - "id": "ft-gleYLJhWh1YFufiy29AahVpj", + "id": "ftjob-abc123", "model": "gpt-3.5-turbo-0613", "created_at": 1689376978, "fine_tuned_model": null, @@ -1483,2690 +2069,6608 @@ paths: "training_file": "file-abc123" } - /fine-tunes: - post: - operationId: createFineTune - deprecated: true + /models: + get: + operationId: listModels tags: - - OpenAI - summary: | - Creates a job that fine-tunes a specified model from a given dataset. - - Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - - [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CreateFineTuneRequest" + - Models + summary: Lists the currently available models, and provides basic information about each one such as the owner and availability. responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/FineTune" + $ref: "#/components/schemas/ListModelsResponse" x-oaiMeta: - name: Create fine-tune - returns: A [fine-tune](/docs/api-reference/fine-tunes/object) object. 
+ name: List models + group: models + returns: A list of [model](/docs/api-reference/models/object) objects. examples: request: curl: | - curl https://api.openai.com/v1/fine-tunes \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -d '{ - "training_file": "file-abc123" - }' + curl https://api.openai.com/v1/models \ + -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTune.create(training_file="file-abc123") - node.js: | + from openai import OpenAI + client = OpenAI() + + client.models.list() + node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const fineTune = await openai.fineTunes.create({ - training_file: "file-abc123" - }); + const list = await openai.models.list(); - console.log(fineTune); + for await (const model of list) { + console.log(model); + } } - main(); response: | { - "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - "object": "fine-tune", - "model": "curie", - "created_at": 1614807352, - "events": [ + "object": "list", + "data": [ { - "object": "fine-tune-event", - "created_at": 1614807352, - "level": "info", - "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." 
- } - ], - "fine_tuned_model": null, - "hyperparams": { - "batch_size": 4, - "learning_rate_multiplier": 0.1, - "n_epochs": 4, - "prompt_loss_weight": 0.1, - }, - "organization_id": "org-123", - "result_files": [], - "status": "pending", - "validation_files": [], - "training_files": [ + "id": "model-id-0", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner" + }, { - "id": "file-abc123", - "object": "file", - "bytes": 1547276, - "created_at": 1610062281, - "filename": "my-data-train.jsonl", - "purpose": "fine-tune-train" - } + "id": "model-id-1", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner", + }, + { + "id": "model-id-2", + "object": "model", + "created": 1686935002, + "owned_by": "openai" + }, ], - "updated_at": 1614807352, + "object": "list" } + /models/{model}: get: - operationId: listFineTunes - deprecated: true + operationId: retrieveModel tags: - - OpenAI - summary: | - List your organization's fine-tuning jobs + - Models + summary: Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + parameters: + - in: path + name: model + required: true + schema: + type: string + # ideally this will be an actual ID, so this will always work from browser + example: gpt-3.5-turbo + description: The ID of the model to use for this request responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/ListFineTunesResponse" + $ref: "#/components/schemas/Model" x-oaiMeta: - name: List fine-tunes - returns: A list of [fine-tune](/docs/api-reference/fine-tunes/object) objects. + name: Retrieve model + group: models + returns: The [model](/docs/api-reference/models/object) object matching the specified ID. 
examples: request: curl: | - curl https://api.openai.com/v1/fine-tunes \ + curl https://api.openai.com/v1/models/VAR_model_id \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTune.list() + from openai import OpenAI + client = OpenAI() + + client.models.retrieve("VAR_model_id") node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const list = await openai.fineTunes.list(); + const model = await openai.models.retrieve("gpt-3.5-turbo"); - for await (const fineTune of list) { - console.log(fineTune); - } + console.log(model); } main(); - response: | + response: &retrieve_model_response | { - "object": "list", - "data": [ - { - "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - "object": "fine-tune", - "model": "curie", - "created_at": 1614807352, - "fine_tuned_model": null, - "hyperparams": { ... }, - "organization_id": "org-123", - "result_files": [], - "status": "pending", - "validation_files": [], - "training_files": [ { ... } ], - "updated_at": 1614807352, - }, - { ... }, - { ... } - ] + "id": "VAR_model_id", + "object": "model", + "created": 1686935002, + "owned_by": "openai" } - - /fine-tunes/{fine_tune_id}: - get: - operationId: retrieveFineTune - deprecated: true + delete: + operationId: deleteModel tags: - - OpenAI - summary: | - Gets info about the fine-tune job. - - [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + - Models + summary: Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. 
parameters: - in: path - name: fine_tune_id + name: model required: true schema: type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - description: | - The ID of the fine-tune job + example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 + description: The model to delete responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/FineTune" + $ref: "#/components/schemas/DeleteModelResponse" x-oaiMeta: - name: Retrieve fine-tune - returns: The [fine-tune](/docs/api-reference/fine-tunes/object) object with the given ID. + name: Delete a fine-tuned model + group: models + returns: Deletion status. examples: request: curl: | - curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F \ + curl https://api.openai.com/v1/models/ft:gpt-3.5-turbo:acemeco:suffix:abc123 \ + -X DELETE \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTune.retrieve(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") + from openai import OpenAI + client = OpenAI() + + client.models.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const fineTune = await openai.fineTunes.retrieve("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); - console.log(fineTune); + console.log(model); } - main(); - response: &fine_tune_example | + response: | { - "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - "object": "fine-tune", - "model": "curie", - "created_at": 1614807352, - "events": [ - { - "object": "fine-tune-event", - "created_at": 1614807352, - "level": "info", - "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." - }, - { - "object": "fine-tune-event", - "created_at": 1614807356, - "level": "info", - "message": "Job started." 
- }, - { - "object": "fine-tune-event", - "created_at": 1614807861, - "level": "info", - "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." - }, - { - "object": "fine-tune-event", - "created_at": 1614807864, - "level": "info", - "message": "Uploaded result files: file-abc123." - }, - { - "object": "fine-tune-event", - "created_at": 1614807864, - "level": "info", - "message": "Job succeeded." - } - ], - "fine_tuned_model": "curie:ft-acmeco-2021-03-03-21-44-20", - "hyperparams": { - "batch_size": 4, - "learning_rate_multiplier": 0.1, - "n_epochs": 4, - "prompt_loss_weight": 0.1, - }, - "organization_id": "org-123", - "result_files": [ - { - "id": "file-abc123", - "object": "file", - "bytes": 81509, - "created_at": 1614807863, - "filename": "compiled_results.csv", - "purpose": "fine-tune-results" - } - ], - "status": "succeeded", - "validation_files": [], - "training_files": [ - { - "id": "file-abc123", - "object": "file", - "bytes": 1547276, - "created_at": 1610062281, - "filename": "my-data-train.jsonl", - "purpose": "fine-tune-train" - } - ], - "updated_at": 1614807865, + "id": "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "object": "model", + "deleted": true } - /fine-tunes/{fine_tune_id}/cancel: + /moderations: post: - operationId: cancelFineTune - deprecated: true + operationId: createModeration tags: - - OpenAI - summary: | - Immediately cancel a fine-tune job. 
- parameters: - - in: path - name: fine_tune_id - required: true - schema: - type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - description: | - The ID of the fine-tune job to cancel + - Moderations + summary: Classifies if text violates OpenAI's Content Policy + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateModerationRequest" responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/FineTune" + $ref: "#/components/schemas/CreateModerationResponse" x-oaiMeta: - name: Cancel fine-tune - returns: The cancelled [fine-tune](/docs/api-reference/fine-tunes/object) object. + name: Create moderation + group: moderations + returns: A [moderation](/docs/api-reference/moderations/object) object. examples: request: curl: | - curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/cancel \ - -H "Authorization: Bearer $OPENAI_API_KEY" + curl https://api.openai.com/v1/moderations \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "input": "I want to kill them." + }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTune.cancel(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") - node.js: |- + from openai import OpenAI + client = OpenAI() + + client.moderations.create(input="I want to kill them.") + node.js: | import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const fineTune = await openai.fineTunes.cancel("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + const moderation = await openai.moderations.create({ input: "I want to kill them." }); - console.log(fineTune); + console.log(moderation); } main(); - response: | + response: &moderation_example | { - "id": "ft-xhrpBbvVUzYGo8oUO1FY4nI7", - "object": "fine-tune", - "model": "curie", - "created_at": 1614807770, - "events": [ { ... } ], - "fine_tuned_model": null, - "hyperparams": { ... 
}, - "organization_id": "org-123", - "result_files": [], - "status": "cancelled", - "validation_files": [], - "training_files": [ + "id": "modr-XXXXX", + "model": "text-moderation-005", + "results": [ { - "id": "file-abc123", - "object": "file", - "bytes": 1547276, - "created_at": 1610062281, - "filename": "my-data-train.jsonl", - "purpose": "fine-tune-train" + "flagged": true, + "categories": { + "sexual": false, + "hate": false, + "harassment": false, + "self-harm": false, + "sexual/minors": false, + "hate/threatening": false, + "violence/graphic": false, + "self-harm/intent": false, + "self-harm/instructions": false, + "harassment/threatening": true, + "violence": true, + }, + "category_scores": { + "sexual": 1.2282071e-06, + "hate": 0.010696256, + "harassment": 0.29842457, + "self-harm": 1.5236925e-08, + "sexual/minors": 5.7246268e-08, + "hate/threatening": 0.0060676364, + "violence/graphic": 4.435014e-06, + "self-harm/intent": 8.098441e-10, + "self-harm/instructions": 2.8498655e-11, + "harassment/threatening": 0.63055265, + "violence": 0.99011886, + } } - ], - "updated_at": 1614807789, + ] } - /fine-tunes/{fine_tune_id}/events: + /assistants: get: - operationId: listFineTuneEvents - deprecated: true + operationId: listAssistants tags: - - OpenAI - summary: | - Get fine-grained status updates for a fine-tune job. + - Assistants + summary: Returns a list of assistants. parameters: - - in: path - name: fine_tune_id - required: true + - name: limit + in: query + description: &pagination_limit_param_description | + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: &pagination_order_param_description | + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. 
schema: type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - description: | - The ID of the fine-tune job to get events for. - - in: query - name: stream - required: false + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: &pagination_after_param_description | + A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. schema: - type: boolean - default: false - description: | - Whether to stream events for the fine-tune job. If set to true, - events will be sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available. The stream will terminate with a - `data: [DONE]` message when the job is finished (succeeded, cancelled, - or failed). - - If set to false, only events generated so far will be returned. + type: string + - name: before + in: query + description: &pagination_before_param_description | + A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/ListFineTuneEventsResponse" + $ref: "#/components/schemas/ListAssistantsResponse" x-oaiMeta: - name: List fine-tune events - returns: A list of fine-tune event objects. + name: List assistants + group: assistants + beta: true + returns: A list of [assistant](/docs/api-reference/assistants/object) objects. 
examples: request: curl: | - curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/events \ - -H "Authorization: Bearer $OPENAI_API_KEY" + curl "https://api.openai.com/v1/assistants?order=desc&limit=20" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTune.list_events(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") + from openai import OpenAI + client = OpenAI() + + my_assistants = client.beta.assistants.list( + order="desc", + limit="20", + ) + print(my_assistants.data) node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const fineTune = await openai.fineTunes.listEvents("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + const myAssistants = await openai.beta.assistants.list({ + order: "desc", + limit: "20", + }); - console.log(fineTune); + console.log(myAssistants.data); } + main(); - response: | + response: &list_assistants_example | { "object": "list", "data": [ { - "object": "fine-tune-event", - "created_at": 1614807352, - "level": "info", - "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." - }, - { - "object": "fine-tune-event", - "created_at": 1614807356, - "level": "info", - "message": "Job started." - }, - { - "object": "fine-tune-event", - "created_at": 1614807861, - "level": "info", - "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." 
+ "id": "asst_abc123", + "object": "assistant", + "created_at": 1698982736, + "name": "Coding Tutor", + "description": null, + "model": "gpt-4", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "file_ids": [], + "metadata": {} }, { - "object": "fine-tune-event", - "created_at": 1614807864, - "level": "info", - "message": "Uploaded result files: file-abc123" + "id": "asst_abc456", + "object": "assistant", + "created_at": 1698982718, + "name": "My Assistant", + "description": null, + "model": "gpt-4", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "file_ids": [], + "metadata": {} }, { - "object": "fine-tune-event", - "created_at": 1614807864, - "level": "info", - "message": "Job succeeded." + "id": "asst_abc789", + "object": "assistant", + "created_at": 1698982643, + "name": null, + "description": null, + "model": "gpt-4", + "instructions": null, + "tools": [], + "file_ids": [], + "metadata": {} } - ] + ], + "first_id": "asst_abc123", + "last_id": "asst_abc789", + "has_more": false } + post: + operationId: createAssistant + tags: + - Assistants + summary: Create an assistant with a model and instructions. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateAssistantRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/AssistantObject" + x-oaiMeta: + name: Create assistant + group: assistants + beta: true + returns: An [assistant](/docs/api-reference/assistants/object) object. + examples: + - title: Code Interpreter + request: + curl: | + curl "https://api.openai.com/v1/assistants" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "instructions": "You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.", + "name": "Math Tutor", + "tools": [{"type": "code_interpreter"}], + "model": "gpt-4" + }' - /models: + python: | + from openai import OpenAI + client = OpenAI() + + my_assistant = client.beta.assistants.create( + instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + name="Math Tutor", + tools=[{"type": "code_interpreter"}], + model="gpt-4", + ) + print(my_assistant) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myAssistant = await openai.beta.assistants.create({ + instructions: + "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + name: "Math Tutor", + tools: [{ type: "code_interpreter" }], + model: "gpt-4", + }); + + console.log(myAssistant); + } + + main(); + response: &create_assistants_example | + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698984975, + "name": "Math Tutor", + "description": null, + "model": "gpt-4", + "instructions": "You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.", + "tools": [ + { + "type": "code_interpreter" + } + ], + "file_ids": [], + "metadata": {} + } + - title: Files + request: + curl: | + curl https://api.openai.com/v1/assistants \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [{"type": "retrieval"}], + "model": "gpt-4", + "file_ids": ["file-abc123"] + }' + python: | + from openai import OpenAI + client = OpenAI() + + my_assistant = client.beta.assistants.create( + instructions="You are an HR bot, and you have access to files to answer employee questions about company policies.", + name="HR Helper", + tools=[{"type": "retrieval"}], + model="gpt-4", + file_ids=["file-abc123"], + ) + print(my_assistant) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myAssistant = await openai.beta.assistants.create({ + instructions: + "You are an HR bot, and you have access to files to answer employee questions about company policies.", + name: "HR Helper", + tools: [{ type: "retrieval" }], + model: "gpt-4", + file_ids: ["file-abc123"], + }); + + console.log(myAssistant); + } + + main(); + response: | + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1699009403, + "name": "HR Helper", + "description": null, + "model": "gpt-4", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [ + { + "type": "retrieval" + } + ], + "file_ids": [ + "file-abc123" + ], + "metadata": {} + } + + /assistants/{assistant_id}: get: - operationId: listModels + operationId: getAssistant tags: - - OpenAI - summary: Lists the currently available models, and provides basic information about each one such as the 
owner and availability. + - Assistants + summary: Retrieves an assistant. + parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + description: The ID of the assistant to retrieve. responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/ListModelsResponse" + $ref: "#/components/schemas/AssistantObject" x-oaiMeta: - name: List models - returns: A list of [model](/docs/api-reference/models/object) objects. + name: Retrieve assistant + group: assistants + beta: true + returns: The [assistant](/docs/api-reference/assistants/object) object matching the specified ID. examples: request: curl: | - curl https://api.openai.com/v1/models \ - -H "Authorization: Bearer $OPENAI_API_KEY" + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Model.list() + from openai import OpenAI + client = OpenAI() + + my_assistant = client.beta.assistants.retrieve("asst_abc123") + print(my_assistant) node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const list = await openai.models.list(); + const myAssistant = await openai.beta.assistants.retrieve( + "asst_abc123" + ); - for await (const model of list) { - console.log(model); - } + console.log(myAssistant); } + main(); response: | { - "object": "list", - "data": [ - { - "id": "model-id-0", - "object": "model", - "created": 1686935002, - "owned_by": "organization-owner" - }, + "id": "asst_abc123", + "object": "assistant", + "created_at": 1699009709, + "name": "HR Helper", + "description": null, + "model": "gpt-4", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [ { - "id": "model-id-1", - "object": "model", - "created": 
1686935002, - "owned_by": "organization-owner", - }, - { - "id": "model-id-2", - "object": "model", - "created": 1686935002, - "owned_by": "openai" - }, + "type": "retrieval" + } ], - "object": "list" + "file_ids": [ + "file-abc123" + ], + "metadata": {} } - - /models/{model}: - get: - operationId: retrieveModel + post: + operationId: modifyAssistant tags: - - OpenAI - summary: Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + - Assistants + summary: Modifies an assistant. parameters: - in: path - name: model + name: assistant_id required: true schema: type: string - # ideally this will be an actual ID, so this will always work from browser - example: gpt-3.5-turbo - description: The ID of the model to use for this request + description: The ID of the assistant to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyAssistantRequest" responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/Model" + $ref: "#/components/schemas/AssistantObject" x-oaiMeta: - name: Retrieve model - returns: The [model](/docs/api-reference/models/object) object matching the specified ID. + name: Modify assistant + group: assistants + beta: true + returns: The modified [assistant](/docs/api-reference/assistants/object) object. examples: request: curl: | - curl https://api.openai.com/v1/models/VAR_model_id \ - -H "Authorization: Bearer $OPENAI_API_KEY" + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", + "tools": [{"type": "retrieval"}], + "model": "gpt-4", + "file_ids": ["file-abc123", "file-abc456"] + }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Model.retrieve("VAR_model_id") + from openai import OpenAI + client = OpenAI() + + my_updated_assistant = client.beta.assistants.update( + "asst_abc123", + instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", + name="HR Helper", + tools=[{"type": "retrieval"}], + model="gpt-4", + file_ids=["file-abc123", "file-abc456"], + ) + + print(my_updated_assistant) node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const model = await openai.models.retrieve("gpt-3.5-turbo"); + const myUpdatedAssistant = await openai.beta.assistants.update( + "asst_abc123", + { + instructions: + "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", + name: "HR Helper", + tools: [{ type: "retrieval" }], + model: "gpt-4", + file_ids: [ + "file-abc123", + "file-abc456", + ], + } + ); - console.log(model); + console.log(myUpdatedAssistant); } main(); - response: &retrieve_model_response | + response: | { - "id": "VAR_model_id", - "object": "model", - "created": 1686935002, - "owned_by": "openai" + "id": "asst_abc123", + "object": "assistant", + "created_at": 1699009709, + "name": "HR Helper", + "description": null, + "model": "gpt-4", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", + "tools": [ + { + "type": "retrieval" + } + ], + "file_ids": [ + "file-abc123", + "file-abc456" + ], + "metadata": {} } delete: - operationId: deleteModel + operationId: deleteAssistant tags: - - OpenAI - summary: Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + - Assistants + summary: Delete an assistant. parameters: - in: path - name: model + name: assistant_id required: true schema: type: string - example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 - description: The model to delete + description: The ID of the assistant to delete. responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/DeleteModelResponse" + $ref: "#/components/schemas/DeleteAssistantResponse" x-oaiMeta: - name: Delete fine-tune model - returns: Deletion status. + name: Delete assistant + group: assistants + beta: true + returns: Deletion status examples: request: curl: | - curl https://api.openai.com/v1/models/ft:gpt-3.5-turbo:acemeco:suffix:abc123 \ - -X DELETE \ - -H "Authorization: Bearer $OPENAI_API_KEY" + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -X DELETE python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Model.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") + from openai import OpenAI + client = OpenAI() + + response = client.beta.assistants.delete("asst_abc123") + print(response) node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); + const response = await openai.beta.assistants.del("asst_abc123"); - console.log(model); + console.log(response); } main(); response: | { - "id": "ft:gpt-3.5-turbo:acemeco:suffix:abc123", - "object": "model", + 
"id": "asst_abc123", + "object": "assistant.deleted", "deleted": true } - /moderations: + /threads: post: - operationId: createModeration + operationId: createThread tags: - - OpenAI - summary: Classifies if text violates OpenAI's Content Policy + - Assistants + summary: Create a thread. + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/CreateThreadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ThreadObject" + x-oaiMeta: + name: Create thread + group: threads + beta: true + returns: A [thread](/docs/api-reference/threads) object. + examples: + - title: Empty + request: + curl: | + curl https://api.openai.com/v1/threads \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '' + python: | + from openai import OpenAI + client = OpenAI() + + empty_thread = client.beta.threads.create() + print(empty_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const emptyThread = await openai.beta.threads.create(); + + console.log(emptyThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699012949, + "metadata": {} + } + - title: Messages + request: + curl: | + curl https://api.openai.com/v1/threads \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "messages": [{ + "role": "user", + "content": "Hello, what is AI?", + "file_ids": ["file-abc123"] + }, { + "role": "user", + "content": "How does AI work? Explain it in simple terms." 
+ }] + }' + python: | + from openai import OpenAI + client = OpenAI() + + message_thread = client.beta.threads.create( + messages=[ + { + "role": "user", + "content": "Hello, what is AI?", + "file_ids": ["file-abc123"], + }, + { + "role": "user", + "content": "How does AI work? Explain it in simple terms." + }, + ] + ) + + print(message_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const messageThread = await openai.beta.threads.create({ + messages: [ + { + role: "user", + content: "Hello, what is AI?", + file_ids: ["file-abc123"], + }, + { + role: "user", + content: "How does AI work? Explain it in simple terms.", + }, + ], + }); + + console.log(messageThread); + } + + main(); + response: | + { + id: 'thread_abc123', + object: 'thread', + created_at: 1699014083, + metadata: {} + } + + /threads/{thread_id}: + get: + operationId: getThread + tags: + - Assistants + summary: Retrieves a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ThreadObject" + x-oaiMeta: + name: Retrieve thread + group: threads + beta: true + returns: The [thread](/docs/api-reference/threads/object) object matching the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + my_thread = client.beta.threads.retrieve("thread_abc123") + print(my_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myThread = await openai.beta.threads.retrieve( + "thread_abc123" + ); + + console.log(myThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699014083, + "metadata": {} + } + post: + operationId: modifyThread + tags: + - Assistants + summary: Modifies a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to modify. Only the `metadata` can be modified. requestBody: required: true content: application/json: schema: - $ref: "#/components/schemas/CreateModerationRequest" + $ref: "#/components/schemas/ModifyThreadRequest" responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/CreateModerationResponse" + $ref: "#/components/schemas/ThreadObject" x-oaiMeta: - name: Create moderation - returns: A [moderation](/docs/api-reference/moderations/object) object. + name: Modify thread + group: threads + beta: true + returns: The modified [thread](/docs/api-reference/threads/object) object matching the specified ID. examples: request: curl: | - curl https://api.openai.com/v1/moderations \ + curl https://api.openai.com/v1/threads/thread_abc123 \ -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ -d '{ - "input": "I want to kill them." 
- }' + "metadata": { + "modified": "true", + "user": "abc123" + } + }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Moderation.create( - input="I want to kill them.", + from openai import OpenAI + client = OpenAI() + + my_updated_thread = client.beta.threads.update( + "thread_abc123", + metadata={ + "modified": "true", + "user": "abc123" + } ) - node.js: | + print(my_updated_thread) + node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const moderation = await openai.moderations.create({ input: "I want to kill them." }); + const updatedThread = await openai.beta.threads.update( + "thread_abc123", + { + metadata: { modified: "true", user: "abc123" }, + } + ); - console.log(moderation); + console.log(updatedThread); } + main(); - response: &moderation_example | + response: | { - "id": "modr-XXXXX", - "model": "text-moderation-005", - "results": [ - { - "flagged": true, - "categories": { - "sexual": false, - "hate": false, - "harassment": false, - "self-harm": false, - "sexual/minors": false, - "hate/threatening": false, - "violence/graphic": false, - "self-harm/intent": false, - "self-harm/instructions": false, - "harassment/threatening": true, - "violence": true, - }, - "category_scores": { - "sexual": 1.2282071e-06, - "hate": 0.010696256, - "harassment": 0.29842457, - "self-harm": 1.5236925e-08, - "sexual/minors": 5.7246268e-08, - "hate/threatening": 0.0060676364, - "violence/graphic": 4.435014e-06, - "self-harm/intent": 8.098441e-10, - "self-harm/instructions": 2.8498655e-11, - "harassment/threatening": 0.63055265, - "violence": 0.99011886, - } - } - ] + "id": "thread_abc123", + "object": "thread", + "created_at": 1699014083, + "metadata": { + "modified": "true", + "user": "abc123" + } } + delete: + operationId: deleteThread + tags: + - Assistants + summary: Delete a thread. 
+ parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to delete. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteThreadResponse" + x-oaiMeta: + name: Delete thread + group: threads + beta: true + returns: Deletion status + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -X DELETE + python: | + from openai import OpenAI + client = OpenAI() -components: + response = client.beta.threads.delete("thread_abc123") + print(response) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const response = await openai.beta.threads.del("thread_abc123"); + + console.log(response); + } + main(); + response: | + { + "id": "thread_abc123", + "object": "thread.deleted", + "deleted": true + } + + /threads/{thread_id}/messages: + get: + operationId: listMessages + tags: + - Assistants + summary: Returns a list of messages for a given thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) the messages belong to. 
+ - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListMessagesResponse" + x-oaiMeta: + name: List messages + group: threads + beta: true + returns: A list of [message](/docs/api-reference/messages) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + thread_messages = client.beta.threads.messages.list("thread_abc123") + print(thread_messages.data) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const threadMessages = await openai.beta.threads.messages.list( + "thread_abc123" + ); + + console.log(threadMessages.data); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1699016383, + "thread_id": "thread_abc123", + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "How does AI work? 
Explain it in simple terms.", + "annotations": [] + } + } + ], + "file_ids": [], + "assistant_id": null, + "run_id": null, + "metadata": {} + }, + { + "id": "msg_abc456", + "object": "thread.message", + "created_at": 1699016383, + "thread_id": "thread_abc123", + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "Hello, what is AI?", + "annotations": [] + } + } + ], + "file_ids": [ + "file-abc123" + ], + "assistant_id": null, + "run_id": null, + "metadata": {} + } + ], + "first_id": "msg_abc123", + "last_id": "msg_abc456", + "has_more": false + } + post: + operationId: createMessage + tags: + - Assistants + summary: Create a message. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) to create a message for. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateMessageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/MessageObject" + x-oaiMeta: + name: Create message + group: threads + beta: true + returns: A [message](/docs/api-reference/messages/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "role": "user", + "content": "How does AI work? Explain it in simple terms." + }' + python: | + from openai import OpenAI + client = OpenAI() + + thread_message = client.beta.threads.messages.create( + "thread_abc123", + role="user", + content="How does AI work? 
Explain it in simple terms.", + ) + print(thread_message) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const threadMessages = await openai.beta.threads.messages.create( + "thread_abc123", + { role: "user", content: "How does AI work? Explain it in simple terms." } + ); + + console.log(threadMessages); + } + + main(); + response: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1699017614, + "thread_id": "thread_abc123", + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "How does AI work? Explain it in simple terms.", + "annotations": [] + } + } + ], + "file_ids": [], + "assistant_id": null, + "run_id": null, + "metadata": {} + } + + /threads/{thread_id}/messages/{message_id}: + get: + operationId: getMessage + tags: + - Assistants + summary: Retrieve a message. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) to which this message belongs. + - in: path + name: message_id + required: true + schema: + type: string + description: The ID of the message to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/MessageObject" + x-oaiMeta: + name: Retrieve message + group: threads + beta: true + returns: The [message](/docs/api-reference/threads/messages/object) object matching the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + message = client.beta.threads.messages.retrieve( + message_id="msg_abc123", + thread_id="thread_abc123", + ) + print(message) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const message = await openai.beta.threads.messages.retrieve( + "thread_abc123", + "msg_abc123" + ); + + console.log(message); + } + + main(); + response: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1699017614, + "thread_id": "thread_abc123", + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "How does AI work? Explain it in simple terms.", + "annotations": [] + } + } + ], + "file_ids": [], + "assistant_id": null, + "run_id": null, + "metadata": {} + } + post: + operationId: modifyMessage + tags: + - Assistants + summary: Modifies a message. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to which this message belongs. + - in: path + name: message_id + required: true + schema: + type: string + description: The ID of the message to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyMessageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/MessageObject" + x-oaiMeta: + name: Modify message + group: threads + beta: true + returns: The modified [message](/docs/api-reference/threads/messages/object) object. 
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \
+                -H "Content-Type: application/json" \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "OpenAI-Beta: assistants=v1" \
+                -d '{
+                    "metadata": {
+                      "modified": "true",
+                      "user": "abc123"
+                    }
+                  }'
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              message = client.beta.threads.messages.update(
+                message_id="msg_abc123",
+                thread_id="thread_abc123",
+                metadata={
+                  "modified": "true",
+                  "user": "abc123",
+                },
+              )
+              print(message)
+            node.js: |-
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+              async function main() {
+                const message = await openai.beta.threads.messages.update(
+                  "thread_abc123",
+                  "msg_abc123",
+                  {
+                    metadata: {
+                      modified: "true",
+                      user: "abc123",
+                    },
+                  }
+                );
+
+                console.log(message);
+              }
+
+              main();
+            response: |
+              {
+                "id": "msg_abc123",
+                "object": "thread.message",
+                "created_at": 1699017614,
+                "thread_id": "thread_abc123",
+                "role": "user",
+                "content": [
+                  {
+                    "type": "text",
+                    "text": {
+                      "value": "How does AI work? Explain it in simple terms.",
+                      "annotations": []
+                    }
+                  }
+                ],
+                "file_ids": [],
+                "assistant_id": null,
+                "run_id": null,
+                "metadata": {
+                  "modified": "true",
+                  "user": "abc123"
+                }
+              }
+
+  /threads/runs:
+    post:
+      operationId: createThreadAndRun
+      tags:
+        - Assistants
+      summary: Create a thread and run it in one request.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/CreateThreadAndRunRequest"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/RunObject"
+      x-oaiMeta:
+        name: Create thread and run
+        group: threads
+        beta: true
+        returns: A [run](/docs/api-reference/runs/object) object.
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "assistant_id": "asst_abc123", + "thread": { + "messages": [ + {"role": "user", "content": "Explain deep learning to a 5 year old."} + ] + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.create_and_run( + assistant_id="asst_abc123", + thread={ + "messages": [ + {"role": "user", "content": "Explain deep learning to a 5 year old."} + ] + } + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.createAndRun({ + assistant_id: "asst_abc123", + thread: { + messages: [ + { role: "user", content: "Explain deep learning to a 5 year old." }, + ], + }, + }); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699076792, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": null, + "expires_at": 1699077392, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4", + "instructions": "You are a helpful assistant.", + "tools": [], + "file_ids": [], + "metadata": {}, + "usage": null + } + + /threads/{thread_id}/runs: + get: + operationId: listRuns + tags: + - Assistants + summary: Returns a list of runs belonging to a thread. + parameters: + - name: thread_id + in: path + required: true + schema: + type: string + description: The ID of the thread the run belongs to. 
+ - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListRunsResponse" + x-oaiMeta: + name: List runs + group: threads + beta: true + returns: A list of [run](/docs/api-reference/runs/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + runs = client.beta.threads.runs.list( + "thread_abc123" + ) + print(runs) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const runs = await openai.beta.threads.runs.list( + "thread_abc123" + ); + + console.log(runs); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-3.5-turbo", + "instructions": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "file_ids": [ + "file-abc123", + "file-abc456" + ], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + }, + { + "id": "run_abc456", + 
"object": "thread.run", + "created_at": 1699063290, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699063290, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699063291, + "last_error": null, + "model": "gpt-3.5-turbo", + "instructions": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "file_ids": [ + "file-abc123", + "file-abc456" + ], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + ], + "first_id": "run_abc123", + "last_id": "run_abc456", + "has_more": false + } + post: + operationId: createRun + tags: + - Assistants + summary: Create a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to run. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Create run + group: threads + beta: true + returns: A [run](/docs/api-reference/runs/object) object. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "assistant_id": "asst_abc123" + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.create( + thread_id="thread_abc123", + assistant_id="asst_abc123" + ) + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.create( + "thread_abc123", + { assistant_id: "asst_abc123" } + ); + + console.log(run); + } + + main(); + response: &run_object_example | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699063290, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": 1699063290, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699063291, + "last_error": null, + "model": "gpt-4", + "instructions": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "file_ids": [ + "file-abc123", + "file-abc456" + ], + "metadata": {}, + "usage": null + } + + /threads/{thread_id}/runs/{run_id}: + get: + operationId: getRun + tags: + - Assistants + summary: Retrieves a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Retrieve run + group: threads + beta: true + returns: The [run](/docs/api-reference/runs/object) object matching the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.retrieve( + thread_id="thread_abc123", + run_id="run_abc123" + ) + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.retrieve( + "thread_abc123", + "run_abc123" + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-3.5-turbo", + "instructions": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "file_ids": [ + "file-abc123", + "file-abc456" + ], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + post: + operationId: modifyRun + tags: + - Assistants + summary: Modifies a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Modify run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "metadata": { + "user_id": "user_abc123" + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.update( + thread_id="thread_abc123", + run_id="run_abc123", + metadata={"user_id": "user_abc123"}, + ) + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.update( + "thread_abc123", + "run_abc123", + { + metadata: { + user_id: "user_abc123", + }, + } + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-3.5-turbo", + "instructions": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "file_ids": [ + "file-abc123", + "file-abc456" + ], + "metadata": { + "user_id": "user_abc123" + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + + /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: + post: + operationId: submitToolOuputsToRun + tags: + - Assistants + summary: | + When a run has the `status: "requires_action"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) to which this run belongs. 
+ - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run that requires the tool output submission. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SubmitToolOutputsRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Submit tool outputs to run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/submit_tool_outputs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "tool_outputs": [ + { + "tool_call_id": "call_abc123", + "output": "28C" + } + ] + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.submit_tool_outputs( + thread_id="thread_abc123", + run_id="run_abc123", + tool_outputs=[ + { + "tool_call_id": "call_abc123", + "output": "28C" + } + ] + ) + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.submitToolOutputs( + "thread_abc123", + "run_abc123", + { + tool_outputs: [ + { + tool_call_id: "call_abc123", + output: "28C", + }, + ], + } + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075592, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": 1699075592, + "expires_at": 1699076192, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4", + "instructions": "You tell the weather.", + "tools": [ + { + "type": "function", + "function": { + "name": "get_weather", + 
"description": "Determine weather in my location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": [ + "c", + "f" + ] + } + }, + "required": [ + "location" + ] + } + } + } + ], + "file_ids": [], + "metadata": {}, + "usage": null + } + + /threads/{thread_id}/runs/{run_id}/cancel: + post: + operationId: cancelRun + tags: + - Assistants + summary: Cancels a run that is `in_progress`. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to which this run belongs. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to cancel. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Cancel a run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/cancel \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -X POST + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.cancel( + thread_id="thread_abc123", + run_id="run_abc123" + ) + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.cancel( + "thread_abc123", + "run_abc123" + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699076126, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "cancelling", + "started_at": 1699076126, + "expires_at": 1699076726, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4", + "instructions": "You summarize books.", + "tools": [ + { + "type": "retrieval" + } + ], + "file_ids": [], + "metadata": {}, + "usage": null + } + + /threads/{thread_id}/runs/{run_id}/steps: + get: + operationId: listRunSteps + tags: + - Assistants + summary: Returns a list of run steps belonging to a run. + parameters: + - name: thread_id + in: path + required: true + schema: + type: string + description: The ID of the thread the run and run steps belong to. + - name: run_id + in: path + required: true + schema: + type: string + description: The ID of the run the run steps belong to. 
+ - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListRunStepsResponse" + x-oaiMeta: + name: List run steps + group: threads + beta: true + returns: A list of [run step](/docs/api-reference/runs/step-object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + run_steps = client.beta.threads.runs.steps.list( + thread_id="thread_abc123", + run_id="run_abc123" + ) + print(run_steps) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const runStep = await openai.beta.threads.runs.steps.list( + "thread_abc123", + "run_abc123" + ); + console.log(runStep); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "step_abc123", + "object": "thread.run.step", + "created_at": 1699063291, + "run_id": "run_abc123", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "type": "message_creation", + "status": "completed", + "cancelled_at": null, + "completed_at": 1699063291, + "expired_at": null, + "failed_at": null, + "last_error": null, + "step_details": { + "type": "message_creation", + "message_creation": { + "message_id": "msg_abc123" + } + }, + "usage": { + "prompt_tokens": 123, + 
"completion_tokens": 456, + "total_tokens": 579 + } + } + ], + "first_id": "step_abc123", + "last_id": "step_abc456", + "has_more": false + } + + /threads/{thread_id}/runs/{run_id}/steps/{step_id}: + get: + operationId: getRunStep + tags: + - Assistants + summary: Retrieves a run step. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to which the run and run step belongs. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to which the run step belongs. + - in: path + name: step_id + required: true + schema: + type: string + description: The ID of the run step to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunStepObject" + x-oaiMeta: + name: Retrieve run step + group: threads + beta: true + returns: The [run step](/docs/api-reference/runs/step-object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps/step_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + run_step = client.beta.threads.runs.steps.retrieve( + thread_id="thread_abc123", + run_id="run_abc123", + step_id="step_abc123" + ) + print(run_step) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const runStep = await openai.beta.threads.runs.steps.retrieve( + "thread_abc123", + "run_abc123", + "step_abc123" + ); + console.log(runStep); + } + + main(); + response: &run_step_object_example | + { + "id": "step_abc123", + "object": "thread.run.step", + "created_at": 1699063291, + "run_id": "run_abc123", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "type": "message_creation", + "status": "completed", + 
"cancelled_at": null, + "completed_at": 1699063291, + "expired_at": null, + "failed_at": null, + "last_error": null, + "step_details": { + "type": "message_creation", + "message_creation": { + "message_id": "msg_abc123" + } + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + + /assistants/{assistant_id}/files: + get: + operationId: listAssistantFiles + tags: + - Assistants + summary: Returns a list of assistant files. + parameters: + - name: assistant_id + in: path + description: The ID of the assistant the file belongs to. + required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListAssistantFilesResponse" + x-oaiMeta: + name: List assistant files + group: assistants + beta: true + returns: A list of [assistant file](/docs/api-reference/assistants/file-object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/assistants/asst_abc123/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + assistant_files = client.beta.assistants.files.list( + assistant_id="asst_abc123" + ) + print(assistant_files) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const assistantFiles = await openai.beta.assistants.files.list( + "asst_abc123" + ); + console.log(assistantFiles); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "file-abc123", + "object": "assistant.file", + "created_at": 1699060412, + "assistant_id": "asst_abc123" + }, + { + "id": "file-abc456", + "object": "assistant.file", + "created_at": 1699060412, + "assistant_id": "asst_abc123" + } + ], + "first_id": "file-abc123", + "last_id": "file-abc456", + "has_more": false + } + post: + operationId: createAssistantFile + tags: + - Assistants + summary: Create an assistant file by attaching a [File](/docs/api-reference/files) to an [assistant](/docs/api-reference/assistants). + parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + example: file-abc123 + description: | + The ID of the assistant for which to create a File. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateAssistantFileRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/AssistantFileObject" + x-oaiMeta: + name: Create assistant file + group: assistants + beta: true + returns: An [assistant file](/docs/api-reference/assistants/file-object) object. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/assistants/asst_abc123/files \ + -H 'Authorization: Bearer $OPENAI_API_KEY"' \ + -H 'Content-Type: application/json' \ + -H 'OpenAI-Beta: assistants=v1' \ + -d '{ + "file_id": "file-abc123" + }' + python: | + from openai import OpenAI + client = OpenAI() + + assistant_file = client.beta.assistants.files.create( + assistant_id="asst_abc123", + file_id="file-abc123" + ) + print(assistant_file) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const myAssistantFile = await openai.beta.assistants.files.create( + "asst_abc123", + { + file_id: "file-abc123" + } + ); + console.log(myAssistantFile); + } + + main(); + response: &assistant_file_object | + { + "id": "file-abc123", + "object": "assistant.file", + "created_at": 1699055364, + "assistant_id": "asst_abc123" + } + + /assistants/{assistant_id}/files/{file_id}: + get: + operationId: getAssistantFile + tags: + - Assistants + summary: Retrieves an AssistantFile. + parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + description: The ID of the assistant who the file belongs to. + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file we're getting. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/AssistantFileObject" + x-oaiMeta: + name: Retrieve assistant file + group: assistants + beta: true + returns: The [assistant file](/docs/api-reference/assistants/file-object) object matching the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/assistants/asst_abc123/files/file-abc123 \ + -H 'Authorization: Bearer $OPENAI_API_KEY"' \ + -H 'Content-Type: application/json' \ + -H 'OpenAI-Beta: assistants=v1' + python: | + from openai import OpenAI + client = OpenAI() + + assistant_file = client.beta.assistants.files.retrieve( + assistant_id="asst_abc123", + file_id="file-abc123" + ) + print(assistant_file) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const myAssistantFile = await openai.beta.assistants.files.retrieve( + "asst_abc123", + "file-abc123" + ); + console.log(myAssistantFile); + } + + main(); + response: *assistant_file_object + delete: + operationId: deleteAssistantFile + tags: + - Assistants + summary: Delete an assistant file. + parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + description: The ID of the assistant that the file belongs to. + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to delete. 
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/DeleteAssistantFileResponse"
+      x-oaiMeta:
+        name: Delete assistant file
+        group: assistants
+        beta: true
+        returns: Deletion status
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/assistants/asst_abc123/files/file-abc123 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v1" \
+                -X DELETE
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              deleted_assistant_file = client.beta.assistants.files.delete(
+                assistant_id="asst_abc123",
+                file_id="file-abc123"
+              )
+              print(deleted_assistant_file)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const deletedAssistantFile = await openai.beta.assistants.files.del(
+                  "asst_abc123",
+                  "file-abc123"
+                );
+                console.log(deletedAssistantFile);
+              }
+
+              main();
+            response: |
+              {
+                "id": "file-abc123",
+                "object": "assistant.file.deleted",
+                "deleted": true
+              }
+
+  /threads/{thread_id}/messages/{message_id}/files:
+    get:
+      operationId: listMessageFiles
+      tags:
+        - Assistants
+      summary: Returns a list of message files.
+      parameters:
+        - name: thread_id
+          in: path
+          description: The ID of the thread that the message and files belong to.
+          required: true
+          schema:
+            type: string
+        - name: message_id
+          in: path
+          description: The ID of the message that the files belong to.
+ required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListMessageFilesResponse" + x-oaiMeta: + name: List message files + group: threads + beta: true + returns: A list of [message file](/docs/api-reference/messages/file-object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + message_files = client.beta.threads.messages.files.list( + thread_id="thread_abc123", + message_id="msg_abc123" + ) + print(message_files) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const messageFiles = await openai.beta.threads.messages.files.list( + "thread_abc123", + "msg_abc123" + ); + console.log(messageFiles); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "file-abc123", + "object": "thread.message.file", + "created_at": 1699061776, + "message_id": "msg_abc123" + }, + { + "id": "file-abc123", + "object": "thread.message.file", + "created_at": 1699061776, + "message_id": "msg_abc123" + } + ], + "first_id": "file-abc123", + "last_id": "file-abc123", + "has_more": false + } + + /threads/{thread_id}/messages/{message_id}/files/{file_id}: + get: + operationId: 
getMessageFile + tags: + - Assistants + summary: Retrieves a message file. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + example: thread_abc123 + description: The ID of the thread to which the message and File belong. + - in: path + name: message_id + required: true + schema: + type: string + example: msg_abc123 + description: The ID of the message the file belongs to. + - in: path + name: file_id + required: true + schema: + type: string + example: file-abc123 + description: The ID of the file being retrieved. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/MessageFileObject" + x-oaiMeta: + name: Retrieve message file + group: threads + beta: true + returns: The [message file](/docs/api-reference/messages/file-object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123/files/file-abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + message_files = client.beta.threads.messages.files.retrieve( + thread_id="thread_abc123", + message_id="msg_abc123", + file_id="file-abc123" + ) + print(message_files) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const messageFile = await openai.beta.threads.messages.files.retrieve( + "thread_abc123", + "msg_abc123", + "file-abc123" + ); + console.log(messageFile); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "thread.message.file", + "created_at": 1699061776, + "message_id": "msg_abc123" + } + +components: + securitySchemes: + ApiKeyAuth: + type: http + scheme: "bearer" + + schemas: + Error: + type: object + properties: + code: + type: string + nullable: true + message: + type: string + nullable: false + param: + type: string + nullable: true + 
type: + type: string + nullable: false + required: + - type + - message + - param + - code + ErrorResponse: + type: object + properties: + error: + $ref: "#/components/schemas/Error" + required: + - error + + ListModelsResponse: + type: object + properties: + object: + type: string + enum: [list] + data: + type: array + items: + $ref: "#/components/schemas/Model" + required: + - object + - data + DeleteModelResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + required: + - id + - object + - deleted + + CreateCompletionRequest: + type: object + properties: + model: + description: &model_description | + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + anyOf: + - type: string + - type: string + enum: ["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"] + x-oaiTypeLabel: string + prompt: + description: &completions_prompt_description | + The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + default: "<|endoftext|>" + nullable: true + oneOf: + - type: string + default: "" + example: "This is a test." + - type: array + items: + type: string + default: "" + example: "This is a test." 
+ - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + best_of: + type: integer + default: 1 + minimum: 0 + maximum: 20 + nullable: true + description: &completions_best_of_description | + Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + echo: + type: boolean + default: false + nullable: true + description: &completions_echo_description > + Echo back the prompt in addition to the completion + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_frequency_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + logit_bias: &completions_logit_bias + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: &completions_logit_bias_description | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. 
Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+
+            As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated.
+        logprobs: &completions_logprobs_configuration
+          type: integer
+          minimum: 0
+          maximum: 5
+          default: null
+          nullable: true
+          description: &completions_logprobs_description |
+            Include the log probabilities on the `logprobs` most likely output tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+            The maximum value for `logprobs` is 5.
+        max_tokens:
+          type: integer
+          minimum: 0
+          default: 16
+          example: 16
+          nullable: true
+          description: &completions_max_tokens_description |
+            The maximum number of [tokens](/tokenizer) that can be generated in the completion.
+
+            The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
+        n:
+          type: integer
+          minimum: 1
+          maximum: 128
+          default: 1
+          example: 1
+          nullable: true
+          description: &completions_completions_description |
+            How many completions to generate for each prompt.
+
+            **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+        presence_penalty:
+          type: number
+          default: 0
+          minimum: -2
+          maximum: 2
+          nullable: true
+          description: &completions_presence_penalty_description |
+            Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + seed: &completions_seed_param + type: integer + minimum: -9223372036854775808 + maximum: 9223372036854775807 + nullable: true + description: | + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + stop: + description: &completions_stop_description > + Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + default: null + nullable: true + oneOf: + - type: string + default: <|endoftext|> + example: "\n" + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + example: '["\n"]' + stream: + description: > + Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean + nullable: true + default: false + suffix: + description: The suffix that comes after a completion of inserted text. + default: null + nullable: true + type: string + example: "test." + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: &completions_temperature_description | + What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &completions_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + user: &end_user_param_configuration + type: string + example: user-1234 + description: | + A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + required: + - model + - prompt + + CreateCompletionResponse: + type: object + description: | + Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + properties: + id: + type: string + description: A unique identifier for the completion. + choices: + type: array + description: The list of completion choices the model generated for the input prompt. + items: + type: object + required: + - finish_reason + - index + - logprobs + - text + properties: + finish_reason: + type: string + description: &completion_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + or `content_filter` if content was omitted due to a flag from our content filters. 
+ enum: ["stop", "length", "content_filter"] + index: + type: integer + logprobs: + type: object + nullable: true + properties: + text_offset: + type: array + items: + type: integer + token_logprobs: + type: array + items: + type: number + tokens: + type: array + items: + type: string + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: number + text: + type: string + created: + type: integer + description: The Unix timestamp (in seconds) of when the completion was created. + model: + type: string + description: The model used for completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always "text_completion" + enum: [text_completion] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - id + - object + - created + - model + - choices + x-oaiMeta: + name: The completion object + legacy: true + example: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "gpt-3.5-turbo", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + + ChatCompletionRequestMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" + x-oaiExpandable: true + + ChatCompletionRequestMessageContentPartImage: + type: object + title: Image content part + properties: + type: + type: string + enum: ["image_url"] + description: The type of the content part. 
+ image_url: + type: object + properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + detail: + type: string + description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). + enum: ["auto", "low", "high"] + default: "auto" + required: + - url + required: + - type + - image_url + + ChatCompletionRequestMessageContentPartText: + type: object + title: Text content part + properties: + type: + type: string + enum: ["text"] + description: The type of the content part. + text: + type: string + description: The text content. + required: + - type + - text + + ChatCompletionRequestMessage: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + + ChatCompletionRequestSystemMessage: + type: object + title: System message + properties: + content: + description: The contents of the system message. + type: string + role: + type: string + enum: ["system"] + description: The role of the messages author, in this case `system`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + required: + - content + - role + + ChatCompletionRequestUserMessage: + type: object + title: User message + properties: + content: + description: | + The contents of the user message. + oneOf: + - type: string + description: The text contents of the message. + title: Text content + - type: array + description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. 
You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-vision-preview` model.
+            title: Array of content parts
+            items:
+              $ref: "#/components/schemas/ChatCompletionRequestMessageContentPart"
+            minItems: 1
+          x-oaiExpandable: true
+        role:
+          type: string
+          enum: ["user"]
+          description: The role of the messages author, in this case `user`.
+        name:
+          type: string
+          description: An optional name for the participant. Provides the model information to differentiate between participants of the same role.
+      required:
+        - content
+        - role
+
+    ChatCompletionRequestAssistantMessage:
+      type: object
+      title: Assistant message
+      properties:
+        content:
+          nullable: true
+          type: string
+          description: |
+            The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified.
+        role:
+          type: string
+          enum: ["assistant"]
+          description: The role of the messages author, in this case `assistant`.
+        name:
+          type: string
+          description: An optional name for the participant. Provides the model information to differentiate between participants of the same role.
+        tool_calls:
+          $ref: "#/components/schemas/ChatCompletionMessageToolCalls"
+        function_call:
+          type: object
+          deprecated: true
+          description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model."
+          properties:
+            arguments:
+              type: string
+              description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
+            name:
+              type: string
+              description: The name of the function to call. 
+ required: + - arguments + - name + required: + - role + + ChatCompletionRequestToolMessage: + type: object + title: Tool message + properties: + role: + type: string + enum: ["tool"] + description: The role of the messages author, in this case `tool`. + content: + type: string + description: The contents of the tool message. + tool_call_id: + type: string + description: Tool call that this message is responding to. + required: + - role + - content + - tool_call_id + + ChatCompletionRequestFunctionMessage: + type: object + title: Function message + deprecated: true + properties: + role: + type: string + enum: ["function"] + description: The role of the messages author, in this case `function`. + content: + nullable: true + type: string + description: The contents of the function message. + name: + type: string + description: The name of the function to call. + required: + - role + - content + - name + + FunctionParameters: + type: object + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + additionalProperties: true + + ChatCompletionFunctions: + type: object + deprecated: true + properties: + description: + type: string + description: A description of what the function does, used by the model to choose when and how to call the function. + name: + type: string + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + required: + - name + + ChatCompletionFunctionCallOption: + type: object + description: > + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. 
+ properties: + name: + type: string + description: The name of the function to call. + required: + - name + + ChatCompletionTool: + type: object + properties: + type: + type: string + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: + $ref: "#/components/schemas/FunctionObject" + required: + - type + - function + + FunctionObject: + type: object + properties: + description: + type: string + description: A description of what the function does, used by the model to choose when and how to call the function. + name: + type: string + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + required: + - name + + ChatCompletionToolChoiceOption: + description: | + Controls which (if any) function is called by the model. + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that function. + + `none` is the default when no functions are present. `auto` is the default if functions are present. + oneOf: + - type: string + description: > + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + enum: [none, auto] + - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" + x-oaiExpandable: true + + ChatCompletionNamedToolChoice: + type: object + description: Specifies a tool the model should use. Use to force the model to call a specific function. + properties: + type: + type: string + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. 
+ function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + required: + - type + - function + + ChatCompletionMessageToolCalls: + type: array + description: The tool calls generated by the model, such as function calls. + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCall" + + ChatCompletionMessageToolCall: + type: object + properties: + # TODO: index included when streaming + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + description: The function that the model called. + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + required: + - name + - arguments + required: + - id + - type + - function + + ChatCompletionMessageToolCallChunk: + type: object + properties: + index: + type: integer + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. 
+ required: + - index + + # Note, this isn't referenced anywhere, but is kept as a convenience to record all possible roles in one place. + ChatCompletionRole: + type: string + description: The role of the author of a message + enum: + - system + - user + - assistant + - tool + - function + + ChatCompletionResponseMessage: + type: object + description: A chat completion message generated by the model. + properties: + content: + type: string + description: The contents of the message. + nullable: true + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + role: + type: string + enum: ["assistant"] + description: The role of the author of this message. + function_call: + type: object + deprecated: true + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - name + - arguments + required: + - role + - content + + ChatCompletionStreamResponseDelta: + type: object + description: A chat completion delta generated by streamed model responses. + properties: + content: + type: string + description: The contents of the chunk message. + nullable: true + function_call: + deprecated: true + type: object + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + tool_calls: + type: array + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" + role: + type: string + enum: ["system", "user", "assistant", "tool"] + description: The role of the author of this message. + + CreateChatCompletionRequest: + type: object + properties: + messages: + description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + type: array + minItems: 1 + items: + $ref: "#/components/schemas/ChatCompletionRequestMessage" + model: + description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + example: "gpt-3.5-turbo" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_frequency_penalty_description + logit_bias: + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. 
Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + logprobs: + description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. + type: boolean + default: false + nullable: true + top_logprobs: + description: An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + type: integer + minimum: 0 + maximum: 5 + nullable: true + max_tokens: + description: | + The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + type: integer + nullable: true + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_presence_penalty_description + response_format: + type: object + description: | + An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. 
+ + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + properties: + type: + type: string + enum: ["text", "json_object"] + example: "json_object" + default: "text" + description: Must be one of `text` or `json_object`. + seed: + type: integer + minimum: -9223372036854775808 + maximum: 9223372036854775807 + nullable: true + description: | + This feature is in Beta. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + stop: + description: | + Up to 4 sequences where the API will stop generating further tokens. + default: null + oneOf: + - type: string + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + stream: + description: > + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
+ type: boolean + nullable: true + default: false + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *completions_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *completions_top_p_description + tools: + type: array + description: > + A list of tools the model may call. Currently, only functions are supported as a tool. + Use this to provide a list of functions the model may generate JSON inputs for. + items: + $ref: "#/components/schemas/ChatCompletionTool" + tool_choice: + $ref: "#/components/schemas/ChatCompletionToolChoiceOption" + user: *end_user_param_configuration + function_call: + deprecated: true + description: | + Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + + `none` is the default when no functions are present. `auto` is the default if functions are present. + oneOf: + - type: string + description: > + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + enum: [none, auto] + - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" + x-oaiExpandable: true + functions: + deprecated: true + description: | + Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. 
+ type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + + required: + - model + - messages + + CreateChatCompletionResponse: + type: object + description: Represents a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: &chat_completion_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + `content_filter` if content was omitted due to a flag from our content filters, + `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + logprobs: &chat_completion_response_logprobs + description: Log probability information for the choice. + type: object + nullable: true + properties: + content: + description: A list of message content tokens with log probability information. + type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + required: + - content + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: + type: string + description: The model used for the chat completion. 
+ system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion`. + enum: [chat.completion] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_example + + CreateChatCompletionFunctionResponse: + type: object + description: Represents a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: + &chat_completion_function_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. + enum: ["stop", "length", "function_call", "content_filter"] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: + type: string + description: The model used for the chat completion. 
+ system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion`. + enum: [chat.completion] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_function_example + + ChatCompletionTokenLogprob: + type: object + properties: + token: &chat_completion_response_logprobs_token + description: The token. + type: string + logprob: &chat_completion_response_logprobs_token_logprob + description: The log probability of this token. + type: number + bytes: &chat_completion_response_logprobs_bytes + description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + type: array + items: + type: integer + nullable: true + top_logprobs: + description: List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. 
+ type: array + items: + type: object + properties: + token: *chat_completion_response_logprobs_token + logprob: *chat_completion_response_logprobs_token_logprob + bytes: *chat_completion_response_logprobs_bytes + required: + - token + - logprob + - bytes + required: + - token + - logprob + - bytes + - top_logprobs + + ListPaginatedFineTuningJobsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJob" + has_more: + type: boolean + object: + type: string + enum: [list] + required: + - object + - data + - has_more + + CreateChatCompletionStreamResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. Each chunk has the same ID. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - delta + - finish_reason + - index + properties: + delta: + $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" + logprobs: *chat_completion_response_logprobs + finish_reason: + type: string + description: *chat_completion_finish_reason_description + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] + nullable: true + index: + type: integer + description: The index of the choice in the list of choices. + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + model: + type: string + description: The model to generate the completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. 
+            Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
+        object:
+          type: string
+          description: The object type, which is always `chat.completion.chunk`.
+          enum: [chat.completion.chunk]
+      required:
+        - choices
+        - created
+        - id
+        - model
+        - object
+      x-oaiMeta:
+        name: The chat completion chunk object
+        group: chat
+        example: *chat_completion_chunk_example
+
+    CreateChatCompletionImageResponse:
+      type: object
+      description: Represents a chat completion response returned by model, based on the provided input.
+      x-oaiMeta:
+        name: The chat completion chunk object
+        group: chat
+        example: *chat_completion_image_example
+
+    CreateImageRequest:
+      type: object
+      properties:
+        prompt:
+          description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`.
+          type: string
+          example: "A cute baby sea otter"
+        model:
+          anyOf:
+            - type: string
+            - type: string
+              enum: ["dall-e-2", "dall-e-3"]
+          x-oaiTypeLabel: string
+          default: "dall-e-2"
+          example: "dall-e-3"
+          nullable: true
+          description: The model to use for image generation.
+        n: &images_n
+          type: integer
+          minimum: 1
+          maximum: 10
+          default: 1
+          example: 1
+          nullable: true
+          description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
+        quality:
+          type: string
+          enum: ["standard", "hd"]
+          default: "standard"
+          example: "standard"
+          description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`.
+        response_format: &images_response_format
+          type: string
+          enum: ["url", "b64_json"]
+          default: "url"
+          example: "url"
+          nullable: true
+          description: The format in which the generated images are returned. Must be one of `url` or `b64_json`.
+ size: &images_size + type: string + enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + style: + type: string + enum: ["vivid", "natural"] + default: "vivid" + example: "vivid" + nullable: true + description: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + user: *end_user_param_configuration + required: + - prompt + + ImagesResponse: + properties: + created: + type: integer + data: + type: array + items: + $ref: "#/components/schemas/Image" + required: + - created + - data + + Image: + type: object + description: Represents the url or the content of an image generated by the OpenAI API. + properties: + b64_json: + type: string + description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + url: + type: string + description: The URL of the generated image, if `response_format` is `url` (default). + revised_prompt: + type: string + description: The prompt that was used to generate the image, if there was any revision to the prompt. + x-oaiMeta: + name: The image object + example: | + { + "url": "...", + "revised_prompt": "..." + } + + CreateImageEditRequest: + type: object + properties: + image: + description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. + type: string + format: binary + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters. 
+ type: string + example: "A cute baby sea otter wearing a beret" + mask: + description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: ["dall-e-2"] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. + size: &dalle2_images_size + type: string + enum: ["256x256", "512x512", "1024x1024"] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + response_format: *images_response_format + user: *end_user_param_configuration + required: + - prompt + - image + + CreateImageVariationRequest: + type: object + properties: + image: + description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: ["dall-e-2"] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: *images_n + response_format: *images_response_format + size: *dalle2_images_size + user: *end_user_param_configuration + required: + - image + + CreateModerationRequest: + type: object + properties: + input: + description: The input text to classify + oneOf: + - type: string + default: "" + example: "I want to kill them." 
+            - type: array
+              items:
+                type: string
+                default: ""
+                example: "I want to kill them."
+        model:
+          description: |
+            Two content moderation models are available: `text-moderation-stable` and `text-moderation-latest`.
+
+            The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
+          nullable: false
+          default: "text-moderation-latest"
+          example: "text-moderation-stable"
+          anyOf:
+            - type: string
+            - type: string
+              enum: ["text-moderation-latest", "text-moderation-stable"]
+          x-oaiTypeLabel: string
+      required:
+        - input

-  securitySchemes:
-    ApiKeyAuth:
-      type: http
-      scheme: 'bearer'
+    CreateModerationResponse:
+      type: object
+      description: Represents a policy compliance report by OpenAI's content moderation model against a given input.
+      properties:
+        id:
+          type: string
+          description: The unique identifier for the moderation request.
+        model:
+          type: string
+          description: The model used to generate the moderation results.
+        results:
+          type: array
+          description: A list of moderation objects.
+          items:
+            type: object
+            properties:
+              flagged:
+                type: boolean
+                description: Whether the content violates [OpenAI's usage policies](/policies/usage-policies).
+              categories:
+                type: object
+                description: A list of the categories, and whether they are flagged or not.
+                properties:
+                  hate:
+                    type: boolean
+                    description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment.
+ hate/threatening: + type: boolean + description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + harassment: + type: boolean + description: Content that expresses, incites, or promotes harassing language towards any target. + harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm towards any target. + self-harm: + type: boolean + description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/intent: + type: boolean + description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/instructions: + type: boolean + description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + sexual: + type: boolean + description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic detail. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + category_scores: + type: object + description: A list of the categories along with their scores as predicted by model. 
+ properties: + hate: + type: number + description: The score for the category 'hate'. + hate/threatening: + type: number + description: The score for the category 'hate/threatening'. + harassment: + type: number + description: The score for the category 'harassment'. + harassment/threatening: + type: number + description: The score for the category 'harassment/threatening'. + self-harm: + type: number + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + description: The score for the category 'self-harm/instructions'. + sexual: + type: number + description: The score for the category 'sexual'. + sexual/minors: + type: number + description: The score for the category 'sexual/minors'. + violence: + type: number + description: The score for the category 'violence'. + violence/graphic: + type: number + description: The score for the category 'violence/graphic'. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + required: + - flagged + - categories + - category_scores + required: + - id + - model + - results + x-oaiMeta: + name: The moderation object + example: *moderation_example - schemas: - Error: + ListFilesResponse: type: object properties: - type: + data: + type: array + items: + $ref: "#/components/schemas/OpenAIFile" + object: type: string - nullable: false - message: + enum: [list] + required: + - object + - data + + CreateFileRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The File object (not file name) to be uploaded. type: string - nullable: false - param: + format: binary + purpose: + description: | + The intended purpose of the uploaded file. 
+ + Use "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This allows us to validate the format of the uploaded file is correct for fine-tuning. type: string - nullable: true - code: + enum: ["fine-tune", "assistants"] + required: + - file + - purpose + + DeleteFileResponse: + type: object + properties: + id: type: string - nullable: true + object: + type: string + enum: [file] + deleted: + type: boolean required: - - type - - message - - param - - code + - id + - object + - deleted - ErrorResponse: + CreateFineTuningJobRequest: type: object properties: - error: - $ref: "#/components/schemas/Error" + model: + description: | + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + example: "gpt-3.5-turbo" + anyOf: + - type: string + - type: string + enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] + x-oaiTypeLabel: string + training_file: + description: | + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/upload) for how to upload a file. + + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + type: string + example: "file-abc123" + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. + properties: + batch_size: + description: | + Number of examples in each batch. A larger batch size means that model parameters + are updated less frequently, but with lower variance. + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 256 + default: auto + learning_rate_multiplier: + description: | + Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + overfitting. 
+ oneOf: + - type: string + enum: [auto] + - type: number + minimum: 0 + exclusiveMinimum: true + default: auto + n_epochs: + description: | + The number of epochs to train the model for. An epoch refers to one full cycle + through the training dataset. + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + suffix: + description: | + A string of up to 18 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + type: string + minLength: 1 + maxLength: 40 + default: null + nullable: true + validation_file: + description: | + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation + metrics periodically during fine-tuning. These metrics can be viewed in + the fine-tuning results file. + The same data should not be present in both train and validation files. + + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + type: string + nullable: true + example: "file-abc123" required: - - error + - model + - training_file - ListModelsResponse: + ListFineTuningJobEventsResponse: type: object properties: - object: - type: string data: type: array items: - $ref: "#/components/schemas/Model" - required: - - object - - data - - DeleteModelResponse: - type: object - properties: - id: - type: string + $ref: "#/components/schemas/FineTuningJobEvent" object: type: string - deleted: - type: boolean + enum: [list] required: - - id - object - - deleted + - data - CreateCompletionRequest: + CreateEmbeddingRequest: type: object + additionalProperties: false properties: - model: - description: &model_description | - ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - anyOf: - - type: string - - type: string - enum: - [ - "babbage-002", - "davinci-002", - "gpt-3.5-turbo-instruct", - "text-davinci-003", - "text-davinci-002", - "text-davinci-001", - "code-davinci-002", - "text-curie-001", - "text-babbage-001", - "text-ada-001", - ] - x-oaiTypeLabel: string - prompt: - description: &completions_prompt_description | - The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. - - Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. - default: "<|endoftext|>" - nullable: true + input: + description: | + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + example: "The quick brown fox jumped over the lazy dog" oneOf: - type: string + title: string + description: The string that will be turned into an embedding. default: "" example: "This is a test." - type: array + title: array + description: The array of strings that will be turned into an embedding. + minItems: 1 + maxItems: 2048 items: type: string default: "" - example: "This is a test." + example: "['This is a test.']" - type: array + title: array + description: The array of integers that will be turned into an embedding. 
minItems: 1 + maxItems: 2048 items: type: integer example: "[1212, 318, 257, 1332, 13]" - type: array + title: array + description: The array of arrays containing integers that will be turned into an embedding. minItems: 1 + maxItems: 2048 items: type: array minItems: 1 items: type: integer example: "[[1212, 318, 257, 1332, 13]]" - suffix: - description: The suffix that comes after a completion of inserted text. - default: null - nullable: true + x-oaiExpandable: true + model: + description: *model_description + example: "text-embedding-3-small" + anyOf: + - type: string + - type: string + enum: ["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"] + x-oaiTypeLabel: string + encoding_format: + description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." + example: "float" + default: "float" type: string - example: "test." - max_tokens: - type: integer - minimum: 0 - default: 16 - example: 16 - nullable: true - description: &completions_max_tokens_description | - The maximum number of [tokens](/tokenizer) to generate in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: &completions_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. 
- top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: &completions_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - n: + enum: ["float", "base64"] + dimensions: + description: | + The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. type: integer minimum: 1 - maximum: 128 - default: 1 - example: 1 - nullable: true - description: &completions_completions_description | - How many completions to generate for each prompt. + user: *end_user_param_configuration + required: + - model + - input - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - stream: - description: > - Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). - type: boolean - nullable: true - default: false - logprobs: &completions_logprobs_configuration - type: integer - minimum: 0 - maximum: 5 - default: null - nullable: true - description: &completions_logprobs_description | - Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. 
The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + CreateEmbeddingResponse: + type: object + properties: + data: + type: array + description: The list of embeddings generated by the model. + items: + $ref: "#/components/schemas/Embedding" + model: + type: string + description: The name of the model used to generate the embedding. + object: + type: string + description: The object type, which is always "list". + enum: [list] + usage: + type: object + description: The usage information for the request. + properties: + prompt_tokens: + type: integer + description: The number of tokens used by the prompt. + total_tokens: + type: integer + description: The total number of tokens used by the request. + required: + - prompt_tokens + - total_tokens + required: + - object + - model + - data + - usage - The maximum value for `logprobs` is 5. - echo: - type: boolean - default: false - nullable: true - description: &completions_echo_description > - Echo back the prompt in addition to the completion - stop: - description: &completions_stop_description > - Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. - default: null - nullable: true - oneOf: + CreateTranscriptionRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + type: string + x-oaiTypeLabel: file + format: binary + model: + description: | + ID of the model to use. Only `whisper-1` is currently available. 
+ example: whisper-1 + anyOf: - type: string - default: <|endoftext|> - example: "\n" - nullable: true - - type: array - minItems: 1 - maxItems: 4 - items: - type: string - example: '["\n"]' - presence_penalty: + - type: string + enum: ["whisper-1"] + x-oaiTypeLabel: string + language: + description: | + The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. + type: string + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + type: string + response_format: + description: | + The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. + type: string + enum: + - json + - text + - srt + - verbose_json + - vtt + default: json + temperature: + description: | + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. type: number default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: &completions_presence_penalty_description | - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + required: + - file + - model - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - frequency_penalty: + # Note: This does not currently support the non-default response format types. 
+    CreateTranscriptionResponse:
+      type: object
+      properties:
+        text:
+          type: string
+      required:
+        - text
+
+    CreateTranslationRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        file:
+          description: |
+            The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+          type: string
+          x-oaiTypeLabel: file
+          format: binary
+        model:
+          description: |
+            ID of the model to use. Only `whisper-1` is currently available.
+          example: whisper-1
+          anyOf:
+            - type: string
+            - type: string
+              enum: ["whisper-1"]
+          x-oaiTypeLabel: string
+        prompt:
+          description: |
+            An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
+          type: string
+        response_format:
+          description: |
+            The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+          type: string
+          default: json
+        temperature:
+          description: |
+            The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
           type: number
           default: 0
-          minimum: -2
-          maximum: 2
-          nullable: true
-          description: &completions_frequency_penalty_description |
-            Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - best_of: - type: integer - default: 1 - minimum: 0 - maximum: 20 - nullable: true - description: &completions_best_of_description | - Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. - - When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. - - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - logit_bias: &completions_logit_bias - type: object - x-oaiTypeLabel: map - default: null - nullable: true - additionalProperties: - type: integer - description: &completions_logit_bias_description | - Modify the likelihood of specified tokens appearing in the completion. - - Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + required: + - file + - model - As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. - user: &end_user_param_configuration + # Note: This does not currently support the non-default response format types. 
+ CreateTranslationResponse: + type: object + properties: + text: type: string - example: user-1234 + required: + - text + + CreateSpeechRequest: + type: object + additionalProperties: false + properties: + model: description: | - A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + anyOf: + - type: string + - type: string + enum: ["tts-1", "tts-1-hd"] + x-oaiTypeLabel: string + input: + type: string + description: The text to generate audio for. The maximum length is 4096 characters. + maxLength: 4096 + voice: + description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + type: string + enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] + response_format: + description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`." + default: "mp3" + type: string + enum: ["mp3", "opus", "aac", "flac"] + speed: + description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." + type: number + default: 1.0 + minimum: 0.25 + maximum: 4.0 required: - model - - prompt + - input + - voice - CreateCompletionResponse: - type: object - description: | - Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + Model: + title: Model + description: Describes an OpenAI model offering that can be used with the API. properties: id: type: string - description: A unique identifier for the completion. 
- object: - type: string - description: The object type, which is always "text_completion" + description: The model identifier, which can be referenced in the API endpoints. created: type: integer - description: The Unix timestamp (in seconds) of when the completion was created. - model: + description: The Unix timestamp (in seconds) when the model was created. + object: type: string - description: The model used for completion. - choices: - type: array - description: The list of completion choices the model generated for the input prompt. - items: - type: object - required: - - text - - index - - logprobs - - finish_reason - properties: - text: - type: string - index: - type: integer - logprobs: - type: object - nullable: true - properties: - tokens: - type: array - items: - type: string - token_logprobs: - type: array - items: - type: number - top_logprobs: - type: array - items: - type: object - additionalProperties: - type: integer - text_offset: - type: array - items: - type: integer - finish_reason: - type: string - description: &completion_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, - `length` if the maximum number of tokens specified in the request was reached, - or `content_filter` if content was omitted due to a flag from our content filters. - enum: ["stop", "length", "content_filter"] - usage: - $ref: "#/components/schemas/CompletionUsage" + description: The object type, which is always "model". + enum: [model] + owned_by: + type: string + description: The organization that owns the model. 
required: - id - object - created - - model - - choices + - owned_by x-oaiMeta: - name: The completion object - legacy: true - example: | - { - "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", - "object": "text_completion", - "created": 1589478378, - "model": "gpt-3.5-turbo", - "choices": [ - { - "text": "\n\nThis is indeed a test", - "index": 0, - "logprobs": null, - "finish_reason": "length" - } - ], - "usage": { - "prompt_tokens": 5, - "completion_tokens": 7, - "total_tokens": 12 - } - } + name: The model object + example: *retrieve_model_response - ChatCompletionRequestMessage: - type: object + OpenAIFile: + title: OpenAIFile + description: The `File` object represents a document that has been uploaded to OpenAI. properties: - role: + id: type: string - enum: ["system", "user", "assistant", "function"] - description: The role of the messages author. One of `system`, `user`, `assistant`, or `function`. - content: + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + description: The size of the file, in bytes. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the file was created. + filename: type: string - nullable: true - description: The contents of the message. `content` is required for all messages, and may be null for assistant messages with function calls. - name: + description: The name of the file. + object: type: string - description: The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters. - function_call: - type: object - description: The name and arguments of a function that should be called, as generated by the model. - properties: - name: - type: string - description: The name of the function to call. 
- arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - required: - - name - - arguments - required: - - role - - content - - ChatCompletionFunctionParameters: - type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.\n\nTo describe a function that accepts no parameters, provide the value `{\"type\": \"object\", \"properties\": {}}`." - additionalProperties: true - - ChatCompletionFunctions: - type: object - properties: - name: + description: The object type, which is always `file`. + enum: ["file"] + purpose: type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - description: + description: The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, `assistants`, and `assistants_output`. + enum: + [ + "fine-tune", + "fine-tune-results", + "assistants", + "assistants_output", + ] + status: type: string - description: A description of what the function does, used by the model to choose when and how to call the function. - parameters: - $ref: "#/components/schemas/ChatCompletionFunctionParameters" + deprecated: true + description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. + enum: ["uploaded", "processed", "error"] + status_details: + type: string + deprecated: true + description: Deprecated. 
For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. required: - - name - - parameters - - ChatCompletionFunctionCallOption: + - id + - object + - bytes + - created_at + - filename + - purpose + - status + x-oaiMeta: + name: The file object + example: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "salesOverview.pdf", + "purpose": "assistants", + } + Embedding: type: object + description: | + Represents an embedding vector returned by embedding endpoint. properties: - name: + index: + type: integer + description: The index of the embedding in the list of embeddings. + embedding: + type: array + description: | + The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). + items: + type: number + object: type: string - description: The name of the function to call. + description: The object type, which is always "embedding". + enum: [embedding] required: - - name + - index + - object + - embedding + x-oaiMeta: + name: The embedding object + example: | + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } - ChatCompletionResponseMessage: + FineTuningJob: type: object - description: A chat completion message generated by the model. + title: FineTuningJob + description: | + The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. properties: - role: - type: string - enum: ["system", "user", "assistant", "function"] - description: The role of the author of this message. - content: + id: type: string - description: The contents of the message. - nullable: true - function_call: + description: The object identifier, which can be referenced in the API endpoints. 
+ created_at: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + error: type: object - description: The name and arguments of a function that should be called, as generated by the model. + nullable: true + description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. properties: - name: + code: type: string - description: The name of the function to call. - arguments: + description: A machine-readable error code. + message: type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + description: A human-readable error message. + param: + type: string + description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + nullable: true required: - - name - - arguments - required: - - role - - content - - ChatCompletionStreamResponseDelta: - type: object - description: A chat completion delta generated by streamed model responses. - properties: - role: - type: string - enum: ["system", "user", "assistant", "function"] - description: The role of the author of this message. - content: + - code + - message + - param + fine_tuned_model: type: string - description: The contents of the chunk message. nullable: true - function_call: + description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + finished_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. 
+ hyperparameters: type: object - description: The name and arguments of a function that should be called, as generated by the model. + description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + n_epochs: + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + description: + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - CreateChatCompletionRequest: - type: object - properties: + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + required: + - n_epochs model: - description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. - example: "gpt-3.5-turbo" - anyOf: - - type: string - - type: string - enum: - [ - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - messages: - description: A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). 
- type: array - minItems: 1 - items: - $ref: "#/components/schemas/ChatCompletionRequestMessage" - functions: - description: A list of functions the model may generate JSON inputs for. + type: string + description: The base model that is being fine-tuned. + object: + type: string + description: The object type, which is always "fine_tuning.job". + enum: [fine_tuning.job] + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: type: array - minItems: 1 - maxItems: 128 + description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). items: - $ref: "#/components/schemas/ChatCompletionFunctions" - function_call: - description: "Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. `auto` means the model can pick between an end-user or calling a function. Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present." - oneOf: - - type: string - enum: [none, auto] - - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *completions_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *completions_top_p_description - n: + type: string + example: file-abc123 + status: + type: string + description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. 
+ enum: + [ + "validating_files", + "queued", + "running", + "succeeded", + "failed", + "cancelled", + ] + trained_tokens: type: integer - minimum: 1 - maximum: 128 - default: 1 - example: 1 nullable: true - description: How many chat completion choices to generate for each input message. - stream: - description: > - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). - type: boolean + description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + training_file: + type: string + description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). + validation_file: + type: string nullable: true - default: false - stop: - description: | - Up to 4 sequences where the API will stop generating further tokens. - default: null - oneOf: - - type: string - nullable: true - - type: array - minItems: 1 - maxItems: 4 - items: - type: string - max_tokens: - description: | - The maximum number of [tokens](/tokenizer) to generate in the chat completion. + description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). 
+ required: + - created_at + - error + - finished_at + - fine_tuned_model + - hyperparameters + - id + - model + - object + - organization_id + - result_files + - status + - trained_tokens + - training_file + - validation_file + x-oaiMeta: + name: The fine-tuning job object + example: *fine_tuning_example - The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. - default: inf + FineTuningJobEvent: + type: object + description: Fine-tuning job event object + properties: + id: + type: string + created_at: type: integer - nullable: true - presence_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: *completions_presence_penalty_description - frequency_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: *completions_frequency_penalty_description - logit_bias: - type: object - x-oaiTypeLabel: map - default: null - nullable: true - additionalProperties: - type: integer - description: | - Modify the likelihood of specified tokens appearing in the completion. + level: + type: string + enum: ["info", "warn", "error"] + message: + type: string + object: + type: string + enum: [fine_tuning.job.event] + required: + - id + - object + - created_at + - level + - message + x-oaiMeta: + name: The fine-tuning job event object + example: | + { + "object": "fine_tuning.job.event", + "id": "ftevent-abc123" + "created_at": 1677610602, + "level": "info", + "message": "Created fine-tuning job" + } - Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - user: *end_user_param_configuration + CompletionUsage: + type: object + description: Usage statistics for the completion request. + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). required: - - model - - messages + - prompt_tokens + - completion_tokens + - total_tokens - CreateChatCompletionResponse: + RunCompletionUsage: type: object - description: Represents a chat completion response returned by model, based on the provided input. + description: Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). properties: - id: - type: string - description: A unique identifier for the chat completion. - object: - type: string - description: The object type, which is always `chat.completion`. - created: + completion_tokens: type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. - choices: - type: array - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. - items: - type: object - required: - - index - - message - - finish_reason - properties: - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: "#/components/schemas/ChatCompletionResponseMessage" - finish_reason: - type: string - description: &chat_completion_finish_reason_description | - The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, - `length` if the maximum number of tokens specified in the request was reached, - `content_filter` if content was omitted due to a flag from our content filters, - or `function_call` if the model called a function. - enum: ["stop", "length", "function_call", "content_filter"] - usage: - $ref: "#/components/schemas/CompletionUsage" + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). required: - - id - - object - - created - - model - - choices - x-oaiMeta: - name: The chat completion object - group: chat - example: *chat_completion_example + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true - ListPaginatedFineTuningJobsResponse: + RunStepCompletionUsage: type: object + description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. properties: - object: - type: string - data: - type: array - items: - $ref: "#/components/schemas/FineTuningJob" - has_more: - type: boolean + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run step. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). required: - - object - - data - - has_more + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true - CreateChatCompletionStreamResponse: + AssistantObject: type: object - description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. 
+ title: Assistant + description: Represents an `assistant` that can call the model and use tools. properties: id: + description: The identifier, which can be referenced in API endpoints. type: string - description: A unique identifier for the chat completion chunk. object: + description: The object type, which is always `assistant`. type: string - description: The object type, which is always `chat.completion.chunk`. - created: + enum: [assistant] + created_at: + description: The Unix timestamp (in seconds) for when the assistant was created. type: integer - description: The Unix timestamp (in seconds) of when the chat completion chunk was created. + name: + description: &assistant_name_param_description | + The name of the assistant. The maximum length is 256 characters. + type: string + maxLength: 256 + nullable: true + description: + description: &assistant_description_param_description | + The description of the assistant. The maximum length is 512 characters. + type: string + maxLength: 512 + nullable: true model: + description: *model_description type: string - description: The model to generate the completion. - choices: + instructions: + description: &assistant_instructions_param_description | + The system instructions that the assistant uses. The maximum length is 32768 characters. + type: string + maxLength: 32768 + nullable: true + tools: + description: &assistant_tools_param_description | + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + default: [] type: array - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + maxItems: 128 items: - type: object - required: - - index - - delta - - finish_reason - properties: - index: - type: integer - description: The index of the choice in the list of choices. 
- delta: - $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" - finish_reason: - type: string - description: *chat_completion_finish_reason_description - enum: ["stop", "length", "function_call"] - nullable: true + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsRetrieval" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + file_ids: + description: &assistant_file_param_description | + A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. + default: [] + type: array + maxItems: 20 + items: + type: string + metadata: + description: &metadata_description | + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true required: - id - object - - created + - created_at + - name + - description - model - - choices + - instructions + - tools + - file_ids + - metadata x-oaiMeta: - name: The chat completion chunk object - group: chat - example: *chat_completion_chunk_example + name: The assistant object + beta: true + example: *create_assistants_example - CreateEditRequest: + CreateAssistantRequest: type: object + additionalProperties: false properties: model: - description: ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. - example: "text-davinci-edit-001" + description: *model_description anyOf: - type: string - - type: string - enum: ["text-davinci-edit-001", "code-davinci-edit-001"] - x-oaiTypeLabel: string - input: - description: The input text to use as a starting point for the edit. 
+ name: + description: *assistant_name_param_description type: string - default: "" nullable: true - example: "What day of the wek is it?" - instruction: - description: The instruction that tells the model how to edit the prompt. + maxLength: 256 + description: + description: *assistant_description_param_description type: string - example: "Fix the spelling mistakes." - n: - type: integer - minimum: 1 - maximum: 20 - default: 1 - example: 1 - nullable: true - description: How many edits to generate for the input and instruction. - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *completions_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 nullable: true - description: *completions_top_p_description - required: - - model - - instruction - - CreateEditResponse: - type: object - title: Edit - deprecated: true - properties: - object: + maxLength: 512 + instructions: + description: *assistant_instructions_param_description type: string - description: The object type, which is always `edit`. - created: - type: integer - description: The Unix timestamp (in seconds) of when the edit was created. - choices: + nullable: true + maxLength: 32768 + tools: + description: *assistant_tools_param_description + default: [] type: array - description: A list of edit choices. Can be more than one if `n` is greater than 1. + maxItems: 128 items: - type: object - required: - - text - - index - - finish_reason - properties: - text: - type: string - description: The edited result. - index: - type: integer - description: The index of the choice in the list of choices. 
- finish_reason: - type: string - description: *completion_finish_reason_description - enum: ["stop", "length"] - usage: - $ref: "#/components/schemas/CompletionUsage" + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsRetrieval" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + file_ids: + description: *assistant_file_param_description + default: [] + maxItems: 20 + type: array + items: + type: string + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - - object - - created - - choices - - usage - x-oaiMeta: - name: The edit object - example: *edit_example + - model - CreateImageRequest: + ModifyAssistantRequest: type: object + additionalProperties: false properties: - prompt: - description: A text description of the desired image(s). The maximum length is 1000 characters. + model: + description: *model_description + anyOf: + - type: string + name: + description: *assistant_name_param_description type: string - example: "A cute baby sea otter" - n: &images_n - type: integer - minimum: 1 - maximum: 10 - default: 1 - example: 1 nullable: true - description: The number of images to generate. Must be between 1 and 10. - size: &images_size + maxLength: 256 + description: + description: *assistant_description_param_description type: string - enum: ["256x256", "512x512", "1024x1024"] - default: "1024x1024" - example: "1024x1024" nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - response_format: &images_response_format + maxLength: 512 + instructions: + description: *assistant_instructions_param_description type: string - enum: ["url", "b64_json"] - default: "url" - example: "url" nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
- user: *end_user_param_configuration - required: - - prompt - - ImagesResponse: - properties: - created: - type: integer - data: + maxLength: 32768 + tools: + description: *assistant_tools_param_description + default: [] type: array + maxItems: 128 items: - $ref: "#/components/schemas/Image" - required: - - created - - data + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsRetrieval" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + file_ids: + description: | + A list of [File](/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previously attached to the list but does not show up in the list, it will be deleted from the assistant. + default: [] + type: array + maxItems: 20 + items: + type: string + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true - Image: + DeleteAssistantResponse: type: object - description: Represents the url or the content of an image generated by the OpenAI API. properties: - url: + id: type: string - description: The URL of the generated image, if `response_format` is `url` (default). - b64_json: + deleted: + type: boolean + object: type: string - description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. - x-oaiMeta: - name: The image object - example: | - { - "url": "..." - } + enum: [assistant.deleted] + required: + - id + - object + - deleted - CreateImageEditRequest: + ListAssistantsResponse: type: object properties: - image: - description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. + object: type: string - format: binary - mask: - description: An additional image whose fully transparent areas (e.g. 
where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/AssistantObject" + first_id: type: string - format: binary - prompt: - description: A text description of the desired image(s). The maximum length is 1000 characters. + example: "asst_abc123" + last_id: type: string - example: "A cute baby sea otter wearing a beret" - n: *images_n - size: *images_size - response_format: *images_response_format - user: *end_user_param_configuration + example: "asst_abc456" + has_more: + type: boolean + example: false required: - - prompt - - image + - object + - data + - first_id + - last_id + - has_more + x-oaiMeta: + name: List assistants response object + group: chat + example: *list_assistants_example - CreateImageVariationRequest: + AssistantToolsCode: type: object + title: Code interpreter tool properties: - image: - description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. + type: type: string - format: binary - n: *images_n - size: *images_size - response_format: *images_response_format - user: *end_user_param_configuration + description: "The type of tool being defined: `code_interpreter`" + enum: ["code_interpreter"] required: - - image + - type - CreateModerationRequest: + AssistantToolsRetrieval: type: object + title: Retrieval tool properties: - input: - description: The input text to classify - oneOf: - - type: string - default: "" - example: "I want to kill them." - - type: array - items: - type: string - default: "" - example: "I want to kill them." - model: - description: | - Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. 
+ type: + type: string + description: "The type of tool being defined: `retrieval`" + enum: ["retrieval"] + required: + - type - The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. - nullable: false - default: "text-moderation-latest" - example: "text-moderation-stable" - anyOf: - - type: string - - type: string - enum: ["text-moderation-latest", "text-moderation-stable"] - x-oaiTypeLabel: string + AssistantToolsFunction: + type: object + title: Function tool + properties: + type: + type: string + description: "The type of tool being defined: `function`" + enum: ["function"] + function: + $ref: "#/components/schemas/FunctionObject" required: - - input + - type + - function - CreateModerationResponse: + RunObject: type: object - description: Represents policy compliance report by OpenAI's content moderation model against a given input. + title: A run on a thread + description: Represents an execution run on a [thread](/docs/api-reference/threads). properties: id: + description: The identifier, which can be referenced in API endpoints. type: string - description: The unique identifier for the moderation request. + object: + description: The object type, which is always `thread.run`. + type: string + enum: ["thread.run"] + created_at: + description: The Unix timestamp (in seconds) for when the run was created. + type: integer + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. + type: string + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. 
+ type: string + status: + description: The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. + type: string + enum: + [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + required_action: + type: object + description: Details on the action required to continue the run. Will be `null` if no action is required. + nullable: true + properties: + type: + description: For now, this is always `submit_tool_outputs`. + type: string + enum: ["submit_tool_outputs"] + submit_tool_outputs: + type: object + description: Details on the tool outputs needed for this run to continue. + properties: + tool_calls: + type: array + description: A list of the relevant tool calls. + items: + $ref: "#/components/schemas/RunToolCallObject" + required: + - tool_calls + required: + - type + - submit_tool_outputs + last_error: + type: object + description: The last error associated with this run. Will be `null` if there are no errors. + nullable: true + properties: + code: + type: string + description: One of `server_error` or `rate_limit_exceeded`. + enum: ["server_error", "rate_limit_exceeded"] + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + expires_at: + description: The Unix timestamp (in seconds) for when the run will expire. + type: integer + started_at: + description: The Unix timestamp (in seconds) for when the run was started. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run was cancelled. + type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run was completed. 
+ type: integer + nullable: true model: + description: The model that the [assistant](/docs/api-reference/assistants) used for this run. type: string - description: The model used to generate the moderation results. - results: + instructions: + description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + type: string + tools: + description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + default: [] type: array - description: A list of moderation objects. + maxItems: 20 items: - type: object - properties: - flagged: - type: boolean - description: Whether the content violates [OpenAI's usage policies](/policies/usage-policies). - categories: - type: object - description: A list of the categories, and whether they are flagged or not. - properties: - hate: - type: boolean - description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harrassment. - hate/threatening: - type: boolean - description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. - harassment: - type: boolean - description: Content that expresses, incites, or promotes harassing language towards any target. - harassment/threatening: - type: boolean - description: Harassment content that also includes violence or serious harm towards any target. - self-harm: - type: boolean - description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. - self-harm/intent: - type: boolean - description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. 
- self-harm/instructions: - type: boolean - description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. - sexual: - type: boolean - description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). - sexual/minors: - type: boolean - description: Sexual content that includes an individual who is under 18 years old. - violence: - type: boolean - description: Content that depicts death, violence, or physical injury. - violence/graphic: - type: boolean - description: Content that depicts death, violence, or physical injury in graphic detail. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - category_scores: - type: object - description: A list of the categories along with their scores as predicted by model. - properties: - hate: - type: number - description: The score for the category 'hate'. - hate/threatening: - type: number - description: The score for the category 'hate/threatening'. - harassment: - type: number - description: The score for the category 'harassment'. - harassment/threatening: - type: number - description: The score for the category 'harassment/threatening'. - self-harm: - type: number - description: The score for the category 'self-harm'. - self-harm/intent: - type: number - description: The score for the category 'self-harm/intent'. - self-harm/instructions: - type: number - description: The score for the category 'self-harm/instructions'. - sexual: - type: number - description: The score for the category 'sexual'. - sexual/minors: - type: number - description: The score for the category 'sexual/minors'. 
- violence: - type: number - description: The score for the category 'violence'. - violence/graphic: - type: number - description: The score for the category 'violence/graphic'. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - required: - - flagged - - categories - - category_scores + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsRetrieval" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + file_ids: + description: The list of [File](/docs/api-reference/files) IDs the [assistant](/docs/api-reference/assistants) used for this run. + default: [] + type: array + items: + type: string + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunCompletionUsage" + required: + - id + - object + - created_at + - thread_id + - assistant_id + - status + - required_action + - last_error + - expires_at + - started_at + - cancelled_at + - failed_at + - completed_at + - model + - instructions + - tools + - file_ids + - metadata + - usage + x-oaiMeta: + name: The run object + beta: true + example: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1698107661, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699073476, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699073498, + "last_error": null, + "model": "gpt-4", + "instructions": null, + "tools": [{"type": "retrieval"}, {"type": "code_interpreter"}], + "file_ids": [], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + CreateRunRequest: + type: object + additionalProperties: false + properties: + assistant_id: + 
description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + type: string + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + type: string + nullable: true + instructions: + description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. + type: string + nullable: true + additional_instructions: + description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + type: string + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. 
+ nullable: true + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsRetrieval" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - - id - - model - - results - x-oaiMeta: - name: The moderation object - example: *moderation_example - - ListFilesResponse: + - thread_id + - assistant_id + ListRunsResponse: type: object properties: object: type: string + example: "list" data: type: array items: - $ref: "#/components/schemas/OpenAIFile" + $ref: "#/components/schemas/RunObject" + first_id: + type: string + example: "run_abc123" + last_id: + type: string + example: "run_abc456" + has_more: + type: boolean + example: false required: - object - data - - CreateFileRequest: + - first_id + - last_id + - has_more + ModifyRunRequest: type: object additionalProperties: false properties: - file: - description: | - Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + SubmitToolOutputsRunRequest: + type: object + additionalProperties: false + properties: + tool_outputs: + description: A list of tools for which the outputs are being submitted. + type: array + items: + type: object + properties: + tool_call_id: + type: string + description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. + output: + type: string + description: The output of the tool call to be submitted to continue the run. + required: + - tool_outputs - If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. 
+ RunToolCallObject: + type: object + description: Tool call objects + properties: + id: type: string - format: binary - purpose: - description: | - The intended purpose of the uploaded documents. + description: The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + type: + type: string + description: The type of tool call the output is required for. For now, this is always `function`. + enum: ["function"] + function: + type: object + description: The function definition. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments that the model expects you to pass to the function. + required: + - name + - arguments + required: + - id + - type + - function - Use "fine-tune" for [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file. + CreateThreadAndRunRequest: + type: object + additionalProperties: false + properties: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. type: string + thread: + $ref: "#/components/schemas/CreateThreadRequest" + description: If no thread is provided, an empty thread will be created. + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + type: string + nullable: true + instructions: + description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. + type: string + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. 
+ nullable: true + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsRetrieval" + - $ref: "#/components/schemas/AssistantToolsFunction" + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - - file - - purpose + - thread_id + - assistant_id - DeleteFileResponse: + ThreadObject: type: object + title: Thread + description: Represents a thread that contains [messages](/docs/api-reference/messages). properties: id: + description: The identifier, which can be referenced in API endpoints. type: string object: + description: The object type, which is always `thread`. type: string - deleted: - type: boolean + enum: ["thread"] + created_at: + description: The Unix timestamp (in seconds) for when the thread was created. + type: integer + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - id - object - - deleted + - created_at + - metadata + x-oaiMeta: + name: The thread object + beta: true + example: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1698107661, + "metadata": {} + } - CreateFineTuningJobRequest: + CreateThreadRequest: type: object + additionalProperties: false properties: - training_file: - description: | - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/upload) for how to upload a file. - - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - type: string - example: "file-abc123" - validation_file: - description: | - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation - metrics periodically during fine-tuning. 
These metrics can be viewed in - the fine-tuning results file. - The same data should not be present in both train and validation files. - - Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - type: string + messages: + description: A list of [messages](/docs/api-reference/messages) to start the thread with. + type: array + items: + $ref: "#/components/schemas/CreateMessageRequest" + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map nullable: true - example: "file-abc123" - model: - description: | - The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - example: "gpt-3.5-turbo" - anyOf: - - type: string - - type: string - enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] - x-oaiTypeLabel: string - hyperparameters: + + ModifyThreadRequest: + type: object + additionalProperties: false + properties: + metadata: + description: *metadata_description type: object - description: The hyperparameters used for the fine-tuning job. - properties: - n_epochs: - description: | - The number of epochs to train the model for. An epoch refers to one - full cycle through the training dataset. - oneOf: - - type: string - enum: [auto] - - type: integer - minimum: 1 - maximum: 50 - default: auto - suffix: - description: | - A string of up to 18 characters that will be added to your fine-tuned model name. + x-oaiTypeLabel: map + nullable: true - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. 
+ DeleteThreadResponse: + type: object + properties: + id: type: string - minLength: 1 - maxLength: 40 - default: null - nullable: true + deleted: + type: boolean + object: + type: string + enum: [thread.deleted] required: - - training_file - - model + - id + - object + - deleted - ListFineTuningJobEventsResponse: - type: object + ListThreadsResponse: properties: object: type: string + example: "list" data: type: array items: - $ref: "#/components/schemas/FineTuningJobEvent" + $ref: "#/components/schemas/ThreadObject" + first_id: + type: string + example: "asst_abc123" + last_id: + type: string + example: "asst_abc456" + has_more: + type: boolean + example: false required: - object - data + - first_id + - last_id + - has_more - CreateFineTuneRequest: + MessageObject: type: object + title: The message object + description: Represents a message within a [thread](/docs/api-reference/threads). properties: - training_file: - description: | - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/upload) for how to upload a file. - - Your dataset must be formatted as a JSONL file, where each training - example is a JSON object with the keys "prompt" and "completion". - Additionally, you must upload your file with the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details. + id: + description: The identifier, which can be referenced in API endpoints. type: string - example: "file-abc123" - validation_file: - description: | - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation - metrics periodically during fine-tuning. These metrics can be viewed in - the [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - Your train and validation data should be mutually exclusive. 
- - Your dataset must be formatted as a JSONL file, where each validation - example is a JSON object with the keys "prompt" and "completion". - Additionally, you must upload your file with the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details. + object: + description: The object type, which is always `thread.message`. type: string - nullable: true - example: "file-abc123" - model: - description: | - The name of the base model to fine-tune. You can select one of "ada", - "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. - To learn more about these models, see the - [Models](/docs/models) documentation. - default: "curie" - example: "curie" - nullable: true - anyOf: - - type: string - - type: string - enum: ["ada", "babbage", "curie", "davinci"] - x-oaiTypeLabel: string - n_epochs: - description: | - The number of epochs to train the model for. An epoch refers to one - full cycle through the training dataset. - default: 4 - type: integer - nullable: true - batch_size: - description: | - The batch size to use for training. The batch size is the number of - training examples used to train a single forward and backward pass. - - By default, the batch size will be dynamically configured to be - ~0.2% of the number of examples in the training set, capped at 256 - - in general, we've found that larger batch sizes tend to work better - for larger datasets. - default: null - type: integer - nullable: true - learning_rate_multiplier: - description: | - The learning rate multiplier to use for training. - The fine-tuning learning rate is the original learning rate used for - pretraining multiplied by this value. - - By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 - depending on final `batch_size` (larger learning rates tend to - perform better with larger batch sizes). 
We recommend experimenting - with values in the range 0.02 to 0.2 to see what produces the best - results. - default: null - type: number - nullable: true - prompt_loss_weight: - description: | - The weight to use for loss on the prompt tokens. This controls how - much the model tries to learn to generate the prompt (as compared - to the completion which always has a weight of 1.0), and can add - a stabilizing effect to training when completions are short. - - If prompts are extremely long (relative to completions), it may make - sense to reduce this weight so as to avoid over-prioritizing - learning the prompt. - default: 0.01 - type: number - nullable: true - compute_classification_metrics: - description: | - If set, we calculate classification-specific metrics such as accuracy - and F-1 score using the validation set at the end of every epoch. - These metrics can be viewed in the [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - - In order to compute classification metrics, you must provide a - `validation_file`. Additionally, you must - specify `classification_n_classes` for multiclass classification or - `classification_positive_class` for binary classification. - type: boolean - default: false - nullable: true - classification_n_classes: - description: | - The number of classes in a classification task. - - This parameter is required for multiclass classification. + enum: ["thread.message"] + created_at: + description: The Unix timestamp (in seconds) for when the message was created. type: integer - default: null - nullable: true - classification_positive_class: - description: | - The positive class in binary classification. - - This parameter is needed to generate precision, recall, and F1 - metrics when doing binary classification. + thread_id: + description: The [thread](/docs/api-reference/threads) ID that this message belongs to. 
type: string - default: null - nullable: true - classification_betas: - description: | - If this is provided, we calculate F-beta scores at the specified - beta values. The F-beta score is a generalization of F-1 score. - This is only used for binary classification. - - With a beta of 1 (i.e. the F-1 score), precision and recall are - given the same weight. A larger beta score puts more weight on - recall and less on precision. A smaller beta score puts more weight - on precision and less on recall. + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: ["user", "assistant"] + content: + description: The content of the message in array of text and/or images. type: array items: - type: number - example: [0.6, 1, 1.5, 2] - default: null - nullable: true - suffix: - description: | - A string of up to 40 characters that will be added to your fine-tuned model name. - - For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. - type: string - minLength: 1 - maxLength: 40 - default: null + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentTextObject" + x-oaiExpandable: true + assistant_id: + description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. + type: string nullable: true - required: - - training_file - - ListFineTunesResponse: - type: object - properties: - object: + run_id: + description: If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of this message. type: string - data: + nullable: true + file_ids: + description: A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be attached to a message. 
+ default: [] + maxItems: 10 type: array items: - $ref: "#/components/schemas/FineTune" + type: string + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: + - id - object - - data + - created_at + - thread_id + - role + - content + - assistant_id + - run_id + - file_ids + - metadata + x-oaiMeta: + name: The message object + beta: true + example: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1698983503, + "thread_id": "thread_abc123", + "role": "assistant", + "content": [ + { + "type": "text", + "text": { + "value": "Hi! How can I help you today?", + "annotations": [] + } + } + ], + "file_ids": [], + "assistant_id": "asst_abc123", + "run_id": "run_abc123", + "metadata": {} + } - ListFineTuneEventsResponse: + CreateMessageRequest: type: object + additionalProperties: false + required: + - role + - content properties: - object: + role: type: string - data: + enum: ["user"] + description: The role of the entity that is creating the message. Currently only `user` is supported. + content: + type: string + minLength: 1 + maxLength: 32768 + description: The content of the message. + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a maximum of 10 files attached to a message. Useful for tools like `retrieval` and `code_interpreter` that can access and use files. 
+ default: [] type: array + minItems: 1 + maxItems: 10 items: - $ref: "#/components/schemas/FineTuneEvent" - required: - - object - - data + type: string + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true - CreateEmbeddingRequest: + ModifyMessageRequest: type: object additionalProperties: false properties: - model: - description: *model_description - example: "text-embedding-ada-002" - anyOf: - - type: string - - type: string - enum: ["text-embedding-ada-002"] - x-oaiTypeLabel: string - input: - description: | - Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. - example: "The quick brown fox jumped over the lazy dog" - oneOf: - - type: string - default: "" - example: "This is a test." - - type: array - items: - type: string - default: "" - example: "This is a test." - - type: array - minItems: 1 - items: - type: integer - example: "[1212, 318, 257, 1332, 13]" - - type: array - minItems: 1 - items: - type: array - minItems: 1 - items: - type: integer - example: "[[1212, 318, 257, 1332, 13]]" - user: *end_user_param_configuration - required: - - model - - input + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true - CreateEmbeddingResponse: + DeleteMessageResponse: type: object properties: + id: + type: string + deleted: + type: boolean object: type: string - description: The object type, which is always "embedding". 
- model: + enum: [thread.message.deleted] + required: + - id + - object + - deleted + + ListMessagesResponse: + properties: + object: type: string - description: The name of the model used to generate the embedding. + example: "list" data: type: array - description: The list of embeddings generated by the model. items: - $ref: "#/components/schemas/Embedding" - usage: - type: object - description: The usage information for the request. - properties: - prompt_tokens: - type: integer - description: The number of tokens used by the prompt. - total_tokens: - type: integer - description: The total number of tokens used by the request. - required: - - prompt_tokens - - total_tokens + $ref: "#/components/schemas/MessageObject" + first_id: + type: string + example: "msg_abc123" + last_id: + type: string + example: "msg_abc123" + has_more: + type: boolean + example: false required: - object - - model - data - - usage + - first_id + - last_id + - has_more - CreateTranscriptionRequest: + MessageContentImageFileObject: + title: Image file type: object - additionalProperties: false + description: References an image [File](/docs/api-reference/files) in the content of a message. properties: - file: - description: | - The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - type: string - x-oaiTypeLabel: file - format: binary - model: - description: | - ID of the model to use. Only `whisper-1` is currently available. - example: whisper-1 - anyOf: - - type: string - - type: string - enum: ["whisper-1"] - x-oaiTypeLabel: string - prompt: - description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - type: string - response_format: - description: | - The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. 
- type: string - enum: - - json - - text - - srt - - verbose_json - - vtt - default: json - temperature: - description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. - type: number - default: 0 - language: - description: | - The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. + type: + description: Always `image_file`. type: string + enum: ["image_file"] + image_file: + type: object + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. + type: string + required: + - file_id required: - - file - - model + - type + - image_file - # Note: This does not currently support the non-default response format types. - CreateTranscriptionResponse: + MessageContentTextObject: + title: Text type: object + description: The text content that is part of a message. properties: - text: + type: + description: Always `text`. type: string + enum: ["text"] + text: + type: object + properties: + value: + description: The data that makes up the text. 
+ type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - value + - annotations required: + - type - text - CreateTranslationRequest: + MessageContentTextAnnotationsFileCitationObject: + title: File citation type: object - additionalProperties: false + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "retrieval" tool to search files. properties: - file: - description: | - The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - type: string - x-oaiTypeLabel: file - format: binary - model: - description: | - ID of the model to use. Only `whisper-1` is currently available. - example: whisper-1 - anyOf: - - type: string - - type: string - enum: ["whisper-1"] - x-oaiTypeLabel: string - prompt: - description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. - type: string - response_format: - description: | - The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. + type: + description: Always `file_citation`. type: string - default: json - temperature: - description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. 
- type: number - default: 0 - required: - - file - - model - - # Note: This does not currently support the non-default response format types. - CreateTranslationResponse: - type: object - properties: + enum: ["file_citation"] text: + description: The text in the message content that needs to be replaced. type: string + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. + type: string + quote: + description: The specific quote in the file. + type: string + required: + - file_id + - quote + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 required: + - type - text + - file_citation + - start_index + - end_index - Model: - title: Model - description: Describes an OpenAI model offering that can be used with the API. + MessageContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. properties: - id: + type: + description: Always `file_path`. type: string - description: The model identifier, which can be referenced in the API endpoints. - object: + enum: ["file_path"] + text: + description: The text in the message content that needs to be replaced. type: string - description: The object type, which is always "model". - created: + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. + type: string + required: + - file_id + start_index: type: integer - description: The Unix timestamp (in seconds) when the model was created. - owned_by: - type: string - description: The organization that owns the model. 
- required: - - id - - object - - created - - owned_by - x-oaiMeta: - name: The model object - example: *retrieve_model_response + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - type + - text + - file_path + - start_index + - end_index - OpenAIFile: - title: OpenAIFile + RunStepObject: + type: object + title: Run steps description: | - The `File` object represents a document that has been uploaded to OpenAI. + Represents a step in execution of a run. properties: id: + description: The identifier of the run step, which can be referenced in API endpoints. type: string - description: The file identifier, which can be referenced in the API endpoints. object: + description: The object type, which is always `thread.run.step`. type: string - description: The object type, which is always "file". - bytes: - type: integer - description: The size of the file in bytes. + enum: ["thread.run.step"] created_at: + description: The Unix timestamp (in seconds) for when the run step was created. type: integer - description: The Unix timestamp (in seconds) for when the file was created. - filename: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. type: string - description: The name of the file. - purpose: + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was run. type: string - description: The intended purpose of the file. Currently, only "fine-tune" is supported. - status: + run_id: + description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. type: string - description: The current status of the file, which can be either `uploaded`, `processed`, `pending`, `error`, `deleting` or `deleted`. - status_details: + type: + description: The type of run step, which can be either `message_creation` or `tool_calls`. 
type: string + enum: ["message_creation", "tool_calls"] + status: + description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. + type: string + enum: ["in_progress", "cancelled", "failed", "completed", "expired"] + step_details: + type: object + description: The details of the run step. + oneOf: + - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" + x-oaiExpandable: true + last_error: + type: object + description: The last error associated with this run step. Will be `null` if there are no errors. nullable: true - description: | - Additional details about the status of the file. If the file is in the `error` state, this will include a message describing the error. + properties: + code: + type: string + description: One of `server_error` or `rate_limit_exceeded`. + enum: ["server_error", "rate_limit_exceeded"] + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + expired_at: + description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run step was cancelled. + type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run step failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run step completed. 
+ type: integer + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunStepCompletionUsage" required: - id - object - - bytes - created_at - - filename - - purpose - - format + - assistant_id + - thread_id + - run_id + - type + - status + - step_details + - last_error + - expired_at + - cancelled_at + - failed_at + - completed_at + - metadata + - usage x-oaiMeta: - name: The file object - example: | - { - "id": "file-abc123", - "object": "file", - "bytes": 120000, - "created_at": 1677610602, - "filename": "my_file.jsonl", - "purpose": "fine-tune", - "status": "uploaded", - "status_details": null - } - Embedding: - type: object - description: | - Represents an embedding vector returned by embedding endpoint. + name: The run step object + beta: true + example: *run_step_object_example + + ListRunStepsResponse: properties: - index: - type: integer - description: The index of the embedding in the list of embeddings. object: type: string - description: The object type, which is always "embedding". - embedding: + example: "list" + data: type: array - description: | - The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). items: - type: number + $ref: "#/components/schemas/RunStepObject" + first_id: + type: string + example: "step_abc123" + last_id: + type: string + example: "step_abc456" + has_more: + type: boolean + example: false required: - - index - object - - embedding - x-oaiMeta: - name: The embedding object - example: | - { - "object": "embedding", - "embedding": [ - 0.0023064255, - -0.009327292, - .... 
(1536 floats total for ada-002) - -0.0028842222, - ], - "index": 0 - } + - data + - first_id + - last_id + - has_more - FineTuningJob: - title: FineTuningJob - description: | - The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. + RunStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. properties: - id: + type: + description: Always `message_creation`. type: string - description: The object identifier, which can be referenced in the API endpoints. - object: + enum: ["message_creation"] + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - message_id + required: + - type + - message_creation + + RunStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. type: string - description: The object type, which is always "fine_tuning.job". - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - finished_at: - type: integer - nullable: true - description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. - model: + enum: ["tool_calls"] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `retrieval`, or `function`. 
+ items: + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsRetrievalObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + - tool_calls + + RunStepDetailsToolCallsCodeObject: + title: Code interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + id: type: string - description: The base model that is being fine-tuned. - fine_tuned_model: + description: The ID of the tool call. + type: type: string - nullable: true - description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. - organization_id: + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + enum: ["code_interpreter"] + code_interpreter: + type: object + description: The Code Interpreter tool call definition. + required: + - input + - outputs + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true + required: + - id + - type + - code_interpreter + + RunStepDetailsToolCallsCodeOutputLogsObject: + title: Code interpreter log output + type: object + description: Text output from the Code Interpreter tool call as part of a run step. + properties: + type: + description: Always `logs`. type: string - description: The organization that owns the fine-tuning job. 
- status: + enum: ["logs"] + logs: type: string - description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. - hyperparameters: + description: The text output from the Code Interpreter tool call. + required: + - type + - logs + + RunStepDetailsToolCallsCodeOutputImageObject: + title: Code interpreter image output + type: object + properties: + type: + description: Always `image`. + type: string + enum: ["image"] + image: type: object - description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. properties: - n_epochs: - oneOf: - - type: string - enum: [auto] - - type: integer - minimum: 1 - maximum: 50 - default: auto - description: - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + file_id: + description: The [file](/docs/api-reference/files) ID of the image. + type: string required: - - n_epochs - training_file: + - file_id + required: + - type + - image + + RunStepDetailsToolCallsRetrievalObject: + title: Retrieval tool call + type: object + properties: + id: type: string - description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). - validation_file: + description: The ID of the tool call object. + type: type: string - nullable: true - description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). - result_files: - type: array - description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). 
- items: - type: string - example: file-abc123 - trained_tokens: - type: integer - nullable: true - description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. - error: + description: The type of tool call. This is always going to be `retrieval` for this type of tool call. + enum: ["retrieval"] + retrieval: type: object - nullable: true - description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + required: + - id + - type + - retrieval + + RunStepDetailsToolCallsFunctionObject: + type: object + title: Function tool call + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: ["function"] + function: + type: object + description: The definition of the function that was called. properties: - message: + name: type: string - description: A human-readable error message. - code: + description: The name of the function. + arguments: type: string - description: A machine-readable error code. - param: + description: The arguments passed to the function. + output: type: string - description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. 
nullable: true required: - - message - - code - - param + - name + - arguments + - output required: - id - - object - - created_at - - finished_at - - model - - fine_tuned_model - - organization_id - - status - - hyperparameters - - training_file - - validation_file - - result_files - - trained_tokens - - error - x-oaiMeta: - name: The fine-tuning job object - example: *fine_tuning_example + - type + - function - FineTuningEvent: - title: FineTuningEvent + AssistantFileObject: + type: object + title: Assistant files + description: A list of [Files](/docs/api-reference/files) attached to an `assistant`. properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string object: + description: The object type, which is always `assistant.file`. type: string + enum: [assistant.file] created_at: + description: The Unix timestamp (in seconds) for when the assistant file was created. type: integer - level: - type: string - message: + assistant_id: + description: The assistant ID that the file is attached to. type: string - data: - oneOf: - - type: string - default: none - enum: [none, string] - type: - oneOf: - - type: string - default: none - enum: ["message", "metrics"] required: + - id - object - - created_at - - level - - message - x-oiMeta: - name: The fine-tuning event object + - created_at + - assistant_id + x-oaiMeta: + name: The assistant file object + beta: true example: | { - "object": "fine_tuning.job.event", - "created_at": "1689376978", - "level": "info" | "warn" | "error", - "message": "", - "data": null | JSON, - "type": "message"| "metrics" + "id": "file-abc123", + "object": "assistant.file", + "created_at": 1699055364, + "assistant_id": "asst_abc123" } - FineTune: - title: FineTune - deprecated: true - description: | - The `FineTune` object represents a legacy fine-tune job that has been created through the API. 
+ CreateAssistantFileRequest: + type: object + additionalProperties: false + properties: + file_id: + description: A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should use. Useful for tools like `retrieval` and `code_interpreter` that can access files. + type: string + required: + - file_id + + DeleteAssistantFileResponse: + type: object + description: Deletes the association between the assistant and the file, but does not delete the [File](/docs/api-reference/files) object itself. properties: id: type: string - description: The object identifier, which can be referenced in the API endpoints. + deleted: + type: boolean object: type: string - description: The object type, which is always "fine-tune". - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - updated_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was last updated. - model: - type: string - description: The base model that is being fine-tuned. - fine_tuned_model: - type: string - nullable: true - description: The name of the fine-tuned model that is being created. - organization_id: - type: string - description: The organization that owns the fine-tuning job. - status: + enum: [assistant.file.deleted] + required: + - id + - object + - deleted + ListAssistantFilesResponse: + properties: + object: type: string - description: The current status of the fine-tuning job, which can be either `created`, `running`, `succeeded`, `failed`, or `cancelled`. - hyperparams: - type: object - description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. - properties: - n_epochs: - type: integer - description: | - The number of epochs to train the model for. An epoch refers to one - full cycle through the training dataset. 
- batch_size: - type: integer - description: | - The batch size to use for training. The batch size is the number of - training examples used to train a single forward and backward pass. - prompt_loss_weight: - type: number - description: | - The weight to use for loss on the prompt tokens. - learning_rate_multiplier: - type: number - description: | - The learning rate multiplier to use for training. - compute_classification_metrics: - type: boolean - description: | - The classification metrics to compute using the validation dataset at the end of every epoch. - classification_positive_class: - type: string - description: | - The positive class to use for computing classification metrics. - classification_n_classes: - type: integer - description: | - The number of classes to use for computing classification metrics. - required: - - n_epochs - - batch_size - - prompt_loss_weight - - learning_rate_multiplier - training_files: - type: array - description: The list of files used for training. - items: - $ref: "#/components/schemas/OpenAIFile" - validation_files: - type: array - description: The list of files used for validation. - items: - $ref: "#/components/schemas/OpenAIFile" - result_files: - type: array - description: The compiled results files for the fine-tuning job. - items: - $ref: "#/components/schemas/OpenAIFile" - events: + example: "list" + data: type: array - description: The list of events that have been observed in the lifecycle of the FineTune job. 
items: - $ref: "#/components/schemas/FineTuneEvent" + $ref: "#/components/schemas/AssistantFileObject" + first_id: + type: string + example: "file-abc123" + last_id: + type: string + example: "file-abc456" + has_more: + type: boolean + example: false required: - - id - object - - created_at - - updated_at - - model - - fine_tuned_model - - organization_id - - status - - hyperparams - - training_files - - validation_files - - result_files - x-oaiMeta: - name: The fine-tune object - example: *fine_tune_example + - data + - items + - first_id + - last_id + - has_more - FineTuningJobEvent: - title: FineTuningJobEvent + MessageFileObject: + type: object + title: Message files + description: A list of files attached to a `message`. properties: id: + description: The identifier, which can be referenced in API endpoints. type: string object: + description: The object type, which is always `thread.message.file`. type: string + enum: ["thread.message.file"] created_at: + description: The Unix timestamp (in seconds) for when the message file was created. type: integer - level: - type: string - enum: ["info", "warn", "error"] - message: + message_id: + description: The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. 
type: string required: - id - object - created_at - - level - - message - x-oiMeta: - name: The fine-tuning job event object + - message_id + x-oaiMeta: + name: The message file object + beta: true example: | { - "object": "event", - "id": "ftevent-abc123" - "created_at": 1677610602, - "level": "info", - "message": "Created fine-tuning job" + "id": "file-abc123", + "object": "thread.message.file", + "created_at": 1698107661, + "message_id": "message_QLoItBbqwyAJEzlTy4y9kOMM", + "file_id": "file-abc123" } - FineTuneEvent: - title: FineTuneEvent + ListMessageFilesResponse: properties: object: type: string - created_at: - type: integer - level: + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/MessageFileObject" + first_id: type: string - message: + example: "file-abc123" + last_id: type: string + example: "file-abc456" + has_more: + type: boolean + example: false required: - object - - created_at - - level - - message - x-oiMeta: - name: The fine-tune event object - example: | - { - "object": "event", - "created_at": 1677610602, - "level": "info", - "message": "Created fine-tune job" - } - CompletionUsage: - type: object - description: Usage statistics for the completion request. - properties: - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). - required: - - prompt_tokens - - completion_tokens - - total_tokens + - data + - items + - first_id + - last_id + - has_more security: - ApiKeyAuth: [] - x-oaiMeta: groups: # > General Notes @@ -4193,10 +8697,13 @@ x-oaiMeta: - id: audio title: Audio description: | - Learn how to turn audio into text. + Learn how to turn audio into text or text into audio. 
Related guide: [Speech to text](/docs/guides/speech-to-text) sections: + - type: endpoint + key: createSpeech + path: createSpeech - type: endpoint key: createTranscription path: createTranscription @@ -4208,31 +8715,17 @@ x-oaiMeta: description: | Given a list of messages comprising a conversation, the model will return a response. - Related guide: [Chat completions](/docs/guides/gpt) + Related guide: [Chat Completions](/docs/guides/text-generation) sections: + - type: endpoint + key: createChatCompletion + path: create - type: object key: CreateChatCompletionResponse path: object - type: object key: CreateChatCompletionStreamResponse path: streaming - - type: endpoint - key: createChatCompletion - path: create - - id: completions - title: Completions - legacy: true - description: | - Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. We recommend most users use our Chat completions API. [Learn more](/docs/deprecations/2023-07-06-gpt-and-embeddings) - - Related guide: [Legacy Completions](/docs/guides/gpt/completions-api) - sections: - - type: object - key: CreateCompletionResponse - path: object - - type: endpoint - key: createCompletion - path: create - id: embeddings title: Embeddings description: | @@ -4240,59 +8733,63 @@ x-oaiMeta: Related guide: [Embeddings](/docs/guides/embeddings) sections: - - type: object - key: Embedding - path: object - type: endpoint key: createEmbedding path: create + - type: object + key: Embedding + path: object - id: fine-tuning title: Fine-tuning description: | Manage fine-tuning jobs to tailor a model to your specific training data. 
- Related guide: [fine-tune models](/docs/guides/fine-tuning) + Related guide: [Fine-tune models](/docs/guides/fine-tuning) sections: - - type: object - path: object - key: FineTuningJob - type: endpoint key: createFineTuningJob path: create - type: endpoint key: listPaginatedFineTuningJobs + path: list + - type: endpoint + key: listFineTuningEvents + path: list-events - type: endpoint key: retrieveFineTuningJob path: retrieve - type: endpoint key: cancelFineTuningJob path: cancel - - type: endpoint - key: listFineTuningEvents - path: list-events + - type: object + key: FineTuningJob + path: object + - type: object + key: FineTuningJobEvent + path: event-object - id: files title: Files description: | - Files are used to upload documents that can be used with features like [fine-tuning](/docs/api-reference/fine-tuning). + Files are used to upload documents that can be used with features like [Assistants](/docs/api-reference/assistants) and [Fine-tuning](/docs/api-reference/fine-tuning). sections: - - type: object - key: OpenAIFile - path: object - - type: endpoint - key: listFiles - path: list - type: endpoint key: createFile path: create - type: endpoint - key: deleteFile - path: delete + key: listFiles + path: list - type: endpoint key: retrieveFile path: retrieve + - type: endpoint + key: deleteFile + path: delete - type: endpoint key: downloadFile path: retrieve-contents + - type: object + key: OpenAIFile + path: object - id: images title: Images description: | @@ -4300,9 +8797,6 @@ x-oaiMeta: Related guide: [Image generation](/docs/guides/images) sections: - - type: object - key: Image - path: object - type: endpoint key: createImage path: create @@ -4312,14 +8806,14 @@ x-oaiMeta: - type: endpoint key: createImageVariation path: createVariation + - type: object + key: Image + path: object - id: models title: Models description: | List and describe the various models available in the API. 
You can refer to the [Models](/docs/models) documentation to understand what models are available and the differences between them. sections: - - type: object - key: Model - path: object - type: endpoint key: listModels path: list @@ -4329,6 +8823,9 @@ x-oaiMeta: - type: endpoint key: deleteModel path: delete + - type: object + key: Model + path: object - id: moderations title: Moderations description: | @@ -4336,47 +8833,158 @@ x-oaiMeta: Related guide: [Moderations](/docs/guides/moderation) sections: + - type: endpoint + key: createModeration + path: create - type: object key: CreateModerationResponse path: object + - id: assistants + title: Assistants + beta: true + description: | + Build assistants that can call models and use tools to perform tasks. + + [Get started with the Assistants API](/docs/assistants) + sections: - type: endpoint - key: createModeration - path: create - - id: fine-tunes - title: Fine-tunes - deprecated: true + key: createAssistant + path: createAssistant + - type: endpoint + key: createAssistantFile + path: createAssistantFile + - type: endpoint + key: listAssistants + path: listAssistants + - type: endpoint + key: listAssistantFiles + path: listAssistantFiles + - type: endpoint + key: getAssistant + path: getAssistant + - type: endpoint + key: getAssistantFile + path: getAssistantFile + - type: endpoint + key: modifyAssistant + path: modifyAssistant + - type: endpoint + key: deleteAssistant + path: deleteAssistant + - type: endpoint + key: deleteAssistantFile + path: deleteAssistantFile + - type: object + key: AssistantObject + path: object + - type: object + key: AssistantFileObject + path: file-object + - id: threads + title: Threads + beta: true description: | - Manage legacy fine-tuning jobs to tailor a model to your specific training data. + Create threads that assistants can interact with. 
- We recommend transitioning to the updating [fine-tuning API](/docs/guides/fine-tuning) + Related guide: [Assistants](/docs/assistants/overview) sections: + - type: endpoint + key: createThread + path: createThread + - type: endpoint + key: getThread + path: getThread + - type: endpoint + key: modifyThread + path: modifyThread + - type: endpoint + key: deleteThread + path: deleteThread - type: object + key: ThreadObject path: object - key: FineTune + - id: messages + title: Messages + beta: true + description: | + Create messages within threads + + Related guide: [Assistants](/docs/assistants/overview) + sections: - type: endpoint - key: createFineTune - path: create + key: createMessage + path: createMessage - type: endpoint - key: listFineTunes - path: list + key: listMessages + path: listMessages - type: endpoint - key: retrieveFineTune - path: retrieve + key: listMessageFiles + path: listMessageFiles - type: endpoint - key: cancelFineTune - path: cancel + key: getMessage + path: getMessage - type: endpoint - key: listFineTuneEvents - path: list-events - - id: edits - title: Edits - deprecated: true + key: getMessageFile + path: getMessageFile + - type: endpoint + key: modifyMessage + path: modifyMessage + - type: object + key: MessageObject + path: object + - type: object + key: MessageFileObject + path: file-object + - id: runs + title: Runs + beta: true description: | - Given a prompt and an instruction, the model will return an edited version of the prompt. + Represents an execution run on a thread. 
+ + Related guide: [Assistants](/docs/assistants/overview) sections: + - type: endpoint + key: createRun + path: createRun + - type: endpoint + key: createThreadAndRun + path: createThreadAndRun + - type: endpoint + key: listRuns + path: listRuns + - type: endpoint + key: listRunSteps + path: listRunSteps + - type: endpoint + key: getRun + path: getRun + - type: endpoint + key: getRunStep + path: getRunStep + - type: endpoint + key: modifyRun + path: modifyRun + - type: endpoint + key: submitToolOuputsToRun + path: submitToolOutputs + - type: endpoint + key: cancelRun + path: cancelRun - type: object - key: CreateEditResponse + key: RunObject path: object + - type: object + key: RunStepObject + path: step-object + - id: completions + title: Completions + legacy: true + description: | + Given a prompt, the model will return one or more predicted completions along with the probabilities of alternative tokens at each position. Most developer should use our [Chat Completions API](/docs/guides/text-generation/text-generation-models) to leverage our best and newest models. Most models that support the legacy Completions endpoint [will be shut off on January 4th, 2024](/docs/deprecations/2023-07-06-gpt-and-embeddings). 
+ sections: - type: endpoint - key: createEdit + key: createCompletion path: create + - type: object + key: CreateCompletionResponse + path: object \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index a77b672a3..1c8159a55 100644 --- a/package-lock.json +++ b/package-lock.json @@ -8,18 +8,77 @@ "name": "openai-tsp", "version": "0.1.0", "dependencies": { - "@typespec/compiler": "^0.49.0-dev.11", - "@typespec/openapi": "^0.49.0-dev.4", - "@typespec/openapi3": "^0.49.0-dev.10", - "@typespec/rest": "^0.49.0-dev.3" + "@azure-tools/typespec-csharp": "latest", + "@typespec/compiler": "^0.52.0", + "@typespec/http": "^0.52.0", + "@typespec/openapi": "^0.52.0", + "@typespec/openapi3": "^0.52.0", + "@typespec/rest": "^0.52.0", + "@typespec/versioning": "^0.52.0" + } + }, + "node_modules/@autorest/csharp": { + "version": "3.0.0-beta.20240202.1", + "resolved": "https://registry.npmjs.org/@autorest/csharp/-/csharp-3.0.0-beta.20240202.1.tgz", + "integrity": "sha512-us+dLFipCJbR0uDLiUg7nFsVpV2bJB6CWSTKc30WbE8HvrX0inYxm97LcDw3k3EiOM5frQGSYetWJgXbyuW+jw==" + }, + "node_modules/@azure-tools/typespec-azure-core": { + "version": "0.38.0", + "resolved": "https://registry.npmjs.org/@azure-tools/typespec-azure-core/-/typespec-azure-core-0.38.0.tgz", + "integrity": "sha512-ASM+njC2lpzPykzw2OicWIaAOH+OBe3bVMrufEnINBjlr7owAtudvjrTLLWmAVMBciL/YOF579KdyjxTbaxJ5A==", + "peer": true, + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "@typespec/compiler": "~0.52.0", + "@typespec/http": "~0.52.0", + "@typespec/rest": "~0.52.0" + } + }, + "node_modules/@azure-tools/typespec-client-generator-core": { + "version": "0.38.0", + "resolved": "https://registry.npmjs.org/@azure-tools/typespec-client-generator-core/-/typespec-client-generator-core-0.38.0.tgz", + "integrity": "sha512-DUDIHJikz3Ai8uPk3vKFoMkkGPUxoD5DbGdwkN/pQxaL6Aze8HV4LGEOGtvaIu0SsGjCX9G3XPAXoBoupYgXbw==", + "peer": true, + "dependencies": { + "change-case": "~5.3.0", + "pluralize": "^8.0.0" + 
}, + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "@typespec/compiler": "~0.52.0", + "@typespec/http": "~0.52.0", + "@typespec/rest": "~0.52.0", + "@typespec/versioning": "~0.52.0" + } + }, + "node_modules/@azure-tools/typespec-csharp": { + "version": "0.2.0-beta.20240202.1", + "resolved": "https://registry.npmjs.org/@azure-tools/typespec-csharp/-/typespec-csharp-0.2.0-beta.20240202.1.tgz", + "integrity": "sha512-V+AvsqS7OMUcYIdIcVEyhS3i2MVP/rPZqCzDpIOJAcH/4oK4dHVz4RcgB/7+fM+PY7agWrM3zvR5XTkAeBqL9A==", + "dependencies": { + "@autorest/csharp": "3.0.0-beta.20240202.1", + "json-serialize-refs": "0.1.0-0", + "winston": "^3.8.2" + }, + "peerDependencies": { + "@azure-tools/typespec-azure-core": ">=0.36.0 <1.0.0", + "@azure-tools/typespec-client-generator-core": ">=0.36.0 <1.0.0", + "@typespec/compiler": ">=0.50.0 <1.0.0", + "@typespec/http": ">=0.50.0 <1.0.0", + "@typespec/rest": ">=0.50.0 <1.0.0", + "@typespec/versioning": ">=0.50.0 <1.0.0" } }, "node_modules/@babel/code-frame": { - "version": "7.22.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", - "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz", + "integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==", "dependencies": { - "@babel/highlight": "^7.22.13", + "@babel/highlight": "^7.23.4", "chalk": "^2.4.2" }, "engines": { @@ -27,19 +86,19 @@ } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.5.tgz", - "integrity": "sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ==", + "version": "7.22.20", + "resolved": 
"https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/highlight": { - "version": "7.22.13", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.13.tgz", - "integrity": "sha512-C/BaXcnnvBCmHTpz/VGZ8jgtE2aYlW4hxDhseJAWZb7gqGM/qtCK6iZUb0TyKFf7BOUsBH7Q7fkRsDRhg1XklQ==", + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz", + "integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==", "dependencies": { - "@babel/helper-validator-identifier": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, @@ -47,6 +106,24 @@ "node": ">=6.9.0" } }, + "node_modules/@colors/colors": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.6.0.tgz", + "integrity": "sha512-Ir+AOibqzrIsL6ajt3Rz3LskB7OiMVHqltZmspbW/TJuTVuyOMirVqAkjfY6JISiLHgyNqicAC8AyHHGzNd/dA==", + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@dabh/diagnostics": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@dabh/diagnostics/-/diagnostics-2.0.3.tgz", + "integrity": "sha512-hrlQOIi7hAfzsMqlGSFyVucrx38O+j6wiGOf//H2ecvIEqYN4ADBSS2iLMh5UFyDunCNniUIPk/q3riFv45xRA==", + "dependencies": { + "colorspace": "1.1.x", + "enabled": "2.0.x", + "kuler": "^2.0.0" + } + }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -79,23 +156,39 @@ "node": ">= 8" } }, + "node_modules/@sindresorhus/merge-streams": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/merge-streams/-/merge-streams-1.0.0.tgz", + "integrity": 
"sha512-rUV5WyJrJLoloD4NDN1V1+LDMDWOa4OTsT4yYJwQNpTU6FWxkxHpL7eu4w+DmiH8x/EAM1otkPE1+LaspIbplw==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@types/triple-beam": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/triple-beam/-/triple-beam-1.3.5.tgz", + "integrity": "sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw==" + }, "node_modules/@typespec/compiler": { - "version": "0.49.0-dev.11", - "resolved": "https://registry.npmjs.org/@typespec/compiler/-/compiler-0.49.0-dev.11.tgz", - "integrity": "sha512-SNt6hqu017JhwU3qPpolsGRKgSnb9Wc4FZs5FPQ6i1Ktubtgx9Ac9pxEdSNgOsdoBC3efzbpNCBasLGms0V+Fw==", + "version": "0.52.0", + "resolved": "https://registry.npmjs.org/@typespec/compiler/-/compiler-0.52.0.tgz", + "integrity": "sha512-36cZ5RWxRjL4SUe41KjPh3j3RQibpUoOzHcSllQJ3ByTSZdXv1zckMHLiRfaAbTXUADSAn2GMs4ZO3s8GdOGIQ==", "dependencies": { - "@babel/code-frame": "~7.22.13", + "@babel/code-frame": "~7.23.5", "ajv": "~8.12.0", - "change-case": "~4.1.2", - "globby": "~13.2.2", + "change-case": "~5.3.0", + "globby": "~14.0.0", "mustache": "~4.2.0", "picocolors": "~1.0.0", - "prettier": "~3.0.3", + "prettier": "~3.1.1", "prompts": "~2.4.2", "semver": "^7.5.4", "vscode-languageserver": "~9.0.0", "vscode-languageserver-textdocument": "~1.0.8", - "yaml": "~2.3.2", + "yaml": "~2.3.4", "yargs": "~17.7.2" }, "bin": { @@ -103,72 +196,70 @@ "tsp-server": "cmd/tsp-server.js" }, "engines": { - "node": ">=16.0.0" + "node": ">=18.0.0" } }, "node_modules/@typespec/http": { - "version": "0.48.0", - "resolved": "https://registry.npmjs.org/@typespec/http/-/http-0.48.0.tgz", - "integrity": "sha512-e+0Y0Ky71flUNZSRzCfoOm8XvXsSYGmQgB9VZFDbLl8mQlXwuTfib4tWrU531TCtZHMnylbXx2wAk5+3uC6b9g==", - "peer": true, + "version": "0.52.1", + "resolved": "https://registry.npmjs.org/@typespec/http/-/http-0.52.1.tgz", + "integrity": 
"sha512-2i7t6eSKi96F/zt1w0yJvhRhubYej0F9o8jDRhPA+TZI6SAxcv/Vyi+lkKnkOcu90HPH7b8T+YNizudb00BO6A==", "engines": { - "node": ">=16.0.0" + "node": ">=18.0.0" }, "peerDependencies": { - "@typespec/compiler": "~0.48.0" + "@typespec/compiler": "~0.52.0" } }, "node_modules/@typespec/openapi": { - "version": "0.49.0-dev.4", - "resolved": "https://registry.npmjs.org/@typespec/openapi/-/openapi-0.49.0-dev.4.tgz", - "integrity": "sha512-qH2borMxQoAdiMDvd88MTvlF2vFZUzusDFtxmKx/GEy+aqkw7pAnR0fqeCbPGR/P8a6slpDchusY/le3608yAQ==", + "version": "0.52.0", + "resolved": "https://registry.npmjs.org/@typespec/openapi/-/openapi-0.52.0.tgz", + "integrity": "sha512-2Otnu9glehxvp6TU7NOHEniBDDKufV03XTmeVGgGEmu/j+cveAMg8lA1/O0RBpS2oHGsCFnMEuPcR8M1c0LI+Q==", "engines": { - "node": ">=16.0.0" + "node": ">=18.0.0" }, "peerDependencies": { - "@typespec/compiler": "~0.48.1 || >=0.49.0-dev <0.49.0", - "@typespec/http": "~0.48.0 || >=0.49.0-dev <0.49.0" + "@typespec/compiler": "~0.52.0", + "@typespec/http": "~0.52.0" } }, "node_modules/@typespec/openapi3": { - "version": "0.49.0-dev.10", - "resolved": "https://registry.npmjs.org/@typespec/openapi3/-/openapi3-0.49.0-dev.10.tgz", - "integrity": "sha512-J9oiVJKv3pTcNIUzftHS676w4LOxvQe6fqAAx37Nql7SJ3AZrqHXwOrlxjMKZHifU7T+V/KZKF8Y6Li4ORPTPw==", + "version": "0.52.0", + "resolved": "https://registry.npmjs.org/@typespec/openapi3/-/openapi3-0.52.0.tgz", + "integrity": "sha512-PPhNdpKQD2iHJemOaRUhnaeFWa4ApW4HtcZI+jrg4hyNSIwDYxL0OwwRohKjRUKM98iacpXvEh+5rKtkPiY2Qw==", "dependencies": { - "yaml": "~2.3.2" + "yaml": "~2.3.4" }, "engines": { - "node": ">=16.0.0" + "node": ">=18.0.0" }, "peerDependencies": { - "@typespec/compiler": "~0.48.1 || >=0.49.0-dev <0.49.0", - "@typespec/http": "~0.48.0 || >=0.49.0-dev <0.49.0", - "@typespec/openapi": "~0.48.0 || >=0.49.0-dev <0.49.0", - "@typespec/versioning": "~0.48.0 || >=0.49.0-dev <0.49.0" + "@typespec/compiler": "~0.52.0", + "@typespec/http": "~0.52.0", + "@typespec/openapi": "~0.52.0", + "@typespec/versioning": 
"~0.52.0" } }, "node_modules/@typespec/rest": { - "version": "0.49.0-dev.3", - "resolved": "https://registry.npmjs.org/@typespec/rest/-/rest-0.49.0-dev.3.tgz", - "integrity": "sha512-/33xOp3N5wtUZ6O+kNssIzCEXR7+fjThtGysnsUL0lS8W3OesCgF9gKZH9fB0beaRlccmzFoRcHSOQLwalkfmg==", + "version": "0.52.0", + "resolved": "https://registry.npmjs.org/@typespec/rest/-/rest-0.52.0.tgz", + "integrity": "sha512-dLsY0fS60IVaAt4eCRcvEqorX/miPVV33du3dETTYYmbHtfEbvBKgTj/m6OH4noey7oaihlvLz5kYyLv8Am7zA==", "engines": { - "node": ">=16.0.0" + "node": ">=18.0.0" }, "peerDependencies": { - "@typespec/compiler": "~0.48.1 || >=0.49.0-dev <0.49.0", - "@typespec/http": "~0.48.0 || >=0.49.0-dev <0.49.0" + "@typespec/compiler": "~0.52.0", + "@typespec/http": "~0.52.0" } }, "node_modules/@typespec/versioning": { - "version": "0.48.0", - "resolved": "https://registry.npmjs.org/@typespec/versioning/-/versioning-0.48.0.tgz", - "integrity": "sha512-WF26vmMPwizhSnjX0ox23nbp7hthtB4cN/J5w1tlryXyp/BXySHoYsJEMK7fviSpj4WdreVXdM6wmRIG33zqig==", - "peer": true, + "version": "0.52.0", + "resolved": "https://registry.npmjs.org/@typespec/versioning/-/versioning-0.52.0.tgz", + "integrity": "sha512-Vr4WHaZiDOxJqRp8/u6X0R45E+rFKEprYmSZX0o5bzetj0cVjOIEbQZvDJCif1Uz0S3K0KKfqf/kYmdYWMJ7Dw==", "engines": { - "node": ">=16.0.0" + "node": ">=18.0.0" }, "peerDependencies": { - "@typespec/compiler": "~0.48.0" + "@typespec/compiler": "~0.52.0" } }, "node_modules/ajv": { @@ -205,6 +296,11 @@ "node": ">=4" } }, + "node_modules/async": { + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.5.tgz", + "integrity": "sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==" + }, "node_modules/braces": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", @@ -216,25 +312,6 @@ "node": ">=8" } }, - "node_modules/camel-case": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", - 
"integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", - "dependencies": { - "pascal-case": "^3.1.2", - "tslib": "^2.0.3" - } - }, - "node_modules/capital-case": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/capital-case/-/capital-case-1.0.4.tgz", - "integrity": "sha512-ds37W8CytHgwnhGGTi88pcPyR15qoNkOpYwmMMfnWqqWgESapLqvDx6huFjQ5vqWSn2Z06173XNA7LtMOeUh1A==", - "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3", - "upper-case-first": "^2.0.2" - } - }, "node_modules/chalk": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", @@ -249,23 +326,9 @@ } }, "node_modules/change-case": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/change-case/-/change-case-4.1.2.tgz", - "integrity": "sha512-bSxY2ws9OtviILG1EiY5K7NNxkqg/JnRnFxLtKQ96JaviiIxi7djMrSd0ECT9AC+lttClmYwKw53BWpOMblo7A==", - "dependencies": { - "camel-case": "^4.1.2", - "capital-case": "^1.0.4", - "constant-case": "^3.0.4", - "dot-case": "^3.0.4", - "header-case": "^2.0.4", - "no-case": "^3.0.4", - "param-case": "^3.0.4", - "pascal-case": "^3.1.2", - "path-case": "^3.0.4", - "sentence-case": "^3.0.4", - "snake-case": "^3.0.4", - "tslib": "^2.0.3" - } + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/change-case/-/change-case-5.3.0.tgz", + "integrity": "sha512-Eykca0fGS/xYlx2fG5NqnGSnsWauhSGiSXYhB1kO6E909GUfo8S54u4UZNS7lMJmgZumZ2SUpWaoLgAcfQRICg==" }, "node_modules/cliui": { "version": "8.0.1", @@ -280,6 +343,15 @@ "node": ">=12" } }, + "node_modules/color": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/color/-/color-3.2.1.tgz", + "integrity": "sha512-aBl7dZI9ENN6fUGC7mWpMTPNHmWUSNan9tuWN6ahh5ZLNk9baLJOnSMlrQkHcrfFgz2/RigjUVAjdx36VcemKA==", + "dependencies": { + "color-convert": "^1.9.3", + "color-string": "^1.6.0" + } + }, "node_modules/color-convert": { "version": "1.9.3", "resolved": 
"https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", @@ -293,34 +365,22 @@ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" }, - "node_modules/constant-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/constant-case/-/constant-case-3.0.4.tgz", - "integrity": "sha512-I2hSBi7Vvs7BEuJDr5dDHfzb/Ruj3FyvFyh7KLilAjNQw3Be+xgqUBA2W6scVEcL0hL1dwPRtIqEPVUCKkSsyQ==", + "node_modules/color-string": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", + "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3", - "upper-case": "^2.0.2" - } - }, - "node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dependencies": { - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=8" + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" } }, - "node_modules/dot-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", - "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "node_modules/colorspace": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/colorspace/-/colorspace-1.1.4.tgz", + "integrity": "sha512-BgvKJiuVu1igBUF2kEjRCZXol6wiiGbY5ipL/oVPwm0BL9sIpMIzM8IK7vwuxIIzOXMV3Ey5w+vxhm0rR/TN8w==", "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" + "color": "^3.1.3", + "text-hex": "1.0.x" } }, "node_modules/emoji-regex": { @@ -328,6 +388,11 @@ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", "integrity": 
"sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" }, + "node_modules/enabled": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/enabled/-/enabled-2.0.0.tgz", + "integrity": "sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ==" + }, "node_modules/escalade": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", @@ -350,9 +415,9 @@ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" }, "node_modules/fast-glob": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz", - "integrity": "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==", + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", @@ -365,13 +430,18 @@ } }, "node_modules/fastq": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", - "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", + "version": "1.17.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.0.tgz", + "integrity": "sha512-zGygtijUMT7jnk3h26kUms3BkSDp4IfIKjmnqI2tvx6nuBfiF1UqOxbnLfzdv+apBy+53oaImsKtMw/xYbW+1w==", "dependencies": { "reusify": "^1.0.4" } }, + "node_modules/fecha": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/fecha/-/fecha-4.2.3.tgz", + "integrity": "sha512-OP2IUU6HeYKJi3i0z4A19kHMQoLVs4Hc+DPqqxI2h/DPZHTm/vjsfC6P0b4jCMy14XizLBqvndQ+UilD7707Jw==" + }, "node_modules/fill-range": { "version": "7.0.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", @@ -383,6 
+453,11 @@ "node": ">=8" } }, + "node_modules/fn.name": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fn.name/-/fn.name-1.1.0.tgz", + "integrity": "sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw==" + }, "node_modules/get-caller-file": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", @@ -403,18 +478,19 @@ } }, "node_modules/globby": { - "version": "13.2.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", - "integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-14.0.0.tgz", + "integrity": "sha512-/1WM/LNHRAOH9lZta77uGbq0dAEQM+XjNesWwhlERDVenqothRbnzTrL3/LrIoEPPjeUHC3vrS6TwoyxeHs7MQ==", "dependencies": { - "dir-glob": "^3.0.1", - "fast-glob": "^3.3.0", + "@sindresorhus/merge-streams": "^1.0.0", + "fast-glob": "^3.3.2", "ignore": "^5.2.4", - "merge2": "^1.4.1", - "slash": "^4.0.0" + "path-type": "^5.0.0", + "slash": "^5.1.0", + "unicorn-magic": "^0.1.0" }, "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + "node": ">=18" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -428,23 +504,24 @@ "node": ">=4" } }, - "node_modules/header-case": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/header-case/-/header-case-2.0.4.tgz", - "integrity": "sha512-H/vuk5TEEVZwrR0lp2zed9OCo1uAILMlx0JEMgC26rzyJJ3N1v6XkwHHXJQdR2doSjcGPM6OKPYoJgf0plJ11Q==", - "dependencies": { - "capital-case": "^1.0.4", - "tslib": "^2.0.3" - } - }, "node_modules/ignore": { - "version": "5.2.4", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", - "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": 
"sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", "engines": { "node": ">= 4" } }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/is-arrayish": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", + "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" + }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -480,6 +557,17 @@ "node": ">=0.12.0" } }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", @@ -490,6 +578,11 @@ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" }, + "node_modules/json-serialize-refs": { + "version": "0.1.0-0", + "resolved": "https://registry.npmjs.org/json-serialize-refs/-/json-serialize-refs-0.1.0-0.tgz", + "integrity": "sha512-SnNMfW2RRPDXIMKa8zdLb59UjMSI1UFZCtIb8ae68GcZ0a6x8b77lIWqqTOdq1azzmkXupD6UWriPLd0JCrFng==" + }, "node_modules/kleur": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", @@ -498,12 +591,25 @@ "node": ">=6" } }, - "node_modules/lower-case": { - "version": "2.0.2", - 
"resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", - "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "node_modules/kuler": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/kuler/-/kuler-2.0.0.tgz", + "integrity": "sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==" + }, + "node_modules/logform": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/logform/-/logform-2.6.0.tgz", + "integrity": "sha512-1ulHeNPp6k/LD8H91o7VYFBng5i1BDE7HoKxVbZiGFidS1Rj65qcywLxX+pVfAPoQJEjRdvKcusKwOupHCVOVQ==", "dependencies": { - "tslib": "^2.0.3" + "@colors/colors": "1.6.0", + "@types/triple-beam": "^1.3.2", + "fecha": "^4.2.0", + "ms": "^2.1.1", + "safe-stable-stringify": "^2.3.1", + "triple-beam": "^1.3.0" + }, + "engines": { + "node": ">= 12.0.0" } }, "node_modules/lru-cache": { @@ -537,6 +643,11 @@ "node": ">=8.6" } }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, "node_modules/mustache": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/mustache/-/mustache-4.2.0.tgz", @@ -545,48 +656,23 @@ "mustache": "bin/mustache" } }, - "node_modules/no-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", - "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", - "dependencies": { - "lower-case": "^2.0.2", - "tslib": "^2.0.3" - } - }, - "node_modules/param-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", - "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", - "dependencies": { - "dot-case": "^3.0.4", - "tslib": "^2.0.3" - 
} - }, - "node_modules/pascal-case": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", - "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", - "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/path-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/path-case/-/path-case-3.0.4.tgz", - "integrity": "sha512-qO4qCFjXqVTrcbPt/hQfhTQ+VhFsqNKOPtytgNKkKxSoEp3XPUQ8ObFuePylOIok5gjn69ry8XiULxCwot3Wfg==", + "node_modules/one-time": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/one-time/-/one-time-1.0.0.tgz", + "integrity": "sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g==", "dependencies": { - "dot-case": "^3.0.4", - "tslib": "^2.0.3" + "fn.name": "1.x.x" } }, "node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-5.0.0.tgz", + "integrity": "sha512-5HviZNaZcfqP95rwpv+1HDgUamezbqdSYTyzjTvwtJSnIH+3vnbmWsItli8OFEndS984VT55M3jduxZbX351gg==", "engines": { - "node": ">=8" + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/picocolors": { @@ -605,10 +691,19 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/pluralize": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-8.0.0.tgz", + "integrity": "sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==", + "peer": true, + "engines": { + "node": ">=4" + } + }, "node_modules/prettier": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.0.3.tgz", 
- "integrity": "sha512-L/4pUDMxcNa8R/EthV08Zt42WBO4h1rarVtK0K+QJG0X187OLo7l699jWw0GKuwzkPQ//jMFA/8Xm6Fh3J/DAg==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.1.1.tgz", + "integrity": "sha512-22UbSzg8luF4UuZtzgiUOfcGM8s4tjBv6dJRT7j275NXsy2jb4aJa4NNveul5x4eqlF1wuhuR2RElK71RvmVaw==", "bin": { "prettier": "bin/prettier.cjs" }, @@ -658,6 +753,19 @@ } ] }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, "node_modules/require-directory": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", @@ -705,6 +813,33 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safe-stable-stringify": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-2.4.3.tgz", + "integrity": "sha512-e2bDA2WJT0wxseVd4lsDP4+3ONX6HpMXQa1ZhFQ7SU+GjvORCmShbCMltrtIDfkYhVHrOcPtj+KhmDBdPdZD1g==", + "engines": { + "node": ">=10" + } + }, "node_modules/semver": { "version": "7.5.4", "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", @@ -719,14 +854,12 @@ "node": ">=10" } }, - "node_modules/sentence-case": 
{ - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/sentence-case/-/sentence-case-3.0.4.tgz", - "integrity": "sha512-8LS0JInaQMCRoQ7YUytAo/xUu5W2XnQxV2HI/6uM6U7CITS1RqPElr30V6uIqyMKM9lJGRVFy5/4CuzcixNYSg==", + "node_modules/simple-swizzle": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", + "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==", "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3", - "upper-case-first": "^2.0.2" + "is-arrayish": "^0.3.1" } }, "node_modules/sisteransi": { @@ -735,23 +868,30 @@ "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" }, "node_modules/slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-5.1.0.tgz", + "integrity": "sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==", "engines": { - "node": ">=12" + "node": ">=14.16" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/snake-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/snake-case/-/snake-case-3.0.4.tgz", - "integrity": "sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==", + "node_modules/stack-trace": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", + "integrity": "sha512-KGzahc7puUKkzyMt+IqAep+TVNbKP+k2Lmwhub39m1AsTSkaDutx56aDCo+HLDzf/D26BIHTJWNiTG1KAJiQCg==", + "engines": { + "node": "*" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + 
"integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", "dependencies": { - "dot-case": "^3.0.4", - "tslib": "^2.0.3" + "safe-buffer": "~5.2.0" } }, "node_modules/string-width": { @@ -789,6 +929,11 @@ "node": ">=4" } }, + "node_modules/text-hex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/text-hex/-/text-hex-1.0.0.tgz", + "integrity": "sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg==" + }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", @@ -800,25 +945,23 @@ "node": ">=8.0" } }, - "node_modules/tslib": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", - "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" - }, - "node_modules/upper-case": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/upper-case/-/upper-case-2.0.2.tgz", - "integrity": "sha512-KgdgDGJt2TpuwBUIjgG6lzw2GWFRCW9Qkfkiv0DxqHHLYJHmtmdUIKcZd8rHgFSjopVTlw6ggzCm1b8MFQwikg==", - "dependencies": { - "tslib": "^2.0.3" + "node_modules/triple-beam": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/triple-beam/-/triple-beam-1.4.1.tgz", + "integrity": "sha512-aZbgViZrg1QNcG+LULa7nhZpJTZSLm/mXnHXnbAbjmN5aSa0y7V+wvv6+4WaBtpISJzThKy+PIPxc1Nq1EJ9mg==", + "engines": { + "node": ">= 14.0.0" } }, - "node_modules/upper-case-first": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/upper-case-first/-/upper-case-first-2.0.2.tgz", - "integrity": "sha512-514ppYHBaKwfJRK/pNC6c/OxfGa0obSnAl106u97Ed0I625Nin96KAjttZF6ZL3e1XLtphxnqrOi9iWgm+u+bg==", - "dependencies": { - "tslib": "^2.0.3" + "node_modules/unicorn-magic": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.1.0.tgz", + "integrity": 
"sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/uri-js": { @@ -829,6 +972,11 @@ "punycode": "^2.1.0" } }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, "node_modules/vscode-jsonrpc": { "version": "8.2.0", "resolved": "https://registry.npmjs.org/vscode-jsonrpc/-/vscode-jsonrpc-8.2.0.tgz", @@ -867,6 +1015,40 @@ "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.4.tgz", "integrity": "sha512-9YXi5pA3XF2V+NUQg6g+lulNS0ncRCKASYdK3Cs7kiH9sVFXWq27prjkC/B8M/xJLRPPRSPCHVMuBTgRNFh2sQ==" }, + "node_modules/winston": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/winston/-/winston-3.11.0.tgz", + "integrity": "sha512-L3yR6/MzZAOl0DsysUXHVjOwv8mKZ71TrA/41EIduGpOOV5LQVodqN+QdQ6BS6PJ/RdIshZhq84P/fStEZkk7g==", + "dependencies": { + "@colors/colors": "^1.6.0", + "@dabh/diagnostics": "^2.0.2", + "async": "^3.2.3", + "is-stream": "^2.0.0", + "logform": "^2.4.0", + "one-time": "^1.0.0", + "readable-stream": "^3.4.0", + "safe-stable-stringify": "^2.3.1", + "stack-trace": "0.0.x", + "triple-beam": "^1.3.0", + "winston-transport": "^4.5.0" + }, + "engines": { + "node": ">= 12.0.0" + } + }, + "node_modules/winston-transport": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.6.0.tgz", + "integrity": "sha512-wbBA9PbPAHxKiygo7ub7BYRiKxms0tpfU2ljtWzb3SjRjv5yl6Ozuy/TkXf00HTAt+Uylo3gSkNwzc4ME0wiIg==", + "dependencies": { + "logform": "^2.3.2", + "readable-stream": "^3.6.0", + "triple-beam": "^1.3.0" + }, + "engines": { + "node": ">= 12.0.0" + } + }, "node_modules/wrap-ansi": { "version": 
"7.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", @@ -927,9 +1109,9 @@ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "node_modules/yaml": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.2.tgz", - "integrity": "sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg==", + "version": "2.3.4", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.4.tgz", + "integrity": "sha512-8aAvwVUSHpfEqTQ4w/KMlf3HcRdt50E5ODIQJBw1fQ5RL34xabzxtUlzTXVqc4rkZsPbvrXKWnABCD7kWSmocA==", "engines": { "node": ">= 14" } diff --git a/package.json b/package.json index eba48058d..f43e693fb 100644 --- a/package.json +++ b/package.json @@ -3,12 +3,13 @@ "version": "0.1.0", "type": "module", "dependencies": { - "@typespec/compiler": "^0.49.0-dev.11", - "@typespec/openapi": "^0.49.0-dev.4", - "@typespec/openapi3": "^0.49.0-dev.10", - "@typespec/rest": "^0.49.0-dev.3", - "@typespec/http": "^0.49.0-dev.0", - "@typespec/versioning": "^0.49.0-dev.0" + "@azure-tools/typespec-csharp": "latest", + "@typespec/compiler": "^0.52.0", + "@typespec/http": "^0.52.0", + "@typespec/openapi": "^0.52.0", + "@typespec/openapi3": "^0.52.0", + "@typespec/rest": "^0.52.0", + "@typespec/versioning": "^0.52.0" }, "private": true } From 587222bd1553d9989848413df093ecf066f7ce2e Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Wed, 7 Feb 2024 10:15:50 -0800 Subject: [PATCH 02/18] Add Assistants API --- assistants/main.tsp | 1 + assistants/meta.tsp | 42 ++++ assistants/models.tsp | 233 ++++++++++++++++++ assistants/operations.tsp | 161 +++++++++++++ main.tsp | 4 + messages/main.tsp | 1 + messages/meta.tsp | 52 ++++ messages/models.tsp | 211 +++++++++++++++++ messages/operations.tsp | 142 +++++++++++ runs/main.tsp | 1 + runs/meta.tsp | 50 ++++ runs/models.tsp | 486 ++++++++++++++++++++++++++++++++++++++ runs/operations.tsp | 185 
+++++++++++++++ threads/main.tsp | 1 + threads/meta.tsp | 22 ++ threads/models.tsp | 55 +++++ threads/operations.tsp | 52 ++++ 17 files changed, 1699 insertions(+) create mode 100644 assistants/main.tsp create mode 100644 assistants/meta.tsp create mode 100644 assistants/models.tsp create mode 100644 assistants/operations.tsp create mode 100644 messages/main.tsp create mode 100644 messages/meta.tsp create mode 100644 messages/models.tsp create mode 100644 messages/operations.tsp create mode 100644 runs/main.tsp create mode 100644 runs/meta.tsp create mode 100644 runs/models.tsp create mode 100644 runs/operations.tsp create mode 100644 threads/main.tsp create mode 100644 threads/meta.tsp create mode 100644 threads/models.tsp create mode 100644 threads/operations.tsp diff --git a/assistants/main.tsp b/assistants/main.tsp new file mode 100644 index 000000000..6a754bcb5 --- /dev/null +++ b/assistants/main.tsp @@ -0,0 +1 @@ +import "./operations.tsp"; \ No newline at end of file diff --git a/assistants/meta.tsp b/assistants/meta.tsp new file mode 100644 index 000000000..fd2481ecb --- /dev/null +++ b/assistants/meta.tsp @@ -0,0 +1,42 @@ +import "./models.tsp"; + +import "@typespec/openapi"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +// TODO: Fill in example here. +@@extension(OpenAI.ListAssistantsResponse, + "x-oaiMeta", + """ + name: List assistants response object + group: chat + example: *list_assistants_example + """ +); + +// TODO: Fill in example here. 
+@@extension(OpenAI.AssistantObject, + "x-oaiMeta", + """ + name: The assistant object + beta: true + example: *create_assistants_example + """ +); + +@@extension(OpenAI.AssistantFileObject, + "x-oaiMeta", + """ + name: The assistant file object + beta: true + example: | + { + "id": "file-abc123", + "object": "assistant.file", + "created_at": 1699055364, + "assistant_id": "asst_abc123" + } + """ +); \ No newline at end of file diff --git a/assistants/models.tsp b/assistants/models.tsp new file mode 100644 index 000000000..358d95d48 --- /dev/null +++ b/assistants/models.tsp @@ -0,0 +1,233 @@ +import "../common/models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +model CreateAssistantRequest { + /** + * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + * see all of your available models, or see our [Model overview](/docs/models/overview) for + * descriptions of them. + */ + `model`: string; + + /** The name of the assistant. The maximum length is 256 characters. */ + @maxLength(256) + name?: string | null; + + /** The description of the assistant. The maximum length is 512 characters. */ + @maxLength(512) + description?: string | null; + + /** The system instructions that the assistant uses. The maximum length is 32768 characters. */ + @maxLength(32768) + instructions?: string | null; + + /** + * A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + * Tools can be of types `code_interpreter`, `retrieval`, or `function`. + */ + + tools?: CreateAssistantRequestTools = []; // TODO: Double-check default empty array + + /** + * A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + * maximum of 20 files attached to the assistant. Files are ordered by their creation date in + * ascending order. + */ + @maxItems(20) + file_ids?: string[] = []; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maxium of 512 characters long. + */ + @extension("x-oaiTypeLabel", "map") + metadata?: Record | null; +} + +model ModifyAssistantRequest { + /** + * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + * see all of your available models, or see our [Model overview](/docs/models/overview) for + * descriptions of them. + */ + `model`?: string; + + /** The name of the assistant. The maximum length is 256 characters. */ + @maxLength(256) + name?: string | null; + + /** The description of the assistant. The maximum length is 512 characters. */ + @maxLength(512) + description?: string | null; + + /** The system instructions that the assistant uses. The maximum length is 32768 characters. */ + @maxLength(32768) + instructions?: string | null; + + /** + * A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + * Tools can be of types `code_interpreter`, `retrieval`, or `function`. + */ + @maxItems(128) + tools?: CreateAssistantRequestTools[] = []; // TODO: Double-check default empty array + + /** + * A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + * maximum of 20 files attached to the assistant. Files are ordered by their creation date in + * ascending order. + */ + @maxItems(20) + file_ids?: string[] = []; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maxium of 512 characters long. 
+ */ + @extension("x-oaiTypeLabel", "map") + metadata?: Record | null; +} + +model CreateAssistantFileRequest { + /** + * A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should + * use. Useful for tools like `retrieval` and `code_interpreter` that can access files. + */ + file_id: string; +} + +model ListAssistantsResponse { + object: "list"; + data: AssistantObject[]; + first_id: string; + last_id: string; + has_more: boolean; +} + +model DeleteAssistantResponse { + id: string; + deleted: boolean; + object: "assistant.deleted"; +} + +model ListAssistantFilesResponse { + object: "list"; + data: AssistantFileObject[]; + first_id: string; + last_id: string; + has_more: boolean; +} + +/** + * Deletes the association between the assistant and the file, but does not delete the + * [File](/docs/api-reference/files) object itself. + */ +model DeleteAssistantFileResponse { + id: string; + deleted: boolean; + object: "assistant.file.deleted"; +} + +@maxItems(128) +model CreateAssistantRequestTools is CreateAssistantRequestTool[]; + +@oneOf +@extension("x-oaiExpandable", true) +union CreateAssistantRequestTool { + AssistantToolsCode, + AssistantToolsRetrieval, + AssistantToolsFunction +} + +model AssistantToolsCode { + /** The type of tool being defined: `code_interpreter` */ + type: "code_interpreter"; +} + +model AssistantToolsRetrieval { + /** The type of tool being defined: `retrieval` */ + type: "retrieval"; +} + +model AssistantToolsFunction { + /** The type of tool being defined: `function` */ + type: "function"; + + function: FunctionObject; +} + +/** Represents an `assistant` that can call the model and use tools. */ +model AssistantObject { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + + /** The object type, which is always `assistant`. */ + object: "assistant"; + + /** The Unix timestamp (in seconds) for when the assistant was created. 
*/ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** The name of the assistant. The maximum length is 256 characters. */ + @maxLength(256) + name: string | null; + + /** The description of the assistant. The maximum length is 512 characters. */ + @maxLength(512) + description: string | null; + + /** + * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + * see all of your available models, or see our [Model overview](/docs/models/overview) for + * descriptions of them. + */ + `model`: string; + + /** The system instructions that the assistant uses. The maximum length is 32768 characters. */ + @maxLength(32768) + instructions: string | null; + + /** + * A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + * Tools can be of types `code_interpreter`, `retrieval`, or `function`. + */ + @maxItems(128) + tools: CreateAssistantRequestTools[] = []; // TODO: Double-check default empty array + + /** + * A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + * maximum of 20 files attached to the assistant. Files are ordered by their creation date in + * ascending order. + */ + @maxItems(20) + file_ids: string[] = []; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maxium of 512 characters long. + */ + @extension("x-oaiTypeLabel", "map") + metadata: Record | null; +} + +/** A list of [Files](/docs/api-reference/files) attached to an `assistant`. */ +model AssistantFileObject { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + + /** The object type, which is always `assistant.file`. */ + object: "assistant.file"; + + /** The Unix timestamp (in seconds) for when the assistant file was created. 
*/ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** The assistant ID that the file is attached to. */ + assistant_id: string; +} \ No newline at end of file diff --git a/assistants/operations.tsp b/assistants/operations.tsp new file mode 100644 index 000000000..e98f03e7e --- /dev/null +++ b/assistants/operations.tsp @@ -0,0 +1,161 @@ +import "@typespec/http"; +import "@typespec/openapi"; + +import "../common/errors.tsp"; +import "./models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@route("/assistants") +interface Assistants { + @post + @operationId("createAssistant") + @tag("OpenAI") + @summary("Create an assistant with a model and instructions.") + createAssistant( + @body assistant: CreateAssistantRequest, + ): AssistantObject | ErrorResponse; + + @get + @operationId("listAssistants") + @tag("OpenAI") + @summary("Returns a list of assistants.") + listFiles( + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + * default is 20. + */ + @query limit?: int32 = 20; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + * for descending order. + */ + @query order?: "asc" | "desc" = "desc"; + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include after=obj_foo in order to fetch the next page of the list. + */ + @query after?: string; + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include before=obj_foo in order to fetch the previous page of the list. 
+ */ + @query before?: string; + ): ListAssistantsResponse | ErrorResponse; + + @route("{assistant_id}") + @get + @operationId("getAssistant") + @tag("OpenAI") + @summary("Retrieves an assistant.") + getAssistant( + /** The ID of the assistant to retrieve. */ + @path assistant_id: string, + ): AssistantObject | ErrorResponse; + + @route("{assistant_id}") + @post + @operationId("modifyAssistant") + @tag("OpenAI") + @summary("Modifies an assistant.") + modifyAssistant( + /** The ID of the assistant to modify. */ + @path assistant_id: string, + + @body assistant: ModifyAssistantRequest, + ): AssistantObject | ErrorResponse; + + @route("{assistant_id}") + @delete + @operationId("deleteAssistant") + @tag("OpenAI") + @summary("Delete an assistant.") + deleteAssistant( + /** The ID of the assistant to delete. */ + @path assistant_id: string, + ): DeleteAssistantResponse | ErrorResponse; + + @route("{assistant_id}/files") + @post + @operationId("createAssistantFile") + @tag("OpenAI") + @summary(""" + Create an assistant file by attaching a [File](/docs/api-reference/files) to a + [assistant](/docs/api-reference/assistants). + """) + createAssistantFile( + /** The ID of the assistant for which to create a file. */ + @path assistant_id: string, + @body file: CreateAssistantFileRequest, + ): AssistantFileObject | ErrorResponse; + + @route("{assistant_id}/files") + @get + @operationId("listAssistantFiles") + @tag("OpenAI") + @summary("Returns a list of assistant files.") + listAssistantFiles( + /** The ID of the assistant the file belongs to. */ + @path assistant_id: string, + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + * default is 20. + */ + @query limit?: int32 = 20; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + * for descending order. + */ + @query order?: "asc" | "desc" = "desc"; + + /** + * A cursor for use in pagination. 
`after` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include after=obj_foo in order to fetch the next page of the list. + */ + @query after?: string; + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include before=obj_foo in order to fetch the previous page of the list. + */ + @query before?: string; + ): ListAssistantFilesResponse | ErrorResponse; + + @route("{assistant_id}/files/{file_id}") + @get + @operationId("getAssistantFile") + @tag("OpenAI") + @summary("Retrieves an assistant file.") + getAssistantFile( + /** The ID of the assistant the file belongs to. */ + @path assistant_id: string, + + /** The ID of the file we're getting. */ + @path file_id: string, + ): AssistantFileObject | ErrorResponse; + + @route("{assistant_id}/files/{file_id}") + @delete + @operationId("deleteAssistantFile") + @tag("OpenAI") + @summary("Delete an assistant file.") + deleteAssistantFile( + /** The ID of the assistant the file belongs to. */ + @path assistant_id: string, + + /** The ID of the file to delete. 
*/ + @path file_id: string, + ): DeleteAssistantFileResponse | ErrorResponse; +} diff --git a/main.tsp b/main.tsp index fe7b08d3a..604a75f8c 100644 --- a/main.tsp +++ b/main.tsp @@ -3,14 +3,18 @@ import "@typespec/openapi3"; import "@typespec/openapi"; import "./audio"; +import "./assistants"; import "./chat"; import "./completions"; import "./embeddings"; import "./files"; import "./fine-tuning"; import "./images"; +import "./messages"; import "./models"; import "./moderations"; +import "./runs"; +import "./threads"; using TypeSpec.Http; diff --git a/messages/main.tsp b/messages/main.tsp new file mode 100644 index 000000000..6a754bcb5 --- /dev/null +++ b/messages/main.tsp @@ -0,0 +1 @@ +import "./operations.tsp"; \ No newline at end of file diff --git a/messages/meta.tsp b/messages/meta.tsp new file mode 100644 index 000000000..682702c76 --- /dev/null +++ b/messages/meta.tsp @@ -0,0 +1,52 @@ +import "./models.tsp"; + +import "@typespec/openapi"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@@extension(OpenAI.MessageObject, + "x-oaiMeta", + """ + name: The message object + beta: true + example: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1698983503, + "thread_id": "thread_abc123", + "role": "assistant", + "content": [ + { + "type": "text", + "text": { + "value": "Hi! 
How can I help you today?", + "annotations": [] + } + } + ], + "file_ids": [], + "assistant_id": "asst_abc123", + "run_id": "run_abc123", + "metadata": {} + } + """ +); + +@@extension(OpenAI.MessageFileObject, + "x-oaiMeta", + """ + name: The message file object + beta: true + example: | + { + "id": "file-abc123", + "object": "thread.message.file", + "created_at": 1698107661, + "message_id": "message_QLoItBbqwyAJEzlTy4y9kOMM", + "file_id": "file-abc123" + } + """ +); \ No newline at end of file diff --git a/messages/models.tsp b/messages/models.tsp new file mode 100644 index 000000000..075bef16d --- /dev/null +++ b/messages/models.tsp @@ -0,0 +1,211 @@ +import "../common/models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +model CreateMessageRequest { + /** The role of the entity that is creating the message. Currently only `user` is supported. */ + role: "user"; + + /** The content of the message. */ + @minLength(1) + @maxLength(32768) + content: string; + + /** + * A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a + * maximum of 10 files attached to a message. Useful for tools like `retrieval` and + * `code_interpreter` that can access and use files. + */ + @minItems(1) + @maxItems(10) + file_ids?: string[] = []; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maxium of 512 characters long. + */ + @extension("x-oaiTypeLabel", "map") + metadata?: Record | null; +} + +model ModifyMessageRequest { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maxium of 512 characters long. 
+ */ + @extension("x-oaiTypeLabel", "map") + metadata?: Record | null; +} + +model ListMessagesResponse { + object: "list"; + data: MessageObject[]; + first_id: string; + last_id: string; + has_more: boolean; +} + +model ListMessageFilesResponse { + object: "list"; + data: MessageFileObject[]; + first_id: string; + last_id: string; + has_more: boolean; +} + +model MessageObject { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + + /** The object type, which is always `thread.message`. */ + object: "thread.message"; + + /** The Unix timestamp (in seconds) for when the message was created. */ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** The [thread](/docs/api-reference/threads) ID that this message belongs to. */ + thread_id: string; + + /** The entity that produced the message. One of `user` or `assistant`. */ + role: "user" | "assistant"; + + /** The content of the message in array of text and/or images. */ + content: MessageObjectContent[]; + + /** + * If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this + * message. + */ + assistant_id: string | null; + + /** + * If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of + * this message. + */ + run_id: string | null; + + /** + * A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for + * tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be + * attached to a message. + */ + @maxItems(10) + file_ids: string[] = []; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maxium of 512 characters long. 
+ */ + @extension("x-oaiTypeLabel", "map") + metadata: Record | null; +} + +@oneOf +@extension("x-oaiExpandable", true) +union MessageObjectContent { + MessageContentImageFileObject, + MessageContentTextObject, +} + +/** References an image [File](/docs/api-reference/files) in the content of a message. */ +model MessageContentImageFileObject { + /** Always `image_file`. */ + type: "image_file"; + + image_file: { + /** The [File](/docs/api-reference/files) ID of the image in the message content. */ + file_id: string; + } +} + +model MessageContentTextObject { + /** Always `text`. */ + type: "text"; + + text: { + /** The data that makes up the text. */ + value: string; + + annotations: MessageContentTextObjectAnnotations[]; + } +} + +@oneOf +@extension("x-oaiExpandable", true) +union MessageContentTextObjectAnnotations { + MessageContentTextAnnotationsFileCitationObject, + MessageContentTextAnnotationsFilePathObject, +} + +/** + * A citation within the message that points to a specific quote from a specific File associated + * with the assistant or the message. Generated when the assistant uses the "retrieval" tool to + * search files. + */ +model MessageContentTextAnnotationsFileCitationObject { + /** Always `file_citation`. */ + type: "file_citation"; + + /** The text in the message content that needs to be replaced. */ + text: string; + + file_citation: { + /** The ID of the specific File the citation is from. */ + file_id: string; + + /** The specific quote in the file. */ + quote: string; + }; + + @minValue(0) + start_index: safeint; + + @minValue(0) + end_index: safeint; +} + +/** + * A URL for the file that's generated when the assistant used the `code_interpreter` tool to + * generate a file. + */ +model MessageContentTextAnnotationsFilePathObject { + /** Always `file_path`. */ + type: "file_path"; + + /** The text in the message content that needs to be replaced. */ + text: string; + + file_path: { + /** The ID of the file that was generated. 
*/ + file_id: string; + }; + + @minValue(0) + start_index: safeint; + + @minValue(0) + end_index: safeint; +} + +/** A list of files attached to a `message`. */ +model MessageFileObject { + /** TThe identifier, which can be referenced in API endpoints. */ + id: string; + + /** The object type, which is always `thread.message.file`. */ + object: "thread.message.file"; + + /** The Unix timestamp (in seconds) for when the message file was created. */ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. */ + message_id: string; +} \ No newline at end of file diff --git a/messages/operations.tsp b/messages/operations.tsp new file mode 100644 index 000000000..3cdf4ea84 --- /dev/null +++ b/messages/operations.tsp @@ -0,0 +1,142 @@ +import "@typespec/http"; +import "@typespec/openapi"; + +import "../common/errors.tsp"; +import "./models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@route("threads/{thread_id}/messages") +interface Messages { + @post + @operationId("createMessage") + @tag("OpenAI") + @summary("Create a message.") + createMessage( + /** The ID of the [thread](/docs/api-reference/threads) to create a message for. */ + @path thread_id: string, + + @body message: CreateMessageRequest, + ): MessageObject | ErrorResponse; + + @get + @operationId("listMessages") + @tag("OpenAI") + @summary("Returns a list of messages for a given thread.") + listMessages( + /** The ID of the [thread](/docs/api-reference/threads) the messages belong to. */ + @path thread_id: string, + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + * default is 20. + */ + @query limit?: int32 = 20; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + * for descending order. 
+ */ + @query order?: "asc" | "desc" = "desc"; + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include after=obj_foo in order to fetch the next page of the list. + */ + @query after?: string; + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include before=obj_foo in order to fetch the previous page of the list. + */ + @query before?: string; + ): ListMessagesResponse | ErrorResponse; + + @route("{message_id}") + @get + @operationId("getMessage") + @tag("OpenAI") + @summary("Retrieve a message.") + getMessage( + /** The ID of the [thread](/docs/api-reference/threads) to which this message belongs. */ + @path thread_id: string, + + /** The ID of the message to retrieve. */ + @path message_id: string, + ): MessageObject | ErrorResponse; + + @route("{message_id}") + @post + @operationId("modifyMessage") + @tag("OpenAI") + @summary("Modifies a message.") + modifyMessage( + /** The ID of the thread to which this message belongs. */ + @path thread_id: string, + + /** The ID of the message to modify. */ + @path message_id: string, + + @body message: ModifyMessageRequest, + ): MessageObject | ErrorResponse; + + @route("{message_id}/files") + @get + @operationId("listMessageFiles") + @tag("OpenAI") + @summary("Returns a list of message files.") + listMessageFiles( + /** The ID of the thread that the message and files belong to. */ + @path thread_id: string, + + /** The ID of the message that the files belongs to. */ + @path message_id: string, + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + * default is 20. 
+ */ + @query limit?: int32 = 20; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + * for descending order. + */ + @query order?: "asc" | "desc" = "desc"; + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include after=obj_foo in order to fetch the next page of the list. + */ + @query after?: string; + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include before=obj_foo in order to fetch the previous page of the list. + */ + @query before?: string; + ): ListMessageFilesResponse | ErrorResponse; + + @route("{message_id}/files/{file_id}") + @get + @operationId("getMessageFile") + @tag("OpenAI") + @summary("RRetrieves a message file.") + getMessageFile( + /** The ID of the thread to which the message and File belong. */ + @path thread_id: string, + + /** The ID of the message the file belongs to. */ + @path message_id: string, + + /** The ID of the file being retrieved. 
*/ + @path file_id: string, + ): MessageFileObject | ErrorResponse; +} diff --git a/runs/main.tsp b/runs/main.tsp new file mode 100644 index 000000000..6a754bcb5 --- /dev/null +++ b/runs/main.tsp @@ -0,0 +1 @@ +import "./operations.tsp"; \ No newline at end of file diff --git a/runs/meta.tsp b/runs/meta.tsp new file mode 100644 index 000000000..6819970c2 --- /dev/null +++ b/runs/meta.tsp @@ -0,0 +1,50 @@ +import "./models.tsp"; + +import "@typespec/openapi"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@@extension(OpenAI.RunObject, + "x-oaiMeta", + """ + name: The run object + beta: true + example: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1698107661, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699073476, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699073498, + "last_error": null, + "model": "gpt-4", + "instructions": null, + "tools": [{"type": "retrieval"}, {"type": "code_interpreter"}], + "file_ids": [], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + """ +); + +// TODO: Fill in example here. +@@extension(OpenAI.RunStepObject, + "x-oaiMeta", + """ + name: The run step object + beta: true + example: *run_step_object_example + """ +); \ No newline at end of file diff --git a/runs/models.tsp b/runs/models.tsp new file mode 100644 index 000000000..113e43124 --- /dev/null +++ b/runs/models.tsp @@ -0,0 +1,486 @@ +import "../common/models.tsp"; +import "../assistants/models.tsp"; +import "../threads/models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +model CreateRunRequest { + /** The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. */ + assistant_id: string; + + /** + * The ID of the [Model](/docs/api-reference/models) to be used to execute this run. 
If a value + * is provided here, it will override the model associated with the assistant. If not, the model + * associated with the assistant will be used. */ + `model`?: string | null; + + /** + * Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. + * This is useful for modifying the behavior on a per-run basis. + */ + instructions?: string | null; + + /** + * Appends additional instructions at the end of the instructions for the run. This is useful for + * modifying the behavior on a per-run basis without overriding other instructions. + */ + additional_instructions?: string | null; + + /** + * Override the tools the assistant can use for this run. This is useful for modifying the + * behavior on a per-run basis. + */ + tools?: CreateRunRequestTools | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maxium of 512 characters long. + */ + @extension("x-oaiTypeLabel", "map") + metadata?: Record | null; +} + +model ModifyRunRequest { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maxium of 512 characters long. + */ + @extension("x-oaiTypeLabel", "map") + metadata?: Record | null; +} + +model CreateThreadAndRunRequest { + /** The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. */ + assistant_id: string; + + /** If no thread is provided, an empty thread will be created. */ + thread?: CreateThreadRequest; + + /** + * The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is + * provided here, it will override the model associated with the assistant. 
If not, the model
+   * associated with the assistant will be used.
+   */
+  `model`?: string | null;
+
+  /**
+   * Override the default system message of the assistant. This is useful for modifying the behavior
+   * on a per-run basis.
+   */
+  instructions?: string | null;
+
+  /**
+   * Override the tools the assistant can use for this run. This is useful for modifying the
+   * behavior on a per-run basis.
+   */
+  tools?: CreateRunRequestTools | null;
+
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+   * additional information about the object in a structured format. Keys can be a maximum of 64
+   * characters long and values can be a maxium of 512 characters long.
+   */
+  @extension("x-oaiTypeLabel", "map")
+  metadata?: Record | null;
+}
+
+model SubmitToolOutputsRunRequest {
+  /** A list of tools for which the outputs are being submitted. */
+  tool_outputs: {
+    /**
+     * The ID of the tool call in the `required_action` object within the run object the output is
+     * being submitted for. */
+    tool_call_id?: string;
+
+    /** The output of the tool call to be submitted to continue the run. */
+    output?: string;
+  }[]
+}
+
+model ListRunsResponse {
+  object: "list";
+  data: RunObject[];
+  first_id: string;
+  last_id: string;
+  has_more: boolean;
+}
+
+model ListRunStepsResponse {
+  object: "list";
+  data: RunStepObject[];
+  first_id: string;
+  last_id: string;
+  has_more: boolean;
+}
+
+@maxItems(20)
+model CreateRunRequestTools is CreateRunRequestTool[];
+
+@oneOf
+@extension("x-oaiExpandable", true)
+union CreateRunRequestTool {
+  AssistantToolsCode,
+  AssistantToolsRetrieval,
+  AssistantToolsFunction
+}
+
+@oneOf
+@extension("x-oaiExpandable", true)
+union RunStepDetails {
+  RunStepDetailsMessageCreationObject,
+  RunStepDetailsToolCallsObject,
+}
+
+/** Details of the message creation by the run step. */
+model RunStepDetailsMessageCreationObject {
+  /** Details of the message creation by the run step.
*/ + type: "message_creation"; + + message_creation: { + /** The ID of the message that was created by this run step. */ + message_id: string; + } +} + +/** Details of the tool call. */ +model RunStepDetailsToolCallsObject { + /** Always `tool_calls`. */ + type: "tool_calls"; + + /** + * An array of tool calls the run step was involved in. These can be associated with one of three + * types of tools: `code_interpreter`, `retrieval`, or `function`. + */ + tool_calls: RunStepDetailsToolCallsObjectToolCalls; +} + +model RunStepDetailsToolCallsObjectToolCalls is RunStepDetailsToolCallsObjectToolCall[]; + +@oneOf +@extension("x-oaiExpandable", true) +union RunStepDetailsToolCallsObjectToolCall { + RunStepDetailsToolCallsCodeObject, + RunStepDetailsToolCallsRetrievalObject, + RunStepDetailsToolCallsFunctionObject, +} + +/** Details of the Code Interpreter tool call the run step was involved in. */ +model RunStepDetailsToolCallsCodeObject { + /** The ID of the tool call. */ + id: string; + + /** + * The type of tool call. This is always going to be `code_interpreter` for this type of tool + * call. + */ + type: "code_interpreter"; + + /** The Code Interpreter tool call definition. */ + code_interpreter: { + /** The input to the Code Interpreter tool call. */ + input: string; + + /** + * The outputs from the Code Interpreter tool call. Code Interpreter can output one or more + * items, including text (`logs`) or images (`image`). Each of these are represented by a + * different object type. + */ + outputs: RunStepDetailsToolCallsCodeOutputs; + } +} + +model RunStepDetailsToolCallsCodeOutputs is RunStepDetailsToolCallsCodeOutput[]; + +@oneOf +@extension("x-oaiExpandable", true) +union RunStepDetailsToolCallsCodeOutput { + RunStepDetailsToolCallsCodeOutputLogsObject, + RunStepDetailsToolCallsCodeOutputImageObject, +} + +/** Text output from the Code Interpreter tool call as part of a run step. */ +model RunStepDetailsToolCallsCodeOutputLogsObject { + /** Always `logs`. 
*/ + type: "logs"; + + /** The text output from the Code Interpreter tool call. */ + logs: string; +} + +model RunStepDetailsToolCallsCodeOutputImageObject { + /** Always `image`. */ + type: "image"; + + image: { + /** The [file](/docs/api-reference/files) ID of the image. */ + file_id: string; + } +} + +model RunStepDetailsToolCallsRetrievalObject { + /** The ID of the tool call object. */ + id: string; + + /** The type of tool call. This is always going to be `retrieval` for this type of tool call. */ + type: "retrieval"; + + /** For now, this is always going to be an empty object. */ + @extension("x-oaiTypeLabel", "map") + retrieval: { }; // TODO: Is this the appropriate way to represent an empty object? +} + +model RunStepDetailsToolCallsFunctionObject { + /** The ID of the tool call object. */ + id: string; + + /** The type of tool call. This is always going to be `function` for this type of tool call. */ + type: "function"; + + /** The definition of the function that was called. */ + function: { + /** The name of the function. */ + name: string; + + /** The arguments passed to the function. */ + arguments: string; + + /** + * The output of the function. This will be `null` if the outputs have not been + * [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + */ + output: string | null; + } +} + +/** + * Usage statistics related to the run. This value will be `null` if the run is not in a terminal + * state (i.e. `in_progress`, `queued`, etc.). + */ +model RunCompletionUsage { + /** Number of completion tokens used over the course of the run. */ + completion_tokens: safeint; + + /** Number of prompt tokens used over the course of the run. */ + prompt_tokens: safeint; + + /** Total number of tokens used (prompt + completion). */ + total_tokens: safeint; +} + +/** + * Usage statistics related to the run step. This value will be `null` while the run step's status + * is `in_progress`. 
+ */
+model RunStepCompletionUsage {
+  /** Number of completion tokens used over the course of the run step. */
+  completion_tokens: safeint;
+
+  /** Number of prompt tokens used over the course of the run step. */
+  prompt_tokens: safeint;
+
+  /** Total number of tokens used (prompt + completion). */
+  total_tokens: safeint;
+}
+
+/** Represents an execution run on a [thread](/docs/api-reference/threads). */
+model RunObject {
+  /** The identifier, which can be referenced in API endpoints. */
+  id: string;
+
+  /** The object type, which is always `thread.run`. */
+  object: "thread.run";
+
+  /** The Unix timestamp (in seconds) for when the run was created. */
+  @encode("unixTimestamp", int32)
+  created_at: utcDateTime;
+
+  /**
+   * The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this
+   * run.
+   */
+  thread_id: string;
+
+  /** The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. */
+  assistant_id: string;
+
+  /**
+   * The status of the run, which can be either `queued`, `in_progress`, `requires_action`,
+   * `cancelling`, `cancelled`, `failed`, `completed`, or `expired`.
+   */
+  status:
+    | "queued"
+    | "in_progress"
+    | "requires_action"
+    | "cancelling"
+    | "cancelled"
+    | "failed"
+    | "completed"
+    | "expired";
+
+  /**
+   * Details on the action required to continue the run. Will be `null` if no action is
+   * required.
+   */
+  required_action: {
+    /** For now, this is always `submit_tool_outputs`. */
+    type: "submit_tool_outputs";
+
+    /** Details on the tool outputs needed for this run to continue. */
+    submit_tool_outputs: {
+      /** A list of the relevant tool calls. */
+      tool_calls: RunToolCallObject[];
+    }
+  } | null;
+
+  /** The last error associated with this run. Will be `null` if there are no errors. */
+  last_error: {
+    /** One of `server_error` or `rate_limit_exceeded`. */
+    code: "server_error" | "rate_limit_exceeded";
+
+    /** A human-readable description of the error.
*/ + message: string; + } | null; + + /** The Unix timestamp (in seconds) for when the run will expire. */ + @encode("unixTimestamp", int32) + expires_at: utcDateTime; + + /** The Unix timestamp (in seconds) for when the run was started. */ + @encode("unixTimestamp", int32) + started_at: utcDateTime | null; + + /** The Unix timestamp (in seconds) for when the run was cancelled. */ + @encode("unixTimestamp", int32) + cancelled_at: utcDateTime | null; + + /** The Unix timestamp (in seconds) for when the run failed. */ + @encode("unixTimestamp", int32) + failed_at: utcDateTime | null; + + /** The Unix timestamp (in seconds) for when the run was completed. */ + @encode("unixTimestamp", int32) + completed_at: utcDateTime | null; + + /** The model that the [assistant](/docs/api-reference/assistants) used for this run. */ + `model`: string; + + /** The instructions that the [assistant](/docs/api-reference/assistants) used for this run. */ + instructions: string; + + /** The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. */ + tools: CreateRunRequestTools; + + /** + * The list of [File](/docs/api-reference/files) IDs the + * [assistant](/docs/api-reference/assistants) used for this run. + */ + file_ids: string[] = []; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maxium of 512 characters long. + */ + @extension("x-oaiTypeLabel", "map") + metadata: Record | null; + + usage: RunCompletionUsage | null; +} + +/** Represents a step in execution of a run. */ +model RunStepObject { + /** The identifier of the run step, which can be referenced in API endpoints. */ + id: string; + + /** The object type, which is always `thread.run.step`. */ + object: "thread.run.step"; + + /** The Unix timestamp (in seconds) for when the run step was created. 
*/ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. */ + assistant_id: string; + + /** The ID of the [thread](/docs/api-reference/threads) that was run. */ + thread_id: string; + + /** The ID of the [run](/docs/api-reference/runs) that this run step is a part of. */ + run_id: string; + + /** The type of run step, which can be either `message_creation` or `tool_calls`. */ + type: "message_creation" | "tool_calls"; + + /** + * The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, + * `completed`, or `expired`. + */ + status: "in_progress" | "cancelled" | "failed" | "completed" | "expired"; + + /** The details of the run step. */ + step_details: RunStepDetails; + + /** The last error associated with this run step. Will be `null` if there are no errors. */ + last_error: { + /** One of `server_error` or `rate_limit_exceeded`. */ + code: "server_error" | "rate_limit_exceeded"; + + /** A human-readable description of the error. */ + message: string; + } | null; + + /** + * The Unix timestamp (in seconds) for when the run step expired. A step is considered expired + * if the parent run is expired. + */ + @encode("unixTimestamp", int32) + expires_at: utcDateTime; + + /** The Unix timestamp (in seconds) for when the run step was cancelled. */ + @encode("unixTimestamp", int32) + cancelled_at: utcDateTime | null; + + /** The Unix timestamp (in seconds) for when the run step failed. */ + @encode("unixTimestamp", int32) + failed_at: utcDateTime | null; + + /** T The Unix timestamp (in seconds) for when the run step completed.. */ + @encode("unixTimestamp", int32) + completed_at: utcDateTime | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. 
Keys can be a maximum of 64
+   * characters long and values can be a maxium of 512 characters long.
+   */
+  @extension("x-oaiTypeLabel", "map")
+  metadata: Record | null;
+
+  usage: RunCompletionUsage | null;
+}
+
+/** Tool call objects */
+model RunToolCallObject {
+  /**
+   * The ID of the tool call. This ID must be referenced when you submit the tool outputs in using
+   * the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint.
+   */
+  id: string;
+
+  /** The type of tool call the output is required for. For now, this is always `function`. */
+  type: "function";
+
+  /** The function definition. */
+  function: {
+    /** The name of the function. */
+    name: string;
+
+    /** The arguments that the model expects you to pass to the function. */
+    arguments: string;
+  }
+}
\ No newline at end of file
diff --git a/runs/operations.tsp b/runs/operations.tsp
new file mode 100644
index 000000000..0a7c3ab2b
--- /dev/null
+++ b/runs/operations.tsp
@@ -0,0 +1,185 @@
+import "@typespec/http";
+import "@typespec/openapi";
+
+import "../common/errors.tsp";
+import "./models.tsp";
+
+using TypeSpec.Http;
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+@route("threads")
+interface Runs {
+  @route("runs")
+  @post
+  @operationId("createThreadAndRun")
+  @tag("OpenAI")
+  @summary("Create a thread and run it in one request.")
+  createThreadAndRun(
+    @body threadAndRun: CreateThreadAndRunRequest;
+  ): RunObject | ErrorResponse;
+
+  @route("{thread_id}/runs")
+  @post
+  @operationId("createRun")
+  @tag("OpenAI")
+  @summary("Create a run.")
+  createRun(
+    /** The ID of the thread to run. */
+    @path thread_id: string,
+
+    @body run: CreateRunRequest,
+  ): RunObject | ErrorResponse;
+
+  @route("{thread_id}/runs")
+  @get
+  @operationId("listRuns")
+  @tag("OpenAI")
+  @summary("Returns a list of runs belonging to a thread.")
+  listRuns(
+    /** The ID of the thread the run belongs to. */
+    @path thread_id: string,
+
+    /**
+     * A limit on the number of objects to be returned.
Limit can range between 1 and 100, and the + * default is 20. + */ + @query limit?: int32 = 20; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + * for descending order. + */ + @query order?: "asc" | "desc" = "desc"; + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include after=obj_foo in order to fetch the next page of the list. + */ + @query after?: string; + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include before=obj_foo in order to fetch the previous page of the list. + */ + @query before?: string; + ): ListRunsResponse | ErrorResponse; + + @route("{thread_id}/runs/{run_id}") + @get + @operationId("getRun") + @tag("OpenAI") + @summary("Retrieves a run.") + getRun( + /** The ID of the [thread](/docs/api-reference/threads) that was run. */ + @path thread_id: string, + + /** The ID of the run to retrieve. */ + @path run_id: string, + ): RunObject | ErrorResponse; + + @route("{thread_id}/runs/{run_id}") + @post + @operationId("modifyRun") + @tag("OpenAI") + @summary("Modifies a run.") + modifyRun( + /** The ID of the [thread](/docs/api-reference/threads) that was run. */ + @path thread_id: string, + + /** The ID of the run to modify. */ + @path run_id: string, + + @body run: ModifyRunRequest, + ): RunObject | ErrorResponse; + + @route("{thread_id}/runs/{run_id}/cancel") + @post + @operationId("cancelRun") + @tag("OpenAI") + @summary("Cancels a run that is `in_progress`.") + cancelRun( + /** The ID of the thread to which this run belongs. */ + @path thread_id: string, + + /** The ID of the run to cancel. 
*/ + @path run_id: string, + ): RunObject | ErrorResponse; + + @route("{thread_id}/runs/{run_id}/submit_tool_outputs") + @post + @operationId("submitToolOuputsToRun") + @tag("OpenAI") + @summary(""" + When a run has the `status: "requires_action"` and `required_action.type` is + `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once + they're all completed. All outputs must be submitted in a single request. + """) + submitToolOuputsToRun( + /** The ID of the [thread](/docs/api-reference/threads) to which this run belongs. */ + @path thread_id: string, + + /** The ID of the run that requires the tool output submission. */ + @path run_id: string, + + @body submitToolOutputsRun: SubmitToolOutputsRunRequest, + ): RunObject | ErrorResponse; + + @route("{thread_id}/runs/{run_id}/steps") + @get + @operationId("listRunSteps") + @tag("OpenAI") + @summary("Returns a list of run steps belonging to a run.") + listRunSteps( + /** The ID of the thread the run and run steps belong to. */ + @path thread_id: string, + + /** The ID of the run the run steps belong to. */ + @path run_id: string, + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + * default is 20. + */ + @query limit?: int32 = 20; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + * for descending order. + */ + @query order?: "asc" | "desc" = "desc"; + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include after=obj_foo in order to fetch the next page of the list. + */ + @query after?: string; + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
+ * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include before=obj_foo in order to fetch the previous page of the list. + */ + @query before?: string; + ): ListRunStepsResponse | ErrorResponse; + + @route("{thread_id}/runs/{run_id}/steps/{step_id}") + @get + @operationId("getRunStep") + @tag("OpenAI") + @summary("Retrieves a run step.") + getRunStep( + /** The ID of the thread to which the run and run step belongs. */ + @path thread_id: string, + + /** The ID of the run to which the run step belongs. */ + @path run_id: string, + + /** The ID of the run step to retrieve. */ + @path step_id: string, + ): RunStepObject | ErrorResponse; +} \ No newline at end of file diff --git a/threads/main.tsp b/threads/main.tsp new file mode 100644 index 000000000..6a754bcb5 --- /dev/null +++ b/threads/main.tsp @@ -0,0 +1 @@ +import "./operations.tsp"; \ No newline at end of file diff --git a/threads/meta.tsp b/threads/meta.tsp new file mode 100644 index 000000000..9a6edf95b --- /dev/null +++ b/threads/meta.tsp @@ -0,0 +1,22 @@ +import "./models.tsp"; + +import "@typespec/openapi"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@@extension(OpenAI.ThreadObject, + "x-oaiMeta", + """ + name: The thread object + beta: true + example: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1698107661, + "metadata": {} + } + """ +); \ No newline at end of file diff --git a/threads/models.tsp b/threads/models.tsp new file mode 100644 index 000000000..4bdb28fae --- /dev/null +++ b/threads/models.tsp @@ -0,0 +1,55 @@ +import "../common/models.tsp"; +import "../messages/models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +model CreateThreadRequest { + /** A list of [messages](/docs/api-reference/messages) to start the thread with. */ + messages?: CreateMessageRequest[]; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maxium of 512 characters long. + */ + metadata?: Record | null; +} + +model ModifyThreadRequest { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maxium of 512 characters long. + */ + metadata?: Record | null; +} + +model DeleteThreadResponse { + id: string; + deleted: boolean; + object: "thread.deleted"; +} + +/** Represents a thread that contains [messages](/docs/api-reference/messages). */ +model ThreadObject { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + + /** The object type, which is always `thread`. */ + object: "thread"; + + /** The Unix timestamp (in seconds) for when the thread was created. */ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maxium of 512 characters long. 
+ */ + @extension("x-oaiTypeLabel", "map") + metadata: Record | null; +} \ No newline at end of file diff --git a/threads/operations.tsp b/threads/operations.tsp new file mode 100644 index 000000000..e2599024e --- /dev/null +++ b/threads/operations.tsp @@ -0,0 +1,52 @@ +import "@typespec/http"; +import "@typespec/openapi"; + +import "../common/errors.tsp"; +import "./models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@route("/threads") +interface Threads { + @post + @operationId("createThread") + @tag("OpenAI") + @summary("Create a thread.") + createThread( + @body thread: CreateThreadRequest, + ): ThreadObject | ErrorResponse; + + @route("{thread_id}") + @get + @operationId("getThread") + @tag("OpenAI") + @summary("Retrieves a thread.") + getThread( + /** The ID of the thread to retrieve. */ + @path thread_id: string, + ): ThreadObject | ErrorResponse; + + @route("{thread_id}") + @post + @operationId("modifyThread") + @tag("OpenAI") + @summary("Modifies a thread.") + modifyThread( + /** The ID of the thread to modify. Only the `metadata` can be modified. */ + @path thread_id: string, + @body thread: ModifyThreadRequest, + ): ThreadObject | ErrorResponse; + + @route("{thread_id}") + @delete + @operationId("deleteThread") + @tag("OpenAI") + @summary("Delete a thread.") + deleteThread( + /** The ID of the thread to delete. 
*/ + @path thread_id: string, + ): DeleteThreadResponse | ErrorResponse; +} From dd840f70516fa3037b8a161878b7b00778f782c4 Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Wed, 7 Feb 2024 15:41:48 -0800 Subject: [PATCH 03/18] Re-generate OpenAPI spec with a few fixes --- assistants/models.tsp | 8 +- assistants/operations.tsp | 18 +- audio/models.tsp | 15 +- audio/operations.tsp | 14 +- chat/meta.tsp | 3 +- chat/models.tsp | 59 +- chat/operations.tsp | 2 +- common/models.tsp | 2 + completions/meta.tsp | 35 + completions/models.tsp | 42 +- completions/operations.tsp | 2 +- embeddings/meta.tsp | 24 + embeddings/models.tsp | 41 +- embeddings/operations.tsp | 2 +- files/meta.tsp | 22 + files/models.tsp | 16 - files/operations.tsp | 13 +- fine-tuning/operations.tsp | 20 +- images/meta.tsp | 18 + images/models.tsp | 18 +- images/operations.tsp | 6 +- messages/models.tsp | 5 +- messages/operations.tsp | 14 +- models/meta.tsp | 15 + models/models.tsp | 10 +- models/operations.tsp | 6 +- moderations/meta.tsp | 15 + moderations/models.tsp | 18 +- moderations/operations.tsp | 2 +- readme.md | 2 + runs/models.tsp | 4 +- runs/operations.tsp | 18 +- threads/operations.tsp | 8 +- tsp-output/@typespec/openapi3/openapi.yaml | 6993 ++++++++++++++------ 34 files changed, 5344 insertions(+), 2146 deletions(-) create mode 100644 completions/meta.tsp create mode 100644 embeddings/meta.tsp create mode 100644 files/meta.tsp create mode 100644 images/meta.tsp create mode 100644 models/meta.tsp create mode 100644 moderations/meta.tsp diff --git a/assistants/models.tsp b/assistants/models.tsp index 358d95d48..acea5a1d4 100644 --- a/assistants/models.tsp +++ b/assistants/models.tsp @@ -30,7 +30,7 @@ model CreateAssistantRequest { * Tools can be of types `code_interpreter`, `retrieval`, or `function`. 
*/ - tools?: CreateAssistantRequestTools = []; // TODO: Double-check default empty array + tools?: CreateAssistantRequestTools = []; /** * A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a @@ -73,8 +73,7 @@ model ModifyAssistantRequest { * A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. * Tools can be of types `code_interpreter`, `retrieval`, or `function`. */ - @maxItems(128) - tools?: CreateAssistantRequestTools[] = []; // TODO: Double-check default empty array + tools?: CreateAssistantRequestTools = []; /** * A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a @@ -196,8 +195,7 @@ model AssistantObject { * A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. * Tools can be of types `code_interpreter`, `retrieval`, or `function`. */ - @maxItems(128) - tools: CreateAssistantRequestTools[] = []; // TODO: Double-check default empty array + tools: CreateAssistantRequestTools = []; /** * A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a diff --git a/assistants/operations.tsp b/assistants/operations.tsp index e98f03e7e..2883342b1 100644 --- a/assistants/operations.tsp +++ b/assistants/operations.tsp @@ -13,7 +13,7 @@ namespace OpenAI; interface Assistants { @post @operationId("createAssistant") - @tag("OpenAI") + @tag("Assistants") @summary("Create an assistant with a model and instructions.") createAssistant( @body assistant: CreateAssistantRequest, @@ -21,7 +21,7 @@ interface Assistants { @get @operationId("listAssistants") - @tag("OpenAI") + @tag("Assistants") @summary("Returns a list of assistants.") listFiles( /** @@ -54,7 +54,7 @@ interface Assistants { @route("{assistant_id}") @get @operationId("getAssistant") - @tag("OpenAI") + @tag("Assistants") @summary("Retrieves an assistant.") getAssistant( /** The ID of the assistant to retrieve. 
*/ @@ -64,7 +64,7 @@ interface Assistants { @route("{assistant_id}") @post @operationId("modifyAssistant") - @tag("OpenAI") + @tag("Assistants") @summary("Modifies an assistant.") modifyAssistant( /** The ID of the assistant to modify. */ @@ -76,7 +76,7 @@ interface Assistants { @route("{assistant_id}") @delete @operationId("deleteAssistant") - @tag("OpenAI") + @tag("Assistants") @summary("Delete an assistant.") deleteAssistant( /** The ID of the assistant to delete. */ @@ -86,7 +86,7 @@ interface Assistants { @route("{assistant_id}/files") @post @operationId("createAssistantFile") - @tag("OpenAI") + @tag("Assistants") @summary(""" Create an assistant file by attaching a [File](/docs/api-reference/files) to a [assistant](/docs/api-reference/assistants). @@ -100,7 +100,7 @@ interface Assistants { @route("{assistant_id}/files") @get @operationId("listAssistantFiles") - @tag("OpenAI") + @tag("Assistants") @summary("Returns a list of assistant files.") listAssistantFiles( /** The ID of the assistant the file belongs to. */ @@ -136,7 +136,7 @@ interface Assistants { @route("{assistant_id}/files/{file_id}") @get @operationId("getAssistantFile") - @tag("OpenAI") + @tag("Assistants") @summary("Retrieves an assistant file.") getAssistantFile( /** The ID of the assistant the file belongs to. */ @@ -149,7 +149,7 @@ interface Assistants { @route("{assistant_id}/files/{file_id}") @delete @operationId("deleteAssistantFile") - @tag("OpenAI") + @tag("Assistants") @summary("Delete an assistant file.") deleteAssistantFile( /** The ID of the assistant the file belongs to. */ diff --git a/audio/models.tsp b/audio/models.tsp index ba931af7e..53d7757c3 100644 --- a/audio/models.tsp +++ b/audio/models.tsp @@ -23,11 +23,8 @@ model CreateSpeechRequest { */ voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer"; - /** - * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or - * vtt. 
- */ - response_format?: "json" | "text" | "srt" | "verbose_json" | "vtt" = "json"; + /** The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. */ + response_format?: "mp3" | "opus" | "aac" | "flac" = "mp3"; /** * The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. @@ -75,6 +72,7 @@ model CreateTranscriptionRequest { * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to * automatically increase the temperature until certain thresholds are hit. */ + // NOTE: Min and max values are absent in the OpenAPI spec but mentioned in the description. @minValue(0) @maxValue(1) temperature?: float64 = 0; @@ -112,12 +110,13 @@ model CreateTranslationRequest { * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to * automatically increase the temperature until certain thresholds are hit. */ + // NOTE: Min and max values are absent in the OpenAPI spec but mentioned in the description. @minValue(0) @maxValue(1) temperature?: float64 = 0; } -// TODO: This model is not defined in the OpenAI API spec. +// NOTE: This model is not defined in the OpenAI API spec. model CreateTranscriptionResponse { /** The transcribed text for the provided audio data. */ text: string; @@ -141,7 +140,7 @@ model CreateTranscriptionResponse { segments?: AudioSegment[]; } -// TODO: This model is not defined in the OpenAI API spec. +// NOTE: This model is not defined in the OpenAI API spec. model CreateTranslationResponse { /** The translated text for the provided audio data. */ text: string; @@ -170,7 +169,7 @@ alias TEXT_TO_SPEECH_MODELS = alias SPEECH_TO_TEXT_MODELS = | "whisper-1"; -// TODO: This model is not defined in the OpenAI API spec. +// NOTE: This model is not defined in the OpenAI API spec. model AudioSegment { /** The zero-based index of this segment. 
*/ id: safeint; diff --git a/audio/operations.tsp b/audio/operations.tsp index a598297bf..ee1cb428e 100644 --- a/audio/operations.tsp +++ b/audio/operations.tsp @@ -14,20 +14,22 @@ interface Audio { @route("speech") @post @operationId("createSpeech") - @tag("OpenAI") + @tag("Audio") @summary("Generates audio from the input text.") createSpeech( @body speech: CreateSpeechRequest, ): { + /** chunked */ + @header("Transfer-Encoding") transferEncoding?: string; + @header contentType: "application/octet-stream"; - @header("Transfer-Encoding") transferEncoding: "chunked"; @body @encode("binary") audio: bytes; }; @route("transcriptions") @post @operationId("createTranscription") - @tag("OpenAI") + @tag("Audio") @summary("Transcribes audio into the input language.") createTranscription( @header contentType: "multipart/form-data", @@ -35,6 +37,7 @@ interface Audio { ): | CreateTranscriptionResponse | { + // TODO: Is this the appropriate way to describe the multiple possible response types? @header contentType: "text/plain"; @body text: string; } @@ -43,14 +46,15 @@ interface Audio { @route("translations") @post @operationId("createTranslation") - @tag("OpenAI") - @summary("Transcribes audio into the input language.") + @tag("Audio") + @summary("Translates audio into English..") createTranslation( @header contentType: "multipart/form-data", @body audio: CreateTranslationRequest, ): | CreateTranslationResponse | { + // TODO: Is this the appropriate way to describe the multiple possible response types? 
@header contentType: "text/plain"; @body text: string; } diff --git a/chat/meta.tsp b/chat/meta.tsp index 3296a6c42..6df478c87 100644 --- a/chat/meta.tsp +++ b/chat/meta.tsp @@ -1,8 +1,9 @@ import "./operations.tsp"; -namespace OpenAI; using TypeSpec.OpenAPI; +namespace OpenAI; + @@extension(OpenAI.Chat.createChatCompletion, "x-oaiMeta", { diff --git a/chat/models.tsp b/chat/models.tsp index a80178e99..d0d19bc06 100644 --- a/chat/models.tsp +++ b/chat/models.tsp @@ -26,7 +26,9 @@ model CreateChatCompletionRequest { * * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ - frequency_penalty?: Penalty | null = 0; + @minValue(-2) + @maxValue(2) + frequency_penalty?: float64 | null = 0; /** * Modify the likelihood of specified tokens appearing in the completion. @@ -52,7 +54,9 @@ model CreateChatCompletionRequest { * position, each with an associated log probability. `logprobs` must be set to `true` if this * parameter is used. */ - top_logprobs?: TopLogprobs | null; + @minValue(0) + @maxValue(5) + top_logprobs?: safeint | null; /** * The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. @@ -61,14 +65,17 @@ model CreateChatCompletionRequest { * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) * for counting tokens. */ - max_tokens?: MaxTokens | null = 16; + @minValue(0) + max_tokens?: safeint | null = 16; /** * How many chat completion choices to generate for each input message. Note that you will be * charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to * minimize costs. */ - n?: N | null = 1; + @minValue(1) + @maxValue(128) + n?: safeint | null = 1; /** * Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear @@ -76,7 +83,9 @@ model CreateChatCompletionRequest { * * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ - presence_penalty?: Penalty | null = 0; + @minValue(-2) + @maxValue(2) + presence_penalty?: float64 | null = 0; /** * An object specifying the format that the model must output. Compatible with @@ -112,7 +121,9 @@ model CreateChatCompletionRequest { beta: true } ) - seed?: Seed | null; + @minValue(-9223372036854775808) // TODO: Min and max exceed the limits of safeint. + @maxValue(9223372036854775807) + seed?: safeint | null; // TODO: Consider inlining when https://github.com/microsoft/typespec/issues/2356 is resolved // https://github.com/microsoft/typespec/issues/2355 @@ -133,7 +144,9 @@ model CreateChatCompletionRequest { * * We generally recommend altering this or `top_p` but not both. */ - temperature?: Temperature | null = 1; + @minValue(0) + @maxValue(2) + temperature?: float64 | null = 1; /** * An alternative to sampling with temperature, called nucleus sampling, where the model considers @@ -142,7 +155,9 @@ model CreateChatCompletionRequest { * * We generally recommend altering this or `temperature` but not both. */ - top_p?: TopP | null = 1; + @minValue(0) + @maxValue(1) + top_p?: float64 | null = 1; /** * A list of tools the model may call. Currently, only functions are supported as a tool. Use this @@ -249,34 +264,6 @@ alias CHAT_COMPLETION_MODELS = | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo-16k-0613"; -@minValue(-2) -@maxValue(2) -scalar Penalty extends float64; - -@minValue(0) -@maxValue(2) -scalar Temperature extends float64; - -@minValue(0) -@maxValue(1) -scalar TopP extends float64; - -@minValue(0) -@maxValue(5) -scalar TopLogprobs extends safeint; - -@minValue(1) -@maxValue(128) -scalar N extends safeint; - -@minValue(0) -scalar MaxTokens extends safeint; - -// TODO: Min and max exceed the limits of safeint. 
-@minValue(-9223372036854775808) -@maxValue(9223372036854775807) -scalar Seed extends safeint; - @oneOf union Stop { string, diff --git a/chat/operations.tsp b/chat/operations.tsp index c2c5aa364..0b2972421 100644 --- a/chat/operations.tsp +++ b/chat/operations.tsp @@ -14,7 +14,7 @@ interface Chat { @route("completions") @post @operationId("createChatCompletion") - @tag("OpenAI") + @tag("Chat") @summary("Creates a model response for the given chat conversation.") createChatCompletion( ...CreateChatCompletionRequest, diff --git a/common/models.tsp b/common/models.tsp index b5bfcc9b3..611e53e7a 100644 --- a/common/models.tsp +++ b/common/models.tsp @@ -33,4 +33,6 @@ model FunctionObject { * about the format.\n\nTo describe a function that accepts no parameters, provide the value * `{\"type\": \"object\", \"properties\": {}}`. */ +// TODO: The generated spec produces "additionalProperties: {}" for this instead of +// "additionalProperties: true". Are they equivalent? model FunctionParameters is Record; \ No newline at end of file diff --git a/completions/meta.tsp b/completions/meta.tsp new file mode 100644 index 000000000..708dea11d --- /dev/null +++ b/completions/meta.tsp @@ -0,0 +1,35 @@ +import "./models.tsp"; +import "./operations.tsp"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +// TODO: Fill in example here. 
+@@extension(OpenAI.CreateCompletionResponse, + "x-oaiMeta", + """ + name: The completion object + legacy: true, + example: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "gpt-3.5-turbo", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + """ +); \ No newline at end of file diff --git a/completions/models.tsp b/completions/models.tsp index bfe7bf03c..4122d8bda 100644 --- a/completions/models.tsp +++ b/completions/models.tsp @@ -22,7 +22,7 @@ model CreateCompletionRequest { * prompt is not specified the model will generate as if from the beginning of a new document. */ // TODO: consider inlining when https://github.com/microsoft/typespec/issues/2356 fixed - prompt: Prompt = "<|endoftext|>"; + prompt: Prompt | null = "<|endoftext|>"; /** * Generates `best_of` completions server-side and returns the "best" (the one with the highest @@ -48,7 +48,9 @@ model CreateCompletionRequest { * * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ - frequency_penalty?: Penalty | null = 0; + @minValue(-2) + @maxValue(2) + frequency_penalty?: float64 | null = 0; /** * Modify the likelihood of specified tokens appearing in the completion. @@ -85,7 +87,8 @@ model CreateCompletionRequest { * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) * for counting tokens. */ - max_tokens?: MaxTokens | null = 16; + @minValue(0) + max_tokens?: safeint | null = 16; /** * How many completions to generate for each prompt. @@ -93,7 +96,9 @@ model CreateCompletionRequest { * **Note:** Because this parameter generates many completions, it can quickly consume your token * quota. 
Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. */ - n?: N | null = 1; + @minValue(1) + @maxValue(128) + n?: safeint | null = 1; /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear @@ -101,7 +106,9 @@ model CreateCompletionRequest { * * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ - presence_penalty?: Penalty | null = 0; + @minValue(-2) + @maxValue(2) + presence_penalty?: float64 | null = 0; /** * If specified, our system will make a best effort to sample deterministically, such that @@ -116,9 +123,11 @@ model CreateCompletionRequest { beta: true } ) - seed?: Seed | null; + @minValue(-9223372036854775808) // TODO: Min and max exceed the limits of safeint. + @maxValue(9223372036854775807) + seed?: safeint | null; - // todo: consider inlining when https://github.com/microsoft/typespec/issues/2356 is resolved + // TODO: Consider inlining when https://github.com/microsoft/typespec/issues/2356 is resolved // https://github.com/microsoft/typespec/issues/2355 /** Up to 4 sequences where the API will stop generating further tokens. */ stop?: Stop | null = null; @@ -140,7 +149,9 @@ model CreateCompletionRequest { * * We generally recommend altering this or `top_p` but not both. */ - temperature?: Temperature | null = 1; + @minValue(0) + @maxValue(2) + temperature?: float64 | null = 1; /** * An alternative to sampling with temperature, called nucleus sampling, where the model considers @@ -149,7 +160,9 @@ model CreateCompletionRequest { * * We generally recommend altering this or `temperature` but not both. */ - top_p?: TopP | null = 1; + @minValue(0) + @maxValue(1) + top_p?: float64 | null = 1; /** * A unique identifier representing your end-user, which can help OpenAI to monitor and detect @@ -162,14 +175,6 @@ model CreateCompletionRequest { * Represents a completion response from the API. 
Note: both the streamed and non-streamed response * objects share the same shape (unlike the chat endpoint). */ -@extension( - "x-oaiMeta", - { - name: "The completion object", - legacy: true, - example: "", // fill in - } -) model CreateCompletionResponse { /** A unique identifier for the completion. */ id: string; @@ -192,6 +197,8 @@ model CreateCompletionResponse { * in the request was reached, or `content_filter` if content was omitted due to a flag from our * content filters. */ + // TODO: The generated spec includes other values like "tool_calls" and "function_call". + // Is it because we're importing /chat/models.tsp? finish_reason: "stop" | "length" | "content_filter"; }[]; @@ -228,5 +235,4 @@ union Prompt { string[], TokenArray, TokenArrayArray, - null, } \ No newline at end of file diff --git a/completions/operations.tsp b/completions/operations.tsp index e84d3d44c..b24f018df 100644 --- a/completions/operations.tsp +++ b/completions/operations.tsp @@ -13,7 +13,7 @@ namespace OpenAI; interface Completions { @post @operationId("createCompletion") - @tag("OpenAI") + @tag("Completions") @summary("Creates a completion for the provided prompt and parameters.") createCompletion( ...CreateCompletionRequest, diff --git a/embeddings/meta.tsp b/embeddings/meta.tsp new file mode 100644 index 000000000..284eb01dc --- /dev/null +++ b/embeddings/meta.tsp @@ -0,0 +1,24 @@ +import "./models.tsp"; +import "./operations.tsp"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@@extension(OpenAI.Embedding, + "x-oaiMeta", + """ + name: The embedding object + example: | + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } + """ +); \ No newline at end of file diff --git a/embeddings/models.tsp b/embeddings/models.tsp index fa80cac4b..fd9c13fde 100644 --- a/embeddings/models.tsp +++ b/embeddings/models.tsp @@ -14,7 +14,7 @@ model CreateEmbeddingRequest { * for counting tokens. 
*/ @extension("x-oaiExpandable", true) - input: string | string[] | TokenArray | TokenArrayArray; + input: CreateEmbeddingRequestInput; /** * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to @@ -28,12 +28,13 @@ model CreateEmbeddingRequest { * The format to return the embeddings in. Can be either `float` or * [`base64`](https://pypi.org/project/pybase64/). */ - encoding_format?: "float" | "base64" | null = "float"; + encoding_format?: "float" | "base64" = "float"; /** * The number of dimensions the resulting output embeddings should have. Only supported in * `text-embedding-3` and later models. */ + @minValue(1) dimensions?: safeint; /** @@ -50,7 +51,7 @@ model CreateEmbeddingResponse { /** The name of the model used to generate the embedding. */ `model`: string; - /** The object type, which is always "embedding". */ + /** The object type, which is always "list". */ object: "list"; /** The usage information for the request. */ @@ -68,25 +69,23 @@ alias EMBEDDINGS_MODELS = | "text-embedding-3-small" | "text-embedding-3-large"; +@oneOf +union CreateEmbeddingRequestInput +{ + /** The string that will be turned into an embedding. */ + string, + + /** The array of strings that will be turned into an embedding. */ + string[], + + /** The array of integers that will be turned into an embedding. */ + TokenArray, + + /** The array of arrays containing integers that will be turned into an embedding. */ + TokenArrayArray; +} + /** Represents an embedding vector returned by embedding endpoint. */ -@extension( - "x-oaiMeta", - { - name: "The embedding object", - example: | """ - { - "object": "embedding", - "embedding": [ - 0.0023064255, - -0.009327292, - .... (1536 floats total for ada-002) - -0.0028842222, - ], - "index": 0 - } - """ - } -) model Embedding { /** The index of the embedding in the list of embeddings. 
*/ index: safeint; diff --git a/embeddings/operations.tsp b/embeddings/operations.tsp index c53347e6f..61c8e1839 100644 --- a/embeddings/operations.tsp +++ b/embeddings/operations.tsp @@ -13,7 +13,7 @@ namespace OpenAI; interface Embeddings { @post @operationId("createEmbedding") - @tag("OpenAI") + @tag("Embeddings") @summary("Creates an embedding vector representing the input text.") createEmbedding( @body embedding: CreateEmbeddingRequest, diff --git a/files/meta.tsp b/files/meta.tsp new file mode 100644 index 000000000..d1c14977b --- /dev/null +++ b/files/meta.tsp @@ -0,0 +1,22 @@ +import "./models.tsp"; +import "./operations.tsp"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@@extension(OpenAI.OpenAIFile, + "x-oaiMeta", + """ + name: The file object + example: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "salesOverview.pdf", + "purpose": "assistants", + } + """ +); \ No newline at end of file diff --git a/files/models.tsp b/files/models.tsp index 72a263413..fdd2b1f45 100644 --- a/files/models.tsp +++ b/files/models.tsp @@ -36,22 +36,6 @@ alias FILE_PURPOSE = | "assistants_output"; /** The `File` object represents a document that has been uploaded to OpenAI. */ -@extension( - "x-oaiMeta", - { - name: "The file object", - example: | """ - { - "id": "file-abc123", - "object": "file", - "bytes": 120000, - "created_at": 1677610602, - "filename": "salesOverview.pdf", - "purpose": "assistants", - } - """ - } -) model OpenAIFile { /** The file identifier, which can be referenced in the API endpoints. */ id: string; diff --git a/files/operations.tsp b/files/operations.tsp index eec58470a..dda5f95b8 100644 --- a/files/operations.tsp +++ b/files/operations.tsp @@ -13,7 +13,7 @@ namespace OpenAI; interface Files { @post @operationId("createFile") - @tag("OpenAI") + @tag("Files") @summary(""" Upload a file that can be used across various endpoints. 
The size of all the files uploaded by one organization can be up to 100 GB. @@ -31,17 +31,18 @@ interface Files { @get @operationId("listFiles") - @tag("OpenAI") + @tag("Files") @summary("Returns a list of files that belong to the user's organization.") listFiles( /** Only return files with the given purpose. */ + // NOTE: This is just a string in the OpenAPI spec. @query purpose?: FILE_PURPOSE, ): ListFilesResponse | ErrorResponse; @route("{file_id}") @get @operationId("retrieveFile") - @tag("OpenAI") + @tag("Files") @summary("Returns information about a specific file.") retrieveFile( /** The ID of the file to use for this request. */ @@ -51,7 +52,7 @@ interface Files { @route("{file_id}") @delete @operationId("deleteFile") - @tag("OpenAI") + @tag("Files") @summary("Delete a file") deleteFile( /** The ID of the file to use for this request. */ @@ -61,10 +62,10 @@ interface Files { @route("{file_id}/content") @get @operationId("downloadFile") - @tag("OpenAI") + @tag("Files") @summary("Returns the contents of the specified file.") downloadFile( /** The ID of the file to use for this request. */ @path file_id: string, - ): string | ErrorResponse; + ): string | ErrorResponse; // TODO: The OpenAPI spec says this is a string but the Content-Type is application/json? } diff --git a/fine-tuning/operations.tsp b/fine-tuning/operations.tsp index 258382397..07f48e802 100644 --- a/fine-tuning/operations.tsp +++ b/fine-tuning/operations.tsp @@ -22,14 +22,14 @@ namespace FineTuning { * [Learn more about fine-tuning](/docs/guides/fine-tuning) */ @post - @tag("OpenAI") + @tag("Fine-tuning") @operationId("createFineTuningJob") createFineTuningJob( @body job: CreateFineTuningJobRequest, ): FineTuningJob | ErrorResponse; @get - @tag("OpenAI") + @tag("Fine-tuning") @operationId("listPaginatedFineTuningJobs") listPaginatedFineTuningJobs( /** Identifier for the last job from the previous pagination request. 
*/ @@ -45,7 +45,7 @@ namespace FineTuning { [Learn more about fine-tuning](/docs/guides/fine-tuning) """) @route("{fine_tuning_job_id}") - @tag("OpenAI") + @tag("Fine-tuning") @get @operationId("retrieveFineTuningJob") retrieveFineTuningJob( @@ -53,7 +53,7 @@ namespace FineTuning { ): FineTuningJob | ErrorResponse; @summary("Get status updates for a fine-tuning job.") - @tag("OpenAI") + @tag("Fine-tuning") @route("{fine_tuning_job_id}/events") @get @operationId("listFineTuningEvents") @@ -69,7 +69,7 @@ namespace FineTuning { ): ListFineTuningJobEventsResponse | ErrorResponse; @summary("Immediately cancel a fine-tune job.") - @tag("OpenAI") + @tag("Fine-tuning") @route("{fine_tuning_job_id}/cancel") @post @operationId("cancelFineTuningJob") @@ -84,7 +84,7 @@ namespace FineTuning { interface FineTunes { #deprecated "deprecated" @post - @tag("OpenAI") + @tag("Fine-tuning") @summary(""" Creates a job that fine-tunes a specified model from a given dataset. @@ -99,7 +99,7 @@ interface FineTunes { #deprecated "deprecated" @get - @tag("OpenAI") + @tag("Fine-tuning") @summary("List your organization's fine-tuning jobs") @operationId("listFineTunes") listFineTunes(): ListFineTunesResponse | ErrorResponse; @@ -107,7 +107,7 @@ interface FineTunes { #deprecated "deprecated" @get @route("{fine_tune_id}") - @tag("OpenAI") + @tag("Fine-tuning") @summary(""" Gets info about the fine-tune job. 
@@ -122,7 +122,7 @@ interface FineTunes { #deprecated "deprecated" @route("{fine_tune_id}/events") @get - @tag("OpenAI") + @tag("Fine-tuning") @summary("Get fine-grained status updates for a fine-tune job.") @operationId("listFineTuneEvents") listFineTuneEvents( @@ -144,7 +144,7 @@ interface FineTunes { #deprecated "deprecated" @route("{fine_tune_id}/cancel") @post - @tag("OpenAI") + @tag("Fine-tuning") @summary("Immediately cancel a fine-tune job.") @operationId("cancelFineTune") cancelFineTune( diff --git a/images/meta.tsp b/images/meta.tsp new file mode 100644 index 000000000..79e02b4ee --- /dev/null +++ b/images/meta.tsp @@ -0,0 +1,18 @@ +import "./models.tsp"; +import "./operations.tsp"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@@extension(OpenAI.Image, + "x-oaiMeta", + """ + name: "The image object", + example: | + { + "url": "...", + "revised_prompt": "..." + } + """ +); \ No newline at end of file diff --git a/images/models.tsp b/images/models.tsp index 42f14f067..e308ade23 100644 --- a/images/models.tsp +++ b/images/models.tsp @@ -19,14 +19,14 @@ model CreateImageRequest { * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is * supported. */ + // TODO: This is generated as a "oneOf" in the tsp-output? n?: ImagesN | null = 1; /** * The quality of the image that will be generated. `hd` creates images with finer details and * greater consistency across the image. This param is only supported for `dall-e-3`. - * - * TODO: Confirm that this is actually nullable. */ + // NOTE: This is not marked as nullable in the OpenAPI spec. quality?: "standard" | "hd" | null = "standard"; /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ @@ -61,6 +61,8 @@ model CreateImageEditRequest { image: bytes; /** A text description of the desired image(s). The maximum length is 1000 characters. */ + // NOTE: Max length is not defined in the OpenAI spec but mentioned in the description. 
+ @maxLength(1000) prompt: string; /** @@ -135,18 +137,6 @@ model ImagesResponse { scalar ImagesN extends safeint; /** Represents the url or the content of an image generated by the OpenAI API. */ -@extension( - "x-oaiMeta", - { - name: "The image object", - example: | """ - { - "url": "...", - "revised_prompt": "..." - } - """ - } -) model Image { /** The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. */ @encode("base64", string) diff --git a/images/operations.tsp b/images/operations.tsp index 59928894f..4db5886ae 100644 --- a/images/operations.tsp +++ b/images/operations.tsp @@ -14,7 +14,7 @@ interface Images { @route("generations") @post @operationId("createImage") - @tag("OpenAI") + @tag("Images") @summary("Creates an image given a prompt") createImage( @body image: CreateImageRequest @@ -23,7 +23,7 @@ @route("edits") @post @operationId("createImageEdit") - @tag("OpenAI") + @tag("Images") @summary("Creates an edited or extended image given an original image and a prompt.") createImageEdit( @header contentType: "multipart/form-data", @@ -33,7 +33,7 @@ @route("variations") @post @operationId("createImageVariation") - @tag("OpenAI") + @tag("Images") @summary("Creates an edited or extended image given an original image and a prompt.") createImageVariation( @header contentType: "multipart/form-data", diff --git a/messages/models.tsp b/messages/models.tsp index 075bef16d..67d78db29 100644 --- a/messages/models.tsp +++ b/messages/models.tsp @@ -7,7 +7,7 @@ namespace OpenAI; model CreateMessageRequest { /** The role of the entity that is creating the message. Currently only `user` is supported. */ - role: "user"; + role: "user"; // TODO: The generated spec adds "assistants" to this enum. /** The content of the message. */ @minLength(1) @@ -125,9 +125,10 @@ model MessageContentImageFileObject { } } +/** The text content that is part of a message. */ model MessageContentTextObject { /** Always `text`. 
*/ - type: "text"; + type: "text"; // TODO: The generated spec adds "json_object" to this enum. text: { /** The data that makes up the text. */ diff --git a/messages/operations.tsp b/messages/operations.tsp index 3cdf4ea84..0c9843e0a 100644 --- a/messages/operations.tsp +++ b/messages/operations.tsp @@ -13,7 +13,7 @@ namespace OpenAI; interface Messages { @post @operationId("createMessage") - @tag("OpenAI") + @tag("Assistants") @summary("Create a message.") createMessage( /** The ID of the [thread](/docs/api-reference/threads) to create a message for. */ @@ -24,7 +24,7 @@ interface Messages { @get @operationId("listMessages") - @tag("OpenAI") + @tag("Assistants") @summary("Returns a list of messages for a given thread.") listMessages( /** The ID of the [thread](/docs/api-reference/threads) the messages belong to. */ @@ -60,7 +60,7 @@ interface Messages { @route("{message_id}") @get @operationId("getMessage") - @tag("OpenAI") + @tag("Assistants") @summary("Retrieve a message.") getMessage( /** The ID of the [thread](/docs/api-reference/threads) to which this message belongs. */ @@ -73,7 +73,7 @@ interface Messages { @route("{message_id}") @post @operationId("modifyMessage") - @tag("OpenAI") + @tag("Assistants") @summary("Modifies a message.") modifyMessage( /** The ID of the thread to which this message belongs. */ @@ -88,7 +88,7 @@ interface Messages { @route("{message_id}/files") @get @operationId("listMessageFiles") - @tag("OpenAI") + @tag("Assistants") @summary("Returns a list of message files.") listMessageFiles( /** The ID of the thread that the message and files belong to. */ @@ -127,8 +127,8 @@ interface Messages { @route("{message_id}/files/{file_id}") @get @operationId("getMessageFile") - @tag("OpenAI") - @summary("RRetrieves a message file.") + @tag("Assistants") + @summary("Retrieves a message file.") getMessageFile( /** The ID of the thread to which the message and File belong. 
*/ @path thread_id: string, diff --git a/models/meta.tsp b/models/meta.tsp new file mode 100644 index 000000000..cf8dd65dc --- /dev/null +++ b/models/meta.tsp @@ -0,0 +1,15 @@ +import "./models.tsp"; +import "./operations.tsp"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +// TODO: Fill in example here. +@@extension(OpenAI.Model, + "x-oaiMeta", + """ + name: "The model object", + example: "*retrieve_model_response" + """ +); \ No newline at end of file diff --git a/models/models.tsp b/models/models.tsp index 68774d259..f522ca893 100644 --- a/models/models.tsp +++ b/models/models.tsp @@ -10,18 +10,10 @@ model ListModelsResponse { model DeleteModelResponse { id: string; deleted: boolean; - object: "model"; + object: "model"; // NOTE: This is just a string in the OpenAPI spec, no enum. } /** Describes an OpenAI model offering that can be used with the API. */ -// TODO: Fill in example here. -@extension( - "x-oaiMeta", - { - name: "The model object", - example: "*retrieve_model_response" - } -) model Model { /** The model identifier, which can be referenced in the API endpoints. */ id: string; diff --git a/models/operations.tsp b/models/operations.tsp index f017e1585..74f91f332 100644 --- a/models/operations.tsp +++ b/models/operations.tsp @@ -13,7 +13,7 @@ namespace OpenAI; interface Models { @get @operationId("listModels") - @tag("OpenAI") + @tag("Models") @summary(""" Lists the currently available models, and provides basic information about each one such as the owner and availability. @@ -23,7 +23,7 @@ interface Models { @route("{model}") @get @operationId("retrieveModel") - @tag("OpenAI") + @tag("Models") @summary(""" Retrieves a model instance, providing basic information about the model such as the owner and permissioning. @@ -36,7 +36,7 @@ interface Models { @route("{model}") @delete @operationId("deleteModel") - @tag("OpenAI") + @tag("Models") @summary(""" Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. 
""") diff --git a/moderations/meta.tsp b/moderations/meta.tsp new file mode 100644 index 000000000..7d96cda38 --- /dev/null +++ b/moderations/meta.tsp @@ -0,0 +1,15 @@ +import "./models.tsp"; +import "./operations.tsp"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +// TODO: Fill in example here. +@@extension(OpenAI.CreateModerationResponse, + "x-oaiMeta", + """ + name: "The moderation object", + example: "*moderation_example" + """ +); \ No newline at end of file diff --git a/moderations/models.tsp b/moderations/models.tsp index a572a6f2e..b844b2659 100644 --- a/moderations/models.tsp +++ b/moderations/models.tsp @@ -4,7 +4,7 @@ namespace OpenAI; model CreateModerationRequest { /** The input text to classify */ - input: string | string[]; + input: CreateModerationRequestInput; /** * Two content moderations models are available: `text-moderation-stable` and @@ -20,14 +20,6 @@ model CreateModerationRequest { /** * Represents policy compliance report by OpenAI's content moderation model against a given input. */ -// TODO: Fill in example here. -@extension( - "x-oaiMeta", - { - name: "The moderation object", - example: "*moderation_example" - } -) model CreateModerationResponse { /** The unique identifier for the moderation request. 
*/ id: string; @@ -136,4 +128,10 @@ model CreateModerationResponse { alias MODERATION_MODELS = | "text-moderation-latest" - | "text-moderation-stable"; \ No newline at end of file + | "text-moderation-stable"; + +@oneOf +union CreateModerationRequestInput { + string, + string[] +} \ No newline at end of file diff --git a/moderations/operations.tsp b/moderations/operations.tsp index 8efad4cdb..7760ec2b2 100644 --- a/moderations/operations.tsp +++ b/moderations/operations.tsp @@ -13,7 +13,7 @@ namespace OpenAI; interface Moderations { @post @operationId("createModeration") - @tag("OpenAI") + @tag("Moderations") @summary("Classifies if text violates OpenAI's Content Policy") createModeration( @body content: CreateModerationRequest, diff --git a/readme.md b/readme.md index c4dd93188..b65856581 100644 --- a/readme.md +++ b/readme.md @@ -1,5 +1,7 @@ A conversion of the OpenAI OpenAPI to TypeSpec. +Snapshot: https://raw.githubusercontent.com/openai/openai-openapi/b648b7823135e6fa5148ac9a303c16fdad050da6/openapi.yaml + There are some deltas: ### Changes to API Semantics: diff --git a/runs/models.tsp b/runs/models.tsp index 113e43124..d5c33bb22 100644 --- a/runs/models.tsp +++ b/runs/models.tsp @@ -328,7 +328,7 @@ model RunObject { */ required_action: { /** For now, this is always `submit_tool_outputs`. */ - type: "ubmit_tool_outputs"; + type: "submit_tool_outputs"; /** Details on the tool outputs needed for this run to continue. */ submit_tool_outputs: { @@ -439,7 +439,7 @@ model RunStepObject { * if the parent run is expired. */ @encode("unixTimestamp", int32) - expires_at: utcDateTime; + expires_at: utcDateTime | null; /** The Unix timestamp (in seconds) for when the run step was cancelled. 
*/ @encode("unixTimestamp", int32) diff --git a/runs/operations.tsp b/runs/operations.tsp index 0a7c3ab2b..ae6f35778 100644 --- a/runs/operations.tsp +++ b/runs/operations.tsp @@ -14,7 +14,7 @@ interface Runs { @route("runs") @post @operationId("createThreadAndRun") - @tag("OpenAI") + @tag("Assistants") @summary("Create a thread and run it in one request.") createThreadAndRun( @body threadAndRun: CreateThreadAndRunRequest; @@ -23,7 +23,7 @@ interface Runs { @route("{thread_id}/runs") @post @operationId("createRun") - @tag("OpenAI") + @tag("Assistants") @summary("Create a run.") createRun( /** The ID of the thread to run. */ @@ -35,7 +35,7 @@ interface Runs { @route("thread_id}/runs") @get @operationId("listRuns") - @tag("OpenAI") + @tag("Assistants") @summary("Returns a list of runs belonging to a thread.") listRuns( /** The ID of the thread the run belongs to. */ @@ -71,7 +71,7 @@ interface Runs { @route("{thread_id}/runs/{run_id}") @get @operationId("getRun") - @tag("OpenAI") + @tag("Assistants") @summary("Retrieves a run.") getRun( /** The ID of the [thread](/docs/api-reference/threads) that was run. */ @@ -84,7 +84,7 @@ interface Runs { @route("{thread_id}/runs/{run_id}") @post @operationId("modifyRun") - @tag("OpenAI") + @tag("Assistants") @summary("Modifies a run.") modifyRun( /** The ID of the [thread](/docs/api-reference/threads) that was run. */ @@ -99,7 +99,7 @@ interface Runs { @route("{thread_id}/runs/{run_id}/cancel") @post @operationId("cancelRun") - @tag("OpenAI") + @tag("Assistants") @summary("Cancels a run that is `in_progress`.") cancelRun( /** The ID of the thread to which this run belongs. 
*/ @@ -112,7 +112,7 @@ interface Runs { @route("{thread_id}/runs/{run_id}/submit_tool_outputs") @post @operationId("submitToolOuputsToRun") - @tag("OpenAI") + @tag("Assistants") @summary(""" When a run has the `status: "requires_action"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once @@ -131,7 +131,7 @@ interface Runs { @route("{thread_id}/runs/{run_id}/steps") @get @operationId("listRunSteps") - @tag("OpenAI") + @tag("Assistants") @summary("Returns a list of run steps belonging to a run.") listRunSteps( /** The ID of the thread the run and run steps belong to. */ @@ -170,7 +170,7 @@ interface Runs { @route("{thread_id}/runs/{run_id}/steps/{step_id}") @get @operationId("getRunStep") - @tag("OpenAI") + @tag("Assistants") @summary("Retrieves a run step.") getRunStep( /** The ID of the thread to which the run and run step belongs. */ diff --git a/threads/operations.tsp b/threads/operations.tsp index e2599024e..fb1dc5d10 100644 --- a/threads/operations.tsp +++ b/threads/operations.tsp @@ -13,7 +13,7 @@ namespace OpenAI; interface Threads { @post @operationId("createThread") - @tag("OpenAI") + @tag("Assistants") @summary("Create a thread.") createThread( @body thread: CreateThreadRequest, @@ -22,7 +22,7 @@ interface Threads { @route("{thread_id}") @get @operationId("getThread") - @tag("OpenAI") + @tag("Assistants") @summary("Retrieves a thread.") getThread( /** The ID of the thread to retrieve. */ @@ -32,7 +32,7 @@ interface Threads { @route("{thread_id}") @post @operationId("modifyThread") - @tag("OpenAI") + @tag("Assistants") @summary("Modifies a thread.") modifyThread( /** The ID of the thread to modify. Only the `metadata` can be modified. */ @@ -43,7 +43,7 @@ interface Threads { @route("{thread_id}") @delete @operationId("deleteThread") - @tag("OpenAI") + @tag("Assistants") @summary("Delete a thread.") deleteThread( /** The ID of the thread to delete. 
*/ diff --git a/tsp-output/@typespec/openapi3/openapi.yaml b/tsp-output/@typespec/openapi3/openapi.yaml index d37490680..f9d7b16a4 100644 --- a/tsp-output/@typespec/openapi3/openapi.yaml +++ b/tsp-output/@typespec/openapi3/openapi.yaml @@ -4,14 +4,23 @@ info: version: 2.0.0 description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. tags: - - name: OpenAI + - name: Fine-tuning + - name: Audio + - name: Assistants + - name: Chat + - name: Completions + - name: Embeddings + - name: Files + - name: Images + - name: Models + - name: Moderations paths: - /audio/transcriptions: + /assistants: post: tags: - - OpenAI - operationId: createTranscription - summary: Transcribes audio into the input language. + - Assistants + operationId: createAssistant + summary: Create an assistant with a model and instructions. parameters: [] responses: '200': @@ -19,7 +28,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CreateTranscriptionResponse' + $ref: '#/components/schemas/AssistantObject' default: description: An unexpected error response. content: @@ -29,73 +38,118 @@ paths: requestBody: required: true content: - multipart/form-data: + application/json: schema: - $ref: '#/components/schemas/CreateTranscriptionRequest' - /audio/translations: - post: + $ref: '#/components/schemas/CreateAssistantRequest' + get: tags: - - OpenAI - operationId: createTranslation - summary: Transcribes audio into the input language. - parameters: [] + - Assistants + operationId: listAssistants + summary: Returns a list of assistants. + parameters: + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending order and`desc` + for descending order. + schema: + type: string + enum: + - asc + - desc + - desc + - desc + - desc + - desc + - desc + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/CreateTranslationResponse' + $ref: '#/components/schemas/ListAssistantsResponse' default: description: An unexpected error response. content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateTranslationRequest' - /chat/completions: - post: + /assistants/{assistant_id}: + get: tags: - - OpenAI - operationId: createChatCompletion - parameters: [] + - Assistants + operationId: getAssistant + summary: Retrieves an assistant. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant to retrieve. + schema: + type: string responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/CreateChatCompletionResponse' + $ref: '#/components/schemas/AssistantObject' default: description: An unexpected error response. 
content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateChatCompletionRequest' - /completions: post: tags: - - OpenAI - operationId: createCompletion - parameters: [] + - Assistants + operationId: modifyAssistant + summary: Modifies an assistant. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant to modify. + schema: + type: string responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/CreateCompletionResponse' + $ref: '#/components/schemas/AssistantObject' default: description: An unexpected error response. content: @@ -107,195 +161,54 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CreateCompletionRequest' - x-oaiMeta: - name: Create chat completion - group: chat - returns: |- - Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of - [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed. - path: create - examples: - - title: No streaming - request: - curl: |- - curl https://api.openai.com/v1/chat/completions \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -d '{ - "model": "VAR_model_id", - "messages": [ - { - "role": "system", - "content": "You are a helpful assistant." - }, - { - "role": "user", - "content": "Hello!" 
- } - ] - python: |- - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - - completion = openai.ChatCompletion.create( - model="VAR_model_id", - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Hello!"} - ] - ) - - print(completion.choices[0].message) - node.js: |- - import OpenAI from "openai"; - - const openai = new OpenAI(); - - async function main() { - const completion = await openai.chat.completions.create({ - messages: [{ role: "system", content: "string" }], - model: "VAR_model_id", - }); - - console.log(completion.choices[0]); - } - - main(); - response: |- - { - "id": "chatcmpl-123", - "object": "chat.completion", - "created": 1677652288, - "model": "gpt-3.5-turbo-0613", - "choices": [{ - "index": 0, - "message": { - "role": "assistant", - "content": " - - Hello there, how may I assist you today?", - }, - "finish_reason": "stop" - }], - "usage": { - "prompt_tokens": 9, - "completion_tokens": 12, - "total_tokens": 21 - } - } - - title: Streaming - request: - curl: |- - curl https://api.openai.com/v1/chat/completions \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -d '{ - "model": "VAR_model_id", - "messages": [ - { - "role": "system", - "content": "You are a helpful assistant." - }, - { - "role": "user", - "content": "Hello!" 
- } - ], - "stream": true - }' - python: |- - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - - completion = openai.ChatCompletion.create( - model="VAR_model_id", - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Hello!"} - ], - stream=True - ) - - for chunk in completion: - print(chunk.choices[0].delta) - node.js: |- - import OpenAI from "openai"; - - const openai = new OpenAI(); - - async function main() { - const completion = await openai.chat.completions.create({ - model: "VAR_model_id", - messages: [ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Hello!"} - ], - stream: true, - }); - - for await (const chunk of completion) { - console.log(chunk.choices[0].delta.content); - } - } - - main(); - response: |- - { - "id": "chatcmpl-123", - "object": "chat.completion.chunk", - "created": 1677652288, - "model": "gpt-3.5-turbo", - "choices": [{ - "index": 0, - "delta": { - "content": "Hello", - }, - "finish_reason": "stop" - }] - } - /edits: - post: + $ref: '#/components/schemas/ModifyAssistantRequest' + delete: tags: - - OpenAI - operationId: createEdit - parameters: [] + - Assistants + operationId: deleteAssistant + summary: Delete an assistant. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant to delete. + schema: + type: string responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/CreateEditResponse' + $ref: '#/components/schemas/DeleteAssistantResponse' default: description: An unexpected error response. 
content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateEditRequest' - deprecated: true - /embeddings: + /assistants/{assistant_id}/files: post: tags: - - OpenAI - operationId: createEmbedding - summary: Creates an embedding vector representing the input text. - parameters: [] + - Assistants + operationId: createAssistantFile + summary: |- + Create an assistant file by attaching a [File](/docs/api-reference/files) to a + [assistant](/docs/api-reference/assistants). + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant for which to create a file. + schema: + type: string responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/CreateEmbeddingResponse' + $ref: '#/components/schemas/AssistantFileObject' default: description: An unexpected error response. content: @@ -307,63 +220,125 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CreateEmbeddingRequest' - /files: + $ref: '#/components/schemas/CreateAssistantFileRequest' get: tags: - - OpenAI - operationId: listFiles - summary: Returns a list of files that belong to the user's organization. - parameters: [] + - Assistants + operationId: listAssistantFiles + summary: Returns a list of assistant files. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant the file belongs to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending order and`desc` + for descending order. + schema: + type: string + enum: + - asc + - desc + - desc + - desc + - desc + - desc + - desc + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/ListFilesResponse' + $ref: '#/components/schemas/ListAssistantFilesResponse' default: description: An unexpected error response. content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - post: + /assistants/{assistant_id}/files/{file_id}: + get: tags: - - OpenAI - operationId: createFile - summary: Returns a list of files that belong to the user's organization. - parameters: [] + - Assistants + operationId: getAssistantFile + summary: Retrieves an assistant file. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant the file belongs to. + schema: + type: string + - name: file_id + in: path + required: true + description: The ID of the file we're getting. + schema: + type: string responses: '200': description: The request has succeeded. 
content: application/json: schema: - $ref: '#/components/schemas/OpenAIFile' + $ref: '#/components/schemas/AssistantFileObject' default: description: An unexpected error response. content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateFileRequest' - /files/files/{file_id}: - post: + delete: tags: - - OpenAI - operationId: retrieveFile - summary: Returns information about a specific file. + - Assistants + operationId: deleteAssistantFile + summary: Delete an assistant file. parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant the file belongs to. + schema: + type: string - name: file_id in: path required: true - description: The ID of the file to use for this request. + description: The ID of the file to delete. schema: type: string responses: @@ -372,56 +347,84 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/OpenAIFile' + $ref: '#/components/schemas/DeleteAssistantFileResponse' default: description: An unexpected error response. content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - delete: + /audio/speech: + post: tags: - - OpenAI - operationId: deleteFile - summary: Delete a file - parameters: - - name: file_id - in: path - required: true - description: The ID of the file to use for this request. - schema: - type: string + - Audio + operationId: createSpeech + summary: Generates audio from the input text. + parameters: [] + responses: + '200': + description: The request has succeeded. 
+ headers: + Transfer-Encoding: + required: false + description: chunked + schema: + type: string + content: + application/octet-stream: + schema: + type: string + format: binary + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateSpeechRequest' + /audio/transcriptions: + post: + tags: + - Audio + operationId: createTranscription + summary: Transcribes audio into the input language. + parameters: [] responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/DeleteFileResponse' + $ref: '#/components/schemas/CreateTranscriptionResponse' + text/plain: + schema: + type: string default: description: An unexpected error response. content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - /files/files/{file_id}/content: - get: + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateTranscriptionRequestMultiPart' + /audio/translations: + post: tags: - - OpenAI - operationId: downloadFile - summary: Returns the contents of the specified file. - parameters: - - name: file_id - in: path - required: true - description: The ID of the file to use for this request. - schema: - type: string + - Audio + operationId: createTranslation + summary: Translates audio into English.. + parameters: [] responses: '200': description: The request has succeeded. 
content: application/json: + schema: + $ref: '#/components/schemas/CreateTranslationResponse' + text/plain: schema: type: string default: @@ -430,17 +433,18 @@ paths: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - /fine-tunes: + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateTranslationRequestMultiPart' + /chat/completions: post: tags: - - OpenAI - operationId: createFineTune - summary: |- - Creates a job that fine-tunes a specified model from a given dataset. - - Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - - [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + - Chat + operationId: createChatCompletion + summary: Creates a model response for the given chat conversation. parameters: [] responses: '200': @@ -448,7 +452,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/FineTune' + $ref: '#/components/schemas/CreateChatCompletionResponse' default: description: An unexpected error response. content: @@ -460,13 +464,13 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CreateFineTuneRequest' - deprecated: true - get: + $ref: '#/components/schemas/CreateChatCompletionRequest' + /completions: + post: tags: - - OpenAI - operationId: listFineTunes - summary: List your organization's fine-tuning jobs + - Completions + operationId: createCompletion + summary: Creates a completion for the provided prompt and parameters. parameters: [] responses: '200': @@ -474,7 +478,231 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/ListFineTunesResponse' + $ref: '#/components/schemas/CreateCompletionResponse' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateCompletionRequest' + /embeddings: + post: + tags: + - Embeddings + operationId: createEmbedding + summary: Creates an embedding vector representing the input text. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEmbeddingResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEmbeddingRequest' + /files: + post: + tags: + - Files + operationId: createFile + summary: |- + Upload a file that can be used across various endpoints. The size of all the files uploaded by + one organization can be up to 100 GB. + + The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + supported. The Fine-tuning API only supports `.jsonl` files. + + Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIFile' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateFileRequestMultiPart' + get: + tags: + - Files + operationId: listFiles + summary: Returns a list of files that belong to the user's organization. 
+ parameters: + - name: purpose + in: query + required: false + description: Only return files with the given purpose. + schema: + type: string + enum: + - fine-tune + - fine-tune-results + - assistants + - assistants_output + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListFilesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /files/{file_id}: + get: + tags: + - Files + operationId: retrieveFile + summary: Returns information about a specific file. + parameters: + - name: file_id + in: path + required: true + description: The ID of the file to use for this request. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIFile' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - Files + operationId: deleteFile + summary: Delete a file + parameters: + - name: file_id + in: path + required: true + description: The ID of the file to use for this request. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteFileResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /files/{file_id}/content: + get: + tags: + - Files + operationId: downloadFile + summary: Returns the contents of the specified file. + parameters: + - name: file_id + in: path + required: true + description: The ID of the file to use for this request. + schema: + type: string + responses: + '200': + description: The request has succeeded. 
+ content: + application/json: + schema: + type: string + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /fine-tunes: + post: + tags: + - Fine-tuning + operationId: createFineTune + summary: |- + Creates a job that fine-tunes a specified model from a given dataset. + + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + + [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/FineTune' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateFineTuneRequest' + deprecated: true + get: + tags: + - Fine-tuning + operationId: listFineTunes + summary: List your organization's fine-tuning jobs + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListFineTunesResponse' default: description: An unexpected error response. content: @@ -485,7 +713,7 @@ paths: /fine-tunes/{fine_tune_id}: get: tags: - - OpenAI + - Fine-tuning operationId: retrieveFineTune summary: |- Gets info about the fine-tune job. @@ -515,7 +743,7 @@ paths: /fine-tunes/{fine_tune_id}/cancel: post: tags: - - OpenAI + - Fine-tuning operationId: cancelFineTune summary: Immediately cancel a fine-tune job. parameters: @@ -542,7 +770,7 @@ paths: /fine-tunes/{fine_tune_id}/events: get: tags: - - OpenAI + - Fine-tuning operationId: listFineTuneEvents summary: Get fine-grained status updates for a fine-tune job. 
parameters: @@ -583,7 +811,7 @@ paths: /fine_tuning/jobs: post: tags: - - OpenAI + - Fine-tuning operationId: createFineTuningJob description: |- Creates a job that fine-tunes a specified model from a given dataset. @@ -614,7 +842,7 @@ paths: $ref: '#/components/schemas/CreateFineTuningJobRequest' get: tags: - - OpenAI + - Fine-tuning operationId: listPaginatedFineTuningJobs parameters: - name: after @@ -647,7 +875,7 @@ paths: /fine_tuning/jobs/{fine_tuning_job_id}: get: tags: - - OpenAI + - Fine-tuning operationId: retrieveFineTuningJob summary: |- Get info about a fine-tuning job. @@ -675,7 +903,7 @@ paths: /fine_tuning/jobs/{fine_tuning_job_id}/cancel: post: tags: - - OpenAI + - Fine-tuning operationId: cancelFineTuningJob summary: Immediately cancel a fine-tune job. parameters: @@ -701,7 +929,7 @@ paths: /fine_tuning/jobs/{fine_tuning_job_id}/events: get: tags: - - OpenAI + - Fine-tuning operationId: listFineTuningEvents summary: Get status updates for a fine-tuning job. parameters: @@ -740,7 +968,7 @@ paths: /images/edits: post: tags: - - OpenAI + - Images operationId: createImageEdit summary: Creates an edited or extended image given an original image and a prompt. parameters: [] @@ -762,11 +990,11 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/CreateImageEditRequest' + $ref: '#/components/schemas/CreateImageEditRequestMultiPart' /images/generations: post: tags: - - OpenAI + - Images operationId: createImage summary: Creates an image given a prompt parameters: [] @@ -792,7 +1020,7 @@ paths: /images/variations: post: tags: - - OpenAI + - Images operationId: createImageVariation summary: Creates an edited or extended image given an original image and a prompt. 
parameters: [] @@ -814,11 +1042,11 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/CreateImageVariationRequest' + $ref: '#/components/schemas/CreateImageVariationRequestMultiPart' /models: get: tags: - - OpenAI + - Models operationId: listModels summary: |- Lists the currently available models, and provides basic information about each one such as the @@ -840,7 +1068,7 @@ paths: /models/{model}: get: tags: - - OpenAI + - Models operationId: retrieveModel summary: |- Retrieves a model instance, providing basic information about the model such as the owner and @@ -867,7 +1095,7 @@ paths: $ref: '#/components/schemas/ErrorResponse' delete: tags: - - OpenAI + - Models operationId: deleteModel summary: Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. parameters: @@ -893,7 +1121,7 @@ paths: /moderations: post: tags: - - OpenAI + - Moderations operationId: createModeration summary: Classifies if text violates OpenAI's Content Policy parameters: [] @@ -916,89 +1144,2623 @@ paths: application/json: schema: $ref: '#/components/schemas/CreateModerationRequest' -security: - - BearerAuth: [] -components: - schemas: - ChatCompletionFunctionCallOption: - type: object - required: - - name - properties: - name: - type: string - description: The name of the function to call. - ChatCompletionFunctionParameters: - type: object - additionalProperties: {} - ChatCompletionFunctions: - type: object - required: - - name - - parameters - properties: - name: - type: string - description: |- - The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and - dashes, with a maximum length of 64. - description: - type: string - description: |- - A description of what the function does, used by the model to choose when and how to call the - function. 
- parameters: - allOf: - - $ref: '#/components/schemas/ChatCompletionFunctionParameters' - description: |- - The parameters the functions accepts, described as a JSON Schema object. See the - [guide](/docs/guides/gpt/function-calling) for examples, and the - [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation - about the format.\n\nTo describe a function that accepts no parameters, provide the value - `{\"type\": \"object\", \"properties\": {}}`. - ChatCompletionRequestMessage: - type: object - required: - - role - - content - properties: - role: - type: string - enum: - - system - - user - - assistant - - function - description: The role of the messages author. One of `system`, `user`, `assistant`, or `function`. - content: - type: string - nullable: true - description: |- - The contents of the message. `content` is required for all messages, and may be null for - assistant messages with function calls. - name: - type: string - description: |- - The name of the author of this message. `name` is required if role is `function`, and it - should be the name of the function whose response is in the `content`. May contain a-z, - A-Z, 0-9, and underscores, with a maximum length of 64 characters. - function_call: - type: object - description: The name and arguments of a function that should be called, as generated by the model. - required: - - name + /threads: + post: + tags: + - Assistants + operationId: createThread + summary: Create a thread. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ThreadObject' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateThreadRequest' + /threads/runs: + post: + tags: + - Assistants + operationId: createThreadAndRun + summary: Create a thread and run it in one request. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateThreadAndRunRequest' + /threads/thread_id}/runs/{thread_id}: + get: + tags: + - Assistants + operationId: listRuns + summary: Returns a list of runs belonging to a thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread the run belongs to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + type: string + enum: + - asc + - desc + - desc + - desc + - desc + - desc + - desc + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. 
+ schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListRunsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}: + get: + tags: + - Assistants + operationId: getThread + summary: Retrieves a thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to retrieve. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ThreadObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + post: + tags: + - Assistants + operationId: modifyThread + summary: Modifies a thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to modify. Only the `metadata` can be modified. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ThreadObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ModifyThreadRequest' + delete: + tags: + - Assistants + operationId: deleteThread + summary: Delete a thread. 
+ parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to delete. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteThreadResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/messages: + post: + tags: + - Assistants + operationId: createMessage + summary: Create a message. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) to create a message for. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/MessageObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateMessageRequest' + get: + tags: + - Assistants + operationId: listMessages + summary: Returns a list of messages for a given thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) the messages belong to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. 
+ schema: + type: string + enum: + - asc + - desc + - desc + - desc + - desc + - desc + - desc + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListMessagesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/messages/{message_id}: + get: + tags: + - Assistants + operationId: getMessage + summary: Retrieve a message. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) to which this message belongs. + schema: + type: string + - name: message_id + in: path + required: true + description: The ID of the message to retrieve. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/MessageObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + post: + tags: + - Assistants + operationId: modifyMessage + summary: Modifies a message. 
+ parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to which this message belongs. + schema: + type: string + - name: message_id + in: path + required: true + description: The ID of the message to modify. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/MessageObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ModifyMessageRequest' + /threads/{thread_id}/messages/{message_id}/files: + get: + tags: + - Assistants + operationId: listMessageFiles + summary: Returns a list of message files. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread that the message and files belong to. + schema: + type: string + - name: message_id + in: path + required: true + description: The ID of the message that the files belongs to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + type: string + enum: + - asc + - desc + - desc + - desc + - desc + - desc + - desc + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
+ For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListMessageFilesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/messages/{message_id}/files/{file_id}: + get: + tags: + - Assistants + operationId: getMessageFile + summary: Retrieves a message file. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to which the message and File belong. + schema: + type: string + - name: message_id + in: path + required: true + description: The ID of the message the file belongs to. + schema: + type: string + - name: file_id + in: path + required: true + description: The ID of the file being retrieved. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/MessageFileObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs: + post: + tags: + - Assistants + operationId: createRun + summary: Create a run. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to run. 
+ schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateRunRequest' + /threads/{thread_id}/runs/{run_id}: + get: + tags: + - Assistants + operationId: getRun + summary: Retrieves a run. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) that was run. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run to retrieve. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + post: + tags: + - Assistants + operationId: modifyRun + summary: Modifies a run. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) that was run. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run to modify. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ModifyRunRequest' + /threads/{thread_id}/runs/{run_id}/cancel: + post: + tags: + - Assistants + operationId: cancelRun + summary: Cancels a run that is `in_progress`. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to which this run belongs. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run to cancel. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs/{run_id}/steps: + get: + tags: + - Assistants + operationId: listRunSteps + summary: Returns a list of run steps belonging to a run. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread the run and run steps belong to. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run the run steps belong to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + type: string + enum: + - asc + - desc + - desc + - desc + - desc + - desc + - desc + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. 
`after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListRunStepsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs/{run_id}/steps/{step_id}: + get: + tags: + - Assistants + operationId: getRunStep + summary: Retrieves a run step. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to which the run and run step belongs. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run to which the run step belongs. + schema: + type: string + - name: step_id + in: path + required: true + description: The ID of the run step to retrieve. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunStepObject' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: + post: + tags: + - Assistants + operationId: submitToolOuputsToRun + summary: |- + When a run has the `status: "requires_action"` and `required_action.type` is + `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once + they're all completed. All outputs must be submitted in a single request. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) to which this run belongs. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run that requires the tool output submission. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/SubmitToolOutputsRunRequest' +security: + - BearerAuth: [] +components: + schemas: + AssistantFileObject: + type: object + required: + - id + - object + - created_at + - assistant_id + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints. + object: + type: string + enum: + - assistant.file + description: The object type, which is always `assistant.file`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the assistant file was created. + assistant_id: + type: string + description: The assistant ID that the file is attached to. + description: A list of [Files](/docs/api-reference/files) attached to an `assistant`. 
+ AssistantObject: + type: object + required: + - id + - object + - created_at + - name + - description + - model + - instructions + - tools + - file_ids + - metadata + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints. + object: + type: string + enum: + - assistant + description: The object type, which is always `assistant`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the assistant was created. + name: + type: string + nullable: true + maxLength: 256 + description: The name of the assistant. The maximum length is 256 characters. + description: + type: string + nullable: true + maxLength: 512 + description: The description of the assistant. The maximum length is 512 characters. + model: + type: string + description: |- + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + instructions: + type: string + nullable: true + maxLength: 32768 + description: The system instructions that the assistant uses. The maximum length is 32768 characters. + tools: + allOf: + - $ref: '#/components/schemas/CreateAssistantRequestToolsItem' + description: |- + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + Tools can be of types `code_interpreter`, `retrieval`, or `function`. + default: [] + file_ids: + type: array + items: + type: string + maxItems: 20 + description: |- + A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + maximum of 20 files attached to the assistant. Files are ordered by their creation date in + ascending order. + default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + description: Represents an `assistant` that can call the model and use tools. + AssistantToolsCode: + type: object + required: + - type + properties: + type: + type: string + enum: + - code_interpreter + description: 'The type of tool being defined: `code_interpreter`' + AssistantToolsFunction: + type: object + required: + - type + - function + properties: + type: + type: string + enum: + - function + description: 'The type of tool being defined: `function`' + function: + $ref: '#/components/schemas/FunctionObject' + AssistantToolsRetrieval: + type: object + required: + - type + properties: + type: + type: string + enum: + - retrieval + description: 'The type of tool being defined: `retrieval`' + AudioSegment: + type: object + required: + - id + - seek + - start + - end + - text + - tokens + - temperature + - avg_logprob + - compression_ratio + - no_speech_prob + properties: + id: + type: integer + format: int64 + description: The zero-based index of this segment. + seek: + type: integer + format: int64 + description: |- + The seek position associated with the processing of this audio segment. Seek positions are + expressed as hundredths of seconds. The model may process several segments from a single seek + position, so while the seek position will never represent a later time than the segment's + start, the segment's start may represent a significantly later time than the segment's + associated seek position. + start: + type: number + format: double + description: The time at which this segment started relative to the beginning of the audio. + end: + type: number + format: double + description: The time at which this segment ended relative to the beginning of the audio. 
+ text: + type: string + description: The text that was part of this audio segment. + tokens: + allOf: + - $ref: '#/components/schemas/TokenArrayItem' + description: The token IDs matching the text in this audio segment. + temperature: + type: number + format: double + minimum: 0 + maximum: 1 + description: The temperature score associated with this audio segment. + avg_logprob: + type: number + format: double + description: The average log probability associated with this audio segment. + compression_ratio: + type: number + format: double + description: The compression ratio of this audio segment. + no_speech_prob: + type: number + format: double + description: The probability of no speech detection within this audio segment. + ChatCompletionFunctionCallOption: + type: object + required: + - name + properties: + name: + type: string + description: The name of the function to call. + description: |- + Specifying a particular function via `{"name": "my_function"}` forces the model to call that + function. + ChatCompletionFunctions: + type: object + required: + - name + properties: + description: + type: string + description: |- + A description of what the function does, used by the model to choose when and how to call the + function. + name: + type: string + description: |- + The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + dashes, with a maximum length of 64. + parameters: + $ref: '#/components/schemas/FunctionParameters' + deprecated: true + ChatCompletionMessageToolCall: + type: object + required: + - id + - type + - function + properties: + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: + - function + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. 
+ arguments: + type: string + description: |- + The arguments to call the function with, as generated by the model in JSON format. Note that + the model does not always generate valid JSON, and may hallucinate parameters not defined by + your function schema. Validate the arguments in your code before calling your function. + required: + - name + - arguments + description: The function that the model called. + ChatCompletionMessageToolCallsItem: + type: array + items: + $ref: '#/components/schemas/ChatCompletionMessageToolCall' + description: The tool calls generated by the model, such as function calls. + ChatCompletionNamedToolChoice: + type: object + required: + - type + - function + properties: + type: + type: string + enum: + - function + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + description: Specifies a tool the model should use. Use to force the model to call a specific function. + ChatCompletionRequestAssistantMessage: + type: object + required: + - role + properties: + content: + type: string + nullable: true + description: |- + The contents of the assistant message. Required unless `tool_calls` or `function_call` is' + specified. + role: + type: string + enum: + - assistant + description: The role of the messages author, in this case `assistant`. + name: + type: string + description: |- + An optional name for the participant. Provides the model information to differentiate between + participants of the same role. + tool_calls: + $ref: '#/components/schemas/ChatCompletionMessageToolCallsItem' + function_call: + type: object + properties: + arguments: + type: string + description: |- + The arguments to call the function with, as generated by the model in JSON format. 
Note that + the model does not always generate valid JSON, and may hallucinate parameters not defined by + your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - arguments + - name + description: |- + Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be + called, as generated by the model. + deprecated: true + ChatCompletionRequestFunctionMessage: + type: object + required: + - role + - content + - name + properties: + role: + type: string + enum: + - function + description: The role of the messages author, in this case `function`. + content: + type: string + nullable: true + description: The contents of the function message. + name: + type: string + description: The name of the function to call. + ChatCompletionRequestMessage: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' + - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' + - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' + - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' + - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' + x-oaiExpandable: true + ChatCompletionRequestMessageContentPart: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartImage' + x-oaiExpandable: true + ChatCompletionRequestMessageContentPartImage: + type: object + required: + - type + - image_url + properties: + type: + type: string + enum: + - image_url + description: The type of the content part. + image_url: + type: object + properties: + url: + anyOf: + - type: string + format: uri + - type: string + description: Either a URL of the image or the base64 encoded image data. 
+            detail:
+              type: string
+              enum:
+                - auto
+                - low
+                - high
+              description: |-
+                Specifies the detail level of the image. Learn more in the
+                [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).
+              default: auto
+          required:
+            - url
+      ChatCompletionRequestMessageContentPartText:
+        type: object
+        required:
+          - type
+          - text
+        properties:
+          type:
+            type: string
+            enum:
+              - text
+            description: The type of the content part.
+          text:
+            type: string
+            description: The text content.
+      ChatCompletionRequestMessageContentParts:
+        type: array
+        items:
+          $ref: '#/components/schemas/ChatCompletionRequestMessageContentPart'
+        minItems: 1
+      ChatCompletionRequestSystemMessage:
+        type: object
+        required:
+          - content
+          - role
+        properties:
+          content:
+            type: string
+            description: The contents of the system message.
+            x-oaiExpandable: true
+          role:
+            type: string
+            enum:
+              - system
+            description: The role of the messages author, in this case `system`.
+          name:
+            type: string
+            description: |-
+              An optional name for the participant. Provides the model information to differentiate between
+              participants of the same role.
+      ChatCompletionRequestToolMessage:
+        type: object
+        required:
+          - role
+          - content
+          - tool_call_id
+        properties:
+          role:
+            type: string
+            enum:
+              - tool
+            description: The role of the messages author, in this case `tool`.
+          content:
+            type: string
+            description: The contents of the tool message.
+          tool_call_id:
+            type: string
+            description: Tool call that this message is responding to.
+      ChatCompletionRequestUserMessage:
+        type: object
+        required:
+          - content
+          - role
+        properties:
+          content:
+            allOf:
+              - $ref: '#/components/schemas/ChatCompletionRequestUserMessageContent'
+            description: The contents of the user message.
+            x-oaiExpandable: true
+          role:
+            type: string
+            enum:
+              - user
+            description: The role of the messages author, in this case `user`.
+          name:
+            type: string
+            description: |-
+              An optional name for the participant. Provides the model information to differentiate between
+              participants of the same role.
+      ChatCompletionRequestUserMessageContent:
+        oneOf:
+          - type: string
+          - $ref: '#/components/schemas/ChatCompletionRequestMessageContentParts'
+      ChatCompletionResponseMessage:
+        type: object
+        required:
+          - content
+          - role
+        properties:
+          content:
+            type: string
+            nullable: true
+            description: The contents of the message.
+          tool_calls:
+            $ref: '#/components/schemas/ChatCompletionMessageToolCallsItem'
+          role:
+            type: string
+            enum:
+              - assistant
+            description: The role of the author of this message.
+          function_call:
+            type: object
+            properties:
+              arguments:
+                type: string
+                description: |-
+                  The arguments to call the function with, as generated by the model in JSON format. Note that
+                  the model does not always generate valid JSON, and may hallucinate parameters not defined by
+                  your function schema. Validate the arguments in your code before calling your function.
+              name:
+                type: string
+                description: The name of the function to call.
+            required:
+              - arguments
+              - name
+            description: Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.
+            deprecated: true
+      ChatCompletionTokenLogprob:
+        type: object
+        required:
+          - token
+          - logprob
+          - bytes
+          - top_logprobs
+        properties:
+          token:
+            type: string
+            description: The token.
+          logprob:
+            type: number
+            format: double
+            description: The log probability of this token.
+          bytes:
+            type: array
+            items:
+              type: integer
+              format: int64
+            nullable: true
+            description: |-
+              A list of integers representing the UTF-8 bytes representation of the token. Useful in
+              instances where characters are represented by multiple tokens and their byte representations
+              must be combined to generate the correct text representation. Can be `null` if there is no
+              bytes representation for the token.
+          top_logprobs:
+            type: array
+            items:
+              type: object
+              properties:
+                token:
+                  type: string
+                  description: The token.
+                logprob:
+                  type: number
+                  format: double
+                  description: The log probability of this token.
+                bytes:
+                  type: array
+                  items:
+                    type: integer
+                    format: int64
+                  nullable: true
+                  description: |-
+                    A list of integers representing the UTF-8 bytes representation of the token. Useful in
+                    instances where characters are represented by multiple tokens and their byte representations
+                    must be combined to generate the correct text representation. Can be `null` if there is no
+                    bytes representation for the token.
+              required:
+                - token
+                - logprob
+                - bytes
+            description: |-
+              List of the most likely tokens and their log probability, at this token position. In rare
+              cases, there may be fewer than the number of requested `top_logprobs` returned.
+      ChatCompletionTool:
+        type: object
+        required:
+          - type
+          - function
+        properties:
+          type:
+            type: string
+            enum:
+              - function
+            description: The type of the tool. Currently, only `function` is supported.
+          function:
+            $ref: '#/components/schemas/FunctionObject'
+      ChatCompletionToolChoiceOption:
+        oneOf:
+          - type: string
+            enum:
+              - none
+              - auto
+          - $ref: '#/components/schemas/ChatCompletionNamedToolChoice'
+        description: |-
+          Controls which (if any) function is called by the model. `none` means the model will not call a
+          function and instead generates a message. `auto` means the model can pick between generating a
+          message or calling a function. Specifying a particular function via
+          `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that
+          function.
+
+          `none` is the default when no functions are present. `auto` is the default if functions are
+          present.
+ x-oaiExpandable: true + CompletionUsage: + type: object + required: + - prompt_tokens + - completion_tokens + - total_tokens + properties: + prompt_tokens: + type: integer + format: int64 + description: Number of tokens in the prompt. + completion_tokens: + type: integer + format: int64 + description: Number of tokens in the generated completion + total_tokens: + type: integer + format: int64 + description: Total number of tokens used in the request (prompt + completion). + description: Usage statistics for the completion request. + CreateAssistantFileRequest: + type: object + required: + - file_id + properties: + file_id: + type: string + description: |- + A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should + use. Useful for tools like `retrieval` and `code_interpreter` that can access files. + CreateAssistantRequest: + type: object + required: + - model + properties: + model: + type: string + description: |- + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + name: + type: string + nullable: true + maxLength: 256 + description: The name of the assistant. The maximum length is 256 characters. + description: + type: string + nullable: true + maxLength: 512 + description: The description of the assistant. The maximum length is 512 characters. + instructions: + type: string + nullable: true + maxLength: 32768 + description: The system instructions that the assistant uses. The maximum length is 32768 characters. + tools: + allOf: + - $ref: '#/components/schemas/CreateAssistantRequestToolsItem' + description: |- + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + Tools can be of types `code_interpreter`, `retrieval`, or `function`. 
+ default: []
+ file_ids:
+ type: array
+ items:
+ type: string
+ maxItems: 20
+ description: |-
+ A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a
+ maximum of 20 files attached to the assistant. Files are ordered by their creation date in
+ ascending order.
+ default: []
+ metadata:
+ type: object
+ additionalProperties:
+ type: string
+ nullable: true
+ description: |-
+ Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ additional information about the object in a structured format. Keys can be a maximum of 64
+ characters long and values can be a maximum of 512 characters long.
+ x-oaiTypeLabel: map
+ CreateAssistantRequestTool:
+ oneOf:
+ - $ref: '#/components/schemas/AssistantToolsCode'
+ - $ref: '#/components/schemas/AssistantToolsRetrieval'
+ - $ref: '#/components/schemas/AssistantToolsFunction'
+ x-oaiExpandable: true
+ CreateAssistantRequestToolsItem:
+ type: array
+ items:
+ $ref: '#/components/schemas/CreateAssistantRequestTool'
+ maxItems: 128
+ CreateChatCompletionRequest:
+ type: object
+ required:
+ - messages
+ - model
+ properties:
+ messages:
+ type: array
+ items:
+ $ref: '#/components/schemas/ChatCompletionRequestMessage'
+ minItems: 1
+ description: |-
+ A list of messages comprising the conversation so far.
+ [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).
+ model:
+ anyOf:
+ - type: string
+ - type: string
+ enum:
+ - gpt-4-0125-preview
+ - gpt-4-turbo-preview
+ - gpt-4-1106-preview
+ - gpt-4-vision-preview
+ - gpt-4
+ - gpt-4-0314
+ - gpt-4-0613
+ - gpt-4-32k
+ - gpt-4-32k-0314
+ - gpt-4-32k-0613
+ - gpt-3.5-turbo
+ - gpt-3.5-turbo-16k
+ - gpt-3.5-turbo-0301
+ - gpt-3.5-turbo-0613
+ - gpt-3.5-turbo-1106
+ - gpt-3.5-turbo-16k-0613
+ description: |-
+ ID of the model to use. 
See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) + table for details on which models work with the Chat API. + x-oaiTypeLabel: string + frequency_penalty: + type: number + format: double + nullable: true + minimum: -2 + maximum: 2 + description: |- + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + frequency in the text so far, decreasing the model's likelihood to repeat the same line + verbatim. + + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + default: 0 + logit_bias: + type: object + additionalProperties: + type: integer + format: int64 + nullable: true + description: |- + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an + associated bias value from -100 to 100. Mathematically, the bias is added to the logits + generated by the model prior to sampling. The exact effect will vary per model, but values + between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + should result in a ban or exclusive selection of the relevant token. + x-oaiTypeLabel: map + default: null + logprobs: + type: boolean + nullable: true + description: |- + Whether to return log probabilities of the output tokens or not. If true, returns the log + probabilities of each output token returned in the `content` of `message`. This option is + currently not available on the `gpt-4-vision-preview` model. + default: false + top_logprobs: + type: integer + format: int64 + nullable: true + minimum: 0 + maximum: 5 + description: |- + An integer between 0 and 5 specifying the number of most likely tokens to return at each token + position, each with an associated log probability. `logprobs` must be set to `true` if this + parameter is used. 
+ max_tokens: + type: integer + format: int64 + nullable: true + minimum: 0 + description: |- + The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. + [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + for counting tokens. + default: 16 + n: + type: integer + format: int64 + nullable: true + minimum: 1 + maximum: 128 + description: |- + How many chat completion choices to generate for each input message. Note that you will be + charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to + minimize costs. + default: 1 + presence_penalty: + type: number + format: double + nullable: true + minimum: -2 + maximum: 2 + description: |- + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + in the text so far, increasing the model's likelihood to talk about new topics. + + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + default: 0 + response_format: + type: object + properties: + type: + type: string + enum: + - text + - json_object + description: Must be one of `text` or `json_object`. + default: text + description: |- + An object specifying the format that the model must output. Compatible with + [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the + model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + yourself via a system or user message. Without this, the model may generate an unending stream + of whitespace until the generation reaches the token limit, resulting in a long-running and + seemingly "stuck" request. 
Also note that the message content may be partially cut off if + `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the + conversation exceeded the max context length. + seed: + type: integer + format: int64 + nullable: true + minimum: -9223372036854776000 + maximum: 9223372036854776000 + description: |- + This feature is in Beta. + + If specified, our system will make a best effort to sample deterministically, such that + repeated requests with the same `seed` and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + stop: + oneOf: + - $ref: '#/components/schemas/Stop' + nullable: true + description: Up to 4 sequences where the API will stop generating further tokens. + default: null + stream: + type: boolean + nullable: true + description: |- + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. + [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + default: false + temperature: + type: number + format: double + nullable: true + minimum: 0 + maximum: 2 + description: |- + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + default: 1 + top_p: + type: number + format: double + nullable: true + minimum: 0 + maximum: 1 + description: |- + An alternative to sampling with temperature, called nucleus sampling, where the model considers + the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising
+ the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ default: 1
+ tools:
+ type: array
+ items:
+ $ref: '#/components/schemas/ChatCompletionTool'
+ description: |-
+ A list of tools the model may call. Currently, only functions are supported as a tool. Use this
+ to provide a list of functions the model may generate JSON inputs for.
+ tool_choice:
+ $ref: '#/components/schemas/ChatCompletionToolChoiceOption'
+ user:
+ allOf:
+ - $ref: '#/components/schemas/User'
+ description: |-
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect
+ abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ function_call:
+ anyOf:
+ - type: string
+ enum:
+ - none
+ - auto
+ - $ref: '#/components/schemas/ChatCompletionFunctionCallOption'
+ description: |-
+ Deprecated in favor of `tool_choice`.
+
+ Controls which (if any) function is called by the model. `none` means the model will not call a
+ function and instead generates a message. `auto` means the model can pick between generating a
+ message or calling a function. Specifying a particular function via `{"name": "my_function"}`
+ forces the model to call that function.
+
+ `none` is the default when no functions are present. `auto` is the default if functions are
+ present.
+ deprecated: true
+ x-oaiExpandable: true
+ functions:
+ type: array
+ items:
+ $ref: '#/components/schemas/ChatCompletionFunctions'
+ minItems: 1
+ maxItems: 128
+ description: |-
+ Deprecated in favor of `tools`.
+
+ A list of functions the model may generate JSON inputs for.
+ deprecated: true
+ CreateChatCompletionResponse:
+ type: object
+ required:
+ - id
+ - choices
+ - created
+ - model
+ - object
+ properties:
+ id:
+ type: string
+ description: A unique identifier for the chat completion. 
+ choices:
+ type: array
+ items:
+ type: object
+ properties:
+ finish_reason:
+ type: string
+ enum:
+ - stop
+ - length
+ - tool_calls
+ - content_filter
+ - function_call
+ description: |-
+ The reason the model stopped generating tokens. This will be `stop` if the model hit a
+ natural stop point or a provided stop sequence, `length` if the maximum number of tokens
+ specified in the request was reached, `content_filter` if content was omitted due to a flag
+ from our content filters, `tool_calls` if the model called a tool, or `function_call`
+ (deprecated) if the model called a function.
+ index:
+ type: integer
+ format: int64
+ description: The index of the choice in the list of choices.
+ message:
+ $ref: '#/components/schemas/ChatCompletionResponseMessage'
+ logprobs:
+ type: object
+ properties:
+ content:
+ type: array
+ items:
+ $ref: '#/components/schemas/ChatCompletionTokenLogprob'
+ nullable: true
+ required:
+ - content
+ nullable: true
+ description: Log probability information for the choice.
+ required:
+ - finish_reason
+ - index
+ - message
+ - logprobs
+ description: A list of chat completion choices. Can be more than one if `n` is greater than 1.
+ created:
+ type: integer
+ format: unixtime
+ description: The Unix timestamp (in seconds) of when the chat completion was created.
+ model:
+ type: string
+ description: The model used for the chat completion.
+ system_fingerprint:
+ type: string
+ description: |-
+ This fingerprint represents the backend configuration that the model runs with.
+
+ Can be used in conjunction with the `seed` request parameter to understand when backend changes
+ have been made that might impact determinism.
+ object:
+ type: string
+ enum:
+ - chat.completion
+ description: The object type, which is always `chat.completion`.
+ usage:
+ $ref: '#/components/schemas/CompletionUsage'
+ description: Represents a chat completion response returned by model, based on the provided input. 
+ CreateCompletionRequest: + type: object + required: + - model + - prompt + properties: + model: + anyOf: + - type: string + - type: string + enum: + - gpt-3.5-turbo-instruct + - davinci-002 + - babbage-002 + description: |- + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + x-oaiTypeLabel: string + prompt: + oneOf: + - $ref: '#/components/schemas/Prompt' + nullable: true + description: |- + The prompt(s) to generate completions for, encoded as a string, array of strings, array of + tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during training, so if a + prompt is not specified the model will generate as if from the beginning of a new document. + default: <|endoftext|> + best_of: + type: integer + format: int64 + nullable: true + minimum: 0 + maximum: 20 + description: |- + Generates `best_of` completions server-side and returns the "best" (the one with the highest + log probability per token). Results cannot be streamed. + + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly consume your token + quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + default: 1 + echo: + type: boolean + nullable: true + description: Echo back the prompt in addition to the completion + default: false + frequency_penalty: + type: number + format: double + nullable: true + minimum: -2 + maximum: 2 + description: |- + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + frequency in the text so far, decreasing the model's likelihood to repeat the same line + verbatim. 
+ + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + default: 0 + logit_bias: + type: object + additionalProperties: + type: integer + format: int64 + nullable: true + description: |- + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an + associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) + to convert text to token IDs. Mathematically, the bias is added to the logits generated by the + model prior to sampling. The exact effect will vary per model, but values between -1 and 1 + should decrease or increase likelihood of selection; values like -100 or 100 should result in a + ban or exclusive selection of the relevant token. + + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being + generated. + x-oaiTypeLabel: map + default: null + logprobs: + type: integer + format: int64 + nullable: true + minimum: 0 + maximum: 5 + description: |- + Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. + For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The + API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + elements in the response. + + The maximum value for `logprobs` is 5. + default: null + max_tokens: + type: integer + format: int64 + nullable: true + minimum: 0 + description: |- + The maximum number of [tokens](/tokenizer) to generate in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + for counting tokens. 
+ default: 16 + n: + type: integer + format: int64 + nullable: true + minimum: 1 + maximum: 128 + description: |- + How many completions to generate for each prompt. + + **Note:** Because this parameter generates many completions, it can quickly consume your token + quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + default: 1 + presence_penalty: + type: number + format: double + nullable: true + minimum: -2 + maximum: 2 + description: |- + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + in the text so far, increasing the model's likelihood to talk about new topics. + + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + default: 0 + seed: + type: integer + format: int64 + nullable: true + minimum: -9223372036854776000 + maximum: 9223372036854776000 + description: |- + If specified, our system will make a best effort to sample deterministically, such that + repeated requests with the same `seed` and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + stop: + oneOf: + - $ref: '#/components/schemas/Stop' + nullable: true + description: Up to 4 sequences where the API will stop generating further tokens. + default: null + stream: + type: boolean + nullable: true + description: |- + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. + [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). 
+ default: false + suffix: + type: string + nullable: true + description: The suffix that comes after a completion of inserted text. + default: null + temperature: + type: number + format: double + nullable: true + minimum: 0 + maximum: 2 + description: |- + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + default: 1 + top_p: + type: number + format: double + nullable: true + minimum: 0 + maximum: 1 + description: |- + An alternative to sampling with temperature, called nucleus sampling, where the model considers + the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + default: 1 + user: + allOf: + - $ref: '#/components/schemas/User' + description: |- + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + CreateCompletionResponse: + type: object + required: + - id + - choices + - created + - model + - object + properties: + id: + type: string + description: A unique identifier for the completion. 
+ choices:
+ type: array
+ items:
+ type: object
+ properties:
+ index:
+ type: integer
+ format: int64
+ text:
+ type: string
+ logprobs:
+ type: object
+ properties:
+ tokens:
+ type: array
+ items:
+ type: string
+ token_logprobs:
+ type: array
+ items:
+ type: number
+ format: double
+ top_logprobs:
+ type: array
+ items:
+ type: object
+ additionalProperties:
+ type: integer
+ format: int64
+ text_offset:
+ type: array
+ items:
+ type: integer
+ format: int64
+ required:
+ - tokens
+ - token_logprobs
+ - top_logprobs
+ - text_offset
+ nullable: true
+ finish_reason:
+ type: string
+ enum:
+ - stop
+ - length
+ - tool_calls
+ - content_filter
+ - function_call
+ description: |-
+ The reason the model stopped generating tokens. This will be `stop` if the model hit a
+ natural stop point or a provided stop sequence, `length` if the maximum number of tokens
+ specified in the request was reached, or `content_filter` if content was omitted due to a
+ flag from our content filters.
+ required:
+ - index
+ - text
+ - logprobs
+ - finish_reason
+ description: The list of completion choices the model generated for the input.
+ created:
+ type: integer
+ format: unixtime
+ description: The Unix timestamp (in seconds) of when the completion was created.
+ model:
+ type: string
+ description: The model used for the completion.
+ system_fingerprint:
+ type: string
+ description: |-
+ This fingerprint represents the backend configuration that the model runs with.
+
+ Can be used in conjunction with the `seed` request parameter to understand when backend changes
+ have been made that might impact determinism.
+ object:
+ type: string
+ enum:
+ - text_completion
+ description: The object type, which is always `text_completion`.
+ usage:
+ allOf:
+ - $ref: '#/components/schemas/CompletionUsage'
+ description: Usage statistics for the completion request. 
+ description: |- + Represents a completion response from the API. Note: both the streamed and non-streamed response + objects share the same shape (unlike the chat endpoint). + CreateEmbeddingRequest: + type: object + required: + - input + - model + properties: + input: + allOf: + - $ref: '#/components/schemas/CreateEmbeddingRequestInput' + description: |- + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + single request, pass an array of strings or array of token arrays. Each input must not exceed + the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + empty string. + [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + for counting tokens. + x-oaiExpandable: true + model: + anyOf: + - type: string + - type: string + enum: + - text-embedding-ada-002 + - text-embedding-3-small + - text-embedding-3-large + description: |- + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + x-oaiTypeLabel: string + encoding_format: + type: string + enum: + - float + - base64 + description: |- + The format to return the embeddings in. Can be either `float` or + [`base64`](https://pypi.org/project/pybase64/). + default: float + dimensions: + type: integer + format: int64 + minimum: 1 + description: |- + The number of dimensions the resulting output embeddings should have. Only supported in + `text-embedding-3` and later models. + user: + allOf: + - $ref: '#/components/schemas/User' + description: |- + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). 
+ CreateEmbeddingRequestInput: + oneOf: + - type: string + - type: array + items: + type: string + - $ref: '#/components/schemas/TokenArrayItem' + - $ref: '#/components/schemas/TokenArrayArray' + CreateEmbeddingResponse: + type: object + required: + - data + - model + - object + - usage + properties: + data: + type: array + items: + $ref: '#/components/schemas/Embedding' + description: The list of embeddings generated by the model. + model: + type: string + description: The name of the model used to generate the embedding. + object: + type: string + enum: + - list + description: The object type, which is always "list". + usage: + type: object + properties: + prompt_tokens: + type: integer + format: int64 + description: The number of tokens used by the prompt. + total_tokens: + type: integer + format: int64 + description: The total number of tokens used by the request. + required: + - prompt_tokens + - total_tokens + description: The usage information for the request. + CreateFileRequestMultiPart: + type: object + required: + - file + - purpose + properties: + file: + type: string + format: binary + description: The file object (not file name) to be uploaded. + purpose: + type: string + enum: + - fine-tune + - assistants + description: |- + The intended purpose of the uploaded file. Use "fine-tune" for + [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for + [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This + allows us to validate the format of the uploaded file is correct for fine-tuning. + CreateFineTuneRequest: + type: object + required: + - training_file + properties: + training_file: + type: string + description: |- + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/upload) for how to upload a file. + + Your dataset must be formatted as a JSONL file, where each training example is a JSON object + with the keys "prompt" and "completion". 
Additionally, you must upload your file with the + purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + details. + validation_file: + type: string + nullable: true + description: |- + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation metrics periodically during + fine-tuning. These metrics can be viewed in the + [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + Your train and validation data should be mutually exclusive. + + Your dataset must be formatted as a JSONL file, where each validation example is a JSON object + with the keys "prompt" and "completion". Additionally, you must upload your file with the + purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + details. + model: + anyOf: + - type: string + - type: string + enum: + - ada + - babbage + - curie + - davinci + nullable: true + description: |- + The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", + "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more + about these models, see the [Models](/docs/models) documentation. + x-oaiTypeLabel: string + n_epochs: + type: integer + format: int64 + nullable: true + description: |- + The number of epochs to train the model for. An epoch refers to one full cycle through the + training dataset. + default: 4 + batch_size: + type: integer + format: int64 + nullable: true + description: |- + The batch size to use for training. The batch size is the number of training examples used to + train a single forward and backward pass. 
+ + By default, the batch size will be dynamically configured to be ~0.2% of the number of examples + in the training set, capped at 256 - in general, we've found that larger batch sizes tend to + work better for larger datasets. + default: null + learning_rate_multiplier: + type: number + format: double + nullable: true + description: |- + The learning rate multiplier to use for training. The fine-tuning learning rate is the original + learning rate used for pretraining multiplied by this value. + + By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final + `batch_size` (larger learning rates tend to perform better with larger batch sizes). We + recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best + results. + default: null + prompt_loss_rate: + type: number + format: double + nullable: true + description: |- + The weight to use for loss on the prompt tokens. This controls how much the model tries to + learn to generate the prompt (as compared to the completion which always has a weight of 1.0), + and can add a stabilizing effect to training when completions are short. + + If prompts are extremely long (relative to completions), it may make sense to reduce this + weight so as to avoid over-prioritizing learning the prompt. + default: 0.01 + compute_classification_metrics: + type: boolean + nullable: true + description: |- + If set, we calculate classification-specific metrics such as accuracy and F-1 score using the + validation set at the end of every epoch. These metrics can be viewed in the + [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + + In order to compute classification metrics, you must provide a `validation_file`. Additionally, + you must specify `classification_n_classes` for multiclass classification or + `classification_positive_class` for binary classification. 
+ default: false + classification_n_classes: + type: integer + format: int64 + nullable: true + description: |- + The number of classes in a classification task. + + This parameter is required for multiclass classification. + default: null + classification_positive_class: + type: string + nullable: true + description: |- + The positive class in binary classification. + + This parameter is needed to generate precision, recall, and F1 metrics when doing binary + classification. + default: null + classification_betas: + type: array + items: + type: number + format: double + nullable: true + description: |- + If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score + is a generalization of F-1 score. This is only used for binary classification. + + With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger + beta score puts more weight on recall and less on precision. A smaller beta score puts more + weight on precision and less on recall. + default: null + suffix: + oneOf: + - $ref: '#/components/schemas/SuffixString' + nullable: true + description: |- + A string of up to 18 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like + `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. + default: null + CreateFineTuningJobRequest: + type: object + required: + - training_file + - model + properties: + training_file: + type: string + description: |- + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/upload) for how to upload a file. + + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. 
+ validation_file: + type: string + nullable: true + description: |- + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation metrics periodically during + fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + not be present in both train and validation files. + + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + model: + anyOf: + - type: string + - type: string + enum: + - babbage-002 + - davinci-002 + - gpt-3.5-turbo + description: |- + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + x-oaiTypeLabel: string + hyperparameters: + type: object properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string + n_epochs: + anyOf: + - type: string + enum: + - auto + - low + - high + - $ref: '#/components/schemas/NEpochs' description: |- - The arguments to call the function with, as generated by the model in JSON format. Note that - the model does not always generate valid JSON, and may hallucinate parameters not defined by - your function schema. Validate the arguments in your code before calling your function. - ChatCompletionResponseMessage: + The number of epochs to train the model for. An epoch refers to one full cycle through the + training dataset. + default: auto + description: The hyperparameters used for the fine-tuning job. + suffix: + oneOf: + - $ref: '#/components/schemas/SuffixString' + nullable: true + description: |- + A string of up to 18 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like + `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. 
+ default: null + CreateImageEditRequestMultiPart: + type: object + required: + - image + - prompt + properties: + image: + type: string + format: binary + description: |- + The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + provided, image must have transparency, which will be used as the mask. + prompt: + type: string + maxLength: 1000 + description: A text description of the desired image(s). The maximum length is 1000 characters. + mask: + type: string + format: binary + description: |- + An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where + `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + as `image`. + model: + anyOf: + - type: string + - type: string + enum: + - dall-e-2 + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + x-oaiTypeLabel: string + default: dall-e-2 + n: + oneOf: + - $ref: '#/components/schemas/ImagesN' + nullable: true + description: The number of images to generate. Must be between 1 and 10. + default: 1 + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + - 512x512 + - 1024x1024 + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + default: 1024x1024 + response_format: + type: string + enum: + - url + - b64_json + - b64_json + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. + default: url + user: + allOf: + - $ref: '#/components/schemas/User' + description: |- + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + CreateImageRequest: + type: object + required: + - prompt + properties: + prompt: + type: string + description: |- + A text description of the desired image(s). 
The maximum length is 1000 characters for + `dall-e-2` and 4000 characters for `dall-e-3`. + model: + anyOf: + - type: string + - type: string + enum: + - dall-e-2 + - dall-e-3 + description: The model to use for image generation. + x-oaiTypeLabel: string + default: dall-e-2 + n: + oneOf: + - $ref: '#/components/schemas/ImagesN' + nullable: true + description: |- + The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is + supported. + default: 1 + quality: + type: string + enum: + - standard + - hd + nullable: true + description: |- + The quality of the image that will be generated. `hd` creates images with finer details and + greater consistency across the image. This param is only supported for `dall-e-3`. + default: standard + response_format: + type: string + enum: + - url + - b64_json + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. + default: url + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + - 1792x1024 + - 1024x1792 + nullable: true + description: |- + The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for + `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + default: 1024x1024 + style: + type: string + enum: + - vivid + - natural + nullable: true + description: |- + The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model + to lean towards generating hyper-real and dramatic images. Natural causes the model to produce + more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + default: vivid + user: + allOf: + - $ref: '#/components/schemas/User' + description: |- + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). 
+ CreateImageVariationRequestMultiPart: + type: object + required: + - image + properties: + image: + type: string + format: binary + description: |- + The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, + and square. + model: + anyOf: + - type: string + - type: string + enum: + - dall-e-2 + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + x-oaiTypeLabel: string + default: dall-e-2 + n: + oneOf: + - $ref: '#/components/schemas/ImagesN' + nullable: true + description: The number of images to generate. Must be between 1 and 10. + default: 1 + response_format: + type: string + enum: + - url + - b64_json + - b64_json + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. + default: url + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + - 512x512 + - 1024x1024 + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + default: 1024x1024 + user: + allOf: + - $ref: '#/components/schemas/User' + description: |- + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + CreateMessageRequest: type: object required: - role @@ -1007,1950 +3769,2293 @@ components: role: type: string enum: - - system - user - assistant - - function - description: The role of the author of this message. + description: The role of the entity that is creating the message. Currently only `user` is supported. content: type: string - nullable: true - description: The contents of the message. - function_call: + minLength: 1 + maxLength: 32768 + description: The content of the message. + file_ids: + type: array + items: + type: string + minItems: 1 + maxItems: 10 + description: |- + A list of [File](/docs/api-reference/files) IDs that the message should use. 
There can be a + maximum of 10 files attached to a message. Useful for tools like `retrieval` and + `code_interpreter` that can access and use files. + default: [] + metadata: type: object - description: The name and arguments of a function that should be called, as generated by the model. - required: - - name - - arguments - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: |- - The arguments to call the function with, as generated by the model in JSON format. Note that - the model does not always generate valid JSON, and may hallucinate parameters not defined by - your function schema. Validate the arguments in your code before calling your function. - CompletionUsage: + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + CreateModerationRequest: + type: object + required: + - input + properties: + input: + allOf: + - $ref: '#/components/schemas/CreateModerationRequestInput' + description: The input text to classify + model: + anyOf: + - type: string + - type: string + enum: + - text-moderation-latest + - text-moderation-stable + description: |- + Two content moderations models are available: `text-moderation-stable` and + `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically + upgraded over time. This ensures you are always using our most accurate model. If you use + `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy + of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. 
+ x-oaiTypeLabel: string + default: text-moderation-latest + CreateModerationRequestInput: + oneOf: + - type: string + - type: array + items: + type: string + CreateModerationResponse: + type: object + required: + - id + - model + - results + properties: + id: + type: string + description: The unique identifier for the moderation request. + model: + type: string + description: The model used to generate the moderation results. + results: + type: array + items: + type: object + properties: + flagged: + type: boolean + description: Whether the content violates [OpenAI's usage policies](/policies/usage-policies). + categories: + type: object + properties: + hate: + type: boolean + description: |- + Content that expresses, incites, or promotes hate based on race, gender, ethnicity, + religion, nationality, sexual orientation, disability status, or caste. Hateful content + aimed at non-protected groups (e.g., chess players) is harrassment. + hate/threatening: + type: boolean + description: |- + Hateful content that also includes violence or serious harm towards the targeted group + based on race, gender, ethnicity, religion, nationality, sexual orientation, disability + status, or caste. + harassment: + type: boolean + description: Content that expresses, incites, or promotes harassing language towards any target. + harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm towards any target. + self-harm: + type: boolean + description: |- + Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, + and eating disorders. + self-harm/intent: + type: boolean + description: |- + Content where the speaker expresses that they are engaging or intend to engage in acts of + self-harm, such as suicide, cutting, and eating disorders. 
+ self-harm/instructions: + type: boolean + description: |- + Content that encourages performing acts of self-harm, such as suicide, cutting, and eating + disorders, or that gives instructions or advice on how to commit such acts. + sexual: + type: boolean + description: |- + Content meant to arouse sexual excitement, such as the description of sexual activity, or + that promotes sexual services (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic detail. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + description: A list of the categories, and whether they are flagged or not. + category_scores: + type: object + properties: + hate: + type: number + format: double + description: The score for the category 'hate'. + hate/threatening: + type: number + format: double + description: The score for the category 'hate/threatening'. + harassment: + type: number + format: double + description: The score for the category 'harassment'. + harassment/threatening: + type: number + format: double + description: The score for the category 'harassment/threatening'. + self-harm: + type: number + format: double + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + format: double + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + format: double + description: The score for the category 'self-harm/instructive'. + sexual: + type: number + format: double + description: The score for the category 'sexual'. 
+ sexual/minors: + type: number + format: double + description: The score for the category 'sexual/minors'. + violence: + type: number + format: double + description: The score for the category 'violence'. + violence/graphic: + type: number + format: double + description: The score for the category 'violence/graphic'. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + description: A list of the categories along with their scores as predicted by model. + required: + - flagged + - categories + - category_scores + description: A list of moderation objects. + description: Represents policy compliance report by OpenAI's content moderation model against a given input. + CreateRunRequest: type: object - description: Usage statistics for the completion request. required: - - prompt_tokens - - completion_tokens - - total_tokens + - assistant_id properties: - prompt_tokens: - type: integer - format: int64 - description: Number of tokens in the prompt. - completion_tokens: - type: integer - format: int64 - description: Number of tokens in the generated completion - total_tokens: - type: integer - format: int64 - description: Total number of tokens used in the request (prompt + completion). - CreateChatCompletionRequest: + assistant_id: + type: string + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + model: + type: string + nullable: true + description: |- + The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value + is provided here, it will override the model associated with the assistant. If not, the model + associated with the assistant will be used. + instructions: + type: string + nullable: true + description: |- + Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. 
+ This is useful for modifying the behavior on a per-run basis. + additional_instructions: + type: string + nullable: true + description: |- + Appends additional instructions at the end of the instructions for the run. This is useful for + modifying the behavior on a per-run basis without overriding other instructions. + tools: + type: object + allOf: + - $ref: '#/components/schemas/CreateRunRequestToolsItem' + nullable: true + description: |- + Override the tools the assistant can use for this run. This is useful for modifying the + behavior on a per-run basis. + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + CreateRunRequestTool: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsRetrieval' + - $ref: '#/components/schemas/AssistantToolsFunction' + x-oaiExpandable: true + CreateRunRequestToolsItem: + type: array + items: + $ref: '#/components/schemas/CreateRunRequestTool' + maxItems: 20 + CreateSpeechRequest: type: object required: - model - - messages + - input + - voice properties: model: anyOf: - type: string - type: string enum: - - gpt4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0301 - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-16k-0613 - description: |- - ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) - table for details on which models work with the Chat API. 
+ - tts-1 + - tts-1-hd + description: 'One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`' x-oaiTypeLabel: string - messages: - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestMessage' - description: |- - A list of messages comprising the conversation so far. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). - minItems: 1 - functions: - type: array - items: - $ref: '#/components/schemas/ChatCompletionFunctions' - description: A list of functions the model may generate JSON inputs for. - minItems: 1 - maxItems: 128 - function_call: - anyOf: - - type: string - enum: - - none - - auto - - $ref: '#/components/schemas/ChatCompletionFunctionCallOption' - description: |- - Controls how the model responds to function calls. `none` means the model does not call a - function, and responds to the end-user. `auto` means the model can pick between an end-user or - calling a function. Specifying a particular function via `{\"name":\ \"my_function\"}` forces the - model to call that function. `none` is the default when no functions are present. `auto` is the - default if functions are present. - temperature: - oneOf: - - $ref: '#/components/schemas/Temperature' - nullable: true - description: |- - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. - default: 1 - top_p: - oneOf: - - $ref: '#/components/schemas/TopP' - nullable: true + input: + type: string + maxLength: 4096 + description: The text to generate audio for. The maximum length is 4096 characters. 
+ voice: + type: string + enum: + - alloy + - echo + - fable + - onyx + - nova + - shimmer description: |- - An alternative to sampling with temperature, called nucleus sampling, where the model considers - the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. + The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, + `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the + [Text to speech guide](/docs/guides/text-to-speech/voice-options). + response_format: + type: string + enum: + - mp3 + - opus + - aac + - flac + description: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. + default: mp3 + speed: + type: number + format: double + minimum: 0.25 + maximum: 4 + description: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. default: 1 - n: - oneOf: - - $ref: '#/components/schemas/N' + CreateThreadAndRunRequest: + type: object + required: + - assistant_id + properties: + assistant_id: + type: string + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + thread: + allOf: + - $ref: '#/components/schemas/CreateThreadRequest' + description: If no thread is provided, an empty thread will be created. + model: + type: string nullable: true description: |- - How many completions to generate for each prompt. - **Note:** Because this parameter generates many completions, it can quickly consume your token - quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - default: 1 - max_tokens: - oneOf: - - $ref: '#/components/schemas/MaxTokens' + The ID of the [Model](/docs/api-reference/models) to be used to execute this run. 
If a value is + provided here, it will override the model associated with the assistant. If not, the model + associated with the assistant will be used. + instructions: + type: string nullable: true description: |- - The maximum number of [tokens](/tokenizer) to generate in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - for counting tokens. - default: 16 - stop: + Override the default system message of the assistant. This is useful for modifying the behavior + on a per-run basis. + tools: + type: object allOf: - - $ref: '#/components/schemas/Stop' - description: Up to 4 sequences where the API will stop generating further tokens. - default: null - presence_penalty: - oneOf: - - $ref: '#/components/schemas/Penalty' - nullable: true - description: |- - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear - in the text so far, increasing the model's likelihood to talk about new topics. - - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - frequency_penalty: - oneOf: - - $ref: '#/components/schemas/Penalty' + - $ref: '#/components/schemas/CreateRunRequestToolsItem' nullable: true description: |- - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing - frequency in the text so far, decreasing the model's likelihood to repeat the same line - verbatim. - - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - logit_bias: + Override the tools the assistant can use for this run. This is useful for modifying the + behavior on a per-run basis. + metadata: type: object - description: |- - Modify the likelihood of specified tokens appearing in the completion. 
- Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an - associated bias value from -100 to 100. Mathematically, the bias is added to the logits - generated by the model prior to sampling. The exact effect will vary per model, but values - between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 - should result in a ban or exclusive selection of the relevant token. additionalProperties: - type: integer - format: int64 + type: string nullable: true - x-oaiTypeLabel: map - user: - allOf: - - $ref: '#/components/schemas/User' description: |- - A unique identifier representing your end-user, which can help OpenAI to monitor and detect - abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - stream: - type: boolean + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + CreateThreadRequest: + type: object + properties: + messages: + type: array + items: + $ref: '#/components/schemas/CreateMessageRequest' + description: A list of [messages](/docs/api-reference/messages) to start the thread with. + metadata: + type: object + additionalProperties: + type: string nullable: true description: |- - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). - default: true - CreateChatCompletionResponse: + Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + CreateTranscriptionRequestMultiPart: type: object - description: Represents a chat completion response returned by model, based on the provided input. required: - - id - - object - - created + - file - model - - choices properties: - id: + file: + type: string + format: binary + description: |- + The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, + mpeg, mpga, m4a, ogg, wav, or webm. + x-oaiTypeLabel: file + model: + anyOf: + - type: string + - type: string + enum: + - whisper-1 + description: ID of the model to use. Only `whisper-1` is currently available. + x-oaiTypeLabel: string + language: + type: string + description: |- + The language of the input audio. Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy + and latency. + prompt: + type: string + description: |- + An optional text to guide the model's style or continue a previous audio segment. The + [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + response_format: + type: string + enum: + - json + - text + - srt + - verbose_json + - vtt + - text + - srt + - verbose_json + - vtt + description: |- + The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + vtt. + default: json + temperature: + type: number + format: double + minimum: 0 + maximum: 1 + description: |- + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + automatically increase the temperature until certain thresholds are hit. 
+ default: 0 + CreateTranscriptionResponse: + type: object + required: + - text + properties: + text: type: string - description: A unique identifier for the chat completion. - object: + description: The transcribed text for the provided audio data. + task: type: string - description: The object type, which is always `chat.completion`. - created: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: + enum: + - transcribe + description: The label that describes which operation type generated the accompanying response data. + language: type: string - description: The model used for the chat completion. - choices: + description: The spoken language that was detected in the audio data. + duration: + type: number + format: double + description: The total duration of the audio processed to produce accompanying transcription information. + segments: type: array items: - type: object - required: - - index - - message - - finish_reason - properties: - index: - type: integer - format: int64 - description: The index of the choice in the list of choices. - message: - $ref: '#/components/schemas/ChatCompletionResponseMessage' - finish_reason: - type: string - enum: - - stop - - length - - function_call - - content_filter - description: |- - The reason the model stopped generating tokens. This will be `stop` if the model hit a - natural stop point or a provided stop sequence, `length` if the maximum number of tokens - specified in the request was reached, `content_filter` if the content was omitted due to - a flag from our content filters, or `function_call` if the model called a function. - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. 
- usage: - $ref: '#/components/schemas/CompletionUsage' - x-oaiMeta: - name: The chat completion object - group: chat - example: '' - CreateCompletionRequest: + $ref: '#/components/schemas/AudioSegment' + description: |- + A collection of information about the timing, probabilities, and other detail of each processed + audio segment. + CreateTranslationRequestMultiPart: type: object required: + - file - model - - prompt properties: + file: + type: string + format: binary + description: |- + The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, + mpeg, mpga, m4a, ogg, wav, or webm. + x-oaiTypeLabel: file model: anyOf: - type: string - type: string enum: - - babbage-002 - - davinci-002 - - text-davinci-003 - - text-davinci-002 - - text-davinci-001 - - code-davinci-002 - - text-curie-001 - - text-babbage-001 - - text-ada-001 - description: |- - ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to - see all of your available models, or see our [Model overview](/docs/models/overview) for - descriptions of them. + - whisper-1 + description: ID of the model to use. Only `whisper-1` is currently available. x-oaiTypeLabel: string prompt: - allOf: - - $ref: '#/components/schemas/Prompt' - description: |- - The prompt(s) to generate completions for, encoded as a string, array of strings, array of - tokens, or array of token arrays. - - Note that <|endoftext|> is the document separator that the model sees during training, so if a - prompt is not specified the model will generate as if from the beginning of a new document. - default: <|endoftext|> - suffix: type: string - nullable: true - description: The suffix that comes after a completion of inserted text. - default: null - temperature: - oneOf: - - $ref: '#/components/schemas/Temperature' - nullable: true - description: |- - What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. - default: 1 - top_p: - oneOf: - - $ref: '#/components/schemas/TopP' - nullable: true - description: |- - An alternative to sampling with temperature, called nucleus sampling, where the model considers - the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - default: 1 - n: - oneOf: - - $ref: '#/components/schemas/N' - nullable: true - description: |- - How many completions to generate for each prompt. - **Note:** Because this parameter generates many completions, it can quickly consume your token - quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - default: 1 - max_tokens: - oneOf: - - $ref: '#/components/schemas/MaxTokens' - nullable: true - description: |- - The maximum number of [tokens](/tokenizer) to generate in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - for counting tokens. - default: 16 - stop: - allOf: - - $ref: '#/components/schemas/Stop' - description: Up to 4 sequences where the API will stop generating further tokens. - default: null - presence_penalty: - oneOf: - - $ref: '#/components/schemas/Penalty' - nullable: true - description: |- - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear - in the text so far, increasing the model's likelihood to talk about new topics. 
- - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - frequency_penalty: - oneOf: - - $ref: '#/components/schemas/Penalty' - nullable: true - description: |- - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing - frequency in the text so far, decreasing the model's likelihood to repeat the same line - verbatim. - - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - logit_bias: - type: object - description: |- - Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an - associated bias value from -100 to 100. Mathematically, the bias is added to the logits - generated by the model prior to sampling. The exact effect will vary per model, but values - between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 - should result in a ban or exclusive selection of the relevant token. - additionalProperties: - type: integer - format: int64 - nullable: true - x-oaiTypeLabel: map - user: - allOf: - - $ref: '#/components/schemas/User' description: |- - A unique identifier representing your end-user, which can help OpenAI to monitor and detect - abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - stream: - type: boolean - nullable: true + An optional text to guide the model's style or continue a previous audio segment. The + [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + response_format: + type: string + enum: + - json + - text + - srt + - verbose_json + - vtt + - text + - srt + - verbose_json + - vtt description: |- - If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). - default: true - logprobs: - type: integer - format: int64 - nullable: true + The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + vtt. + default: json + temperature: + type: number + format: double + minimum: 0 + maximum: 1 description: |- - Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. - For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The - API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` - elements in the response. - - The maximum value for `logprobs` is 5. - default: null - echo: - type: boolean - nullable: true - description: Echo back the prompt in addition to the completion - default: false - best_of: - type: integer - format: int64 - nullable: true + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + automatically increase the temperature until certain thresholds are hit. + default: 0 + CreateTranslationResponse: + type: object + required: + - text + properties: + text: + type: string + description: The translated text for the provided audio data. + task: + type: string + enum: + - translate + description: The label that describes which operation type generated the accompanying response data. + language: + type: string + description: The spoken language that was detected in the audio data. 
+ duration: + type: number + format: double + description: The total duration of the audio processed to produce accompanying translation information. + segments: + type: array + items: + $ref: '#/components/schemas/AudioSegment' description: |- - Generates `best_of` completions server-side and returns the "best" (the one with the highest - log probability per token). Results cannot be streamed. - - When used with `n`, `best_of` controls the number of candidate completions and `n` specifies - how many to return – `best_of` must be greater than `n`. - - **Note:** Because this parameter generates many completions, it can quickly consume your token - quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - default: 1 - CreateCompletionResponse: + A collection of information about the timing, probabilities, and other detail of each processed + audio segment. + DeleteAssistantFileResponse: type: object + required: + - id + - deleted + - object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - assistant.file.deleted description: |- - Represents a completion response from the API. Note: both the streamed and non-streamed response - objects share the same shape (unlike the chat endpoint). + Deletes the association between the assistant and the file, but does not delete the + [File](/docs/api-reference/files) object itself. + DeleteAssistantResponse: + type: object + required: + - id + - deleted + - object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - assistant.deleted + DeleteFileResponse: + type: object required: - id - object - - created - - model - - choices + - deleted properties: id: type: string - description: A unique identifier for the completion. object: type: string - description: The object type, which is always `text_completion`. 
- created: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) of when the completion was created. - model: - type: string - description: The model used for the completion. - choices: - type: array - items: - type: object - required: - - index - - text - - logprobs - - finish_reason - properties: - index: - type: integer - format: int64 - text: - type: string - logprobs: - type: object - required: - - tokens - - token_logprobs - - top_logprobs - - text_offset - properties: - tokens: - type: array - items: - type: string - token_logprobs: - type: array - items: - type: number - format: double - top_logprobs: - type: array - items: - type: object - additionalProperties: - type: integer - format: int64 - text_offset: - type: array - items: - type: integer - format: int64 - nullable: true - finish_reason: - type: string - enum: - - stop - - length - - content_filter - description: |- - The reason the model stopped generating tokens. This will be `stop` if the model hit a - natural stop point or a provided stop sequence, or `content_filter` if content was omitted - due to a flag from our content filters, `length` if the maximum number of tokens specified - in the request was reached, or `content_filter` if content was omitted due to a flag from our - content filters. - description: The list of completion choices the model generated for the input. - usage: - $ref: '#/components/schemas/CompletionUsage' - x-oaiMeta: - name: The completion object - legacy: true - example: '' - CreateEditRequest: + enum: + - file + deleted: + type: boolean + DeleteModelResponse: type: object required: - - model - - instruction + - id + - deleted + - object properties: - model: - anyOf: - - type: string - - type: string - enum: - - text-davinci-edit-001 - - code-davinci-edit-001 - description: |- - ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` - model with this endpoint. 
- x-oaiTypeLabel: string - input: + id: type: string - nullable: true - description: The input text to use as a starting point for the edit. - default: '' - instruction: + deleted: + type: boolean + object: type: string - description: The instruction that tells the model how to edit the prompt. - n: - oneOf: - - $ref: '#/components/schemas/EditN' - nullable: true - description: How many edits to generate for the input and instruction. - default: 1 - temperature: - oneOf: - - $ref: '#/components/schemas/Temperature' - nullable: true - description: |- - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. - default: 1 - top_p: - oneOf: - - $ref: '#/components/schemas/TopP' - nullable: true - description: |- - An alternative to sampling with temperature, called nucleus sampling, where the model considers - the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - default: 1 - CreateEditResponse: + enum: + - model + DeleteThreadResponse: type: object required: + - id + - deleted - object - - created - - choices - - usage properties: + id: + type: string + deleted: + type: boolean object: type: string enum: - - edit - description: The object type, which is always `edit`. - created: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) of when the edit was created. - choices: - type: array - items: - type: object - required: - - text - - index - - finish_reason - properties: - text: - type: string - description: The edited result. - index: - type: integer - format: int64 - description: The index of the choice in the list of choices. 
- finish_reason: - type: string - enum: - - stop - - length - description: |- - The reason the model stopped generating tokens. This will be `stop` if the model hit a - natural stop point or a provided stop sequence, or `length` if the maximum number of tokens - specified in the request was reached. - description: 'description: A list of edit choices. Can be more than one if `n` is greater than 1.' - usage: - $ref: '#/components/schemas/CompletionUsage' - CreateEmbeddingRequest: + - thread.deleted + Embedding: type: object required: - - model - - input + - index + - embedding + - object properties: - model: - anyOf: - - type: string - - type: string - enum: - - text-embedding-ada-002 - description: ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - x-oaiTypeLabel: string - input: + index: + type: integer + format: int64 + description: The index of the embedding in the list of embeddings. + embedding: anyOf: - - type: string - type: array items: - type: string - - $ref: '#/components/schemas/TokenArray' - - $ref: '#/components/schemas/TokenArrayArray' + type: number + format: double + - type: string description: |- - Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a - single request, pass an array of strings or array of token arrays. Each input must not exceed - the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - for counting tokens. - user: - $ref: '#/components/schemas/User' - CreateEmbeddingResponse: + The embedding vector, which is a list of floats. The length of vector depends on the model as + listed in the [embedding guide](/docs/guides/embeddings). 
+ object: + type: string + enum: + - embedding + description: The object type, which is always "embedding". + description: Represents an embedding vector returned by embedding endpoint. + Error: + type: object + required: + - type + - message + - param + - code + properties: + type: + type: string + message: + type: string + param: + type: string + nullable: true + code: + type: string + nullable: true + ErrorResponse: + type: object + required: + - error + properties: + error: + $ref: '#/components/schemas/Error' + FineTune: type: object required: + - id - object + - created_at + - updated_at - model - - data - - usage + - fine_tuned_model + - organization_id + - status + - hyperparams + - training_files + - validation_files + - result_files properties: + id: + type: string + description: The object identifier, which can be referenced in the API endpoints. object: type: string enum: - - embedding - description: The object type, which is always "embedding". - model: - type: string - description: The name of the model used to generate the embedding. - data: - type: array - items: - $ref: '#/components/schemas/Embedding' - description: The list of embeddings generated by the model. - usage: + - fine-tune + - fine-tune-results + - assistants + - assistants_output + description: The object type, which is always "fine-tune". + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + updated_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the fine-tuning job was last updated. + model: + type: string + description: The base model that is being fine-tuned. + fine_tuned_model: + type: string + nullable: true + description: The name of the fine-tuned model that is being created. + organization_id: + type: string + description: The organization that owns the fine-tuning job. 
+        status:
+          type: string
+          enum:
+            - created
+            - pending
+            - running
+            - succeeded
+            - failed
+            - cancelled
+          description: |-
+            The current status of the fine-tuning job, which can be either `created`, `running`,
+            `succeeded`, `failed`, or `cancelled`.
+        hyperparams:
           type: object
-          description: The usage information for the request.
-          required:
-            - prompt_tokens
-            - total_tokens
           properties:
-            prompt_tokens:
+            n_epochs:
               type: integer
               format: int64
-              description: The number of tokens used by the prompt.
-            total_tokens:
+              description: |-
+                The number of epochs to train the model for. An epoch refers to one full cycle through the
+                training dataset.
+            batch_size:
               type: integer
               format: int64
-              description: The total number of tokens used by the request.
-    CreateFileRequest:
+              description: |-
+                The batch size to use for training. The batch size is the number of training examples used to
+                train a single forward and backward pass.
+            prompt_loss_weight:
+              type: number
+              format: double
+              description: The weight to use for loss on the prompt tokens.
+            learning_rate_multiplier:
+              type: number
+              format: double
+              description: The learning rate multiplier to use for training.
+            compute_classification_metrics:
+              type: boolean
+              description: The classification metrics to compute using the validation dataset at the end of every epoch.
+            classification_positive_class:
+              type: string
+              description: The positive class to use for computing classification metrics.
+            classification_n_classes:
+              type: integer
+              format: int64
+              description: The number of classes to use for computing classification metrics.
+          required:
+            - n_epochs
+            - batch_size
+            - prompt_loss_weight
+            - learning_rate_multiplier
+          description: |-
+            The hyperparameters used for the fine-tuning job. See the
+            [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details.
+ training_files: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + description: The list of files used for training. + validation_files: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + description: The list of files used for validation. + result_files: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + description: The compiled results files for the fine-tuning job. + events: + type: array + items: + $ref: '#/components/schemas/FineTuneEvent' + description: The list of events that have been observed in the lifecycle of the FineTune job. + description: The `FineTune` object represents a legacy fine-tune job that has been created through the API. + deprecated: true + FineTuneEvent: type: object required: - - file - - purpose + - object + - created_at + - level + - message properties: - file: + object: type: string - format: binary - description: |- - Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. - - If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. - purpose: + created_at: + type: integer + format: unixtime + level: type: string - description: |- - The intended purpose of the uploaded documents. Use "fine-tune" for - [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the - uploaded file. - CreateFineTuneRequest: + message: + type: string + FineTuningEvent: type: object required: - - training_file + - object + - created_at + - level + - message properties: - training_file: - type: string - description: |- - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/upload) for how to upload a file. - - Your dataset must be formatted as a JSONL file, where each training example is a JSON object - with the keys "prompt" and "completion". Additionally, you must upload your file with the - purpose `fine-tune`. 
- - See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - details. - validation_file: + object: type: string - nullable: true - description: |- - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation metrics periodically during - fine-tuning. These metrics can be viewed in the - [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - Your train and validation data should be mutually exclusive. - - Your dataset must be formatted as a JSONL file, where each validation example is a JSON object - with the keys "prompt" and "completion". Additionally, you must upload your file with the - purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - details. - model: - anyOf: - - type: string - - type: string - enum: - - ada - - babbage - - curie - - davinci - nullable: true - description: |- - The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", - "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more - about these models, see the [Models](/docs/models) documentation. - x-oaiTypeLabel: string - n_epochs: - type: integer - format: int64 - nullable: true - description: |- - The number of epochs to train the model for. An epoch refers to one full cycle through the - training dataset. - default: 4 - batch_size: - type: integer - format: int64 - nullable: true - description: |- - The batch size to use for training. The batch size is the number of training examples used to - train a single forward and backward pass. - - By default, the batch size will be dynamically configured to be ~0.2% of the number of examples - in the training set, capped at 256 - in general, we've found that larger batch sizes tend to - work better for larger datasets. 
- default: null - learning_rate_multiplier: - type: number - format: double - nullable: true - description: |- - The learning rate multiplier to use for training. The fine-tuning learning rate is the original - learning rate used for pretraining multiplied by this value. - - By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final - `batch_size` (larger learning rates tend to perform better with larger batch sizes). We - recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best - results. - default: null - prompt_loss_rate: - type: number - format: double - nullable: true - description: |- - The weight to use for loss on the prompt tokens. This controls how much the model tries to - learn to generate the prompt (as compared to the completion which always has a weight of 1.0), - and can add a stabilizing effect to training when completions are short. - - If prompts are extremely long (relative to completions), it may make sense to reduce this - weight so as to avoid over-prioritizing learning the prompt. - default: 0.01 - compute_classification_metrics: - type: boolean - nullable: true - description: |- - If set, we calculate classification-specific metrics such as accuracy and F-1 score using the - validation set at the end of every epoch. These metrics can be viewed in the - [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - - In order to compute classification metrics, you must provide a `validation_file`. Additionally, - you must specify `classification_n_classes` for multiclass classification or - `classification_positive_class` for binary classification. - default: false - classification_n_classes: + created_at: type: integer - format: int64 - nullable: true - description: |- - The number of classes in a classification task. - - This parameter is required for multiclass classification. 
- default: null - classification_positive_class: + format: unixtime + level: type: string + message: + type: string + data: + type: object + additionalProperties: {} nullable: true - description: |- - The positive class in binary classification. - - This parameter is needed to generate precision, recall, and F1 metrics when doing binary - classification. - default: null - classification_betas: - type: array - items: - type: number - format: double - nullable: true - description: |- - If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score - is a generalization of F-1 score. This is only used for binary classification. - - With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger - beta score puts more weight on recall and less on precision. A smaller beta score puts more - weight on precision and less on recall. - default: null - suffix: - oneOf: - - $ref: '#/components/schemas/SuffixString' - nullable: true - description: |- - A string of up to 18 characters that will be added to your fine-tuned model name. - - For example, a `suffix` of "custom-model-name" would produce a model name like - `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. - default: null - CreateFineTuningJobRequest: + type: + type: string + enum: + - message + - metrics + FineTuningJob: type: object required: - - training_file + - id + - object + - created_at + - finished_at - model + - fine_tuned_model + - organization_id + - status + - hyperparameters + - training_file + - validation_file + - result_files + - trained_tokens + - error properties: - training_file: + id: type: string - description: |- - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/upload) for how to upload a file. - - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with - the purpose `fine-tune`. 
-
-          See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
-        validation_file:
+          description: The object identifier, which can be referenced in the API endpoints.
+        object:
+          type: string
+          enum:
+            - fine_tuning.job
+          description: The object type, which is always "fine_tuning.job".
+        created_at:
+          type: integer
+          format: unixtime
+          description: The Unix timestamp (in seconds) for when the fine-tuning job was created.
+        finished_at:
-          type: string
+          type: integer
+          format: unixtime
           nullable: true
           description: |-
-            The ID of an uploaded file that contains validation data.
-
-            If you provide this file, the data is used to generate validation metrics periodically during
-            fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should
-            not be present in both train and validation files.
-
-            Your dataset must be formatted as a JSONL file. You must upload your file with the purpose
-            `fine-tune`.
-
-            See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
+            The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be
+            null if the fine-tuning job is still running.
         model:
-          anyOf:
-            - type: string
-            - type: string
-              enum:
-                - babbage-002
-                - davinci-002
-                - gpt-3.5-turbo
+          type: string
+          description: The base model that is being fine-tuned.
+        fine_tuned_model:
+          type: string
+          nullable: true
           description: |-
-            The name of the model to fine-tune. You can select one of the
-            [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
-          x-oaiTypeLabel: string
+            The name of the fine-tuned model that is being created. The value will be null if the
+            fine-tuning job is still running.
+        organization_id:
+          type: string
+          description: The organization that owns the fine-tuning job.
+        status:
+          type: string
+          enum:
+            - created
+            - pending
+            - running
+            - succeeded
+            - failed
+            - cancelled
+          description: |-
+            The current status of the fine-tuning job, which can be either `created`, `pending`, `running`,
+            `succeeded`, `failed`, or `cancelled`.
         hyperparameters:
           type: object
-          description: The hyperparameters used for the fine-tuning job.
           properties:
             n_epochs:
               anyOf:
                 - type: string
                   enum:
                     - auto
                 - $ref: '#/components/schemas/NEpochs'
               description: |-
                 The number of epochs to train the model for. An epoch refers to one full cycle through the
                 training dataset.
+
+                "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the
+                number manually, we support any number between 1 and 50 epochs.
               default: auto
-        suffix:
-          oneOf:
-            - $ref: '#/components/schemas/SuffixString'
+          description: |-
+            The hyperparameters used for the fine-tuning job. See the
+            [fine-tuning guide](/docs/guides/fine-tuning) for more details.
+        training_file:
+          type: string
+          description: |-
+            The file ID used for training. You can retrieve the training data with the
+            [Files API](/docs/api-reference/files/retrieve-contents).
+        validation_file:
+          type: string
           nullable: true
           description: |-
-            A string of up to 18 characters that will be added to your fine-tuned model name.
-
-            For example, a `suffix` of "custom-model-name" would produce a model name like
-            `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
-          default: null
-    CreateImageEditRequest:
+            The file ID used for validation. You can retrieve the validation results with the
+            [Files API](/docs/api-reference/files/retrieve-contents).
+        result_files:
+          type: array
+          items:
+            type: string
+          description: |-
+            The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the
+            [Files API](/docs/api-reference/files/retrieve-contents).
+ trained_tokens: + type: integer + format: int64 + nullable: true + description: |- + The total number of billable tokens processed by this fine tuning job. The value will be null + if the fine-tuning job is still running. + error: + type: object + properties: + message: + type: string + description: A human-readable error message. + code: + type: string + description: A machine-readable error code. + param: + type: string + nullable: true + description: |- + The parameter that was invalid, usually `training_file` or `validation_file`. This field + will be null if the failure was not parameter-specific. + nullable: true + description: |- + For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + failure. + FineTuningJobEvent: type: object required: - - prompt - - image + - id + - object + - created_at + - level + - message properties: - prompt: + id: type: string - description: A text description of the desired image(s). The maximum length is 1000 characters. - image: + object: + type: string + created_at: + type: integer + format: unixtime + level: + type: string + enum: + - info + - warn + - error + message: + type: string + FunctionObject: + type: object + required: + - name + properties: + description: type: string - format: binary description: |- - The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not - provided, image must have transparency, which will be used as the mask. - mask: + A description of what the function does, used by the model to choose when and how to call the + function. + name: type: string - format: binary description: |- - An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where - `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions - as `image`. - n: - oneOf: - - $ref: '#/components/schemas/ImagesN' - nullable: true - description: The number of images to generate. Must be between 1 and 10. 
- default: 1 - size: + The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + dashes, with a maximum length of 64. + parameters: + $ref: '#/components/schemas/FunctionParameters' + FunctionParameters: + type: object + additionalProperties: {} + description: |- + The parameters the functions accepts, described as a JSON Schema object. See the + [guide](/docs/guides/gpt/function-calling) for examples, and the + [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation + about the format.\n\nTo describe a function that accepts no parameters, provide the value + `{\"type\": \"object\", \"properties\": {}}`. + Image: + type: object + properties: + b64_json: type: string - enum: - - 256x256 - - 512x512 - - 1024x1024 - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - default: 1024x1024 - response_format: + format: base64 + description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + url: type: string - enum: - - url - - b64_json - nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. - default: url - user: - $ref: '#/components/schemas/User' - CreateImageRequest: + format: uri + description: The URL of the generated image, if `response_format` is `url` (default). + revised_prompt: + type: string + description: The prompt that was used to generate the image, if there was any revision to the prompt. + description: Represents the url or the content of an image generated by the OpenAI API. 
+ ImagesN: + type: integer + format: int64 + minimum: 1 + maximum: 10 + ImagesResponse: + type: object + required: + - created + - data + properties: + created: + type: integer + format: unixtime + data: + type: array + items: + $ref: '#/components/schemas/Image' + ListAssistantFilesResponse: type: object required: - - prompt + - object + - data + - first_id + - last_id + - has_more properties: - prompt: - type: string - description: A text description of the desired image(s). The maximum length is 1000 characters. - n: - oneOf: - - $ref: '#/components/schemas/ImagesN' - nullable: true - description: The number of images to generate. Must be between 1 and 10. - default: 1 - size: + object: type: string enum: - - 256x256 - - 512x512 - - 1024x1024 - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - default: 1024x1024 - response_format: + - list + data: + type: array + items: + $ref: '#/components/schemas/AssistantFileObject' + first_id: type: string - enum: - - url - - b64_json - nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. - default: url - user: - $ref: '#/components/schemas/User' - CreateImageVariationRequest: + last_id: + type: string + has_more: + type: boolean + ListAssistantsResponse: type: object required: - - image + - object + - data + - first_id + - last_id + - has_more properties: - image: - type: string - format: binary - description: |- - The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, - and square. - n: - oneOf: - - $ref: '#/components/schemas/ImagesN' - nullable: true - description: The number of images to generate. Must be between 1 and 10. - default: 1 - size: + object: type: string enum: - - 256x256 - - 512x512 - - 1024x1024 - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. 
- default: 1024x1024 - response_format: + - list + data: + type: array + items: + $ref: '#/components/schemas/AssistantObject' + first_id: type: string - enum: - - url - - b64_json - nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. - default: url - user: - $ref: '#/components/schemas/User' - CreateModerationRequest: + last_id: + type: string + has_more: + type: boolean + ListFilesResponse: type: object required: - - input + - data + - object properties: - input: - anyOf: - - type: string - - type: array - items: - type: string - description: The input text to classify - model: - anyOf: - - type: string - - type: string - enum: - - text-moderation-latest - - text-moderation-stable - description: |- - Two content moderations models are available: `text-moderation-stable` and - `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically - upgraded over time. This ensures you are always using our most accurate model. If you use - `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy - of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. - x-oaiTypeLabel: string - default: text-moderation-latest - CreateModerationResponse: + data: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + object: + type: string + enum: + - list + ListFineTuneEventsResponse: type: object required: - - id - - model - - results + - object + - data properties: - id: - type: string - description: The unique identifier for the moderation request. - model: + object: type: string - description: The model used to generate the moderation results. - results: + data: type: array items: - type: object - required: - - flagged - - categories - - category_scores - properties: - flagged: - type: boolean - description: Whether the content violates [OpenAI's usage policies](/policies/usage-policies). 
- categories: - type: object - description: A list of the categories, and whether they are flagged or not. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructive - - sexual - - sexual/minors - - violence - - violence/graphic - properties: - hate: - type: boolean - description: |- - Content that expresses, incites, or promotes hate based on race, gender, ethnicity, - religion, nationality, sexual orientation, disability status, or caste. Hateful content - aimed at non-protected groups (e.g., chess players) is harrassment. - hate/threatening: - type: boolean - description: |- - Hateful content that also includes violence or serious harm towards the targeted group - based on race, gender, ethnicity, religion, nationality, sexual orientation, disability - status, or caste. - harassment: - type: boolean - description: Content that expresses, incites, or promotes harassing language towards any target. - harassment/threatening: - type: boolean - description: Harassment content that also includes violence or serious harm towards any target. - self-harm: - type: boolean - description: |- - Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, - and eating disorders. - self-harm/intent: - type: boolean - description: |- - Content where the speaker expresses that they are engaging or intend to engage in acts of - self-harm, such as suicide, cutting, and eating disorders. - self-harm/instructive: - type: boolean - description: |- - Content that encourages performing acts of self-harm, such as suicide, cutting, and eating - disorders, or that gives instructions or advice on how to commit such acts. - sexual: - type: boolean - description: |- - Content meant to arouse sexual excitement, such as the description of sexual activity, or - that promotes sexual services (excluding sex education and wellness). 
- sexual/minors: - type: boolean - description: Sexual content that includes an individual who is under 18 years old. - violence: - type: boolean - description: Content that depicts death, violence, or physical injury. - violence/graphic: - type: boolean - description: Content that depicts death, violence, or physical injury in graphic detail. - category_scores: - type: object - description: A list of the categories along with their scores as predicted by model. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructive - - sexual - - sexual/minors - - violence - - violence/graphic - properties: - hate: - type: number - format: double - description: The score for the category 'hate'. - hate/threatening: - type: number - format: double - description: The score for the category 'hate/threatening'. - harassment: - type: number - format: double - description: The score for the category 'harassment'. - harassment/threatening: - type: number - format: double - description: The score for the category 'harassment/threatening'. - self-harm: - type: number - format: double - description: The score for the category 'self-harm'. - self-harm/intent: - type: number - format: double - description: The score for the category 'self-harm/intent'. - self-harm/instructive: - type: number - format: double - description: The score for the category 'self-harm/instructive'. - sexual: - type: number - format: double - description: The score for the category 'sexual'. - sexual/minors: - type: number - format: double - description: The score for the category 'sexual/minors'. - violence: - type: number - format: double - description: The score for the category 'violence'. - violence/graphic: - type: number - format: double - description: The score for the category 'violence/graphic'. - description: A list of moderation objects. 
- CreateTranscriptionRequest: + $ref: '#/components/schemas/FineTuneEvent' + ListFineTunesResponse: type: object required: - - file - - model + - object + - data properties: - file: + object: type: string - format: binary - description: |- - The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, - mpeg, mpga, m4a, ogg, wav, or webm. - x-oaiTypeLabel: file - model: - anyOf: - - type: string - - type: string - enum: - - whisper-1 - description: ID of the model to use. Only `whisper-1` is currently available. - x-oaiTypeLabel: string - prompt: + data: + type: array + items: + $ref: '#/components/schemas/FineTune' + ListFineTuningJobEventsResponse: + type: object + required: + - object + - data + properties: + object: type: string - description: |- - An optional text to guide the model's style or continue a previous audio segment. The - [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - response_format: + data: + type: array + items: + $ref: '#/components/schemas/FineTuningJobEvent' + ListMessageFilesResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: type: string enum: - - json - - text - - srt - - verbose_json - - vtt - description: |- - The format of the transcript output, in one of these options: json, text, srt, verbose_json, or - vtt. - default: json - temperature: - type: number - format: double - description: |- - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more - random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, - the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to - automatically increase the temperature until certain thresholds are hit. 
- minimum: 0 - maximum: 1 - default: 0 - language: + - list + data: + type: array + items: + $ref: '#/components/schemas/MessageFileObject' + first_id: type: string - description: |- - The language of the input audio. Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy - and latency. - CreateTranscriptionResponse: + last_id: + type: string + has_more: + type: boolean + ListMessagesResponse: type: object required: - - text + - object + - data + - first_id + - last_id + - has_more properties: - text: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/MessageObject' + first_id: type: string - CreateTranslationRequest: + last_id: + type: string + has_more: + type: boolean + ListModelsResponse: type: object required: - - file - - model + - object + - data properties: - file: + object: type: string - format: binary - description: |- - The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, - mpeg, mpga, m4a, ogg, wav, or webm. - x-oaiTypeLabel: file - model: - anyOf: - - type: string - - type: string - enum: - - whisper-1 - description: ID of the model to use. Only `whisper-1` is currently available. - x-oaiTypeLabel: string - prompt: + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/Model' + ListPaginatedFineTuningJobsResponse: + type: object + required: + - object + - data + - has_more + properties: + object: type: string - description: |- - An optional text to guide the model's style or continue a previous audio segment. The - [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. 
- response_format: + data: + type: array + items: + $ref: '#/components/schemas/FineTuningJob' + has_more: + type: boolean + ListRunStepsResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: type: string enum: - - json - - text - - srt - - verbose_json - - vtt - description: |- - The format of the transcript output, in one of these options: json, text, srt, verbose_json, or - vtt. - default: json - temperature: - type: number - format: double - description: |- - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more - random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, - the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to - automatically increase the temperature until certain thresholds are hit. - minimum: 0 - maximum: 1 - default: 0 - CreateTranslationResponse: + - list + data: + type: array + items: + $ref: '#/components/schemas/RunStepObject' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ListRunsResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/RunObject' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + MessageContentImageFileObject: + type: object + required: + - type + - image_file + properties: + type: + type: string + enum: + - image_file + description: Always `image_file`. + image_file: + type: object + properties: + file_id: + type: string + description: The [File](/docs/api-reference/files) ID of the image in the message content. + required: + - file_id + description: References an image [File](/docs/api-reference/files) in the content of a message. 
+ MessageContentTextAnnotationsFileCitationObject: type: object required: + - type - text + - file_citation + - start_index + - end_index properties: + type: + type: string + enum: + - file_citation + description: Always `file_citation`. text: type: string - DeleteFileResponse: + description: The text in the message content that needs to be replaced. + file_citation: + type: object + properties: + file_id: + type: string + description: The ID of the specific File the citation is from. + quote: + type: string + description: The specific quote in the file. + required: + - file_id + - quote + start_index: + type: integer + format: int64 + minimum: 0 + end_index: + type: integer + format: int64 + minimum: 0 + description: |- + A citation within the message that points to a specific quote from a specific File associated + with the assistant or the message. Generated when the assistant uses the "retrieval" tool to + search files. + MessageContentTextAnnotationsFilePathObject: type: object required: - - id - - object - - deleted + - type + - text + - file_path + - start_index + - end_index properties: - id: + type: type: string - object: + enum: + - file_path + description: Always `file_path`. + text: type: string - deleted: - type: boolean - DeleteModelResponse: + description: The text in the message content that needs to be replaced. + file_path: + type: object + properties: + file_id: + type: string + description: The ID of the file that was generated. + required: + - file_id + start_index: + type: integer + format: int64 + minimum: 0 + end_index: + type: integer + format: int64 + minimum: 0 + description: |- + A URL for the file that's generated when the assistant used the `code_interpreter` tool to + generate a file. + MessageContentTextObject: + type: object + required: + - type + - text + properties: + type: + type: string + enum: + - text + - json_object + description: Always `text`. 
+        text:
+          type: object
+          properties:
+            value:
+              type: string
+              description: The data that makes up the text.
+            annotations:
+              type: array
+              items:
+                $ref: '#/components/schemas/MessageContentTextObjectAnnotations'
+          required:
+            - value
+            - annotations
+          description: The text content that is part of a message.
+    MessageContentTextObjectAnnotations:
+      oneOf:
+        - $ref: '#/components/schemas/MessageContentTextAnnotationsFileCitationObject'
+        - $ref: '#/components/schemas/MessageContentTextAnnotationsFilePathObject'
+      x-oaiExpandable: true
+    MessageFileObject:
       type: object
       required:
         - id
         - object
-        - deleted
+        - created_at
+        - message_id
       properties:
         id:
           type: string
+          description: The identifier, which can be referenced in API endpoints.
         object:
           type: string
-        deleted:
-          type: boolean
-    EditN:
-      type: integer
-      format: int64
-      minimum: 0
-      maximum: 20
-    Embedding:
+          enum:
+            - thread.message.file
+          description: The object type, which is always `thread.message.file`.
+        created_at:
+          type: integer
+          format: unixtime
+          description: The Unix timestamp (in seconds) for when the message file was created.
+        message_id:
+          type: string
+          description: The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to.
+      description: A list of files attached to a `message`.
+    MessageObject:
       type: object
-      description: Represents an embedding vector returned by embedding endpoint.
       required:
-        - index
+        - id
         - object
-        - embedding
+        - created_at
+        - thread_id
+        - role
+        - content
+        - assistant_id
+        - run_id
+        - file_ids
+        - metadata
       properties:
-        index:
-          type: integer
-          format: int64
-          description: The index of the embedding in the list of embeddings.
+        id:
+          type: string
+          description: The identifier, which can be referenced in API endpoints.
         object:
           type: string
           enum:
-            - embedding
-          description: The object type, which is always "embedding".
- embedding: - type: array - items: - type: number - format: double - description: |- - The embedding vector, which is a list of floats. The length of vector depends on the model as\ - listed in the [embedding guide](/docs/guides/embeddings). - Error: - type: object - required: - - type - - message - - param - - code - properties: - type: + - thread.message + description: The object type, which is always `thread.message`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the message was created. + thread_id: type: string - message: + description: The [thread](/docs/api-reference/threads) ID that this message belongs to. + role: type: string - param: + enum: + - user + - assistant + description: The entity that produced the message. One of `user` or `assistant`. + content: + type: array + items: + $ref: '#/components/schemas/MessageObjectContent' + description: The content of the message in array of text and/or images. + assistant_id: type: string nullable: true - code: + description: |- + If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this + message. + run_id: type: string nullable: true - ErrorResponse: - type: object - required: - - error - properties: - error: - $ref: '#/components/schemas/Error' - FineTune: + description: |- + If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of + this message. + file_ids: + type: array + items: + type: string + maxItems: 10 + description: |- + A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for + tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be + attached to a message. + default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + MessageObjectContent: + oneOf: + - $ref: '#/components/schemas/MessageContentImageFileObject' + - $ref: '#/components/schemas/MessageContentTextObject' + x-oaiExpandable: true + Model: type: object - description: The `FineTune` object represents a legacy fine-tune job that has been created through the API. required: - id + - created - object - - created_at - - updated_at - - model - - fine_tuned_model - - organization_id - - status - - hyperparams - - training_files - - validation_files - - result_files + - owned_by properties: id: type: string - description: The object identifier, which can be referenced in the API endpoints. + description: The model identifier, which can be referenced in the API endpoints. + created: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) when the model was created. object: type: string enum: - - fine-tune - description: The object type, which is always "fine-tune". - created_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - updated_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the fine-tuning job was last updated. + - model + description: The object type, which is always "model". + owned_by: + type: string + description: The organization that owns the model. + description: Describes an OpenAI model offering that can be used with the API. + ModifyAssistantRequest: + type: object + properties: model: type: string - description: The base model that is being fine-tuned. - fine_tuned_model: + description: |- + ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + name: type: string nullable: true - description: The name of the fine-tuned model that is being created. - organization_id: + maxLength: 256 + description: The name of the assistant. The maximum length is 256 characters. + description: type: string - description: The organization that owns the fine-tuning job. - status: + nullable: true + maxLength: 512 + description: The description of the assistant. The maximum length is 512 characters. + instructions: type: string - enum: - - created - - running - - succeeded - - failed - - cancelled - description: |- - The current status of the fine-tuning job, which can be either `created`, `running`, - `succeeded`, `failed`, or `cancelled`. - hyperparams: - type: object + nullable: true + maxLength: 32768 + description: The system instructions that the assistant uses. The maximum length is 32768 characters. + tools: + allOf: + - $ref: '#/components/schemas/CreateAssistantRequestToolsItem' description: |- - The hyperparameters used for the fine-tuning job. See the - [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. - required: - - n_epochs - - batch_size - - prompt_loss_weight - - learning_rate_multiplier - properties: - n_epochs: - type: integer - format: int64 - description: |- - The number of epochs to train the model for. An epoch refers to one full cycle through the - training dataset. - batch_size: - type: integer - format: int64 - description: |- - The batch size to use for training. The batch size is the number of training examples used to - train a single forward and backward pass. - prompt_loss_weight: - type: number - format: double - description: The weight to use for loss on the prompt tokens. 
- learning_rate_multiplier: - type: number - format: double - description: The learning rate multiplier to use for training. - compute_classification_metrics: - type: boolean - description: The classification metrics to compute using the validation dataset at the end of every epoch. - classification_positive_class: - type: string - description: The positive class to use for computing classification metrics. - classification_n_classes: - type: integer - format: int64 - description: The number of classes to use for computing classification metrics. - training_files: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - description: The list of files used for training. - validation_files: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - description: The list of files used for validation. - result_files: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - description: The compiled results files for the fine-tuning job. - events: + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + Tools can be of types `code_interpreter`, `retrieval`, or `function`. + default: [] + file_ids: type: array items: - $ref: '#/components/schemas/FineTuneEvent' - description: The list of events that have been observed in the lifecycle of the FineTune job. - FineTuneEvent: + type: string + maxItems: 20 + description: |- + A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + maximum of 20 files attached to the assistant. Files are ordered by their creation date in + ascending order. + default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. 
+ x-oaiTypeLabel: map + ModifyMessageRequest: type: object - required: - - object - - created_at - - level - - message properties: - object: - type: string - created_at: - type: integer - format: unixtime - level: - type: string - message: - type: string - FineTuningEvent: + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + ModifyRunRequest: + type: object + properties: + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + ModifyThreadRequest: + type: object + properties: + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + NEpochs: + type: integer + format: int64 + minimum: 1 + maximum: 50 + OpenAIFile: type: object required: - - object + - id + - bytes - created_at - - level - - message + - filename + - object + - purpose + - status properties: - object: + id: type: string + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + format: int64 + description: The size of the file, in bytes. 
created_at: type: integer format: unixtime - level: + description: The Unix timestamp (in seconds) for when the file was created. + filename: type: string - message: + description: The name of the file. + object: type: string - data: - type: object - additionalProperties: {} - nullable: true - type: + enum: + - file + description: The object type, which is always "file". + purpose: type: string enum: - - message - - metrics - FineTuningJob: + - fine-tune + - fine-tune-results + - assistants + - assistants_output + description: |- + The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, + `assistants`, and `assistants_output`. + status: + type: string + enum: + - uploaded + - processed + - error + description: |- + Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or + `error`. + deprecated: true + status_details: + type: string + description: |- + Deprecated. For details on why a fine-tuning training file failed validation, see the `error` + field on `fine_tuning.job`. + deprecated: true + description: The `File` object represents a document that has been uploaded to OpenAI. + Prompt: + oneOf: + - type: string + - type: array + items: + type: string + - $ref: '#/components/schemas/TokenArrayItem' + - $ref: '#/components/schemas/TokenArrayArray' + RunCompletionUsage: + type: object + required: + - completion_tokens + - prompt_tokens + - total_tokens + properties: + completion_tokens: + type: integer + format: int64 + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + format: int64 + description: Number of prompt tokens used over the course of the run. + total_tokens: + type: integer + format: int64 + description: Total number of tokens used (prompt + completion). + description: |- + Usage statistics related to the run. This value will be `null` if the run is not in a terminal + state (i.e. `in_progress`, `queued`, etc.). 
+ RunObject: type: object required: - id - object - created_at - - finished_at - - model - - fine_tuned_model - - organization_id + - thread_id + - assistant_id - status - - hyperparameters - - training_file - - validation_file - - result_files - - trained_tokens - - error + - required_action + - last_error + - expires_at + - started_at + - cancelled_at + - failed_at + - completed_at + - model + - instructions + - tools + - file_ids + - metadata + - usage properties: id: type: string - description: The object identifier, which can be referenced in the API endpoints. + description: The identifier, which can be referenced in API endpoints. object: type: string enum: - - fine_tuning.job - description: The object type, which is always "fine_tuning.job". + - thread.run + description: The object type, which is always `thread.run`. created_at: type: integer format: unixtime - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - finished_at: - type: string - format: date-time - nullable: true - description: |- - The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be - null if the fine-tuning job is still running. - model: - type: string - description: The base model that is being fine-tuned. - fine_tuned_model: + description: The Unix timestamp (in seconds) for when the run was created. + thread_id: type: string - nullable: true description: |- - The name of the fine-tuned model that is being created. The value will be null if the - fine-tuning job is still running. - organization_id: + The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this + run. + assistant_id: type: string - description: The organization that owns the fine-tuning job. + description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. 
status: type: string enum: - - created - - pending - - running - - succeeded - - failed + - queued + - in_progress + - requires_action + - cancelling - cancelled + - failed + - completed + - expired description: |- - The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, - `succeeded`, `failed`, or `cancelled`. - hyperparameters: + The status of the run, which can be either `queued`, `in_progress`, `requires_action`, + `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. + required_action: type: object - description: |- - The hyperparameters used for the fine-tuning job. See the - [fine-tuning guide](/docs/guides/fine-tuning) for more details. properties: - n_epochs: - anyOf: - - type: string - enum: - - auto - - $ref: '#/components/schemas/NEpochs' - description: |- - The number of epochs to train the model for. An epoch refers to one full cycle through the - training dataset. - - "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the - number manually, we support any number between 1 and 50 epochs. - default: auto - training_file: - type: string - description: |- - The file ID used for training. You can retrieve the training data with the - [Files API](/docs/api-reference/files/retrieve-contents). - validation_file: - type: string - nullable: true - description: |- - The file ID used for validation. You can retrieve the validation results with the - [Files API](/docs/api-reference/files/retrieve-contents). - result_files: - type: array - items: - type: string - description: |- - The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the - [Files API](/docs/api-reference/files/retrieve-contents). - trained_tokens: - type: integer - format: int64 + type: + type: string + enum: + - submit_tool_outputs + description: For now, this is always `submit_tool_outputs`. 
+ submit_tool_outputs: + type: object + properties: + tool_calls: + type: array + items: + $ref: '#/components/schemas/RunToolCallObject' + description: A list of the relevant tool calls. + required: + - tool_calls + description: Details on the tool outputs needed for this run to continue. + required: + - type + - submit_tool_outputs nullable: true description: |- - The total number of billable tokens processed by this fine tuning job. The value will be null - if the fine-tuning job is still running. - error: + Details on the action required to continue the run. Will be `null` if no action is + required. + last_error: type: object - description: |- - For fine-tuning jobs that have `failed`, this will contain more information on the cause of the - failure. properties: - message: - type: string - description: A human-readable error message. code: type: string - description: A machine-readable error code. - param: + enum: + - server_error + - rate_limit_exceeded + description: One of `server_error` or `rate_limit_exceeded`. + message: type: string - nullable: true - description: |- - The parameter that was invalid, usually `training_file` or `validation_file`. This field - will be null if the failure was not parameter-specific. + description: A human-readable description of the error. + required: + - code + - message nullable: true - FineTuningJobEvent: - type: object - required: - - id - - object - - created_at - - level - - message - properties: - id: - type: string - object: - type: string - created_at: + description: The last error associated with this run. Will be `null` if there are no errors. + expires_at: type: integer format: unixtime - level: + description: The Unix timestamp (in seconds) for when the run will expire. + started_at: type: string - enum: - - info - - warn - - error - message: + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run was started. 
+        cancelled_at:
+          type: string
+          format: date-time
+          nullable: true
+          description: The Unix timestamp (in seconds) for when the run was cancelled.
+        failed_at:
+          type: string
+          format: date-time
+          nullable: true
+          description: The Unix timestamp (in seconds) for when the run failed.
+        completed_at:
+          type: string
+          format: date-time
+          nullable: true
+          description: The Unix timestamp (in seconds) for when the run was completed.
+        model:
+          type: string
+          description: The model that the [assistant](/docs/api-reference/assistants) used for this run.
+        instructions:
+          type: string
+          description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run.
+        tools:
+          allOf:
+            - $ref: '#/components/schemas/CreateRunRequestToolsItem'
+          description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.
+        file_ids:
+          type: array
+          items:
+            type: string
+          description: |-
+            The list of [File](/docs/api-reference/files) IDs the
+            [assistant](/docs/api-reference/assistants) used for this run.
+          default: []
+        metadata:
+          type: object
+          additionalProperties:
+            type: string
+          nullable: true
+          description: |-
+            Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+            additional information about the object in a structured format. Keys can be a maximum of 64
+            characters long and values can be a maximum of 512 characters long.
+ x-oaiTypeLabel: map + usage: + type: object + allOf: + - $ref: '#/components/schemas/RunCompletionUsage' + nullable: true + description: Represents an execution run on a [thread](/docs/api-reference/threads). + RunStepCompletionUsage: type: object required: - - created - - data + - completion_tokens + - prompt_tokens + - total_tokens properties: - created: + completion_tokens: type: integer - format: unixtime - data: - type: array - items: - $ref: '#/components/schemas/Image' - ListFilesResponse: + format: int64 + description: Number of completion tokens used over the course of the run step. + prompt_tokens: + type: integer + format: int64 + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + format: int64 + description: Total number of tokens used (prompt + completion). + description: |- + Usage statistics related to the run step. This value will be `null` while the run step's status + is `in_progress`. + RunStepDetails: + oneOf: + - $ref: '#/components/schemas/RunStepDetailsMessageCreationObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsObject' + x-oaiExpandable: true + RunStepDetailsMessageCreationObject: type: object required: - - object - - data - properties: - object: - type: string - data: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - ListFineTuneEventsResponse: + - type + - message_creation + properties: + type: + type: string + enum: + - message_creation + description: Details of the message creation by the run step. + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - message_id + description: Details of the message creation by the run step. 
+ RunStepDetailsToolCallsCodeObject: type: object required: - - object - - data + - id + - type + - code_interpreter properties: - object: + id: type: string - data: - type: array - items: - $ref: '#/components/schemas/FineTuneEvent' - ListFineTunesResponse: + description: The ID of the tool call. + type: + type: string + enum: + - code_interpreter + description: |- + The type of tool call. This is always going to be `code_interpreter` for this type of tool + call. + code_interpreter: + type: object + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + allOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputs' + description: |- + The outputs from the Code Interpreter tool call. Code Interpreter can output one or more + items, including text (`logs`) or images (`image`). Each of these are represented by a + different object type. + required: + - input + - outputs + description: The Code Interpreter tool call definition. + description: Details of the Code Interpreter tool call the run step was involved in. + RunStepDetailsToolCallsCodeOutput: + oneOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject' + x-oaiExpandable: true + RunStepDetailsToolCallsCodeOutputImageObject: type: object required: - - object - - data + - type + - image properties: - object: + type: type: string - data: - type: array - items: - $ref: '#/components/schemas/FineTune' - ListFineTuningJobEventsResponse: + enum: + - image + description: Always `image`. + image: + type: object + properties: + file_id: + type: string + description: The [file](/docs/api-reference/files) ID of the image. 
+ required: + - file_id + RunStepDetailsToolCallsCodeOutputLogsObject: type: object required: - - object - - data + - type + - logs properties: - object: + type: type: string - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJobEvent' - ListModelsResponse: + enum: + - logs + description: Always `logs`. + logs: + type: string + description: The text output from the Code Interpreter tool call. + description: Text output from the Code Interpreter tool call as part of a run step. + RunStepDetailsToolCallsCodeOutputs: + type: array + items: + $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutput' + RunStepDetailsToolCallsFunctionObject: type: object required: - - object - - data + - id + - type + - function properties: - object: + id: type: string - data: - type: array - items: - $ref: '#/components/schemas/Model' - ListPaginatedFineTuningJobsResponse: + description: The ID of the tool call object. + type: + type: string + enum: + - function + description: The type of tool call. This is always going to be `function` for this type of tool call. + function: + type: object + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + nullable: true + description: |- + The output of the function. This will be `null` if the outputs have not been + [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + required: + - name + - arguments + - output + description: The definition of the function that was called. + RunStepDetailsToolCallsObject: type: object required: - - object - - data - - has_more + - type + - tool_calls properties: - object: + type: type: string - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJob' - has_more: - type: boolean - MaxTokens: - type: integer - format: int64 - minimum: 0 - Model: + enum: + - tool_calls + description: Always `tool_calls`. 
+ tool_calls: + allOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsObjectToolCallsItem' + description: |- + An array of tool calls the run step was involved in. These can be associated with one of three + types of tools: `code_interpreter`, `retrieval`, or `function`. + description: Details of the tool call. + RunStepDetailsToolCallsObjectToolCall: + oneOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsRetrievalObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsFunctionObject' + x-oaiExpandable: true + RunStepDetailsToolCallsObjectToolCallsItem: + type: array + items: + $ref: '#/components/schemas/RunStepDetailsToolCallsObjectToolCall' + RunStepDetailsToolCallsRetrievalObject: type: object - description: Describes an OpenAI model offering that can be used with the API. required: - id - - object - - created - - owned_by + - type + - retrieval properties: id: type: string - description: The model identifier, which can be referenced in the API endpoints. - object: + description: The ID of the tool call object. + type: type: string enum: - - model - description: The object type, which is always "model". - created: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) when the model was created. - owned_by: - type: string - description: The organization that owns the model. - N: - type: integer - format: int64 - minimum: 1 - maximum: 128 - NEpochs: - type: integer - format: int64 - minimum: 1 - maximum: 50 - OpenAIFile: + - retrieval + description: The type of tool call. This is always going to be `retrieval` for this type of tool call. + retrieval: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + RunStepObject: type: object - description: The `File` object represents a document that has been uploaded to OpenAI. 
required: - id - object - - bytes - - createdAt - - filename - - purpose + - created_at + - assistant_id + - thread_id + - run_id + - type - status + - step_details + - last_error + - expires_at + - cancelled_at + - failed_at + - completed_at + - metadata + - usage properties: id: type: string - description: The file identifier, which can be referenced in the API endpoints. + description: The identifier of the run step, which can be referenced in API endpoints. object: type: string enum: - - file - description: The object type, which is always "file". - bytes: - type: integer - format: int64 - description: The size of the file in bytes. - createdAt: + - thread.run.step + description: The object type, which is always `thread.run.step`. + created_at: type: integer format: unixtime - description: The Unix timestamp (in seconds) for when the file was created. - filename: + description: The Unix timestamp (in seconds) for when the run step was created. + assistant_id: type: string - description: The name of the file. - purpose: + description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + thread_id: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. + run_id: + type: string + description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + type: type: string - description: The intended purpose of the file. Currently, only "fine-tune" is supported. + enum: + - message_creation + - tool_calls + description: The type of run step, which can be either `message_creation` or `tool_calls`. status: type: string enum: - - uploaded - - processed - - pending - - error - - deleting - - deleted + - in_progress + - cancelled + - failed + - completed + - expired description: |- - The current status of the file, which can be either `uploaded`, `processed`, `pending`, - `error`, `deleting` or `deleted`. 
- status_details: + The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, + `completed`, or `expired`. + step_details: + allOf: + - $ref: '#/components/schemas/RunStepDetails' + description: The details of the run step. + last_error: + type: object + properties: + code: + type: string + enum: + - server_error + - rate_limit_exceeded + description: One of `server_error` or `rate_limit_exceeded`. + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + nullable: true + description: The last error associated with this run step. Will be `null` if there are no errors. + expires_at: type: string + format: date-time nullable: true description: |- - Additional details about the status of the file. If the file is in the `error` state, this will - include a message describing the error. - Penalty: - type: number - format: double - minimum: -2 - maximum: 2 - Prompt: - oneOf: - - type: string - - type: array - items: + The Unix timestamp (in seconds) for when the run step expired. A step is considered expired + if the parent run is expired. + cancelled_at: + type: string + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run step was cancelled. + failed_at: + type: string + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run step failed. + completed_at: + type: string + format: date-time + nullable: true + description: T The Unix timestamp (in seconds) for when the run step completed.. + metadata: + type: object + additionalProperties: type: string - - $ref: '#/components/schemas/TokenArray' - - $ref: '#/components/schemas/TokenArrayArray' - nullable: true + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. 
Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + usage: + type: object + allOf: + - $ref: '#/components/schemas/RunCompletionUsage' + nullable: true + description: Represents a step in execution of a run. + RunToolCallObject: + type: object + required: + - id + - type + - function + properties: + id: + type: string + description: |- + The ID of the tool call. This ID must be referenced when you submit the tool outputs in using + the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + type: + type: string + enum: + - function + description: The type of tool call the output is required for. For now, this is always `function`. + function: + type: object + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments that the model expects you to pass to the function. + required: + - name + - arguments + description: The function definition. + description: Tool call objects Stop: oneOf: - type: string - $ref: '#/components/schemas/StopSequences' - nullable: true StopSequences: type: array items: type: string minItems: 1 maxItems: 4 + SubmitToolOutputsRunRequest: + type: object + required: + - tool_outputs + properties: + tool_outputs: + type: object + properties: + tool_call_id: + type: string + description: |- + The ID of the tool call in the `required_action` object within the run object the output is + being submitted for. + output: + type: string + description: The output of the tool call to be submitted to continue the run. + description: A list of tools for which the outputs are being submitted. 
SuffixString: type: string minLength: 1 maxLength: 40 - Temperature: - type: number - format: double - minimum: 0 - maximum: 2 - TokenArray: + ThreadObject: + type: object + required: + - id + - object + - created_at + - metadata + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints. + object: + type: string + enum: + - thread + description: The object type, which is always `thread`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the thread was created. + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + description: Represents a thread that contains [messages](/docs/api-reference/messages). 
+ TokenArrayArray: type: array items: - type: integer - format: int64 + $ref: '#/components/schemas/TokenArrayItem' minItems: 1 - TokenArrayArray: + TokenArrayItem: type: array items: - $ref: '#/components/schemas/TokenArray' + type: integer + format: int64 minItems: 1 - TopP: - type: number - format: double - minimum: 0 - maximum: 1 User: type: string securitySchemes: From 9733bb9eb3c679da241981d714700d87df4771de Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Wed, 7 Feb 2024 17:28:06 -0800 Subject: [PATCH 04/18] Remove duplicate readme --- README.md | 3 --- readme.md => readme2.md | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) delete mode 100644 README.md rename readme.md => readme2.md (90%) diff --git a/README.md b/README.md deleted file mode 100644 index 04bcd9be3..000000000 --- a/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# OpenAPI spec for the OpenAI API - -This repository contains an [OpenAPI](https://www.openapis.org/) specification for the [OpenAI API](https://platform.openai.com/docs/api-reference). diff --git a/readme.md b/readme2.md similarity index 90% rename from readme.md rename to readme2.md index b65856581..eff608de4 100644 --- a/readme.md +++ b/readme2.md @@ -1,10 +1,10 @@ -A conversion of the OpenAI OpenAPI to TypeSpec. +# A conversion of the OpenAI OpenAPI to TypeSpec Snapshot: https://raw.githubusercontent.com/openai/openai-openapi/b648b7823135e6fa5148ac9a303c16fdad050da6/openapi.yaml There are some deltas: -### Changes to API Semantics: +### Changes to API Semantics - Many things are missing defaults (mostly due to bug where we can't set null defaults) - Error responses have been added. 
From a952d4128be677bbf4e15e0896d2a89462d30169 Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Wed, 7 Feb 2024 17:28:41 -0800 Subject: [PATCH 05/18] Rename README --- readme2.md => README.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename readme2.md => README.md (100%) diff --git a/readme2.md b/README.md similarity index 100% rename from readme2.md rename to README.md From 8eb95a03932094693b8fdce855d9f18af6748e22 Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Thu, 8 Feb 2024 14:00:51 -0800 Subject: [PATCH 06/18] Edit a couple of comments --- chat/models.tsp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chat/models.tsp b/chat/models.tsp index d0d19bc06..160afb122 100644 --- a/chat/models.tsp +++ b/chat/models.tsp @@ -359,6 +359,7 @@ model ChatCompletionRequestMessageContentPartImage { image_url: { /** Either a URL of the image or the base64 encoded image data. */ + // TODO: The original OpenAPI spec only describes this as a URL. url: url | string; /** @@ -373,7 +374,6 @@ model ChatCompletionRequestMessageContentPartImage { model ChatCompletionMessageToolCalls is ChatCompletionMessageToolCall[]; model ChatCompletionMessageToolCall { - // TODO: index included when streaming /** The ID of the tool call. */ id: string; From b6bfedfed72b52f2343d9f29c25219d8702a2aa3 Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Thu, 8 Feb 2024 14:15:30 -0800 Subject: [PATCH 07/18] Fix listAssistants name --- assistants/operations.tsp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/assistants/operations.tsp b/assistants/operations.tsp index 2883342b1..350f68d7b 100644 --- a/assistants/operations.tsp +++ b/assistants/operations.tsp @@ -23,7 +23,7 @@ interface Assistants { @operationId("listAssistants") @tag("Assistants") @summary("Returns a list of assistants.") - listFiles( + listAssistants( /** * A limit on the number of objects to be returned. Limit can range between 1 and 100, and the * default is 20. 
From 11bb01fe71ab2b591aa86fbc851e3679b4048f22 Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Fri, 9 Feb 2024 10:23:06 -0800 Subject: [PATCH 08/18] Fix listRuns path --- runs/operations.tsp | 2 +- tsp-output/@typespec/openapi3/openapi.yaml | 141 ++++++++++----------- 2 files changed, 71 insertions(+), 72 deletions(-) diff --git a/runs/operations.tsp b/runs/operations.tsp index ae6f35778..8bf1f0df1 100644 --- a/runs/operations.tsp +++ b/runs/operations.tsp @@ -32,7 +32,7 @@ interface Runs { @body run: CreateRunRequest, ): RunObject | ErrorResponse; - @route("thread_id}/runs") + @route("{thread_id}/runs") @get @operationId("listRuns") @tag("Assistants") diff --git a/tsp-output/@typespec/openapi3/openapi.yaml b/tsp-output/@typespec/openapi3/openapi.yaml index f9d7b16a4..8f62d8667 100644 --- a/tsp-output/@typespec/openapi3/openapi.yaml +++ b/tsp-output/@typespec/openapi3/openapi.yaml @@ -1196,77 +1196,6 @@ paths: application/json: schema: $ref: '#/components/schemas/CreateThreadAndRunRequest' - /threads/thread_id}/runs/{thread_id}: - get: - tags: - - Assistants - operationId: listRuns - summary: Returns a list of runs belonging to a thread. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the thread the run belongs to. - schema: - type: string - - name: limit - in: query - required: false - description: |- - A limit on the number of objects to be returned. Limit can range between 1 and 100, and the - default is 20. - schema: - type: integer - format: int32 - default: 20 - - name: order - in: query - required: false - description: |- - Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` - for descending order. - schema: - type: string - enum: - - asc - - desc - - desc - - desc - - desc - - desc - - desc - default: desc - - name: after - in: query - required: false - description: |- - A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
- For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the list. - schema: - type: string - - name: before - in: query - required: false - description: |- - A cursor for use in pagination. `before` is an object ID that defines your place in the list. - For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page of the list. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListRunsResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' /threads/{thread_id}: get: tags: @@ -1667,6 +1596,76 @@ paths: application/json: schema: $ref: '#/components/schemas/CreateRunRequest' + get: + tags: + - Assistants + operationId: listRuns + summary: Returns a list of runs belonging to a thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread the run belongs to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + type: string + enum: + - asc + - desc + - desc + - desc + - desc + - desc + - desc + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
+ For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListRunsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' /threads/{thread_id}/runs/{run_id}: get: tags: From 4e9e7aa6963c616b48f74c51eb486d2ecbc626de Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Wed, 14 Feb 2024 16:56:35 -0800 Subject: [PATCH 09/18] Add initial .NET generated library --- .dotnet/OpenAI.sln | 50 + .dotnet/src/Generated/Assistants.cs | 1352 ++++ .dotnet/src/Generated/Audio.cs | 421 ++ .dotnet/src/Generated/Chat.cs | 183 + .dotnet/src/Generated/Completions.cs | 183 + .dotnet/src/Generated/Embeddings.cs | 183 + .dotnet/src/Generated/Files.cs | 683 ++ .dotnet/src/Generated/FineTunes.cs | 736 ++ .dotnet/src/Generated/FineTuning.cs | 55 + .dotnet/src/Generated/FineTuningJobs.cs | 710 ++ .dotnet/src/Generated/Images.cs | 421 ++ .dotnet/src/Generated/Messages.cs | 1037 +++ .../AssistantFileObject.Serialization.cs | 156 + .../Generated/Models/AssistantFileObject.cs | 91 + .../Models/AssistantFileObjectObject.cs | 45 + .../Models/AssistantObject.Serialization.cs | 302 + .../src/Generated/Models/AssistantObject.cs | 202 + .../Generated/Models/AssistantObjectObject.cs | 45 + .../Models/AudioSegment.Serialization.cs | 214 + 
.dotnet/src/Generated/Models/AudioSegment.cs | 147 + ...pletionFunctionCallOption.Serialization.cs | 132 + .../ChatCompletionFunctionCallOption.cs | 76 + .../ChatCompletionFunctions.Serialization.cs | 158 + .../Models/ChatCompletionFunctions.cs | 97 + ...CompletionMessageToolCall.Serialization.cs | 148 + .../Models/ChatCompletionMessageToolCall.cs | 85 + ...onMessageToolCallFunction.Serialization.cs | 140 + .../ChatCompletionMessageToolCallFunction.cs | 92 + .../ChatCompletionMessageToolCallType.cs | 45 + ...CompletionNamedToolChoice.Serialization.cs | 140 + .../Models/ChatCompletionNamedToolChoice.cs | 78 + ...onNamedToolChoiceFunction.Serialization.cs | 132 + .../ChatCompletionNamedToolChoiceFunction.cs | 73 + .../ChatCompletionNamedToolChoiceType.cs | 45 + ...CompletionResponseMessage.Serialization.cs | 192 + .../Models/ChatCompletionResponseMessage.cs | 84 + ...sponseMessageFunctionCall.Serialization.cs | 140 + ...atCompletionResponseMessageFunctionCall.cs | 92 + .../ChatCompletionResponseMessageRole.cs | 45 + ...hatCompletionTokenLogprob.Serialization.cs | 188 + .../Models/ChatCompletionTokenLogprob.cs | 117 + ...ionTokenLogprobTopLogprob.Serialization.cs | 170 + .../ChatCompletionTokenLogprobTopLogprob.cs | 101 + .../ChatCompletionTool.Serialization.cs | 140 + .../Generated/Models/ChatCompletionTool.cs | 78 + .../Models/ChatCompletionToolType.cs | 45 + .../Models/CompletionUsage.Serialization.cs | 148 + .../src/Generated/Models/CompletionUsage.cs | 81 + ...reateAssistantFileRequest.Serialization.cs | 132 + .../Models/CreateAssistantFileRequest.cs | 82 + .../CreateAssistantRequest.Serialization.cs | 303 + .../Models/CreateAssistantRequest.cs | 161 + ...eateChatCompletionRequest.Serialization.cs | 582 ++ .../Models/CreateChatCompletionRequest.cs | 512 ++ .../CreateChatCompletionRequestModel.cs | 90 + ...tionRequestResponseFormat.Serialization.cs | 139 + ...eateChatCompletionRequestResponseFormat.cs | 62 + ...ChatCompletionRequestResponseFormatType.cs | 48 + 
...ateChatCompletionResponse.Serialization.cs | 200 + .../Models/CreateChatCompletionResponse.cs | 117 + ...tCompletionResponseChoice.Serialization.cs | 168 + .../CreateChatCompletionResponseChoice.cs | 109 + ...hatCompletionResponseChoiceFinishReason.cs | 57 + ...ionResponseChoiceLogprobs.Serialization.cs | 154 + ...ateChatCompletionResponseChoiceLogprobs.cs | 70 + .../CreateChatCompletionResponseObject.cs | 45 + .../CreateCompletionRequest.Serialization.cs | 509 ++ .../Models/CreateCompletionRequest.cs | 401 ++ .../Models/CreateCompletionRequestModel.cs | 51 + .../CreateCompletionResponse.Serialization.cs | 200 + .../Models/CreateCompletionResponse.cs | 120 + ...eCompletionResponseChoice.Serialization.cs | 168 + .../Models/CreateCompletionResponseChoice.cs | 109 + ...ateCompletionResponseChoiceFinishReason.cs | 51 + ...ionResponseChoiceLogprobs.Serialization.cs | 219 + .../CreateCompletionResponseChoiceLogprobs.cs | 95 + .../Models/CreateCompletionResponseObject.cs | 45 + .../CreateEmbeddingRequest.Serialization.cs | 188 + .../Models/CreateEmbeddingRequest.cs | 186 + .../CreateEmbeddingRequestEncodingFormat.cs | 48 + .../Models/CreateEmbeddingRequestModel.cs | 51 + .../CreateEmbeddingResponse.Serialization.cs | 166 + .../Models/CreateEmbeddingResponse.cs | 93 + .../Models/CreateEmbeddingResponseObject.cs | 45 + ...ateEmbeddingResponseUsage.Serialization.cs | 140 + .../Models/CreateEmbeddingResponseUsage.cs | 75 + .../Models/CreateFileRequest.Serialization.cs | 140 + .../src/Generated/Models/CreateFileRequest.cs | 109 + .../Models/CreateFileRequestPurpose.cs | 48 + .../CreateFineTuneRequest.Serialization.cs | 386 ++ .../Generated/Models/CreateFineTuneRequest.cs | 295 + .../Models/CreateFineTuneRequestModel.cs | 54 + ...reateFineTuningJobRequest.Serialization.cs | 201 + .../Models/CreateFineTuningJobRequest.cs | 159 + ...JobRequestHyperparameters.Serialization.cs | 146 + ...eateFineTuningJobRequestHyperparameters.cs | 106 + 
.../Models/CreateFineTuningJobRequestModel.cs | 51 + .../CreateImageEditRequest.Serialization.cs | 234 + .../Models/CreateImageEditRequest.cs | 153 + .../Models/CreateImageEditRequestModel.cs | 45 + .../CreateImageEditRequestResponseFormat.cs | 48 + .../Models/CreateImageEditRequestSize.cs | 51 + .../CreateImageRequest.Serialization.cs | 241 + .../Generated/Models/CreateImageRequest.cs | 142 + .../Models/CreateImageRequestModel.cs | 48 + .../Models/CreateImageRequestQuality.cs | 48 + .../CreateImageRequestResponseFormat.cs | 48 + .../Models/CreateImageRequestSize.cs | 57 + .../Models/CreateImageRequestStyle.cs | 48 + ...eateImageVariationRequest.Serialization.cs | 211 + .../Models/CreateImageVariationRequest.cs | 121 + .../CreateImageVariationRequestModel.cs | 45 + ...eateImageVariationRequestResponseFormat.cs | 48 + .../Models/CreateImageVariationRequestSize.cs | 51 + .../CreateMessageRequest.Serialization.cs | 198 + .../Generated/Models/CreateMessageRequest.cs | 104 + .../Models/CreateMessageRequestRole.cs | 45 + .../CreateModerationRequest.Serialization.cs | 154 + .../Models/CreateModerationRequest.cs | 129 + .../Models/CreateModerationRequestModel.cs | 48 + .../CreateModerationResponse.Serialization.cs | 158 + .../Models/CreateModerationResponse.cs | 88 + ...eModerationResponseResult.Serialization.cs | 148 + .../Models/CreateModerationResponseResult.cs | 86 + ...nResponseResultCategories.Serialization.cs | 212 + ...reateModerationResponseResultCategories.cs | 189 + ...ponseResultCategoryScores.Serialization.cs | 212 + ...eModerationResponseResultCategoryScores.cs | 129 + .../Models/CreateRunRequest.Serialization.cs | 285 + .../src/Generated/Models/CreateRunRequest.cs | 156 + .../CreateSpeechRequest.Serialization.cs | 178 + .../Generated/Models/CreateSpeechRequest.cs | 105 + .../Models/CreateSpeechRequestModel.cs | 48 + .../CreateSpeechRequestResponseFormat.cs | 54 + .../Models/CreateSpeechRequestVoice.cs | 60 + ...CreateThreadAndRunRequest.Serialization.cs | 
277 + .../Models/CreateThreadAndRunRequest.cs | 150 + .../CreateThreadRequest.Serialization.cs | 182 + .../Generated/Models/CreateThreadRequest.cs | 77 + ...reateTranscriptionRequest.Serialization.cs | 192 + .../Models/CreateTranscriptionRequest.cs | 147 + .../Models/CreateTranscriptionRequestModel.cs | 45 + ...reateTranscriptionRequestResponseFormat.cs | 57 + ...eateTranscriptionResponse.Serialization.cs | 198 + .../Models/CreateTranscriptionResponse.cs | 96 + .../Models/CreateTranscriptionResponseTask.cs | 45 + .../CreateTranslationRequest.Serialization.cs | 181 + .../Models/CreateTranslationRequest.cs | 135 + .../Models/CreateTranslationRequestModel.cs | 45 + .../CreateTranslationRequestResponseFormat.cs | 57 + ...CreateTranslationResponse.Serialization.cs | 198 + .../Models/CreateTranslationResponse.cs | 96 + .../Models/CreateTranslationResponseTask.cs | 45 + ...leteAssistantFileResponse.Serialization.cs | 148 + .../Models/DeleteAssistantFileResponse.cs | 86 + .../DeleteAssistantFileResponseObject.cs | 45 + .../DeleteAssistantResponse.Serialization.cs | 148 + .../Models/DeleteAssistantResponse.cs | 83 + .../Models/DeleteAssistantResponseObject.cs | 45 + .../DeleteFileResponse.Serialization.cs | 148 + .../Generated/Models/DeleteFileResponse.cs | 84 + .../Models/DeleteFileResponseObject.cs | 45 + .../DeleteModelResponse.Serialization.cs | 148 + .../Generated/Models/DeleteModelResponse.cs | 83 + .../Models/DeleteModelResponseObject.cs | 45 + .../DeleteThreadResponse.Serialization.cs | 148 + .../Generated/Models/DeleteThreadResponse.cs | 83 + .../Models/DeleteThreadResponseObject.cs | 45 + .../Models/Embedding.Serialization.cs | 155 + .dotnet/src/Generated/Models/Embedding.cs | 130 + .../src/Generated/Models/EmbeddingObject.cs | 45 + .../Models/FineTune.Serialization.cs | 287 + .dotnet/src/Generated/Models/FineTune.cs | 169 + .../Models/FineTuneEvent.Serialization.cs | 156 + .dotnet/src/Generated/Models/FineTuneEvent.cs | 93 + 
.../FineTuneHyperparams.Serialization.cs | 197 + .../Generated/Models/FineTuneHyperparams.cs | 117 + .../src/Generated/Models/FineTuneObject.cs | 45 + .../src/Generated/Models/FineTuneStatus.cs | 57 + .../Models/FineTuningJob.Serialization.cs | 306 + .dotnet/src/Generated/Models/FineTuningJob.cs | 237 + .../FineTuningJobError.Serialization.cs | 169 + .../Generated/Models/FineTuningJobError.cs | 76 + .../FineTuningJobEvent.Serialization.cs | 164 + .../Generated/Models/FineTuningJobEvent.cs | 99 + .../Models/FineTuningJobEventLevel.cs | 51 + ...eTuningJobHyperparameters.Serialization.cs | 146 + .../Models/FineTuningJobHyperparameters.cs | 112 + .../Generated/Models/FineTuningJobObject.cs | 45 + .../Generated/Models/FineTuningJobStatus.cs | 60 + .../Models/FunctionObject.Serialization.cs | 158 + .../src/Generated/Models/FunctionObject.cs | 96 + .../FunctionParameters.Serialization.cs | 118 + .../Generated/Models/FunctionParameters.cs | 65 + .../Generated/Models/Image.Serialization.cs | 165 + .dotnet/src/Generated/Models/Image.cs | 85 + .../Models/ImagesResponse.Serialization.cs | 150 + .../src/Generated/Models/ImagesResponse.cs | 80 + ...istAssistantFilesResponse.Serialization.cs | 174 + .../Models/ListAssistantFilesResponse.cs | 99 + .../ListAssistantFilesResponseObject.cs | 45 + .../ListAssistantsResponse.Serialization.cs | 174 + .../Models/ListAssistantsResponse.cs | 99 + .../Models/ListAssistantsResponseObject.cs | 45 + .../Models/ListFilesResponse.Serialization.cs | 150 + .../src/Generated/Models/ListFilesResponse.cs | 78 + .../Models/ListFilesResponseObject.cs | 45 + ...istFineTuneEventsResponse.Serialization.cs | 150 + .../Models/ListFineTuneEventsResponse.cs | 81 + .../ListFineTunesResponse.Serialization.cs | 150 + .../Generated/Models/ListFineTunesResponse.cs | 81 + ...neTuningJobEventsResponse.Serialization.cs | 150 + .../Models/ListFineTuningJobEventsResponse.cs | 81 + .../ListMessageFilesResponse.Serialization.cs | 174 + 
.../Models/ListMessageFilesResponse.cs | 99 + .../Models/ListMessageFilesResponseObject.cs | 45 + .../ListMessagesResponse.Serialization.cs | 174 + .../Generated/Models/ListMessagesResponse.cs | 99 + .../Models/ListMessagesResponseObject.cs | 45 + .../ListModelsResponse.Serialization.cs | 150 + .../Generated/Models/ListModelsResponse.cs | 79 + .../Models/ListModelsResponseObject.cs | 45 + .dotnet/src/Generated/Models/ListOrder.cs | 47 + ...tedFineTuningJobsResponse.Serialization.cs | 158 + .../ListPaginatedFineTuningJobsResponse.cs | 87 + .../ListRunStepsResponse.Serialization.cs | 174 + .../Generated/Models/ListRunStepsResponse.cs | 99 + .../Models/ListRunStepsResponseObject.cs | 45 + .../Models/ListRunsResponse.Serialization.cs | 174 + .../src/Generated/Models/ListRunsResponse.cs | 99 + .../Models/ListRunsResponseObject.cs | 45 + .../Models/MessageFileObject.Serialization.cs | 156 + .../src/Generated/Models/MessageFileObject.cs | 91 + .../Models/MessageFileObjectObject.cs | 45 + .../Models/MessageObject.Serialization.cs | 290 + .dotnet/src/Generated/Models/MessageObject.cs | 201 + .../Generated/Models/MessageObjectObject.cs | 45 + .../src/Generated/Models/MessageObjectRole.cs | 48 + .../Generated/Models/Model.Serialization.cs | 156 + .dotnet/src/Generated/Models/Model.cs | 91 + .dotnet/src/Generated/Models/ModelObject.cs | 45 + .../ModifyAssistantRequest.Serialization.cs | 306 + .../Models/ModifyAssistantRequest.cs | 147 + .../ModifyMessageRequest.Serialization.cs | 157 + .../Generated/Models/ModifyMessageRequest.cs | 72 + .../Models/ModifyRunRequest.Serialization.cs | 157 + .../src/Generated/Models/ModifyRunRequest.cs | 72 + .../ModifyThreadRequest.Serialization.cs | 157 + .../Generated/Models/ModifyThreadRequest.cs | 72 + .../Models/OpenAIFile.Serialization.cs | 191 + .dotnet/src/Generated/Models/OpenAIFile.cs | 137 + .../src/Generated/Models/OpenAIFileObject.cs | 45 + .../src/Generated/Models/OpenAIFilePurpose.cs | 54 + 
.../src/Generated/Models/OpenAIFileStatus.cs | 51 + .../RunCompletionUsage.Serialization.cs | 148 + .../Generated/Models/RunCompletionUsage.cs | 84 + .../Models/RunObject.Serialization.cs | 422 ++ .dotnet/src/Generated/Models/RunObject.cs | 264 + .../RunObjectLastError.Serialization.cs | 140 + .../Generated/Models/RunObjectLastError.cs | 79 + .../Models/RunObjectLastErrorCode.cs | 48 + .../src/Generated/Models/RunObjectObject.cs | 45 + .../RunObjectRequiredAction.Serialization.cs | 140 + .../Models/RunObjectRequiredAction.cs | 78 + ...edActionSubmitToolOutputs.Serialization.cs | 142 + ...unObjectRequiredActionSubmitToolOutputs.cs | 74 + .../Models/RunObjectRequiredActionType.cs | 45 + .../src/Generated/Models/RunObjectStatus.cs | 66 + ...ailsMessageCreationObject.Serialization.cs | 140 + .../RunStepDetailsMessageCreationObject.cs | 78 + ...tionObjectMessageCreation.Serialization.cs | 132 + ...ilsMessageCreationObjectMessageCreation.cs | 73 + ...RunStepDetailsMessageCreationObjectType.cs | 45 + ...tepDetailsToolCallsObject.Serialization.cs | 169 + .../Models/RunStepDetailsToolCallsObject.cs | 115 + .../RunStepDetailsToolCallsObjectType.cs | 45 + .../Models/RunStepObject.Serialization.cs | 354 + .dotnet/src/Generated/Models/RunStepObject.cs | 236 + .../RunStepObjectLastError.Serialization.cs | 140 + .../Models/RunStepObjectLastError.cs | 79 + .../Models/RunStepObjectLastErrorCode.cs | 48 + .../Generated/Models/RunStepObjectObject.cs | 45 + .../Generated/Models/RunStepObjectStatus.cs | 57 + .../src/Generated/Models/RunStepObjectType.cs | 48 + .../Models/RunToolCallObject.Serialization.cs | 148 + .../src/Generated/Models/RunToolCallObject.cs | 94 + ...RunToolCallObjectFunction.Serialization.cs | 140 + .../Models/RunToolCallObjectFunction.cs | 80 + .../Generated/Models/RunToolCallObjectType.cs | 45 + ...bmitToolOutputsRunRequest.Serialization.cs | 132 + .../Models/SubmitToolOutputsRunRequest.cs | 73 + ...putsRunRequestToolOutputs.Serialization.cs | 146 + 
.../SubmitToolOutputsRunRequestToolOutputs.cs | 72 + .../Models/ThreadObject.Serialization.cs | 179 + .dotnet/src/Generated/Models/ThreadObject.cs | 102 + .../Generated/Models/ThreadObjectObject.cs | 45 + .dotnet/src/Generated/ModelsOps.cs | 419 ++ .dotnet/src/Generated/Moderations.cs | 183 + .dotnet/src/Generated/OpenAIClient.cs | 158 + .dotnet/src/Generated/OpenAIClientOptions.cs | 13 + .dotnet/src/Generated/OpenAIModelFactory.cs | 1769 +++++ .dotnet/src/Generated/Runs.cs | 1442 ++++ .dotnet/src/Generated/Threads.cs | 555 ++ .dotnet/src/OpenAI.csproj | 16 + .../tests/Generated/Tests/AssistantsTests.cs | 22 + .dotnet/tests/Generated/Tests/AudioTests.cs | 22 + .dotnet/tests/Generated/Tests/ChatTests.cs | 22 + .../tests/Generated/Tests/CompletionsTests.cs | 22 + .../tests/Generated/Tests/EmbeddingsTests.cs | 22 + .dotnet/tests/Generated/Tests/FilesTests.cs | 22 + .../Generated/Tests/FineTuningJobsTests.cs | 22 + .dotnet/tests/Generated/Tests/ImagesTests.cs | 22 + .../tests/Generated/Tests/MessagesTests.cs | 22 + .../tests/Generated/Tests/ModelsOpsTests.cs | 22 + .../tests/Generated/Tests/ModerationsTests.cs | 22 + .dotnet/tests/Generated/Tests/RunsTests.cs | 22 + .dotnet/tests/Generated/Tests/ThreadsTests.cs | 22 + .dotnet/tests/OpenAI.Tests.csproj | 18 + .../@typespec/openapi3/openapi.yaml | 6019 +++++++++++++++++ .gitignore | 178 + assistants/operations.tsp | 5 +- common/models.tsp | 5 + files/operations.tsp | 2 +- messages/operations.tsp | 5 +- runs/operations.tsp | 5 +- tsp-output/@typespec/openapi3/openapi.yaml | 72 +- tspconfig.yaml | 4 + 327 files changed, 51978 insertions(+), 66 deletions(-) create mode 100644 .dotnet/OpenAI.sln create mode 100644 .dotnet/src/Generated/Assistants.cs create mode 100644 .dotnet/src/Generated/Audio.cs create mode 100644 .dotnet/src/Generated/Chat.cs create mode 100644 .dotnet/src/Generated/Completions.cs create mode 100644 .dotnet/src/Generated/Embeddings.cs create mode 100644 .dotnet/src/Generated/Files.cs create mode 100644 
.dotnet/src/Generated/FineTunes.cs create mode 100644 .dotnet/src/Generated/FineTuning.cs create mode 100644 .dotnet/src/Generated/FineTuningJobs.cs create mode 100644 .dotnet/src/Generated/Images.cs create mode 100644 .dotnet/src/Generated/Messages.cs create mode 100644 .dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/AssistantFileObject.cs create mode 100644 .dotnet/src/Generated/Models/AssistantFileObjectObject.cs create mode 100644 .dotnet/src/Generated/Models/AssistantObject.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/AssistantObject.cs create mode 100644 .dotnet/src/Generated/Models/AssistantObjectObject.cs create mode 100644 .dotnet/src/Generated/Models/AudioSegment.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/AudioSegment.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionFunctions.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs create mode 100644 
.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionTool.cs create mode 100644 .dotnet/src/Generated/Models/ChatCompletionToolType.cs create mode 100644 .dotnet/src/Generated/Models/CompletionUsage.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CompletionUsage.cs create mode 100644 .dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateAssistantFileRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateAssistantRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateChatCompletionRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs create mode 100644 
.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs create mode 100644 .dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs create mode 100644 .dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateChatCompletionResponse.cs create mode 100644 .dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs create mode 100644 .dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs create mode 100644 .dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.cs create mode 100644 .dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateCompletionRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateCompletionRequestModel.cs create mode 100644 .dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateCompletionResponse.cs create mode 100644 .dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs create mode 100644 .dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs create mode 100644 .dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs create mode 100644 .dotnet/src/Generated/Models/CreateCompletionResponseObject.cs create mode 
100644 .dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs create mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs create mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingResponse.cs create mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.cs create mode 100644 .dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateFileRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateFileRequestPurpose.cs create mode 100644 .dotnet/src/Generated/Models/CreateFineTuneRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateFineTuneRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateFineTuneRequestModel.cs create mode 100644 .dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs create mode 100644 .dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageEditRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageEditRequestModel.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs create mode 
100644 .dotnet/src/Generated/Models/CreateImageEditRequestSize.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageRequestModel.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageRequestQuality.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageRequestSize.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageRequestStyle.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageVariationRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs create mode 100644 .dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs create mode 100644 .dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateMessageRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateMessageRequestRole.cs create mode 100644 .dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateModerationRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateModerationRequestModel.cs create mode 100644 .dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateModerationResponse.cs create mode 100644 .dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateModerationResponseResult.cs create mode 100644 .dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs create mode 100644 
.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs create mode 100644 .dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs create mode 100644 .dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateRunRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateSpeechRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateSpeechRequestModel.cs create mode 100644 .dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs create mode 100644 .dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs create mode 100644 .dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateThreadRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateTranscriptionRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs create mode 100644 .dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs create mode 100644 .dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateTranscriptionResponse.cs create mode 100644 .dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs create mode 100644 .dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateTranslationRequest.cs create mode 100644 .dotnet/src/Generated/Models/CreateTranslationRequestModel.cs create 
mode 100644 .dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs create mode 100644 .dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/CreateTranslationResponse.cs create mode 100644 .dotnet/src/Generated/Models/CreateTranslationResponseTask.cs create mode 100644 .dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs create mode 100644 .dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/DeleteAssistantResponse.cs create mode 100644 .dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/DeleteFileResponse.cs create mode 100644 .dotnet/src/Generated/Models/DeleteFileResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/DeleteModelResponse.cs create mode 100644 .dotnet/src/Generated/Models/DeleteModelResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/DeleteThreadResponse.cs create mode 100644 .dotnet/src/Generated/Models/DeleteThreadResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/Embedding.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/Embedding.cs create mode 100644 .dotnet/src/Generated/Models/EmbeddingObject.cs create mode 100644 .dotnet/src/Generated/Models/FineTune.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/FineTune.cs create mode 100644 .dotnet/src/Generated/Models/FineTuneEvent.Serialization.cs create mode 100644 
.dotnet/src/Generated/Models/FineTuneEvent.cs create mode 100644 .dotnet/src/Generated/Models/FineTuneHyperparams.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/FineTuneHyperparams.cs create mode 100644 .dotnet/src/Generated/Models/FineTuneObject.cs create mode 100644 .dotnet/src/Generated/Models/FineTuneStatus.cs create mode 100644 .dotnet/src/Generated/Models/FineTuningJob.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/FineTuningJob.cs create mode 100644 .dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/FineTuningJobError.cs create mode 100644 .dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/FineTuningJobEvent.cs create mode 100644 .dotnet/src/Generated/Models/FineTuningJobEventLevel.cs create mode 100644 .dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs create mode 100644 .dotnet/src/Generated/Models/FineTuningJobObject.cs create mode 100644 .dotnet/src/Generated/Models/FineTuningJobStatus.cs create mode 100644 .dotnet/src/Generated/Models/FunctionObject.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/FunctionObject.cs create mode 100644 .dotnet/src/Generated/Models/FunctionParameters.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/FunctionParameters.cs create mode 100644 .dotnet/src/Generated/Models/Image.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/Image.cs create mode 100644 .dotnet/src/Generated/Models/ImagesResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ImagesResponse.cs create mode 100644 .dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ListAssistantFilesResponse.cs create mode 100644 
.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ListAssistantsResponse.cs create mode 100644 .dotnet/src/Generated/Models/ListAssistantsResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ListFilesResponse.cs create mode 100644 .dotnet/src/Generated/Models/ListFilesResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/ListFineTuneEventsResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ListFineTuneEventsResponse.cs create mode 100644 .dotnet/src/Generated/Models/ListFineTunesResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ListFineTunesResponse.cs create mode 100644 .dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs create mode 100644 .dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ListMessageFilesResponse.cs create mode 100644 .dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ListMessagesResponse.cs create mode 100644 .dotnet/src/Generated/Models/ListMessagesResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ListModelsResponse.cs create mode 100644 .dotnet/src/Generated/Models/ListModelsResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/ListOrder.cs create mode 100644 .dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs create mode 100644 
.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs create mode 100644 .dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ListRunStepsResponse.cs create mode 100644 .dotnet/src/Generated/Models/ListRunStepsResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ListRunsResponse.cs create mode 100644 .dotnet/src/Generated/Models/ListRunsResponseObject.cs create mode 100644 .dotnet/src/Generated/Models/MessageFileObject.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/MessageFileObject.cs create mode 100644 .dotnet/src/Generated/Models/MessageFileObjectObject.cs create mode 100644 .dotnet/src/Generated/Models/MessageObject.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/MessageObject.cs create mode 100644 .dotnet/src/Generated/Models/MessageObjectObject.cs create mode 100644 .dotnet/src/Generated/Models/MessageObjectRole.cs create mode 100644 .dotnet/src/Generated/Models/Model.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/Model.cs create mode 100644 .dotnet/src/Generated/Models/ModelObject.cs create mode 100644 .dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ModifyAssistantRequest.cs create mode 100644 .dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ModifyMessageRequest.cs create mode 100644 .dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ModifyRunRequest.cs create mode 100644 .dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ModifyThreadRequest.cs create mode 100644 .dotnet/src/Generated/Models/OpenAIFile.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/OpenAIFile.cs 
create mode 100644 .dotnet/src/Generated/Models/OpenAIFileObject.cs create mode 100644 .dotnet/src/Generated/Models/OpenAIFilePurpose.cs create mode 100644 .dotnet/src/Generated/Models/OpenAIFileStatus.cs create mode 100644 .dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/RunCompletionUsage.cs create mode 100644 .dotnet/src/Generated/Models/RunObject.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/RunObject.cs create mode 100644 .dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/RunObjectLastError.cs create mode 100644 .dotnet/src/Generated/Models/RunObjectLastErrorCode.cs create mode 100644 .dotnet/src/Generated/Models/RunObjectObject.cs create mode 100644 .dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/RunObjectRequiredAction.cs create mode 100644 .dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs create mode 100644 .dotnet/src/Generated/Models/RunObjectRequiredActionType.cs create mode 100644 .dotnet/src/Generated/Models/RunObjectStatus.cs create mode 100644 .dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs create mode 100644 .dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs create mode 100644 .dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs create mode 100644 .dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs create mode 100644 
.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs create mode 100644 .dotnet/src/Generated/Models/RunStepObject.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/RunStepObject.cs create mode 100644 .dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/RunStepObjectLastError.cs create mode 100644 .dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs create mode 100644 .dotnet/src/Generated/Models/RunStepObjectObject.cs create mode 100644 .dotnet/src/Generated/Models/RunStepObjectStatus.cs create mode 100644 .dotnet/src/Generated/Models/RunStepObjectType.cs create mode 100644 .dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/RunToolCallObject.cs create mode 100644 .dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/RunToolCallObjectFunction.cs create mode 100644 .dotnet/src/Generated/Models/RunToolCallObjectType.cs create mode 100644 .dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs create mode 100644 .dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.cs create mode 100644 .dotnet/src/Generated/Models/ThreadObject.Serialization.cs create mode 100644 .dotnet/src/Generated/Models/ThreadObject.cs create mode 100644 .dotnet/src/Generated/Models/ThreadObjectObject.cs create mode 100644 .dotnet/src/Generated/ModelsOps.cs create mode 100644 .dotnet/src/Generated/Moderations.cs create mode 100644 .dotnet/src/Generated/OpenAIClient.cs create mode 100644 .dotnet/src/Generated/OpenAIClientOptions.cs create mode 100644 .dotnet/src/Generated/OpenAIModelFactory.cs create mode 100644 .dotnet/src/Generated/Runs.cs create mode 100644 
.dotnet/src/Generated/Threads.cs create mode 100644 .dotnet/src/OpenAI.csproj create mode 100644 .dotnet/tests/Generated/Tests/AssistantsTests.cs create mode 100644 .dotnet/tests/Generated/Tests/AudioTests.cs create mode 100644 .dotnet/tests/Generated/Tests/ChatTests.cs create mode 100644 .dotnet/tests/Generated/Tests/CompletionsTests.cs create mode 100644 .dotnet/tests/Generated/Tests/EmbeddingsTests.cs create mode 100644 .dotnet/tests/Generated/Tests/FilesTests.cs create mode 100644 .dotnet/tests/Generated/Tests/FineTuningJobsTests.cs create mode 100644 .dotnet/tests/Generated/Tests/ImagesTests.cs create mode 100644 .dotnet/tests/Generated/Tests/MessagesTests.cs create mode 100644 .dotnet/tests/Generated/Tests/ModelsOpsTests.cs create mode 100644 .dotnet/tests/Generated/Tests/ModerationsTests.cs create mode 100644 .dotnet/tests/Generated/Tests/RunsTests.cs create mode 100644 .dotnet/tests/Generated/Tests/ThreadsTests.cs create mode 100644 .dotnet/tests/OpenAI.Tests.csproj create mode 100644 .dotnet/tsp-output/@typespec/openapi3/openapi.yaml create mode 100644 .gitignore create mode 100644 tspconfig.yaml diff --git a/.dotnet/OpenAI.sln b/.dotnet/OpenAI.sln new file mode 100644 index 000000000..5d1edd8c3 --- /dev/null +++ b/.dotnet/OpenAI.sln @@ -0,0 +1,50 @@ +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.29709.97 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "OpenAI", "src\OpenAI.csproj", "{28FF4005-4467-4E36-92E7-DEA27DEB1519}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "OpenAI.Tests", "tests\OpenAI.Tests.csproj", "{1F1CD1D4-9932-4B73-99D8-C252A67D4B46}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + 
{B0C276D1-2930-4887-B29A-D1A33E7009A2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {B0C276D1-2930-4887-B29A-D1A33E7009A2}.Debug|Any CPU.Build.0 = Debug|Any CPU + {B0C276D1-2930-4887-B29A-D1A33E7009A2}.Release|Any CPU.ActiveCfg = Release|Any CPU + {B0C276D1-2930-4887-B29A-D1A33E7009A2}.Release|Any CPU.Build.0 = Release|Any CPU + {8E9A77AC-792A-4432-8320-ACFD46730401}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {8E9A77AC-792A-4432-8320-ACFD46730401}.Debug|Any CPU.Build.0 = Debug|Any CPU + {8E9A77AC-792A-4432-8320-ACFD46730401}.Release|Any CPU.ActiveCfg = Release|Any CPU + {8E9A77AC-792A-4432-8320-ACFD46730401}.Release|Any CPU.Build.0 = Release|Any CPU + {A4241C1F-A53D-474C-9E4E-075054407E74}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A4241C1F-A53D-474C-9E4E-075054407E74}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A4241C1F-A53D-474C-9E4E-075054407E74}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A4241C1F-A53D-474C-9E4E-075054407E74}.Release|Any CPU.Build.0 = Release|Any CPU + {FA8BD3F1-8616-47B6-974C-7576CDF4717E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {FA8BD3F1-8616-47B6-974C-7576CDF4717E}.Debug|Any CPU.Build.0 = Debug|Any CPU + {FA8BD3F1-8616-47B6-974C-7576CDF4717E}.Release|Any CPU.ActiveCfg = Release|Any CPU + {FA8BD3F1-8616-47B6-974C-7576CDF4717E}.Release|Any CPU.Build.0 = Release|Any CPU + {85677AD3-C214-42FA-AE6E-49B956CAC8DC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {85677AD3-C214-42FA-AE6E-49B956CAC8DC}.Debug|Any CPU.Build.0 = Debug|Any CPU + {85677AD3-C214-42FA-AE6E-49B956CAC8DC}.Release|Any CPU.ActiveCfg = Release|Any CPU + {85677AD3-C214-42FA-AE6E-49B956CAC8DC}.Release|Any CPU.Build.0 = Release|Any CPU + {28FF4005-4467-4E36-92E7-DEA27DEB1519}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {28FF4005-4467-4E36-92E7-DEA27DEB1519}.Debug|Any CPU.Build.0 = Debug|Any CPU + {28FF4005-4467-4E36-92E7-DEA27DEB1519}.Release|Any CPU.ActiveCfg = Release|Any CPU + {28FF4005-4467-4E36-92E7-DEA27DEB1519}.Release|Any CPU.Build.0 = Release|Any CPU + 
{1F1CD1D4-9932-4B73-99D8-C252A67D4B46}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {1F1CD1D4-9932-4B73-99D8-C252A67D4B46}.Debug|Any CPU.Build.0 = Debug|Any CPU + {1F1CD1D4-9932-4B73-99D8-C252A67D4B46}.Release|Any CPU.ActiveCfg = Release|Any CPU + {1F1CD1D4-9932-4B73-99D8-C252A67D4B46}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {A97F4B90-2591-4689-B1F8-5F21FE6D6CAE} + EndGlobalSection +EndGlobal diff --git a/.dotnet/src/Generated/Assistants.cs b/.dotnet/src/Generated/Assistants.cs new file mode 100644 index 000000000..748820476 --- /dev/null +++ b/.dotnet/src/Generated/Assistants.cs @@ -0,0 +1,1352 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The Assistants sub-client. + public partial class Assistants + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of Assistants for mocking. + protected Assistants() + { + } + + /// Initializes a new instance of Assistants. + /// The handler for diagnostic messaging in the client. 
+ /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Assistants(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + /// Create an assistant with a model and instructions. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual async Task> CreateAssistantAsync(CreateAssistantRequest assistant, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(assistant, nameof(assistant)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = assistant.ToRequestBody(); + Result result = await CreateAssistantAsync(content, context).ConfigureAwait(false); + return Result.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Create an assistant with a model and instructions. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual Result CreateAssistant(CreateAssistantRequest assistant, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(assistant, nameof(assistant)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = assistant.ToRequestBody(); + Result result = CreateAssistant(content, context); + return Result.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Create an assistant with a model and instructions. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
+ /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateAssistantAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.CreateAssistant"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateAssistantRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Create an assistant with a model and instructions. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result CreateAssistant(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.CreateAssistant"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateAssistantRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Returns a list of assistants. 
+ /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The cancellation token to use. + public virtual async Task> GetAssistantsAsync(int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + { + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetAssistantsAsync(limit, order?.ToString(), after, before, context).ConfigureAwait(false); + return Result.FromValue(ListAssistantsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns a list of assistants. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The cancellation token to use. + public virtual Result GetAssistants(int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + { + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetAssistants(limit, order?.ToString(), after, before, context); + return Result.FromValue(ListAssistantsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns a list of assistants. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. 
`before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetAssistantsAsync(int? limit, string order, string after, string before, RequestOptions context) + { + using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistants"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantsRequest(limit, order, after, before, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns a list of assistants. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. 
`before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetAssistants(int? limit, string order, string after, string before, RequestOptions context) + { + using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistants"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantsRequest(limit, order, after, before, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Retrieves an assistant. + /// The ID of the assistant to retrieve. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> GetAssistantAsync(string assistantId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetAssistantAsync(assistantId, context).ConfigureAwait(false); + return Result.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieves an assistant. + /// The ID of the assistant to retrieve. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. 
+ public virtual Result GetAssistant(string assistantId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetAssistant(assistantId, context); + return Result.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves an assistant. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetAssistantAsync(string assistantId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistant"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantRequest(assistantId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves an assistant. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant to retrieve. 
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetAssistant(string assistantId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistant"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantRequest(assistantId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Modifies an assistant. + /// The ID of the assistant to modify. + /// The to use. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> ModifyAssistantAsync(string assistantId, ModifyAssistantRequest assistant, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNull(assistant, nameof(assistant)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = assistant.ToRequestBody(); + Result result = await ModifyAssistantAsync(assistantId, content, context).ConfigureAwait(false); + return Result.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Modifies an assistant. + /// The ID of the assistant to modify. + /// The to use. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. 
+ public virtual Result ModifyAssistant(string assistantId, ModifyAssistantRequest assistant, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNull(assistant, nameof(assistant)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = assistant.ToRequestBody(); + Result result = ModifyAssistant(assistantId, content, context); + return Result.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Modifies an assistant. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant to modify. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task ModifyAssistantAsync(string assistantId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.ModifyAssistant"); + scope.Start(); + try + { + using PipelineMessage message = CreateModifyAssistantRequest(assistantId, content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Modifies an assistant. 
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant to modify. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result ModifyAssistant(string assistantId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.ModifyAssistant"); + scope.Start(); + try + { + using PipelineMessage message = CreateModifyAssistantRequest(assistantId, content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Delete an assistant. + /// The ID of the assistant to delete. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> DeleteAssistantAsync(string assistantId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await DeleteAssistantAsync(assistantId, context).ConfigureAwait(false); + return Result.FromValue(DeleteAssistantResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Delete an assistant. + /// The ID of the assistant to delete. 
+ /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result DeleteAssistant(string assistantId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = DeleteAssistant(assistantId, context); + return Result.FromValue(DeleteAssistantResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Delete an assistant. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant to delete. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task DeleteAssistantAsync(string assistantId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.DeleteAssistant"); + scope.Start(); + try + { + using PipelineMessage message = CreateDeleteAssistantRequest(assistantId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Delete an assistant. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant to delete. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result DeleteAssistant(string assistantId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.DeleteAssistant"); + scope.Start(); + try + { + using PipelineMessage message = CreateDeleteAssistantRequest(assistantId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// Create an assistant file by attaching a [File](/docs/api-reference/files) to a + /// [assistant](/docs/api-reference/assistants). + /// + /// The ID of the assistant for which to create a file. + /// The to use. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. 
+ public virtual async Task> CreateAssistantFileAsync(string assistantId, CreateAssistantFileRequest file, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNull(file, nameof(file)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = file.ToRequestBody(); + Result result = await CreateAssistantFileAsync(assistantId, content, context).ConfigureAwait(false); + return Result.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Create an assistant file by attaching a [File](/docs/api-reference/files) to a + /// [assistant](/docs/api-reference/assistants). + /// + /// The ID of the assistant for which to create a file. + /// The to use. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result CreateAssistantFile(string assistantId, CreateAssistantFileRequest file, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNull(file, nameof(file)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = file.ToRequestBody(); + Result result = CreateAssistantFile(assistantId, content, context); + return Result.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Create an assistant file by attaching a [File](/docs/api-reference/files) to a + /// [assistant](/docs/api-reference/assistants). + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
+ /// + /// + /// + /// + /// The ID of the assistant for which to create a file. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateAssistantFileAsync(string assistantId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.CreateAssistantFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateAssistantFileRequest(assistantId, content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Create an assistant file by attaching a [File](/docs/api-reference/files) to a + /// [assistant](/docs/api-reference/assistants). + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant for which to create a file. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result CreateAssistantFile(string assistantId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.CreateAssistantFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateAssistantFileRequest(assistantId, content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Returns a list of assistant files. + /// The ID of the assistant the file belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> GetAssistantFilesAsync(string assistantId, int? limit = null, ListOrder? 
order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetAssistantFilesAsync(assistantId, limit, order?.ToString(), after, before, context).ConfigureAwait(false); + return Result.FromValue(ListAssistantFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns a list of assistant files. + /// The ID of the assistant the file belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result GetAssistantFiles(string assistantId, int? limit = null, ListOrder? 
order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetAssistantFiles(assistantId, limit, order?.ToString(), after, before, context); + return Result.FromValue(ListAssistantFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns a list of assistant files. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant the file belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. 
+ /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetAssistantFilesAsync(string assistantId, int? limit, string order, string after, string before, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistantFiles"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantFilesRequest(assistantId, limit, order, after, before, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns a list of assistant files. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant the file belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetAssistantFiles(string assistantId, int? limit, string order, string after, string before, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistantFiles"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantFilesRequest(assistantId, limit, order, after, before, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Retrieves an assistant file. + /// The ID of the assistant the file belongs to. + /// The ID of the file we're getting. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> GetAssistantFileAsync(string assistantId, string fileId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetAssistantFileAsync(assistantId, fileId, context).ConfigureAwait(false); + return Result.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieves an assistant file. + /// The ID of the assistant the file belongs to. 
+ /// The ID of the file we're getting. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual Result GetAssistantFile(string assistantId, string fileId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetAssistantFile(assistantId, fileId, context); + return Result.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves an assistant file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant the file belongs to. + /// The ID of the file we're getting. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task GetAssistantFileAsync(string assistantId, string fileId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistantFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantFileRequest(assistantId, fileId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves an assistant file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant the file belongs to. + /// The ID of the file we're getting. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetAssistantFile(string assistantId, string fileId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistantFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantFileRequest(assistantId, fileId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Delete an assistant file. 
+ /// The ID of the assistant the file belongs to. + /// The ID of the file to delete. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> DeleteAssistantFileAsync(string assistantId, string fileId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await DeleteAssistantFileAsync(assistantId, fileId, context).ConfigureAwait(false); + return Result.FromValue(DeleteAssistantFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Delete an assistant file. + /// The ID of the assistant the file belongs to. + /// The ID of the file to delete. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual Result DeleteAssistantFile(string assistantId, string fileId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = DeleteAssistantFile(assistantId, fileId, context); + return Result.FromValue(DeleteAssistantFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Delete an assistant file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant the file belongs to. + /// The ID of the file to delete. 
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task DeleteAssistantFileAsync(string assistantId, string fileId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.DeleteAssistantFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateDeleteAssistantFileRequest(assistantId, fileId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Delete an assistant file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant the file belongs to. + /// The ID of the file to delete. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result DeleteAssistantFile(string assistantId, string fileId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + using var scope = ClientDiagnostics.CreateSpan("Assistants.DeleteAssistantFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateDeleteAssistantFileRequest(assistantId, fileId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateAssistantRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/assistants", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateGetAssistantsRequest(int? 
limit, string order, string after, string before, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/assistants", false); + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + if (order != null) + { + uri.AppendQuery("order", order, true); + } + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (before != null) + { + uri.AppendQuery("before", before, true); + } + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetAssistantRequest(string assistantId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/assistants/", false); + uri.AppendPath(assistantId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateModifyAssistantRequest(string assistantId, RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/assistants/", false); + uri.AppendPath(assistantId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateDeleteAssistantRequest(string assistantId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = 
message.Request; + request.SetMethod("DELETE"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/assistants/", false); + uri.AppendPath(assistantId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateCreateAssistantFileRequest(string assistantId, RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/assistants/", false); + uri.AppendPath(assistantId, true); + uri.AppendPath("/files", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateGetAssistantFilesRequest(string assistantId, int? 
limit, string order, string after, string before, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/assistants/", false); + uri.AppendPath(assistantId, true); + uri.AppendPath("/files", false); + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + if (order != null) + { + uri.AppendQuery("order", order, true); + } + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (before != null) + { + uri.AppendQuery("before", before, true); + } + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetAssistantFileRequest(string assistantId, string fileId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/assistants/", false); + uri.AppendPath(assistantId, true); + uri.AppendPath("/files/", false); + uri.AppendPath(fileId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateDeleteAssistantFileRequest(string assistantId, string fileId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("DELETE"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/assistants/", false); + uri.AppendPath(assistantId, true); + uri.AppendPath("/files/", false); + uri.AppendPath(fileId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new 
RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Audio.cs b/.dotnet/src/Generated/Audio.cs new file mode 100644 index 000000000..6acfba501 --- /dev/null +++ b/.dotnet/src/Generated/Audio.cs @@ -0,0 +1,421 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The Audio sub-client. + public partial class Audio + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of Audio for mocking. + protected Audio() + { + } + + /// Initializes a new instance of Audio. + /// The handler for diagnostic messaging in the client. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. 
+ internal Audio(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + /// Generates audio from the input text. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual async Task> CreateSpeechAsync(CreateSpeechRequest speech, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(speech, nameof(speech)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = speech.ToRequestBody(); + Result result = await CreateSpeechAsync(content, context).ConfigureAwait(false); + return Result.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); + } + + /// Generates audio from the input text. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual Result CreateSpeech(CreateSpeechRequest speech, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(speech, nameof(speech)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = speech.ToRequestBody(); + Result result = CreateSpeech(content, context); + return Result.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); + } + + /// + /// [Protocol Method] Generates audio from the input text. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. 
+ /// The response returned from the service. + public virtual async Task CreateSpeechAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Audio.CreateSpeech"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateSpeechRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Generates audio from the input text. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result CreateSpeech(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Audio.CreateSpeech"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateSpeechRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Transcribes audio into the input language. + /// The to use. + /// The cancellation token to use. + /// is null. 
+ public virtual async Task> CreateTranscriptionAsync(CreateTranscriptionRequest audio, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(audio, nameof(audio)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = audio.ToRequestBody(); + Result result = await CreateTranscriptionAsync(content, context).ConfigureAwait(false); + return Result.FromValue(CreateTranscriptionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Transcribes audio into the input language. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual Result CreateTranscription(CreateTranscriptionRequest audio, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(audio, nameof(audio)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = audio.ToRequestBody(); + Result result = CreateTranscription(content, context); + return Result.FromValue(CreateTranscriptionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Transcribes audio into the input language. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task CreateTranscriptionAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Audio.CreateTranscription"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateTranscriptionRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Transcribes audio into the input language. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result CreateTranscription(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Audio.CreateTranscription"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateTranscriptionRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Translates audio into English.. + /// The to use. + /// The cancellation token to use. + /// is null. 
+ public virtual async Task> CreateTranslationAsync(CreateTranslationRequest audio, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(audio, nameof(audio)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = audio.ToRequestBody(); + Result result = await CreateTranslationAsync(content, context).ConfigureAwait(false); + return Result.FromValue(CreateTranslationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Translates audio into English.. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual Result CreateTranslation(CreateTranslationRequest audio, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(audio, nameof(audio)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = audio.ToRequestBody(); + Result result = CreateTranslation(content, context); + return Result.FromValue(CreateTranslationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Translates audio into English.. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task CreateTranslationAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Audio.CreateTranslation"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateTranslationRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Translates audio into English.. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result CreateTranslation(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Audio.CreateTranslation"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateTranslationRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateSpeechRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/audio/speech", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/octet-stream"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateCreateTranscriptionRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/audio/transcriptions", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("content-type", "multipart/form-data"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateCreateTranslationRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/audio/translations", false); + request.Uri = uri.ToUri(); + 
request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("content-type", "multipart/form-data"); + request.Content = content; + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Chat.cs b/.dotnet/src/Generated/Chat.cs new file mode 100644 index 000000000..8c8e0546e --- /dev/null +++ b/.dotnet/src/Generated/Chat.cs @@ -0,0 +1,183 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The Chat sub-client. + public partial class Chat + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of Chat for mocking. + protected Chat() + { + } + + /// Initializes a new instance of Chat. 
+ /// The handler for diagnostic messaging in the client. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Chat(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + /// Creates a model response for the given chat conversation. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual async Task> CreateChatCompletionAsync(CreateChatCompletionRequest createChatCompletionRequest, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(createChatCompletionRequest, nameof(createChatCompletionRequest)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = createChatCompletionRequest.ToRequestBody(); + Result result = await CreateChatCompletionAsync(content, context).ConfigureAwait(false); + return Result.FromValue(CreateChatCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Creates a model response for the given chat conversation. + /// The to use. + /// The cancellation token to use. + /// is null. 
+ public virtual Result CreateChatCompletion(CreateChatCompletionRequest createChatCompletionRequest, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(createChatCompletionRequest, nameof(createChatCompletionRequest)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = createChatCompletionRequest.ToRequestBody(); + Result result = CreateChatCompletion(content, context); + return Result.FromValue(CreateChatCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates a model response for the given chat conversation. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateChatCompletionAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Chat.CreateChatCompletion"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateChatCompletionRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates a model response for the given chat conversation. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result CreateChatCompletion(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Chat.CreateChatCompletion"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateChatCompletionRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateChatCompletionRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/chat/completions", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new 
StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Completions.cs b/.dotnet/src/Generated/Completions.cs new file mode 100644 index 000000000..aaa1f585d --- /dev/null +++ b/.dotnet/src/Generated/Completions.cs @@ -0,0 +1,183 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The Completions sub-client. + public partial class Completions + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of Completions for mocking. + protected Completions() + { + } + + /// Initializes a new instance of Completions. + /// The handler for diagnostic messaging in the client. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Completions(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + /// Creates a completion for the provided prompt and parameters. + /// The to use. + /// The cancellation token to use. + /// is null. 
+ public virtual async Task> CreateCompletionAsync(CreateCompletionRequest createCompletionRequest, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(createCompletionRequest, nameof(createCompletionRequest)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = createCompletionRequest.ToRequestBody(); + Result result = await CreateCompletionAsync(content, context).ConfigureAwait(false); + return Result.FromValue(CreateCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Creates a completion for the provided prompt and parameters. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual Result CreateCompletion(CreateCompletionRequest createCompletionRequest, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(createCompletionRequest, nameof(createCompletionRequest)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = createCompletionRequest.ToRequestBody(); + Result result = CreateCompletion(content, context); + return Result.FromValue(CreateCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates a completion for the provided prompt and parameters. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task CreateCompletionAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Completions.CreateCompletion"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateCompletionRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates a completion for the provided prompt and parameters. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result CreateCompletion(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Completions.CreateCompletion"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateCompletionRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateCompletionRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/completions", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Embeddings.cs b/.dotnet/src/Generated/Embeddings.cs new file mode 100644 index 000000000..d42141a8f --- /dev/null +++ b/.dotnet/src/Generated/Embeddings.cs @@ -0,0 +1,183 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using 
System.ClientModel.Primitives.Pipeline; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The Embeddings sub-client. + public partial class Embeddings + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of Embeddings for mocking. + protected Embeddings() + { + } + + /// Initializes a new instance of Embeddings. + /// The handler for diagnostic messaging in the client. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Embeddings(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + /// Creates an embedding vector representing the input text. + /// The to use. + /// The cancellation token to use. + /// is null. 
+ public virtual async Task> CreateEmbeddingAsync(CreateEmbeddingRequest embedding, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(embedding, nameof(embedding)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = embedding.ToRequestBody(); + Result result = await CreateEmbeddingAsync(content, context).ConfigureAwait(false); + return Result.FromValue(CreateEmbeddingResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Creates an embedding vector representing the input text. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual Result CreateEmbedding(CreateEmbeddingRequest embedding, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(embedding, nameof(embedding)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = embedding.ToRequestBody(); + Result result = CreateEmbedding(content, context); + return Result.FromValue(CreateEmbeddingResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates an embedding vector representing the input text. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task CreateEmbeddingAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Embeddings.CreateEmbedding"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateEmbeddingRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates an embedding vector representing the input text. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result CreateEmbedding(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Embeddings.CreateEmbedding"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateEmbeddingRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateEmbeddingRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/embeddings", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Files.cs b/.dotnet/src/Generated/Files.cs new file mode 100644 index 000000000..05aa7d834 --- /dev/null +++ b/.dotnet/src/Generated/Files.cs @@ -0,0 +1,683 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; 
+using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The Files sub-client. + public partial class Files + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of Files for mocking. + protected Files() + { + } + + /// Initializes a new instance of Files. + /// The handler for diagnostic messaging in the client. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Files(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + /// + /// Upload a file that can be used across various endpoints. The size of all the files uploaded by + /// one organization can be up to 100 GB. + /// + /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + /// the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + /// supported. The Fine-tuning API only supports `.jsonl` files. + /// + /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + /// + /// The to use. + /// The cancellation token to use. + /// is null. 
+ public virtual async Task> CreateFileAsync(CreateFileRequest file, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(file, nameof(file)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = file.ToRequestBody(); + Result result = await CreateFileAsync(content, context).ConfigureAwait(false); + return Result.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Upload a file that can be used across various endpoints. The size of all the files uploaded by + /// one organization can be up to 100 GB. + /// + /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + /// the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + /// supported. The Fine-tuning API only supports `.jsonl` files. + /// + /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + /// + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual Result CreateFile(CreateFileRequest file, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(file, nameof(file)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = file.ToRequestBody(); + Result result = CreateFile(content, context); + return Result.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Upload a file that can be used across various endpoints. The size of all the files uploaded by + /// one organization can be up to 100 GB. + /// + /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + /// the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + /// supported. The Fine-tuning API only supports `.jsonl` files. 
+ /// + /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateFileAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Files.CreateFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateFileRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Upload a file that can be used across various endpoints. The size of all the files uploaded by + /// one organization can be up to 100 GB. + /// + /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + /// the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + /// supported. The Fine-tuning API only supports `.jsonl` files. + /// + /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
+ /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result CreateFile(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Files.CreateFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateFileRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Returns a list of files that belong to the user's organization. + /// Only return files with the given purpose. + /// The cancellation token to use. + public virtual async Task> GetFilesAsync(string purpose = null, CancellationToken cancellationToken = default) + { + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetFilesAsync(purpose, context).ConfigureAwait(false); + return Result.FromValue(ListFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns a list of files that belong to the user's organization. + /// Only return files with the given purpose. + /// The cancellation token to use. + public virtual Result GetFiles(string purpose = null, CancellationToken cancellationToken = default) + { + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetFiles(purpose, context); + return Result.FromValue(ListFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns a list of files that belong to the user's organization. 
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// Only return files with the given purpose. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetFilesAsync(string purpose, RequestOptions context) + { + using var scope = ClientDiagnostics.CreateSpan("Files.GetFiles"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetFilesRequest(purpose, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns a list of files that belong to the user's organization. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// Only return files with the given purpose. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result GetFiles(string purpose, RequestOptions context) + { + using var scope = ClientDiagnostics.CreateSpan("Files.GetFiles"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetFilesRequest(purpose, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Returns information about a specific file. + /// The ID of the file to use for this request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> RetrieveFileAsync(string fileId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await RetrieveFileAsync(fileId, context).ConfigureAwait(false); + return Result.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns information about a specific file. + /// The ID of the file to use for this request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result RetrieveFile(string fileId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = RetrieveFile(fileId, context); + return Result.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns information about a specific file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
+ /// + /// + /// + /// + /// The ID of the file to use for this request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task RetrieveFileAsync(string fileId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + using var scope = ClientDiagnostics.CreateSpan("Files.RetrieveFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveFileRequest(fileId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns information about a specific file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the file to use for this request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result RetrieveFile(string fileId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + using var scope = ClientDiagnostics.CreateSpan("Files.RetrieveFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveFileRequest(fileId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Delete a file. + /// The ID of the file to use for this request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> DeleteFileAsync(string fileId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await DeleteFileAsync(fileId, context).ConfigureAwait(false); + return Result.FromValue(DeleteFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Delete a file. + /// The ID of the file to use for this request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result DeleteFile(string fileId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = DeleteFile(fileId, context); + return Result.FromValue(DeleteFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Delete a file + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
+ /// + /// + /// + /// + /// The ID of the file to use for this request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task DeleteFileAsync(string fileId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + using var scope = ClientDiagnostics.CreateSpan("Files.DeleteFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateDeleteFileRequest(fileId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Delete a file + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the file to use for this request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result DeleteFile(string fileId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + using var scope = ClientDiagnostics.CreateSpan("Files.DeleteFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateDeleteFileRequest(fileId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Returns the contents of the specified file. + /// The ID of the file to use for this request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> DownloadFileAsync(string fileId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await DownloadFileAsync(fileId, context).ConfigureAwait(false); + return Result.FromValue(result.GetRawResponse().Content.ToObjectFromJson(), result.GetRawResponse()); + } + + /// Returns the contents of the specified file. + /// The ID of the file to use for this request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result DownloadFile(string fileId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = DownloadFile(fileId, context); + return Result.FromValue(result.GetRawResponse().Content.ToObjectFromJson(), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns the contents of the specified file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the file to use for this request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task DownloadFileAsync(string fileId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + using var scope = ClientDiagnostics.CreateSpan("Files.DownloadFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateDownloadFileRequest(fileId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns the contents of the specified file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the file to use for this request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result DownloadFile(string fileId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + using var scope = ClientDiagnostics.CreateSpan("Files.DownloadFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateDownloadFileRequest(fileId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateFileRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/files", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("content-type", "multipart/form-data"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateGetFilesRequest(string purpose, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/files", false); + if (purpose != null) + { + uri.AppendQuery("purpose", purpose, true); + } + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateRetrieveFileRequest(string fileId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/files/", false); + uri.AppendPath(fileId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage 
CreateDeleteFileRequest(string fileId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("DELETE"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/files/", false); + uri.AppendPath(fileId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateDownloadFileRequest(string fileId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/files/", false); + uri.AppendPath(fileId, true); + uri.AppendPath("/content", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/FineTunes.cs b/.dotnet/src/Generated/FineTunes.cs new file mode 100644 index 000000000..04c8bae74 --- /dev/null +++ b/.dotnet/src/Generated/FineTunes.cs @@ -0,0 +1,736 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; +using System.Threading; +using System.Threading.Tasks; +using 
OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The FineTunes sub-client. + public partial class FineTunes + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of FineTunes for mocking. + protected FineTunes() + { + } + + /// Initializes a new instance of FineTunes. + /// The handler for diagnostic messaging in the client. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal FineTunes(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + /// + /// Creates a job that fine-tunes a specified model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + /// + /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + /// + /// The to use. + /// The cancellation token to use. + /// is null. 
+ [Obsolete("deprecated")] + public virtual async Task> CreateFineTuneAsync(CreateFineTuneRequest fineTune, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(fineTune, nameof(fineTune)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = fineTune.ToRequestBody(); + Result result = await CreateFineTuneAsync(content, context).ConfigureAwait(false); + return Result.FromValue(FineTune.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Creates a job that fine-tunes a specified model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + /// + /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + /// + /// The to use. + /// The cancellation token to use. + /// is null. + [Obsolete("deprecated")] + public virtual Result CreateFineTune(CreateFineTuneRequest fineTune, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(fineTune, nameof(fineTune)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = fineTune.ToRequestBody(); + Result result = CreateFineTune(content, context); + return Result.FromValue(FineTune.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates a job that fine-tunes a specified model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + /// + /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
+ /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + [Obsolete("deprecated")] + public virtual async Task CreateFineTuneAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("FineTunes.CreateFineTune"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateFineTuneRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates a job that fine-tunes a specified model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + /// + /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ [Obsolete("deprecated")] + public virtual Result CreateFineTune(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("FineTunes.CreateFineTune"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateFineTuneRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// List your organization's fine-tuning jobs. + /// The cancellation token to use. + [Obsolete("deprecated")] + public virtual async Task> GetFineTunesAsync(CancellationToken cancellationToken = default) + { + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetFineTunesAsync(context).ConfigureAwait(false); + return Result.FromValue(ListFineTunesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// List your organization's fine-tuning jobs. + /// The cancellation token to use. + [Obsolete("deprecated")] + public virtual Result GetFineTunes(CancellationToken cancellationToken = default) + { + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetFineTunes(context); + return Result.FromValue(ListFineTunesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] List your organization's fine-tuning jobs + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ [Obsolete("deprecated")] + public virtual async Task GetFineTunesAsync(RequestOptions context) + { + using var scope = ClientDiagnostics.CreateSpan("FineTunes.GetFineTunes"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetFineTunesRequest(context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] List your organization's fine-tuning jobs + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + [Obsolete("deprecated")] + public virtual Result GetFineTunes(RequestOptions context) + { + using var scope = ClientDiagnostics.CreateSpan("FineTunes.GetFineTunes"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetFineTunesRequest(context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// Gets info about the fine-tune job. + /// + /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + /// + /// The ID of the fine-tune job. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. 
+ [Obsolete("deprecated")] + public virtual async Task> RetrieveFineTuneAsync(string fineTuneId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await RetrieveFineTuneAsync(fineTuneId, context).ConfigureAwait(false); + return Result.FromValue(FineTune.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Gets info about the fine-tune job. + /// + /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + /// + /// The ID of the fine-tune job. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + [Obsolete("deprecated")] + public virtual Result RetrieveFineTune(string fineTuneId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = RetrieveFineTune(fineTuneId, context); + return Result.FromValue(FineTune.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Gets info about the fine-tune job. + /// + /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tune job. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ [Obsolete("deprecated")] + public virtual async Task RetrieveFineTuneAsync(string fineTuneId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTunes.RetrieveFineTune"); + scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveFineTuneRequest(fineTuneId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Gets info about the fine-tune job. + /// + /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tune job. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + [Obsolete("deprecated")] + public virtual Result RetrieveFineTune(string fineTuneId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTunes.RetrieveFineTune"); + scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveFineTuneRequest(fineTuneId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Get fine-grained status updates for a fine-tune job. + /// The ID of the fine-tune job to get events for. + /// + /// Whether to stream events for the fine-tune job. 
If set to true, events will be sent as + /// data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available. The stream will terminate with a `data: [DONE]` message when the + /// job is finished (succeeded, cancelled, or failed). + /// + /// If set to false, only events generated so far will be returned. + /// + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + [Obsolete("deprecated")] + public virtual async Task> GetFineTuneEventsAsync(string fineTuneId, bool? stream = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetFineTuneEventsAsync(fineTuneId, stream, context).ConfigureAwait(false); + return Result.FromValue(ListFineTuneEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Get fine-grained status updates for a fine-tune job. + /// The ID of the fine-tune job to get events for. + /// + /// Whether to stream events for the fine-tune job. If set to true, events will be sent as + /// data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available. The stream will terminate with a `data: [DONE]` message when the + /// job is finished (succeeded, cancelled, or failed). + /// + /// If set to false, only events generated so far will be returned. + /// + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + [Obsolete("deprecated")] + public virtual Result GetFineTuneEvents(string fineTuneId, bool? 
stream = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetFineTuneEvents(fineTuneId, stream, context); + return Result.FromValue(ListFineTuneEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Get fine-grained status updates for a fine-tune job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tune job to get events for. + /// + /// Whether to stream events for the fine-tune job. If set to true, events will be sent as + /// data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available. The stream will terminate with a `data: [DONE]` message when the + /// job is finished (succeeded, cancelled, or failed). + /// + /// If set to false, only events generated so far will be returned. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + [Obsolete("deprecated")] + public virtual async Task GetFineTuneEventsAsync(string fineTuneId, bool? 
stream, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTunes.GetFineTuneEvents"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetFineTuneEventsRequest(fineTuneId, stream, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Get fine-grained status updates for a fine-tune job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tune job to get events for. + /// + /// Whether to stream events for the fine-tune job. If set to true, events will be sent as + /// data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available. The stream will terminate with a `data: [DONE]` message when the + /// job is finished (succeeded, cancelled, or failed). + /// + /// If set to false, only events generated so far will be returned. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + [Obsolete("deprecated")] + public virtual Result GetFineTuneEvents(string fineTuneId, bool? 
stream, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTunes.GetFineTuneEvents"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetFineTuneEventsRequest(fineTuneId, stream, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Immediately cancel a fine-tune job. + /// The ID of the fine-tune job to cancel. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + [Obsolete("deprecated")] + public virtual async Task> CancelFineTuneAsync(string fineTuneId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await CancelFineTuneAsync(fineTuneId, context).ConfigureAwait(false); + return Result.FromValue(FineTune.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Immediately cancel a fine-tune job. + /// The ID of the fine-tune job to cancel. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + [Obsolete("deprecated")] + public virtual Result CancelFineTune(string fineTuneId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = CancelFineTune(fineTuneId, context); + return Result.FromValue(FineTune.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Immediately cancel a fine-tune job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tune job to cancel. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + [Obsolete("deprecated")] + public virtual async Task CancelFineTuneAsync(string fineTuneId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTunes.CancelFineTune"); + scope.Start(); + try + { + using PipelineMessage message = CreateCancelFineTuneRequest(fineTuneId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Immediately cancel a fine-tune job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tune job to cancel. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ [Obsolete("deprecated")] + public virtual Result CancelFineTune(string fineTuneId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTunes.CancelFineTune"); + scope.Start(); + try + { + using PipelineMessage message = CreateCancelFineTuneRequest(fineTuneId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateFineTuneRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine-tunes", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateGetFineTunesRequest(RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine-tunes", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateRetrieveFineTuneRequest(string fineTuneId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine-tunes/", false); + uri.AppendPath(fineTuneId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage 
CreateGetFineTuneEventsRequest(string fineTuneId, bool? stream, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine-tunes/", false); + uri.AppendPath(fineTuneId, true); + uri.AppendPath("/events", false); + if (stream != null) + { + uri.AppendQuery("stream", stream.Value, true); + } + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateCancelFineTuneRequest(string fineTuneId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine-tunes/", false); + uri.AppendPath(fineTuneId, true); + uri.AppendPath("/cancel", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/FineTuning.cs b/.dotnet/src/Generated/FineTuning.cs new file mode 100644 index 000000000..40f208a6e --- /dev/null +++ b/.dotnet/src/Generated/FineTuning.cs @@ -0,0 +1,55 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using 
System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; +using System.Threading; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The FineTuning sub-client. + public partial class FineTuning + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of FineTuning for mocking. + protected FineTuning() + { + } + + /// Initializes a new instance of FineTuning. + /// The handler for diagnostic messaging in the client. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal FineTuning(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + private FineTuningJobs _cachedFineTuningJobs; + + /// Initializes a new instance of FineTuningJobs. + public virtual FineTuningJobs GetFineTuningJobsClient() + { + return Volatile.Read(ref _cachedFineTuningJobs) ?? Interlocked.CompareExchange(ref _cachedFineTuningJobs, new FineTuningJobs(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? 
_cachedFineTuningJobs; + } + } +} diff --git a/.dotnet/src/Generated/FineTuningJobs.cs b/.dotnet/src/Generated/FineTuningJobs.cs new file mode 100644 index 000000000..669480a3d --- /dev/null +++ b/.dotnet/src/Generated/FineTuningJobs.cs @@ -0,0 +1,710 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The FineTuningJobs sub-client. + public partial class FineTuningJobs + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of FineTuningJobs for mocking. + protected FineTuningJobs() + { + } + + /// Initializes a new instance of FineTuningJobs. + /// The handler for diagnostic messaging in the client. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal FineTuningJobs(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + /// + /// Creates a job that fine-tunes a specified model from a given dataset. 
+ /// + /// Response includes details of the enqueued job including job status and the name of the + /// fine-tuned models once complete. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual async Task> CreateFineTuningJobAsync(CreateFineTuningJobRequest job, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(job, nameof(job)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = job.ToRequestBody(); + Result result = await CreateFineTuningJobAsync(content, context).ConfigureAwait(false); + return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Creates a job that fine-tunes a specified model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the + /// fine-tuned models once complete. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual Result CreateFineTuningJob(CreateFineTuningJobRequest job, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(job, nameof(job)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = job.ToRequestBody(); + Result result = CreateFineTuningJob(content, context); + return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates a job that fine-tunes a specified model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the + /// fine-tuned models once complete. 
+ /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateFineTuningJobAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.CreateFineTuningJob"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateFineTuningJobRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates a job that fine-tunes a specified model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the + /// fine-tuned models once complete. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. 
+ /// The response returned from the service. + public virtual Result CreateFineTuningJob(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.CreateFineTuningJob"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateFineTuningJobRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Identifier for the last job from the previous pagination request. + /// Number of fine-tuning jobs to retrieve. + /// The cancellation token to use. + public virtual async Task> GetPaginatedFineTuningJobsAsync(string after = null, long? limit = null, CancellationToken cancellationToken = default) + { + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetPaginatedFineTuningJobsAsync(after, limit, context).ConfigureAwait(false); + return Result.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Identifier for the last job from the previous pagination request. + /// Number of fine-tuning jobs to retrieve. + /// The cancellation token to use. + public virtual Result GetPaginatedFineTuningJobs(string after = null, long? limit = null, CancellationToken cancellationToken = default) + { + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetPaginatedFineTuningJobs(after, limit, context); + return Result.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// Identifier for the last job from the previous pagination request. + /// Number of fine-tuning jobs to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetPaginatedFineTuningJobsAsync(string after, long? limit, RequestOptions context) + { + using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.GetPaginatedFineTuningJobs"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// Identifier for the last job from the previous pagination request. + /// Number of fine-tuning jobs to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetPaginatedFineTuningJobs(string after, long? 
limit, RequestOptions context) + { + using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.GetPaginatedFineTuningJobs"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// Get info about a fine-tuning job. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// The to use. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> RetrieveFineTuningJobAsync(string fineTuningJobId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await RetrieveFineTuningJobAsync(fineTuningJobId, context).ConfigureAwait(false); + return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Get info about a fine-tuning job. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// The to use. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result RetrieveFineTuningJob(string fineTuningJobId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = RetrieveFineTuningJob(fineTuningJobId, context); + return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Get info about a fine-tuning job. 
+ /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The to use. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task RetrieveFineTuningJobAsync(string fineTuningJobId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.RetrieveFineTuningJob"); + scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveFineTuningJobRequest(fineTuningJobId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Get info about a fine-tuning job. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The to use. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result RetrieveFineTuningJob(string fineTuningJobId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.RetrieveFineTuningJob"); + scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveFineTuningJobRequest(fineTuningJobId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Get status updates for a fine-tuning job. + /// The ID of the fine-tuning job to get events for. + /// Identifier for the last event from the previous pagination request. + /// Number of events to retrieve. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> GetFineTuningEventsAsync(string fineTuningJobId, string after = null, int? limit = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetFineTuningEventsAsync(fineTuningJobId, after, limit, context).ConfigureAwait(false); + return Result.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Get status updates for a fine-tuning job. + /// The ID of the fine-tuning job to get events for. + /// Identifier for the last event from the previous pagination request. + /// Number of events to retrieve. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result GetFineTuningEvents(string fineTuningJobId, string after = null, int? 
limit = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetFineTuningEvents(fineTuningJobId, after, limit, context); + return Result.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Get status updates for a fine-tuning job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job to get events for. + /// Identifier for the last event from the previous pagination request. + /// Number of events to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetFineTuningEventsAsync(string fineTuningJobId, string after, int? limit, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.GetFineTuningEvents"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetFineTuningEventsRequest(fineTuningJobId, after, limit, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Get status updates for a fine-tuning job. 
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job to get events for. + /// Identifier for the last event from the previous pagination request. + /// Number of events to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetFineTuningEvents(string fineTuningJobId, string after, int? limit, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.GetFineTuningEvents"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetFineTuningEventsRequest(fineTuningJobId, after, limit, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Immediately cancel a fine-tune job. + /// The ID of the fine-tuning job to cancel. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. 
+ public virtual async Task> CancelFineTuningJobAsync(string fineTuningJobId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await CancelFineTuningJobAsync(fineTuningJobId, context).ConfigureAwait(false); + return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Immediately cancel a fine-tune job. + /// The ID of the fine-tuning job to cancel. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result CancelFineTuningJob(string fineTuningJobId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = CancelFineTuningJob(fineTuningJobId, context); + return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Immediately cancel a fine-tune job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job to cancel. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task CancelFineTuningJobAsync(string fineTuningJobId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.CancelFineTuningJob"); + scope.Start(); + try + { + using PipelineMessage message = CreateCancelFineTuningJobRequest(fineTuningJobId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Immediately cancel a fine-tune job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job to cancel. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result CancelFineTuningJob(string fineTuningJobId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.CancelFineTuningJob"); + scope.Start(); + try + { + using PipelineMessage message = CreateCancelFineTuningJobRequest(fineTuningJobId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateFineTuningJobRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine_tuning/jobs", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateGetPaginatedFineTuningJobsRequest(string after, long? 
limit, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine_tuning/jobs", false); + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateRetrieveFineTuningJobRequest(string fineTuningJobId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine_tuning/jobs/", false); + uri.AppendPath(fineTuningJobId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetFineTuningEventsRequest(string fineTuningJobId, string after, int? 
limit, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine_tuning/jobs/", false); + uri.AppendPath(fineTuningJobId, true); + uri.AppendPath("/events", false); + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateCancelFineTuningJobRequest(string fineTuningJobId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine_tuning/jobs/", false); + uri.AppendPath(fineTuningJobId, true); + uri.AppendPath("/cancel", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Images.cs b/.dotnet/src/Generated/Images.cs new file mode 100644 index 000000000..a3de80e0b --- /dev/null +++ b/.dotnet/src/Generated/Images.cs @@ -0,0 +1,421 @@ +// + +#nullable disable + +using System; +using System.ClientModel; 
+using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The Images sub-client. + public partial class Images + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of Images for mocking. + protected Images() + { + } + + /// Initializes a new instance of Images. + /// The handler for diagnostic messaging in the client. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Images(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + /// Creates an image given a prompt. + /// The to use. + /// The cancellation token to use. + /// is null. 
+ public virtual async Task> CreateImageAsync(CreateImageRequest image, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(image, nameof(image)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = image.ToRequestBody(); + Result result = await CreateImageAsync(content, context).ConfigureAwait(false); + return Result.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Creates an image given a prompt. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual Result CreateImage(CreateImageRequest image, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(image, nameof(image)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = image.ToRequestBody(); + Result result = CreateImage(content, context); + return Result.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates an image given a prompt + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task CreateImageAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Images.CreateImage"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateImageRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates an image given a prompt + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result CreateImage(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Images.CreateImage"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateImageRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Creates an edited or extended image given an original image and a prompt. + /// The to use. + /// The cancellation token to use. + /// is null. 
+ public virtual async Task> CreateImageEditAsync(CreateImageEditRequest image, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(image, nameof(image)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = image.ToRequestBody(); + Result result = await CreateImageEditAsync(content, context).ConfigureAwait(false); + return Result.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Creates an edited or extended image given an original image and a prompt. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual Result CreateImageEdit(CreateImageEditRequest image, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(image, nameof(image)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = image.ToRequestBody(); + Result result = CreateImageEdit(content, context); + return Result.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates an edited or extended image given an original image and a prompt. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task CreateImageEditAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Images.CreateImageEdit"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateImageEditRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates an edited or extended image given an original image and a prompt. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result CreateImageEdit(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Images.CreateImageEdit"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateImageEditRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Creates an edited or extended image given an original image and a prompt. + /// The to use. + /// The cancellation token to use. + /// is null. 
+ public virtual async Task> CreateImageVariationAsync(CreateImageVariationRequest image, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(image, nameof(image)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = image.ToRequestBody(); + Result result = await CreateImageVariationAsync(content, context).ConfigureAwait(false); + return Result.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Creates an edited or extended image given an original image and a prompt. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual Result CreateImageVariation(CreateImageVariationRequest image, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(image, nameof(image)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = image.ToRequestBody(); + Result result = CreateImageVariation(content, context); + return Result.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates an edited or extended image given an original image and a prompt. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task CreateImageVariationAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Images.CreateImageVariation"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateImageVariationRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates an edited or extended image given an original image and a prompt. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result CreateImageVariation(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Images.CreateImageVariation"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateImageVariationRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateImageRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/images/generations", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateCreateImageEditRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/images/edits", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("content-type", "multipart/form-data"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateCreateImageVariationRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/images/variations", false); + request.Uri = uri.ToUri(); + 
request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("content-type", "multipart/form-data"); + request.Content = content; + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Messages.cs b/.dotnet/src/Generated/Messages.cs new file mode 100644 index 000000000..1ca608801 --- /dev/null +++ b/.dotnet/src/Generated/Messages.cs @@ -0,0 +1,1037 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The Messages sub-client. + public partial class Messages + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of Messages for mocking. 
+ protected Messages() + { + } + + /// Initializes a new instance of Messages. + /// The handler for diagnostic messaging in the client. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Messages(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + /// Create a message. + /// The ID of the [thread](/docs/api-reference/threads) to create a message for. + /// The to use. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> CreateMessageAsync(string threadId, CreateMessageRequest message, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(message, nameof(message)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = message.ToRequestBody(); + Result result = await CreateMessageAsync(threadId, content, context).ConfigureAwait(false); + return Result.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Create a message. + /// The ID of the [thread](/docs/api-reference/threads) to create a message for. + /// The to use. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. 
+ public virtual Result CreateMessage(string threadId, CreateMessageRequest message, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(message, nameof(message)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = message.ToRequestBody(); + Result result = CreateMessage(threadId, content, context); + return Result.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Create a message. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) to create a message for. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateMessageAsync(string threadId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Messages.CreateMessage"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateMessageRequest(threadId, content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Create a message. 
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) to create a message for. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result CreateMessage(string threadId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Messages.CreateMessage"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateMessageRequest(threadId, content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Returns a list of messages for a given thread. + /// The ID of the [thread](/docs/api-reference/threads) the messages belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. 
+ /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> GetMessagesAsync(string threadId, int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetMessagesAsync(threadId, limit, order?.ToString(), after, before, context).ConfigureAwait(false); + return Result.FromValue(ListMessagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns a list of messages for a given thread. + /// The ID of the [thread](/docs/api-reference/threads) the messages belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result GetMessages(string threadId, int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetMessages(threadId, limit, order?.ToString(), after, before, context); + return Result.FromValue(ListMessagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns a list of messages for a given thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) the messages belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetMessagesAsync(string threadId, int? limit, string order, string after, string before, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessages"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetMessagesRequest(threadId, limit, order, after, before, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns a list of messages for a given thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) the messages belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetMessages(string threadId, int? limit, string order, string after, string before, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessages"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetMessagesRequest(threadId, limit, order, after, before, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Retrieve a message. + /// The ID of the [thread](/docs/api-reference/threads) to which this message belongs. + /// The ID of the message to retrieve. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. 
+ public virtual async Task> GetMessageAsync(string threadId, string messageId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetMessageAsync(threadId, messageId, context).ConfigureAwait(false); + return Result.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieve a message. + /// The ID of the [thread](/docs/api-reference/threads) to which this message belongs. + /// The ID of the message to retrieve. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual Result GetMessage(string threadId, string messageId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetMessage(threadId, messageId, context); + return Result.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieve a message. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) to which this message belongs. + /// The ID of the message to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. 
+ /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetMessageAsync(string threadId, string messageId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessage"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetMessageRequest(threadId, messageId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieve a message. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) to which this message belongs. + /// The ID of the message to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result GetMessage(string threadId, string messageId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessage"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetMessageRequest(threadId, messageId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Modifies a message. + /// The ID of the thread to which this message belongs. + /// The ID of the message to modify. + /// The to use. + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> ModifyMessageAsync(string threadId, string messageId, ModifyMessageRequest message, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + ClientUtilities.AssertNotNull(message, nameof(message)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = message.ToRequestBody(); + Result result = await ModifyMessageAsync(threadId, messageId, content, context).ConfigureAwait(false); + return Result.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Modifies a message. + /// The ID of the thread to which this message belongs. + /// The ID of the message to modify. + /// The to use. + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. 
+ public virtual Result ModifyMessage(string threadId, string messageId, ModifyMessageRequest message, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + ClientUtilities.AssertNotNull(message, nameof(message)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = message.ToRequestBody(); + Result result = ModifyMessage(threadId, messageId, content, context); + return Result.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Modifies a message. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which this message belongs. + /// The ID of the message to modify. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task ModifyMessageAsync(string threadId, string messageId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Messages.ModifyMessage"); + scope.Start(); + try + { + using PipelineMessage message = CreateModifyMessageRequest(threadId, messageId, content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Modifies a message. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which this message belongs. + /// The ID of the message to modify. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result ModifyMessage(string threadId, string messageId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Messages.ModifyMessage"); + scope.Start(); + try + { + using PipelineMessage message = CreateModifyMessageRequest(threadId, messageId, content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Returns a list of message files. + /// The ID of the thread that the message and files belong to. + /// The ID of the message that the files belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> GetMessageFilesAsync(string threadId, string messageId, int? limit = null, ListOrder? 
order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetMessageFilesAsync(threadId, messageId, limit, order?.ToString(), after, before, context).ConfigureAwait(false); + return Result.FromValue(ListMessageFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns a list of message files. + /// The ID of the thread that the message and files belong to. + /// The ID of the message that the files belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual Result GetMessageFiles(string threadId, string messageId, int? limit = null, ListOrder? 
order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetMessageFiles(threadId, messageId, limit, order?.ToString(), after, before, context); + return Result.FromValue(ListMessageFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns a list of message files. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread that the message and files belong to. + /// The ID of the message that the files belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. 
+ /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetMessageFilesAsync(string threadId, string messageId, int? limit, string order, string after, string before, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessageFiles"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetMessageFilesRequest(threadId, messageId, limit, order, after, before, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns a list of message files. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread that the message and files belong to. + /// The ID of the message that the files belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetMessageFiles(string threadId, string messageId, int? limit, string order, string after, string before, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessageFiles"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetMessageFilesRequest(threadId, messageId, limit, order, after, before, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Retrieves a message file. + /// The ID of the thread to which the message and File belong. + /// The ID of the message the file belongs to. + /// The ID of the file being retrieved. + /// The cancellation token to use. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. 
+ public virtual async Task> GetMessageFileAsync(string threadId, string messageId, string fileId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetMessageFileAsync(threadId, messageId, fileId, context).ConfigureAwait(false); + return Result.FromValue(MessageFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieves a message file. + /// The ID of the thread to which the message and File belong. + /// The ID of the message the file belongs to. + /// The ID of the file being retrieved. + /// The cancellation token to use. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + public virtual Result GetMessageFile(string threadId, string messageId, string fileId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetMessageFile(threadId, messageId, fileId, context); + return Result.FromValue(MessageFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves a message file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which the message and File belong. + /// The ID of the message the file belongs to. 
+ /// The ID of the file being retrieved. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetMessageFileAsync(string threadId, string messageId, string fileId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessageFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetMessageFileRequest(threadId, messageId, fileId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves a message file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which the message and File belong. + /// The ID of the message the file belongs to. + /// The ID of the file being retrieved. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result GetMessageFile(string threadId, string messageId, string fileId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessageFile"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetMessageFileRequest(threadId, messageId, fileId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateMessageRequest(string threadId, RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/messages", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateGetMessagesRequest(string threadId, int? 
limit, string order, string after, string before, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/messages", false); + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + if (order != null) + { + uri.AppendQuery("order", order, true); + } + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (before != null) + { + uri.AppendQuery("before", before, true); + } + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetMessageRequest(string threadId, string messageId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/messages/", false); + uri.AppendPath(messageId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateModifyMessageRequest(string threadId, string messageId, RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/messages/", false); + uri.AppendPath(messageId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + 
return message; + } + + internal PipelineMessage CreateGetMessageFilesRequest(string threadId, string messageId, int? limit, string order, string after, string before, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/messages/", false); + uri.AppendPath(messageId, true); + uri.AppendPath("/files", false); + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + if (order != null) + { + uri.AppendQuery("order", order, true); + } + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (before != null) + { + uri.AppendQuery("before", before, true); + } + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetMessageFileRequest(string threadId, string messageId, string fileId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/messages/", false); + uri.AppendPath(messageId, true); + uri.AppendPath("/files/", false); + uri.AppendPath(fileId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static 
ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs b/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs new file mode 100644 index 000000000..74ad1f256 --- /dev/null +++ b/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs @@ -0,0 +1,156 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class AssistantFileObject : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AssistantFileObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("assistant_id"u8); + writer.WriteStringValue(AssistantId); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AssistantFileObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AssistantFileObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAssistantFileObject(document.RootElement, options); + } + + internal static AssistantFileObject DeserializeAssistantFileObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + AssistantFileObjectObject @object = default; + DateTimeOffset createdAt = default; + string assistantId = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new AssistantFileObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("assistant_id"u8)) + { + assistantId = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new AssistantFileObject(id, @object, createdAt, assistantId, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AssistantFileObject)} does not support '{options.Format}' format."); + } + } + + AssistantFileObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAssistantFileObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AssistantFileObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static AssistantFileObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAssistantFileObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/AssistantFileObject.cs b/.dotnet/src/Generated/Models/AssistantFileObject.cs new file mode 100644 index 000000000..21aef1f7b --- /dev/null +++ b/.dotnet/src/Generated/Models/AssistantFileObject.cs @@ -0,0 +1,91 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// A list of [Files](/docs/api-reference/files) attached to an `assistant`. 
+ public partial class AssistantFileObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the assistant file was created. + /// The assistant ID that the file is attached to. + /// or is null. + internal AssistantFileObject(string id, DateTimeOffset createdAt, string assistantId) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(assistantId, nameof(assistantId)); + + Id = id; + CreatedAt = createdAt; + AssistantId = assistantId; + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `assistant.file`. + /// The Unix timestamp (in seconds) for when the assistant file was created. + /// The assistant ID that the file is attached to. + /// Keeps track of any properties unknown to the library. 
+ internal AssistantFileObject(string id, AssistantFileObjectObject @object, DateTimeOffset createdAt, string assistantId, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + AssistantId = assistantId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal AssistantFileObject() + { + } + + /// The identifier, which can be referenced in API endpoints. + public string Id { get; } + /// The object type, which is always `assistant.file`. + public AssistantFileObjectObject Object { get; } = AssistantFileObjectObject.AssistantFile; + + /// The Unix timestamp (in seconds) for when the assistant file was created. + public DateTimeOffset CreatedAt { get; } + /// The assistant ID that the file is attached to. + public string AssistantId { get; } + } +} diff --git a/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs b/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs new file mode 100644 index 000000000..d9b017682 --- /dev/null +++ b/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The AssistantFileObject_object. + public readonly partial struct AssistantFileObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public AssistantFileObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AssistantFileValue = "assistant.file"; + + /// assistant.file. + public static AssistantFileObjectObject AssistantFile { get; } = new AssistantFileObjectObject(AssistantFileValue); + /// Determines if two values are the same. 
+ public static bool operator ==(AssistantFileObjectObject left, AssistantFileObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(AssistantFileObjectObject left, AssistantFileObjectObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator AssistantFileObjectObject(string value) => new AssistantFileObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is AssistantFileObjectObject other && Equals(other); + /// + public bool Equals(AssistantFileObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs b/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs new file mode 100644 index 000000000..3676fb26d --- /dev/null +++ b/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs @@ -0,0 +1,302 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class AssistantObject : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AssistantObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + if (Name != null) + { + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + } + else + { + writer.WriteNull("name"); + } + if (Description != null) + { + writer.WritePropertyName("description"u8); + writer.WriteStringValue(Description); + } + else + { + writer.WriteNull("description"); + } + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + if (Instructions != null) + { + writer.WritePropertyName("instructions"u8); + writer.WriteStringValue(Instructions); + } + else + { + writer.WriteNull("instructions"); + } + writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var item in Tools) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + writer.WritePropertyName("file_ids"u8); + writer.WriteStartArray(); + foreach (var item in FileIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + if (Metadata != null && OptionalProperty.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + if (options.Format != "W" && 
_serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AssistantObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AssistantObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAssistantObject(document.RootElement, options); + } + + internal static AssistantObject DeserializeAssistantObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + AssistantObjectObject @object = default; + DateTimeOffset createdAt = default; + string name = default; + string description = default; + string model = default; + string instructions = default; + IReadOnlyList tools = default; + IReadOnlyList fileIds = default; + IReadOnlyDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new AssistantObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if 
(property.NameEquals("name"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + name = null; + continue; + } + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("description"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + description = null; + continue; + } + description = property.Value.GetString(); + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("instructions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + instructions = null; + continue; + } + instructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("tools"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + tools = array; + continue; + } + if (property.NameEquals("file_ids"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + fileIds = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + metadata = new OptionalDictionary(); + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new AssistantObject(id, @object, createdAt, name, description, model, instructions, tools, fileIds, metadata, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions 
options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AssistantObject)} does not support '{options.Format}' format."); + } + } + + AssistantObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAssistantObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AssistantObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static AssistantObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAssistantObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/AssistantObject.cs b/.dotnet/src/Generated/Models/AssistantObject.cs new file mode 100644 index 000000000..4816bc339 --- /dev/null +++ b/.dotnet/src/Generated/Models/AssistantObject.cs @@ -0,0 +1,202 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// Represents an `assistant` that can call the model and use tools. 
+ public partial class AssistantObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the assistant was created. + /// The name of the assistant. The maximum length is 256 characters. + /// The description of the assistant. The maximum length is 512 characters. + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + /// + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// , , or is null. + internal AssistantObject(string id, DateTimeOffset createdAt, string name, string description, string model, string instructions, IEnumerable tools, IEnumerable fileIds, IReadOnlyDictionary metadata) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(model, nameof(model)); + ClientUtilities.AssertNotNull(tools, nameof(tools)); + ClientUtilities.AssertNotNull(fileIds, nameof(fileIds)); + + Id = id; + CreatedAt = createdAt; + Name = name; + Description = description; + Model = model; + Instructions = instructions; + Tools = tools.ToList(); + FileIds = fileIds.ToList(); + Metadata = metadata; + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `assistant`. + /// The Unix timestamp (in seconds) for when the assistant was created. + /// The name of the assistant. The maximum length is 256 characters. + /// The description of the assistant. The maximum length is 512 characters. + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + /// + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. 
+ /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal AssistantObject(string id, AssistantObjectObject @object, DateTimeOffset createdAt, string name, string description, string model, string instructions, IReadOnlyList tools, IReadOnlyList fileIds, IReadOnlyDictionary metadata, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + Name = name; + Description = description; + Model = model; + Instructions = instructions; + Tools = tools; + FileIds = fileIds; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal AssistantObject() + { + } + + /// The identifier, which can be referenced in API endpoints. + public string Id { get; } + /// The object type, which is always `assistant`. + public AssistantObjectObject Object { get; } = AssistantObjectObject.Assistant; + + /// The Unix timestamp (in seconds) for when the assistant was created. + public DateTimeOffset CreatedAt { get; } + /// The name of the assistant. The maximum length is 256 characters. + public string Name { get; } + /// The description of the assistant. The maximum length is 512 characters. + public string Description { get; } + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + public string Model { get; } + /// The system instructions that the assistant uses. The maximum length is 32768 characters. 
+ public string Instructions { get; } + /// + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IReadOnlyList Tools { get; } + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. + /// + public IReadOnlyList FileIds { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + public IReadOnlyDictionary Metadata { get; } + } +} diff --git a/.dotnet/src/Generated/Models/AssistantObjectObject.cs b/.dotnet/src/Generated/Models/AssistantObjectObject.cs new file mode 100644 index 000000000..9c8eb972f --- /dev/null +++ b/.dotnet/src/Generated/Models/AssistantObjectObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The AssistantObject_object. 
+ public readonly partial struct AssistantObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public AssistantObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AssistantValue = "assistant"; + + /// assistant. + public static AssistantObjectObject Assistant { get; } = new AssistantObjectObject(AssistantValue); + /// Determines if two values are the same. + public static bool operator ==(AssistantObjectObject left, AssistantObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(AssistantObjectObject left, AssistantObjectObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator AssistantObjectObject(string value) => new AssistantObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is AssistantObjectObject other && Equals(other); + /// + public bool Equals(AssistantObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs b/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs new file mode 100644 index 000000000..e114a09c0 --- /dev/null +++ b/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs @@ -0,0 +1,214 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class AudioSegment : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AudioSegment)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteNumberValue(Id); + writer.WritePropertyName("seek"u8); + writer.WriteNumberValue(Seek); + writer.WritePropertyName("start"u8); + writer.WriteNumberValue(Convert.ToInt32(Start.ToString("%s"))); + writer.WritePropertyName("end"u8); + writer.WriteNumberValue(Convert.ToInt32(End.ToString("%s"))); + writer.WritePropertyName("text"u8); + writer.WriteStringValue(Text); + writer.WritePropertyName("tokens"u8); + writer.WriteStartArray(); + foreach (var item in Tokens) + { + writer.WriteNumberValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("temperature"u8); + writer.WriteNumberValue(Temperature); + writer.WritePropertyName("avg_logprob"u8); + writer.WriteNumberValue(AvgLogprob); + writer.WritePropertyName("compression_ratio"u8); + writer.WriteNumberValue(CompressionRatio); + 
writer.WritePropertyName("no_speech_prob"u8); + writer.WriteNumberValue(NoSpeechProb); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AudioSegment IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AudioSegment)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAudioSegment(document.RootElement, options); + } + + internal static AudioSegment DeserializeAudioSegment(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long id = default; + long seek = default; + TimeSpan start = default; + TimeSpan end = default; + string text = default; + IReadOnlyList tokens = default; + double temperature = default; + double avgLogprob = default; + double compressionRatio = default; + double noSpeechProb = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("seek"u8)) + { + seek = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("start"u8)) + { + start = TimeSpan.FromSeconds(property.Value.GetInt32()); + continue; + } + if 
(property.NameEquals("end"u8)) + { + end = TimeSpan.FromSeconds(property.Value.GetInt32()); + continue; + } + if (property.NameEquals("text"u8)) + { + text = property.Value.GetString(); + continue; + } + if (property.NameEquals("tokens"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetInt64()); + } + tokens = array; + continue; + } + if (property.NameEquals("temperature"u8)) + { + temperature = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("avg_logprob"u8)) + { + avgLogprob = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("compression_ratio"u8)) + { + compressionRatio = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("no_speech_prob"u8)) + { + noSpeechProb = property.Value.GetDouble(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new AudioSegment(id, seek, start, end, text, tokens, temperature, avgLogprob, compressionRatio, noSpeechProb, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AudioSegment)} does not support '{options.Format}' format."); + } + } + + AudioSegment IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAudioSegment(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AudioSegment)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static AudioSegment FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAudioSegment(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/AudioSegment.cs b/.dotnet/src/Generated/Models/AudioSegment.cs new file mode 100644 index 000000000..d44bf8dc7 --- /dev/null +++ b/.dotnet/src/Generated/Models/AudioSegment.cs @@ -0,0 +1,147 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The AudioSegment. + public partial class AudioSegment + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. 
+ /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The zero-based index of this segment. + /// + /// The seek position associated with the processing of this audio segment. Seek positions are + /// expressed as hundredths of seconds. The model may process several segments from a single seek + /// position, so while the seek position will never represent a later time than the segment's + /// start, the segment's start may represent a significantly later time than the segment's + /// associated seek position. + /// + /// The time at which this segment started relative to the beginning of the audio. + /// The time at which this segment ended relative to the beginning of the audio. + /// The text that was part of this audio segment. + /// The token IDs matching the text in this audio segment. + /// The temperature score associated with this audio segment. + /// The average log probability associated with this audio segment. + /// The compression ratio of this audio segment. + /// The probability of no speech detection within this audio segment. + /// or is null. + internal AudioSegment(long id, long seek, TimeSpan start, TimeSpan end, string text, IEnumerable tokens, double temperature, double avgLogprob, double compressionRatio, double noSpeechProb) + { + ClientUtilities.AssertNotNull(text, nameof(text)); + ClientUtilities.AssertNotNull(tokens, nameof(tokens)); + + Id = id; + Seek = seek; + Start = start; + End = end; + Text = text; + Tokens = tokens.ToList(); + Temperature = temperature; + AvgLogprob = avgLogprob; + CompressionRatio = compressionRatio; + NoSpeechProb = noSpeechProb; + } + + /// Initializes a new instance of . + /// The zero-based index of this segment. + /// + /// The seek position associated with the processing of this audio segment. 
Seek positions are + /// expressed as hundredths of seconds. The model may process several segments from a single seek + /// position, so while the seek position will never represent a later time than the segment's + /// start, the segment's start may represent a significantly later time than the segment's + /// associated seek position. + /// + /// The time at which this segment started relative to the beginning of the audio. + /// The time at which this segment ended relative to the beginning of the audio. + /// The text that was part of this audio segment. + /// The token IDs matching the text in this audio segment. + /// The temperature score associated with this audio segment. + /// The average log probability associated with this audio segment. + /// The compression ratio of this audio segment. + /// The probability of no speech detection within this audio segment. + /// Keeps track of any properties unknown to the library. + internal AudioSegment(long id, long seek, TimeSpan start, TimeSpan end, string text, IReadOnlyList tokens, double temperature, double avgLogprob, double compressionRatio, double noSpeechProb, IDictionary serializedAdditionalRawData) + { + Id = id; + Seek = seek; + Start = start; + End = end; + Text = text; + Tokens = tokens; + Temperature = temperature; + AvgLogprob = avgLogprob; + CompressionRatio = compressionRatio; + NoSpeechProb = noSpeechProb; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal AudioSegment() + { + } + + /// The zero-based index of this segment. + public long Id { get; } + /// + /// The seek position associated with the processing of this audio segment. Seek positions are + /// expressed as hundredths of seconds. 
The model may process several segments from a single seek + /// position, so while the seek position will never represent a later time than the segment's + /// start, the segment's start may represent a significantly later time than the segment's + /// associated seek position. + /// + public long Seek { get; } + /// The time at which this segment started relative to the beginning of the audio. + public TimeSpan Start { get; } + /// The time at which this segment ended relative to the beginning of the audio. + public TimeSpan End { get; } + /// The text that was part of this audio segment. + public string Text { get; } + /// The token IDs matching the text in this audio segment. + public IReadOnlyList Tokens { get; } + /// The temperature score associated with this audio segment. + public double Temperature { get; } + /// The average log probability associated with this audio segment. + public double AvgLogprob { get; } + /// The compression ratio of this audio segment. + public double CompressionRatio { get; } + /// The probability of no speech detection within this audio segment. 
+ public double NoSpeechProb { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs new file mode 100644 index 000000000..c85718f98 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs @@ -0,0 +1,132 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + internal partial class ChatCompletionFunctionCallOption : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionFunctionCallOption)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionFunctionCallOption IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionFunctionCallOption)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionFunctionCallOption(document.RootElement, options); + } + + internal static ChatCompletionFunctionCallOption DeserializeChatCompletionFunctionCallOption(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionFunctionCallOption(name, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionFunctionCallOption)} does not support '{options.Format}' format."); + } + } + + ChatCompletionFunctionCallOption IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionFunctionCallOption(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionFunctionCallOption)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionFunctionCallOption FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionFunctionCallOption(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs new file mode 100644 index 000000000..268efaf1a --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs @@ -0,0 +1,76 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// + /// Specifying a particular function via `{"name": "my_function"}` forces the model to call that + /// function. + /// + internal partial class ChatCompletionFunctionCallOption + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . 
+ /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the function to call. + /// is null. + public ChatCompletionFunctionCallOption(string name) + { + ClientUtilities.AssertNotNull(name, nameof(name)); + + Name = name; + } + + /// Initializes a new instance of . + /// The name of the function to call. + /// Keeps track of any properties unknown to the library. + internal ChatCompletionFunctionCallOption(string name, IDictionary serializedAdditionalRawData) + { + Name = name; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionFunctionCallOption() + { + } + + /// The name of the function to call. 
+ public string Name { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs new file mode 100644 index 000000000..119be9a51 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs @@ -0,0 +1,158 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ChatCompletionFunctions : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionFunctions)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (OptionalProperty.IsDefined(Description)) + { + writer.WritePropertyName("description"u8); + writer.WriteStringValue(Description); + } + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + if (OptionalProperty.IsDefined(Parameters)) + { + writer.WritePropertyName("parameters"u8); + writer.WriteObjectValue(Parameters); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionFunctions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions 
options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionFunctions)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionFunctions(document.RootElement, options); + } + + internal static ChatCompletionFunctions DeserializeChatCompletionFunctions(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OptionalProperty description = default; + string name = default; + OptionalProperty parameters = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("description"u8)) + { + description = property.Value.GetString(); + continue; + } + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("parameters"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + parameters = FunctionParameters.DeserializeFunctionParameters(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionFunctions(description.Value, name, parameters.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionFunctions)} does not support '{options.Format}' format."); + } + } + + ChatCompletionFunctions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionFunctions(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionFunctions)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionFunctions FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionFunctions(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs new file mode 100644 index 000000000..d1bbfa718 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs @@ -0,0 +1,97 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The ChatCompletionFunctions. 
+ [Obsolete("deprecated")] + public partial class ChatCompletionFunctions + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + /// dashes, with a maximum length of 64. + /// + /// is null. + public ChatCompletionFunctions(string name) + { + ClientUtilities.AssertNotNull(name, nameof(name)); + + Name = name; + } + + /// Initializes a new instance of . + /// + /// A description of what the function does, used by the model to choose when and how to call the + /// function. + /// + /// + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + /// dashes, with a maximum length of 64. + /// + /// + /// Keeps track of any properties unknown to the library. + internal ChatCompletionFunctions(string description, string name, FunctionParameters parameters, IDictionary serializedAdditionalRawData) + { + Description = description; + Name = name; + Parameters = parameters; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. 
+ internal ChatCompletionFunctions() + { + } + + /// + /// A description of what the function does, used by the model to choose when and how to call the + /// function. + /// + public string Description { get; set; } + /// + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + /// dashes, with a maximum length of 64. + /// + public string Name { get; } + /// Gets or sets the parameters. + public FunctionParameters Parameters { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs new file mode 100644 index 000000000..e3d872da2 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs @@ -0,0 +1,148 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ChatCompletionMessageToolCall : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCall)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("function"u8); + writer.WriteObjectValue(Function); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionMessageToolCall IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCall)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionMessageToolCall(document.RootElement, options); + } + + internal static ChatCompletionMessageToolCall DeserializeChatCompletionMessageToolCall(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + ChatCompletionMessageToolCallType type = default; + ChatCompletionMessageToolCallFunction function = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("type"u8)) + { + type = new ChatCompletionMessageToolCallType(property.Value.GetString()); + continue; + } + if (property.NameEquals("function"u8)) + { + function = ChatCompletionMessageToolCallFunction.DeserializeChatCompletionMessageToolCallFunction(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionMessageToolCall(id, type, function, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCall)} does not support '{options.Format}' format."); + } + } + + ChatCompletionMessageToolCall IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionMessageToolCall(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCall)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionMessageToolCall FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionMessageToolCall(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs new file mode 100644 index 000000000..e5249dafc --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs @@ -0,0 +1,85 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The ChatCompletionMessageToolCall. 
+ public partial class ChatCompletionMessageToolCall + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The ID of the tool call. + /// The function that the model called. + /// or is null. + public ChatCompletionMessageToolCall(string id, ChatCompletionMessageToolCallFunction function) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(function, nameof(function)); + + Id = id; + Function = function; + } + + /// Initializes a new instance of . + /// The ID of the tool call. + /// The type of the tool. Currently, only `function` is supported. + /// The function that the model called. + /// Keeps track of any properties unknown to the library. + internal ChatCompletionMessageToolCall(string id, ChatCompletionMessageToolCallType type, ChatCompletionMessageToolCallFunction function, IDictionary serializedAdditionalRawData) + { + Id = id; + Type = type; + Function = function; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionMessageToolCall() + { + } + + /// The ID of the tool call. + public string Id { get; set; } + /// The type of the tool. Currently, only `function` is supported. 
+ public ChatCompletionMessageToolCallType Type { get; } = ChatCompletionMessageToolCallType.Function; + + /// The function that the model called. + public ChatCompletionMessageToolCallFunction Function { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs new file mode 100644 index 000000000..ca4e609d1 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs @@ -0,0 +1,140 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ChatCompletionMessageToolCallFunction : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCallFunction)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + writer.WritePropertyName("arguments"u8); + writer.WriteStringValue(Arguments); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionMessageToolCallFunction IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCallFunction)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionMessageToolCallFunction(document.RootElement, options); + } + + internal static ChatCompletionMessageToolCallFunction DeserializeChatCompletionMessageToolCallFunction(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + string arguments = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + 
} + if (property.NameEquals("arguments"u8)) + { + arguments = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionMessageToolCallFunction(name, arguments, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCallFunction)} does not support '{options.Format}' format."); + } + } + + ChatCompletionMessageToolCallFunction IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionMessageToolCallFunction(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCallFunction)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionMessageToolCallFunction FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionMessageToolCallFunction(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs new file mode 100644 index 000000000..92b945f4c --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs @@ -0,0 +1,92 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The ChatCompletionMessageToolCallFunction. + public partial class ChatCompletionMessageToolCallFunction + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the function to call. + /// + /// The arguments to call the function with, as generated by the model in JSON format. Note that + /// the model does not always generate valid JSON, and may hallucinate parameters not defined by + /// your function schema. Validate the arguments in your code before calling your function. + /// + /// or is null. 
+ public ChatCompletionMessageToolCallFunction(string name, string arguments) + { + ClientUtilities.AssertNotNull(name, nameof(name)); + ClientUtilities.AssertNotNull(arguments, nameof(arguments)); + + Name = name; + Arguments = arguments; + } + + /// Initializes a new instance of . + /// The name of the function to call. + /// + /// The arguments to call the function with, as generated by the model in JSON format. Note that + /// the model does not always generate valid JSON, and may hallucinate parameters not defined by + /// your function schema. Validate the arguments in your code before calling your function. + /// + /// Keeps track of any properties unknown to the library. + internal ChatCompletionMessageToolCallFunction(string name, string arguments, IDictionary serializedAdditionalRawData) + { + Name = name; + Arguments = arguments; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionMessageToolCallFunction() + { + } + + /// The name of the function to call. + public string Name { get; set; } + /// + /// The arguments to call the function with, as generated by the model in JSON format. Note that + /// the model does not always generate valid JSON, and may hallucinate parameters not defined by + /// your function schema. Validate the arguments in your code before calling your function. + /// + public string Arguments { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs new file mode 100644 index 000000000..e6c9dff53 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The ChatCompletionMessageToolCall_type. 
+ public readonly partial struct ChatCompletionMessageToolCallType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ChatCompletionMessageToolCallType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FunctionValue = "function"; + + /// function. + public static ChatCompletionMessageToolCallType Function { get; } = new ChatCompletionMessageToolCallType(FunctionValue); + /// Determines if two values are the same. + public static bool operator ==(ChatCompletionMessageToolCallType left, ChatCompletionMessageToolCallType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ChatCompletionMessageToolCallType left, ChatCompletionMessageToolCallType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ChatCompletionMessageToolCallType(string value) => new ChatCompletionMessageToolCallType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ChatCompletionMessageToolCallType other && Equals(other); + /// + public bool Equals(ChatCompletionMessageToolCallType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs new file mode 100644 index 000000000..7ec5a3aeb --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs @@ -0,0 +1,140 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + internal partial class ChatCompletionNamedToolChoice : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoice)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("function"u8); + writer.WriteObjectValue(Function); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionNamedToolChoice IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoice)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionNamedToolChoice(document.RootElement, options); + } + + internal static ChatCompletionNamedToolChoice DeserializeChatCompletionNamedToolChoice(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ChatCompletionNamedToolChoiceType type = default; + ChatCompletionNamedToolChoiceFunction function = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = new ChatCompletionNamedToolChoiceType(property.Value.GetString()); + continue; + } + if (property.NameEquals("function"u8)) + { + function = ChatCompletionNamedToolChoiceFunction.DeserializeChatCompletionNamedToolChoiceFunction(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionNamedToolChoice(type, function, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoice)} does not support '{options.Format}' format."); + } + } + + ChatCompletionNamedToolChoice IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionNamedToolChoice(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoice)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionNamedToolChoice FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionNamedToolChoice(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs new file mode 100644 index 000000000..da8b10edb --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs @@ -0,0 +1,78 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// Specifies a tool the model should use. 
Use to force the model to call a specific function. + internal partial class ChatCompletionNamedToolChoice + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// is null. + public ChatCompletionNamedToolChoice(ChatCompletionNamedToolChoiceFunction function) + { + ClientUtilities.AssertNotNull(function, nameof(function)); + + Function = function; + } + + /// Initializes a new instance of . + /// The type of the tool. Currently, only `function` is supported. + /// + /// Keeps track of any properties unknown to the library. + internal ChatCompletionNamedToolChoice(ChatCompletionNamedToolChoiceType type, ChatCompletionNamedToolChoiceFunction function, IDictionary serializedAdditionalRawData) + { + Type = type; + Function = function; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionNamedToolChoice() + { + } + + /// The type of the tool. Currently, only `function` is supported. + public ChatCompletionNamedToolChoiceType Type { get; } = ChatCompletionNamedToolChoiceType.Function; + + /// Gets the function. 
+ public ChatCompletionNamedToolChoiceFunction Function { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs new file mode 100644 index 000000000..654aa4f90 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs @@ -0,0 +1,132 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + internal partial class ChatCompletionNamedToolChoiceFunction : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoiceFunction)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionNamedToolChoiceFunction IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoiceFunction)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionNamedToolChoiceFunction(document.RootElement, options); + } + + internal static ChatCompletionNamedToolChoiceFunction DeserializeChatCompletionNamedToolChoiceFunction(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionNamedToolChoiceFunction(name, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoiceFunction)} does not support '{options.Format}' format."); + } + } + + ChatCompletionNamedToolChoiceFunction IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionNamedToolChoiceFunction(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoiceFunction)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionNamedToolChoiceFunction FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionNamedToolChoiceFunction(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs new file mode 100644 index 000000000..383fceef2 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs @@ -0,0 +1,73 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The ChatCompletionNamedToolChoiceFunction. + internal partial class ChatCompletionNamedToolChoiceFunction + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". 
+ /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the function to call. + /// is null. + public ChatCompletionNamedToolChoiceFunction(string name) + { + ClientUtilities.AssertNotNull(name, nameof(name)); + + Name = name; + } + + /// Initializes a new instance of . + /// The name of the function to call. + /// Keeps track of any properties unknown to the library. + internal ChatCompletionNamedToolChoiceFunction(string name, IDictionary serializedAdditionalRawData) + { + Name = name; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionNamedToolChoiceFunction() + { + } + + /// The name of the function to call. + public string Name { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs new file mode 100644 index 000000000..8f94a2674 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The ChatCompletionNamedToolChoice_type. + internal readonly partial struct ChatCompletionNamedToolChoiceType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ChatCompletionNamedToolChoiceType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FunctionValue = "function"; + + /// function. 
+ public static ChatCompletionNamedToolChoiceType Function { get; } = new ChatCompletionNamedToolChoiceType(FunctionValue); + /// Determines if two values are the same. + public static bool operator ==(ChatCompletionNamedToolChoiceType left, ChatCompletionNamedToolChoiceType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ChatCompletionNamedToolChoiceType left, ChatCompletionNamedToolChoiceType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ChatCompletionNamedToolChoiceType(string value) => new ChatCompletionNamedToolChoiceType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ChatCompletionNamedToolChoiceType other && Equals(other); + /// + public bool Equals(ChatCompletionNamedToolChoiceType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs new file mode 100644 index 000000000..b467eb421 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs @@ -0,0 +1,192 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ChatCompletionResponseMessage : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionResponseMessage)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Content != null) + { + writer.WritePropertyName("content"u8); + writer.WriteStringValue(Content); + } + else + { + writer.WriteNull("content"); + } + if (OptionalProperty.IsCollectionDefined(ToolCalls)) + { + writer.WritePropertyName("tool_calls"u8); + writer.WriteStartArray(); + foreach (var item in ToolCalls) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + } + writer.WritePropertyName("role"u8); + writer.WriteStringValue(Role.ToString()); + if (OptionalProperty.IsDefined(FunctionCall)) + { + writer.WritePropertyName("function_call"u8); + writer.WriteObjectValue(FunctionCall); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if 
NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionResponseMessage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionResponseMessage)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionResponseMessage(document.RootElement, options); + } + + internal static ChatCompletionResponseMessage DeserializeChatCompletionResponseMessage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string content = default; + OptionalProperty> toolCalls = default; + ChatCompletionResponseMessageRole role = default; + OptionalProperty functionCall = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("content"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + content = null; + continue; + } + content = property.Value.GetString(); + continue; + } + if (property.NameEquals("tool_calls"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ChatCompletionMessageToolCall.DeserializeChatCompletionMessageToolCall(item)); + } + toolCalls = array; + continue; + } + if (property.NameEquals("role"u8)) + { + role = new 
ChatCompletionResponseMessageRole(property.Value.GetString()); + continue; + } + if (property.NameEquals("function_call"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + functionCall = ChatCompletionResponseMessageFunctionCall.DeserializeChatCompletionResponseMessageFunctionCall(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionResponseMessage(content, OptionalProperty.ToList(toolCalls), role, functionCall.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionResponseMessage)} does not support '{options.Format}' format."); + } + } + + ChatCompletionResponseMessage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionResponseMessage(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionResponseMessage)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. 
+ internal static ChatCompletionResponseMessage FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionResponseMessage(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs new file mode 100644 index 000000000..709a705ff --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs @@ -0,0 +1,84 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The ChatCompletionResponseMessage. + public partial class ChatCompletionResponseMessage + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The contents of the message. + internal ChatCompletionResponseMessage(string content) + { + Content = content; + ToolCalls = new OptionalList(); + } + + /// Initializes a new instance of . + /// The contents of the message. 
+ /// + /// The role of the author of this message. + /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. + /// Keeps track of any properties unknown to the library. + internal ChatCompletionResponseMessage(string content, IReadOnlyList toolCalls, ChatCompletionResponseMessageRole role, ChatCompletionResponseMessageFunctionCall functionCall, IDictionary serializedAdditionalRawData) + { + Content = content; + ToolCalls = toolCalls; + Role = role; + FunctionCall = functionCall; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionResponseMessage() + { + } + + /// The contents of the message. + public string Content { get; } + /// Gets the tool calls. + public IReadOnlyList ToolCalls { get; } + /// The role of the author of this message. + public ChatCompletionResponseMessageRole Role { get; } = ChatCompletionResponseMessageRole.Assistant; + + /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. 
+ public ChatCompletionResponseMessageFunctionCall FunctionCall { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs new file mode 100644 index 000000000..98aac4cd5 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs @@ -0,0 +1,140 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ChatCompletionResponseMessageFunctionCall : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionResponseMessageFunctionCall)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("arguments"u8); + writer.WriteStringValue(Arguments); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionResponseMessageFunctionCall IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionResponseMessageFunctionCall)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionResponseMessageFunctionCall(document.RootElement, options); + } + + internal static ChatCompletionResponseMessageFunctionCall DeserializeChatCompletionResponseMessageFunctionCall(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string arguments = default; + string name = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("arguments"u8)) + { + arguments = 
property.Value.GetString(); + continue; + } + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionResponseMessageFunctionCall(arguments, name, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionResponseMessageFunctionCall)} does not support '{options.Format}' format."); + } + } + + ChatCompletionResponseMessageFunctionCall IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionResponseMessageFunctionCall(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionResponseMessageFunctionCall)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionResponseMessageFunctionCall FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionResponseMessageFunctionCall(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs new file mode 100644 index 000000000..7f9e8549c --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs @@ -0,0 +1,92 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The ChatCompletionResponseMessageFunctionCall. + public partial class ChatCompletionResponseMessageFunctionCall + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The arguments to call the function with, as generated by the model in JSON format. Note that + /// the model does not always generate valid JSON, and may hallucinate parameters not defined by + /// your function schema. Validate the arguments in your code before calling your function. + /// + /// The name of the function to call. + /// or is null. 
+ internal ChatCompletionResponseMessageFunctionCall(string arguments, string name) + { + ClientUtilities.AssertNotNull(arguments, nameof(arguments)); + ClientUtilities.AssertNotNull(name, nameof(name)); + + Arguments = arguments; + Name = name; + } + + /// Initializes a new instance of . + /// + /// The arguments to call the function with, as generated by the model in JSON format. Note that + /// the model does not always generate valid JSON, and may hallucinate parameters not defined by + /// your function schema. Validate the arguments in your code before calling your function. + /// + /// The name of the function to call. + /// Keeps track of any properties unknown to the library. + internal ChatCompletionResponseMessageFunctionCall(string arguments, string name, IDictionary serializedAdditionalRawData) + { + Arguments = arguments; + Name = name; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionResponseMessageFunctionCall() + { + } + + /// + /// The arguments to call the function with, as generated by the model in JSON format. Note that + /// the model does not always generate valid JSON, and may hallucinate parameters not defined by + /// your function schema. Validate the arguments in your code before calling your function. + /// + public string Arguments { get; } + /// The name of the function to call. + public string Name { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs new file mode 100644 index 000000000..ee43ccb40 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The ChatCompletionResponseMessage_role. 
+ public readonly partial struct ChatCompletionResponseMessageRole : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ChatCompletionResponseMessageRole(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AssistantValue = "assistant"; + + /// assistant. + public static ChatCompletionResponseMessageRole Assistant { get; } = new ChatCompletionResponseMessageRole(AssistantValue); + /// Determines if two values are the same. + public static bool operator ==(ChatCompletionResponseMessageRole left, ChatCompletionResponseMessageRole right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ChatCompletionResponseMessageRole left, ChatCompletionResponseMessageRole right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ChatCompletionResponseMessageRole(string value) => new ChatCompletionResponseMessageRole(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ChatCompletionResponseMessageRole other && Equals(other); + /// + public bool Equals(ChatCompletionResponseMessageRole other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs new file mode 100644 index 000000000..b80e2c44a --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs @@ -0,0 +1,188 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ChatCompletionTokenLogprob : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprob)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("token"u8); + writer.WriteStringValue(Token); + writer.WritePropertyName("logprob"u8); + writer.WriteNumberValue(Logprob); + if (Bytes != null && OptionalProperty.IsCollectionDefined(Bytes)) + { + writer.WritePropertyName("bytes"u8); + writer.WriteStartArray(); + foreach (var item in Bytes) + { + writer.WriteNumberValue(item); + } + writer.WriteEndArray(); + } + else + { + writer.WriteNull("bytes"); + } + writer.WritePropertyName("top_logprobs"u8); + writer.WriteStartArray(); + foreach (var item in TopLogprobs) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + 
writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionTokenLogprob IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprob)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionTokenLogprob(document.RootElement, options); + } + + internal static ChatCompletionTokenLogprob DeserializeChatCompletionTokenLogprob(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string token = default; + double logprob = default; + IReadOnlyList bytes = default; + IReadOnlyList topLogprobs = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("token"u8)) + { + token = property.Value.GetString(); + continue; + } + if (property.NameEquals("logprob"u8)) + { + logprob = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("bytes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + bytes = new OptionalList(); + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetInt64()); + } + bytes = array; + continue; + } + if (property.NameEquals("top_logprobs"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + 
array.Add(ChatCompletionTokenLogprobTopLogprob.DeserializeChatCompletionTokenLogprobTopLogprob(item)); + } + topLogprobs = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionTokenLogprob(token, logprob, bytes, topLogprobs, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprob)} does not support '{options.Format}' format."); + } + } + + ChatCompletionTokenLogprob IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionTokenLogprob(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprob)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionTokenLogprob FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionTokenLogprob(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs new file mode 100644 index 000000000..6e487ac5c --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs @@ -0,0 +1,117 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ChatCompletionTokenLogprob. + public partial class ChatCompletionTokenLogprob + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The token. + /// The log probability of this token. + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. Can be `null` if there is no + /// bytes representation for the token. + /// + /// + /// List of the most likely tokens and their log probability, at this token position. 
In rare + /// cases, there may be fewer than the number of requested `top_logprobs` returned. + /// + /// or is null. + internal ChatCompletionTokenLogprob(string token, double logprob, IEnumerable bytes, IEnumerable topLogprobs) + { + ClientUtilities.AssertNotNull(token, nameof(token)); + ClientUtilities.AssertNotNull(topLogprobs, nameof(topLogprobs)); + + Token = token; + Logprob = logprob; + Bytes = bytes?.ToList(); + TopLogprobs = topLogprobs.ToList(); + } + + /// Initializes a new instance of . + /// The token. + /// The log probability of this token. + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. Can be `null` if there is no + /// bytes representation for the token. + /// + /// + /// List of the most likely tokens and their log probability, at this token position. In rare + /// cases, there may be fewer than the number of requested `top_logprobs` returned. + /// + /// Keeps track of any properties unknown to the library. + internal ChatCompletionTokenLogprob(string token, double logprob, IReadOnlyList bytes, IReadOnlyList topLogprobs, IDictionary serializedAdditionalRawData) + { + Token = token; + Logprob = logprob; + Bytes = bytes; + TopLogprobs = topLogprobs; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionTokenLogprob() + { + } + + /// The token. + public string Token { get; } + /// The log probability of this token. + public double Logprob { get; } + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. 
Can be `null` if there is no + /// bytes representation for the token. + /// + public IReadOnlyList Bytes { get; } + /// + /// List of the most likely tokens and their log probability, at this token position. In rare + /// cases, there may be fewer than the number of requested `top_logprobs` returned. + /// + public IReadOnlyList TopLogprobs { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs new file mode 100644 index 000000000..ab6b52872 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs @@ -0,0 +1,170 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ChatCompletionTokenLogprobTopLogprob : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprobTopLogprob)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("token"u8); + writer.WriteStringValue(Token); + writer.WritePropertyName("logprob"u8); + writer.WriteNumberValue(Logprob); + if (Bytes != null && OptionalProperty.IsCollectionDefined(Bytes)) + { + writer.WritePropertyName("bytes"u8); + writer.WriteStartArray(); + foreach (var item in Bytes) + { + writer.WriteNumberValue(item); + } + writer.WriteEndArray(); + } + else + { + writer.WriteNull("bytes"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionTokenLogprobTopLogprob IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprobTopLogprob)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionTokenLogprobTopLogprob(document.RootElement, options); + } + + internal static ChatCompletionTokenLogprobTopLogprob DeserializeChatCompletionTokenLogprobTopLogprob(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string token = default; + double logprob = default; + IReadOnlyList bytes = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("token"u8)) + { + token = property.Value.GetString(); + continue; + } + if (property.NameEquals("logprob"u8)) + { + logprob = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("bytes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + bytes = new OptionalList(); + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetInt64()); + } + bytes = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionTokenLogprobTopLogprob(token, logprob, bytes, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprobTopLogprob)} does not support '{options.Format}' format."); + } + } + + ChatCompletionTokenLogprobTopLogprob IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionTokenLogprobTopLogprob(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprobTopLogprob)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionTokenLogprobTopLogprob FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionTokenLogprobTopLogprob(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs new file mode 100644 index 000000000..f0a7c191b --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs @@ -0,0 +1,101 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ChatCompletionTokenLogprobTopLogprob. + public partial class ChatCompletionTokenLogprobTopLogprob + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The token. + /// The log probability of this token. + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. Can be `null` if there is no + /// bytes representation for the token. + /// + /// is null. 
+ internal ChatCompletionTokenLogprobTopLogprob(string token, double logprob, IEnumerable bytes) + { + ClientUtilities.AssertNotNull(token, nameof(token)); + + Token = token; + Logprob = logprob; + Bytes = bytes?.ToList(); + } + + /// Initializes a new instance of . + /// The token. + /// The log probability of this token. + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. Can be `null` if there is no + /// bytes representation for the token. + /// + /// Keeps track of any properties unknown to the library. + internal ChatCompletionTokenLogprobTopLogprob(string token, double logprob, IReadOnlyList bytes, IDictionary serializedAdditionalRawData) + { + Token = token; + Logprob = logprob; + Bytes = bytes; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionTokenLogprobTopLogprob() + { + } + + /// The token. + public string Token { get; } + /// The log probability of this token. + public double Logprob { get; } + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. Can be `null` if there is no + /// bytes representation for the token. 
+ /// + public IReadOnlyList Bytes { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs new file mode 100644 index 000000000..4f0233034 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs @@ -0,0 +1,140 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ChatCompletionTool : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionTool)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("function"u8); + writer.WriteObjectValue(Function); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionTool IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionTool)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionTool(document.RootElement, options); + } + + internal static ChatCompletionTool DeserializeChatCompletionTool(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ChatCompletionToolType type = default; + FunctionObject function = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = new ChatCompletionToolType(property.Value.GetString()); + continue; + } + if (property.NameEquals("function"u8)) + { + function = FunctionObject.DeserializeFunctionObject(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionTool(type, function, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionTool)} does not support '{options.Format}' format."); + } + } + + ChatCompletionTool IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionTool(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionTool)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionTool FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionTool(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionTool.cs b/.dotnet/src/Generated/Models/ChatCompletionTool.cs new file mode 100644 index 000000000..c03dc4647 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionTool.cs @@ -0,0 +1,78 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The ChatCompletionTool. + public partial class ChatCompletionTool + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. 
+ /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// is null. + public ChatCompletionTool(FunctionObject function) + { + ClientUtilities.AssertNotNull(function, nameof(function)); + + Function = function; + } + + /// Initializes a new instance of . + /// The type of the tool. Currently, only `function` is supported. + /// + /// Keeps track of any properties unknown to the library. + internal ChatCompletionTool(ChatCompletionToolType type, FunctionObject function, IDictionary serializedAdditionalRawData) + { + Type = type; + Function = function; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionTool() + { + } + + /// The type of the tool. Currently, only `function` is supported. + public ChatCompletionToolType Type { get; } = ChatCompletionToolType.Function; + + /// Gets the function. + public FunctionObject Function { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionToolType.cs b/.dotnet/src/Generated/Models/ChatCompletionToolType.cs new file mode 100644 index 000000000..f1a96376c --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionToolType.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The ChatCompletionTool_type. + public readonly partial struct ChatCompletionToolType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ChatCompletionToolType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FunctionValue = "function"; + + /// function. 
+ public static ChatCompletionToolType Function { get; } = new ChatCompletionToolType(FunctionValue); + /// Determines if two values are the same. + public static bool operator ==(ChatCompletionToolType left, ChatCompletionToolType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ChatCompletionToolType left, ChatCompletionToolType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ChatCompletionToolType(string value) => new ChatCompletionToolType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ChatCompletionToolType other && Equals(other); + /// + public bool Equals(ChatCompletionToolType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs b/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs new file mode 100644 index 000000000..ffaa5adcb --- /dev/null +++ b/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs @@ -0,0 +1,148 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CompletionUsage : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CompletionUsage)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("prompt_tokens"u8); + writer.WriteNumberValue(PromptTokens); + writer.WritePropertyName("completion_tokens"u8); + writer.WriteNumberValue(CompletionTokens); + writer.WritePropertyName("total_tokens"u8); + writer.WriteNumberValue(TotalTokens); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CompletionUsage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CompletionUsage)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCompletionUsage(document.RootElement, options); + } + + internal static CompletionUsage DeserializeCompletionUsage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long promptTokens = default; + long completionTokens = default; + long totalTokens = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("prompt_tokens"u8)) + { + promptTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("completion_tokens"u8)) + { + completionTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("total_tokens"u8)) + { + totalTokens = property.Value.GetInt64(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CompletionUsage(promptTokens, completionTokens, totalTokens, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CompletionUsage)} does not support '{options.Format}' format."); + } + } + + CompletionUsage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCompletionUsage(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CompletionUsage)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CompletionUsage FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCompletionUsage(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CompletionUsage.cs b/.dotnet/src/Generated/Models/CompletionUsage.cs new file mode 100644 index 000000000..07f38ee8b --- /dev/null +++ b/.dotnet/src/Generated/Models/CompletionUsage.cs @@ -0,0 +1,81 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// Usage statistics for the completion request. + public partial class CompletionUsage + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . 
+ /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// Number of tokens in the prompt. + /// Number of tokens in the generated completion. + /// Total number of tokens used in the request (prompt + completion). + internal CompletionUsage(long promptTokens, long completionTokens, long totalTokens) + { + PromptTokens = promptTokens; + CompletionTokens = completionTokens; + TotalTokens = totalTokens; + } + + /// Initializes a new instance of . + /// Number of tokens in the prompt. + /// Number of tokens in the generated completion. + /// Total number of tokens used in the request (prompt + completion). + /// Keeps track of any properties unknown to the library. + internal CompletionUsage(long promptTokens, long completionTokens, long totalTokens, IDictionary serializedAdditionalRawData) + { + PromptTokens = promptTokens; + CompletionTokens = completionTokens; + TotalTokens = totalTokens; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CompletionUsage() + { + } + + /// Number of tokens in the prompt. + public long PromptTokens { get; } + /// Number of tokens in the generated completion. + public long CompletionTokens { get; } + /// Total number of tokens used in the request (prompt + completion). 
+ public long TotalTokens { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs new file mode 100644 index 000000000..ae5a3b32e --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs @@ -0,0 +1,132 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateAssistantFileRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateAssistantFileRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("file_id"u8); + writer.WriteStringValue(FileId); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateAssistantFileRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateAssistantFileRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateAssistantFileRequest(document.RootElement, options); + } + + internal static CreateAssistantFileRequest DeserializeCreateAssistantFileRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string fileId = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("file_id"u8)) + { + fileId = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateAssistantFileRequest(fileId, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateAssistantFileRequest)} does not support '{options.Format}' format."); + } + } + + CreateAssistantFileRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateAssistantFileRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateAssistantFileRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateAssistantFileRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateAssistantFileRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs new file mode 100644 index 000000000..8c11246d1 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs @@ -0,0 +1,82 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateAssistantFileRequest. + public partial class CreateAssistantFileRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". 
+ /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should + /// use. Useful for tools like `retrieval` and `code_interpreter` that can access files. + /// + /// is null. + public CreateAssistantFileRequest(string fileId) + { + ClientUtilities.AssertNotNull(fileId, nameof(fileId)); + + FileId = fileId; + } + + /// Initializes a new instance of . + /// + /// A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should + /// use. Useful for tools like `retrieval` and `code_interpreter` that can access files. + /// + /// Keeps track of any properties unknown to the library. + internal CreateAssistantFileRequest(string fileId, IDictionary serializedAdditionalRawData) + { + FileId = fileId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateAssistantFileRequest() + { + } + + /// + /// A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should + /// use. Useful for tools like `retrieval` and `code_interpreter` that can access files. 
+ /// + public string FileId { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs new file mode 100644 index 000000000..8aaa22372 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs @@ -0,0 +1,303 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateAssistantRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateAssistantRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + if (OptionalProperty.IsDefined(Name)) + { + if (Name != null) + { + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + } + else + { + writer.WriteNull("name"); + } + } + if (OptionalProperty.IsDefined(Description)) + { + if (Description != null) + { + writer.WritePropertyName("description"u8); + writer.WriteStringValue(Description); + } + else + { + writer.WriteNull("description"); + } + } + if (OptionalProperty.IsDefined(Instructions)) + { + if (Instructions != null) + { + writer.WritePropertyName("instructions"u8); + writer.WriteStringValue(Instructions); + } + else + { + writer.WriteNull("instructions"); + } + } + if (OptionalProperty.IsCollectionDefined(Tools)) + { + writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var 
item in Tools) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + } + if (OptionalProperty.IsCollectionDefined(FileIds)) + { + writer.WritePropertyName("file_ids"u8); + writer.WriteStartArray(); + foreach (var item in FileIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + } + if (OptionalProperty.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateAssistantRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateAssistantRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateAssistantRequest(document.RootElement, options); + } + + internal static CreateAssistantRequest DeserializeCreateAssistantRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string model = default; + OptionalProperty name = default; + OptionalProperty description = default; + OptionalProperty instructions = default; + OptionalProperty> tools = default; + OptionalProperty> fileIds = default; + OptionalProperty> metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("name"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + name = null; + continue; + } + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("description"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + description = null; + continue; + } + description = property.Value.GetString(); + continue; + } + if (property.NameEquals("instructions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + instructions = null; + continue; + } + instructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("tools"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == 
JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + tools = array; + continue; + } + if (property.NameEquals("file_ids"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + fileIds = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateAssistantRequest(model, name.Value, description.Value, instructions.Value, OptionalProperty.ToList(tools), OptionalProperty.ToList(fileIds), OptionalProperty.ToDictionary(metadata), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateAssistantRequest)} does not support '{options.Format}' format."); + } + } + + CreateAssistantRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateAssistantRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateAssistantRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateAssistantRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateAssistantRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateAssistantRequest.cs b/.dotnet/src/Generated/Models/CreateAssistantRequest.cs new file mode 100644 index 000000000..115fbc0d9 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateAssistantRequest.cs @@ -0,0 +1,161 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateAssistantRequest. + public partial class CreateAssistantRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". 
+ /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// is null. + public CreateAssistantRequest(string model) + { + ClientUtilities.AssertNotNull(model, nameof(model)); + + Model = model; + Tools = new OptionalList(); + FileIds = new OptionalList(); + Metadata = new OptionalDictionary(); + } + + /// Initializes a new instance of . + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// The name of the assistant. The maximum length is 256 characters. + /// The description of the assistant. The maximum length is 512 characters. + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + /// + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. 
Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal CreateAssistantRequest(string model, string name, string description, string instructions, IList tools, IList fileIds, IDictionary metadata, IDictionary serializedAdditionalRawData) + { + Model = model; + Name = name; + Description = description; + Instructions = instructions; + Tools = tools; + FileIds = fileIds; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateAssistantRequest() + { + } + + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + public string Model { get; } + /// The name of the assistant. The maximum length is 256 characters. + public string Name { get; set; } + /// The description of the assistant. The maximum length is 512 characters. + public string Description { get; set; } + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + public string Instructions { get; set; } + /// + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. 
+ /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IList Tools { get; } + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. + /// + public IList FileIds { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs new file mode 100644 index 000000000..66c50785a --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs @@ -0,0 +1,582 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateChatCompletionRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("messages"u8); + writer.WriteStartArray(); + foreach (var item in Messages) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.ToString()); + if (OptionalProperty.IsDefined(FrequencyPenalty)) + { + if (FrequencyPenalty != null) + { + writer.WritePropertyName("frequency_penalty"u8); + writer.WriteNumberValue(FrequencyPenalty.Value); + } + else + { + writer.WriteNull("frequency_penalty"); + } + } + if (OptionalProperty.IsCollectionDefined(LogitBias)) + { + if (LogitBias != null) + { + writer.WritePropertyName("logit_bias"u8); + writer.WriteStartObject(); + foreach (var item in LogitBias) + { + writer.WritePropertyName(item.Key); + writer.WriteNumberValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("logit_bias"); + } + } + if (OptionalProperty.IsDefined(Logprobs)) + { + if (Logprobs != null) + { + writer.WritePropertyName("logprobs"u8); + writer.WriteBooleanValue(Logprobs.Value); + } + else + { + writer.WriteNull("logprobs"); + } + } + if (OptionalProperty.IsDefined(TopLogprobs)) + { + if (TopLogprobs != null) + { + writer.WritePropertyName("top_logprobs"u8); + writer.WriteNumberValue(TopLogprobs.Value); + } + else + { + writer.WriteNull("top_logprobs"); + } + } + if (OptionalProperty.IsDefined(MaxTokens)) + { + if (MaxTokens != null) + { + writer.WritePropertyName("max_tokens"u8); + writer.WriteNumberValue(MaxTokens.Value); + } + else + { + 
writer.WriteNull("max_tokens"); + } + } + if (OptionalProperty.IsDefined(N)) + { + if (N != null) + { + writer.WritePropertyName("n"u8); + writer.WriteNumberValue(N.Value); + } + else + { + writer.WriteNull("n"); + } + } + if (OptionalProperty.IsDefined(PresencePenalty)) + { + if (PresencePenalty != null) + { + writer.WritePropertyName("presence_penalty"u8); + writer.WriteNumberValue(PresencePenalty.Value); + } + else + { + writer.WriteNull("presence_penalty"); + } + } + if (OptionalProperty.IsDefined(ResponseFormat)) + { + writer.WritePropertyName("response_format"u8); + writer.WriteObjectValue(ResponseFormat); + } + if (OptionalProperty.IsDefined(Seed)) + { + if (Seed != null) + { + writer.WritePropertyName("seed"u8); + writer.WriteNumberValue(Seed.Value); + } + else + { + writer.WriteNull("seed"); + } + } + if (OptionalProperty.IsDefined(Stop)) + { + if (Stop != null) + { + writer.WritePropertyName("stop"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(Stop); +#else + using (JsonDocument document = JsonDocument.Parse(Stop)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + else + { + writer.WriteNull("stop"); + } + } + if (OptionalProperty.IsDefined(Stream)) + { + if (Stream != null) + { + writer.WritePropertyName("stream"u8); + writer.WriteBooleanValue(Stream.Value); + } + else + { + writer.WriteNull("stream"); + } + } + if (OptionalProperty.IsDefined(Temperature)) + { + if (Temperature != null) + { + writer.WritePropertyName("temperature"u8); + writer.WriteNumberValue(Temperature.Value); + } + else + { + writer.WriteNull("temperature"); + } + } + if (OptionalProperty.IsDefined(TopP)) + { + if (TopP != null) + { + writer.WritePropertyName("top_p"u8); + writer.WriteNumberValue(TopP.Value); + } + else + { + writer.WriteNull("top_p"); + } + } + if (OptionalProperty.IsCollectionDefined(Tools)) + { + writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var item in Tools) + { + writer.WriteObjectValue(item); + } 
+ writer.WriteEndArray(); + } + if (OptionalProperty.IsDefined(ToolChoice)) + { + writer.WritePropertyName("tool_choice"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(ToolChoice); +#else + using (JsonDocument document = JsonDocument.Parse(ToolChoice)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + if (OptionalProperty.IsDefined(User)) + { + writer.WritePropertyName("user"u8); + writer.WriteStringValue(User); + } + if (OptionalProperty.IsDefined(FunctionCall)) + { + writer.WritePropertyName("function_call"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(FunctionCall); +#else + using (JsonDocument document = JsonDocument.Parse(FunctionCall)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + if (OptionalProperty.IsCollectionDefined(Functions)) + { + writer.WritePropertyName("functions"u8); + writer.WriteStartArray(); + foreach (var item in Functions) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateChatCompletionRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateChatCompletionRequest(document.RootElement, options); + } + + internal static CreateChatCompletionRequest DeserializeCreateChatCompletionRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IList messages = default; + CreateChatCompletionRequestModel model = default; + OptionalProperty frequencyPenalty = default; + OptionalProperty> logitBias = default; + OptionalProperty logprobs = default; + OptionalProperty topLogprobs = default; + OptionalProperty maxTokens = default; + OptionalProperty n = default; + OptionalProperty presencePenalty = default; + OptionalProperty responseFormat = default; + OptionalProperty seed = default; + OptionalProperty stop = default; + OptionalProperty stream = default; + OptionalProperty temperature = default; + OptionalProperty topP = default; + OptionalProperty> tools = default; + OptionalProperty toolChoice = default; + OptionalProperty user = default; + OptionalProperty functionCall = default; + OptionalProperty> functions = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("messages"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + messages = array; + continue; + } + if (property.NameEquals("model"u8)) + { + model = new 
CreateChatCompletionRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("frequency_penalty"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + frequencyPenalty = null; + continue; + } + frequencyPenalty = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("logit_bias"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetInt64()); + } + logitBias = dictionary; + continue; + } + if (property.NameEquals("logprobs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + logprobs = null; + continue; + } + logprobs = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("top_logprobs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + topLogprobs = null; + continue; + } + topLogprobs = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("max_tokens"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + maxTokens = null; + continue; + } + maxTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("n"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + n = null; + continue; + } + n = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("presence_penalty"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + presencePenalty = null; + continue; + } + presencePenalty = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = CreateChatCompletionRequestResponseFormat.DeserializeCreateChatCompletionRequestResponseFormat(property.Value); + continue; + } + if (property.NameEquals("seed"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + 
seed = null; + continue; + } + seed = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("stop"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + stop = null; + continue; + } + stop = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("stream"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + stream = null; + continue; + } + stream = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("temperature"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + temperature = null; + continue; + } + temperature = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("top_p"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + topP = null; + continue; + } + topP = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("tools"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ChatCompletionTool.DeserializeChatCompletionTool(item)); + } + tools = array; + continue; + } + if (property.NameEquals("tool_choice"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + toolChoice = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("user"u8)) + { + user = property.Value.GetString(); + continue; + } + if (property.NameEquals("function_call"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + functionCall = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("functions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ChatCompletionFunctions.DeserializeChatCompletionFunctions(item)); + } + functions = array; + 
continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateChatCompletionRequest(messages, model, OptionalProperty.ToNullable(frequencyPenalty), OptionalProperty.ToDictionary(logitBias), OptionalProperty.ToNullable(logprobs), OptionalProperty.ToNullable(topLogprobs), OptionalProperty.ToNullable(maxTokens), OptionalProperty.ToNullable(n), OptionalProperty.ToNullable(presencePenalty), responseFormat.Value, OptionalProperty.ToNullable(seed), stop.Value, OptionalProperty.ToNullable(stream), OptionalProperty.ToNullable(temperature), OptionalProperty.ToNullable(topP), OptionalProperty.ToList(tools), toolChoice.Value, user.Value, functionCall.Value, OptionalProperty.ToList(functions), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateChatCompletionRequest)} does not support '{options.Format}' format."); + } + } + + CreateChatCompletionRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateChatCompletionRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateChatCompletionRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateChatCompletionRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateChatCompletionRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs new file mode 100644 index 000000000..c773e9297 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs @@ -0,0 +1,512 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The CreateChatCompletionRequest. + public partial class CreateChatCompletionRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". 
+ /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). + /// + /// + /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. + /// + /// is null. + public CreateChatCompletionRequest(IEnumerable messages, CreateChatCompletionRequestModel model) + { + ClientUtilities.AssertNotNull(messages, nameof(messages)); + + Messages = messages.ToList(); + Model = model; + LogitBias = new OptionalDictionary(); + Tools = new OptionalList(); + Functions = new OptionalList(); + } + + /// Initializes a new instance of . + /// + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). + /// + /// + /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. + /// + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + /// frequency in the text so far, decreasing the model's likelihood to repeat the same line + /// verbatim. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// Modify the likelihood of specified tokens appearing in the completion. 
+ /// + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an + /// associated bias value from -100 to 100. Mathematically, the bias is added to the logits + /// generated by the model prior to sampling. The exact effect will vary per model, but values + /// between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + /// should result in a ban or exclusive selection of the relevant token. + /// + /// + /// Whether to return log probabilities of the output tokens or not. If true, returns the log + /// probabilities of each output token returned in the `content` of `message`. This option is + /// currently not available on the `gpt-4-vision-preview` model. + /// + /// + /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token + /// position, each with an associated log probability. `logprobs` must be set to `true` if this + /// parameter is used. + /// + /// + /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + /// + /// The total length of input tokens and generated tokens is limited by the model's context length. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + /// for counting tokens. + /// + /// + /// How many chat completion choices to generate for each input message. Note that you will be + /// charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to + /// minimize costs. + /// + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + /// in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// An object specifying the format that the model must output. 
Compatible with + /// [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the + /// model generates is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + /// yourself via a system or user message. Without this, the model may generate an unending stream + /// of whitespace until the generation reaches the token limit, resulting in a long-running and + /// seemingly "stuck" request. Also note that the message content may be partially cut off if + /// `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the + /// conversation exceeded the max context length. + /// + /// + /// This feature is in Beta. + /// + /// If specified, our system will make a best effort to sample deterministically, such that + /// repeated requests with the same `seed` and parameters should return the same result. + /// + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + /// parameter to monitor changes in the backend. + /// + /// Up to 4 sequences where the API will stop generating further tokens. + /// + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available, with the stream terminated by a `data: [DONE]` message. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// + /// + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + /// more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. 
+ /// + /// + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers + /// the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + /// the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + /// + /// + /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this + /// to provide a list of functions the model may generate JSON inputs for. + /// + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// + /// Deprecated in favor of `tool_choice`. + /// + /// Controls which (if any) function is called by the model. `none` means the model will not call a + /// function and instead generates a message. `auto` means the model can pick between generating a + /// message or calling a function. Specifying a particular function via `{"name": "my_function"}` + /// forces the model to call that function. + /// + /// `none` is the default when no functions are present. `auto` is the default if functions are + /// present. + /// + /// + /// Deprecated in favor of `tools`. + /// + /// A list of functions the model may generate JSON inputs for. + /// + /// Keeps track of any properties unknown to the library. + internal CreateChatCompletionRequest(IList messages, CreateChatCompletionRequestModel model, double? frequencyPenalty, IDictionary logitBias, bool? logprobs, long? topLogprobs, long? maxTokens, long? n, double? presencePenalty, CreateChatCompletionRequestResponseFormat responseFormat, long? seed, BinaryData stop, bool? stream, double? temperature, double? 
topP, IList tools, BinaryData toolChoice, string user, BinaryData functionCall, IList functions, IDictionary serializedAdditionalRawData) + { + Messages = messages; + Model = model; + FrequencyPenalty = frequencyPenalty; + LogitBias = logitBias; + Logprobs = logprobs; + TopLogprobs = topLogprobs; + MaxTokens = maxTokens; + N = n; + PresencePenalty = presencePenalty; + ResponseFormat = responseFormat; + Seed = seed; + Stop = stop; + Stream = stream; + Temperature = temperature; + TopP = topP; + Tools = tools; + ToolChoice = toolChoice; + User = user; + FunctionCall = functionCall; + Functions = functions; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateChatCompletionRequest() + { + } + + /// + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IList Messages { get; } + /// + /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. + /// + public CreateChatCompletionRequestModel Model { get; } + /// + /// Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing + /// frequency in the text so far, decreasing the model's likelihood to repeat the same line + /// verbatim. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + public double? FrequencyPenalty { get; set; } + /// + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an + /// associated bias value from -100 to 100. Mathematically, the bias is added to the logits + /// generated by the model prior to sampling. The exact effect will vary per model, but values + /// between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + /// should result in a ban or exclusive selection of the relevant token. + /// + public IDictionary LogitBias { get; set; } + /// + /// Whether to return log probabilities of the output tokens or not. If true, returns the log + /// probabilities of each output token returned in the `content` of `message`. This option is + /// currently not available on the `gpt-4-vision-preview` model. + /// + public bool? Logprobs { get; set; } + /// + /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token + /// position, each with an associated log probability. `logprobs` must be set to `true` if this + /// parameter is used. + /// + public long? TopLogprobs { get; set; } + /// + /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + /// + /// The total length of input tokens and generated tokens is limited by the model's context length. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + /// for counting tokens. + /// + public long? MaxTokens { get; set; } + /// + /// How many chat completion choices to generate for each input message. 
Note that you will be + /// charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to + /// minimize costs. + /// + public long? N { get; set; } + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + /// in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + public double? PresencePenalty { get; set; } + /// + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the + /// model generates is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + /// yourself via a system or user message. Without this, the model may generate an unending stream + /// of whitespace until the generation reaches the token limit, resulting in a long-running and + /// seemingly "stuck" request. Also note that the message content may be partially cut off if + /// `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the + /// conversation exceeded the max context length. + /// + public CreateChatCompletionRequestResponseFormat ResponseFormat { get; set; } + /// + /// This feature is in Beta. + /// + /// If specified, our system will make a best effort to sample deterministically, such that + /// repeated requests with the same `seed` and parameters should return the same result. + /// + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + /// parameter to monitor changes in the backend. + /// + public long? Seed { get; set; } + /// + /// Up to 4 sequences where the API will stop generating further tokens. 
+ /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// + /// + /// + /// where T is of type + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData Stop { get; set; } + /// + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available, with the stream terminated by a `data: [DONE]` message. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// + public bool? Stream { get; set; } + /// + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + /// more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + /// + public double? Temperature { get; set; } + /// + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers + /// the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + /// the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + /// + public double? TopP { get; set; } + /// + /// A list of tools the model may call. 
Currently, only functions are supported as a tool. Use this + /// to provide a list of functions the model may generate JSON inputs for. + /// + public IList Tools { get; } + /// + /// Gets or sets the tool choice + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// "none" + /// + /// + /// "auto" + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData ToolChoice { get; set; } + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + public string User { get; set; } + /// + /// Deprecated in favor of `tool_choice`. + /// + /// Controls which (if any) function is called by the model. `none` means the model will not call a + /// function and instead generates a message. `auto` means the model can pick between generating a + /// message or calling a function. Specifying a particular function via `{"name": "my_function"}` + /// forces the model to call that function. + /// + /// `none` is the default when no functions are present. `auto` is the default if functions are + /// present. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . 
+ /// + /// + /// + /// Supported types: + /// + /// + /// "none" + /// + /// + /// "auto" + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData FunctionCall { get; set; } + /// + /// Deprecated in favor of `tools`. + /// + /// A list of functions the model may generate JSON inputs for. + /// + public IList Functions { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs new file mode 100644 index 000000000..a0c497c47 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs @@ -0,0 +1,90 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in CreateChatCompletionRequest. + public readonly partial struct CreateChatCompletionRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateChatCompletionRequestModel(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string Gpt40125PreviewValue = "gpt-4-0125-preview"; + private const string Gpt4TurboPreviewValue = "gpt-4-turbo-preview"; + private const string Gpt41106PreviewValue = "gpt-4-1106-preview"; + private const string Gpt4VisionPreviewValue = "gpt-4-vision-preview"; + private const string Gpt4Value = "gpt-4"; + private const string Gpt40314Value = "gpt-4-0314"; + private const string Gpt40613Value = "gpt-4-0613"; + private const string Gpt432kValue = "gpt-4-32k"; + private const string Gpt432k0314Value = "gpt-4-32k-0314"; + private const string Gpt432k0613Value = "gpt-4-32k-0613"; + private const string Gpt35TurboValue = "gpt-3.5-turbo"; + private const string Gpt35Turbo16kValue = "gpt-3.5-turbo-16k"; + private const string Gpt35Turbo0301Value = "gpt-3.5-turbo-0301"; + private const string Gpt35Turbo0613Value = "gpt-3.5-turbo-0613"; + private const string Gpt35Turbo1106Value = "gpt-3.5-turbo-1106"; + private const string Gpt35Turbo16k0613Value = "gpt-3.5-turbo-16k-0613"; + + /// gpt-4-0125-preview. + public static CreateChatCompletionRequestModel Gpt40125Preview { get; } = new CreateChatCompletionRequestModel(Gpt40125PreviewValue); + /// gpt-4-turbo-preview. + public static CreateChatCompletionRequestModel Gpt4TurboPreview { get; } = new CreateChatCompletionRequestModel(Gpt4TurboPreviewValue); + /// gpt-4-1106-preview. + public static CreateChatCompletionRequestModel Gpt41106Preview { get; } = new CreateChatCompletionRequestModel(Gpt41106PreviewValue); + /// gpt-4-vision-preview. + public static CreateChatCompletionRequestModel Gpt4VisionPreview { get; } = new CreateChatCompletionRequestModel(Gpt4VisionPreviewValue); + /// gpt-4. + public static CreateChatCompletionRequestModel Gpt4 { get; } = new CreateChatCompletionRequestModel(Gpt4Value); + /// gpt-4-0314. + public static CreateChatCompletionRequestModel Gpt40314 { get; } = new CreateChatCompletionRequestModel(Gpt40314Value); + /// gpt-4-0613. 
+ public static CreateChatCompletionRequestModel Gpt40613 { get; } = new CreateChatCompletionRequestModel(Gpt40613Value); + /// gpt-4-32k. + public static CreateChatCompletionRequestModel Gpt432k { get; } = new CreateChatCompletionRequestModel(Gpt432kValue); + /// gpt-4-32k-0314. + public static CreateChatCompletionRequestModel Gpt432k0314 { get; } = new CreateChatCompletionRequestModel(Gpt432k0314Value); + /// gpt-4-32k-0613. + public static CreateChatCompletionRequestModel Gpt432k0613 { get; } = new CreateChatCompletionRequestModel(Gpt432k0613Value); + /// gpt-3.5-turbo. + public static CreateChatCompletionRequestModel Gpt35Turbo { get; } = new CreateChatCompletionRequestModel(Gpt35TurboValue); + /// gpt-3.5-turbo-16k. + public static CreateChatCompletionRequestModel Gpt35Turbo16k { get; } = new CreateChatCompletionRequestModel(Gpt35Turbo16kValue); + /// gpt-3.5-turbo-0301. + public static CreateChatCompletionRequestModel Gpt35Turbo0301 { get; } = new CreateChatCompletionRequestModel(Gpt35Turbo0301Value); + /// gpt-3.5-turbo-0613. + public static CreateChatCompletionRequestModel Gpt35Turbo0613 { get; } = new CreateChatCompletionRequestModel(Gpt35Turbo0613Value); + /// gpt-3.5-turbo-1106. + public static CreateChatCompletionRequestModel Gpt35Turbo1106 { get; } = new CreateChatCompletionRequestModel(Gpt35Turbo1106Value); + /// gpt-3.5-turbo-16k-0613. + public static CreateChatCompletionRequestModel Gpt35Turbo16k0613 { get; } = new CreateChatCompletionRequestModel(Gpt35Turbo16k0613Value); + /// Determines if two values are the same. + public static bool operator ==(CreateChatCompletionRequestModel left, CreateChatCompletionRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateChatCompletionRequestModel left, CreateChatCompletionRequestModel right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator CreateChatCompletionRequestModel(string value) => new CreateChatCompletionRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateChatCompletionRequestModel other && Equals(other); + /// + public bool Equals(CreateChatCompletionRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs new file mode 100644 index 000000000..ecd0ac9b1 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs @@ -0,0 +1,139 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateChatCompletionRequestResponseFormat : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionRequestResponseFormat)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (OptionalProperty.IsDefined(Type)) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateChatCompletionRequestResponseFormat IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionRequestResponseFormat)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateChatCompletionRequestResponseFormat(document.RootElement, options); + } + + internal static CreateChatCompletionRequestResponseFormat DeserializeCreateChatCompletionRequestResponseFormat(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OptionalProperty type = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + 
continue; + } + type = new CreateChatCompletionRequestResponseFormatType(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateChatCompletionRequestResponseFormat(OptionalProperty.ToNullable(type), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateChatCompletionRequestResponseFormat)} does not support '{options.Format}' format."); + } + } + + CreateChatCompletionRequestResponseFormat IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateChatCompletionRequestResponseFormat(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateChatCompletionRequestResponseFormat)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateChatCompletionRequestResponseFormat FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateChatCompletionRequestResponseFormat(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs new file mode 100644 index 000000000..5c8f4c2c0 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs @@ -0,0 +1,62 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateChatCompletionRequestResponseFormat. + public partial class CreateChatCompletionRequestResponseFormat + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public CreateChatCompletionRequestResponseFormat() + { + } + + /// Initializes a new instance of . + /// Must be one of `text` or `json_object`. + /// Keeps track of any properties unknown to the library. + internal CreateChatCompletionRequestResponseFormat(CreateChatCompletionRequestResponseFormatType? type, IDictionary serializedAdditionalRawData) + { + Type = type; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Must be one of `text` or `json_object`. 
+ public CreateChatCompletionRequestResponseFormatType? Type { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs new file mode 100644 index 000000000..d0332868f --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for type in CreateChatCompletionRequestResponseFormat. + public readonly partial struct CreateChatCompletionRequestResponseFormatType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateChatCompletionRequestResponseFormatType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TextValue = "text"; + private const string JsonObjectValue = "json_object"; + + /// text. + public static CreateChatCompletionRequestResponseFormatType Text { get; } = new CreateChatCompletionRequestResponseFormatType(TextValue); + /// json_object. + public static CreateChatCompletionRequestResponseFormatType JsonObject { get; } = new CreateChatCompletionRequestResponseFormatType(JsonObjectValue); + /// Determines if two values are the same. + public static bool operator ==(CreateChatCompletionRequestResponseFormatType left, CreateChatCompletionRequestResponseFormatType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateChatCompletionRequestResponseFormatType left, CreateChatCompletionRequestResponseFormatType right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator CreateChatCompletionRequestResponseFormatType(string value) => new CreateChatCompletionRequestResponseFormatType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateChatCompletionRequestResponseFormatType other && Equals(other); + /// + public bool Equals(CreateChatCompletionRequestResponseFormatType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs new file mode 100644 index 000000000..c560aa105 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs @@ -0,0 +1,200 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateChatCompletionResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("choices"u8); + writer.WriteStartArray(); + foreach (var item in Choices) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("created"u8); + writer.WriteNumberValue(Created, "U"); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + if (OptionalProperty.IsDefined(SystemFingerprint)) + { + writer.WritePropertyName("system_fingerprint"u8); + writer.WriteStringValue(SystemFingerprint); + } + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (OptionalProperty.IsDefined(Usage)) + { + writer.WritePropertyName("usage"u8); + writer.WriteObjectValue(Usage); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateChatCompletionResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateChatCompletionResponse(document.RootElement, options); + } + + internal static CreateChatCompletionResponse DeserializeCreateChatCompletionResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + IReadOnlyList choices = default; + DateTimeOffset created = default; + string model = default; + OptionalProperty systemFingerprint = default; + CreateChatCompletionResponseObject @object = default; + OptionalProperty usage = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("choices"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(CreateChatCompletionResponseChoice.DeserializeCreateChatCompletionResponseChoice(item)); + } + choices = array; + continue; + } + if (property.NameEquals("created"u8)) + { + created = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("system_fingerprint"u8)) + { + systemFingerprint = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new CreateChatCompletionResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("usage"u8)) + { + if 
(property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + usage = CompletionUsage.DeserializeCompletionUsage(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateChatCompletionResponse(id, choices, created, model, systemFingerprint.Value, @object, usage.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateChatCompletionResponse)} does not support '{options.Format}' format."); + } + } + + CreateChatCompletionResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateChatCompletionResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateChatCompletionResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateChatCompletionResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateChatCompletionResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs new file mode 100644 index 000000000..cc16880d8 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs @@ -0,0 +1,117 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// Represents a chat completion response returned by model, based on the provided input. + public partial class CreateChatCompletionResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A unique identifier for the chat completion. + /// A list of chat completion choices. Can be more than one if `n` is greater than 1. + /// The Unix timestamp (in seconds) of when the chat completion was created. + /// The model used for the chat completion. + /// , or is null. 
+ internal CreateChatCompletionResponse(string id, IEnumerable choices, DateTimeOffset created, string model) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(choices, nameof(choices)); + ClientUtilities.AssertNotNull(model, nameof(model)); + + Id = id; + Choices = choices.ToList(); + Created = created; + Model = model; + } + + /// Initializes a new instance of . + /// A unique identifier for the chat completion. + /// A list of chat completion choices. Can be more than one if `n` is greater than 1. + /// The Unix timestamp (in seconds) of when the chat completion was created. + /// The model used for the chat completion. + /// + /// This fingerprint represents the backend configuration that the model runs with. + /// + /// Can be used in conjunction with the `seed` request parameter to understand when backend changes + /// have been made that might impact determinism. + /// + /// The object type, which is always `chat.completion`. + /// + /// Keeps track of any properties unknown to the library. + internal CreateChatCompletionResponse(string id, IReadOnlyList choices, DateTimeOffset created, string model, string systemFingerprint, CreateChatCompletionResponseObject @object, CompletionUsage usage, IDictionary serializedAdditionalRawData) + { + Id = id; + Choices = choices; + Created = created; + Model = model; + SystemFingerprint = systemFingerprint; + Object = @object; + Usage = usage; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateChatCompletionResponse() + { + } + + /// A unique identifier for the chat completion. + public string Id { get; } + /// A list of chat completion choices. Can be more than one if `n` is greater than 1. + public IReadOnlyList Choices { get; } + /// The Unix timestamp (in seconds) of when the chat completion was created. + public DateTimeOffset Created { get; } + /// The model used for the chat completion. 
+ public string Model { get; } + /// + /// This fingerprint represents the backend configuration that the model runs with. + /// + /// Can be used in conjunction with the `seed` request parameter to understand when backend changes + /// have been made that might impact determinism. + /// + public string SystemFingerprint { get; } + /// The object type, which is always `chat.completion`. + public CreateChatCompletionResponseObject Object { get; } = CreateChatCompletionResponseObject.ChatCompletion; + + /// Gets the usage. + public CompletionUsage Usage { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs new file mode 100644 index 000000000..8663dced6 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs @@ -0,0 +1,168 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateChatCompletionResponseChoice : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoice)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("finish_reason"u8); + writer.WriteStringValue(FinishReason.ToString()); + writer.WritePropertyName("index"u8); + writer.WriteNumberValue(Index); + writer.WritePropertyName("message"u8); + writer.WriteObjectValue(Message); + if (Logprobs != null) + { + writer.WritePropertyName("logprobs"u8); + writer.WriteObjectValue(Logprobs); + } + else + { + writer.WriteNull("logprobs"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateChatCompletionResponseChoice IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoice)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateChatCompletionResponseChoice(document.RootElement, options); + } + + internal static CreateChatCompletionResponseChoice DeserializeCreateChatCompletionResponseChoice(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + CreateChatCompletionResponseChoiceFinishReason finishReason = default; + long index = default; + ChatCompletionResponseMessage message = default; + CreateChatCompletionResponseChoiceLogprobs logprobs = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("finish_reason"u8)) + { + finishReason = new CreateChatCompletionResponseChoiceFinishReason(property.Value.GetString()); + continue; + } + if (property.NameEquals("index"u8)) + { + index = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("message"u8)) + { + message = ChatCompletionResponseMessage.DeserializeChatCompletionResponseMessage(property.Value); + continue; + } + if (property.NameEquals("logprobs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + logprobs = null; + continue; + } + logprobs = CreateChatCompletionResponseChoiceLogprobs.DeserializeCreateChatCompletionResponseChoiceLogprobs(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new 
CreateChatCompletionResponseChoice(finishReason, index, message, logprobs, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoice)} does not support '{options.Format}' format."); + } + } + + CreateChatCompletionResponseChoice IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateChatCompletionResponseChoice(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoice)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateChatCompletionResponseChoice FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateChatCompletionResponseChoice(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs new file mode 100644 index 000000000..7a5b8c5c9 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs @@ -0,0 +1,109 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateChatCompletionResponseChoice. + public partial class CreateChatCompletionResponseChoice + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a + /// natural stop point or a provided stop sequence, `length` if the maximum number of tokens + /// specified in the request was reached, `content_filter` if content was omitted due to a flag + /// from our content filters, `tool_calls` if the model called a tool, or `function_call` + /// (deprecated) if the model called a function. + /// + /// The index of the choice in the list of choices. 
+ /// + /// Log probability information for the choice. + /// is null. + internal CreateChatCompletionResponseChoice(CreateChatCompletionResponseChoiceFinishReason finishReason, long index, ChatCompletionResponseMessage message, CreateChatCompletionResponseChoiceLogprobs logprobs) + { + ClientUtilities.AssertNotNull(message, nameof(message)); + + FinishReason = finishReason; + Index = index; + Message = message; + Logprobs = logprobs; + } + + /// Initializes a new instance of . + /// + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a + /// natural stop point or a provided stop sequence, `length` if the maximum number of tokens + /// specified in the request was reached, `content_filter` if content was omitted due to a flag + /// from our content filters, `tool_calls` if the model called a tool, or `function_call` + /// (deprecated) if the model called a function. + /// + /// The index of the choice in the list of choices. + /// + /// Log probability information for the choice. + /// Keeps track of any properties unknown to the library. + internal CreateChatCompletionResponseChoice(CreateChatCompletionResponseChoiceFinishReason finishReason, long index, ChatCompletionResponseMessage message, CreateChatCompletionResponseChoiceLogprobs logprobs, IDictionary serializedAdditionalRawData) + { + FinishReason = finishReason; + Index = index; + Message = message; + Logprobs = logprobs; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateChatCompletionResponseChoice() + { + } + + /// + /// The reason the model stopped generating tokens. 
This will be `stop` if the model hit a + /// natural stop point or a provided stop sequence, `length` if the maximum number of tokens + /// specified in the request was reached, `content_filter` if content was omitted due to a flag + /// from our content filters, `tool_calls` if the model called a tool, or `function_call` + /// (deprecated) if the model called a function. + /// + public CreateChatCompletionResponseChoiceFinishReason FinishReason { get; } + /// The index of the choice in the list of choices. + public long Index { get; } + /// Gets the message. + public ChatCompletionResponseMessage Message { get; } + /// Log probability information for the choice. + public CreateChatCompletionResponseChoiceLogprobs Logprobs { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs new file mode 100644 index 000000000..65e41895b --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs @@ -0,0 +1,57 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for finish_reason in CreateChatCompletionResponseChoice. + public readonly partial struct CreateChatCompletionResponseChoiceFinishReason : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateChatCompletionResponseChoiceFinishReason(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string StopValue = "stop"; + private const string LengthValue = "length"; + private const string ToolCallsValue = "tool_calls"; + private const string ContentFilterValue = "content_filter"; + private const string FunctionCallValue = "function_call"; + + /// stop. 
+ public static CreateChatCompletionResponseChoiceFinishReason Stop { get; } = new CreateChatCompletionResponseChoiceFinishReason(StopValue); + /// length. + public static CreateChatCompletionResponseChoiceFinishReason Length { get; } = new CreateChatCompletionResponseChoiceFinishReason(LengthValue); + /// tool_calls. + public static CreateChatCompletionResponseChoiceFinishReason ToolCalls { get; } = new CreateChatCompletionResponseChoiceFinishReason(ToolCallsValue); + /// content_filter. + public static CreateChatCompletionResponseChoiceFinishReason ContentFilter { get; } = new CreateChatCompletionResponseChoiceFinishReason(ContentFilterValue); + /// function_call. + public static CreateChatCompletionResponseChoiceFinishReason FunctionCall { get; } = new CreateChatCompletionResponseChoiceFinishReason(FunctionCallValue); + /// Determines if two values are the same. + public static bool operator ==(CreateChatCompletionResponseChoiceFinishReason left, CreateChatCompletionResponseChoiceFinishReason right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateChatCompletionResponseChoiceFinishReason left, CreateChatCompletionResponseChoiceFinishReason right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateChatCompletionResponseChoiceFinishReason(string value) => new CreateChatCompletionResponseChoiceFinishReason(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateChatCompletionResponseChoiceFinishReason other && Equals(other); + /// + public bool Equals(CreateChatCompletionResponseChoiceFinishReason other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs new file mode 100644 index 000000000..8893ed290 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs @@ -0,0 +1,154 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateChatCompletionResponseChoiceLogprobs : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoiceLogprobs)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Content != null && OptionalProperty.IsCollectionDefined(Content)) + { + writer.WritePropertyName("content"u8); + writer.WriteStartArray(); + foreach (var item in Content) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + } + else + { + writer.WriteNull("content"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + 
CreateChatCompletionResponseChoiceLogprobs IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoiceLogprobs)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateChatCompletionResponseChoiceLogprobs(document.RootElement, options); + } + + internal static CreateChatCompletionResponseChoiceLogprobs DeserializeCreateChatCompletionResponseChoiceLogprobs(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList content = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("content"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + content = new OptionalList(); + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ChatCompletionTokenLogprob.DeserializeChatCompletionTokenLogprob(item)); + } + content = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateChatCompletionResponseChoiceLogprobs(content, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoiceLogprobs)} does not support '{options.Format}' format."); + } + } + + CreateChatCompletionResponseChoiceLogprobs IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateChatCompletionResponseChoiceLogprobs(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoiceLogprobs)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateChatCompletionResponseChoiceLogprobs FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateChatCompletionResponseChoiceLogprobs(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.cs new file mode 100644 index 000000000..36cc66621 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.cs @@ -0,0 +1,70 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The CreateChatCompletionResponseChoiceLogprobs. + public partial class CreateChatCompletionResponseChoiceLogprobs + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + internal CreateChatCompletionResponseChoiceLogprobs(IEnumerable content) + { + Content = content?.ToList(); + } + + /// Initializes a new instance of . + /// + /// Keeps track of any properties unknown to the library. + internal CreateChatCompletionResponseChoiceLogprobs(IReadOnlyList content, IDictionary serializedAdditionalRawData) + { + Content = content; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. 
+ internal CreateChatCompletionResponseChoiceLogprobs() + { + } + + /// Gets the content. + public IReadOnlyList Content { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs new file mode 100644 index 000000000..3e2747865 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The CreateChatCompletionResponse_object. + public readonly partial struct CreateChatCompletionResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateChatCompletionResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ChatCompletionValue = "chat.completion"; + + /// chat.completion. + public static CreateChatCompletionResponseObject ChatCompletion { get; } = new CreateChatCompletionResponseObject(ChatCompletionValue); + /// Determines if two values are the same. + public static bool operator ==(CreateChatCompletionResponseObject left, CreateChatCompletionResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateChatCompletionResponseObject left, CreateChatCompletionResponseObject right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator CreateChatCompletionResponseObject(string value) => new CreateChatCompletionResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateChatCompletionResponseObject other && Equals(other); + /// + public bool Equals(CreateChatCompletionResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs new file mode 100644 index 000000000..9d1be276a --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs @@ -0,0 +1,509 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateCompletionRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.ToString()); + if (Prompt != null) + { + writer.WritePropertyName("prompt"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(Prompt); +#else + using (JsonDocument document = JsonDocument.Parse(Prompt)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + else + { + writer.WriteNull("prompt"); + } + if (OptionalProperty.IsDefined(BestOf)) + { + if (BestOf != null) + { + writer.WritePropertyName("best_of"u8); + writer.WriteNumberValue(BestOf.Value); + } + else + { + writer.WriteNull("best_of"); + } + } + if (OptionalProperty.IsDefined(Echo)) + { + if (Echo != null) + { + writer.WritePropertyName("echo"u8); + writer.WriteBooleanValue(Echo.Value); + } + else + { + writer.WriteNull("echo"); + } + } + if (OptionalProperty.IsDefined(FrequencyPenalty)) + { + if (FrequencyPenalty != null) + { + writer.WritePropertyName("frequency_penalty"u8); + writer.WriteNumberValue(FrequencyPenalty.Value); + } + else + { + writer.WriteNull("frequency_penalty"); + } + } + if (OptionalProperty.IsCollectionDefined(LogitBias)) + { + if (LogitBias != null) + { + writer.WritePropertyName("logit_bias"u8); + writer.WriteStartObject(); + foreach (var item in LogitBias) + { + writer.WritePropertyName(item.Key); + writer.WriteNumberValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("logit_bias"); + } + } + if (OptionalProperty.IsDefined(Logprobs)) + { + if (Logprobs != null) + { + writer.WritePropertyName("logprobs"u8); + writer.WriteNumberValue(Logprobs.Value); + } + else + { + writer.WriteNull("logprobs"); + } + } + if (OptionalProperty.IsDefined(MaxTokens)) + { + if (MaxTokens != null) + { + 
writer.WritePropertyName("max_tokens"u8); + writer.WriteNumberValue(MaxTokens.Value); + } + else + { + writer.WriteNull("max_tokens"); + } + } + if (OptionalProperty.IsDefined(N)) + { + if (N != null) + { + writer.WritePropertyName("n"u8); + writer.WriteNumberValue(N.Value); + } + else + { + writer.WriteNull("n"); + } + } + if (OptionalProperty.IsDefined(PresencePenalty)) + { + if (PresencePenalty != null) + { + writer.WritePropertyName("presence_penalty"u8); + writer.WriteNumberValue(PresencePenalty.Value); + } + else + { + writer.WriteNull("presence_penalty"); + } + } + if (OptionalProperty.IsDefined(Seed)) + { + if (Seed != null) + { + writer.WritePropertyName("seed"u8); + writer.WriteNumberValue(Seed.Value); + } + else + { + writer.WriteNull("seed"); + } + } + if (OptionalProperty.IsDefined(Stop)) + { + if (Stop != null) + { + writer.WritePropertyName("stop"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(Stop); +#else + using (JsonDocument document = JsonDocument.Parse(Stop)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + else + { + writer.WriteNull("stop"); + } + } + if (OptionalProperty.IsDefined(Stream)) + { + if (Stream != null) + { + writer.WritePropertyName("stream"u8); + writer.WriteBooleanValue(Stream.Value); + } + else + { + writer.WriteNull("stream"); + } + } + if (OptionalProperty.IsDefined(Suffix)) + { + if (Suffix != null) + { + writer.WritePropertyName("suffix"u8); + writer.WriteStringValue(Suffix); + } + else + { + writer.WriteNull("suffix"); + } + } + if (OptionalProperty.IsDefined(Temperature)) + { + if (Temperature != null) + { + writer.WritePropertyName("temperature"u8); + writer.WriteNumberValue(Temperature.Value); + } + else + { + writer.WriteNull("temperature"); + } + } + if (OptionalProperty.IsDefined(TopP)) + { + if (TopP != null) + { + writer.WritePropertyName("top_p"u8); + writer.WriteNumberValue(TopP.Value); + } + else + { + writer.WriteNull("top_p"); + } + } + if (OptionalProperty.IsDefined(User)) 
+ { + writer.WritePropertyName("user"u8); + writer.WriteStringValue(User); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateCompletionRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateCompletionRequest(document.RootElement, options); + } + + internal static CreateCompletionRequest DeserializeCreateCompletionRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + CreateCompletionRequestModel model = default; + BinaryData prompt = default; + OptionalProperty bestOf = default; + OptionalProperty echo = default; + OptionalProperty frequencyPenalty = default; + OptionalProperty> logitBias = default; + OptionalProperty logprobs = default; + OptionalProperty maxTokens = default; + OptionalProperty n = default; + OptionalProperty presencePenalty = default; + OptionalProperty seed = default; + OptionalProperty stop = default; + OptionalProperty stream = default; + OptionalProperty suffix = default; + OptionalProperty temperature = default; + OptionalProperty topP = default; + OptionalProperty user = default; + IDictionary serializedAdditionalRawData = default; + Dictionary 
additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("model"u8)) + { + model = new CreateCompletionRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("prompt"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + prompt = null; + continue; + } + prompt = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("best_of"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + bestOf = null; + continue; + } + bestOf = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("echo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + echo = null; + continue; + } + echo = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("frequency_penalty"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + frequencyPenalty = null; + continue; + } + frequencyPenalty = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("logit_bias"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetInt64()); + } + logitBias = dictionary; + continue; + } + if (property.NameEquals("logprobs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + logprobs = null; + continue; + } + logprobs = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("max_tokens"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + maxTokens = null; + continue; + } + maxTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("n"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + n = null; + continue; + } + n = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("presence_penalty"u8)) + 
{ + if (property.Value.ValueKind == JsonValueKind.Null) + { + presencePenalty = null; + continue; + } + presencePenalty = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("seed"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + seed = null; + continue; + } + seed = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("stop"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + stop = null; + continue; + } + stop = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("stream"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + stream = null; + continue; + } + stream = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("suffix"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + suffix = null; + continue; + } + suffix = property.Value.GetString(); + continue; + } + if (property.NameEquals("temperature"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + temperature = null; + continue; + } + temperature = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("top_p"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + topP = null; + continue; + } + topP = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("user"u8)) + { + user = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateCompletionRequest(model, prompt, OptionalProperty.ToNullable(bestOf), OptionalProperty.ToNullable(echo), OptionalProperty.ToNullable(frequencyPenalty), OptionalProperty.ToDictionary(logitBias), OptionalProperty.ToNullable(logprobs), OptionalProperty.ToNullable(maxTokens), OptionalProperty.ToNullable(n), OptionalProperty.ToNullable(presencePenalty), 
OptionalProperty.ToNullable(seed), stop.Value, OptionalProperty.ToNullable(stream), suffix.Value, OptionalProperty.ToNullable(temperature), OptionalProperty.ToNullable(topP), user.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateCompletionRequest)} does not support '{options.Format}' format."); + } + } + + CreateCompletionRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateCompletionRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateCompletionRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateCompletionRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateCompletionRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionRequest.cs b/.dotnet/src/Generated/Models/CreateCompletionRequest.cs new file mode 100644 index 000000000..7797fb2a3 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionRequest.cs @@ -0,0 +1,401 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateCompletionRequest. + public partial class CreateCompletionRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// + /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of + /// tokens, or array of token arrays. 
+ /// + /// Note that <|endoftext|> is the document separator that the model sees during training, so if a + /// prompt is not specified the model will generate as if from the beginning of a new document. + /// + public CreateCompletionRequest(CreateCompletionRequestModel model, BinaryData prompt) + { + Model = model; + Prompt = prompt; + LogitBias = new OptionalDictionary(); + } + + /// Initializes a new instance of . + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// + /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of + /// tokens, or array of token arrays. + /// + /// Note that <|endoftext|> is the document separator that the model sees during training, so if a + /// prompt is not specified the model will generate as if from the beginning of a new document. + /// + /// + /// Generates `best_of` completions server-side and returns the "best" (the one with the highest + /// log probability per token). Results cannot be streamed. + /// + /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + /// how many to return – `best_of` must be greater than `n`. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token + /// quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + /// + /// Echo back the prompt in addition to the completion. + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + /// frequency in the text so far, decreasing the model's likelihood to repeat the same line + /// verbatim. 
+ /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an + /// associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) + /// to convert text to token IDs. Mathematically, the bias is added to the logits generated by the + /// model prior to sampling. The exact effect will vary per model, but values between -1 and 1 + /// should decrease or increase likelihood of selection; values like -100 or 100 should result in a + /// ban or exclusive selection of the relevant token. + /// + /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being + /// generated. + /// + /// + /// Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. + /// For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The + /// API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + /// elements in the response. + /// + /// The maximum value for `logprobs` is 5. + /// + /// + /// The maximum number of [tokens](/tokenizer) to generate in the completion. + /// + /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + /// + /// How many completions to generate for each prompt. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token + /// quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + /// + /// + /// Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear + /// in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// If specified, our system will make a best effort to sample deterministically, such that + /// repeated requests with the same `seed` and parameters should return the same result. + /// + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + /// parameter to monitor changes in the backend. + /// + /// Up to 4 sequences where the API will stop generating further tokens. + /// + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available, with the stream terminated by a `data: [DONE]` message. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). + /// + /// The suffix that comes after a completion of inserted text. + /// + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + /// more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + /// + /// + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers + /// the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + /// the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. 
[Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// Keeps track of any properties unknown to the library. + internal CreateCompletionRequest(CreateCompletionRequestModel model, BinaryData prompt, long? bestOf, bool? echo, double? frequencyPenalty, IDictionary logitBias, long? logprobs, long? maxTokens, long? n, double? presencePenalty, long? seed, BinaryData stop, bool? stream, string suffix, double? temperature, double? topP, string user, IDictionary serializedAdditionalRawData) + { + Model = model; + Prompt = prompt; + BestOf = bestOf; + Echo = echo; + FrequencyPenalty = frequencyPenalty; + LogitBias = logitBias; + Logprobs = logprobs; + MaxTokens = maxTokens; + N = n; + PresencePenalty = presencePenalty; + Seed = seed; + Stop = stop; + Stream = stream; + Suffix = suffix; + Temperature = temperature; + TopP = topP; + User = user; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateCompletionRequest() + { + } + + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + public CreateCompletionRequestModel Model { get; } + /// + /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of + /// tokens, or array of token arrays. + /// + /// Note that <|endoftext|> is the document separator that the model sees during training, so if a + /// prompt is not specified the model will generate as if from the beginning of a new document. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . 
+ /// + /// + /// + /// Supported types: + /// + /// + /// + /// + /// + /// where T is of type + /// + /// + /// where T is of type + /// + /// + /// where T is of type IList{long} + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData Prompt { get; } + /// + /// Generates `best_of` completions server-side and returns the "best" (the one with the highest + /// log probability per token). Results cannot be streamed. + /// + /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + /// how many to return – `best_of` must be greater than `n`. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token + /// quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + /// + public long? BestOf { get; set; } + /// Echo back the prompt in addition to the completion. + public bool? Echo { get; set; } + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + /// frequency in the text so far, decreasing the model's likelihood to repeat the same line + /// verbatim. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + public double? FrequencyPenalty { get; set; } + /// + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an + /// associated bias value from -100 to 100. 
You can use this [tokenizer tool](/tokenizer?view=bpe) + /// to convert text to token IDs. Mathematically, the bias is added to the logits generated by the + /// model prior to sampling. The exact effect will vary per model, but values between -1 and 1 + /// should decrease or increase likelihood of selection; values like -100 or 100 should result in a + /// ban or exclusive selection of the relevant token. + /// + /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being + /// generated. + /// + public IDictionary LogitBias { get; set; } + /// + /// Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. + /// For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The + /// API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + /// elements in the response. + /// + /// The maximum value for `logprobs` is 5. + /// + public long? Logprobs { get; set; } + /// + /// The maximum number of [tokens](/tokenizer) to generate in the completion. + /// + /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + public long? MaxTokens { get; set; } + /// + /// How many completions to generate for each prompt. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token + /// quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + /// + public long? N { get; set; } + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + /// in the text so far, increasing the model's likelihood to talk about new topics. 
+ /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + public double? PresencePenalty { get; set; } + /// + /// If specified, our system will make a best effort to sample deterministically, such that + /// repeated requests with the same `seed` and parameters should return the same result. + /// + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + /// parameter to monitor changes in the backend. + /// + public long? Seed { get; set; } + /// + /// Up to 4 sequences where the API will stop generating further tokens. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// + /// + /// + /// where T is of type + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData Stop { get; set; } + /// + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available, with the stream terminated by a `data: [DONE]` message. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). + /// + public bool? Stream { get; set; } + /// The suffix that comes after a completion of inserted text. 
+ public string Suffix { get; set; } + /// + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + /// more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + /// + public double? Temperature { get; set; } + /// + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers + /// the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + /// the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + /// + public double? TopP { get; set; } + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + public string User { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs b/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs new file mode 100644 index 000000000..885dde0a4 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs @@ -0,0 +1,51 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in CreateCompletionRequest. + public readonly partial struct CreateCompletionRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateCompletionRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string Gpt35TurboInstructValue = "gpt-3.5-turbo-instruct"; + private const string Davinci002Value = "davinci-002"; + private const string Babbage002Value = "babbage-002"; + + /// gpt-3.5-turbo-instruct. 
+        public static CreateCompletionRequestModel Gpt35TurboInstruct { get; } = new CreateCompletionRequestModel(Gpt35TurboInstructValue);
+        /// <summary> davinci-002. </summary>
+        public static CreateCompletionRequestModel Davinci002 { get; } = new CreateCompletionRequestModel(Davinci002Value);
+        /// <summary> babbage-002. </summary>
+        public static CreateCompletionRequestModel Babbage002 { get; } = new CreateCompletionRequestModel(Babbage002Value);
+        /// <summary> Determines if two <see cref="CreateCompletionRequestModel"/> values are the same. </summary>
+        public static bool operator ==(CreateCompletionRequestModel left, CreateCompletionRequestModel right) => left.Equals(right);
+        /// <summary> Determines if two <see cref="CreateCompletionRequestModel"/> values are not the same. </summary>
+        public static bool operator !=(CreateCompletionRequestModel left, CreateCompletionRequestModel right) => !left.Equals(right);
+        /// <summary> Converts a string to a <see cref="CreateCompletionRequestModel"/>. </summary>
+        public static implicit operator CreateCompletionRequestModel(string value) => new CreateCompletionRequestModel(value);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is CreateCompletionRequestModel other && Equals(other);
+        /// <inheritdoc />
+        public bool Equals(CreateCompletionRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value?.GetHashCode() ??
0;
+        /// <inheritdoc />
+        public override string ToString() => _value;
+    }
+}
diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs
new file mode 100644
index 000000000..3afa5cb8c
--- /dev/null
+++ b/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs
@@ -0,0 +1,200 @@
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Internal;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+
+namespace OpenAI.Models
+{
+    public partial class CreateCompletionResponse : IUtf8JsonWriteable, IJsonModel<CreateCompletionResponse>
+    {
+        void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel<CreateCompletionResponse>)this).Write(writer, new ModelReaderWriterOptions("W"));
+
+        void IJsonModel<CreateCompletionResponse>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<CreateCompletionResponse>)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(CreateCompletionResponse)} does not support '{format}' format.");
+            }
+
+            writer.WriteStartObject();
+            writer.WritePropertyName("id"u8);
+            writer.WriteStringValue(Id);
+            writer.WritePropertyName("choices"u8);
+            writer.WriteStartArray();
+            foreach (var item in Choices)
+            {
+                writer.WriteObjectValue(item);
+            }
+            writer.WriteEndArray();
+            writer.WritePropertyName("created"u8);
+            writer.WriteNumberValue(Created, "U");
+            writer.WritePropertyName("model"u8);
+            writer.WriteStringValue(Model);
+            if (OptionalProperty.IsDefined(SystemFingerprint))
+            {
+                writer.WritePropertyName("system_fingerprint"u8);
+                writer.WriteStringValue(SystemFingerprint);
+            }
+            writer.WritePropertyName("object"u8);
+            writer.WriteStringValue(Object.ToString());
+            if (OptionalProperty.IsDefined(Usage))
+            {
+                writer.WritePropertyName("usage"u8);
+                writer.WriteObjectValue(Usage);
+            }
+            if (options.Format != "W" && _serializedAdditionalRawData != null)
+            {
+ foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateCompletionResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateCompletionResponse(document.RootElement, options); + } + + internal static CreateCompletionResponse DeserializeCreateCompletionResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + IReadOnlyList choices = default; + DateTimeOffset created = default; + string model = default; + OptionalProperty systemFingerprint = default; + CreateCompletionResponseObject @object = default; + OptionalProperty usage = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("choices"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(CreateCompletionResponseChoice.DeserializeCreateCompletionResponseChoice(item)); + } + choices = array; + continue; + } + if (property.NameEquals("created"u8)) + { + created = 
DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("system_fingerprint"u8)) + { + systemFingerprint = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new CreateCompletionResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("usage"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + usage = CompletionUsage.DeserializeCompletionUsage(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateCompletionResponse(id, choices, created, model, systemFingerprint.Value, @object, usage.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateCompletionResponse)} does not support '{options.Format}' format."); + } + } + + CreateCompletionResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateCompletionResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateCompletionResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateCompletionResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateCompletionResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponse.cs b/.dotnet/src/Generated/Models/CreateCompletionResponse.cs new file mode 100644 index 000000000..0d47cab50 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionResponse.cs @@ -0,0 +1,120 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// + /// Represents a completion response from the API. Note: both the streamed and non-streamed response + /// objects share the same shape (unlike the chat endpoint). + /// + public partial class CreateCompletionResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . 
+ /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A unique identifier for the completion. + /// The list of completion choices the model generated for the input. + /// The Unix timestamp (in seconds) of when the completion was created. + /// The model used for the completion. + /// , or is null. + internal CreateCompletionResponse(string id, IEnumerable choices, DateTimeOffset created, string model) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(choices, nameof(choices)); + ClientUtilities.AssertNotNull(model, nameof(model)); + + Id = id; + Choices = choices.ToList(); + Created = created; + Model = model; + } + + /// Initializes a new instance of . + /// A unique identifier for the completion. + /// The list of completion choices the model generated for the input. + /// The Unix timestamp (in seconds) of when the completion was created. + /// The model used for the completion. + /// + /// This fingerprint represents the backend configuration that the model runs with. + /// + /// Can be used in conjunction with the `seed` request parameter to understand when backend changes + /// have been made that might impact determinism. + /// + /// The object type, which is always `text_completion`. + /// Usage statistics for the completion request. + /// Keeps track of any properties unknown to the library. 
+ internal CreateCompletionResponse(string id, IReadOnlyList choices, DateTimeOffset created, string model, string systemFingerprint, CreateCompletionResponseObject @object, CompletionUsage usage, IDictionary serializedAdditionalRawData) + { + Id = id; + Choices = choices; + Created = created; + Model = model; + SystemFingerprint = systemFingerprint; + Object = @object; + Usage = usage; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateCompletionResponse() + { + } + + /// A unique identifier for the completion. + public string Id { get; } + /// The list of completion choices the model generated for the input. + public IReadOnlyList Choices { get; } + /// The Unix timestamp (in seconds) of when the completion was created. + public DateTimeOffset Created { get; } + /// The model used for the completion. + public string Model { get; } + /// + /// This fingerprint represents the backend configuration that the model runs with. + /// + /// Can be used in conjunction with the `seed` request parameter to understand when backend changes + /// have been made that might impact determinism. + /// + public string SystemFingerprint { get; } + /// The object type, which is always `text_completion`. + public CreateCompletionResponseObject Object { get; } = CreateCompletionResponseObject.TextCompletion; + + /// Usage statistics for the completion request. 
+ public CompletionUsage Usage { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs new file mode 100644 index 000000000..d9f0caf93 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs @@ -0,0 +1,168 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateCompletionResponseChoice : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionResponseChoice)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("index"u8); + writer.WriteNumberValue(Index); + writer.WritePropertyName("text"u8); + writer.WriteStringValue(Text); + if (Logprobs != null) + { + writer.WritePropertyName("logprobs"u8); + writer.WriteObjectValue(Logprobs); + } + else + { + writer.WriteNull("logprobs"); + } + writer.WritePropertyName("finish_reason"u8); + writer.WriteStringValue(FinishReason.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + 
CreateCompletionResponseChoice IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionResponseChoice)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateCompletionResponseChoice(document.RootElement, options); + } + + internal static CreateCompletionResponseChoice DeserializeCreateCompletionResponseChoice(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long index = default; + string text = default; + CreateCompletionResponseChoiceLogprobs logprobs = default; + CreateCompletionResponseChoiceFinishReason finishReason = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("index"u8)) + { + index = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("text"u8)) + { + text = property.Value.GetString(); + continue; + } + if (property.NameEquals("logprobs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + logprobs = null; + continue; + } + logprobs = CreateCompletionResponseChoiceLogprobs.DeserializeCreateCompletionResponseChoiceLogprobs(property.Value); + continue; + } + if (property.NameEquals("finish_reason"u8)) + { + finishReason = new CreateCompletionResponseChoiceFinishReason(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; 
+ return new CreateCompletionResponseChoice(index, text, logprobs, finishReason, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateCompletionResponseChoice)} does not support '{options.Format}' format."); + } + } + + CreateCompletionResponseChoice IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateCompletionResponseChoice(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateCompletionResponseChoice)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateCompletionResponseChoice FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateCompletionResponseChoice(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs new file mode 100644 index 000000000..b38455976 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs @@ -0,0 +1,109 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateCompletionResponseChoice. + public partial class CreateCompletionResponseChoice + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a + /// natural stop point or a provided stop sequence, or `content_filter` if content was omitted + /// due to a flag from our content filters, `length` if the maximum number of tokens specified + /// in the request was reached, or `content_filter` if content was omitted due to a flag from our + /// content filters. + /// + /// is null. 
+ internal CreateCompletionResponseChoice(long index, string text, CreateCompletionResponseChoiceLogprobs logprobs, CreateCompletionResponseChoiceFinishReason finishReason) + { + ClientUtilities.AssertNotNull(text, nameof(text)); + + Index = index; + Text = text; + Logprobs = logprobs; + FinishReason = finishReason; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a + /// natural stop point or a provided stop sequence, or `content_filter` if content was omitted + /// due to a flag from our content filters, `length` if the maximum number of tokens specified + /// in the request was reached, or `content_filter` if content was omitted due to a flag from our + /// content filters. + /// + /// Keeps track of any properties unknown to the library. + internal CreateCompletionResponseChoice(long index, string text, CreateCompletionResponseChoiceLogprobs logprobs, CreateCompletionResponseChoiceFinishReason finishReason, IDictionary serializedAdditionalRawData) + { + Index = index; + Text = text; + Logprobs = logprobs; + FinishReason = finishReason; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateCompletionResponseChoice() + { + } + + /// Gets the index. + public long Index { get; } + /// Gets the text. + public string Text { get; } + /// Gets the logprobs. + public CreateCompletionResponseChoiceLogprobs Logprobs { get; } + /// + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a + /// natural stop point or a provided stop sequence, or `content_filter` if content was omitted + /// due to a flag from our content filters, `length` if the maximum number of tokens specified + /// in the request was reached, or `content_filter` if content was omitted due to a flag from our + /// content filters. 
+ /// + public CreateCompletionResponseChoiceFinishReason FinishReason { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs new file mode 100644 index 000000000..5071c4a7a --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs @@ -0,0 +1,51 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for finish_reason in CreateCompletionResponseChoice. + public readonly partial struct CreateCompletionResponseChoiceFinishReason : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateCompletionResponseChoiceFinishReason(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string StopValue = "stop"; + private const string LengthValue = "length"; + private const string ContentFilterValue = "content_filter"; + + /// stop. + public static CreateCompletionResponseChoiceFinishReason Stop { get; } = new CreateCompletionResponseChoiceFinishReason(StopValue); + /// length. + public static CreateCompletionResponseChoiceFinishReason Length { get; } = new CreateCompletionResponseChoiceFinishReason(LengthValue); + /// content_filter. + public static CreateCompletionResponseChoiceFinishReason ContentFilter { get; } = new CreateCompletionResponseChoiceFinishReason(ContentFilterValue); + /// Determines if two values are the same. + public static bool operator ==(CreateCompletionResponseChoiceFinishReason left, CreateCompletionResponseChoiceFinishReason right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateCompletionResponseChoiceFinishReason left, CreateCompletionResponseChoiceFinishReason right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator CreateCompletionResponseChoiceFinishReason(string value) => new CreateCompletionResponseChoiceFinishReason(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateCompletionResponseChoiceFinishReason other && Equals(other); + /// + public bool Equals(CreateCompletionResponseChoiceFinishReason other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs new file mode 100644 index 000000000..89dcafec8 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs @@ -0,0 +1,219 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateCompletionResponseChoiceLogprobs : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionResponseChoiceLogprobs)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("tokens"u8); + writer.WriteStartArray(); + foreach (var item in Tokens) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("token_logprobs"u8); + writer.WriteStartArray(); + foreach (var item in TokenLogprobs) + { + writer.WriteNumberValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("top_logprobs"u8); + writer.WriteStartArray(); + foreach (var item in TopLogprobs) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } + writer.WriteStartObject(); + foreach (var item0 in item) + { + writer.WritePropertyName(item0.Key); + writer.WriteNumberValue(item0.Value); + } + writer.WriteEndObject(); + } + writer.WriteEndArray(); + writer.WritePropertyName("text_offset"u8); + writer.WriteStartArray(); + foreach (var item in TextOffset) + { + writer.WriteNumberValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateCompletionResponseChoiceLogprobs IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionResponseChoiceLogprobs)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateCompletionResponseChoiceLogprobs(document.RootElement, options); + } + + internal static CreateCompletionResponseChoiceLogprobs DeserializeCreateCompletionResponseChoiceLogprobs(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList tokens = default; + IReadOnlyList tokenLogprobs = default; + IReadOnlyList> topLogprobs = default; + IReadOnlyList textOffset = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("tokens"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + tokens = array; + continue; + } + if (property.NameEquals("token_logprobs"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetDouble()); + } + tokenLogprobs = array; + continue; + } + if (property.NameEquals("top_logprobs"u8)) + { + List> array = new List>(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + Dictionary dictionary = new Dictionary(); + foreach (var property0 in item.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetInt64()); + } + array.Add(dictionary); + } + } + topLogprobs = array; + continue; + } + if (property.NameEquals("text_offset"u8)) + { + List array = new List(); + foreach (var item in 
property.Value.EnumerateArray()) + { + array.Add(item.GetInt64()); + } + textOffset = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateCompletionResponseChoiceLogprobs(tokens, tokenLogprobs, topLogprobs, textOffset, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateCompletionResponseChoiceLogprobs)} does not support '{options.Format}' format."); + } + } + + CreateCompletionResponseChoiceLogprobs IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateCompletionResponseChoiceLogprobs(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateCompletionResponseChoiceLogprobs)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateCompletionResponseChoiceLogprobs FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateCompletionResponseChoiceLogprobs(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs new file mode 100644 index 000000000..5b88d866b --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs @@ -0,0 +1,95 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The CreateCompletionResponseChoiceLogprobs. + public partial class CreateCompletionResponseChoiceLogprobs + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , , or is null. 
+ internal CreateCompletionResponseChoiceLogprobs(IEnumerable tokens, IEnumerable tokenLogprobs, IEnumerable> topLogprobs, IEnumerable textOffset) + { + ClientUtilities.AssertNotNull(tokens, nameof(tokens)); + ClientUtilities.AssertNotNull(tokenLogprobs, nameof(tokenLogprobs)); + ClientUtilities.AssertNotNull(topLogprobs, nameof(topLogprobs)); + ClientUtilities.AssertNotNull(textOffset, nameof(textOffset)); + + Tokens = tokens.ToList(); + TokenLogprobs = tokenLogprobs.ToList(); + TopLogprobs = topLogprobs.ToList(); + TextOffset = textOffset.ToList(); + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal CreateCompletionResponseChoiceLogprobs(IReadOnlyList tokens, IReadOnlyList tokenLogprobs, IReadOnlyList> topLogprobs, IReadOnlyList textOffset, IDictionary serializedAdditionalRawData) + { + Tokens = tokens; + TokenLogprobs = tokenLogprobs; + TopLogprobs = topLogprobs; + TextOffset = textOffset; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateCompletionResponseChoiceLogprobs() + { + } + + /// Gets the tokens. + public IReadOnlyList Tokens { get; } + /// Gets the token logprobs. + public IReadOnlyList TokenLogprobs { get; } + /// Gets the top logprobs. + public IReadOnlyList> TopLogprobs { get; } + /// Gets the text offset. + public IReadOnlyList TextOffset { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs new file mode 100644 index 000000000..3185d279a --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The CreateCompletionResponse_object. 
+ public readonly partial struct CreateCompletionResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateCompletionResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TextCompletionValue = "text_completion"; + + /// text_completion. + public static CreateCompletionResponseObject TextCompletion { get; } = new CreateCompletionResponseObject(TextCompletionValue); + /// Determines if two values are the same. + public static bool operator ==(CreateCompletionResponseObject left, CreateCompletionResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateCompletionResponseObject left, CreateCompletionResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateCompletionResponseObject(string value) => new CreateCompletionResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateCompletionResponseObject other && Equals(other); + /// + public bool Equals(CreateCompletionResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs new file mode 100644 index 000000000..4601bcc63 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs @@ -0,0 +1,188 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateEmbeddingRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("input"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(Input); +#else + using (JsonDocument document = JsonDocument.Parse(Input)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.ToString()); + if (OptionalProperty.IsDefined(EncodingFormat)) + { + writer.WritePropertyName("encoding_format"u8); + writer.WriteStringValue(EncodingFormat.Value.ToString()); + } + if (OptionalProperty.IsDefined(Dimensions)) + { + writer.WritePropertyName("dimensions"u8); + writer.WriteNumberValue(Dimensions.Value); + } + if (OptionalProperty.IsDefined(User)) + { + writer.WritePropertyName("user"u8); + writer.WriteStringValue(User); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item 
in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateEmbeddingRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateEmbeddingRequest(document.RootElement, options); + } + + internal static CreateEmbeddingRequest DeserializeCreateEmbeddingRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData input = default; + CreateEmbeddingRequestModel model = default; + OptionalProperty encodingFormat = default; + OptionalProperty dimensions = default; + OptionalProperty user = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("input"u8)) + { + input = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("model"u8)) + { + model = new CreateEmbeddingRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("encoding_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + encodingFormat = new CreateEmbeddingRequestEncodingFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("dimensions"u8)) + { + 
if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + dimensions = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("user"u8)) + { + user = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateEmbeddingRequest(input, model, OptionalProperty.ToNullable(encodingFormat), OptionalProperty.ToNullable(dimensions), user.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{options.Format}' format."); + } + } + + CreateEmbeddingRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateEmbeddingRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. 
+ internal static CreateEmbeddingRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateEmbeddingRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs new file mode 100644 index 000000000..770cc70db --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs @@ -0,0 +1,186 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateEmbeddingRequest. + public partial class CreateEmbeddingRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + /// single request, pass an array of strings or array of token arrays. 
Each input must not exceed + /// the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + /// empty string. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// is null. + public CreateEmbeddingRequest(BinaryData input, CreateEmbeddingRequestModel model) + { + ClientUtilities.AssertNotNull(input, nameof(input)); + + Input = input; + Model = model; + } + + /// Initializes a new instance of . + /// + /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + /// single request, pass an array of strings or array of token arrays. Each input must not exceed + /// the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + /// empty string. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// + /// The format to return the embeddings in. Can be either `float` or + /// [`base64`](https://pypi.org/project/pybase64/). + /// + /// + /// The number of dimensions the resulting output embeddings should have. Only supported in + /// `text-embedding-3` and later models. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). 
+ /// + /// Keeps track of any properties unknown to the library. + internal CreateEmbeddingRequest(BinaryData input, CreateEmbeddingRequestModel model, CreateEmbeddingRequestEncodingFormat? encodingFormat, long? dimensions, string user, IDictionary serializedAdditionalRawData) + { + Input = input; + Model = model; + EncodingFormat = encodingFormat; + Dimensions = dimensions; + User = user; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateEmbeddingRequest() + { + } + + /// + /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + /// single request, pass an array of strings or array of token arrays. Each input must not exceed + /// the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + /// empty string. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// + /// + /// + /// where T is of type + /// + /// + /// where T is of type + /// + /// + /// where T is of type IList{long} + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData Input { get; } + /// + /// ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + public CreateEmbeddingRequestModel Model { get; } + /// + /// The format to return the embeddings in. Can be either `float` or + /// [`base64`](https://pypi.org/project/pybase64/). + /// + public CreateEmbeddingRequestEncodingFormat? EncodingFormat { get; set; } + /// + /// The number of dimensions the resulting output embeddings should have. Only supported in + /// `text-embedding-3` and later models. + /// + public long? Dimensions { get; set; } + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + public string User { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs new file mode 100644 index 000000000..23debf648 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for encoding_format in CreateEmbeddingRequest. + public readonly partial struct CreateEmbeddingRequestEncodingFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateEmbeddingRequestEncodingFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FloatValue = "float"; + private const string Base64Value = "base64"; + + /// float. + public static CreateEmbeddingRequestEncodingFormat Float { get; } = new CreateEmbeddingRequestEncodingFormat(FloatValue); + /// base64. 
+ public static CreateEmbeddingRequestEncodingFormat Base64 { get; } = new CreateEmbeddingRequestEncodingFormat(Base64Value); + /// Determines if two values are the same. + public static bool operator ==(CreateEmbeddingRequestEncodingFormat left, CreateEmbeddingRequestEncodingFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateEmbeddingRequestEncodingFormat left, CreateEmbeddingRequestEncodingFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateEmbeddingRequestEncodingFormat(string value) => new CreateEmbeddingRequestEncodingFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateEmbeddingRequestEncodingFormat other && Equals(other); + /// + public bool Equals(CreateEmbeddingRequestEncodingFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs new file mode 100644 index 000000000..1b65ef4e0 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs @@ -0,0 +1,51 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in CreateEmbeddingRequest. + public readonly partial struct CreateEmbeddingRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateEmbeddingRequestModel(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string TextEmbeddingAda002Value = "text-embedding-ada-002"; + private const string TextEmbedding3SmallValue = "text-embedding-3-small"; + private const string TextEmbedding3LargeValue = "text-embedding-3-large"; + + /// text-embedding-ada-002. + public static CreateEmbeddingRequestModel TextEmbeddingAda002 { get; } = new CreateEmbeddingRequestModel(TextEmbeddingAda002Value); + /// text-embedding-3-small. + public static CreateEmbeddingRequestModel TextEmbedding3Small { get; } = new CreateEmbeddingRequestModel(TextEmbedding3SmallValue); + /// text-embedding-3-large. + public static CreateEmbeddingRequestModel TextEmbedding3Large { get; } = new CreateEmbeddingRequestModel(TextEmbedding3LargeValue); + /// Determines if two values are the same. + public static bool operator ==(CreateEmbeddingRequestModel left, CreateEmbeddingRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateEmbeddingRequestModel left, CreateEmbeddingRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateEmbeddingRequestModel(string value) => new CreateEmbeddingRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateEmbeddingRequestModel other && Equals(other); + /// + public bool Equals(CreateEmbeddingRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs new file mode 100644 index 000000000..78595eb7e --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs @@ -0,0 +1,166 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateEmbeddingResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("usage"u8); + writer.WriteObjectValue(Usage); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateEmbeddingResponse 
IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateEmbeddingResponse(document.RootElement, options); + } + + internal static CreateEmbeddingResponse DeserializeCreateEmbeddingResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList data = default; + string model = default; + CreateEmbeddingResponseObject @object = default; + CreateEmbeddingResponseUsage usage = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(Embedding.DeserializeEmbedding(item)); + } + data = array; + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new CreateEmbeddingResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("usage"u8)) + { + usage = CreateEmbeddingResponseUsage.DeserializeCreateEmbeddingResponseUsage(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateEmbeddingResponse(data, model, @object, usage, serializedAdditionalRawData); + } + + 
BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{options.Format}' format."); + } + } + + CreateEmbeddingResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateEmbeddingResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateEmbeddingResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateEmbeddingResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs new file mode 100644 index 000000000..bf7e741ca --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs @@ -0,0 +1,93 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The CreateEmbeddingResponse. + public partial class CreateEmbeddingResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The list of embeddings generated by the model. + /// The name of the model used to generate the embedding. + /// The usage information for the request. + /// , or is null. + internal CreateEmbeddingResponse(IEnumerable data, string model, CreateEmbeddingResponseUsage usage) + { + ClientUtilities.AssertNotNull(data, nameof(data)); + ClientUtilities.AssertNotNull(model, nameof(model)); + ClientUtilities.AssertNotNull(usage, nameof(usage)); + + Data = data.ToList(); + Model = model; + Usage = usage; + } + + /// Initializes a new instance of . 
+ /// The list of embeddings generated by the model. + /// The name of the model used to generate the embedding. + /// The object type, which is always "list". + /// The usage information for the request. + /// Keeps track of any properties unknown to the library. + internal CreateEmbeddingResponse(IReadOnlyList data, string model, CreateEmbeddingResponseObject @object, CreateEmbeddingResponseUsage usage, IDictionary serializedAdditionalRawData) + { + Data = data; + Model = model; + Object = @object; + Usage = usage; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateEmbeddingResponse() + { + } + + /// The list of embeddings generated by the model. + public IReadOnlyList Data { get; } + /// The name of the model used to generate the embedding. + public string Model { get; } + /// The object type, which is always "list". + public CreateEmbeddingResponseObject Object { get; } = CreateEmbeddingResponseObject.List; + + /// The usage information for the request. + public CreateEmbeddingResponseUsage Usage { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs new file mode 100644 index 000000000..82ba92eec --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The CreateEmbeddingResponse_object. + public readonly partial struct CreateEmbeddingResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateEmbeddingResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. 
+ public static CreateEmbeddingResponseObject List { get; } = new CreateEmbeddingResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(CreateEmbeddingResponseObject left, CreateEmbeddingResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateEmbeddingResponseObject left, CreateEmbeddingResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateEmbeddingResponseObject(string value) => new CreateEmbeddingResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateEmbeddingResponseObject other && Equals(other); + /// + public bool Equals(CreateEmbeddingResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.Serialization.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.Serialization.cs new file mode 100644 index 000000000..c662893ed --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.Serialization.cs @@ -0,0 +1,140 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateEmbeddingResponseUsage : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateEmbeddingResponseUsage)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("prompt_tokens"u8); + writer.WriteNumberValue(PromptTokens); + writer.WritePropertyName("total_tokens"u8); + writer.WriteNumberValue(TotalTokens); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateEmbeddingResponseUsage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateEmbeddingResponseUsage)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateEmbeddingResponseUsage(document.RootElement, options); + } + + internal static CreateEmbeddingResponseUsage DeserializeCreateEmbeddingResponseUsage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long promptTokens = default; + long totalTokens = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("prompt_tokens"u8)) + { + promptTokens = property.Value.GetInt64(); + continue; + } + if 
(property.NameEquals("total_tokens"u8)) + { + totalTokens = property.Value.GetInt64(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateEmbeddingResponseUsage(promptTokens, totalTokens, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateEmbeddingResponseUsage)} does not support '{options.Format}' format."); + } + } + + CreateEmbeddingResponseUsage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateEmbeddingResponseUsage(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateEmbeddingResponseUsage)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateEmbeddingResponseUsage FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateEmbeddingResponseUsage(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.cs new file mode 100644 index 000000000..d21b5b9e9 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.cs @@ -0,0 +1,75 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateEmbeddingResponseUsage. + public partial class CreateEmbeddingResponseUsage + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The number of tokens used by the prompt. + /// The total number of tokens used by the request. + internal CreateEmbeddingResponseUsage(long promptTokens, long totalTokens) + { + PromptTokens = promptTokens; + TotalTokens = totalTokens; + } + + /// Initializes a new instance of . + /// The number of tokens used by the prompt. + /// The total number of tokens used by the request. + /// Keeps track of any properties unknown to the library. 
+ internal CreateEmbeddingResponseUsage(long promptTokens, long totalTokens, IDictionary serializedAdditionalRawData) + { + PromptTokens = promptTokens; + TotalTokens = totalTokens; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateEmbeddingResponseUsage() + { + } + + /// The number of tokens used by the prompt. + public long PromptTokens { get; } + /// The total number of tokens used by the request. + public long TotalTokens { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs new file mode 100644 index 000000000..6f2012c20 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs @@ -0,0 +1,140 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateFileRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFileRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("file"u8); + writer.WriteBase64StringValue(File.ToArray(), "D"); + writer.WritePropertyName("purpose"u8); + writer.WriteStringValue(Purpose.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateFileRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFileRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateFileRequest(document.RootElement, options); + } + + internal static CreateFileRequest DeserializeCreateFileRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData file = default; + CreateFileRequestPurpose purpose = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("file"u8)) + { + file = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } + if 
(property.NameEquals("purpose"u8)) + { + purpose = new CreateFileRequestPurpose(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateFileRequest(file, purpose, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateFileRequest)} does not support '{options.Format}' format."); + } + } + + CreateFileRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateFileRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateFileRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateFileRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateFileRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFileRequest.cs b/.dotnet/src/Generated/Models/CreateFileRequest.cs new file mode 100644 index 000000000..9e10579a6 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFileRequest.cs @@ -0,0 +1,109 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateFileRequest. + public partial class CreateFileRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The file object (not file name) to be uploaded. + /// + /// The intended purpose of the uploaded file. Use "fine-tune" for + /// [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for + /// [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This + /// allows us to validate the format of the uploaded file is correct for fine-tuning. + /// + /// is null. 
+ public CreateFileRequest(BinaryData file, CreateFileRequestPurpose purpose) + { + ClientUtilities.AssertNotNull(file, nameof(file)); + + File = file; + Purpose = purpose; + } + + /// Initializes a new instance of . + /// The file object (not file name) to be uploaded. + /// + /// The intended purpose of the uploaded file. Use "fine-tune" for + /// [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for + /// [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This + /// allows us to validate the format of the uploaded file is correct for fine-tuning. + /// + /// Keeps track of any properties unknown to the library. + internal CreateFileRequest(BinaryData file, CreateFileRequestPurpose purpose, IDictionary serializedAdditionalRawData) + { + File = file; + Purpose = purpose; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateFileRequest() + { + } + + /// + /// The file object (not file name) to be uploaded. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData File { get; } + /// + /// The intended purpose of the uploaded file. Use "fine-tune" for + /// [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for + /// [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This + /// allows us to validate the format of the uploaded file is correct for fine-tuning. 
+ /// + public CreateFileRequestPurpose Purpose { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs b/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs new file mode 100644 index 000000000..866087c3b --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for purpose in CreateFileRequest. + public readonly partial struct CreateFileRequestPurpose : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateFileRequestPurpose(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FineTuneValue = "fine-tune"; + private const string AssistantsValue = "assistants"; + + /// fine-tune. + public static CreateFileRequestPurpose FineTune { get; } = new CreateFileRequestPurpose(FineTuneValue); + /// assistants. + public static CreateFileRequestPurpose Assistants { get; } = new CreateFileRequestPurpose(AssistantsValue); + /// Determines if two values are the same. + public static bool operator ==(CreateFileRequestPurpose left, CreateFileRequestPurpose right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateFileRequestPurpose left, CreateFileRequestPurpose right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator CreateFileRequestPurpose(string value) => new CreateFileRequestPurpose(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateFileRequestPurpose other && Equals(other); + /// + public bool Equals(CreateFileRequestPurpose other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateFineTuneRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateFineTuneRequest.Serialization.cs new file mode 100644 index 000000000..c83a0246a --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFineTuneRequest.Serialization.cs @@ -0,0 +1,386 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateFineTuneRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFineTuneRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("training_file"u8); + writer.WriteStringValue(TrainingFile); + if (OptionalProperty.IsDefined(ValidationFile)) + { + if (ValidationFile != null) + { + writer.WritePropertyName("validation_file"u8); + writer.WriteStringValue(ValidationFile); + } + else + { + writer.WriteNull("validation_file"); + } + } + if (OptionalProperty.IsDefined(Model)) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.Value.ToString()); + } + if (OptionalProperty.IsDefined(NEpochs)) + { + if (NEpochs != null) + { + writer.WritePropertyName("n_epochs"u8); + writer.WriteNumberValue(NEpochs.Value); + } + else + { + writer.WriteNull("n_epochs"); + } + } + if (OptionalProperty.IsDefined(BatchSize)) + { + if (BatchSize != null) + { + writer.WritePropertyName("batch_size"u8); + writer.WriteNumberValue(BatchSize.Value); + } + else + { + writer.WriteNull("batch_size"); + } + } + if (OptionalProperty.IsDefined(LearningRateMultiplier)) + { + if (LearningRateMultiplier != null) + { + writer.WritePropertyName("learning_rate_multiplier"u8); + writer.WriteNumberValue(LearningRateMultiplier.Value); + } + else + { + writer.WriteNull("learning_rate_multiplier"); + } + } + if (OptionalProperty.IsDefined(PromptLossRate)) + { + if (PromptLossRate != null) + { + writer.WritePropertyName("prompt_loss_rate"u8); + writer.WriteNumberValue(PromptLossRate.Value); + } + else + { + writer.WriteNull("prompt_loss_rate"); + } + } + if (OptionalProperty.IsDefined(ComputeClassificationMetrics)) + { + if (ComputeClassificationMetrics != null) + { + writer.WritePropertyName("compute_classification_metrics"u8); + writer.WriteBooleanValue(ComputeClassificationMetrics.Value); + } + else + { + 
writer.WriteNull("compute_classification_metrics"); + } + } + if (OptionalProperty.IsDefined(ClassificationNClasses)) + { + if (ClassificationNClasses != null) + { + writer.WritePropertyName("classification_n_classes"u8); + writer.WriteNumberValue(ClassificationNClasses.Value); + } + else + { + writer.WriteNull("classification_n_classes"); + } + } + if (OptionalProperty.IsDefined(ClassificationPositiveClass)) + { + if (ClassificationPositiveClass != null) + { + writer.WritePropertyName("classification_positive_class"u8); + writer.WriteStringValue(ClassificationPositiveClass); + } + else + { + writer.WriteNull("classification_positive_class"); + } + } + if (OptionalProperty.IsCollectionDefined(ClassificationBetas)) + { + if (ClassificationBetas != null) + { + writer.WritePropertyName("classification_betas"u8); + writer.WriteStartArray(); + foreach (var item in ClassificationBetas) + { + writer.WriteNumberValue(item); + } + writer.WriteEndArray(); + } + else + { + writer.WriteNull("classification_betas"); + } + } + if (OptionalProperty.IsDefined(Suffix)) + { + if (Suffix != null) + { + writer.WritePropertyName("suffix"u8); + writer.WriteStringValue(Suffix); + } + else + { + writer.WriteNull("suffix"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateFineTuneRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFineTuneRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateFineTuneRequest(document.RootElement, options); + } + + internal static CreateFineTuneRequest DeserializeCreateFineTuneRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string trainingFile = default; + OptionalProperty validationFile = default; + OptionalProperty model = default; + OptionalProperty nEpochs = default; + OptionalProperty batchSize = default; + OptionalProperty learningRateMultiplier = default; + OptionalProperty promptLossRate = default; + OptionalProperty computeClassificationMetrics = default; + OptionalProperty classificationNClasses = default; + OptionalProperty classificationPositiveClass = default; + OptionalProperty> classificationBetas = default; + OptionalProperty suffix = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("training_file"u8)) + { + trainingFile = property.Value.GetString(); + continue; + } + if (property.NameEquals("validation_file"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + validationFile = null; + continue; + } + validationFile = property.Value.GetString(); + continue; + } + if (property.NameEquals("model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + model = new CreateFineTuneRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("n_epochs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + nEpochs = null; + 
continue; + } + nEpochs = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("batch_size"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + batchSize = null; + continue; + } + batchSize = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("learning_rate_multiplier"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + learningRateMultiplier = null; + continue; + } + learningRateMultiplier = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("prompt_loss_rate"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + promptLossRate = null; + continue; + } + promptLossRate = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("compute_classification_metrics"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + computeClassificationMetrics = null; + continue; + } + computeClassificationMetrics = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("classification_n_classes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + classificationNClasses = null; + continue; + } + classificationNClasses = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("classification_positive_class"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + classificationPositiveClass = null; + continue; + } + classificationPositiveClass = property.Value.GetString(); + continue; + } + if (property.NameEquals("classification_betas"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetDouble()); + } + classificationBetas = array; + continue; + } + if (property.NameEquals("suffix"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + suffix = null; + continue; + } + suffix = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + 
additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateFineTuneRequest(trainingFile, validationFile.Value, OptionalProperty.ToNullable(model), OptionalProperty.ToNullable(nEpochs), OptionalProperty.ToNullable(batchSize), OptionalProperty.ToNullable(learningRateMultiplier), OptionalProperty.ToNullable(promptLossRate), OptionalProperty.ToNullable(computeClassificationMetrics), OptionalProperty.ToNullable(classificationNClasses), classificationPositiveClass.Value, OptionalProperty.ToList(classificationBetas), suffix.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateFineTuneRequest)} does not support '{options.Format}' format."); + } + } + + CreateFineTuneRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateFineTuneRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateFineTuneRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. 
+ internal static CreateFineTuneRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateFineTuneRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFineTuneRequest.cs b/.dotnet/src/Generated/Models/CreateFineTuneRequest.cs new file mode 100644 index 000000000..31b410dcb --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFineTuneRequest.cs @@ -0,0 +1,295 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateFineTuneRequest. + public partial class CreateFineTuneRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file, where each training example is a JSON object + /// with the keys "prompt" and "completion". 
Additionally, you must upload your file with the + /// purpose `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + /// details. + /// + /// is null. + public CreateFineTuneRequest(string trainingFile) + { + ClientUtilities.AssertNotNull(trainingFile, nameof(trainingFile)); + + TrainingFile = trainingFile; + ClassificationBetas = new OptionalList(); + } + + /// Initializes a new instance of . + /// + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file, where each training example is a JSON object + /// with the keys "prompt" and "completion". Additionally, you must upload your file with the + /// purpose `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + /// details. + /// + /// + /// The ID of an uploaded file that contains validation data. + /// + /// If you provide this file, the data is used to generate validation metrics periodically during + /// fine-tuning. These metrics can be viewed in the + /// [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + /// Your train and validation data should be mutually exclusive. + /// + /// Your dataset must be formatted as a JSONL file, where each validation example is a JSON object + /// with the keys "prompt" and "completion". Additionally, you must upload your file with the + /// purpose `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + /// details. + /// + /// + /// The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", + /// "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. 
To learn more + /// about these models, see the [Models](/docs/models) documentation. + /// + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// + /// The batch size to use for training. The batch size is the number of training examples used to + /// train a single forward and backward pass. + /// + /// By default, the batch size will be dynamically configured to be ~0.2% of the number of examples + /// in the training set, capped at 256 - in general, we've found that larger batch sizes tend to + /// work better for larger datasets. + /// + /// + /// The learning rate multiplier to use for training. The fine-tuning learning rate is the original + /// learning rate used for pretraining multiplied by this value. + /// + /// By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final + /// `batch_size` (larger learning rates tend to perform better with larger batch sizes). We + /// recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best + /// results. + /// + /// + /// The weight to use for loss on the prompt tokens. This controls how much the model tries to + /// learn to generate the prompt (as compared to the completion which always has a weight of 1.0), + /// and can add a stabilizing effect to training when completions are short. + /// + /// If prompts are extremely long (relative to completions), it may make sense to reduce this + /// weight so as to avoid over-prioritizing learning the prompt. + /// + /// + /// If set, we calculate classification-specific metrics such as accuracy and F-1 score using the + /// validation set at the end of every epoch. These metrics can be viewed in the + /// [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + /// + /// In order to compute classification metrics, you must provide a `validation_file`. 
Additionally, + /// you must specify `classification_n_classes` for multiclass classification or + /// `classification_positive_class` for binary classification. + /// + /// + /// The number of classes in a classification task. + /// + /// This parameter is required for multiclass classification. + /// + /// + /// The positive class in binary classification. + /// + /// This parameter is needed to generate precision, recall, and F1 metrics when doing binary + /// classification. + /// + /// + /// If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score + /// is a generalization of F-1 score. This is only used for binary classification. + /// + /// With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger + /// beta score puts more weight on recall and less on precision. A smaller beta score puts more + /// weight on precision and less on recall. + /// + /// + /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// + /// For example, a `suffix` of "custom-model-name" would produce a model name like + /// `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. + /// + /// Keeps track of any properties unknown to the library. + internal CreateFineTuneRequest(string trainingFile, string validationFile, CreateFineTuneRequestModel? model, long? nEpochs, long? batchSize, double? learningRateMultiplier, double? promptLossRate, bool? computeClassificationMetrics, long? 
classificationNClasses, string classificationPositiveClass, IList classificationBetas, string suffix, IDictionary serializedAdditionalRawData) + { + TrainingFile = trainingFile; + ValidationFile = validationFile; + Model = model; + NEpochs = nEpochs; + BatchSize = batchSize; + LearningRateMultiplier = learningRateMultiplier; + PromptLossRate = promptLossRate; + ComputeClassificationMetrics = computeClassificationMetrics; + ClassificationNClasses = classificationNClasses; + ClassificationPositiveClass = classificationPositiveClass; + ClassificationBetas = classificationBetas; + Suffix = suffix; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateFineTuneRequest() + { + } + + /// + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file, where each training example is a JSON object + /// with the keys "prompt" and "completion". Additionally, you must upload your file with the + /// purpose `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + /// details. + /// + public string TrainingFile { get; } + /// + /// The ID of an uploaded file that contains validation data. + /// + /// If you provide this file, the data is used to generate validation metrics periodically during + /// fine-tuning. These metrics can be viewed in the + /// [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + /// Your train and validation data should be mutually exclusive. + /// + /// Your dataset must be formatted as a JSONL file, where each validation example is a JSON object + /// with the keys "prompt" and "completion". Additionally, you must upload your file with the + /// purpose `fine-tune`. 
+ /// + /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + /// details. + /// + public string ValidationFile { get; set; } + /// + /// The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", + /// "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more + /// about these models, see the [Models](/docs/models) documentation. + /// + public CreateFineTuneRequestModel? Model { get; set; } + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + public long? NEpochs { get; set; } + /// + /// The batch size to use for training. The batch size is the number of training examples used to + /// train a single forward and backward pass. + /// + /// By default, the batch size will be dynamically configured to be ~0.2% of the number of examples + /// in the training set, capped at 256 - in general, we've found that larger batch sizes tend to + /// work better for larger datasets. + /// + public long? BatchSize { get; set; } + /// + /// The learning rate multiplier to use for training. The fine-tuning learning rate is the original + /// learning rate used for pretraining multiplied by this value. + /// + /// By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final + /// `batch_size` (larger learning rates tend to perform better with larger batch sizes). We + /// recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best + /// results. + /// + public double? LearningRateMultiplier { get; set; } + /// + /// The weight to use for loss on the prompt tokens. This controls how much the model tries to + /// learn to generate the prompt (as compared to the completion which always has a weight of 1.0), + /// and can add a stabilizing effect to training when completions are short. 
+ /// + /// If prompts are extremely long (relative to completions), it may make sense to reduce this + /// weight so as to avoid over-prioritizing learning the prompt. + /// + public double? PromptLossRate { get; set; } + /// + /// If set, we calculate classification-specific metrics such as accuracy and F-1 score using the + /// validation set at the end of every epoch. These metrics can be viewed in the + /// [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + /// + /// In order to compute classification metrics, you must provide a `validation_file`. Additionally, + /// you must specify `classification_n_classes` for multiclass classification or + /// `classification_positive_class` for binary classification. + /// + public bool? ComputeClassificationMetrics { get; set; } + /// + /// The number of classes in a classification task. + /// + /// This parameter is required for multiclass classification. + /// + public long? ClassificationNClasses { get; set; } + /// + /// The positive class in binary classification. + /// + /// This parameter is needed to generate precision, recall, and F1 metrics when doing binary + /// classification. + /// + public string ClassificationPositiveClass { get; set; } + /// + /// If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score + /// is a generalization of F-1 score. This is only used for binary classification. + /// + /// With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger + /// beta score puts more weight on recall and less on precision. A smaller beta score puts more + /// weight on precision and less on recall. + /// + public IList ClassificationBetas { get; set; } + /// + /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// + /// For example, a `suffix` of "custom-model-name" would produce a model name like + /// `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. 
+ /// + public string Suffix { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFineTuneRequestModel.cs b/.dotnet/src/Generated/Models/CreateFineTuneRequestModel.cs new file mode 100644 index 000000000..de5b6f368 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFineTuneRequestModel.cs @@ -0,0 +1,54 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in CreateFineTuneRequest. + public readonly partial struct CreateFineTuneRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateFineTuneRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AdaValue = "ada"; + private const string BabbageValue = "babbage"; + private const string CurieValue = "curie"; + private const string DavinciValue = "davinci"; + + /// ada. + public static CreateFineTuneRequestModel Ada { get; } = new CreateFineTuneRequestModel(AdaValue); + /// babbage. + public static CreateFineTuneRequestModel Babbage { get; } = new CreateFineTuneRequestModel(BabbageValue); + /// curie. + public static CreateFineTuneRequestModel Curie { get; } = new CreateFineTuneRequestModel(CurieValue); + /// davinci. + public static CreateFineTuneRequestModel Davinci { get; } = new CreateFineTuneRequestModel(DavinciValue); + /// Determines if two values are the same. + public static bool operator ==(CreateFineTuneRequestModel left, CreateFineTuneRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateFineTuneRequestModel left, CreateFineTuneRequestModel right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator CreateFineTuneRequestModel(string value) => new CreateFineTuneRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateFineTuneRequestModel other && Equals(other); + /// + public bool Equals(CreateFineTuneRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs new file mode 100644 index 000000000..6d26b67fc --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs @@ -0,0 +1,201 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateFineTuningJobRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFineTuningJobRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("training_file"u8); + writer.WriteStringValue(TrainingFile); + if (OptionalProperty.IsDefined(ValidationFile)) + { + if (ValidationFile != null) + { + writer.WritePropertyName("validation_file"u8); + writer.WriteStringValue(ValidationFile); + } + else + { + writer.WriteNull("validation_file"); + } + } + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.ToString()); + if (OptionalProperty.IsDefined(Hyperparameters)) + { + writer.WritePropertyName("hyperparameters"u8); + writer.WriteObjectValue(Hyperparameters); + } + if (OptionalProperty.IsDefined(Suffix)) + { + if (Suffix != null) + { + writer.WritePropertyName("suffix"u8); + writer.WriteStringValue(Suffix); + } + else + { + writer.WriteNull("suffix"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateFineTuningJobRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFineTuningJobRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateFineTuningJobRequest(document.RootElement, options); + } + + internal static CreateFineTuningJobRequest DeserializeCreateFineTuningJobRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string trainingFile = default; + OptionalProperty validationFile = default; + CreateFineTuningJobRequestModel model = default; + OptionalProperty hyperparameters = default; + OptionalProperty suffix = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("training_file"u8)) + { + trainingFile = property.Value.GetString(); + continue; + } + if (property.NameEquals("validation_file"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + validationFile = null; + continue; + } + validationFile = property.Value.GetString(); + continue; + } + if (property.NameEquals("model"u8)) + { + model = new CreateFineTuningJobRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("hyperparameters"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + hyperparameters = CreateFineTuningJobRequestHyperparameters.DeserializeCreateFineTuningJobRequestHyperparameters(property.Value); + continue; + } + if (property.NameEquals("suffix"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + suffix = null; + continue; + } + suffix = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + 
additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateFineTuningJobRequest(trainingFile, validationFile.Value, model, hyperparameters.Value, suffix.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateFineTuningJobRequest)} does not support '{options.Format}' format."); + } + } + + CreateFineTuningJobRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateFineTuningJobRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateFineTuningJobRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateFineTuningJobRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateFineTuningJobRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs new file mode 100644 index 000000000..7157e813c --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs @@ -0,0 +1,159 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateFineTuningJobRequest. + public partial class CreateFineTuningJobRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + /// the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// + /// The name of the model to fine-tune. You can select one of the + /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). 
+ /// + /// is null. + public CreateFineTuningJobRequest(string trainingFile, CreateFineTuningJobRequestModel model) + { + ClientUtilities.AssertNotNull(trainingFile, nameof(trainingFile)); + + TrainingFile = trainingFile; + Model = model; + } + + /// Initializes a new instance of . + /// + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + /// the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// + /// The ID of an uploaded file that contains validation data. + /// + /// If you provide this file, the data is used to generate validation metrics periodically during + /// fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + /// not be present in both train and validation files. + /// + /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + /// `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// + /// The name of the model to fine-tune. You can select one of the + /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + /// + /// The hyperparameters used for the fine-tuning job. + /// + /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// + /// For example, a `suffix` of "custom-model-name" would produce a model name like + /// `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// + /// Keeps track of any properties unknown to the library. 
+ internal CreateFineTuningJobRequest(string trainingFile, string validationFile, CreateFineTuningJobRequestModel model, CreateFineTuningJobRequestHyperparameters hyperparameters, string suffix, IDictionary serializedAdditionalRawData) + { + TrainingFile = trainingFile; + ValidationFile = validationFile; + Model = model; + Hyperparameters = hyperparameters; + Suffix = suffix; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateFineTuningJobRequest() + { + } + + /// + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + /// the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + public string TrainingFile { get; } + /// + /// The ID of an uploaded file that contains validation data. + /// + /// If you provide this file, the data is used to generate validation metrics periodically during + /// fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + /// not be present in both train and validation files. + /// + /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + /// `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + public string ValidationFile { get; set; } + /// + /// The name of the model to fine-tune. You can select one of the + /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + /// + public CreateFineTuningJobRequestModel Model { get; } + /// The hyperparameters used for the fine-tuning job. 
+ public CreateFineTuningJobRequestHyperparameters Hyperparameters { get; set; } + /// + /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// + /// For example, a `suffix` of "custom-model-name" would produce a model name like + /// `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// + public string Suffix { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs new file mode 100644 index 000000000..de057600c --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs @@ -0,0 +1,146 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateFineTuningJobRequestHyperparameters : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFineTuningJobRequestHyperparameters)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (OptionalProperty.IsDefined(NEpochs)) + { + writer.WritePropertyName("n_epochs"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(NEpochs); +#else + using (JsonDocument document = JsonDocument.Parse(NEpochs)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateFineTuningJobRequestHyperparameters IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFineTuningJobRequestHyperparameters)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateFineTuningJobRequestHyperparameters(document.RootElement, options); + } + + internal static CreateFineTuningJobRequestHyperparameters DeserializeCreateFineTuningJobRequestHyperparameters(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OptionalProperty nEpochs = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("n_epochs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nEpochs = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateFineTuningJobRequestHyperparameters(nEpochs.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateFineTuningJobRequestHyperparameters)} does not support '{options.Format}' format."); + } + } + + CreateFineTuningJobRequestHyperparameters IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateFineTuningJobRequestHyperparameters(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateFineTuningJobRequestHyperparameters)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateFineTuningJobRequestHyperparameters FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateFineTuningJobRequestHyperparameters(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs new file mode 100644 index 000000000..258881cb0 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs @@ -0,0 +1,106 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateFineTuningJobRequestHyperparameters. + public partial class CreateFineTuningJobRequestHyperparameters + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public CreateFineTuningJobRequestHyperparameters() + { + } + + /// Initializes a new instance of . + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// Keeps track of any properties unknown to the library. 
+ internal CreateFineTuningJobRequestHyperparameters(BinaryData nEpochs, IDictionary serializedAdditionalRawData) + { + NEpochs = nEpochs; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// "auto" + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData NEpochs { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs new file mode 100644 index 000000000..ef3c6ec0c --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs @@ -0,0 +1,51 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in CreateFineTuningJobRequest. + public readonly partial struct CreateFineTuningJobRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateFineTuningJobRequestModel(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string Babbage002Value = "babbage-002"; + private const string Davinci002Value = "davinci-002"; + private const string Gpt35TurboValue = "gpt-3.5-turbo"; + + /// babbage-002. + public static CreateFineTuningJobRequestModel Babbage002 { get; } = new CreateFineTuningJobRequestModel(Babbage002Value); + /// davinci-002. + public static CreateFineTuningJobRequestModel Davinci002 { get; } = new CreateFineTuningJobRequestModel(Davinci002Value); + /// gpt-3.5-turbo. + public static CreateFineTuningJobRequestModel Gpt35Turbo { get; } = new CreateFineTuningJobRequestModel(Gpt35TurboValue); + /// Determines if two values are the same. + public static bool operator ==(CreateFineTuningJobRequestModel left, CreateFineTuningJobRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateFineTuningJobRequestModel left, CreateFineTuningJobRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateFineTuningJobRequestModel(string value) => new CreateFineTuningJobRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateFineTuningJobRequestModel other && Equals(other); + /// + public bool Equals(CreateFineTuningJobRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs new file mode 100644 index 000000000..cc8938c1f --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs @@ -0,0 +1,234 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateImageEditRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateImageEditRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("image"u8); + writer.WriteBase64StringValue(Image.ToArray(), "D"); + writer.WritePropertyName("prompt"u8); + writer.WriteStringValue(Prompt); + if (OptionalProperty.IsDefined(Mask)) + { + writer.WritePropertyName("mask"u8); + writer.WriteBase64StringValue(Mask.ToArray(), "D"); + } + if (OptionalProperty.IsDefined(Model)) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.Value.ToString()); + } + if (OptionalProperty.IsDefined(N)) + { + if (N != null) + { + writer.WritePropertyName("n"u8); + writer.WriteNumberValue(N.Value); + } + else + { + writer.WriteNull("n"); + } + } + if (OptionalProperty.IsDefined(Size)) + { + writer.WritePropertyName("size"u8); + writer.WriteStringValue(Size.Value.ToString()); + } + if (OptionalProperty.IsDefined(ResponseFormat)) + { + 
writer.WritePropertyName("response_format"u8); + writer.WriteStringValue(ResponseFormat.Value.ToString()); + } + if (OptionalProperty.IsDefined(User)) + { + writer.WritePropertyName("user"u8); + writer.WriteStringValue(User); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateImageEditRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateImageEditRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateImageEditRequest(document.RootElement, options); + } + + internal static CreateImageEditRequest DeserializeCreateImageEditRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData image = default; + string prompt = default; + OptionalProperty mask = default; + OptionalProperty model = default; + OptionalProperty n = default; + OptionalProperty size = default; + OptionalProperty responseFormat = default; + OptionalProperty user = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("image"u8)) + { + image = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } 
+ if (property.NameEquals("prompt"u8)) + { + prompt = property.Value.GetString(); + continue; + } + if (property.NameEquals("mask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + mask = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } + if (property.NameEquals("model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + model = new CreateImageEditRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("n"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + n = null; + continue; + } + n = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("size"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + size = new CreateImageEditRequestSize(property.Value.GetString()); + continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = new CreateImageEditRequestResponseFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("user"u8)) + { + user = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateImageEditRequest(image, prompt, mask.Value, OptionalProperty.ToNullable(model), OptionalProperty.ToNullable(n), OptionalProperty.ToNullable(size), OptionalProperty.ToNullable(responseFormat), user.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateImageEditRequest)} does not support '{options.Format}' format."); + } + } + + CreateImageEditRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateImageEditRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateImageEditRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateImageEditRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateImageEditRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequest.cs b/.dotnet/src/Generated/Models/CreateImageEditRequest.cs new file mode 100644 index 000000000..12f245f91 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageEditRequest.cs @@ -0,0 +1,153 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateImageEditRequest. 
+ public partial class CreateImageEditRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + /// provided, image must have transparency, which will be used as the mask. + /// + /// A text description of the desired image(s). The maximum length is 1000 characters. + /// or is null. + public CreateImageEditRequest(BinaryData image, string prompt) + { + ClientUtilities.AssertNotNull(image, nameof(image)); + ClientUtilities.AssertNotNull(prompt, nameof(prompt)); + + Image = image; + Prompt = prompt; + } + + /// Initializes a new instance of . + /// + /// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + /// provided, image must have transparency, which will be used as the mask. + /// + /// A text description of the desired image(s). The maximum length is 1000 characters. + /// + /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where + /// `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + /// as `image`. + /// + /// The model to use for image generation. Only `dall-e-2` is supported at this time. + /// The number of images to generate. 
Must be between 1 and 10. + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// Keeps track of any properties unknown to the library. + internal CreateImageEditRequest(BinaryData image, string prompt, BinaryData mask, CreateImageEditRequestModel? model, long? n, CreateImageEditRequestSize? size, CreateImageEditRequestResponseFormat? responseFormat, string user, IDictionary serializedAdditionalRawData) + { + Image = image; + Prompt = prompt; + Mask = mask; + Model = model; + N = n; + Size = size; + ResponseFormat = responseFormat; + User = user; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateImageEditRequest() + { + } + + /// + /// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + /// provided, image must have transparency, which will be used as the mask. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData Image { get; } + /// A text description of the desired image(s). The maximum length is 1000 characters. + public string Prompt { get; } + /// + /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where + /// `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + /// as `image`. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. 
+ /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData Mask { get; set; } + /// The model to use for image generation. Only `dall-e-2` is supported at this time. + public CreateImageEditRequestModel? Model { get; set; } + /// The number of images to generate. Must be between 1 and 10. + public long? N { get; set; } + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + public CreateImageEditRequestSize? Size { get; set; } + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + public CreateImageEditRequestResponseFormat? ResponseFormat { get; set; } + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + public string User { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs b/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs new file mode 100644 index 000000000..152466ce3 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in CreateImageEditRequest. + public readonly partial struct CreateImageEditRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageEditRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string DallE2Value = "dall-e-2"; + + /// dall-e-2. + public static CreateImageEditRequestModel DallE2 { get; } = new CreateImageEditRequestModel(DallE2Value); + /// Determines if two values are the same. 
+ public static bool operator ==(CreateImageEditRequestModel left, CreateImageEditRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageEditRequestModel left, CreateImageEditRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageEditRequestModel(string value) => new CreateImageEditRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageEditRequestModel other && Equals(other); + /// + public bool Equals(CreateImageEditRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs new file mode 100644 index 000000000..2dfc96644 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for response_format in CreateImageEditRequest. + public readonly partial struct CreateImageEditRequestResponseFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageEditRequestResponseFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string UrlValue = "url"; + private const string B64JsonValue = "b64_json"; + + /// url. + public static CreateImageEditRequestResponseFormat Url { get; } = new CreateImageEditRequestResponseFormat(UrlValue); + /// b64_json. 
+ public static CreateImageEditRequestResponseFormat B64Json { get; } = new CreateImageEditRequestResponseFormat(B64JsonValue); + /// Determines if two values are the same. + public static bool operator ==(CreateImageEditRequestResponseFormat left, CreateImageEditRequestResponseFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageEditRequestResponseFormat left, CreateImageEditRequestResponseFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageEditRequestResponseFormat(string value) => new CreateImageEditRequestResponseFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageEditRequestResponseFormat other && Equals(other); + /// + public bool Equals(CreateImageEditRequestResponseFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs b/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs new file mode 100644 index 000000000..01c9a4c45 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs @@ -0,0 +1,51 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for size in CreateImageEditRequest. + public readonly partial struct CreateImageEditRequestSize : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageEditRequestSize(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string _256x256Value = "256x256"; + private const string _512x512Value = "512x512"; + private const string _1024x1024Value = "1024x1024"; + + /// 256x256. + public static CreateImageEditRequestSize _256x256 { get; } = new CreateImageEditRequestSize(_256x256Value); + /// 512x512. + public static CreateImageEditRequestSize _512x512 { get; } = new CreateImageEditRequestSize(_512x512Value); + /// 1024x1024. + public static CreateImageEditRequestSize _1024x1024 { get; } = new CreateImageEditRequestSize(_1024x1024Value); + /// Determines if two values are the same. + public static bool operator ==(CreateImageEditRequestSize left, CreateImageEditRequestSize right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageEditRequestSize left, CreateImageEditRequestSize right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageEditRequestSize(string value) => new CreateImageEditRequestSize(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageEditRequestSize other && Equals(other); + /// + public bool Equals(CreateImageEditRequestSize other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs new file mode 100644 index 000000000..f790166ff --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs @@ -0,0 +1,241 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateImageRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateImageRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("prompt"u8); + writer.WriteStringValue(Prompt); + if (OptionalProperty.IsDefined(Model)) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.Value.ToString()); + } + if (OptionalProperty.IsDefined(N)) + { + if (N != null) + { + writer.WritePropertyName("n"u8); + writer.WriteNumberValue(N.Value); + } + else + { + writer.WriteNull("n"); + } + } + if (OptionalProperty.IsDefined(Quality)) + { + writer.WritePropertyName("quality"u8); + writer.WriteStringValue(Quality.Value.ToString()); + } + if (OptionalProperty.IsDefined(ResponseFormat)) + { + writer.WritePropertyName("response_format"u8); + writer.WriteStringValue(ResponseFormat.Value.ToString()); + } + if (OptionalProperty.IsDefined(Size)) + { + writer.WritePropertyName("size"u8); + writer.WriteStringValue(Size.Value.ToString()); + } + if 
(OptionalProperty.IsDefined(Style)) + { + writer.WritePropertyName("style"u8); + writer.WriteStringValue(Style.Value.ToString()); + } + if (OptionalProperty.IsDefined(User)) + { + writer.WritePropertyName("user"u8); + writer.WriteStringValue(User); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateImageRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateImageRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateImageRequest(document.RootElement, options); + } + + internal static CreateImageRequest DeserializeCreateImageRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string prompt = default; + OptionalProperty model = default; + OptionalProperty n = default; + OptionalProperty quality = default; + OptionalProperty responseFormat = default; + OptionalProperty size = default; + OptionalProperty style = default; + OptionalProperty user = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("prompt"u8)) + { + prompt = property.Value.GetString(); + continue; + } + if 
(property.NameEquals("model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + model = new CreateImageRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("n"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + n = null; + continue; + } + n = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("quality"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + quality = new CreateImageRequestQuality(property.Value.GetString()); + continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = new CreateImageRequestResponseFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("size"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + size = new CreateImageRequestSize(property.Value.GetString()); + continue; + } + if (property.NameEquals("style"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + style = new CreateImageRequestStyle(property.Value.GetString()); + continue; + } + if (property.NameEquals("user"u8)) + { + user = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateImageRequest(prompt, OptionalProperty.ToNullable(model), OptionalProperty.ToNullable(n), OptionalProperty.ToNullable(quality), OptionalProperty.ToNullable(responseFormat), OptionalProperty.ToNullable(size), OptionalProperty.ToNullable(style), user.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateImageRequest)} does not support '{options.Format}' format."); + } + } + + CreateImageRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateImageRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateImageRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateImageRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateImageRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequest.cs b/.dotnet/src/Generated/Models/CreateImageRequest.cs new file mode 100644 index 000000000..bde1c78ba --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequest.cs @@ -0,0 +1,142 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateImageRequest. + public partial class CreateImageRequest + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// A text description of the desired image(s). The maximum length is 1000 characters for + /// `dall-e-2` and 4000 characters for `dall-e-3`. + /// + /// is null. + public CreateImageRequest(string prompt) + { + ClientUtilities.AssertNotNull(prompt, nameof(prompt)); + + Prompt = prompt; + } + + /// Initializes a new instance of . + /// + /// A text description of the desired image(s). The maximum length is 1000 characters for + /// `dall-e-2` and 4000 characters for `dall-e-3`. + /// + /// The model to use for image generation. + /// + /// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is + /// supported. + /// + /// + /// The quality of the image that will be generated. `hd` creates images with finer details and + /// greater consistency across the image. This param is only supported for `dall-e-3`. + /// + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + /// + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for + /// `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + /// + /// + /// The style of the generated images. Must be one of `vivid` or `natural`. 
Vivid causes the model + /// to lean towards generating hyper-real and dramatic images. Natural causes the model to produce + /// more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// Keeps track of any properties unknown to the library. + internal CreateImageRequest(string prompt, CreateImageRequestModel? model, long? n, CreateImageRequestQuality? quality, CreateImageRequestResponseFormat? responseFormat, CreateImageRequestSize? size, CreateImageRequestStyle? style, string user, IDictionary serializedAdditionalRawData) + { + Prompt = prompt; + Model = model; + N = n; + Quality = quality; + ResponseFormat = responseFormat; + Size = size; + Style = style; + User = user; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateImageRequest() + { + } + + /// + /// A text description of the desired image(s). The maximum length is 1000 characters for + /// `dall-e-2` and 4000 characters for `dall-e-3`. + /// + public string Prompt { get; } + /// The model to use for image generation. + public CreateImageRequestModel? Model { get; set; } + /// + /// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is + /// supported. + /// + public long? N { get; set; } + /// + /// The quality of the image that will be generated. `hd` creates images with finer details and + /// greater consistency across the image. This param is only supported for `dall-e-3`. + /// + public CreateImageRequestQuality? Quality { get; set; } + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + public CreateImageRequestResponseFormat? ResponseFormat { get; set; } + /// + /// The size of the generated images. 
Must be one of `256x256`, `512x512`, or `1024x1024` for + /// `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + /// + public CreateImageRequestSize? Size { get; set; } + /// + /// The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model + /// to lean towards generating hyper-real and dramatic images. Natural causes the model to produce + /// more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + /// + public CreateImageRequestStyle? Style { get; set; } + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + public string User { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequestModel.cs b/.dotnet/src/Generated/Models/CreateImageRequestModel.cs new file mode 100644 index 000000000..46d224028 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequestModel.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in CreateImageRequest. + public readonly partial struct CreateImageRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string DallE2Value = "dall-e-2"; + private const string DallE3Value = "dall-e-3"; + + /// dall-e-2. + public static CreateImageRequestModel DallE2 { get; } = new CreateImageRequestModel(DallE2Value); + /// dall-e-3. + public static CreateImageRequestModel DallE3 { get; } = new CreateImageRequestModel(DallE3Value); + /// Determines if two values are the same. 
+ public static bool operator ==(CreateImageRequestModel left, CreateImageRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageRequestModel left, CreateImageRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageRequestModel(string value) => new CreateImageRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageRequestModel other && Equals(other); + /// + public bool Equals(CreateImageRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs b/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs new file mode 100644 index 000000000..e3a737c23 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for quality in CreateImageRequest. + public readonly partial struct CreateImageRequestQuality : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageRequestQuality(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string StandardValue = "standard"; + private const string HdValue = "hd"; + + /// standard. + public static CreateImageRequestQuality Standard { get; } = new CreateImageRequestQuality(StandardValue); + /// hd. + public static CreateImageRequestQuality Hd { get; } = new CreateImageRequestQuality(HdValue); + /// Determines if two values are the same. 
+ public static bool operator ==(CreateImageRequestQuality left, CreateImageRequestQuality right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageRequestQuality left, CreateImageRequestQuality right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageRequestQuality(string value) => new CreateImageRequestQuality(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageRequestQuality other && Equals(other); + /// + public bool Equals(CreateImageRequestQuality other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs new file mode 100644 index 000000000..5990ab315 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for response_format in CreateImageRequest. + public readonly partial struct CreateImageRequestResponseFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageRequestResponseFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string UrlValue = "url"; + private const string B64JsonValue = "b64_json"; + + /// url. + public static CreateImageRequestResponseFormat Url { get; } = new CreateImageRequestResponseFormat(UrlValue); + /// b64_json. 
+ public static CreateImageRequestResponseFormat B64Json { get; } = new CreateImageRequestResponseFormat(B64JsonValue); + /// Determines if two values are the same. + public static bool operator ==(CreateImageRequestResponseFormat left, CreateImageRequestResponseFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageRequestResponseFormat left, CreateImageRequestResponseFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageRequestResponseFormat(string value) => new CreateImageRequestResponseFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageRequestResponseFormat other && Equals(other); + /// + public bool Equals(CreateImageRequestResponseFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequestSize.cs b/.dotnet/src/Generated/Models/CreateImageRequestSize.cs new file mode 100644 index 000000000..df73f6aac --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequestSize.cs @@ -0,0 +1,57 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for size in CreateImageRequest. + public readonly partial struct CreateImageRequestSize : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageRequestSize(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string _256x256Value = "256x256"; + private const string _512x512Value = "512x512"; + private const string _1024x1024Value = "1024x1024"; + private const string _1792x1024Value = "1792x1024"; + private const string _1024x1792Value = "1024x1792"; + + /// 256x256. + public static CreateImageRequestSize _256x256 { get; } = new CreateImageRequestSize(_256x256Value); + /// 512x512. + public static CreateImageRequestSize _512x512 { get; } = new CreateImageRequestSize(_512x512Value); + /// 1024x1024. + public static CreateImageRequestSize _1024x1024 { get; } = new CreateImageRequestSize(_1024x1024Value); + /// 1792x1024. + public static CreateImageRequestSize _1792x1024 { get; } = new CreateImageRequestSize(_1792x1024Value); + /// 1024x1792. + public static CreateImageRequestSize _1024x1792 { get; } = new CreateImageRequestSize(_1024x1792Value); + /// Determines if two values are the same. + public static bool operator ==(CreateImageRequestSize left, CreateImageRequestSize right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageRequestSize left, CreateImageRequestSize right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageRequestSize(string value) => new CreateImageRequestSize(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageRequestSize other && Equals(other); + /// + public bool Equals(CreateImageRequestSize other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs b/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs new file mode 100644 index 000000000..a711e0635 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for style in CreateImageRequest. + public readonly partial struct CreateImageRequestStyle : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageRequestStyle(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string VividValue = "vivid"; + private const string NaturalValue = "natural"; + + /// vivid. + public static CreateImageRequestStyle Vivid { get; } = new CreateImageRequestStyle(VividValue); + /// natural. + public static CreateImageRequestStyle Natural { get; } = new CreateImageRequestStyle(NaturalValue); + /// Determines if two values are the same. + public static bool operator ==(CreateImageRequestStyle left, CreateImageRequestStyle right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageRequestStyle left, CreateImageRequestStyle right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageRequestStyle(string value) => new CreateImageRequestStyle(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageRequestStyle other && Equals(other); + /// + public bool Equals(CreateImageRequestStyle other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs new file mode 100644 index 000000000..bd3a103fb --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs @@ -0,0 +1,211 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateImageVariationRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateImageVariationRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("image"u8); + writer.WriteBase64StringValue(Image.ToArray(), "D"); + if (OptionalProperty.IsDefined(Model)) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.Value.ToString()); + } + if (OptionalProperty.IsDefined(N)) + { + if (N != null) + { + writer.WritePropertyName("n"u8); + writer.WriteNumberValue(N.Value); + } + else + { + writer.WriteNull("n"); + } + } + if (OptionalProperty.IsDefined(ResponseFormat)) + { + writer.WritePropertyName("response_format"u8); + writer.WriteStringValue(ResponseFormat.Value.ToString()); + } + if (OptionalProperty.IsDefined(Size)) + { + writer.WritePropertyName("size"u8); + writer.WriteStringValue(Size.Value.ToString()); + } + if (OptionalProperty.IsDefined(User)) + { + writer.WritePropertyName("user"u8); + 
writer.WriteStringValue(User); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateImageVariationRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateImageVariationRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateImageVariationRequest(document.RootElement, options); + } + + internal static CreateImageVariationRequest DeserializeCreateImageVariationRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData image = default; + OptionalProperty model = default; + OptionalProperty n = default; + OptionalProperty responseFormat = default; + OptionalProperty size = default; + OptionalProperty user = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("image"u8)) + { + image = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } + if (property.NameEquals("model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + model = new CreateImageVariationRequestModel(property.Value.GetString()); + continue; + } + if 
(property.NameEquals("n"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + n = null; + continue; + } + n = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = new CreateImageVariationRequestResponseFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("size"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + size = new CreateImageVariationRequestSize(property.Value.GetString()); + continue; + } + if (property.NameEquals("user"u8)) + { + user = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateImageVariationRequest(image, OptionalProperty.ToNullable(model), OptionalProperty.ToNullable(n), OptionalProperty.ToNullable(responseFormat), OptionalProperty.ToNullable(size), user.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateImageVariationRequest)} does not support '{options.Format}' format."); + } + } + + CreateImageVariationRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateImageVariationRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateImageVariationRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateImageVariationRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateImageVariationRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs new file mode 100644 index 000000000..f1c52762f --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs @@ -0,0 +1,121 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateImageVariationRequest. + public partial class CreateImageVariationRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". 
+ /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, + /// and square. + /// + /// is null. + public CreateImageVariationRequest(BinaryData image) + { + ClientUtilities.AssertNotNull(image, nameof(image)); + + Image = image; + } + + /// Initializes a new instance of . + /// + /// The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, + /// and square. + /// + /// The model to use for image generation. Only `dall-e-2` is supported at this time. + /// The number of images to generate. Must be between 1 and 10. + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// Keeps track of any properties unknown to the library. + internal CreateImageVariationRequest(BinaryData image, CreateImageVariationRequestModel? model, long? n, CreateImageVariationRequestResponseFormat? responseFormat, CreateImageVariationRequestSize? size, string user, IDictionary serializedAdditionalRawData) + { + Image = image; + Model = model; + N = n; + ResponseFormat = responseFormat; + Size = size; + User = user; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateImageVariationRequest() + { + } + + /// + /// The image to use as the basis for the variation(s). 
Must be a valid PNG file, less than 4MB, + /// and square. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData Image { get; } + /// The model to use for image generation. Only `dall-e-2` is supported at this time. + public CreateImageVariationRequestModel? Model { get; set; } + /// The number of images to generate. Must be between 1 and 10. + public long? N { get; set; } + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + public CreateImageVariationRequestResponseFormat? ResponseFormat { get; set; } + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + public CreateImageVariationRequestSize? Size { get; set; } + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + public string User { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs new file mode 100644 index 000000000..e1fb70037 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in CreateImageVariationRequest. + public readonly partial struct CreateImageVariationRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageVariationRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string DallE2Value = "dall-e-2"; + + /// dall-e-2. 
+ public static CreateImageVariationRequestModel DallE2 { get; } = new CreateImageVariationRequestModel(DallE2Value); + /// Determines if two values are the same. + public static bool operator ==(CreateImageVariationRequestModel left, CreateImageVariationRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageVariationRequestModel left, CreateImageVariationRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageVariationRequestModel(string value) => new CreateImageVariationRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageVariationRequestModel other && Equals(other); + /// + public bool Equals(CreateImageVariationRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs new file mode 100644 index 000000000..2c384bb1f --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for response_format in CreateImageVariationRequest. + public readonly partial struct CreateImageVariationRequestResponseFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageVariationRequestResponseFormat(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string UrlValue = "url"; + private const string B64JsonValue = "b64_json"; + + /// url. + public static CreateImageVariationRequestResponseFormat Url { get; } = new CreateImageVariationRequestResponseFormat(UrlValue); + /// b64_json. + public static CreateImageVariationRequestResponseFormat B64Json { get; } = new CreateImageVariationRequestResponseFormat(B64JsonValue); + /// Determines if two values are the same. + public static bool operator ==(CreateImageVariationRequestResponseFormat left, CreateImageVariationRequestResponseFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageVariationRequestResponseFormat left, CreateImageVariationRequestResponseFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageVariationRequestResponseFormat(string value) => new CreateImageVariationRequestResponseFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageVariationRequestResponseFormat other && Equals(other); + /// + public bool Equals(CreateImageVariationRequestResponseFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs new file mode 100644 index 000000000..50f61d60c --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs @@ -0,0 +1,51 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for size in CreateImageVariationRequest. 
+ public readonly partial struct CreateImageVariationRequestSize : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageVariationRequestSize(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string _256x256Value = "256x256"; + private const string _512x512Value = "512x512"; + private const string _1024x1024Value = "1024x1024"; + + /// 256x256. + public static CreateImageVariationRequestSize _256x256 { get; } = new CreateImageVariationRequestSize(_256x256Value); + /// 512x512. + public static CreateImageVariationRequestSize _512x512 { get; } = new CreateImageVariationRequestSize(_512x512Value); + /// 1024x1024. + public static CreateImageVariationRequestSize _1024x1024 { get; } = new CreateImageVariationRequestSize(_1024x1024Value); + /// Determines if two values are the same. + public static bool operator ==(CreateImageVariationRequestSize left, CreateImageVariationRequestSize right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageVariationRequestSize left, CreateImageVariationRequestSize right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageVariationRequestSize(string value) => new CreateImageVariationRequestSize(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageVariationRequestSize other && Equals(other); + /// + public bool Equals(CreateImageVariationRequestSize other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs new file mode 100644 index 000000000..f8cdabd45 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs @@ -0,0 +1,198 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateMessageRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateMessageRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("role"u8); + writer.WriteStringValue(Role.ToString()); + writer.WritePropertyName("content"u8); + writer.WriteStringValue(Content); + if (OptionalProperty.IsCollectionDefined(FileIds)) + { + writer.WritePropertyName("file_ids"u8); + writer.WriteStartArray(); + foreach (var item in FileIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + } + if (OptionalProperty.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in 
_serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateMessageRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateMessageRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateMessageRequest(document.RootElement, options); + } + + internal static CreateMessageRequest DeserializeCreateMessageRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + CreateMessageRequestRole role = default; + string content = default; + OptionalProperty> fileIds = default; + OptionalProperty> metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("role"u8)) + { + role = new CreateMessageRequestRole(property.Value.GetString()); + continue; + } + if (property.NameEquals("content"u8)) + { + content = property.Value.GetString(); + continue; + } + if (property.NameEquals("file_ids"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + fileIds = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == 
JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateMessageRequest(role, content, OptionalProperty.ToList(fileIds), OptionalProperty.ToDictionary(metadata), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateMessageRequest)} does not support '{options.Format}' format."); + } + } + + CreateMessageRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateMessageRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateMessageRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. 
+ internal static CreateMessageRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateMessageRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateMessageRequest.cs b/.dotnet/src/Generated/Models/CreateMessageRequest.cs new file mode 100644 index 000000000..63200e048 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateMessageRequest.cs @@ -0,0 +1,104 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateMessageRequest. + public partial class CreateMessageRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The content of the message. + /// is null. + public CreateMessageRequest(string content) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + Content = content; + FileIds = new OptionalList(); + Metadata = new OptionalDictionary(); + } + + /// Initializes a new instance of . 
+ /// The role of the entity that is creating the message. Currently only `user` is supported. + /// The content of the message. + /// + /// A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a + /// maximum of 10 files attached to a message. Useful for tools like `retrieval` and + /// `code_interpreter` that can access and use files. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal CreateMessageRequest(CreateMessageRequestRole role, string content, IList fileIds, IDictionary metadata, IDictionary serializedAdditionalRawData) + { + Role = role; + Content = content; + FileIds = fileIds; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateMessageRequest() + { + } + + /// The role of the entity that is creating the message. Currently only `user` is supported. + public CreateMessageRequestRole Role { get; } = CreateMessageRequestRole.User; + + /// The content of the message. + public string Content { get; } + /// + /// A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a + /// maximum of 10 files attached to a message. Useful for tools like `retrieval` and + /// `code_interpreter` that can access and use files. + /// + public IList FileIds { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. 
+ /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs b/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs new file mode 100644 index 000000000..2f7ae075a --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The CreateMessageRequest_role. + public readonly partial struct CreateMessageRequestRole : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateMessageRequestRole(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string UserValue = "user"; + + /// user. + public static CreateMessageRequestRole User { get; } = new CreateMessageRequestRole(UserValue); + /// Determines if two values are the same. + public static bool operator ==(CreateMessageRequestRole left, CreateMessageRequestRole right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateMessageRequestRole left, CreateMessageRequestRole right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateMessageRequestRole(string value) => new CreateMessageRequestRole(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateMessageRequestRole other && Equals(other); + /// + public bool Equals(CreateMessageRequestRole other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs new file mode 100644 index 000000000..d7d7cac14 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs @@ -0,0 +1,154 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateModerationRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("input"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(Input); +#else + using (JsonDocument document = JsonDocument.Parse(Input)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + if (OptionalProperty.IsDefined(Model)) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateModerationRequest IJsonModel.Create(ref 
Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateModerationRequest(document.RootElement, options); + } + + internal static CreateModerationRequest DeserializeCreateModerationRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData input = default; + OptionalProperty model = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("input"u8)) + { + input = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + model = new CreateModerationRequestModel(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateModerationRequest(input, OptionalProperty.ToNullable(model), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateModerationRequest)} does not support '{options.Format}' format."); + } + } + + CreateModerationRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateModerationRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateModerationRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateModerationRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateModerationRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationRequest.cs b/.dotnet/src/Generated/Models/CreateModerationRequest.cs new file mode 100644 index 000000000..3192ceea7 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationRequest.cs @@ -0,0 +1,129 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateModerationRequest. 
+ public partial class CreateModerationRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The input text to classify. + /// is null. + public CreateModerationRequest(BinaryData input) + { + ClientUtilities.AssertNotNull(input, nameof(input)); + + Input = input; + } + + /// Initializes a new instance of . + /// The input text to classify. + /// + /// Two content moderations models are available: `text-moderation-stable` and + /// `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically + /// upgraded over time. This ensures you are always using our most accurate model. If you use + /// `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy + /// of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + /// + /// Keeps track of any properties unknown to the library. + internal CreateModerationRequest(BinaryData input, CreateModerationRequestModel? model, IDictionary serializedAdditionalRawData) + { + Input = input; + Model = model; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. 
+ internal CreateModerationRequest() + { + } + + /// + /// The input text to classify + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// + /// + /// + /// where T is of type + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData Input { get; } + /// + /// Two content moderations models are available: `text-moderation-stable` and + /// `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically + /// upgraded over time. This ensures you are always using our most accurate model. If you use + /// `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy + /// of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + /// + public CreateModerationRequestModel? Model { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs b/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs new file mode 100644 index 000000000..fcc067e02 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in CreateModerationRequest. + public readonly partial struct CreateModerationRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. 
+ public CreateModerationRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TextModerationLatestValue = "text-moderation-latest"; + private const string TextModerationStableValue = "text-moderation-stable"; + + /// text-moderation-latest. + public static CreateModerationRequestModel TextModerationLatest { get; } = new CreateModerationRequestModel(TextModerationLatestValue); + /// text-moderation-stable. + public static CreateModerationRequestModel TextModerationStable { get; } = new CreateModerationRequestModel(TextModerationStableValue); + /// Determines if two values are the same. + public static bool operator ==(CreateModerationRequestModel left, CreateModerationRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateModerationRequestModel left, CreateModerationRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateModerationRequestModel(string value) => new CreateModerationRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateModerationRequestModel other && Equals(other); + /// + public bool Equals(CreateModerationRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs new file mode 100644 index 000000000..95902ce14 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs @@ -0,0 +1,158 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateModerationResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + writer.WritePropertyName("results"u8); + writer.WriteStartArray(); + foreach (var item in Results) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateModerationResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + 
{ + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateModerationResponse(document.RootElement, options); + } + + internal static CreateModerationResponse DeserializeCreateModerationResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string model = default; + IReadOnlyList results = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("results"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(CreateModerationResponseResult.DeserializeCreateModerationResponseResult(item)); + } + results = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateModerationResponse(id, model, results, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateModerationResponse)} does not support '{options.Format}' format."); + } + } + + CreateModerationResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateModerationResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateModerationResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateModerationResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateModerationResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponse.cs b/.dotnet/src/Generated/Models/CreateModerationResponse.cs new file mode 100644 index 000000000..7fe06f011 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponse.cs @@ -0,0 +1,88 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// Represents policy compliance report by OpenAI's content moderation model against a given input. + public partial class CreateModerationResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The unique identifier for the moderation request. + /// The model used to generate the moderation results. + /// A list of moderation objects. + /// , or is null. 
+ internal CreateModerationResponse(string id, string model, IEnumerable results) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(model, nameof(model)); + ClientUtilities.AssertNotNull(results, nameof(results)); + + Id = id; + Model = model; + Results = results.ToList(); + } + + /// Initializes a new instance of . + /// The unique identifier for the moderation request. + /// The model used to generate the moderation results. + /// A list of moderation objects. + /// Keeps track of any properties unknown to the library. + internal CreateModerationResponse(string id, string model, IReadOnlyList results, IDictionary serializedAdditionalRawData) + { + Id = id; + Model = model; + Results = results; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateModerationResponse() + { + } + + /// The unique identifier for the moderation request. + public string Id { get; } + /// The model used to generate the moderation results. + public string Model { get; } + /// A list of moderation objects. 
+ public IReadOnlyList Results { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs new file mode 100644 index 000000000..6c56b94e2 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs @@ -0,0 +1,148 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateModerationResponseResult : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponseResult)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("flagged"u8); + writer.WriteBooleanValue(Flagged); + writer.WritePropertyName("categories"u8); + writer.WriteObjectValue(Categories); + writer.WritePropertyName("category_scores"u8); + writer.WriteObjectValue(CategoryScores); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateModerationResponseResult IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == 
"W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponseResult)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateModerationResponseResult(document.RootElement, options); + } + + internal static CreateModerationResponseResult DeserializeCreateModerationResponseResult(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + bool flagged = default; + CreateModerationResponseResultCategories categories = default; + CreateModerationResponseResultCategoryScores categoryScores = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("flagged"u8)) + { + flagged = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("categories"u8)) + { + categories = CreateModerationResponseResultCategories.DeserializeCreateModerationResponseResultCategories(property.Value); + continue; + } + if (property.NameEquals("category_scores"u8)) + { + categoryScores = CreateModerationResponseResultCategoryScores.DeserializeCreateModerationResponseResultCategoryScores(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateModerationResponseResult(flagged, categories, categoryScores, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateModerationResponseResult)} does not support '{options.Format}' format."); + } + } + + CreateModerationResponseResult IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateModerationResponseResult(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateModerationResponseResult)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateModerationResponseResult FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateModerationResponseResult(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs new file mode 100644 index 000000000..3842b8365 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs @@ -0,0 +1,86 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateModerationResponseResult. 
+ public partial class CreateModerationResponseResult + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// Whether the content violates [OpenAI's usage policies](/policies/usage-policies). + /// A list of the categories, and whether they are flagged or not. + /// A list of the categories along with their scores as predicted by model. + /// or is null. + internal CreateModerationResponseResult(bool flagged, CreateModerationResponseResultCategories categories, CreateModerationResponseResultCategoryScores categoryScores) + { + ClientUtilities.AssertNotNull(categories, nameof(categories)); + ClientUtilities.AssertNotNull(categoryScores, nameof(categoryScores)); + + Flagged = flagged; + Categories = categories; + CategoryScores = categoryScores; + } + + /// Initializes a new instance of . + /// Whether the content violates [OpenAI's usage policies](/policies/usage-policies). + /// A list of the categories, and whether they are flagged or not. + /// A list of the categories along with their scores as predicted by model. + /// Keeps track of any properties unknown to the library. 
+ internal CreateModerationResponseResult(bool flagged, CreateModerationResponseResultCategories categories, CreateModerationResponseResultCategoryScores categoryScores, IDictionary serializedAdditionalRawData) + { + Flagged = flagged; + Categories = categories; + CategoryScores = categoryScores; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateModerationResponseResult() + { + } + + /// Whether the content violates [OpenAI's usage policies](/policies/usage-policies). + public bool Flagged { get; } + /// A list of the categories, and whether they are flagged or not. + public CreateModerationResponseResultCategories Categories { get; } + /// A list of the categories along with their scores as predicted by model. + public CreateModerationResponseResultCategoryScores CategoryScores { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs new file mode 100644 index 000000000..53595dc4a --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs @@ -0,0 +1,212 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateModerationResponseResultCategories : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategories)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("hate"u8); + writer.WriteBooleanValue(Hate); + writer.WritePropertyName("hate/threatening"u8); + writer.WriteBooleanValue(HateThreatening); + writer.WritePropertyName("harassment"u8); + writer.WriteBooleanValue(Harassment); + writer.WritePropertyName("harassment/threatening"u8); + writer.WriteBooleanValue(HarassmentThreatening); + writer.WritePropertyName("self-harm"u8); + writer.WriteBooleanValue(SelfHarm); + writer.WritePropertyName("self-harm/intent"u8); + writer.WriteBooleanValue(SelfHarmIntent); + writer.WritePropertyName("self-harm/instructions"u8); + writer.WriteBooleanValue(SelfHarmInstructions); + writer.WritePropertyName("sexual"u8); + writer.WriteBooleanValue(Sexual); + writer.WritePropertyName("sexual/minors"u8); + writer.WriteBooleanValue(SexualMinors); + writer.WritePropertyName("violence"u8); + writer.WriteBooleanValue(Violence); + writer.WritePropertyName("violence/graphic"u8); + writer.WriteBooleanValue(ViolenceGraphic); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateModerationResponseResultCategories IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategories)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateModerationResponseResultCategories(document.RootElement, options); + } + + internal static CreateModerationResponseResultCategories DeserializeCreateModerationResponseResultCategories(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + bool hate = default; + bool hateThreatening = default; + bool harassment = default; + bool harassmentThreatening = default; + bool selfHarm = default; + bool selfHarmIntent = default; + bool selfHarmInstructions = default; + bool sexual = default; + bool sexualMinors = default; + bool violence = default; + bool violenceGraphic = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("hate"u8)) + { + hate = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("hate/threatening"u8)) + { + hateThreatening = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("harassment"u8)) + { + harassment = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("harassment/threatening"u8)) + { + harassmentThreatening = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("self-harm"u8)) + { + selfHarm = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("self-harm/intent"u8)) + { + selfHarmIntent = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("self-harm/instructions"u8)) + { + selfHarmInstructions = 
property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("sexual"u8)) + { + sexual = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("sexual/minors"u8)) + { + sexualMinors = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("violence"u8)) + { + violence = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("violence/graphic"u8)) + { + violenceGraphic = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateModerationResponseResultCategories(hate, hateThreatening, harassment, harassmentThreatening, selfHarm, selfHarmIntent, selfHarmInstructions, sexual, sexualMinors, violence, violenceGraphic, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategories)} does not support '{options.Format}' format."); + } + } + + CreateModerationResponseResultCategories IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateModerationResponseResultCategories(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategories)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateModerationResponseResultCategories FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateModerationResponseResultCategories(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs new file mode 100644 index 000000000..fdb93fb6c --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs @@ -0,0 +1,189 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateModerationResponseResultCategories. + public partial class CreateModerationResponseResultCategories + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". 
+ /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, + /// religion, nationality, sexual orientation, disability status, or caste. Hateful content + /// aimed at non-protected groups (e.g., chess players) is harrassment. + /// + /// + /// Hateful content that also includes violence or serious harm towards the targeted group + /// based on race, gender, ethnicity, religion, nationality, sexual orientation, disability + /// status, or caste. + /// + /// Content that expresses, incites, or promotes harassing language towards any target. + /// Harassment content that also includes violence or serious harm towards any target. + /// + /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, + /// and eating disorders. + /// + /// + /// Content where the speaker expresses that they are engaging or intend to engage in acts of + /// self-harm, such as suicide, cutting, and eating disorders. + /// + /// + /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating + /// disorders, or that gives instructions or advice on how to commit such acts. + /// + /// + /// Content meant to arouse sexual excitement, such as the description of sexual activity, or + /// that promotes sexual services (excluding sex education and wellness). + /// + /// Sexual content that includes an individual who is under 18 years old. + /// Content that depicts death, violence, or physical injury. 
+ /// Content that depicts death, violence, or physical injury in graphic detail. + internal CreateModerationResponseResultCategories(bool hate, bool hateThreatening, bool harassment, bool harassmentThreatening, bool selfHarm, bool selfHarmIntent, bool selfHarmInstructions, bool sexual, bool sexualMinors, bool violence, bool violenceGraphic) + { + Hate = hate; + HateThreatening = hateThreatening; + Harassment = harassment; + HarassmentThreatening = harassmentThreatening; + SelfHarm = selfHarm; + SelfHarmIntent = selfHarmIntent; + SelfHarmInstructions = selfHarmInstructions; + Sexual = sexual; + SexualMinors = sexualMinors; + Violence = violence; + ViolenceGraphic = violenceGraphic; + } + + /// Initializes a new instance of . + /// + /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, + /// religion, nationality, sexual orientation, disability status, or caste. Hateful content + /// aimed at non-protected groups (e.g., chess players) is harrassment. + /// + /// + /// Hateful content that also includes violence or serious harm towards the targeted group + /// based on race, gender, ethnicity, religion, nationality, sexual orientation, disability + /// status, or caste. + /// + /// Content that expresses, incites, or promotes harassing language towards any target. + /// Harassment content that also includes violence or serious harm towards any target. + /// + /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, + /// and eating disorders. + /// + /// + /// Content where the speaker expresses that they are engaging or intend to engage in acts of + /// self-harm, such as suicide, cutting, and eating disorders. + /// + /// + /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating + /// disorders, or that gives instructions or advice on how to commit such acts. 
+ /// + /// + /// Content meant to arouse sexual excitement, such as the description of sexual activity, or + /// that promotes sexual services (excluding sex education and wellness). + /// + /// Sexual content that includes an individual who is under 18 years old. + /// Content that depicts death, violence, or physical injury. + /// Content that depicts death, violence, or physical injury in graphic detail. + /// Keeps track of any properties unknown to the library. + internal CreateModerationResponseResultCategories(bool hate, bool hateThreatening, bool harassment, bool harassmentThreatening, bool selfHarm, bool selfHarmIntent, bool selfHarmInstructions, bool sexual, bool sexualMinors, bool violence, bool violenceGraphic, IDictionary serializedAdditionalRawData) + { + Hate = hate; + HateThreatening = hateThreatening; + Harassment = harassment; + HarassmentThreatening = harassmentThreatening; + SelfHarm = selfHarm; + SelfHarmIntent = selfHarmIntent; + SelfHarmInstructions = selfHarmInstructions; + Sexual = sexual; + SexualMinors = sexualMinors; + Violence = violence; + ViolenceGraphic = violenceGraphic; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateModerationResponseResultCategories() + { + } + + /// + /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, + /// religion, nationality, sexual orientation, disability status, or caste. Hateful content + /// aimed at non-protected groups (e.g., chess players) is harrassment. + /// + public bool Hate { get; } + /// + /// Hateful content that also includes violence or serious harm towards the targeted group + /// based on race, gender, ethnicity, religion, nationality, sexual orientation, disability + /// status, or caste. + /// + public bool HateThreatening { get; } + /// Content that expresses, incites, or promotes harassing language towards any target. 
+ public bool Harassment { get; } + /// Harassment content that also includes violence or serious harm towards any target. + public bool HarassmentThreatening { get; } + /// + /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, + /// and eating disorders. + /// + public bool SelfHarm { get; } + /// + /// Content where the speaker expresses that they are engaging or intend to engage in acts of + /// self-harm, such as suicide, cutting, and eating disorders. + /// + public bool SelfHarmIntent { get; } + /// + /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating + /// disorders, or that gives instructions or advice on how to commit such acts. + /// + public bool SelfHarmInstructions { get; } + /// + /// Content meant to arouse sexual excitement, such as the description of sexual activity, or + /// that promotes sexual services (excluding sex education and wellness). + /// + public bool Sexual { get; } + /// Sexual content that includes an individual who is under 18 years old. + public bool SexualMinors { get; } + /// Content that depicts death, violence, or physical injury. + public bool Violence { get; } + /// Content that depicts death, violence, or physical injury in graphic detail. 
+ public bool ViolenceGraphic { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs new file mode 100644 index 000000000..2c169bdec --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs @@ -0,0 +1,212 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateModerationResponseResultCategoryScores : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategoryScores)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("hate"u8); + writer.WriteNumberValue(Hate); + writer.WritePropertyName("hate/threatening"u8); + writer.WriteNumberValue(HateThreatening); + writer.WritePropertyName("harassment"u8); + writer.WriteNumberValue(Harassment); + writer.WritePropertyName("harassment/threatening"u8); + writer.WriteNumberValue(HarassmentThreatening); + writer.WritePropertyName("self-harm"u8); + writer.WriteNumberValue(SelfHarm); + writer.WritePropertyName("self-harm/intent"u8); + writer.WriteNumberValue(SelfHarmIntent); + writer.WritePropertyName("self-harm/instructions"u8); + writer.WriteNumberValue(SelfHarmInstructions); + writer.WritePropertyName("sexual"u8); + writer.WriteNumberValue(Sexual); + 
writer.WritePropertyName("sexual/minors"u8); + writer.WriteNumberValue(SexualMinors); + writer.WritePropertyName("violence"u8); + writer.WriteNumberValue(Violence); + writer.WritePropertyName("violence/graphic"u8); + writer.WriteNumberValue(ViolenceGraphic); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateModerationResponseResultCategoryScores IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategoryScores)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateModerationResponseResultCategoryScores(document.RootElement, options); + } + + internal static CreateModerationResponseResultCategoryScores DeserializeCreateModerationResponseResultCategoryScores(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + double hate = default; + double hateThreatening = default; + double harassment = default; + double harassmentThreatening = default; + double selfHarm = default; + double selfHarmIntent = default; + double selfHarmInstructions = default; + double sexual = default; + double sexualMinors = default; + double violence = default; + double violenceGraphic = default; + IDictionary serializedAdditionalRawData = default; + Dictionary 
additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("hate"u8)) + { + hate = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("hate/threatening"u8)) + { + hateThreatening = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("harassment"u8)) + { + harassment = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("harassment/threatening"u8)) + { + harassmentThreatening = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("self-harm"u8)) + { + selfHarm = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("self-harm/intent"u8)) + { + selfHarmIntent = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("self-harm/instructions"u8)) + { + selfHarmInstructions = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("sexual"u8)) + { + sexual = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("sexual/minors"u8)) + { + sexualMinors = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("violence"u8)) + { + violence = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("violence/graphic"u8)) + { + violenceGraphic = property.Value.GetDouble(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateModerationResponseResultCategoryScores(hate, hateThreatening, harassment, harassmentThreatening, selfHarm, selfHarmIntent, selfHarmInstructions, sexual, sexualMinors, violence, violenceGraphic, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategoryScores)} does not support '{options.Format}' format."); + } + } + + CreateModerationResponseResultCategoryScores IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateModerationResponseResultCategoryScores(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategoryScores)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateModerationResponseResultCategoryScores FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateModerationResponseResultCategoryScores(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs new file mode 100644 index 000000000..2e5247ddb --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs @@ -0,0 +1,129 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateModerationResponseResultCategoryScores. + public partial class CreateModerationResponseResultCategoryScores + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The score for the category 'hate'. + /// The score for the category 'hate/threatening'. + /// The score for the category 'harassment'. + /// The score for the category 'harassment/threatening'. + /// The score for the category 'self-harm'. + /// The score for the category 'self-harm/intent'. + /// The score for the category 'self-harm/instructive'. + /// The score for the category 'sexual'. + /// The score for the category 'sexual/minors'. + /// The score for the category 'violence'. 
+ /// The score for the category 'violence/graphic'. + internal CreateModerationResponseResultCategoryScores(double hate, double hateThreatening, double harassment, double harassmentThreatening, double selfHarm, double selfHarmIntent, double selfHarmInstructions, double sexual, double sexualMinors, double violence, double violenceGraphic) + { + Hate = hate; + HateThreatening = hateThreatening; + Harassment = harassment; + HarassmentThreatening = harassmentThreatening; + SelfHarm = selfHarm; + SelfHarmIntent = selfHarmIntent; + SelfHarmInstructions = selfHarmInstructions; + Sexual = sexual; + SexualMinors = sexualMinors; + Violence = violence; + ViolenceGraphic = violenceGraphic; + } + + /// Initializes a new instance of . + /// The score for the category 'hate'. + /// The score for the category 'hate/threatening'. + /// The score for the category 'harassment'. + /// The score for the category 'harassment/threatening'. + /// The score for the category 'self-harm'. + /// The score for the category 'self-harm/intent'. + /// The score for the category 'self-harm/instructive'. + /// The score for the category 'sexual'. + /// The score for the category 'sexual/minors'. + /// The score for the category 'violence'. + /// The score for the category 'violence/graphic'. + /// Keeps track of any properties unknown to the library. 
+ internal CreateModerationResponseResultCategoryScores(double hate, double hateThreatening, double harassment, double harassmentThreatening, double selfHarm, double selfHarmIntent, double selfHarmInstructions, double sexual, double sexualMinors, double violence, double violenceGraphic, IDictionary serializedAdditionalRawData) + { + Hate = hate; + HateThreatening = hateThreatening; + Harassment = harassment; + HarassmentThreatening = harassmentThreatening; + SelfHarm = selfHarm; + SelfHarmIntent = selfHarmIntent; + SelfHarmInstructions = selfHarmInstructions; + Sexual = sexual; + SexualMinors = sexualMinors; + Violence = violence; + ViolenceGraphic = violenceGraphic; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateModerationResponseResultCategoryScores() + { + } + + /// The score for the category 'hate'. + public double Hate { get; } + /// The score for the category 'hate/threatening'. + public double HateThreatening { get; } + /// The score for the category 'harassment'. + public double Harassment { get; } + /// The score for the category 'harassment/threatening'. + public double HarassmentThreatening { get; } + /// The score for the category 'self-harm'. + public double SelfHarm { get; } + /// The score for the category 'self-harm/intent'. + public double SelfHarmIntent { get; } + /// The score for the category 'self-harm/instructive'. + public double SelfHarmInstructions { get; } + /// The score for the category 'sexual'. + public double Sexual { get; } + /// The score for the category 'sexual/minors'. + public double SexualMinors { get; } + /// The score for the category 'violence'. + public double Violence { get; } + /// The score for the category 'violence/graphic'. 
+ public double ViolenceGraphic { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs new file mode 100644 index 000000000..33e243bc6 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs @@ -0,0 +1,285 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateRunRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateRunRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("assistant_id"u8); + writer.WriteStringValue(AssistantId); + if (OptionalProperty.IsDefined(Model)) + { + if (Model != null) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + } + else + { + writer.WriteNull("model"); + } + } + if (OptionalProperty.IsDefined(Instructions)) + { + if (Instructions != null) + { + writer.WritePropertyName("instructions"u8); + writer.WriteStringValue(Instructions); + } + else + { + writer.WriteNull("instructions"); + } + } + if (OptionalProperty.IsDefined(AdditionalInstructions)) + { + if (AdditionalInstructions != null) + { + writer.WritePropertyName("additional_instructions"u8); + writer.WriteStringValue(AdditionalInstructions); + } + else + { + writer.WriteNull("additional_instructions"); + } + } + if (OptionalProperty.IsCollectionDefined(Tools)) + { + if (Tools != null) + { + 
writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var item in Tools) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + } + else + { + writer.WriteNull("tools"); + } + } + if (OptionalProperty.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateRunRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateRunRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateRunRequest(document.RootElement, options); + } + + internal static CreateRunRequest DeserializeCreateRunRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string assistantId = default; + OptionalProperty model = default; + OptionalProperty instructions = default; + OptionalProperty additionalInstructions = default; + OptionalProperty> tools = default; + OptionalProperty> metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("assistant_id"u8)) + { + assistantId = property.Value.GetString(); + continue; + } + if (property.NameEquals("model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + model = null; + continue; + } + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("instructions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + instructions = null; + continue; + } + instructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("additional_instructions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + additionalInstructions = null; + continue; + } + additionalInstructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("tools"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == 
JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + tools = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateRunRequest(assistantId, model.Value, instructions.Value, additionalInstructions.Value, OptionalProperty.ToList(tools), OptionalProperty.ToDictionary(metadata), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateRunRequest)} does not support '{options.Format}' format."); + } + } + + CreateRunRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateRunRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateRunRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. 
+ /// The result to deserialize the model from. + internal static CreateRunRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateRunRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateRunRequest.cs b/.dotnet/src/Generated/Models/CreateRunRequest.cs new file mode 100644 index 000000000..25e22310d --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateRunRequest.cs @@ -0,0 +1,156 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateRunRequest. + public partial class CreateRunRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + /// is null. 
+ public CreateRunRequest(string assistantId) + { + ClientUtilities.AssertNotNull(assistantId, nameof(assistantId)); + + AssistantId = assistantId; + Tools = new OptionalList(); + Metadata = new OptionalDictionary(); + } + + /// Initializes a new instance of . + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + /// + /// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value + /// is provided here, it will override the model associated with the assistant. If not, the model + /// associated with the assistant will be used. + /// + /// + /// Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. + /// This is useful for modifying the behavior on a per-run basis. + /// + /// + /// Appends additional instructions at the end of the instructions for the run. This is useful for + /// modifying the behavior on a per-run basis without overriding other instructions. + /// + /// + /// Override the tools the assistant can use for this run. This is useful for modifying the + /// behavior on a per-run basis. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal CreateRunRequest(string assistantId, string model, string instructions, string additionalInstructions, IList tools, IDictionary metadata, IDictionary serializedAdditionalRawData) + { + AssistantId = assistantId; + Model = model; + Instructions = instructions; + AdditionalInstructions = additionalInstructions; + Tools = tools; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. 
+ internal CreateRunRequest() + { + } + + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + public string AssistantId { get; } + /// + /// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value + /// is provided here, it will override the model associated with the assistant. If not, the model + /// associated with the assistant will be used. + /// + public string Model { get; set; } + /// + /// Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. + /// This is useful for modifying the behavior on a per-run basis. + /// + public string Instructions { get; set; } + /// + /// Appends additional instructions at the end of the instructions for the run. This is useful for + /// modifying the behavior on a per-run basis without overriding other instructions. + /// + public string AdditionalInstructions { get; set; } + /// + /// Override the tools the assistant can use for this run. This is useful for modifying the + /// behavior on a per-run basis. + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IList Tools { get; set; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. 
+ /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs new file mode 100644 index 000000000..299ac0668 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs @@ -0,0 +1,178 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateSpeechRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateSpeechRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.ToString()); + writer.WritePropertyName("input"u8); + writer.WriteStringValue(Input); + writer.WritePropertyName("voice"u8); + writer.WriteStringValue(Voice.ToString()); + if (OptionalProperty.IsDefined(ResponseFormat)) + { + writer.WritePropertyName("response_format"u8); + writer.WriteStringValue(ResponseFormat.Value.ToString()); + } + if (OptionalProperty.IsDefined(Speed)) + { + writer.WritePropertyName("speed"u8); + writer.WriteNumberValue(Speed.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + 
JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateSpeechRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateSpeechRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateSpeechRequest(document.RootElement, options); + } + + internal static CreateSpeechRequest DeserializeCreateSpeechRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + CreateSpeechRequestModel model = default; + string input = default; + CreateSpeechRequestVoice voice = default; + OptionalProperty responseFormat = default; + OptionalProperty speed = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("model"u8)) + { + model = new CreateSpeechRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("input"u8)) + { + input = property.Value.GetString(); + continue; + } + if (property.NameEquals("voice"u8)) + { + voice = new CreateSpeechRequestVoice(property.Value.GetString()); + continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = new CreateSpeechRequestResponseFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("speed"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + speed = property.Value.GetDouble(); + continue; + } + if 
(options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateSpeechRequest(model, input, voice, OptionalProperty.ToNullable(responseFormat), OptionalProperty.ToNullable(speed), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateSpeechRequest)} does not support '{options.Format}' format."); + } + } + + CreateSpeechRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateSpeechRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateSpeechRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateSpeechRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateSpeechRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequest.cs b/.dotnet/src/Generated/Models/CreateSpeechRequest.cs new file mode 100644 index 000000000..ac765143c --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateSpeechRequest.cs @@ -0,0 +1,105 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateSpeechRequest. + public partial class CreateSpeechRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`. + /// The text to generate audio for. The maximum length is 4096 characters. + /// + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, + /// `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the + /// [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// + /// is null. 
+ public CreateSpeechRequest(CreateSpeechRequestModel model, string input, CreateSpeechRequestVoice voice) + { + ClientUtilities.AssertNotNull(input, nameof(input)); + + Model = model; + Input = input; + Voice = voice; + } + + /// Initializes a new instance of . + /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`. + /// The text to generate audio for. The maximum length is 4096 characters. + /// + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, + /// `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the + /// [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// + /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. + /// The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. + /// Keeps track of any properties unknown to the library. + internal CreateSpeechRequest(CreateSpeechRequestModel model, string input, CreateSpeechRequestVoice voice, CreateSpeechRequestResponseFormat? responseFormat, double? speed, IDictionary serializedAdditionalRawData) + { + Model = model; + Input = input; + Voice = voice; + ResponseFormat = responseFormat; + Speed = speed; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateSpeechRequest() + { + } + + /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`. + public CreateSpeechRequestModel Model { get; } + /// The text to generate audio for. The maximum length is 4096 characters. + public string Input { get; } + /// + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, + /// `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the + /// [Text to speech guide](/docs/guides/text-to-speech/voice-options). 
+ /// + public CreateSpeechRequestVoice Voice { get; } + /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. + public CreateSpeechRequestResponseFormat? ResponseFormat { get; set; } + /// The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. + public double? Speed { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs b/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs new file mode 100644 index 000000000..0db564d08 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in CreateSpeechRequest. + public readonly partial struct CreateSpeechRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateSpeechRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string Tts1Value = "tts-1"; + private const string Tts1HdValue = "tts-1-hd"; + + /// tts-1. + public static CreateSpeechRequestModel Tts1 { get; } = new CreateSpeechRequestModel(Tts1Value); + /// tts-1-hd. + public static CreateSpeechRequestModel Tts1Hd { get; } = new CreateSpeechRequestModel(Tts1HdValue); + /// Determines if two values are the same. + public static bool operator ==(CreateSpeechRequestModel left, CreateSpeechRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateSpeechRequestModel left, CreateSpeechRequestModel right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator CreateSpeechRequestModel(string value) => new CreateSpeechRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateSpeechRequestModel other && Equals(other); + /// + public bool Equals(CreateSpeechRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs new file mode 100644 index 000000000..ca5b8462f --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs @@ -0,0 +1,54 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for response_format in CreateSpeechRequest. + public readonly partial struct CreateSpeechRequestResponseFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateSpeechRequestResponseFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string Mp3Value = "mp3"; + private const string OpusValue = "opus"; + private const string AacValue = "aac"; + private const string FlacValue = "flac"; + + /// mp3. + public static CreateSpeechRequestResponseFormat Mp3 { get; } = new CreateSpeechRequestResponseFormat(Mp3Value); + /// opus. + public static CreateSpeechRequestResponseFormat Opus { get; } = new CreateSpeechRequestResponseFormat(OpusValue); + /// aac. + public static CreateSpeechRequestResponseFormat Aac { get; } = new CreateSpeechRequestResponseFormat(AacValue); + /// flac. 
+ public static CreateSpeechRequestResponseFormat Flac { get; } = new CreateSpeechRequestResponseFormat(FlacValue); + /// Determines if two values are the same. + public static bool operator ==(CreateSpeechRequestResponseFormat left, CreateSpeechRequestResponseFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateSpeechRequestResponseFormat left, CreateSpeechRequestResponseFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateSpeechRequestResponseFormat(string value) => new CreateSpeechRequestResponseFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateSpeechRequestResponseFormat other && Equals(other); + /// + public bool Equals(CreateSpeechRequestResponseFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs b/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs new file mode 100644 index 000000000..db9af9d9d --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs @@ -0,0 +1,60 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for voice in CreateSpeechRequest. + public readonly partial struct CreateSpeechRequestVoice : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateSpeechRequestVoice(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string AlloyValue = "alloy"; + private const string EchoValue = "echo"; + private const string FableValue = "fable"; + private const string OnyxValue = "onyx"; + private const string NovaValue = "nova"; + private const string ShimmerValue = "shimmer"; + + /// alloy. + public static CreateSpeechRequestVoice Alloy { get; } = new CreateSpeechRequestVoice(AlloyValue); + /// echo. + public static CreateSpeechRequestVoice Echo { get; } = new CreateSpeechRequestVoice(EchoValue); + /// fable. + public static CreateSpeechRequestVoice Fable { get; } = new CreateSpeechRequestVoice(FableValue); + /// onyx. + public static CreateSpeechRequestVoice Onyx { get; } = new CreateSpeechRequestVoice(OnyxValue); + /// nova. + public static CreateSpeechRequestVoice Nova { get; } = new CreateSpeechRequestVoice(NovaValue); + /// shimmer. + public static CreateSpeechRequestVoice Shimmer { get; } = new CreateSpeechRequestVoice(ShimmerValue); + /// Determines if two values are the same. + public static bool operator ==(CreateSpeechRequestVoice left, CreateSpeechRequestVoice right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateSpeechRequestVoice left, CreateSpeechRequestVoice right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateSpeechRequestVoice(string value) => new CreateSpeechRequestVoice(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateSpeechRequestVoice other && Equals(other); + /// + public bool Equals(CreateSpeechRequestVoice other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs new file mode 100644 index 000000000..8710e7c9a --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs @@ -0,0 +1,277 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateThreadAndRunRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateThreadAndRunRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("assistant_id"u8); + writer.WriteStringValue(AssistantId); + if (OptionalProperty.IsDefined(Thread)) + { + writer.WritePropertyName("thread"u8); + writer.WriteObjectValue(Thread); + } + if (OptionalProperty.IsDefined(Model)) + { + if (Model != null) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + } + else + { + writer.WriteNull("model"); + } + } + if (OptionalProperty.IsDefined(Instructions)) + { + if (Instructions != null) + { + writer.WritePropertyName("instructions"u8); + writer.WriteStringValue(Instructions); + } + else + { + writer.WriteNull("instructions"); + } + } + if (OptionalProperty.IsCollectionDefined(Tools)) + { + if (Tools != null) + { + writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var item in Tools) + { + if 
(item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + } + else + { + writer.WriteNull("tools"); + } + } + if (OptionalProperty.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateThreadAndRunRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateThreadAndRunRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateThreadAndRunRequest(document.RootElement, options); + } + + internal static CreateThreadAndRunRequest DeserializeCreateThreadAndRunRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string assistantId = default; + OptionalProperty thread = default; + OptionalProperty model = default; + OptionalProperty instructions = default; + OptionalProperty> tools = default; + OptionalProperty> metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("assistant_id"u8)) + { + assistantId = property.Value.GetString(); + continue; + } + if (property.NameEquals("thread"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + thread = CreateThreadRequest.DeserializeCreateThreadRequest(property.Value); + continue; + } + if (property.NameEquals("model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + model = null; + continue; + } + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("instructions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + instructions = null; + continue; + } + instructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("tools"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == 
JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + tools = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateThreadAndRunRequest(assistantId, thread.Value, model.Value, instructions.Value, OptionalProperty.ToList(tools), OptionalProperty.ToDictionary(metadata), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateThreadAndRunRequest)} does not support '{options.Format}' format."); + } + } + + CreateThreadAndRunRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateThreadAndRunRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateThreadAndRunRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateThreadAndRunRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateThreadAndRunRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs new file mode 100644 index 000000000..cffca99a7 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs @@ -0,0 +1,150 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateThreadAndRunRequest. + public partial class CreateThreadAndRunRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". 
+ /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + /// is null. + public CreateThreadAndRunRequest(string assistantId) + { + ClientUtilities.AssertNotNull(assistantId, nameof(assistantId)); + + AssistantId = assistantId; + Tools = new OptionalList(); + Metadata = new OptionalDictionary(); + } + + /// Initializes a new instance of . + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + /// If no thread is provided, an empty thread will be created. + /// + /// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is + /// provided here, it will override the model associated with the assistant. If not, the model + /// associated with the assistant will be used. + /// + /// + /// Override the default system message of the assistant. This is useful for modifying the behavior + /// on a per-run basis. + /// + /// + /// Override the tools the assistant can use for this run. This is useful for modifying the + /// behavior on a per-run basis. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. 
+ internal CreateThreadAndRunRequest(string assistantId, CreateThreadRequest thread, string model, string instructions, IList tools, IDictionary metadata, IDictionary serializedAdditionalRawData) + { + AssistantId = assistantId; + Thread = thread; + Model = model; + Instructions = instructions; + Tools = tools; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateThreadAndRunRequest() + { + } + + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + public string AssistantId { get; } + /// If no thread is provided, an empty thread will be created. + public CreateThreadRequest Thread { get; set; } + /// + /// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is + /// provided here, it will override the model associated with the assistant. If not, the model + /// associated with the assistant will be used. + /// + public string Model { get; set; } + /// + /// Override the default system message of the assistant. This is useful for modifying the behavior + /// on a per-run basis. + /// + public string Instructions { get; set; } + /// + /// Override the tools the assistant can use for this run. This is useful for modifying the + /// behavior on a per-run basis. + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. 
+ /// + /// + /// + /// + public IList Tools { get; set; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs new file mode 100644 index 000000000..e321794d2 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs @@ -0,0 +1,182 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateThreadRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateThreadRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (OptionalProperty.IsCollectionDefined(Messages)) + { + writer.WritePropertyName("messages"u8); + writer.WriteStartArray(); + foreach (var item in Messages) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + } + if (OptionalProperty.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateThreadRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateThreadRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateThreadRequest(document.RootElement, options); + } + + internal static CreateThreadRequest DeserializeCreateThreadRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OptionalProperty> messages = default; + OptionalProperty> metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("messages"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(CreateMessageRequest.DeserializeCreateMessageRequest(item)); + } + messages = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateThreadRequest(OptionalProperty.ToList(messages), OptionalProperty.ToDictionary(metadata), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateThreadRequest)} does not support '{options.Format}' format."); + } + } + + CreateThreadRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateThreadRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateThreadRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateThreadRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateThreadRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateThreadRequest.cs b/.dotnet/src/Generated/Models/CreateThreadRequest.cs new file mode 100644 index 000000000..b59545b62 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateThreadRequest.cs @@ -0,0 +1,77 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateThreadRequest. + public partial class CreateThreadRequest + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public CreateThreadRequest() + { + Messages = new OptionalList(); + Metadata = new OptionalDictionary(); + } + + /// Initializes a new instance of . + /// A list of [messages](/docs/api-reference/messages) to start the thread with. + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal CreateThreadRequest(IList messages, IDictionary metadata, IDictionary serializedAdditionalRawData) + { + Messages = messages; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// A list of [messages](/docs/api-reference/messages) to start the thread with. + public IList Messages { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. 
+ /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs new file mode 100644 index 000000000..8b89df137 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs @@ -0,0 +1,192 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateTranscriptionRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateTranscriptionRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("file"u8); + writer.WriteBase64StringValue(File.ToArray(), "D"); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.ToString()); + if (OptionalProperty.IsDefined(Language)) + { + writer.WritePropertyName("language"u8); + writer.WriteStringValue(Language); + } + if (OptionalProperty.IsDefined(Prompt)) + { + writer.WritePropertyName("prompt"u8); + writer.WriteStringValue(Prompt); + } + if (OptionalProperty.IsDefined(ResponseFormat)) + { + writer.WritePropertyName("response_format"u8); + writer.WriteStringValue(ResponseFormat.Value.ToString()); + } + if (OptionalProperty.IsDefined(Temperature)) + { + writer.WritePropertyName("temperature"u8); + writer.WriteNumberValue(Temperature.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + 
foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateTranscriptionRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateTranscriptionRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateTranscriptionRequest(document.RootElement, options); + } + + internal static CreateTranscriptionRequest DeserializeCreateTranscriptionRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData file = default; + CreateTranscriptionRequestModel model = default; + OptionalProperty language = default; + OptionalProperty prompt = default; + OptionalProperty responseFormat = default; + OptionalProperty temperature = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("file"u8)) + { + file = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } + if (property.NameEquals("model"u8)) + { + model = new CreateTranscriptionRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("language"u8)) + { + language = property.Value.GetString(); + continue; + } + if (property.NameEquals("prompt"u8)) + { + prompt = property.Value.GetString(); + 
continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = new CreateTranscriptionRequestResponseFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("temperature"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + temperature = property.Value.GetDouble(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateTranscriptionRequest(file, model, language.Value, prompt.Value, OptionalProperty.ToNullable(responseFormat), OptionalProperty.ToNullable(temperature), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateTranscriptionRequest)} does not support '{options.Format}' format."); + } + } + + CreateTranscriptionRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateTranscriptionRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateTranscriptionRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. 
+ /// The result to deserialize the model from. + internal static CreateTranscriptionRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateTranscriptionRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs new file mode 100644 index 000000000..74d9d5ebd --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs @@ -0,0 +1,147 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateTranscriptionRequest. + public partial class CreateTranscriptionRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. + /// + /// ID of the model to use. Only `whisper-1` is currently available. + /// is null. 
+ public CreateTranscriptionRequest(BinaryData file, CreateTranscriptionRequestModel model) + { + ClientUtilities.AssertNotNull(file, nameof(file)); + + File = file; + Model = model; + } + + /// Initializes a new instance of . + /// + /// The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. + /// + /// ID of the model to use. Only `whisper-1` is currently available. + /// + /// The language of the input audio. Supplying the input language in + /// [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy + /// and latency. + /// + /// + /// An optional text to guide the model's style or continue a previous audio segment. The + /// [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// + /// + /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + /// vtt. + /// + /// + /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + /// random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + /// the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + /// automatically increase the temperature until certain thresholds are hit. + /// + /// Keeps track of any properties unknown to the library. + internal CreateTranscriptionRequest(BinaryData file, CreateTranscriptionRequestModel model, string language, string prompt, CreateTranscriptionRequestResponseFormat? responseFormat, double? temperature, IDictionary serializedAdditionalRawData) + { + File = file; + Model = model; + Language = language; + Prompt = prompt; + ResponseFormat = responseFormat; + Temperature = temperature; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. 
+ internal CreateTranscriptionRequest() + { + } + + /// + /// The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData File { get; } + /// ID of the model to use. Only `whisper-1` is currently available. + public CreateTranscriptionRequestModel Model { get; } + /// + /// The language of the input audio. Supplying the input language in + /// [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy + /// and latency. + /// + public string Language { get; set; } + /// + /// An optional text to guide the model's style or continue a previous audio segment. The + /// [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// + public string Prompt { get; set; } + /// + /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + /// vtt. + /// + public CreateTranscriptionRequestResponseFormat? ResponseFormat { get; set; } + /// + /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + /// random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + /// the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + /// automatically increase the temperature until certain thresholds are hit. + /// + public double? 
Temperature { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs new file mode 100644 index 000000000..d3c46e425 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in CreateTranscriptionRequest. + public readonly partial struct CreateTranscriptionRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateTranscriptionRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string Whisper1Value = "whisper-1"; + + /// whisper-1. + public static CreateTranscriptionRequestModel Whisper1 { get; } = new CreateTranscriptionRequestModel(Whisper1Value); + /// Determines if two values are the same. + public static bool operator ==(CreateTranscriptionRequestModel left, CreateTranscriptionRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateTranscriptionRequestModel left, CreateTranscriptionRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateTranscriptionRequestModel(string value) => new CreateTranscriptionRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateTranscriptionRequestModel other && Equals(other); + /// + public bool Equals(CreateTranscriptionRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs new file mode 100644 index 000000000..354321879 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs @@ -0,0 +1,57 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for response_format in CreateTranscriptionRequest. + public readonly partial struct CreateTranscriptionRequestResponseFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateTranscriptionRequestResponseFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string JsonValue = "json"; + private const string TextValue = "text"; + private const string SrtValue = "srt"; + private const string VerboseJsonValue = "verbose_json"; + private const string VttValue = "vtt"; + + /// json. + public static CreateTranscriptionRequestResponseFormat Json { get; } = new CreateTranscriptionRequestResponseFormat(JsonValue); + /// text. + public static CreateTranscriptionRequestResponseFormat Text { get; } = new CreateTranscriptionRequestResponseFormat(TextValue); + /// srt. + public static CreateTranscriptionRequestResponseFormat Srt { get; } = new CreateTranscriptionRequestResponseFormat(SrtValue); + /// verbose_json. + public static CreateTranscriptionRequestResponseFormat VerboseJson { get; } = new CreateTranscriptionRequestResponseFormat(VerboseJsonValue); + /// vtt. + public static CreateTranscriptionRequestResponseFormat Vtt { get; } = new CreateTranscriptionRequestResponseFormat(VttValue); + /// Determines if two values are the same. 
+ public static bool operator ==(CreateTranscriptionRequestResponseFormat left, CreateTranscriptionRequestResponseFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateTranscriptionRequestResponseFormat left, CreateTranscriptionRequestResponseFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateTranscriptionRequestResponseFormat(string value) => new CreateTranscriptionRequestResponseFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateTranscriptionRequestResponseFormat other && Equals(other); + /// + public bool Equals(CreateTranscriptionRequestResponseFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs new file mode 100644 index 000000000..7ebe36e6c --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs @@ -0,0 +1,198 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateTranscriptionResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateTranscriptionResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("text"u8); + writer.WriteStringValue(Text); + if (OptionalProperty.IsDefined(Task)) + { + writer.WritePropertyName("task"u8); + writer.WriteStringValue(Task.Value.ToString()); + } + if (OptionalProperty.IsDefined(Language)) + { + writer.WritePropertyName("language"u8); + writer.WriteStringValue(Language); + } + if (OptionalProperty.IsDefined(Duration)) + { + writer.WritePropertyName("duration"u8); + writer.WriteNumberValue(Convert.ToInt32(Duration.Value.ToString("%s"))); + } + if (OptionalProperty.IsCollectionDefined(Segments)) + { + writer.WritePropertyName("segments"u8); + writer.WriteStartArray(); + foreach (var item in Segments) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateTranscriptionResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateTranscriptionResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateTranscriptionResponse(document.RootElement, options); + } + + internal static CreateTranscriptionResponse DeserializeCreateTranscriptionResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string text = default; + OptionalProperty task = default; + OptionalProperty language = default; + OptionalProperty duration = default; + OptionalProperty> segments = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("text"u8)) + { + text = property.Value.GetString(); + continue; + } + if (property.NameEquals("task"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + task = new CreateTranscriptionResponseTask(property.Value.GetString()); + continue; + } + if (property.NameEquals("language"u8)) + { + language = property.Value.GetString(); + continue; + } + if (property.NameEquals("duration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + duration = TimeSpan.FromSeconds(property.Value.GetInt32()); + continue; + } + if (property.NameEquals("segments"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(AudioSegment.DeserializeAudioSegment(item)); + } + segments = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, 
BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateTranscriptionResponse(text, OptionalProperty.ToNullable(task), language.Value, OptionalProperty.ToNullable(duration), OptionalProperty.ToList(segments), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateTranscriptionResponse)} does not support '{options.Format}' format."); + } + } + + CreateTranscriptionResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateTranscriptionResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateTranscriptionResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateTranscriptionResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateTranscriptionResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs new file mode 100644 index 000000000..c526f24a1 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs @@ -0,0 +1,96 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateTranscriptionResponse. + public partial class CreateTranscriptionResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The transcribed text for the provided audio data. + /// is null. + internal CreateTranscriptionResponse(string text) + { + ClientUtilities.AssertNotNull(text, nameof(text)); + + Text = text; + Segments = new OptionalList(); + } + + /// Initializes a new instance of . + /// The transcribed text for the provided audio data. + /// The label that describes which operation type generated the accompanying response data. + /// The spoken language that was detected in the audio data. 
+ /// The total duration of the audio processed to produce accompanying transcription information. + /// + /// A collection of information about the timing, probabilities, and other detail of each processed + /// audio segment. + /// + /// Keeps track of any properties unknown to the library. + internal CreateTranscriptionResponse(string text, CreateTranscriptionResponseTask? task, string language, TimeSpan? duration, IReadOnlyList segments, IDictionary serializedAdditionalRawData) + { + Text = text; + Task = task; + Language = language; + Duration = duration; + Segments = segments; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateTranscriptionResponse() + { + } + + /// The transcribed text for the provided audio data. + public string Text { get; } + /// The label that describes which operation type generated the accompanying response data. + public CreateTranscriptionResponseTask? Task { get; } + /// The spoken language that was detected in the audio data. + public string Language { get; } + /// The total duration of the audio processed to produce accompanying transcription information. + public TimeSpan? Duration { get; } + /// + /// A collection of information about the timing, probabilities, and other detail of each processed + /// audio segment. + /// + public IReadOnlyList Segments { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs b/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs new file mode 100644 index 000000000..2cf79c9ae --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The CreateTranscriptionResponse_task. 
+ public readonly partial struct CreateTranscriptionResponseTask : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateTranscriptionResponseTask(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TranscribeValue = "transcribe"; + + /// transcribe. + public static CreateTranscriptionResponseTask Transcribe { get; } = new CreateTranscriptionResponseTask(TranscribeValue); + /// Determines if two values are the same. + public static bool operator ==(CreateTranscriptionResponseTask left, CreateTranscriptionResponseTask right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateTranscriptionResponseTask left, CreateTranscriptionResponseTask right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateTranscriptionResponseTask(string value) => new CreateTranscriptionResponseTask(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateTranscriptionResponseTask other && Equals(other); + /// + public bool Equals(CreateTranscriptionResponseTask other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs new file mode 100644 index 000000000..2e29df2c7 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs @@ -0,0 +1,181 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateTranslationRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateTranslationRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("file"u8); + writer.WriteBase64StringValue(File.ToArray(), "D"); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.ToString()); + if (OptionalProperty.IsDefined(Prompt)) + { + writer.WritePropertyName("prompt"u8); + writer.WriteStringValue(Prompt); + } + if (OptionalProperty.IsDefined(ResponseFormat)) + { + writer.WritePropertyName("response_format"u8); + writer.WriteStringValue(ResponseFormat.Value.ToString()); + } + if (OptionalProperty.IsDefined(Temperature)) + { + writer.WritePropertyName("temperature"u8); + writer.WriteNumberValue(Temperature.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + 
writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateTranslationRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateTranslationRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateTranslationRequest(document.RootElement, options); + } + + internal static CreateTranslationRequest DeserializeCreateTranslationRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData file = default; + CreateTranslationRequestModel model = default; + OptionalProperty prompt = default; + OptionalProperty responseFormat = default; + OptionalProperty temperature = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("file"u8)) + { + file = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } + if (property.NameEquals("model"u8)) + { + model = new CreateTranslationRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("prompt"u8)) + { + prompt = property.Value.GetString(); + continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = new CreateTranslationRequestResponseFormat(property.Value.GetString()); + continue; + } + if 
(property.NameEquals("temperature"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + temperature = property.Value.GetDouble(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateTranslationRequest(file, model, prompt.Value, OptionalProperty.ToNullable(responseFormat), OptionalProperty.ToNullable(temperature), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateTranslationRequest)} does not support '{options.Format}' format."); + } + } + + CreateTranslationRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateTranslationRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateTranslationRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateTranslationRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateTranslationRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequest.cs b/.dotnet/src/Generated/Models/CreateTranslationRequest.cs new file mode 100644 index 000000000..6ee7db49a --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranslationRequest.cs @@ -0,0 +1,135 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateTranslationRequest. + public partial class CreateTranslationRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. + /// + /// ID of the model to use. Only `whisper-1` is currently available. + /// is null. + public CreateTranslationRequest(BinaryData file, CreateTranslationRequestModel model) + { + ClientUtilities.AssertNotNull(file, nameof(file)); + + File = file; + Model = model; + } + + /// Initializes a new instance of . 
+ /// + /// The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. + /// + /// ID of the model to use. Only `whisper-1` is currently available. + /// + /// An optional text to guide the model's style or continue a previous audio segment. The + /// [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// + /// + /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + /// vtt. + /// + /// + /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + /// random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + /// the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + /// automatically increase the temperature until certain thresholds are hit. + /// + /// Keeps track of any properties unknown to the library. + internal CreateTranslationRequest(BinaryData file, CreateTranslationRequestModel model, string prompt, CreateTranslationRequestResponseFormat? responseFormat, double? temperature, IDictionary serializedAdditionalRawData) + { + File = file; + Model = model; + Prompt = prompt; + ResponseFormat = responseFormat; + Temperature = temperature; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateTranslationRequest() + { + } + + /// + /// The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData File { get; } + /// ID of the model to use. 
Only `whisper-1` is currently available. + public CreateTranslationRequestModel Model { get; } + /// + /// An optional text to guide the model's style or continue a previous audio segment. The + /// [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// + public string Prompt { get; set; } + /// + /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + /// vtt. + /// + public CreateTranslationRequestResponseFormat? ResponseFormat { get; set; } + /// + /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + /// random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + /// the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + /// automatically increase the temperature until certain thresholds are hit. + /// + public double? Temperature { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs b/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs new file mode 100644 index 000000000..b9d4c7076 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in CreateTranslationRequest. + public readonly partial struct CreateTranslationRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateTranslationRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string Whisper1Value = "whisper-1"; + + /// whisper-1. + public static CreateTranslationRequestModel Whisper1 { get; } = new CreateTranslationRequestModel(Whisper1Value); + /// Determines if two values are the same. 
+ public static bool operator ==(CreateTranslationRequestModel left, CreateTranslationRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateTranslationRequestModel left, CreateTranslationRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateTranslationRequestModel(string value) => new CreateTranslationRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateTranslationRequestModel other && Equals(other); + /// + public bool Equals(CreateTranslationRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs new file mode 100644 index 000000000..b24279e3c --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs @@ -0,0 +1,57 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for response_format in CreateTranslationRequest. + public readonly partial struct CreateTranslationRequestResponseFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateTranslationRequestResponseFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string JsonValue = "json"; + private const string TextValue = "text"; + private const string SrtValue = "srt"; + private const string VerboseJsonValue = "verbose_json"; + private const string VttValue = "vtt"; + + /// json. 
+ public static CreateTranslationRequestResponseFormat Json { get; } = new CreateTranslationRequestResponseFormat(JsonValue); + /// text. + public static CreateTranslationRequestResponseFormat Text { get; } = new CreateTranslationRequestResponseFormat(TextValue); + /// srt. + public static CreateTranslationRequestResponseFormat Srt { get; } = new CreateTranslationRequestResponseFormat(SrtValue); + /// verbose_json. + public static CreateTranslationRequestResponseFormat VerboseJson { get; } = new CreateTranslationRequestResponseFormat(VerboseJsonValue); + /// vtt. + public static CreateTranslationRequestResponseFormat Vtt { get; } = new CreateTranslationRequestResponseFormat(VttValue); + /// Determines if two values are the same. + public static bool operator ==(CreateTranslationRequestResponseFormat left, CreateTranslationRequestResponseFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateTranslationRequestResponseFormat left, CreateTranslationRequestResponseFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateTranslationRequestResponseFormat(string value) => new CreateTranslationRequestResponseFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateTranslationRequestResponseFormat other && Equals(other); + /// + public bool Equals(CreateTranslationRequestResponseFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs new file mode 100644 index 000000000..917fbfd99 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs @@ -0,0 +1,198 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class CreateTranslationResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateTranslationResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("text"u8); + writer.WriteStringValue(Text); + if (OptionalProperty.IsDefined(Task)) + { + writer.WritePropertyName("task"u8); + writer.WriteStringValue(Task.Value.ToString()); + } + if (OptionalProperty.IsDefined(Language)) + { + writer.WritePropertyName("language"u8); + writer.WriteStringValue(Language); + } + if (OptionalProperty.IsDefined(Duration)) + { + writer.WritePropertyName("duration"u8); + writer.WriteNumberValue(Convert.ToInt32(Duration.Value.ToString("%s"))); + } + if (OptionalProperty.IsCollectionDefined(Segments)) + { + writer.WritePropertyName("segments"u8); + writer.WriteStartArray(); + foreach (var item in Segments) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach 
(var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateTranslationResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateTranslationResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateTranslationResponse(document.RootElement, options); + } + + internal static CreateTranslationResponse DeserializeCreateTranslationResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string text = default; + OptionalProperty task = default; + OptionalProperty language = default; + OptionalProperty duration = default; + OptionalProperty> segments = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("text"u8)) + { + text = property.Value.GetString(); + continue; + } + if (property.NameEquals("task"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + task = new CreateTranslationResponseTask(property.Value.GetString()); + continue; + } + if (property.NameEquals("language"u8)) + { + language = property.Value.GetString(); + continue; + } + if (property.NameEquals("duration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + 
duration = TimeSpan.FromSeconds(property.Value.GetInt32()); + continue; + } + if (property.NameEquals("segments"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(AudioSegment.DeserializeAudioSegment(item)); + } + segments = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateTranslationResponse(text, OptionalProperty.ToNullable(task), language.Value, OptionalProperty.ToNullable(duration), OptionalProperty.ToList(segments), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateTranslationResponse)} does not support '{options.Format}' format."); + } + } + + CreateTranslationResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateTranslationResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateTranslationResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. 
+ internal static CreateTranslationResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateTranslationResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranslationResponse.cs b/.dotnet/src/Generated/Models/CreateTranslationResponse.cs new file mode 100644 index 000000000..c6c9ea458 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranslationResponse.cs @@ -0,0 +1,96 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The CreateTranslationResponse. + public partial class CreateTranslationResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The translated text for the provided audio data. + /// is null. + internal CreateTranslationResponse(string text) + { + ClientUtilities.AssertNotNull(text, nameof(text)); + + Text = text; + Segments = new OptionalList(); + } + + /// Initializes a new instance of . 
+ /// The translated text for the provided audio data. + /// The label that describes which operation type generated the accompanying response data. + /// The spoken language that was detected in the audio data. + /// The total duration of the audio processed to produce accompanying translation information. + /// + /// A collection of information about the timing, probabilities, and other detail of each processed + /// audio segment. + /// + /// Keeps track of any properties unknown to the library. + internal CreateTranslationResponse(string text, CreateTranslationResponseTask? task, string language, TimeSpan? duration, IReadOnlyList segments, IDictionary serializedAdditionalRawData) + { + Text = text; + Task = task; + Language = language; + Duration = duration; + Segments = segments; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateTranslationResponse() + { + } + + /// The translated text for the provided audio data. + public string Text { get; } + /// The label that describes which operation type generated the accompanying response data. + public CreateTranslationResponseTask? Task { get; } + /// The spoken language that was detected in the audio data. + public string Language { get; } + /// The total duration of the audio processed to produce accompanying translation information. + public TimeSpan? Duration { get; } + /// + /// A collection of information about the timing, probabilities, and other detail of each processed + /// audio segment. 
+ /// + public IReadOnlyList Segments { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs b/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs new file mode 100644 index 000000000..4104c9b1c --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The CreateTranslationResponse_task. + public readonly partial struct CreateTranslationResponseTask : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateTranslationResponseTask(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TranslateValue = "translate"; + + /// translate. + public static CreateTranslationResponseTask Translate { get; } = new CreateTranslationResponseTask(TranslateValue); + /// Determines if two values are the same. + public static bool operator ==(CreateTranslationResponseTask left, CreateTranslationResponseTask right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateTranslationResponseTask left, CreateTranslationResponseTask right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateTranslationResponseTask(string value) => new CreateTranslationResponseTask(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateTranslationResponseTask other && Equals(other); + /// + public bool Equals(CreateTranslationResponseTask other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs new file mode 100644 index 000000000..2f92de865 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs @@ -0,0 +1,148 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class DeleteAssistantFileResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteAssistantFileResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("deleted"u8); + writer.WriteBooleanValue(Deleted); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + DeleteAssistantFileResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteAssistantFileResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDeleteAssistantFileResponse(document.RootElement, options); + } + + internal static DeleteAssistantFileResponse DeserializeDeleteAssistantFileResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + bool deleted = default; + DeleteAssistantFileResponseObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("deleted"u8)) + { + deleted = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new DeleteAssistantFileResponseObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new DeleteAssistantFileResponse(id, deleted, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DeleteAssistantFileResponse)} does not support '{options.Format}' format."); + } + } + + DeleteAssistantFileResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeDeleteAssistantFileResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DeleteAssistantFileResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static DeleteAssistantFileResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeDeleteAssistantFileResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs new file mode 100644 index 000000000..478d175d2 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs @@ -0,0 +1,86 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// + /// Deletes the association between the assistant and the file, but does not delete the + /// [File](/docs/api-reference/files) object itself. + /// + public partial class DeleteAssistantFileResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// is null. + internal DeleteAssistantFileResponse(string id, bool deleted) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + + Id = id; + Deleted = deleted; + } + + /// Initializes a new instance of . + /// + /// + /// + /// Keeps track of any properties unknown to the library. 
+ internal DeleteAssistantFileResponse(string id, bool deleted, DeleteAssistantFileResponseObject @object, IDictionary serializedAdditionalRawData) + { + Id = id; + Deleted = deleted; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal DeleteAssistantFileResponse() + { + } + + /// Gets the id. + public string Id { get; } + /// Gets the deleted. + public bool Deleted { get; } + /// Gets the object. + public DeleteAssistantFileResponseObject Object { get; } = DeleteAssistantFileResponseObject.AssistantFileDeleted; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs b/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs new file mode 100644 index 000000000..8f9d146db --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The DeleteAssistantFileResponse_object. + public readonly partial struct DeleteAssistantFileResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DeleteAssistantFileResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AssistantFileDeletedValue = "assistant.file.deleted"; + + /// assistant.file.deleted. + public static DeleteAssistantFileResponseObject AssistantFileDeleted { get; } = new DeleteAssistantFileResponseObject(AssistantFileDeletedValue); + /// Determines if two values are the same. + public static bool operator ==(DeleteAssistantFileResponseObject left, DeleteAssistantFileResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. 
+ public static bool operator !=(DeleteAssistantFileResponseObject left, DeleteAssistantFileResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator DeleteAssistantFileResponseObject(string value) => new DeleteAssistantFileResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DeleteAssistantFileResponseObject other && Equals(other); + /// + public bool Equals(DeleteAssistantFileResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs new file mode 100644 index 000000000..da95808b9 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs @@ -0,0 +1,148 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class DeleteAssistantResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteAssistantResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("deleted"u8); + writer.WriteBooleanValue(Deleted); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + DeleteAssistantResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteAssistantResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDeleteAssistantResponse(document.RootElement, options); + } + + internal static DeleteAssistantResponse DeserializeDeleteAssistantResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + bool deleted = default; + DeleteAssistantResponseObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("deleted"u8)) + { + deleted = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new DeleteAssistantResponseObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new DeleteAssistantResponse(id, deleted, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DeleteAssistantResponse)} does not support '{options.Format}' format."); + } + } + + DeleteAssistantResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeDeleteAssistantResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DeleteAssistantResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static DeleteAssistantResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeDeleteAssistantResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs b/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs new file mode 100644 index 000000000..059769a28 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs @@ -0,0 +1,83 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The DeleteAssistantResponse. 
+ public partial class DeleteAssistantResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// is null. + internal DeleteAssistantResponse(string id, bool deleted) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + + Id = id; + Deleted = deleted; + } + + /// Initializes a new instance of . + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal DeleteAssistantResponse(string id, bool deleted, DeleteAssistantResponseObject @object, IDictionary serializedAdditionalRawData) + { + Id = id; + Deleted = deleted; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal DeleteAssistantResponse() + { + } + + /// Gets the id. + public string Id { get; } + /// Gets the deleted. + public bool Deleted { get; } + /// Gets the object. 
+ public DeleteAssistantResponseObject Object { get; } = DeleteAssistantResponseObject.AssistantDeleted; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs b/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs new file mode 100644 index 000000000..7c80077ca --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The DeleteAssistantResponse_object. + public readonly partial struct DeleteAssistantResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DeleteAssistantResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AssistantDeletedValue = "assistant.deleted"; + + /// assistant.deleted. + public static DeleteAssistantResponseObject AssistantDeleted { get; } = new DeleteAssistantResponseObject(AssistantDeletedValue); + /// Determines if two values are the same. + public static bool operator ==(DeleteAssistantResponseObject left, DeleteAssistantResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DeleteAssistantResponseObject left, DeleteAssistantResponseObject right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator DeleteAssistantResponseObject(string value) => new DeleteAssistantResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DeleteAssistantResponseObject other && Equals(other); + /// + public bool Equals(DeleteAssistantResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs new file mode 100644 index 000000000..6ed78cf28 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs @@ -0,0 +1,148 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class DeleteFileResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteFileResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("deleted"u8); + writer.WriteBooleanValue(Deleted); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + DeleteFileResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteFileResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDeleteFileResponse(document.RootElement, options); + } + + internal static DeleteFileResponse DeserializeDeleteFileResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + DeleteFileResponseObject @object = default; + bool deleted = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new DeleteFileResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("deleted"u8)) + { + deleted = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new DeleteFileResponse(id, @object, deleted, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DeleteFileResponse)} does not support '{options.Format}' format."); + } + } + + DeleteFileResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeDeleteFileResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DeleteFileResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static DeleteFileResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeDeleteFileResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/DeleteFileResponse.cs b/.dotnet/src/Generated/Models/DeleteFileResponse.cs new file mode 100644 index 000000000..202cb2637 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteFileResponse.cs @@ -0,0 +1,84 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The DeleteFileResponse. + public partial class DeleteFileResponse + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// is null. + internal DeleteFileResponse(string id, bool deleted) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + + Id = id; + Deleted = deleted; + } + + /// Initializes a new instance of . + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal DeleteFileResponse(string id, DeleteFileResponseObject @object, bool deleted, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + Deleted = deleted; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal DeleteFileResponse() + { + } + + /// Gets the id. + public string Id { get; } + /// Gets the object. + public DeleteFileResponseObject Object { get; } = DeleteFileResponseObject.File; + + /// Gets the deleted. + public bool Deleted { get; } + } +} diff --git a/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs b/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs new file mode 100644 index 000000000..8b8080a6a --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The DeleteFileResponse_object. 
+ public readonly partial struct DeleteFileResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DeleteFileResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FileValue = "file"; + + /// file. + public static DeleteFileResponseObject File { get; } = new DeleteFileResponseObject(FileValue); + /// Determines if two values are the same. + public static bool operator ==(DeleteFileResponseObject left, DeleteFileResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DeleteFileResponseObject left, DeleteFileResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator DeleteFileResponseObject(string value) => new DeleteFileResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DeleteFileResponseObject other && Equals(other); + /// + public bool Equals(DeleteFileResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs new file mode 100644 index 000000000..a16d26d8b --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs @@ -0,0 +1,148 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class DeleteModelResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteModelResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("deleted"u8); + writer.WriteBooleanValue(Deleted); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + DeleteModelResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteModelResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDeleteModelResponse(document.RootElement, options); + } + + internal static DeleteModelResponse DeserializeDeleteModelResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + bool deleted = default; + DeleteModelResponseObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("deleted"u8)) + { + deleted = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new DeleteModelResponseObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new DeleteModelResponse(id, deleted, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DeleteModelResponse)} does not support '{options.Format}' format."); + } + } + + DeleteModelResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeDeleteModelResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DeleteModelResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static DeleteModelResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeDeleteModelResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/DeleteModelResponse.cs b/.dotnet/src/Generated/Models/DeleteModelResponse.cs new file mode 100644 index 000000000..fc8a2790b --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteModelResponse.cs @@ -0,0 +1,83 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The DeleteModelResponse. + public partial class DeleteModelResponse + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// is null. + internal DeleteModelResponse(string id, bool deleted) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + + Id = id; + Deleted = deleted; + } + + /// Initializes a new instance of . + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal DeleteModelResponse(string id, bool deleted, DeleteModelResponseObject @object, IDictionary serializedAdditionalRawData) + { + Id = id; + Deleted = deleted; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal DeleteModelResponse() + { + } + + /// Gets the id. + public string Id { get; } + /// Gets the deleted. + public bool Deleted { get; } + /// Gets the object. + public DeleteModelResponseObject Object { get; } = DeleteModelResponseObject.Model; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs b/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs new file mode 100644 index 000000000..ec25f197b --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The DeleteModelResponse_object. 
+ public readonly partial struct DeleteModelResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DeleteModelResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ModelValue = "model"; + + /// model. + public static DeleteModelResponseObject Model { get; } = new DeleteModelResponseObject(ModelValue); + /// Determines if two values are the same. + public static bool operator ==(DeleteModelResponseObject left, DeleteModelResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DeleteModelResponseObject left, DeleteModelResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator DeleteModelResponseObject(string value) => new DeleteModelResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DeleteModelResponseObject other && Equals(other); + /// + public bool Equals(DeleteModelResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs new file mode 100644 index 000000000..f287add9c --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs @@ -0,0 +1,148 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class DeleteThreadResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteThreadResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("deleted"u8); + writer.WriteBooleanValue(Deleted); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + DeleteThreadResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteThreadResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDeleteThreadResponse(document.RootElement, options); + } + + internal static DeleteThreadResponse DeserializeDeleteThreadResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + bool deleted = default; + DeleteThreadResponseObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("deleted"u8)) + { + deleted = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new DeleteThreadResponseObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new DeleteThreadResponse(id, deleted, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DeleteThreadResponse)} does not support '{options.Format}' format."); + } + } + + DeleteThreadResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeDeleteThreadResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DeleteThreadResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static DeleteThreadResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeDeleteThreadResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/DeleteThreadResponse.cs b/.dotnet/src/Generated/Models/DeleteThreadResponse.cs new file mode 100644 index 000000000..740de89a5 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteThreadResponse.cs @@ -0,0 +1,83 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The DeleteThreadResponse. + public partial class DeleteThreadResponse + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// is null. + internal DeleteThreadResponse(string id, bool deleted) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + + Id = id; + Deleted = deleted; + } + + /// Initializes a new instance of . + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal DeleteThreadResponse(string id, bool deleted, DeleteThreadResponseObject @object, IDictionary serializedAdditionalRawData) + { + Id = id; + Deleted = deleted; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal DeleteThreadResponse() + { + } + + /// Gets the id. + public string Id { get; } + /// Gets the deleted. + public bool Deleted { get; } + /// Gets the object. + public DeleteThreadResponseObject Object { get; } = DeleteThreadResponseObject.ThreadDeleted; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs b/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs new file mode 100644 index 000000000..87838dd02 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The DeleteThreadResponse_object. 
+ public readonly partial struct DeleteThreadResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DeleteThreadResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ThreadDeletedValue = "thread.deleted"; + + /// thread.deleted. + public static DeleteThreadResponseObject ThreadDeleted { get; } = new DeleteThreadResponseObject(ThreadDeletedValue); + /// Determines if two values are the same. + public static bool operator ==(DeleteThreadResponseObject left, DeleteThreadResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DeleteThreadResponseObject left, DeleteThreadResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator DeleteThreadResponseObject(string value) => new DeleteThreadResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DeleteThreadResponseObject other && Equals(other); + /// + public bool Equals(DeleteThreadResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/Embedding.Serialization.cs b/.dotnet/src/Generated/Models/Embedding.Serialization.cs new file mode 100644 index 000000000..cdb14b425 --- /dev/null +++ b/.dotnet/src/Generated/Models/Embedding.Serialization.cs @@ -0,0 +1,155 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class Embedding : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(Embedding)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("index"u8); + writer.WriteNumberValue(Index); + writer.WritePropertyName("embedding"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(EmbeddingProperty); +#else + using (JsonDocument document = JsonDocument.Parse(EmbeddingProperty)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + Embedding IJsonModel.Create(ref Utf8JsonReader reader, 
ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(Embedding)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeEmbedding(document.RootElement, options); + } + + internal static Embedding DeserializeEmbedding(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long index = default; + BinaryData embedding = default; + EmbeddingObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("index"u8)) + { + index = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("embedding"u8)) + { + embedding = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new EmbeddingObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new Embedding(index, embedding, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(Embedding)} does not support '{options.Format}' format."); + } + } + + Embedding IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeEmbedding(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(Embedding)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static Embedding FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeEmbedding(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/Embedding.cs b/.dotnet/src/Generated/Models/Embedding.cs new file mode 100644 index 000000000..40eba7486 --- /dev/null +++ b/.dotnet/src/Generated/Models/Embedding.cs @@ -0,0 +1,130 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// Represents an embedding vector returned by embedding endpoint. + public partial class Embedding + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . 
+ /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The index of the embedding in the list of embeddings. + /// + /// The embedding vector, which is a list of floats. The length of vector depends on the model as + /// listed in the [embedding guide](/docs/guides/embeddings). + /// + /// is null. + internal Embedding(long index, BinaryData embeddingProperty) + { + ClientUtilities.AssertNotNull(embeddingProperty, nameof(embeddingProperty)); + + Index = index; + EmbeddingProperty = embeddingProperty; + } + + /// Initializes a new instance of . + /// The index of the embedding in the list of embeddings. + /// + /// The embedding vector, which is a list of floats. The length of vector depends on the model as + /// listed in the [embedding guide](/docs/guides/embeddings). + /// + /// The object type, which is always "embedding". + /// Keeps track of any properties unknown to the library. + internal Embedding(long index, BinaryData embeddingProperty, EmbeddingObject @object, IDictionary serializedAdditionalRawData) + { + Index = index; + EmbeddingProperty = embeddingProperty; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal Embedding() + { + } + + /// The index of the embedding in the list of embeddings. + public long Index { get; } + /// + /// The embedding vector, which is a list of floats. 
The length of vector depends on the model as + /// listed in the [embedding guide](/docs/guides/embeddings). + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// where T is of type + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData EmbeddingProperty { get; } + /// The object type, which is always "embedding". + public EmbeddingObject Object { get; } = EmbeddingObject.Embedding; + } +} diff --git a/.dotnet/src/Generated/Models/EmbeddingObject.cs b/.dotnet/src/Generated/Models/EmbeddingObject.cs new file mode 100644 index 000000000..1a6df67a6 --- /dev/null +++ b/.dotnet/src/Generated/Models/EmbeddingObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The Embedding_object. + public readonly partial struct EmbeddingObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public EmbeddingObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string EmbeddingValue = "embedding"; + + /// embedding. + public static EmbeddingObject Embedding { get; } = new EmbeddingObject(EmbeddingValue); + /// Determines if two values are the same. + public static bool operator ==(EmbeddingObject left, EmbeddingObject right) => left.Equals(right); + /// Determines if two values are not the same. 
+ public static bool operator !=(EmbeddingObject left, EmbeddingObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator EmbeddingObject(string value) => new EmbeddingObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is EmbeddingObject other && Equals(other); + /// + public bool Equals(EmbeddingObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/FineTune.Serialization.cs b/.dotnet/src/Generated/Models/FineTune.Serialization.cs new file mode 100644 index 000000000..cdbb27bb0 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTune.Serialization.cs @@ -0,0 +1,287 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class FineTune : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTune)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("updated_at"u8); + writer.WriteNumberValue(UpdatedAt, "U"); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + if (FineTunedModel != null) + { + writer.WritePropertyName("fine_tuned_model"u8); + writer.WriteStringValue(FineTunedModel); + } + else + { + writer.WriteNull("fine_tuned_model"); + } + writer.WritePropertyName("organization_id"u8); + writer.WriteStringValue(OrganizationId); + writer.WritePropertyName("status"u8); + writer.WriteStringValue(Status.ToString()); + writer.WritePropertyName("hyperparams"u8); + writer.WriteObjectValue(Hyperparams); + writer.WritePropertyName("training_files"u8); + writer.WriteStartArray(); + foreach (var item in TrainingFiles) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("validation_files"u8); + writer.WriteStartArray(); + foreach (var item in ValidationFiles) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("result_files"u8); + writer.WriteStartArray(); + foreach (var item in ResultFiles) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (OptionalProperty.IsCollectionDefined(Events)) + { + writer.WritePropertyName("events"u8); + writer.WriteStartArray(); + foreach (var item in Events) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + 
writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + FineTune IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTune)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFineTune(document.RootElement, options); + } + + internal static FineTune DeserializeFineTune(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + FineTuneObject @object = default; + DateTimeOffset createdAt = default; + DateTimeOffset updatedAt = default; + string model = default; + string fineTunedModel = default; + string organizationId = default; + FineTuneStatus status = default; + FineTuneHyperparams hyperparams = default; + IReadOnlyList trainingFiles = default; + IReadOnlyList validationFiles = default; + IReadOnlyList resultFiles = default; + OptionalProperty> events = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new FineTuneObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if 
(property.NameEquals("updated_at"u8)) + { + updatedAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("fine_tuned_model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + fineTunedModel = null; + continue; + } + fineTunedModel = property.Value.GetString(); + continue; + } + if (property.NameEquals("organization_id"u8)) + { + organizationId = property.Value.GetString(); + continue; + } + if (property.NameEquals("status"u8)) + { + status = new FineTuneStatus(property.Value.GetString()); + continue; + } + if (property.NameEquals("hyperparams"u8)) + { + hyperparams = FineTuneHyperparams.DeserializeFineTuneHyperparams(property.Value); + continue; + } + if (property.NameEquals("training_files"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(OpenAIFile.DeserializeOpenAIFile(item)); + } + trainingFiles = array; + continue; + } + if (property.NameEquals("validation_files"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(OpenAIFile.DeserializeOpenAIFile(item)); + } + validationFiles = array; + continue; + } + if (property.NameEquals("result_files"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(OpenAIFile.DeserializeOpenAIFile(item)); + } + resultFiles = array; + continue; + } + if (property.NameEquals("events"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(FineTuneEvent.DeserializeFineTuneEvent(item)); + } + events = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + 
serializedAdditionalRawData = additionalPropertiesDictionary; + return new FineTune(id, @object, createdAt, updatedAt, model, fineTunedModel, organizationId, status, hyperparams, trainingFiles, validationFiles, resultFiles, OptionalProperty.ToList(events), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FineTune)} does not support '{options.Format}' format."); + } + } + + FineTune IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFineTune(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FineTune)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static FineTune FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFineTune(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FineTune.cs b/.dotnet/src/Generated/Models/FineTune.cs new file mode 100644 index 000000000..6300b6a99 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTune.cs @@ -0,0 +1,169 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The `FineTune` object represents a legacy fine-tune job that has been created through the API. + [Obsolete("deprecated")] + public partial class FineTune + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The object identifier, which can be referenced in the API endpoints. + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + /// The Unix timestamp (in seconds) for when the fine-tuning job was last updated. + /// The base model that is being fine-tuned. + /// The name of the fine-tuned model that is being created. + /// The organization that owns the fine-tuning job. 
+ /// + /// The current status of the fine-tuning job, which can be either `created`, `running`, + /// `succeeded`, `failed`, or `cancelled`. + /// + /// + /// The hyperparameters used for the fine-tuning job. See the + /// [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. + /// + /// The list of files used for training. + /// The list of files used for validation. + /// The compiled results files for the fine-tuning job. + /// , , , , , or is null. + internal FineTune(string id, DateTimeOffset createdAt, DateTimeOffset updatedAt, string model, string fineTunedModel, string organizationId, FineTuneStatus status, FineTuneHyperparams hyperparams, IEnumerable trainingFiles, IEnumerable validationFiles, IEnumerable resultFiles) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(model, nameof(model)); + ClientUtilities.AssertNotNull(organizationId, nameof(organizationId)); + ClientUtilities.AssertNotNull(hyperparams, nameof(hyperparams)); + ClientUtilities.AssertNotNull(trainingFiles, nameof(trainingFiles)); + ClientUtilities.AssertNotNull(validationFiles, nameof(validationFiles)); + ClientUtilities.AssertNotNull(resultFiles, nameof(resultFiles)); + + Id = id; + CreatedAt = createdAt; + UpdatedAt = updatedAt; + Model = model; + FineTunedModel = fineTunedModel; + OrganizationId = organizationId; + Status = status; + Hyperparams = hyperparams; + TrainingFiles = trainingFiles.ToList(); + ValidationFiles = validationFiles.ToList(); + ResultFiles = resultFiles.ToList(); + Events = new OptionalList(); + } + + /// Initializes a new instance of . + /// The object identifier, which can be referenced in the API endpoints. + /// The object type, which is always "fine-tune". + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + /// The Unix timestamp (in seconds) for when the fine-tuning job was last updated. + /// The base model that is being fine-tuned. 
+ /// The name of the fine-tuned model that is being created. + /// The organization that owns the fine-tuning job. + /// + /// The current status of the fine-tuning job, which can be either `created`, `running`, + /// `succeeded`, `failed`, or `cancelled`. + /// + /// + /// The hyperparameters used for the fine-tuning job. See the + /// [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. + /// + /// The list of files used for training. + /// The list of files used for validation. + /// The compiled results files for the fine-tuning job. + /// The list of events that have been observed in the lifecycle of the FineTune job. + /// Keeps track of any properties unknown to the library. + internal FineTune(string id, FineTuneObject @object, DateTimeOffset createdAt, DateTimeOffset updatedAt, string model, string fineTunedModel, string organizationId, FineTuneStatus status, FineTuneHyperparams hyperparams, IReadOnlyList trainingFiles, IReadOnlyList validationFiles, IReadOnlyList resultFiles, IReadOnlyList events, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + UpdatedAt = updatedAt; + Model = model; + FineTunedModel = fineTunedModel; + OrganizationId = organizationId; + Status = status; + Hyperparams = hyperparams; + TrainingFiles = trainingFiles; + ValidationFiles = validationFiles; + ResultFiles = resultFiles; + Events = events; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal FineTune() + { + } + + /// The object identifier, which can be referenced in the API endpoints. + public string Id { get; } + /// The object type, which is always "fine-tune". + public FineTuneObject Object { get; } = FineTuneObject.FineTune; + + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. 
+ public DateTimeOffset CreatedAt { get; } + /// The Unix timestamp (in seconds) for when the fine-tuning job was last updated. + public DateTimeOffset UpdatedAt { get; } + /// The base model that is being fine-tuned. + public string Model { get; } + /// The name of the fine-tuned model that is being created. + public string FineTunedModel { get; } + /// The organization that owns the fine-tuning job. + public string OrganizationId { get; } + /// + /// The current status of the fine-tuning job, which can be either `created`, `running`, + /// `succeeded`, `failed`, or `cancelled`. + /// + public FineTuneStatus Status { get; } + /// + /// The hyperparameters used for the fine-tuning job. See the + /// [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. + /// + public FineTuneHyperparams Hyperparams { get; } + /// The list of files used for training. + public IReadOnlyList TrainingFiles { get; } + /// The list of files used for validation. + public IReadOnlyList ValidationFiles { get; } + /// The compiled results files for the fine-tuning job. + public IReadOnlyList ResultFiles { get; } + /// The list of events that have been observed in the lifecycle of the FineTune job. 
+ public IReadOnlyList Events { get; } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuneEvent.Serialization.cs b/.dotnet/src/Generated/Models/FineTuneEvent.Serialization.cs new file mode 100644 index 000000000..185b9830f --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuneEvent.Serialization.cs @@ -0,0 +1,156 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class FineTuneEvent : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuneEvent)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("level"u8); + writer.WriteStringValue(Level); + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + FineTuneEvent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuneEvent)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFineTuneEvent(document.RootElement, options); + } + + internal static FineTuneEvent DeserializeFineTuneEvent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string @object = default; + DateTimeOffset createdAt = default; + string level = default; + string message = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = property.Value.GetString(); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("level"u8)) + { + level = property.Value.GetString(); + continue; + } + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new FineTuneEvent(@object, createdAt, level, message, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FineTuneEvent)} does not support '{options.Format}' format."); + } + } + + FineTuneEvent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFineTuneEvent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FineTuneEvent)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static FineTuneEvent FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFineTuneEvent(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuneEvent.cs b/.dotnet/src/Generated/Models/FineTuneEvent.cs new file mode 100644 index 000000000..dcf13faa4 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuneEvent.cs @@ -0,0 +1,93 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The FineTuneEvent. + public partial class FineTuneEvent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . 
+ /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , or is null. + internal FineTuneEvent(string @object, DateTimeOffset createdAt, string level, string message) + { + ClientUtilities.AssertNotNull(@object, nameof(@object)); + ClientUtilities.AssertNotNull(level, nameof(level)); + ClientUtilities.AssertNotNull(message, nameof(message)); + + Object = @object; + CreatedAt = createdAt; + Level = level; + Message = message; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal FineTuneEvent(string @object, DateTimeOffset createdAt, string level, string message, IDictionary serializedAdditionalRawData) + { + Object = @object; + CreatedAt = createdAt; + Level = level; + Message = message; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal FineTuneEvent() + { + } + + /// Gets the object. + public string Object { get; } + /// Gets the created at. + public DateTimeOffset CreatedAt { get; } + /// Gets the level. + public string Level { get; } + /// Gets the message. 
+ public string Message { get; } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuneHyperparams.Serialization.cs b/.dotnet/src/Generated/Models/FineTuneHyperparams.Serialization.cs new file mode 100644 index 000000000..8e6588b67 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuneHyperparams.Serialization.cs @@ -0,0 +1,197 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class FineTuneHyperparams : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuneHyperparams)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("n_epochs"u8); + writer.WriteNumberValue(NEpochs); + writer.WritePropertyName("batch_size"u8); + writer.WriteNumberValue(BatchSize); + writer.WritePropertyName("prompt_loss_weight"u8); + writer.WriteNumberValue(PromptLossWeight); + writer.WritePropertyName("learning_rate_multiplier"u8); + writer.WriteNumberValue(LearningRateMultiplier); + if (OptionalProperty.IsDefined(ComputeClassificationMetrics)) + { + writer.WritePropertyName("compute_classification_metrics"u8); + writer.WriteBooleanValue(ComputeClassificationMetrics.Value); + } + if (OptionalProperty.IsDefined(ClassificationPositiveClass)) + { + writer.WritePropertyName("classification_positive_class"u8); + writer.WriteStringValue(ClassificationPositiveClass); + } + if (OptionalProperty.IsDefined(ClassificationNClasses)) + { + writer.WritePropertyName("classification_n_classes"u8); + 
writer.WriteNumberValue(ClassificationNClasses.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + FineTuneHyperparams IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuneHyperparams)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFineTuneHyperparams(document.RootElement, options); + } + + internal static FineTuneHyperparams DeserializeFineTuneHyperparams(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long nEpochs = default; + long batchSize = default; + double promptLossWeight = default; + double learningRateMultiplier = default; + OptionalProperty computeClassificationMetrics = default; + OptionalProperty classificationPositiveClass = default; + OptionalProperty classificationNClasses = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("n_epochs"u8)) + { + nEpochs = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("batch_size"u8)) + { + batchSize = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("prompt_loss_weight"u8)) + { + promptLossWeight = 
property.Value.GetDouble(); + continue; + } + if (property.NameEquals("learning_rate_multiplier"u8)) + { + learningRateMultiplier = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("compute_classification_metrics"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + computeClassificationMetrics = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("classification_positive_class"u8)) + { + classificationPositiveClass = property.Value.GetString(); + continue; + } + if (property.NameEquals("classification_n_classes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + classificationNClasses = property.Value.GetInt64(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new FineTuneHyperparams(nEpochs, batchSize, promptLossWeight, learningRateMultiplier, OptionalProperty.ToNullable(computeClassificationMetrics), classificationPositiveClass.Value, OptionalProperty.ToNullable(classificationNClasses), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FineTuneHyperparams)} does not support '{options.Format}' format."); + } + } + + FineTuneHyperparams IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFineTuneHyperparams(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FineTuneHyperparams)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static FineTuneHyperparams FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFineTuneHyperparams(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuneHyperparams.cs b/.dotnet/src/Generated/Models/FineTuneHyperparams.cs new file mode 100644 index 000000000..f45f7fe7a --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuneHyperparams.cs @@ -0,0 +1,117 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The FineTuneHyperparams. + public partial class FineTuneHyperparams + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. 
+ /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// + /// The batch size to use for training. The batch size is the number of training examples used to + /// train a single forward and backward pass. + /// + /// The weight to use for loss on the prompt tokens. + /// The learning rate multiplier to use for training. + internal FineTuneHyperparams(long nEpochs, long batchSize, double promptLossWeight, double learningRateMultiplier) + { + NEpochs = nEpochs; + BatchSize = batchSize; + PromptLossWeight = promptLossWeight; + LearningRateMultiplier = learningRateMultiplier; + } + + /// Initializes a new instance of . + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// + /// The batch size to use for training. The batch size is the number of training examples used to + /// train a single forward and backward pass. + /// + /// The weight to use for loss on the prompt tokens. + /// The learning rate multiplier to use for training. + /// The classification metrics to compute using the validation dataset at the end of every epoch. + /// The positive class to use for computing classification metrics. + /// The number of classes to use for computing classification metrics. + /// Keeps track of any properties unknown to the library. + internal FineTuneHyperparams(long nEpochs, long batchSize, double promptLossWeight, double learningRateMultiplier, bool? computeClassificationMetrics, string classificationPositiveClass, long? 
classificationNClasses, IDictionary serializedAdditionalRawData) + { + NEpochs = nEpochs; + BatchSize = batchSize; + PromptLossWeight = promptLossWeight; + LearningRateMultiplier = learningRateMultiplier; + ComputeClassificationMetrics = computeClassificationMetrics; + ClassificationPositiveClass = classificationPositiveClass; + ClassificationNClasses = classificationNClasses; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal FineTuneHyperparams() + { + } + + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + public long NEpochs { get; } + /// + /// The batch size to use for training. The batch size is the number of training examples used to + /// train a single forward and backward pass. + /// + public long BatchSize { get; } + /// The weight to use for loss on the prompt tokens. + public double PromptLossWeight { get; } + /// The learning rate multiplier to use for training. + public double LearningRateMultiplier { get; } + /// The classification metrics to compute using the validation dataset at the end of every epoch. + public bool? ComputeClassificationMetrics { get; } + /// The positive class to use for computing classification metrics. + public string ClassificationPositiveClass { get; } + /// The number of classes to use for computing classification metrics. + public long? ClassificationNClasses { get; } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuneObject.cs b/.dotnet/src/Generated/Models/FineTuneObject.cs new file mode 100644 index 000000000..7c848fe16 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuneObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The FineTune_object. 
+ public readonly partial struct FineTuneObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public FineTuneObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FineTuneValue = "fine-tune"; + + /// fine-tune. + public static FineTuneObject FineTune { get; } = new FineTuneObject(FineTuneValue); + /// Determines if two values are the same. + public static bool operator ==(FineTuneObject left, FineTuneObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(FineTuneObject left, FineTuneObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator FineTuneObject(string value) => new FineTuneObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is FineTuneObject other && Equals(other); + /// + public bool Equals(FineTuneObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/FineTuneStatus.cs b/.dotnet/src/Generated/Models/FineTuneStatus.cs new file mode 100644 index 000000000..00121f091 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuneStatus.cs @@ -0,0 +1,57 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for status in FineTune. + public readonly partial struct FineTuneStatus : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public FineTuneStatus(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string CreatedValue = "created"; + private const string RunningValue = "running"; + private const string SucceededValue = "succeeded"; + private const string FailedValue = "failed"; + private const string CancelledValue = "cancelled"; + + /// created. + public static FineTuneStatus Created { get; } = new FineTuneStatus(CreatedValue); + /// running. + public static FineTuneStatus Running { get; } = new FineTuneStatus(RunningValue); + /// succeeded. + public static FineTuneStatus Succeeded { get; } = new FineTuneStatus(SucceededValue); + /// failed. + public static FineTuneStatus Failed { get; } = new FineTuneStatus(FailedValue); + /// cancelled. + public static FineTuneStatus Cancelled { get; } = new FineTuneStatus(CancelledValue); + /// Determines if two values are the same. + public static bool operator ==(FineTuneStatus left, FineTuneStatus right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(FineTuneStatus left, FineTuneStatus right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator FineTuneStatus(string value) => new FineTuneStatus(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is FineTuneStatus other && Equals(other); + /// + public bool Equals(FineTuneStatus other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs new file mode 100644 index 000000000..a7b9c02b7 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs @@ -0,0 +1,306 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class FineTuningJob : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJob)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + if (FinishedAt != null) + { + writer.WritePropertyName("finished_at"u8); + writer.WriteStringValue(FinishedAt.Value, "O"); + } + else + { + writer.WriteNull("finished_at"); + } + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + if (FineTunedModel != null) + { + writer.WritePropertyName("fine_tuned_model"u8); + writer.WriteStringValue(FineTunedModel); + } + else + { + writer.WriteNull("fine_tuned_model"); + } + writer.WritePropertyName("organization_id"u8); + writer.WriteStringValue(OrganizationId); + writer.WritePropertyName("status"u8); + writer.WriteStringValue(Status.ToString()); + 
writer.WritePropertyName("hyperparameters"u8); + writer.WriteObjectValue(Hyperparameters); + writer.WritePropertyName("training_file"u8); + writer.WriteStringValue(TrainingFile); + if (ValidationFile != null) + { + writer.WritePropertyName("validation_file"u8); + writer.WriteStringValue(ValidationFile); + } + else + { + writer.WriteNull("validation_file"); + } + writer.WritePropertyName("result_files"u8); + writer.WriteStartArray(); + foreach (var item in ResultFiles) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + if (TrainedTokens != null) + { + writer.WritePropertyName("trained_tokens"u8); + writer.WriteNumberValue(TrainedTokens.Value); + } + else + { + writer.WriteNull("trained_tokens"); + } + if (Error != null) + { + writer.WritePropertyName("error"u8); + writer.WriteObjectValue(Error); + } + else + { + writer.WriteNull("error"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + FineTuningJob IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJob)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFineTuningJob(document.RootElement, options); + } + + internal static FineTuningJob DeserializeFineTuningJob(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + FineTuningJobObject @object = default; + DateTimeOffset createdAt = default; + DateTimeOffset? finishedAt = default; + string model = default; + string fineTunedModel = default; + string organizationId = default; + FineTuningJobStatus status = default; + FineTuningJobHyperparameters hyperparameters = default; + string trainingFile = default; + string validationFile = default; + IReadOnlyList resultFiles = default; + long? 
trainedTokens = default; + FineTuningJobError error = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new FineTuningJobObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("finished_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + finishedAt = null; + continue; + } + finishedAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("fine_tuned_model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + fineTunedModel = null; + continue; + } + fineTunedModel = property.Value.GetString(); + continue; + } + if (property.NameEquals("organization_id"u8)) + { + organizationId = property.Value.GetString(); + continue; + } + if (property.NameEquals("status"u8)) + { + status = new FineTuningJobStatus(property.Value.GetString()); + continue; + } + if (property.NameEquals("hyperparameters"u8)) + { + hyperparameters = FineTuningJobHyperparameters.DeserializeFineTuningJobHyperparameters(property.Value); + continue; + } + if (property.NameEquals("training_file"u8)) + { + trainingFile = property.Value.GetString(); + continue; + } + if (property.NameEquals("validation_file"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + validationFile = null; + continue; + } + validationFile = property.Value.GetString(); + continue; + } + if (property.NameEquals("result_files"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + 
array.Add(item.GetString()); + } + resultFiles = array; + continue; + } + if (property.NameEquals("trained_tokens"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + trainedTokens = null; + continue; + } + trainedTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("error"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + error = null; + continue; + } + error = FineTuningJobError.DeserializeFineTuningJobError(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new FineTuningJob(id, @object, createdAt, finishedAt, model, fineTunedModel, organizationId, status, hyperparameters, trainingFile, validationFile, resultFiles, trainedTokens, error, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FineTuningJob)} does not support '{options.Format}' format."); + } + } + + FineTuningJob IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFineTuningJob(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FineTuningJob)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. 
+ /// The result to deserialize the model from. + internal static FineTuningJob FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFineTuningJob(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJob.cs b/.dotnet/src/Generated/Models/FineTuningJob.cs new file mode 100644 index 000000000..0203658aa --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJob.cs @@ -0,0 +1,237 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The FineTuningJob. + public partial class FineTuningJob + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The object identifier, which can be referenced in the API endpoints. + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + /// + /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + /// null if the fine-tuning job is still running. 
+ /// + /// The base model that is being fine-tuned. + /// + /// The name of the fine-tuned model that is being created. The value will be null if the + /// fine-tuning job is still running. + /// + /// The organization that owns the fine-tuning job. + /// + /// The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, + /// `succeeded`, `failed`, or `cancelled`. + /// + /// + /// The hyperparameters used for the fine-tuning job. See the + /// [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// + /// The file ID used for training. You can retrieve the training data with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The file ID used for validation. You can retrieve the validation results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The total number of billable tokens processed by this fine tuning job. The value will be null + /// if the fine-tuning job is still running. + /// + /// + /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + /// failure. + /// + /// , , , , or is null. + internal FineTuningJob(string id, DateTimeOffset createdAt, DateTimeOffset? finishedAt, string model, string fineTunedModel, string organizationId, FineTuningJobStatus status, FineTuningJobHyperparameters hyperparameters, string trainingFile, string validationFile, IEnumerable resultFiles, long? 
trainedTokens, FineTuningJobError error) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(model, nameof(model)); + ClientUtilities.AssertNotNull(organizationId, nameof(organizationId)); + ClientUtilities.AssertNotNull(hyperparameters, nameof(hyperparameters)); + ClientUtilities.AssertNotNull(trainingFile, nameof(trainingFile)); + ClientUtilities.AssertNotNull(resultFiles, nameof(resultFiles)); + + Id = id; + CreatedAt = createdAt; + FinishedAt = finishedAt; + Model = model; + FineTunedModel = fineTunedModel; + OrganizationId = organizationId; + Status = status; + Hyperparameters = hyperparameters; + TrainingFile = trainingFile; + ValidationFile = validationFile; + ResultFiles = resultFiles.ToList(); + TrainedTokens = trainedTokens; + Error = error; + } + + /// Initializes a new instance of . + /// The object identifier, which can be referenced in the API endpoints. + /// The object type, which is always "fine_tuning.job". + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + /// + /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + /// null if the fine-tuning job is still running. + /// + /// The base model that is being fine-tuned. + /// + /// The name of the fine-tuned model that is being created. The value will be null if the + /// fine-tuning job is still running. + /// + /// The organization that owns the fine-tuning job. + /// + /// The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, + /// `succeeded`, `failed`, or `cancelled`. + /// + /// + /// The hyperparameters used for the fine-tuning job. See the + /// [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// + /// The file ID used for training. You can retrieve the training data with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The file ID used for validation. 
You can retrieve the validation results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The total number of billable tokens processed by this fine tuning job. The value will be null + /// if the fine-tuning job is still running. + /// + /// + /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + /// failure. + /// + /// Keeps track of any properties unknown to the library. + internal FineTuningJob(string id, FineTuningJobObject @object, DateTimeOffset createdAt, DateTimeOffset? finishedAt, string model, string fineTunedModel, string organizationId, FineTuningJobStatus status, FineTuningJobHyperparameters hyperparameters, string trainingFile, string validationFile, IReadOnlyList resultFiles, long? trainedTokens, FineTuningJobError error, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + FinishedAt = finishedAt; + Model = model; + FineTunedModel = fineTunedModel; + OrganizationId = organizationId; + Status = status; + Hyperparameters = hyperparameters; + TrainingFile = trainingFile; + ValidationFile = validationFile; + ResultFiles = resultFiles; + TrainedTokens = trainedTokens; + Error = error; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal FineTuningJob() + { + } + + /// The object identifier, which can be referenced in the API endpoints. + public string Id { get; } + /// The object type, which is always "fine_tuning.job". + public FineTuningJobObject Object { get; } = FineTuningJobObject.FineTuningJob; + + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. 
+ public DateTimeOffset CreatedAt { get; } + /// + /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + /// null if the fine-tuning job is still running. + /// + public DateTimeOffset? FinishedAt { get; } + /// The base model that is being fine-tuned. + public string Model { get; } + /// + /// The name of the fine-tuned model that is being created. The value will be null if the + /// fine-tuning job is still running. + /// + public string FineTunedModel { get; } + /// The organization that owns the fine-tuning job. + public string OrganizationId { get; } + /// + /// The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, + /// `succeeded`, `failed`, or `cancelled`. + /// + public FineTuningJobStatus Status { get; } + /// + /// The hyperparameters used for the fine-tuning job. See the + /// [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + public FineTuningJobHyperparameters Hyperparameters { get; } + /// + /// The file ID used for training. You can retrieve the training data with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + public string TrainingFile { get; } + /// + /// The file ID used for validation. You can retrieve the validation results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + public string ValidationFile { get; } + /// + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + public IReadOnlyList ResultFiles { get; } + /// + /// The total number of billable tokens processed by this fine tuning job. The value will be null + /// if the fine-tuning job is still running. + /// + public long? TrainedTokens { get; } + /// + /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + /// failure. 
+ /// + public FineTuningJobError Error { get; } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs new file mode 100644 index 000000000..e586868e7 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs @@ -0,0 +1,169 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class FineTuningJobError : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJobError)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (OptionalProperty.IsDefined(Message)) + { + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + } + if (OptionalProperty.IsDefined(Code)) + { + writer.WritePropertyName("code"u8); + writer.WriteStringValue(Code); + } + if (OptionalProperty.IsDefined(Param)) + { + if (Param != null) + { + writer.WritePropertyName("param"u8); + writer.WriteStringValue(Param); + } + else + { + writer.WriteNull("param"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + 
FineTuningJobError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJobError)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFineTuningJobError(document.RootElement, options); + } + + internal static FineTuningJobError DeserializeFineTuningJobError(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OptionalProperty message = default; + OptionalProperty code = default; + OptionalProperty param = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (property.NameEquals("code"u8)) + { + code = property.Value.GetString(); + continue; + } + if (property.NameEquals("param"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + param = null; + continue; + } + param = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new FineTuningJobError(message.Value, code.Value, param.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FineTuningJobError)} does not support '{options.Format}' format."); + } + } + + FineTuningJobError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFineTuningJobError(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FineTuningJobError)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static FineTuningJobError FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFineTuningJobError(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobError.cs b/.dotnet/src/Generated/Models/FineTuningJobError.cs new file mode 100644 index 000000000..9b5b27e34 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobError.cs @@ -0,0 +1,76 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The FineTuningJobError. + public partial class FineTuningJobError + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . 
+ /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal FineTuningJobError() + { + } + + /// Initializes a new instance of . + /// A human-readable error message. + /// A machine-readable error code. + /// + /// The parameter that was invalid, usually `training_file` or `validation_file`. This field + /// will be null if the failure was not parameter-specific. + /// + /// Keeps track of any properties unknown to the library. + internal FineTuningJobError(string message, string code, string param, IDictionary serializedAdditionalRawData) + { + Message = message; + Code = code; + Param = param; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// A human-readable error message. + public string Message { get; } + /// A machine-readable error code. + public string Code { get; } + /// + /// The parameter that was invalid, usually `training_file` or `validation_file`. This field + /// will be null if the failure was not parameter-specific. 
+ /// + public string Param { get; } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs new file mode 100644 index 000000000..20bb7a0fa --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs @@ -0,0 +1,164 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class FineTuningJobEvent : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJobEvent)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("level"u8); + writer.WriteStringValue(Level.ToString()); + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + FineTuningJobEvent IJsonModel.Create(ref Utf8JsonReader reader, 
ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJobEvent)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFineTuningJobEvent(document.RootElement, options); + } + + internal static FineTuningJobEvent DeserializeFineTuningJobEvent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string @object = default; + DateTimeOffset createdAt = default; + FineTuningJobEventLevel level = default; + string message = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = property.Value.GetString(); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("level"u8)) + { + level = new FineTuningJobEventLevel(property.Value.GetString()); + continue; + } + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new FineTuningJobEvent(id, @object, createdAt, level, message, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = 
options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FineTuningJobEvent)} does not support '{options.Format}' format."); + } + } + + FineTuningJobEvent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFineTuningJobEvent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FineTuningJobEvent)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static FineTuningJobEvent FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFineTuningJobEvent(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobEvent.cs b/.dotnet/src/Generated/Models/FineTuningJobEvent.cs new file mode 100644 index 000000000..a65270a76 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobEvent.cs @@ -0,0 +1,99 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The FineTuningJobEvent. + public partial class FineTuningJobEvent + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// , or is null. + internal FineTuningJobEvent(string id, string @object, DateTimeOffset createdAt, FineTuningJobEventLevel level, string message) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(@object, nameof(@object)); + ClientUtilities.AssertNotNull(message, nameof(message)); + + Id = id; + Object = @object; + CreatedAt = createdAt; + Level = level; + Message = message; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal FineTuningJobEvent(string id, string @object, DateTimeOffset createdAt, FineTuningJobEventLevel level, string message, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + Level = level; + Message = message; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal FineTuningJobEvent() + { + } + + /// Gets the id. + public string Id { get; } + /// Gets the object. + public string Object { get; } + /// Gets the created at. + public DateTimeOffset CreatedAt { get; } + /// Gets the level. + public FineTuningJobEventLevel Level { get; } + /// Gets the message. 
+ public string Message { get; } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs b/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs new file mode 100644 index 000000000..7ac7da4c5 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs @@ -0,0 +1,51 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for level in FineTuningJobEvent. + public readonly partial struct FineTuningJobEventLevel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public FineTuningJobEventLevel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string InfoValue = "info"; + private const string WarnValue = "warn"; + private const string ErrorValue = "error"; + + /// info. + public static FineTuningJobEventLevel Info { get; } = new FineTuningJobEventLevel(InfoValue); + /// warn. + public static FineTuningJobEventLevel Warn { get; } = new FineTuningJobEventLevel(WarnValue); + /// error. + public static FineTuningJobEventLevel Error { get; } = new FineTuningJobEventLevel(ErrorValue); + /// Determines if two values are the same. + public static bool operator ==(FineTuningJobEventLevel left, FineTuningJobEventLevel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(FineTuningJobEventLevel left, FineTuningJobEventLevel right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator FineTuningJobEventLevel(string value) => new FineTuningJobEventLevel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is FineTuningJobEventLevel other && Equals(other); + /// + public bool Equals(FineTuningJobEventLevel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs new file mode 100644 index 000000000..8e4802d8a --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs @@ -0,0 +1,146 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class FineTuningJobHyperparameters : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJobHyperparameters)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (OptionalProperty.IsDefined(NEpochs)) + { + writer.WritePropertyName("n_epochs"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(NEpochs); +#else + using (JsonDocument document = JsonDocument.Parse(NEpochs)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + FineTuningJobHyperparameters IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJobHyperparameters)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFineTuningJobHyperparameters(document.RootElement, options); + } + + internal static FineTuningJobHyperparameters DeserializeFineTuningJobHyperparameters(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OptionalProperty nEpochs = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("n_epochs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nEpochs = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new FineTuningJobHyperparameters(nEpochs.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FineTuningJobHyperparameters)} does not support '{options.Format}' format."); + } + } + + FineTuningJobHyperparameters IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFineTuningJobHyperparameters(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FineTuningJobHyperparameters)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static FineTuningJobHyperparameters FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFineTuningJobHyperparameters(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs new file mode 100644 index 000000000..efcb91d25 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs @@ -0,0 +1,112 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The FineTuningJobHyperparameters. + public partial class FineTuningJobHyperparameters + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". 
+ /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal FineTuningJobHyperparameters() + { + } + + /// Initializes a new instance of . + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the + /// number manually, we support any number between 1 and 50 epochs. + /// + /// Keeps track of any properties unknown to the library. + internal FineTuningJobHyperparameters(BinaryData nEpochs, IDictionary serializedAdditionalRawData) + { + NEpochs = nEpochs; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the + /// number manually, we support any number between 1 and 50 epochs. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// "auto" + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. 
+ /// + /// + /// + /// + public BinaryData NEpochs { get; } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobObject.cs b/.dotnet/src/Generated/Models/FineTuningJobObject.cs new file mode 100644 index 000000000..cae76bffa --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The FineTuningJob_object. + public readonly partial struct FineTuningJobObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public FineTuningJobObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FineTuningJobValue = "fine_tuning.job"; + + /// fine_tuning.job. + public static FineTuningJobObject FineTuningJob { get; } = new FineTuningJobObject(FineTuningJobValue); + /// Determines if two values are the same. + public static bool operator ==(FineTuningJobObject left, FineTuningJobObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(FineTuningJobObject left, FineTuningJobObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator FineTuningJobObject(string value) => new FineTuningJobObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is FineTuningJobObject other && Equals(other); + /// + public bool Equals(FineTuningJobObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobStatus.cs b/.dotnet/src/Generated/Models/FineTuningJobStatus.cs new file mode 100644 index 000000000..23924df4f --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobStatus.cs @@ -0,0 +1,60 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for status in FineTuningJob. + public readonly partial struct FineTuningJobStatus : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public FineTuningJobStatus(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string CreatedValue = "created"; + private const string PendingValue = "pending"; + private const string RunningValue = "running"; + private const string SucceededValue = "succeeded"; + private const string FailedValue = "failed"; + private const string CancelledValue = "cancelled"; + + /// created. + public static FineTuningJobStatus Created { get; } = new FineTuningJobStatus(CreatedValue); + /// pending. + public static FineTuningJobStatus Pending { get; } = new FineTuningJobStatus(PendingValue); + /// running. + public static FineTuningJobStatus Running { get; } = new FineTuningJobStatus(RunningValue); + /// succeeded. + public static FineTuningJobStatus Succeeded { get; } = new FineTuningJobStatus(SucceededValue); + /// failed. + public static FineTuningJobStatus Failed { get; } = new FineTuningJobStatus(FailedValue); + /// cancelled. + public static FineTuningJobStatus Cancelled { get; } = new FineTuningJobStatus(CancelledValue); + /// Determines if two values are the same. + public static bool operator ==(FineTuningJobStatus left, FineTuningJobStatus right) => left.Equals(right); + /// Determines if two values are not the same. 
+ public static bool operator !=(FineTuningJobStatus left, FineTuningJobStatus right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator FineTuningJobStatus(string value) => new FineTuningJobStatus(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is FineTuningJobStatus other && Equals(other); + /// + public bool Equals(FineTuningJobStatus other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs b/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs new file mode 100644 index 000000000..99d391fea --- /dev/null +++ b/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs @@ -0,0 +1,158 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class FunctionObject : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FunctionObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (OptionalProperty.IsDefined(Description)) + { + writer.WritePropertyName("description"u8); + writer.WriteStringValue(Description); + } + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + if (OptionalProperty.IsDefined(Parameters)) + { + writer.WritePropertyName("parameters"u8); + writer.WriteObjectValue(Parameters); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + FunctionObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FunctionObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFunctionObject(document.RootElement, options); + } + + internal static FunctionObject DeserializeFunctionObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OptionalProperty description = default; + string name = default; + OptionalProperty parameters = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("description"u8)) + { + description = property.Value.GetString(); + continue; + } + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("parameters"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + parameters = FunctionParameters.DeserializeFunctionParameters(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new FunctionObject(description.Value, name, parameters.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FunctionObject)} does not support '{options.Format}' format."); + } + } + + FunctionObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFunctionObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FunctionObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static FunctionObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFunctionObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FunctionObject.cs b/.dotnet/src/Generated/Models/FunctionObject.cs new file mode 100644 index 000000000..00e9c8cdb --- /dev/null +++ b/.dotnet/src/Generated/Models/FunctionObject.cs @@ -0,0 +1,96 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The FunctionObject. + public partial class FunctionObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . 
+ /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + /// dashes, with a maximum length of 64. + /// + /// is null. + public FunctionObject(string name) + { + ClientUtilities.AssertNotNull(name, nameof(name)); + + Name = name; + } + + /// Initializes a new instance of . + /// + /// A description of what the function does, used by the model to choose when and how to call the + /// function. + /// + /// + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + /// dashes, with a maximum length of 64. + /// + /// + /// Keeps track of any properties unknown to the library. + internal FunctionObject(string description, string name, FunctionParameters parameters, IDictionary serializedAdditionalRawData) + { + Description = description; + Name = name; + Parameters = parameters; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal FunctionObject() + { + } + + /// + /// A description of what the function does, used by the model to choose when and how to call the + /// function. + /// + public string Description { get; set; } + /// + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + /// dashes, with a maximum length of 64. 
+ /// + public string Name { get; set; } + /// Gets or sets the parameters. + public FunctionParameters Parameters { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs b/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs new file mode 100644 index 000000000..e6f167ed4 --- /dev/null +++ b/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs @@ -0,0 +1,118 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class FunctionParameters : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FunctionParameters)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + foreach (var item in AdditionalProperties) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndObject(); + } + + FunctionParameters IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FunctionParameters)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFunctionParameters(document.RootElement, options); + } + + internal static FunctionParameters DeserializeFunctionParameters(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IDictionary additionalProperties = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + additionalProperties = additionalPropertiesDictionary; + return new FunctionParameters(additionalProperties); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FunctionParameters)} does not support '{options.Format}' format."); + } + } + + FunctionParameters IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFunctionParameters(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FunctionParameters)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static FunctionParameters FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFunctionParameters(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FunctionParameters.cs b/.dotnet/src/Generated/Models/FunctionParameters.cs new file mode 100644 index 000000000..ae3100e0d --- /dev/null +++ b/.dotnet/src/Generated/Models/FunctionParameters.cs @@ -0,0 +1,65 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// + /// The parameters the functions accepts, described as a JSON Schema object. See the + /// [guide](/docs/guides/gpt/function-calling) for examples, and the + /// [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation + /// about the format.\n\nTo describe a function that accepts no parameters, provide the value + /// `{\"type\": \"object\", \"properties\": {}}`. + /// + public partial class FunctionParameters + { + /// Initializes a new instance of . 
+ public FunctionParameters() + { + AdditionalProperties = new OptionalDictionary(); + } + + /// Initializes a new instance of . + /// Additional Properties. + internal FunctionParameters(IDictionary additionalProperties) + { + AdditionalProperties = additionalProperties; + } + + /// + /// Additional Properties + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IDictionary AdditionalProperties { get; } + } +} diff --git a/.dotnet/src/Generated/Models/Image.Serialization.cs b/.dotnet/src/Generated/Models/Image.Serialization.cs new file mode 100644 index 000000000..209416ec1 --- /dev/null +++ b/.dotnet/src/Generated/Models/Image.Serialization.cs @@ -0,0 +1,165 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class Image : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(Image)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (OptionalProperty.IsDefined(B64Json)) + { + writer.WritePropertyName("b64_json"u8); + writer.WriteBase64StringValue(B64Json.ToArray(), "D"); + } + if (OptionalProperty.IsDefined(Url)) + { + writer.WritePropertyName("url"u8); + writer.WriteStringValue(Url.AbsoluteUri); + } + if (OptionalProperty.IsDefined(RevisedPrompt)) + { + writer.WritePropertyName("revised_prompt"u8); + writer.WriteStringValue(RevisedPrompt); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + Image IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(Image)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeImage(document.RootElement, options); + } + + internal static Image DeserializeImage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OptionalProperty b64Json = default; + OptionalProperty url = default; + OptionalProperty revisedPrompt = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("b64_json"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + b64Json = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } + if (property.NameEquals("url"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + url = new Uri(property.Value.GetString()); + continue; + } + if (property.NameEquals("revised_prompt"u8)) + { + revisedPrompt = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new Image(b64Json.Value, url.Value, revisedPrompt.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(Image)} does not support '{options.Format}' format."); + } + } + + Image IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeImage(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(Image)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static Image FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeImage(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/Image.cs b/.dotnet/src/Generated/Models/Image.cs new file mode 100644 index 000000000..a813097a2 --- /dev/null +++ b/.dotnet/src/Generated/Models/Image.cs @@ -0,0 +1,85 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// Represents the url or the content of an image generated by the OpenAI API. + public partial class Image + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . 
+ /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal Image() + { + } + + /// Initializes a new instance of . + /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + /// The URL of the generated image, if `response_format` is `url` (default). + /// The prompt that was used to generate the image, if there was any revision to the prompt. + /// Keeps track of any properties unknown to the library. + internal Image(BinaryData b64Json, Uri url, string revisedPrompt, IDictionary serializedAdditionalRawData) + { + B64Json = b64Json; + Url = url; + RevisedPrompt = revisedPrompt; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData B64Json { get; } + /// The URL of the generated image, if `response_format` is `url` (default). + public Uri Url { get; } + /// The prompt that was used to generate the image, if there was any revision to the prompt. 
+ public string RevisedPrompt { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ImagesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ImagesResponse.Serialization.cs new file mode 100644 index 000000000..6d3955c84 --- /dev/null +++ b/.dotnet/src/Generated/Models/ImagesResponse.Serialization.cs @@ -0,0 +1,150 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ImagesResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ImagesResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("created"u8); + writer.WriteNumberValue(Created, "U"); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ImagesResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ImagesResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeImagesResponse(document.RootElement, options); + } + + internal static ImagesResponse DeserializeImagesResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DateTimeOffset created = default; + IReadOnlyList data = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("created"u8)) + { + created = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(Image.DeserializeImage(item)); + } + data = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ImagesResponse(created, data, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ImagesResponse)} does not support '{options.Format}' format."); + } + } + + ImagesResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeImagesResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ImagesResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ImagesResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeImagesResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ImagesResponse.cs b/.dotnet/src/Generated/Models/ImagesResponse.cs new file mode 100644 index 000000000..d3c377be9 --- /dev/null +++ b/.dotnet/src/Generated/Models/ImagesResponse.cs @@ -0,0 +1,80 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ImagesResponse. + public partial class ImagesResponse + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// is null. + internal ImagesResponse(DateTimeOffset created, IEnumerable data) + { + ClientUtilities.AssertNotNull(data, nameof(data)); + + Created = created; + Data = data.ToList(); + } + + /// Initializes a new instance of . + /// + /// + /// Keeps track of any properties unknown to the library. + internal ImagesResponse(DateTimeOffset created, IReadOnlyList data, IDictionary serializedAdditionalRawData) + { + Created = created; + Data = data; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ImagesResponse() + { + } + + /// Gets the created. + public DateTimeOffset Created { get; } + /// Gets the data. 
+ public IReadOnlyList Data { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs new file mode 100644 index 000000000..d1bb15a51 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs @@ -0,0 +1,174 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ListAssistantFilesResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListAssistantFilesResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("first_id"u8); + writer.WriteStringValue(FirstId); + writer.WritePropertyName("last_id"u8); + writer.WriteStringValue(LastId); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, 
document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListAssistantFilesResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListAssistantFilesResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListAssistantFilesResponse(document.RootElement, options); + } + + internal static ListAssistantFilesResponse DeserializeListAssistantFilesResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListAssistantFilesResponseObject @object = default; + IReadOnlyList data = default; + string firstId = default; + string lastId = default; + bool hasMore = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListAssistantFilesResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(AssistantFileObject.DeserializeAssistantFileObject(item)); + } + data = array; + continue; + } + if (property.NameEquals("first_id"u8)) + { + firstId = property.Value.GetString(); + continue; + } + if (property.NameEquals("last_id"u8)) + { + lastId = property.Value.GetString(); + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, 
BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListAssistantFilesResponse(@object, data, firstId, lastId, hasMore, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListAssistantFilesResponse)} does not support '{options.Format}' format."); + } + } + + ListAssistantFilesResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListAssistantFilesResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListAssistantFilesResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListAssistantFilesResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListAssistantFilesResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs new file mode 100644 index 000000000..d155bf3e6 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs @@ -0,0 +1,99 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ListAssistantFilesResponse. + public partial class ListAssistantFilesResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , or is null. + internal ListAssistantFilesResponse(IEnumerable data, string firstId, string lastId, bool hasMore) + { + ClientUtilities.AssertNotNull(data, nameof(data)); + ClientUtilities.AssertNotNull(firstId, nameof(firstId)); + ClientUtilities.AssertNotNull(lastId, nameof(lastId)); + + Data = data.ToList(); + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. 
+ internal ListAssistantFilesResponse(ListAssistantFilesResponseObject @object, IReadOnlyList data, string firstId, string lastId, bool hasMore, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListAssistantFilesResponse() + { + } + + /// Gets the object. + public ListAssistantFilesResponseObject Object { get; } = ListAssistantFilesResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the first id. + public string FirstId { get; } + /// Gets the last id. + public string LastId { get; } + /// Gets the has more. + public bool HasMore { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs b/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs new file mode 100644 index 000000000..5038ede26 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The ListAssistantFilesResponse_object. + public readonly partial struct ListAssistantFilesResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListAssistantFilesResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListAssistantFilesResponseObject List { get; } = new ListAssistantFilesResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListAssistantFilesResponseObject left, ListAssistantFilesResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. 
+ public static bool operator !=(ListAssistantFilesResponseObject left, ListAssistantFilesResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListAssistantFilesResponseObject(string value) => new ListAssistantFilesResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListAssistantFilesResponseObject other && Equals(other); + /// + public bool Equals(ListAssistantFilesResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs new file mode 100644 index 000000000..f9c0b12b6 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs @@ -0,0 +1,174 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ListAssistantsResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListAssistantsResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("first_id"u8); + writer.WriteStringValue(FirstId); + writer.WritePropertyName("last_id"u8); + writer.WriteStringValue(LastId); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListAssistantsResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListAssistantsResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListAssistantsResponse(document.RootElement, options); + } + + internal static ListAssistantsResponse DeserializeListAssistantsResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListAssistantsResponseObject @object = default; + IReadOnlyList data = default; + string firstId = default; + string lastId = default; + bool hasMore = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListAssistantsResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(AssistantObject.DeserializeAssistantObject(item)); + } + data = array; + continue; + } + if (property.NameEquals("first_id"u8)) + { + firstId = property.Value.GetString(); + continue; + } + if (property.NameEquals("last_id"u8)) + { + lastId = property.Value.GetString(); + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListAssistantsResponse(@object, data, firstId, lastId, hasMore, serializedAdditionalRawData); + } + + BinaryData 
IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListAssistantsResponse)} does not support '{options.Format}' format."); + } + } + + ListAssistantsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListAssistantsResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListAssistantsResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListAssistantsResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListAssistantsResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListAssistantsResponse.cs b/.dotnet/src/Generated/Models/ListAssistantsResponse.cs new file mode 100644 index 000000000..002b27f3f --- /dev/null +++ b/.dotnet/src/Generated/Models/ListAssistantsResponse.cs @@ -0,0 +1,99 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ListAssistantsResponse. + public partial class ListAssistantsResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , or is null. + internal ListAssistantsResponse(IEnumerable data, string firstId, string lastId, bool hasMore) + { + ClientUtilities.AssertNotNull(data, nameof(data)); + ClientUtilities.AssertNotNull(firstId, nameof(firstId)); + ClientUtilities.AssertNotNull(lastId, nameof(lastId)); + + Data = data.ToList(); + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. 
+ internal ListAssistantsResponse(ListAssistantsResponseObject @object, IReadOnlyList data, string firstId, string lastId, bool hasMore, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListAssistantsResponse() + { + } + + /// Gets the object. + public ListAssistantsResponseObject Object { get; } = ListAssistantsResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the first id. + public string FirstId { get; } + /// Gets the last id. + public string LastId { get; } + /// Gets the has more. + public bool HasMore { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs b/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs new file mode 100644 index 000000000..5bbc18a2b --- /dev/null +++ b/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The ListAssistantsResponse_object. + public readonly partial struct ListAssistantsResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListAssistantsResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListAssistantsResponseObject List { get; } = new ListAssistantsResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListAssistantsResponseObject left, ListAssistantsResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. 
+ public static bool operator !=(ListAssistantsResponseObject left, ListAssistantsResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListAssistantsResponseObject(string value) => new ListAssistantsResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListAssistantsResponseObject other && Equals(other); + /// + public bool Equals(ListAssistantsResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs new file mode 100644 index 000000000..a580904af --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs @@ -0,0 +1,150 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ListFilesResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListFilesResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListFilesResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListFilesResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListFilesResponse(document.RootElement, options); + } + + internal static ListFilesResponse DeserializeListFilesResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList data = default; + ListFilesResponseObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(OpenAIFile.DeserializeOpenAIFile(item)); + } + data = array; + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new ListFilesResponseObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListFilesResponse(data, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListFilesResponse)} does not support '{options.Format}' format."); + } + } + + ListFilesResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListFilesResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListFilesResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListFilesResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListFilesResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListFilesResponse.cs b/.dotnet/src/Generated/Models/ListFilesResponse.cs new file mode 100644 index 000000000..0738efe8d --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFilesResponse.cs @@ -0,0 +1,78 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ListFilesResponse. + public partial class ListFilesResponse + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// is null. + internal ListFilesResponse(IEnumerable data) + { + ClientUtilities.AssertNotNull(data, nameof(data)); + + Data = data.ToList(); + } + + /// Initializes a new instance of . + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListFilesResponse(IReadOnlyList data, ListFilesResponseObject @object, IDictionary serializedAdditionalRawData) + { + Data = data; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListFilesResponse() + { + } + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the object. + public ListFilesResponseObject Object { get; } = ListFilesResponseObject.List; + } +} diff --git a/.dotnet/src/Generated/Models/ListFilesResponseObject.cs b/.dotnet/src/Generated/Models/ListFilesResponseObject.cs new file mode 100644 index 000000000..ef68e3dda --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFilesResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The ListFilesResponse_object. 
+ public readonly partial struct ListFilesResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListFilesResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListFilesResponseObject List { get; } = new ListFilesResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListFilesResponseObject left, ListFilesResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListFilesResponseObject left, ListFilesResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListFilesResponseObject(string value) => new ListFilesResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListFilesResponseObject other && Equals(other); + /// + public bool Equals(ListFilesResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListFineTuneEventsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListFineTuneEventsResponse.Serialization.cs new file mode 100644 index 000000000..bd3431eed --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFineTuneEventsResponse.Serialization.cs @@ -0,0 +1,150 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ListFineTuneEventsResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListFineTuneEventsResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListFineTuneEventsResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListFineTuneEventsResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListFineTuneEventsResponse(document.RootElement, options); + } + + internal static ListFineTuneEventsResponse DeserializeListFineTuneEventsResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string @object = default; + IReadOnlyList data = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = property.Value.GetString(); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(FineTuneEvent.DeserializeFineTuneEvent(item)); + } + data = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListFineTuneEventsResponse(@object, data, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListFineTuneEventsResponse)} does not support '{options.Format}' format."); + } + } + + ListFineTuneEventsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListFineTuneEventsResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListFineTuneEventsResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListFineTuneEventsResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListFineTuneEventsResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListFineTuneEventsResponse.cs b/.dotnet/src/Generated/Models/ListFineTuneEventsResponse.cs new file mode 100644 index 000000000..743f4ee68 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFineTuneEventsResponse.cs @@ -0,0 +1,81 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ListFineTuneEventsResponse. 
+ public partial class ListFineTuneEventsResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// or is null. + internal ListFineTuneEventsResponse(string @object, IEnumerable data) + { + ClientUtilities.AssertNotNull(@object, nameof(@object)); + ClientUtilities.AssertNotNull(data, nameof(data)); + + Object = @object; + Data = data.ToList(); + } + + /// Initializes a new instance of . + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListFineTuneEventsResponse(string @object, IReadOnlyList data, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListFineTuneEventsResponse() + { + } + + /// Gets the object. + public string Object { get; } + /// Gets the data. 
+ public IReadOnlyList Data { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListFineTunesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListFineTunesResponse.Serialization.cs new file mode 100644 index 000000000..bfd1f5cf9 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFineTunesResponse.Serialization.cs @@ -0,0 +1,150 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ListFineTunesResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListFineTunesResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListFineTunesResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListFineTunesResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListFineTunesResponse(document.RootElement, options); + } + + internal static ListFineTunesResponse DeserializeListFineTunesResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string @object = default; + IReadOnlyList data = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = property.Value.GetString(); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(FineTune.DeserializeFineTune(item)); + } + data = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListFineTunesResponse(@object, data, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListFineTunesResponse)} does not support '{options.Format}' format."); + } + } + + ListFineTunesResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListFineTunesResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListFineTunesResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListFineTunesResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListFineTunesResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListFineTunesResponse.cs b/.dotnet/src/Generated/Models/ListFineTunesResponse.cs new file mode 100644 index 000000000..1355ef3be --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFineTunesResponse.cs @@ -0,0 +1,81 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ListFineTunesResponse. 
+ public partial class ListFineTunesResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// or is null. + internal ListFineTunesResponse(string @object, IEnumerable data) + { + ClientUtilities.AssertNotNull(@object, nameof(@object)); + ClientUtilities.AssertNotNull(data, nameof(data)); + + Object = @object; + Data = data.ToList(); + } + + /// Initializes a new instance of . + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListFineTunesResponse(string @object, IReadOnlyList data, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListFineTunesResponse() + { + } + + /// Gets the object. + public string Object { get; } + /// Gets the data. 
+ public IReadOnlyList Data { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs new file mode 100644 index 000000000..b141b423a --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs @@ -0,0 +1,150 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ListFineTuningJobEventsResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListFineTuningJobEventsResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListFineTuningJobEventsResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListFineTuningJobEventsResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListFineTuningJobEventsResponse(document.RootElement, options); + } + + internal static ListFineTuningJobEventsResponse DeserializeListFineTuningJobEventsResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string @object = default; + IReadOnlyList data = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = property.Value.GetString(); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(FineTuningJobEvent.DeserializeFineTuningJobEvent(item)); + } + data = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListFineTuningJobEventsResponse(@object, data, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListFineTuningJobEventsResponse)} does not support '{options.Format}' format."); + } + } + + ListFineTuningJobEventsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListFineTuningJobEventsResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListFineTuningJobEventsResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListFineTuningJobEventsResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListFineTuningJobEventsResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs new file mode 100644 index 000000000..94573da09 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs @@ -0,0 +1,81 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ListFineTuningJobEventsResponse. + public partial class ListFineTuningJobEventsResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// or is null. + internal ListFineTuningJobEventsResponse(string @object, IEnumerable data) + { + ClientUtilities.AssertNotNull(@object, nameof(@object)); + ClientUtilities.AssertNotNull(data, nameof(data)); + + Object = @object; + Data = data.ToList(); + } + + /// Initializes a new instance of . + /// + /// + /// Keeps track of any properties unknown to the library. 
+ internal ListFineTuningJobEventsResponse(string @object, IReadOnlyList data, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListFineTuningJobEventsResponse() + { + } + + /// Gets the object. + public string Object { get; } + /// Gets the data. + public IReadOnlyList Data { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs new file mode 100644 index 000000000..516a772d1 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs @@ -0,0 +1,174 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ListMessageFilesResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListMessageFilesResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("first_id"u8); + writer.WriteStringValue(FirstId); + writer.WritePropertyName("last_id"u8); + writer.WriteStringValue(LastId); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListMessageFilesResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListMessageFilesResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListMessageFilesResponse(document.RootElement, options); + } + + internal static ListMessageFilesResponse DeserializeListMessageFilesResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListMessageFilesResponseObject @object = default; + IReadOnlyList data = default; + string firstId = default; + string lastId = default; + bool hasMore = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListMessageFilesResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MessageFileObject.DeserializeMessageFileObject(item)); + } + data = array; + continue; + } + if (property.NameEquals("first_id"u8)) + { + firstId = property.Value.GetString(); + continue; + } + if (property.NameEquals("last_id"u8)) + { + lastId = property.Value.GetString(); + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListMessageFilesResponse(@object, data, firstId, lastId, hasMore, serializedAdditionalRawData); + } + + BinaryData 
IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListMessageFilesResponse)} does not support '{options.Format}' format."); + } + } + + ListMessageFilesResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListMessageFilesResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListMessageFilesResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListMessageFilesResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListMessageFilesResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs b/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs new file mode 100644 index 000000000..e4476c13a --- /dev/null +++ b/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs @@ -0,0 +1,99 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ListMessageFilesResponse. + public partial class ListMessageFilesResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , or is null. + internal ListMessageFilesResponse(IEnumerable data, string firstId, string lastId, bool hasMore) + { + ClientUtilities.AssertNotNull(data, nameof(data)); + ClientUtilities.AssertNotNull(firstId, nameof(firstId)); + ClientUtilities.AssertNotNull(lastId, nameof(lastId)); + + Data = data.ToList(); + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. 
+ internal ListMessageFilesResponse(ListMessageFilesResponseObject @object, IReadOnlyList data, string firstId, string lastId, bool hasMore, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListMessageFilesResponse() + { + } + + /// Gets the object. + public ListMessageFilesResponseObject Object { get; } = ListMessageFilesResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the first id. + public string FirstId { get; } + /// Gets the last id. + public string LastId { get; } + /// Gets the has more. + public bool HasMore { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs b/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs new file mode 100644 index 000000000..da8016c77 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The ListMessageFilesResponse_object. + public readonly partial struct ListMessageFilesResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListMessageFilesResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListMessageFilesResponseObject List { get; } = new ListMessageFilesResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListMessageFilesResponseObject left, ListMessageFilesResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. 
+ public static bool operator !=(ListMessageFilesResponseObject left, ListMessageFilesResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListMessageFilesResponseObject(string value) => new ListMessageFilesResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListMessageFilesResponseObject other && Equals(other); + /// + public bool Equals(ListMessageFilesResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs new file mode 100644 index 000000000..b68c46995 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs @@ -0,0 +1,174 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ListMessagesResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListMessagesResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("first_id"u8); + writer.WriteStringValue(FirstId); + writer.WritePropertyName("last_id"u8); + writer.WriteStringValue(LastId); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListMessagesResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListMessagesResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListMessagesResponse(document.RootElement, options); + } + + internal static ListMessagesResponse DeserializeListMessagesResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListMessagesResponseObject @object = default; + IReadOnlyList data = default; + string firstId = default; + string lastId = default; + bool hasMore = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListMessagesResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MessageObject.DeserializeMessageObject(item)); + } + data = array; + continue; + } + if (property.NameEquals("first_id"u8)) + { + firstId = property.Value.GetString(); + continue; + } + if (property.NameEquals("last_id"u8)) + { + lastId = property.Value.GetString(); + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListMessagesResponse(@object, data, firstId, lastId, hasMore, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions 
options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListMessagesResponse)} does not support '{options.Format}' format."); + } + } + + ListMessagesResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListMessagesResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListMessagesResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListMessagesResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListMessagesResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListMessagesResponse.cs b/.dotnet/src/Generated/Models/ListMessagesResponse.cs new file mode 100644 index 000000000..41193e112 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListMessagesResponse.cs @@ -0,0 +1,99 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ListMessagesResponse. 
+ public partial class ListMessagesResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , or is null. + internal ListMessagesResponse(IEnumerable data, string firstId, string lastId, bool hasMore) + { + ClientUtilities.AssertNotNull(data, nameof(data)); + ClientUtilities.AssertNotNull(firstId, nameof(firstId)); + ClientUtilities.AssertNotNull(lastId, nameof(lastId)); + + Data = data.ToList(); + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListMessagesResponse(ListMessagesResponseObject @object, IReadOnlyList data, string firstId, string lastId, bool hasMore, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListMessagesResponse() + { + } + + /// Gets the object. + public ListMessagesResponseObject Object { get; } = ListMessagesResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the first id. 
+ public string FirstId { get; } + /// Gets the last id. + public string LastId { get; } + /// Gets the has more. + public bool HasMore { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs b/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs new file mode 100644 index 000000000..ff1303bf4 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The ListMessagesResponse_object. + public readonly partial struct ListMessagesResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListMessagesResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListMessagesResponseObject List { get; } = new ListMessagesResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListMessagesResponseObject left, ListMessagesResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListMessagesResponseObject left, ListMessagesResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListMessagesResponseObject(string value) => new ListMessagesResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListMessagesResponseObject other && Equals(other); + /// + public bool Equals(ListMessagesResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs new file mode 100644 index 000000000..8a6dbd3a7 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs @@ -0,0 +1,150 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ListModelsResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListModelsResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListModelsResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListModelsResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListModelsResponse(document.RootElement, options); + } + + internal static ListModelsResponse DeserializeListModelsResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListModelsResponseObject @object = default; + IReadOnlyList data = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListModelsResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(Model.DeserializeModel(item)); + } + data = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListModelsResponse(@object, data, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListModelsResponse)} does not support '{options.Format}' format."); + } + } + + ListModelsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListModelsResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListModelsResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListModelsResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListModelsResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListModelsResponse.cs b/.dotnet/src/Generated/Models/ListModelsResponse.cs new file mode 100644 index 000000000..bc2ddf1a7 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListModelsResponse.cs @@ -0,0 +1,79 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ListModelsResponse. + public partial class ListModelsResponse + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// is null. + internal ListModelsResponse(IEnumerable data) + { + ClientUtilities.AssertNotNull(data, nameof(data)); + + Data = data.ToList(); + } + + /// Initializes a new instance of . + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListModelsResponse(ListModelsResponseObject @object, IReadOnlyList data, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListModelsResponse() + { + } + + /// Gets the object. + public ListModelsResponseObject Object { get; } = ListModelsResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListModelsResponseObject.cs b/.dotnet/src/Generated/Models/ListModelsResponseObject.cs new file mode 100644 index 000000000..bb0127059 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListModelsResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The ListModelsResponse_object. 
+ public readonly partial struct ListModelsResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListModelsResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListModelsResponseObject List { get; } = new ListModelsResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListModelsResponseObject left, ListModelsResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListModelsResponseObject left, ListModelsResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListModelsResponseObject(string value) => new ListModelsResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListModelsResponseObject other && Equals(other); + /// + public bool Equals(ListModelsResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListOrder.cs b/.dotnet/src/Generated/Models/ListOrder.cs new file mode 100644 index 000000000..0343ac0f3 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListOrder.cs @@ -0,0 +1,47 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + public readonly partial struct ListOrder : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListOrder(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string AscValue = "asc"; + private const string DescValue = "desc"; + + /// asc. + public static ListOrder Asc { get; } = new ListOrder(AscValue); + /// desc. + public static ListOrder Desc { get; } = new ListOrder(DescValue); + /// Determines if two values are the same. + public static bool operator ==(ListOrder left, ListOrder right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListOrder left, ListOrder right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListOrder(string value) => new ListOrder(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListOrder other && Equals(other); + /// + public bool Equals(ListOrder other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs new file mode 100644 index 000000000..dd8425fb7 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs @@ -0,0 +1,158 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ListPaginatedFineTuningJobsResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListPaginatedFineTuningJobsResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListPaginatedFineTuningJobsResponse 
IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListPaginatedFineTuningJobsResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListPaginatedFineTuningJobsResponse(document.RootElement, options); + } + + internal static ListPaginatedFineTuningJobsResponse DeserializeListPaginatedFineTuningJobsResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string @object = default; + IReadOnlyList data = default; + bool hasMore = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = property.Value.GetString(); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(FineTuningJob.DeserializeFineTuningJob(item)); + } + data = array; + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListPaginatedFineTuningJobsResponse(@object, data, hasMore, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListPaginatedFineTuningJobsResponse)} does not support '{options.Format}' format."); + } + } + + ListPaginatedFineTuningJobsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListPaginatedFineTuningJobsResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListPaginatedFineTuningJobsResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListPaginatedFineTuningJobsResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListPaginatedFineTuningJobsResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs new file mode 100644 index 000000000..db1bf28f5 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs @@ -0,0 +1,87 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ListPaginatedFineTuningJobsResponse. + public partial class ListPaginatedFineTuningJobsResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// or is null. + internal ListPaginatedFineTuningJobsResponse(string @object, IEnumerable data, bool hasMore) + { + ClientUtilities.AssertNotNull(@object, nameof(@object)); + ClientUtilities.AssertNotNull(data, nameof(data)); + + Object = @object; + Data = data.ToList(); + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// Keeps track of any properties unknown to the library. 
+ internal ListPaginatedFineTuningJobsResponse(string @object, IReadOnlyList data, bool hasMore, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + HasMore = hasMore; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListPaginatedFineTuningJobsResponse() + { + } + + /// Gets the object. + public string Object { get; } + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the has more. + public bool HasMore { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs new file mode 100644 index 000000000..3aa87a8a1 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs @@ -0,0 +1,174 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ListRunStepsResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListRunStepsResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("first_id"u8); + writer.WriteStringValue(FirstId); + writer.WritePropertyName("last_id"u8); + writer.WriteStringValue(LastId); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListRunStepsResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListRunStepsResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListRunStepsResponse(document.RootElement, options); + } + + internal static ListRunStepsResponse DeserializeListRunStepsResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListRunStepsResponseObject @object = default; + IReadOnlyList data = default; + string firstId = default; + string lastId = default; + bool hasMore = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListRunStepsResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(RunStepObject.DeserializeRunStepObject(item)); + } + data = array; + continue; + } + if (property.NameEquals("first_id"u8)) + { + firstId = property.Value.GetString(); + continue; + } + if (property.NameEquals("last_id"u8)) + { + lastId = property.Value.GetString(); + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListRunStepsResponse(@object, data, firstId, lastId, hasMore, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions 
options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListRunStepsResponse)} does not support '{options.Format}' format."); + } + } + + ListRunStepsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListRunStepsResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListRunStepsResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListRunStepsResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListRunStepsResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListRunStepsResponse.cs b/.dotnet/src/Generated/Models/ListRunStepsResponse.cs new file mode 100644 index 000000000..1df4309d3 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListRunStepsResponse.cs @@ -0,0 +1,99 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ListRunStepsResponse. 
+ public partial class ListRunStepsResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , or is null. + internal ListRunStepsResponse(IEnumerable data, string firstId, string lastId, bool hasMore) + { + ClientUtilities.AssertNotNull(data, nameof(data)); + ClientUtilities.AssertNotNull(firstId, nameof(firstId)); + ClientUtilities.AssertNotNull(lastId, nameof(lastId)); + + Data = data.ToList(); + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListRunStepsResponse(ListRunStepsResponseObject @object, IReadOnlyList data, string firstId, string lastId, bool hasMore, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListRunStepsResponse() + { + } + + /// Gets the object. + public ListRunStepsResponseObject Object { get; } = ListRunStepsResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the first id. 
+ public string FirstId { get; } + /// Gets the last id. + public string LastId { get; } + /// Gets the has more. + public bool HasMore { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs b/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs new file mode 100644 index 000000000..043709495 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The ListRunStepsResponse_object. + public readonly partial struct ListRunStepsResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListRunStepsResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListRunStepsResponseObject List { get; } = new ListRunStepsResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListRunStepsResponseObject left, ListRunStepsResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListRunStepsResponseObject left, ListRunStepsResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListRunStepsResponseObject(string value) => new ListRunStepsResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListRunStepsResponseObject other && Equals(other); + /// + public bool Equals(ListRunStepsResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs new file mode 100644 index 000000000..d0705cd68 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs @@ -0,0 +1,174 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ListRunsResponse : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListRunsResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("first_id"u8); + writer.WriteStringValue(FirstId); + writer.WritePropertyName("last_id"u8); + writer.WriteStringValue(LastId); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + 
writer.WriteEndObject(); + } + + ListRunsResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListRunsResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListRunsResponse(document.RootElement, options); + } + + internal static ListRunsResponse DeserializeListRunsResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListRunsResponseObject @object = default; + IReadOnlyList data = default; + string firstId = default; + string lastId = default; + bool hasMore = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListRunsResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(RunObject.DeserializeRunObject(item)); + } + data = array; + continue; + } + if (property.NameEquals("first_id"u8)) + { + firstId = property.Value.GetString(); + continue; + } + if (property.NameEquals("last_id"u8)) + { + lastId = property.Value.GetString(); + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new 
ListRunsResponse(@object, data, firstId, lastId, hasMore, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListRunsResponse)} does not support '{options.Format}' format."); + } + } + + ListRunsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListRunsResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListRunsResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListRunsResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListRunsResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListRunsResponse.cs b/.dotnet/src/Generated/Models/ListRunsResponse.cs new file mode 100644 index 000000000..efd6de920 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListRunsResponse.cs @@ -0,0 +1,99 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The ListRunsResponse. + public partial class ListRunsResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , or is null. + internal ListRunsResponse(IEnumerable data, string firstId, string lastId, bool hasMore) + { + ClientUtilities.AssertNotNull(data, nameof(data)); + ClientUtilities.AssertNotNull(firstId, nameof(firstId)); + ClientUtilities.AssertNotNull(lastId, nameof(lastId)); + + Data = data.ToList(); + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. 
+ internal ListRunsResponse(ListRunsResponseObject @object, IReadOnlyList data, string firstId, string lastId, bool hasMore, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListRunsResponse() + { + } + + /// Gets the object. + public ListRunsResponseObject Object { get; } = ListRunsResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the first id. + public string FirstId { get; } + /// Gets the last id. + public string LastId { get; } + /// Gets the has more. + public bool HasMore { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListRunsResponseObject.cs b/.dotnet/src/Generated/Models/ListRunsResponseObject.cs new file mode 100644 index 000000000..548feb288 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListRunsResponseObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The ListRunsResponse_object. + public readonly partial struct ListRunsResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListRunsResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListRunsResponseObject List { get; } = new ListRunsResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListRunsResponseObject left, ListRunsResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListRunsResponseObject left, ListRunsResponseObject right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator ListRunsResponseObject(string value) => new ListRunsResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListRunsResponseObject other && Equals(other); + /// + public bool Equals(ListRunsResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs b/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs new file mode 100644 index 000000000..01d813ec5 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs @@ -0,0 +1,156 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class MessageFileObject : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MessageFileObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("message_id"u8); + writer.WriteStringValue(MessageId); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + MessageFileObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MessageFileObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeMessageFileObject(document.RootElement, options); + } + + internal static MessageFileObject DeserializeMessageFileObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + MessageFileObjectObject @object = default; + DateTimeOffset createdAt = default; + string messageId = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new MessageFileObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("message_id"u8)) + { + messageId = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new MessageFileObject(id, @object, createdAt, messageId, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(MessageFileObject)} does not support '{options.Format}' format."); + } + } + + MessageFileObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeMessageFileObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(MessageFileObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static MessageFileObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeMessageFileObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/MessageFileObject.cs b/.dotnet/src/Generated/Models/MessageFileObject.cs new file mode 100644 index 000000000..0afed8c95 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageFileObject.cs @@ -0,0 +1,91 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// A list of files attached to a `message`. + public partial class MessageFileObject + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// TThe identifier, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the message file was created. + /// The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. + /// or is null. + internal MessageFileObject(string id, DateTimeOffset createdAt, string messageId) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(messageId, nameof(messageId)); + + Id = id; + CreatedAt = createdAt; + MessageId = messageId; + } + + /// Initializes a new instance of . + /// TThe identifier, which can be referenced in API endpoints. + /// The object type, which is always `thread.message.file`. + /// The Unix timestamp (in seconds) for when the message file was created. + /// The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. + /// Keeps track of any properties unknown to the library. 
+ internal MessageFileObject(string id, MessageFileObjectObject @object, DateTimeOffset createdAt, string messageId, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + MessageId = messageId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal MessageFileObject() + { + } + + /// TThe identifier, which can be referenced in API endpoints. + public string Id { get; } + /// The object type, which is always `thread.message.file`. + public MessageFileObjectObject Object { get; } = MessageFileObjectObject.ThreadMessageFile; + + /// The Unix timestamp (in seconds) for when the message file was created. + public DateTimeOffset CreatedAt { get; } + /// The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. + public string MessageId { get; } + } +} diff --git a/.dotnet/src/Generated/Models/MessageFileObjectObject.cs b/.dotnet/src/Generated/Models/MessageFileObjectObject.cs new file mode 100644 index 000000000..55f4cc6f8 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageFileObjectObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The MessageFileObject_object. + public readonly partial struct MessageFileObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public MessageFileObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ThreadMessageFileValue = "thread.message.file"; + + /// thread.message.file. + public static MessageFileObjectObject ThreadMessageFile { get; } = new MessageFileObjectObject(ThreadMessageFileValue); + /// Determines if two values are the same. 
+ public static bool operator ==(MessageFileObjectObject left, MessageFileObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(MessageFileObjectObject left, MessageFileObjectObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator MessageFileObjectObject(string value) => new MessageFileObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is MessageFileObjectObject other && Equals(other); + /// + public bool Equals(MessageFileObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/MessageObject.Serialization.cs b/.dotnet/src/Generated/Models/MessageObject.Serialization.cs new file mode 100644 index 000000000..5f9db4673 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageObject.Serialization.cs @@ -0,0 +1,290 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class MessageObject : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MessageObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("thread_id"u8); + writer.WriteStringValue(ThreadId); + writer.WritePropertyName("role"u8); + writer.WriteStringValue(Role.ToString()); + writer.WritePropertyName("content"u8); + writer.WriteStartArray(); + foreach (var item in Content) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + if (AssistantId != null) + { + writer.WritePropertyName("assistant_id"u8); + writer.WriteStringValue(AssistantId); + } + else + { + writer.WriteNull("assistant_id"); + } + if (RunId != null) + { + writer.WritePropertyName("run_id"u8); + writer.WriteStringValue(RunId); + } + else + { + writer.WriteNull("run_id"); + } + writer.WritePropertyName("file_ids"u8); + writer.WriteStartArray(); + foreach (var item in FileIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + if (Metadata != null && OptionalProperty.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + 
writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + MessageObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MessageObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeMessageObject(document.RootElement, options); + } + + internal static MessageObject DeserializeMessageObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + MessageObjectObject @object = default; + DateTimeOffset createdAt = default; + string threadId = default; + MessageObjectRole role = default; + IReadOnlyList content = default; + string assistantId = default; + string runId = default; + IReadOnlyList fileIds = default; + IReadOnlyDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new MessageObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("thread_id"u8)) + { + threadId = property.Value.GetString(); + continue; + } + if 
(property.NameEquals("role"u8)) + { + role = new MessageObjectRole(property.Value.GetString()); + continue; + } + if (property.NameEquals("content"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + content = array; + continue; + } + if (property.NameEquals("assistant_id"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + assistantId = null; + continue; + } + assistantId = property.Value.GetString(); + continue; + } + if (property.NameEquals("run_id"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + runId = null; + continue; + } + runId = property.Value.GetString(); + continue; + } + if (property.NameEquals("file_ids"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + fileIds = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + metadata = new OptionalDictionary(); + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new MessageObject(id, @object, createdAt, threadId, role, content, assistantId, runId, fileIds, metadata, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(MessageObject)} does not support '{options.Format}' format."); + } + } + + MessageObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeMessageObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(MessageObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static MessageObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeMessageObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/MessageObject.cs b/.dotnet/src/Generated/Models/MessageObject.cs new file mode 100644 index 000000000..54ea98399 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageObject.cs @@ -0,0 +1,201 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The MessageObject. + public partial class MessageObject + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the message was created. + /// The [thread](/docs/api-reference/threads) ID that this message belongs to. + /// The entity that produced the message. One of `user` or `assistant`. + /// The content of the message in array of text and/or images. + /// + /// If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this + /// message. + /// + /// + /// If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of + /// this message. + /// + /// + /// A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for + /// tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be + /// attached to a message. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// , , or is null. 
+ internal MessageObject(string id, DateTimeOffset createdAt, string threadId, MessageObjectRole role, IEnumerable content, string assistantId, string runId, IEnumerable fileIds, IReadOnlyDictionary metadata) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + ClientUtilities.AssertNotNull(fileIds, nameof(fileIds)); + + Id = id; + CreatedAt = createdAt; + ThreadId = threadId; + Role = role; + Content = content.ToList(); + AssistantId = assistantId; + RunId = runId; + FileIds = fileIds.ToList(); + Metadata = metadata; + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `thread.message`. + /// The Unix timestamp (in seconds) for when the message was created. + /// The [thread](/docs/api-reference/threads) ID that this message belongs to. + /// The entity that produced the message. One of `user` or `assistant`. + /// The content of the message in array of text and/or images. + /// + /// If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this + /// message. + /// + /// + /// If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of + /// this message. + /// + /// + /// A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for + /// tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be + /// attached to a message. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. 
+ internal MessageObject(string id, MessageObjectObject @object, DateTimeOffset createdAt, string threadId, MessageObjectRole role, IReadOnlyList content, string assistantId, string runId, IReadOnlyList fileIds, IReadOnlyDictionary metadata, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + ThreadId = threadId; + Role = role; + Content = content; + AssistantId = assistantId; + RunId = runId; + FileIds = fileIds; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal MessageObject() + { + } + + /// The identifier, which can be referenced in API endpoints. + public string Id { get; } + /// The object type, which is always `thread.message`. + public MessageObjectObject Object { get; } = MessageObjectObject.ThreadMessage; + + /// The Unix timestamp (in seconds) for when the message was created. + public DateTimeOffset CreatedAt { get; } + /// The [thread](/docs/api-reference/threads) ID that this message belongs to. + public string ThreadId { get; } + /// The entity that produced the message. One of `user` or `assistant`. + public MessageObjectRole Role { get; } + /// + /// The content of the message in array of text and/or images. + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. 
+ /// + /// + /// + /// + public IReadOnlyList Content { get; } + /// + /// If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this + /// message. + /// + public string AssistantId { get; } + /// + /// If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of + /// this message. + /// + public string RunId { get; } + /// + /// A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for + /// tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be + /// attached to a message. + /// + public IReadOnlyList FileIds { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + public IReadOnlyDictionary Metadata { get; } + } +} diff --git a/.dotnet/src/Generated/Models/MessageObjectObject.cs b/.dotnet/src/Generated/Models/MessageObjectObject.cs new file mode 100644 index 000000000..5f835f436 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageObjectObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The MessageObject_object. + public readonly partial struct MessageObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public MessageObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ThreadMessageValue = "thread.message"; + + /// thread.message. + public static MessageObjectObject ThreadMessage { get; } = new MessageObjectObject(ThreadMessageValue); + /// Determines if two values are the same. 
+ public static bool operator ==(MessageObjectObject left, MessageObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(MessageObjectObject left, MessageObjectObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator MessageObjectObject(string value) => new MessageObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is MessageObjectObject other && Equals(other); + /// + public bool Equals(MessageObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/MessageObjectRole.cs b/.dotnet/src/Generated/Models/MessageObjectRole.cs new file mode 100644 index 000000000..a62512cd2 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageObjectRole.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for role in MessageObject. + public readonly partial struct MessageObjectRole : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public MessageObjectRole(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string UserValue = "user"; + private const string AssistantValue = "assistant"; + + /// user. + public static MessageObjectRole User { get; } = new MessageObjectRole(UserValue); + /// assistant. + public static MessageObjectRole Assistant { get; } = new MessageObjectRole(AssistantValue); + /// Determines if two values are the same. 
+ public static bool operator ==(MessageObjectRole left, MessageObjectRole right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(MessageObjectRole left, MessageObjectRole right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator MessageObjectRole(string value) => new MessageObjectRole(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is MessageObjectRole other && Equals(other); + /// + public bool Equals(MessageObjectRole other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/Model.Serialization.cs b/.dotnet/src/Generated/Models/Model.Serialization.cs new file mode 100644 index 000000000..b3a797b7b --- /dev/null +++ b/.dotnet/src/Generated/Models/Model.Serialization.cs @@ -0,0 +1,156 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class Model : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(Model)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("created"u8); + writer.WriteNumberValue(Created, "U"); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("owned_by"u8); + writer.WriteStringValue(OwnedBy); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + Model IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(Model)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeModel(document.RootElement, options); + } + + internal static Model DeserializeModel(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + DateTimeOffset created = default; + ModelObject @object = default; + string ownedBy = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("created"u8)) + { + created = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new ModelObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("owned_by"u8)) + { + ownedBy = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new Model(id, created, @object, ownedBy, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(Model)} does not support '{options.Format}' format."); + } + } + + Model IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeModel(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(Model)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static Model FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeModel(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/Model.cs b/.dotnet/src/Generated/Models/Model.cs new file mode 100644 index 000000000..70edf9a14 --- /dev/null +++ b/.dotnet/src/Generated/Models/Model.cs @@ -0,0 +1,91 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// Describes an OpenAI model offering that can be used with the API. + public partial class Model + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . 
+ /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The model identifier, which can be referenced in the API endpoints. + /// The Unix timestamp (in seconds) when the model was created. + /// The organization that owns the model. + /// or is null. + internal Model(string id, DateTimeOffset created, string ownedBy) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(ownedBy, nameof(ownedBy)); + + Id = id; + Created = created; + OwnedBy = ownedBy; + } + + /// Initializes a new instance of . + /// The model identifier, which can be referenced in the API endpoints. + /// The Unix timestamp (in seconds) when the model was created. + /// The object type, which is always "model". + /// The organization that owns the model. + /// Keeps track of any properties unknown to the library. + internal Model(string id, DateTimeOffset created, ModelObject @object, string ownedBy, IDictionary serializedAdditionalRawData) + { + Id = id; + Created = created; + Object = @object; + OwnedBy = ownedBy; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal Model() + { + } + + /// The model identifier, which can be referenced in the API endpoints. + public string Id { get; } + /// The Unix timestamp (in seconds) when the model was created. 
+ public DateTimeOffset Created { get; } + /// The object type, which is always "model". + public ModelObject Object { get; } = ModelObject.Model; + + /// The organization that owns the model. + public string OwnedBy { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ModelObject.cs b/.dotnet/src/Generated/Models/ModelObject.cs new file mode 100644 index 000000000..063012b5a --- /dev/null +++ b/.dotnet/src/Generated/Models/ModelObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The Model_object. + public readonly partial struct ModelObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ModelObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ModelValue = "model"; + + /// model. + public static ModelObject Model { get; } = new ModelObject(ModelValue); + /// Determines if two values are the same. + public static bool operator ==(ModelObject left, ModelObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ModelObject left, ModelObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ModelObject(string value) => new ModelObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ModelObject other && Equals(other); + /// + public bool Equals(ModelObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs new file mode 100644 index 000000000..7e9027ef1 --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs @@ -0,0 +1,306 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ModifyAssistantRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyAssistantRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (OptionalProperty.IsDefined(Model)) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + } + if (OptionalProperty.IsDefined(Name)) + { + if (Name != null) + { + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + } + else + { + writer.WriteNull("name"); + } + } + if (OptionalProperty.IsDefined(Description)) + { + if (Description != null) + { + writer.WritePropertyName("description"u8); + writer.WriteStringValue(Description); + } + else + { + writer.WriteNull("description"); + } + } + if (OptionalProperty.IsDefined(Instructions)) + { + if (Instructions != null) + { + writer.WritePropertyName("instructions"u8); + writer.WriteStringValue(Instructions); + } + else + { + writer.WriteNull("instructions"); + } + } + if (OptionalProperty.IsCollectionDefined(Tools)) + { + 
writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var item in Tools) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + } + if (OptionalProperty.IsCollectionDefined(FileIds)) + { + writer.WritePropertyName("file_ids"u8); + writer.WriteStartArray(); + foreach (var item in FileIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + } + if (OptionalProperty.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ModifyAssistantRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyAssistantRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeModifyAssistantRequest(document.RootElement, options); + } + + internal static ModifyAssistantRequest DeserializeModifyAssistantRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OptionalProperty model = default; + OptionalProperty name = default; + OptionalProperty description = default; + OptionalProperty instructions = default; + OptionalProperty> tools = default; + OptionalProperty> fileIds = default; + OptionalProperty> metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("name"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + name = null; + continue; + } + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("description"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + description = null; + continue; + } + description = property.Value.GetString(); + continue; + } + if (property.NameEquals("instructions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + instructions = null; + continue; + } + instructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("tools"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == 
JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + tools = array; + continue; + } + if (property.NameEquals("file_ids"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + fileIds = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ModifyAssistantRequest(model.Value, name.Value, description.Value, instructions.Value, OptionalProperty.ToList(tools), OptionalProperty.ToList(fileIds), OptionalProperty.ToDictionary(metadata), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ModifyAssistantRequest)} does not support '{options.Format}' format."); + } + } + + ModifyAssistantRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeModifyAssistantRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ModifyAssistantRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ModifyAssistantRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeModifyAssistantRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs b/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs new file mode 100644 index 000000000..abc79d17a --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs @@ -0,0 +1,147 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The ModifyAssistantRequest. + public partial class ModifyAssistantRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". 
+ /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ModifyAssistantRequest() + { + Tools = new OptionalList(); + FileIds = new OptionalList(); + Metadata = new OptionalDictionary(); + } + + /// Initializes a new instance of . + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// The name of the assistant. The maximum length is 256 characters. + /// The description of the assistant. The maximum length is 512 characters. + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + /// + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. 
+ internal ModifyAssistantRequest(string model, string name, string description, string instructions, IList tools, IList fileIds, IDictionary metadata, IDictionary serializedAdditionalRawData) + { + Model = model; + Name = name; + Description = description; + Instructions = instructions; + Tools = tools; + FileIds = fileIds; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + public string Model { get; set; } + /// The name of the assistant. The maximum length is 256 characters. + public string Name { get; set; } + /// The description of the assistant. The maximum length is 512 characters. + public string Description { get; set; } + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + public string Instructions { get; set; } + /// + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IList Tools { get; } + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. 
There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. + /// + public IList FileIds { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs new file mode 100644 index 000000000..35ee9a46e --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs @@ -0,0 +1,157 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ModifyMessageRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyMessageRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (OptionalProperty.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ModifyMessageRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyMessageRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeModifyMessageRequest(document.RootElement, options); + } + + internal static ModifyMessageRequest DeserializeModifyMessageRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OptionalProperty> metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ModifyMessageRequest(OptionalProperty.ToDictionary(metadata), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ModifyMessageRequest)} does not support '{options.Format}' format."); + } + } + + ModifyMessageRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeModifyMessageRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ModifyMessageRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ModifyMessageRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeModifyMessageRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyMessageRequest.cs b/.dotnet/src/Generated/Models/ModifyMessageRequest.cs new file mode 100644 index 000000000..3a200b238 --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyMessageRequest.cs @@ -0,0 +1,72 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The ModifyMessageRequest. + public partial class ModifyMessageRequest + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ModifyMessageRequest() + { + Metadata = new OptionalDictionary(); + } + + /// Initializes a new instance of . + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal ModifyMessageRequest(IDictionary metadata, IDictionary serializedAdditionalRawData) + { + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. 
+ /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs new file mode 100644 index 000000000..75c891191 --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs @@ -0,0 +1,157 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ModifyRunRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyRunRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (OptionalProperty.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ModifyRunRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + 
var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyRunRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeModifyRunRequest(document.RootElement, options); + } + + internal static ModifyRunRequest DeserializeModifyRunRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OptionalProperty> metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ModifyRunRequest(OptionalProperty.ToDictionary(metadata), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ModifyRunRequest)} does not support '{options.Format}' format."); + } + } + + ModifyRunRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeModifyRunRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ModifyRunRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ModifyRunRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeModifyRunRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyRunRequest.cs b/.dotnet/src/Generated/Models/ModifyRunRequest.cs new file mode 100644 index 000000000..8a8a14588 --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyRunRequest.cs @@ -0,0 +1,72 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The ModifyRunRequest. + public partial class ModifyRunRequest + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ModifyRunRequest() + { + Metadata = new OptionalDictionary(); + } + + /// Initializes a new instance of . + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal ModifyRunRequest(IDictionary metadata, IDictionary serializedAdditionalRawData) + { + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. 
+ /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs new file mode 100644 index 000000000..2e8149f8e --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs @@ -0,0 +1,157 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ModifyThreadRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyThreadRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (OptionalProperty.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ModifyThreadRequest IJsonModel.Create(ref Utf8JsonReader reader, 
ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyThreadRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeModifyThreadRequest(document.RootElement, options); + } + + internal static ModifyThreadRequest DeserializeModifyThreadRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OptionalProperty> metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ModifyThreadRequest(OptionalProperty.ToDictionary(metadata), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ModifyThreadRequest)} does not support '{options.Format}' format."); + } + } + + ModifyThreadRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeModifyThreadRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ModifyThreadRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ModifyThreadRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeModifyThreadRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyThreadRequest.cs b/.dotnet/src/Generated/Models/ModifyThreadRequest.cs new file mode 100644 index 000000000..635172be4 --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyThreadRequest.cs @@ -0,0 +1,72 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The ModifyThreadRequest. + public partial class ModifyThreadRequest + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ModifyThreadRequest() + { + Metadata = new OptionalDictionary(); + } + + /// Initializes a new instance of . + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal ModifyThreadRequest(IDictionary metadata, IDictionary serializedAdditionalRawData) + { + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. 
+ /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs b/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs new file mode 100644 index 000000000..53c7116dd --- /dev/null +++ b/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs @@ -0,0 +1,191 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class OpenAIFile : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OpenAIFile)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("bytes"u8); + writer.WriteNumberValue(Bytes); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("filename"u8); + writer.WriteStringValue(Filename); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("purpose"u8); + writer.WriteStringValue(Purpose.ToString()); + writer.WritePropertyName("status"u8); + writer.WriteStringValue(Status.ToString()); + if (OptionalProperty.IsDefined(StatusDetails)) + { + writer.WritePropertyName("status_details"u8); + writer.WriteStringValue(StatusDetails); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + 
writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + OpenAIFile IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OpenAIFile)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeOpenAIFile(document.RootElement, options); + } + + internal static OpenAIFile DeserializeOpenAIFile(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + long bytes = default; + DateTimeOffset createdAt = default; + string filename = default; + OpenAIFileObject @object = default; + OpenAIFilePurpose purpose = default; + OpenAIFileStatus status = default; + OptionalProperty statusDetails = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("bytes"u8)) + { + bytes = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("filename"u8)) + { + filename = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new OpenAIFileObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("purpose"u8)) + { + purpose = new 
OpenAIFilePurpose(property.Value.GetString()); + continue; + } + if (property.NameEquals("status"u8)) + { + status = new OpenAIFileStatus(property.Value.GetString()); + continue; + } + if (property.NameEquals("status_details"u8)) + { + statusDetails = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new OpenAIFile(id, bytes, createdAt, filename, @object, purpose, status, statusDetails.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(OpenAIFile)} does not support '{options.Format}' format."); + } + } + + OpenAIFile IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeOpenAIFile(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(OpenAIFile)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static OpenAIFile FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeOpenAIFile(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/OpenAIFile.cs b/.dotnet/src/Generated/Models/OpenAIFile.cs new file mode 100644 index 000000000..7d3adf563 --- /dev/null +++ b/.dotnet/src/Generated/Models/OpenAIFile.cs @@ -0,0 +1,137 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The `File` object represents a document that has been uploaded to OpenAI. + public partial class OpenAIFile + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The file identifier, which can be referenced in the API endpoints. + /// The size of the file, in bytes. + /// The Unix timestamp (in seconds) for when the file was created. + /// The name of the file. + /// + /// The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, + /// `assistants`, and `assistants_output`. + /// + /// + /// Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or + /// `error`. + /// + /// or is null. 
+ internal OpenAIFile(string id, long bytes, DateTimeOffset createdAt, string filename, OpenAIFilePurpose purpose, OpenAIFileStatus status) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(filename, nameof(filename)); + + Id = id; + Bytes = bytes; + CreatedAt = createdAt; + Filename = filename; + Purpose = purpose; + Status = status; + } + + /// Initializes a new instance of . + /// The file identifier, which can be referenced in the API endpoints. + /// The size of the file, in bytes. + /// The Unix timestamp (in seconds) for when the file was created. + /// The name of the file. + /// The object type, which is always "file". + /// + /// The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, + /// `assistants`, and `assistants_output`. + /// + /// + /// Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or + /// `error`. + /// + /// + /// Deprecated. For details on why a fine-tuning training file failed validation, see the `error` + /// field on `fine_tuning.job`. + /// + /// Keeps track of any properties unknown to the library. + internal OpenAIFile(string id, long bytes, DateTimeOffset createdAt, string filename, OpenAIFileObject @object, OpenAIFilePurpose purpose, OpenAIFileStatus status, string statusDetails, IDictionary serializedAdditionalRawData) + { + Id = id; + Bytes = bytes; + CreatedAt = createdAt; + Filename = filename; + Object = @object; + Purpose = purpose; + Status = status; + StatusDetails = statusDetails; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal OpenAIFile() + { + } + + /// The file identifier, which can be referenced in the API endpoints. + public string Id { get; } + /// The size of the file, in bytes. + public long Bytes { get; } + /// The Unix timestamp (in seconds) for when the file was created. 
+ public DateTimeOffset CreatedAt { get; } + /// The name of the file. + public string Filename { get; } + /// The object type, which is always "file". + public OpenAIFileObject Object { get; } = OpenAIFileObject.File; + + /// + /// The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, + /// `assistants`, and `assistants_output`. + /// + public OpenAIFilePurpose Purpose { get; } + /// + /// Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or + /// `error`. + /// + public OpenAIFileStatus Status { get; } + /// + /// Deprecated. For details on why a fine-tuning training file failed validation, see the `error` + /// field on `fine_tuning.job`. + /// + public string StatusDetails { get; } + } +} diff --git a/.dotnet/src/Generated/Models/OpenAIFileObject.cs b/.dotnet/src/Generated/Models/OpenAIFileObject.cs new file mode 100644 index 000000000..79e03b84c --- /dev/null +++ b/.dotnet/src/Generated/Models/OpenAIFileObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The OpenAIFile_object. + public readonly partial struct OpenAIFileObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public OpenAIFileObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FileValue = "file"; + + /// file. + public static OpenAIFileObject File { get; } = new OpenAIFileObject(FileValue); + /// Determines if two values are the same. + public static bool operator ==(OpenAIFileObject left, OpenAIFileObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(OpenAIFileObject left, OpenAIFileObject right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator OpenAIFileObject(string value) => new OpenAIFileObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is OpenAIFileObject other && Equals(other); + /// + public bool Equals(OpenAIFileObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs b/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs new file mode 100644 index 000000000..0f3c83bc5 --- /dev/null +++ b/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs @@ -0,0 +1,54 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for purpose in OpenAIFile. + public readonly partial struct OpenAIFilePurpose : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public OpenAIFilePurpose(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FineTuneValue = "fine-tune"; + private const string FineTuneResultsValue = "fine-tune-results"; + private const string AssistantsValue = "assistants"; + private const string AssistantsOutputValue = "assistants_output"; + + /// fine-tune. + public static OpenAIFilePurpose FineTune { get; } = new OpenAIFilePurpose(FineTuneValue); + /// fine-tune-results. + public static OpenAIFilePurpose FineTuneResults { get; } = new OpenAIFilePurpose(FineTuneResultsValue); + /// assistants. + public static OpenAIFilePurpose Assistants { get; } = new OpenAIFilePurpose(AssistantsValue); + /// assistants_output. 
+ public static OpenAIFilePurpose AssistantsOutput { get; } = new OpenAIFilePurpose(AssistantsOutputValue); + /// Determines if two values are the same. + public static bool operator ==(OpenAIFilePurpose left, OpenAIFilePurpose right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(OpenAIFilePurpose left, OpenAIFilePurpose right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator OpenAIFilePurpose(string value) => new OpenAIFilePurpose(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is OpenAIFilePurpose other && Equals(other); + /// + public bool Equals(OpenAIFilePurpose other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/OpenAIFileStatus.cs b/.dotnet/src/Generated/Models/OpenAIFileStatus.cs new file mode 100644 index 000000000..c35731688 --- /dev/null +++ b/.dotnet/src/Generated/Models/OpenAIFileStatus.cs @@ -0,0 +1,51 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for status in OpenAIFile. + public readonly partial struct OpenAIFileStatus : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public OpenAIFileStatus(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string UploadedValue = "uploaded"; + private const string ProcessedValue = "processed"; + private const string ErrorValue = "error"; + + /// uploaded. + public static OpenAIFileStatus Uploaded { get; } = new OpenAIFileStatus(UploadedValue); + /// processed. 
+ public static OpenAIFileStatus Processed { get; } = new OpenAIFileStatus(ProcessedValue); + /// error. + public static OpenAIFileStatus Error { get; } = new OpenAIFileStatus(ErrorValue); + /// Determines if two values are the same. + public static bool operator ==(OpenAIFileStatus left, OpenAIFileStatus right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(OpenAIFileStatus left, OpenAIFileStatus right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator OpenAIFileStatus(string value) => new OpenAIFileStatus(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is OpenAIFileStatus other && Equals(other); + /// + public bool Equals(OpenAIFileStatus other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs b/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs new file mode 100644 index 000000000..e857ef3b6 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs @@ -0,0 +1,148 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class RunCompletionUsage : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunCompletionUsage)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("completion_tokens"u8); + writer.WriteNumberValue(CompletionTokens); + writer.WritePropertyName("prompt_tokens"u8); + writer.WriteNumberValue(PromptTokens); + writer.WritePropertyName("total_tokens"u8); + writer.WriteNumberValue(TotalTokens); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunCompletionUsage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunCompletionUsage)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunCompletionUsage(document.RootElement, options); + } + + internal static RunCompletionUsage DeserializeRunCompletionUsage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long completionTokens = default; + long promptTokens = default; + long totalTokens = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("completion_tokens"u8)) + { + completionTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("prompt_tokens"u8)) + { + promptTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("total_tokens"u8)) + { + totalTokens = property.Value.GetInt64(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunCompletionUsage(completionTokens, promptTokens, totalTokens, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunCompletionUsage)} does not support '{options.Format}' format."); + } + } + + RunCompletionUsage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunCompletionUsage(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunCompletionUsage)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunCompletionUsage FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunCompletionUsage(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunCompletionUsage.cs b/.dotnet/src/Generated/Models/RunCompletionUsage.cs new file mode 100644 index 000000000..be9401604 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunCompletionUsage.cs @@ -0,0 +1,84 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// + /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal + /// state (i.e. `in_progress`, `queued`, etc.). 
+ /// + public partial class RunCompletionUsage + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// Number of completion tokens used over the course of the run. + /// Number of prompt tokens used over the course of the run. + /// Total number of tokens used (prompt + completion). + internal RunCompletionUsage(long completionTokens, long promptTokens, long totalTokens) + { + CompletionTokens = completionTokens; + PromptTokens = promptTokens; + TotalTokens = totalTokens; + } + + /// Initializes a new instance of . + /// Number of completion tokens used over the course of the run. + /// Number of prompt tokens used over the course of the run. + /// Total number of tokens used (prompt + completion). + /// Keeps track of any properties unknown to the library. + internal RunCompletionUsage(long completionTokens, long promptTokens, long totalTokens, IDictionary serializedAdditionalRawData) + { + CompletionTokens = completionTokens; + PromptTokens = promptTokens; + TotalTokens = totalTokens; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunCompletionUsage() + { + } + + /// Number of completion tokens used over the course of the run. 
+ public long CompletionTokens { get; } + /// Number of prompt tokens used over the course of the run. + public long PromptTokens { get; } + /// Total number of tokens used (prompt + completion). + public long TotalTokens { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunObject.Serialization.cs b/.dotnet/src/Generated/Models/RunObject.Serialization.cs new file mode 100644 index 000000000..7128918dc --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObject.Serialization.cs @@ -0,0 +1,422 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class RunObject : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("thread_id"u8); + writer.WriteStringValue(ThreadId); + writer.WritePropertyName("assistant_id"u8); + writer.WriteStringValue(AssistantId); + writer.WritePropertyName("status"u8); + writer.WriteStringValue(Status.ToString()); + if (RequiredAction != null) + { + writer.WritePropertyName("required_action"u8); + writer.WriteObjectValue(RequiredAction); + } + else + { + writer.WriteNull("required_action"); + } + if (LastError != null) + { + writer.WritePropertyName("last_error"u8); + writer.WriteObjectValue(LastError); + } + else + { + writer.WriteNull("last_error"); + } + writer.WritePropertyName("expires_at"u8); + writer.WriteNumberValue(ExpiresAt, "U"); + if (StartedAt != null) + { + writer.WritePropertyName("started_at"u8); + writer.WriteStringValue(StartedAt.Value, "O"); + } + else + { + writer.WriteNull("started_at"); + } + if (CancelledAt != null) + { + writer.WritePropertyName("cancelled_at"u8); + writer.WriteStringValue(CancelledAt.Value, "O"); + } + else + { + writer.WriteNull("cancelled_at"); + } + if (FailedAt != null) + { + writer.WritePropertyName("failed_at"u8); + writer.WriteStringValue(FailedAt.Value, "O"); + } + else + { + writer.WriteNull("failed_at"); + } + if (CompletedAt != null) + { + writer.WritePropertyName("completed_at"u8); + writer.WriteStringValue(CompletedAt.Value, "O"); + } + else + { + writer.WriteNull("completed_at"); + } + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + writer.WritePropertyName("instructions"u8); + 
writer.WriteStringValue(Instructions); + writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var item in Tools) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + writer.WritePropertyName("file_ids"u8); + writer.WriteStartArray(); + foreach (var item in FileIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + if (Metadata != null && OptionalProperty.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + if (Usage != null) + { + writer.WritePropertyName("usage"u8); + writer.WriteObjectValue(Usage); + } + else + { + writer.WriteNull("usage"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunObject(document.RootElement, options); + } + + internal static RunObject DeserializeRunObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + RunObjectObject @object = default; + DateTimeOffset createdAt = default; + string threadId = default; + string assistantId = default; + RunObjectStatus status = default; + RunObjectRequiredAction requiredAction = default; + RunObjectLastError lastError = default; + DateTimeOffset expiresAt = default; + DateTimeOffset? startedAt = default; + DateTimeOffset? cancelledAt = default; + DateTimeOffset? failedAt = default; + DateTimeOffset? 
completedAt = default; + string model = default; + string instructions = default; + IReadOnlyList tools = default; + IReadOnlyList fileIds = default; + IReadOnlyDictionary metadata = default; + RunCompletionUsage usage = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new RunObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("thread_id"u8)) + { + threadId = property.Value.GetString(); + continue; + } + if (property.NameEquals("assistant_id"u8)) + { + assistantId = property.Value.GetString(); + continue; + } + if (property.NameEquals("status"u8)) + { + status = new RunObjectStatus(property.Value.GetString()); + continue; + } + if (property.NameEquals("required_action"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + requiredAction = null; + continue; + } + requiredAction = RunObjectRequiredAction.DeserializeRunObjectRequiredAction(property.Value); + continue; + } + if (property.NameEquals("last_error"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + lastError = null; + continue; + } + lastError = RunObjectLastError.DeserializeRunObjectLastError(property.Value); + continue; + } + if (property.NameEquals("expires_at"u8)) + { + expiresAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("started_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + startedAt = null; + continue; + } + startedAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("cancelled_at"u8)) + { + if 
(property.Value.ValueKind == JsonValueKind.Null) + { + cancelledAt = null; + continue; + } + cancelledAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("failed_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + failedAt = null; + continue; + } + failedAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("completed_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + completedAt = null; + continue; + } + completedAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("instructions"u8)) + { + instructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("tools"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + tools = array; + continue; + } + if (property.NameEquals("file_ids"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + fileIds = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + metadata = new OptionalDictionary(); + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (property.NameEquals("usage"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + usage = null; + continue; + } + usage = RunCompletionUsage.DeserializeRunCompletionUsage(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, 
BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunObject(id, @object, createdAt, threadId, assistantId, status, requiredAction, lastError, expiresAt, startedAt, cancelledAt, failedAt, completedAt, model, instructions, tools, fileIds, metadata, usage, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunObject)} does not support '{options.Format}' format."); + } + } + + RunObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunObject.cs b/.dotnet/src/Generated/Models/RunObject.cs new file mode 100644 index 000000000..8222d1eec --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObject.cs @@ -0,0 +1,264 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// Represents an execution run on a [thread](/docs/api-reference/threads). + public partial class RunObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the run was created. + /// + /// The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this + /// run. + /// + /// The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + /// + /// The status of the run, which can be either `queued`, `in_progress`, `requires_action`, + /// `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. 
+ /// + /// + /// Details on the action required to continue the run. Will be `null` if no action is + /// required. + /// + /// The last error associated with this run. Will be `null` if there are no errors. + /// The Unix timestamp (in seconds) for when the run will expire. + /// The Unix timestamp (in seconds) for when the run was started. + /// The Unix timestamp (in seconds) for when the run was cancelled. + /// The Unix timestamp (in seconds) for when the run failed. + /// The Unix timestamp (in seconds) for when the run was completed. + /// The model that the [assistant](/docs/api-reference/assistants) used for this run. + /// The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + /// + /// The list of [File](/docs/api-reference/files) IDs the + /// [assistant](/docs/api-reference/assistants) used for this run. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// + /// , , , , , or is null. + internal RunObject(string id, DateTimeOffset createdAt, string threadId, string assistantId, RunObjectStatus status, RunObjectRequiredAction requiredAction, RunObjectLastError lastError, DateTimeOffset expiresAt, DateTimeOffset? startedAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? 
completedAt, string model, string instructions, IEnumerable tools, IEnumerable fileIds, IReadOnlyDictionary metadata, RunCompletionUsage usage) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNull(model, nameof(model)); + ClientUtilities.AssertNotNull(instructions, nameof(instructions)); + ClientUtilities.AssertNotNull(tools, nameof(tools)); + ClientUtilities.AssertNotNull(fileIds, nameof(fileIds)); + + Id = id; + CreatedAt = createdAt; + ThreadId = threadId; + AssistantId = assistantId; + Status = status; + RequiredAction = requiredAction; + LastError = lastError; + ExpiresAt = expiresAt; + StartedAt = startedAt; + CancelledAt = cancelledAt; + FailedAt = failedAt; + CompletedAt = completedAt; + Model = model; + Instructions = instructions; + Tools = tools.ToList(); + FileIds = fileIds.ToList(); + Metadata = metadata; + Usage = usage; + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `thread.run`. + /// The Unix timestamp (in seconds) for when the run was created. + /// + /// The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this + /// run. + /// + /// The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + /// + /// The status of the run, which can be either `queued`, `in_progress`, `requires_action`, + /// `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. + /// + /// + /// Details on the action required to continue the run. Will be `null` if no action is + /// required. + /// + /// The last error associated with this run. Will be `null` if there are no errors. + /// The Unix timestamp (in seconds) for when the run will expire. + /// The Unix timestamp (in seconds) for when the run was started. 
+ /// The Unix timestamp (in seconds) for when the run was cancelled. + /// The Unix timestamp (in seconds) for when the run failed. + /// The Unix timestamp (in seconds) for when the run was completed. + /// The model that the [assistant](/docs/api-reference/assistants) used for this run. + /// The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + /// + /// The list of [File](/docs/api-reference/files) IDs the + /// [assistant](/docs/api-reference/assistants) used for this run. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// + /// Keeps track of any properties unknown to the library. + internal RunObject(string id, RunObjectObject @object, DateTimeOffset createdAt, string threadId, string assistantId, RunObjectStatus status, RunObjectRequiredAction requiredAction, RunObjectLastError lastError, DateTimeOffset expiresAt, DateTimeOffset? startedAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? 
completedAt, string model, string instructions, IReadOnlyList tools, IReadOnlyList fileIds, IReadOnlyDictionary metadata, RunCompletionUsage usage, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + ThreadId = threadId; + AssistantId = assistantId; + Status = status; + RequiredAction = requiredAction; + LastError = lastError; + ExpiresAt = expiresAt; + StartedAt = startedAt; + CancelledAt = cancelledAt; + FailedAt = failedAt; + CompletedAt = completedAt; + Model = model; + Instructions = instructions; + Tools = tools; + FileIds = fileIds; + Metadata = metadata; + Usage = usage; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunObject() + { + } + + /// The identifier, which can be referenced in API endpoints. + public string Id { get; } + /// The object type, which is always `thread.run`. + public RunObjectObject Object { get; } = RunObjectObject.ThreadRun; + + /// The Unix timestamp (in seconds) for when the run was created. + public DateTimeOffset CreatedAt { get; } + /// + /// The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this + /// run. + /// + public string ThreadId { get; } + /// The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + public string AssistantId { get; } + /// + /// The status of the run, which can be either `queued`, `in_progress`, `requires_action`, + /// `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. + /// + public RunObjectStatus Status { get; } + /// + /// Details on the action required to continue the run. Will be `null` if no action is + /// required. + /// + public RunObjectRequiredAction RequiredAction { get; } + /// The last error associated with this run. Will be `null` if there are no errors. 
+ public RunObjectLastError LastError { get; } + /// The Unix timestamp (in seconds) for when the run will expire. + public DateTimeOffset ExpiresAt { get; } + /// The Unix timestamp (in seconds) for when the run was started. + public DateTimeOffset? StartedAt { get; } + /// The Unix timestamp (in seconds) for when the run was cancelled. + public DateTimeOffset? CancelledAt { get; } + /// The Unix timestamp (in seconds) for when the run failed. + public DateTimeOffset? FailedAt { get; } + /// The Unix timestamp (in seconds) for when the run was completed. + public DateTimeOffset? CompletedAt { get; } + /// The model that the [assistant](/docs/api-reference/assistants) used for this run. + public string Model { get; } + /// The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + public string Instructions { get; } + /// + /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IReadOnlyList Tools { get; } + /// + /// The list of [File](/docs/api-reference/files) IDs the + /// [assistant](/docs/api-reference/assistants) used for this run. + /// + public IReadOnlyList FileIds { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. 
Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + public IReadOnlyDictionary Metadata { get; } + /// Gets the usage. + public RunCompletionUsage Usage { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs b/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs new file mode 100644 index 000000000..f40c2aa0a --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs @@ -0,0 +1,140 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class RunObjectLastError : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObjectLastError)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("code"u8); + writer.WriteStringValue(Code.ToString()); + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunObjectLastError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObjectLastError)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunObjectLastError(document.RootElement, options); + } + + internal static RunObjectLastError DeserializeRunObjectLastError(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + RunObjectLastErrorCode code = default; + string message = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code"u8)) + { + code = new RunObjectLastErrorCode(property.Value.GetString()); + continue; + } + if (property.NameEquals("message"u8)) + { + message = 
property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunObjectLastError(code, message, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunObjectLastError)} does not support '{options.Format}' format."); + } + } + + RunObjectLastError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunObjectLastError(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunObjectLastError)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunObjectLastError FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunObjectLastError(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectLastError.cs b/.dotnet/src/Generated/Models/RunObjectLastError.cs new file mode 100644 index 000000000..f2e26a107 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectLastError.cs @@ -0,0 +1,79 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The RunObjectLastError. + public partial class RunObjectLastError + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// One of `server_error` or `rate_limit_exceeded`. + /// A human-readable description of the error. + /// is null. + internal RunObjectLastError(RunObjectLastErrorCode code, string message) + { + ClientUtilities.AssertNotNull(message, nameof(message)); + + Code = code; + Message = message; + } + + /// Initializes a new instance of . + /// One of `server_error` or `rate_limit_exceeded`. + /// A human-readable description of the error. + /// Keeps track of any properties unknown to the library. 
+ internal RunObjectLastError(RunObjectLastErrorCode code, string message, IDictionary serializedAdditionalRawData) + { + Code = code; + Message = message; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunObjectLastError() + { + } + + /// One of `server_error` or `rate_limit_exceeded`. + public RunObjectLastErrorCode Code { get; } + /// A human-readable description of the error. + public string Message { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs b/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs new file mode 100644 index 000000000..abcb19758 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for code in RunObjectLastError. + public readonly partial struct RunObjectLastErrorCode : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunObjectLastErrorCode(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ServerErrorValue = "server_error"; + private const string RateLimitExceededValue = "rate_limit_exceeded"; + + /// server_error. + public static RunObjectLastErrorCode ServerError { get; } = new RunObjectLastErrorCode(ServerErrorValue); + /// rate_limit_exceeded. + public static RunObjectLastErrorCode RateLimitExceeded { get; } = new RunObjectLastErrorCode(RateLimitExceededValue); + /// Determines if two values are the same. + public static bool operator ==(RunObjectLastErrorCode left, RunObjectLastErrorCode right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunObjectLastErrorCode left, RunObjectLastErrorCode right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator RunObjectLastErrorCode(string value) => new RunObjectLastErrorCode(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunObjectLastErrorCode other && Equals(other); + /// + public bool Equals(RunObjectLastErrorCode other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectObject.cs b/.dotnet/src/Generated/Models/RunObjectObject.cs new file mode 100644 index 000000000..c9b970b98 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The RunObject_object. + public readonly partial struct RunObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ThreadRunValue = "thread.run"; + + /// thread.run. + public static RunObjectObject ThreadRun { get; } = new RunObjectObject(ThreadRunValue); + /// Determines if two values are the same. + public static bool operator ==(RunObjectObject left, RunObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunObjectObject left, RunObjectObject right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator RunObjectObject(string value) => new RunObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunObjectObject other && Equals(other); + /// + public bool Equals(RunObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs b/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs new file mode 100644 index 000000000..8b354c03e --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs @@ -0,0 +1,140 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class RunObjectRequiredAction : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObjectRequiredAction)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("submit_tool_outputs"u8); + writer.WriteObjectValue(SubmitToolOutputs); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunObjectRequiredAction IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObjectRequiredAction)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunObjectRequiredAction(document.RootElement, options); + } + + internal static RunObjectRequiredAction DeserializeRunObjectRequiredAction(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + RunObjectRequiredActionType type = default; + RunObjectRequiredActionSubmitToolOutputs submitToolOutputs = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = new 
RunObjectRequiredActionType(property.Value.GetString()); + continue; + } + if (property.NameEquals("submit_tool_outputs"u8)) + { + submitToolOutputs = RunObjectRequiredActionSubmitToolOutputs.DeserializeRunObjectRequiredActionSubmitToolOutputs(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunObjectRequiredAction(type, submitToolOutputs, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunObjectRequiredAction)} does not support '{options.Format}' format."); + } + } + + RunObjectRequiredAction IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunObjectRequiredAction(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunObjectRequiredAction)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunObjectRequiredAction FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunObjectRequiredAction(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs b/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs new file mode 100644 index 000000000..fa11171c8 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs @@ -0,0 +1,78 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The RunObjectRequiredAction. + public partial class RunObjectRequiredAction + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// Details on the tool outputs needed for this run to continue. + /// is null. + internal RunObjectRequiredAction(RunObjectRequiredActionSubmitToolOutputs submitToolOutputs) + { + ClientUtilities.AssertNotNull(submitToolOutputs, nameof(submitToolOutputs)); + + SubmitToolOutputs = submitToolOutputs; + } + + /// Initializes a new instance of . + /// For now, this is always `submit_tool_outputs`. + /// Details on the tool outputs needed for this run to continue. + /// Keeps track of any properties unknown to the library. 
+ internal RunObjectRequiredAction(RunObjectRequiredActionType type, RunObjectRequiredActionSubmitToolOutputs submitToolOutputs, IDictionary serializedAdditionalRawData) + { + Type = type; + SubmitToolOutputs = submitToolOutputs; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunObjectRequiredAction() + { + } + + /// For now, this is always `submit_tool_outputs`. + public RunObjectRequiredActionType Type { get; } = RunObjectRequiredActionType.SubmitToolOutputs; + + /// Details on the tool outputs needed for this run to continue. + public RunObjectRequiredActionSubmitToolOutputs SubmitToolOutputs { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs new file mode 100644 index 000000000..6f7c1b4e5 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs @@ -0,0 +1,142 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class RunObjectRequiredActionSubmitToolOutputs : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObjectRequiredActionSubmitToolOutputs)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("tool_calls"u8); + writer.WriteStartArray(); + foreach (var item in ToolCalls) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunObjectRequiredActionSubmitToolOutputs IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObjectRequiredActionSubmitToolOutputs)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunObjectRequiredActionSubmitToolOutputs(document.RootElement, options); + } + + internal static RunObjectRequiredActionSubmitToolOutputs DeserializeRunObjectRequiredActionSubmitToolOutputs(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList toolCalls = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("tool_calls"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(RunToolCallObject.DeserializeRunToolCallObject(item)); + } + toolCalls = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunObjectRequiredActionSubmitToolOutputs(toolCalls, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunObjectRequiredActionSubmitToolOutputs)} does not support '{options.Format}' format."); + } + } + + RunObjectRequiredActionSubmitToolOutputs IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunObjectRequiredActionSubmitToolOutputs(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunObjectRequiredActionSubmitToolOutputs)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunObjectRequiredActionSubmitToolOutputs FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunObjectRequiredActionSubmitToolOutputs(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs new file mode 100644 index 000000000..e9f7b6a58 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs @@ -0,0 +1,74 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// The RunObjectRequiredActionSubmitToolOutputs. + public partial class RunObjectRequiredActionSubmitToolOutputs + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A list of the relevant tool calls. + /// is null. + internal RunObjectRequiredActionSubmitToolOutputs(IEnumerable toolCalls) + { + ClientUtilities.AssertNotNull(toolCalls, nameof(toolCalls)); + + ToolCalls = toolCalls.ToList(); + } + + /// Initializes a new instance of . + /// A list of the relevant tool calls. + /// Keeps track of any properties unknown to the library. 
+ internal RunObjectRequiredActionSubmitToolOutputs(IReadOnlyList toolCalls, IDictionary serializedAdditionalRawData) + { + ToolCalls = toolCalls; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunObjectRequiredActionSubmitToolOutputs() + { + } + + /// A list of the relevant tool calls. + public IReadOnlyList ToolCalls { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs b/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs new file mode 100644 index 000000000..eb11f17ac --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The RunObjectRequiredAction_type. + public readonly partial struct RunObjectRequiredActionType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunObjectRequiredActionType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string SubmitToolOutputsValue = "submit_tool_outputs"; + + /// submit_tool_outputs. + public static RunObjectRequiredActionType SubmitToolOutputs { get; } = new RunObjectRequiredActionType(SubmitToolOutputsValue); + /// Determines if two values are the same. + public static bool operator ==(RunObjectRequiredActionType left, RunObjectRequiredActionType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunObjectRequiredActionType left, RunObjectRequiredActionType right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator RunObjectRequiredActionType(string value) => new RunObjectRequiredActionType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunObjectRequiredActionType other && Equals(other); + /// + public bool Equals(RunObjectRequiredActionType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectStatus.cs b/.dotnet/src/Generated/Models/RunObjectStatus.cs new file mode 100644 index 000000000..52f7c1603 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectStatus.cs @@ -0,0 +1,66 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for status in RunObject. + public readonly partial struct RunObjectStatus : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunObjectStatus(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string QueuedValue = "queued"; + private const string InProgressValue = "in_progress"; + private const string RequiresActionValue = "requires_action"; + private const string CancellingValue = "cancelling"; + private const string CancelledValue = "cancelled"; + private const string FailedValue = "failed"; + private const string CompletedValue = "completed"; + private const string ExpiredValue = "expired"; + + /// queued. + public static RunObjectStatus Queued { get; } = new RunObjectStatus(QueuedValue); + /// in_progress. + public static RunObjectStatus InProgress { get; } = new RunObjectStatus(InProgressValue); + /// requires_action. 
+ public static RunObjectStatus RequiresAction { get; } = new RunObjectStatus(RequiresActionValue); + /// cancelling. + public static RunObjectStatus Cancelling { get; } = new RunObjectStatus(CancellingValue); + /// cancelled. + public static RunObjectStatus Cancelled { get; } = new RunObjectStatus(CancelledValue); + /// failed. + public static RunObjectStatus Failed { get; } = new RunObjectStatus(FailedValue); + /// completed. + public static RunObjectStatus Completed { get; } = new RunObjectStatus(CompletedValue); + /// expired. + public static RunObjectStatus Expired { get; } = new RunObjectStatus(ExpiredValue); + /// Determines if two values are the same. + public static bool operator ==(RunObjectStatus left, RunObjectStatus right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunObjectStatus left, RunObjectStatus right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunObjectStatus(string value) => new RunObjectStatus(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunObjectStatus other && Equals(other); + /// + public bool Equals(RunObjectStatus other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs new file mode 100644 index 000000000..9bfd95a67 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs @@ -0,0 +1,140 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + internal partial class RunStepDetailsMessageCreationObject : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("message_creation"u8); + writer.WriteObjectValue(MessageCreation); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunStepDetailsMessageCreationObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunStepDetailsMessageCreationObject(document.RootElement, options); + } + + internal static RunStepDetailsMessageCreationObject DeserializeRunStepDetailsMessageCreationObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + RunStepDetailsMessageCreationObjectType type = default; + RunStepDetailsMessageCreationObjectMessageCreation messageCreation = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = new RunStepDetailsMessageCreationObjectType(property.Value.GetString()); + continue; + } + if (property.NameEquals("message_creation"u8)) + { + messageCreation = RunStepDetailsMessageCreationObjectMessageCreation.DeserializeRunStepDetailsMessageCreationObjectMessageCreation(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunStepDetailsMessageCreationObject(type, messageCreation, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObject)} does not support '{options.Format}' format."); + } + } + + RunStepDetailsMessageCreationObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunStepDetailsMessageCreationObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunStepDetailsMessageCreationObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunStepDetailsMessageCreationObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs new file mode 100644 index 000000000..d0db44dad --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs @@ -0,0 +1,78 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// Details of the message creation by the run step. + internal partial class RunStepDetailsMessageCreationObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// is null. + internal RunStepDetailsMessageCreationObject(RunStepDetailsMessageCreationObjectMessageCreation messageCreation) + { + ClientUtilities.AssertNotNull(messageCreation, nameof(messageCreation)); + + MessageCreation = messageCreation; + } + + /// Initializes a new instance of . + /// Details of the message creation by the run step. + /// + /// Keeps track of any properties unknown to the library. 
+ internal RunStepDetailsMessageCreationObject(RunStepDetailsMessageCreationObjectType type, RunStepDetailsMessageCreationObjectMessageCreation messageCreation, IDictionary serializedAdditionalRawData) + { + Type = type; + MessageCreation = messageCreation; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunStepDetailsMessageCreationObject() + { + } + + /// Details of the message creation by the run step. + public RunStepDetailsMessageCreationObjectType Type { get; } = RunStepDetailsMessageCreationObjectType.MessageCreation; + + /// Gets the message creation. + public RunStepDetailsMessageCreationObjectMessageCreation MessageCreation { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs new file mode 100644 index 000000000..aadaece5e --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs @@ -0,0 +1,132 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + internal partial class RunStepDetailsMessageCreationObjectMessageCreation : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObjectMessageCreation)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("message_id"u8); + writer.WriteStringValue(MessageId); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunStepDetailsMessageCreationObjectMessageCreation IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObjectMessageCreation)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunStepDetailsMessageCreationObjectMessageCreation(document.RootElement, options); + } + + internal static RunStepDetailsMessageCreationObjectMessageCreation DeserializeRunStepDetailsMessageCreationObjectMessageCreation(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string messageId = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("message_id"u8)) + { + messageId = property.Value.GetString(); + continue; + } + 
if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunStepDetailsMessageCreationObjectMessageCreation(messageId, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObjectMessageCreation)} does not support '{options.Format}' format."); + } + } + + RunStepDetailsMessageCreationObjectMessageCreation IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunStepDetailsMessageCreationObjectMessageCreation(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObjectMessageCreation)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunStepDetailsMessageCreationObjectMessageCreation FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunStepDetailsMessageCreationObjectMessageCreation(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs new file mode 100644 index 000000000..3163ffe5c --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs @@ -0,0 +1,73 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The RunStepDetailsMessageCreationObjectMessageCreation. + internal partial class RunStepDetailsMessageCreationObjectMessageCreation + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The ID of the message that was created by this run step. + /// is null. + internal RunStepDetailsMessageCreationObjectMessageCreation(string messageId) + { + ClientUtilities.AssertNotNull(messageId, nameof(messageId)); + + MessageId = messageId; + } + + /// Initializes a new instance of . + /// The ID of the message that was created by this run step. + /// Keeps track of any properties unknown to the library. 
+ internal RunStepDetailsMessageCreationObjectMessageCreation(string messageId, IDictionary serializedAdditionalRawData) + { + MessageId = messageId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunStepDetailsMessageCreationObjectMessageCreation() + { + } + + /// The ID of the message that was created by this run step. + public string MessageId { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs new file mode 100644 index 000000000..007fd9eed --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The RunStepDetailsMessageCreationObject_type. + internal readonly partial struct RunStepDetailsMessageCreationObjectType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunStepDetailsMessageCreationObjectType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string MessageCreationValue = "message_creation"; + + /// message_creation. + public static RunStepDetailsMessageCreationObjectType MessageCreation { get; } = new RunStepDetailsMessageCreationObjectType(MessageCreationValue); + /// Determines if two values are the same. + public static bool operator ==(RunStepDetailsMessageCreationObjectType left, RunStepDetailsMessageCreationObjectType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunStepDetailsMessageCreationObjectType left, RunStepDetailsMessageCreationObjectType right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator RunStepDetailsMessageCreationObjectType(string value) => new RunStepDetailsMessageCreationObjectType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunStepDetailsMessageCreationObjectType other && Equals(other); + /// + public bool Equals(RunStepDetailsMessageCreationObjectType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs new file mode 100644 index 000000000..5fa47d28b --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs @@ -0,0 +1,169 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + internal partial class RunStepDetailsToolCallsObject : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepDetailsToolCallsObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("tool_calls"u8); + writer.WriteStartArray(); + foreach (var item in ToolCalls) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunStepDetailsToolCallsObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepDetailsToolCallsObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunStepDetailsToolCallsObject(document.RootElement, options); + } + + internal static RunStepDetailsToolCallsObject DeserializeRunStepDetailsToolCallsObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + RunStepDetailsToolCallsObjectType type = default; + IReadOnlyList toolCalls = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = new RunStepDetailsToolCallsObjectType(property.Value.GetString()); + continue; + } + if (property.NameEquals("tool_calls"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + toolCalls = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunStepDetailsToolCallsObject(type, toolCalls, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunStepDetailsToolCallsObject)} does not support '{options.Format}' format."); + } + } + + RunStepDetailsToolCallsObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunStepDetailsToolCallsObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunStepDetailsToolCallsObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunStepDetailsToolCallsObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunStepDetailsToolCallsObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs new file mode 100644 index 000000000..76cdc1d4c --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs @@ -0,0 +1,115 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// Details of the tool call. 
+ internal partial class RunStepDetailsToolCallsObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// An array of tool calls the run step was involved in. These can be associated with one of three + /// types of tools: `code_interpreter`, `retrieval`, or `function`. + /// + /// is null. + internal RunStepDetailsToolCallsObject(IEnumerable toolCalls) + { + ClientUtilities.AssertNotNull(toolCalls, nameof(toolCalls)); + + ToolCalls = toolCalls.ToList(); + } + + /// Initializes a new instance of . + /// Always `tool_calls`. + /// + /// An array of tool calls the run step was involved in. These can be associated with one of three + /// types of tools: `code_interpreter`, `retrieval`, or `function`. + /// + /// Keeps track of any properties unknown to the library. + internal RunStepDetailsToolCallsObject(RunStepDetailsToolCallsObjectType type, IReadOnlyList toolCalls, IDictionary serializedAdditionalRawData) + { + Type = type; + ToolCalls = toolCalls; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunStepDetailsToolCallsObject() + { + } + + /// Always `tool_calls`. 
+ public RunStepDetailsToolCallsObjectType Type { get; } = RunStepDetailsToolCallsObjectType.ToolCalls; + + /// + /// An array of tool calls the run step was involved in. These can be associated with one of three + /// types of tools: `code_interpreter`, `retrieval`, or `function`. + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IReadOnlyList ToolCalls { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs new file mode 100644 index 000000000..56b168ac3 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The RunStepDetailsToolCallsObject_type. + internal readonly partial struct RunStepDetailsToolCallsObjectType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunStepDetailsToolCallsObjectType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ToolCallsValue = "tool_calls"; + + /// tool_calls. + public static RunStepDetailsToolCallsObjectType ToolCalls { get; } = new RunStepDetailsToolCallsObjectType(ToolCallsValue); + /// Determines if two values are the same. 
+ public static bool operator ==(RunStepDetailsToolCallsObjectType left, RunStepDetailsToolCallsObjectType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunStepDetailsToolCallsObjectType left, RunStepDetailsToolCallsObjectType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunStepDetailsToolCallsObjectType(string value) => new RunStepDetailsToolCallsObjectType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunStepDetailsToolCallsObjectType other && Equals(other); + /// + public bool Equals(RunStepDetailsToolCallsObjectType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs b/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs new file mode 100644 index 000000000..74f027391 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs @@ -0,0 +1,354 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class RunStepObject : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("assistant_id"u8); + writer.WriteStringValue(AssistantId); + writer.WritePropertyName("thread_id"u8); + writer.WriteStringValue(ThreadId); + writer.WritePropertyName("run_id"u8); + writer.WriteStringValue(RunId); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("status"u8); + writer.WriteStringValue(Status.ToString()); + writer.WritePropertyName("step_details"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(StepDetails); +#else + using (JsonDocument document = JsonDocument.Parse(StepDetails)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + if (LastError != null) + { + writer.WritePropertyName("last_error"u8); + writer.WriteObjectValue(LastError); + } + else + { + writer.WriteNull("last_error"); + } + if (ExpiresAt != null) + { + writer.WritePropertyName("expires_at"u8); + writer.WriteStringValue(ExpiresAt.Value, "O"); + } + else + { + writer.WriteNull("expires_at"); + } + if (CancelledAt != null) + { + writer.WritePropertyName("cancelled_at"u8); + writer.WriteStringValue(CancelledAt.Value, "O"); + } + else + { + writer.WriteNull("cancelled_at"); + } + if (FailedAt != null) + { + writer.WritePropertyName("failed_at"u8); + writer.WriteStringValue(FailedAt.Value, "O"); + } + else + { + writer.WriteNull("failed_at"); + } + if (CompletedAt != null) + { + writer.WritePropertyName("completed_at"u8); + writer.WriteStringValue(CompletedAt.Value, "O"); + } + else + { + 
writer.WriteNull("completed_at"); + } + if (Metadata != null && OptionalProperty.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + if (Usage != null) + { + writer.WritePropertyName("usage"u8); + writer.WriteObjectValue(Usage); + } + else + { + writer.WriteNull("usage"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunStepObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunStepObject(document.RootElement, options); + } + + internal static RunStepObject DeserializeRunStepObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + RunStepObjectObject @object = default; + DateTimeOffset createdAt = default; + string assistantId = default; + string threadId = default; + string runId = default; + RunStepObjectType type = default; + RunStepObjectStatus status = default; + BinaryData stepDetails = default; + RunStepObjectLastError lastError = default; + DateTimeOffset? expiresAt = default; + DateTimeOffset? cancelledAt = default; + DateTimeOffset? failedAt = default; + DateTimeOffset? 
completedAt = default; + IReadOnlyDictionary metadata = default; + RunCompletionUsage usage = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new RunStepObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("assistant_id"u8)) + { + assistantId = property.Value.GetString(); + continue; + } + if (property.NameEquals("thread_id"u8)) + { + threadId = property.Value.GetString(); + continue; + } + if (property.NameEquals("run_id"u8)) + { + runId = property.Value.GetString(); + continue; + } + if (property.NameEquals("type"u8)) + { + type = new RunStepObjectType(property.Value.GetString()); + continue; + } + if (property.NameEquals("status"u8)) + { + status = new RunStepObjectStatus(property.Value.GetString()); + continue; + } + if (property.NameEquals("step_details"u8)) + { + stepDetails = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("last_error"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + lastError = null; + continue; + } + lastError = RunStepObjectLastError.DeserializeRunStepObjectLastError(property.Value); + continue; + } + if (property.NameEquals("expires_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + expiresAt = null; + continue; + } + expiresAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("cancelled_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + cancelledAt = null; + continue; + } + cancelledAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if 
(property.NameEquals("failed_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + failedAt = null; + continue; + } + failedAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("completed_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + completedAt = null; + continue; + } + completedAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + metadata = new OptionalDictionary(); + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (property.NameEquals("usage"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + usage = null; + continue; + } + usage = RunCompletionUsage.DeserializeRunCompletionUsage(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunStepObject(id, @object, createdAt, assistantId, threadId, runId, type, status, stepDetails, lastError, expiresAt, cancelledAt, failedAt, completedAt, metadata, usage, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunStepObject)} does not support '{options.Format}' format."); + } + } + + RunStepObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunStepObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunStepObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunStepObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunStepObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObject.cs b/.dotnet/src/Generated/Models/RunStepObject.cs new file mode 100644 index 000000000..801dc2367 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObject.cs @@ -0,0 +1,236 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// Represents a step in execution of a run. + public partial class RunStepObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. 
+ /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identifier of the run step, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the run step was created. + /// The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + /// The type of run step, which can be either `message_creation` or `tool_calls`. + /// + /// The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, + /// `completed`, or `expired`. + /// + /// The details of the run step. + /// The last error associated with this run step. Will be `null` if there are no errors. + /// + /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired + /// if the parent run is expired. + /// + /// The Unix timestamp (in seconds) for when the run step was cancelled. + /// The Unix timestamp (in seconds) for when the run step failed. + /// T The Unix timestamp (in seconds) for when the run step completed.. + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// + /// , , , or is null. + internal RunStepObject(string id, DateTimeOffset createdAt, string assistantId, string threadId, string runId, RunStepObjectType type, RunStepObjectStatus status, BinaryData stepDetails, RunStepObjectLastError lastError, DateTimeOffset? expiresAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? 
completedAt, IReadOnlyDictionary metadata, RunCompletionUsage usage) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(assistantId, nameof(assistantId)); + ClientUtilities.AssertNotNull(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(runId, nameof(runId)); + ClientUtilities.AssertNotNull(stepDetails, nameof(stepDetails)); + + Id = id; + CreatedAt = createdAt; + AssistantId = assistantId; + ThreadId = threadId; + RunId = runId; + Type = type; + Status = status; + StepDetails = stepDetails; + LastError = lastError; + ExpiresAt = expiresAt; + CancelledAt = cancelledAt; + FailedAt = failedAt; + CompletedAt = completedAt; + Metadata = metadata; + Usage = usage; + } + + /// Initializes a new instance of . + /// The identifier of the run step, which can be referenced in API endpoints. + /// The object type, which is always `thread.run.step`. + /// The Unix timestamp (in seconds) for when the run step was created. + /// The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + /// The type of run step, which can be either `message_creation` or `tool_calls`. + /// + /// The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, + /// `completed`, or `expired`. + /// + /// The details of the run step. + /// The last error associated with this run step. Will be `null` if there are no errors. + /// + /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired + /// if the parent run is expired. + /// + /// The Unix timestamp (in seconds) for when the run step was cancelled. + /// The Unix timestamp (in seconds) for when the run step failed. + /// T The Unix timestamp (in seconds) for when the run step completed.. 
+ /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// + /// Keeps track of any properties unknown to the library. + internal RunStepObject(string id, RunStepObjectObject @object, DateTimeOffset createdAt, string assistantId, string threadId, string runId, RunStepObjectType type, RunStepObjectStatus status, BinaryData stepDetails, RunStepObjectLastError lastError, DateTimeOffset? expiresAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? completedAt, IReadOnlyDictionary metadata, RunCompletionUsage usage, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + AssistantId = assistantId; + ThreadId = threadId; + RunId = runId; + Type = type; + Status = status; + StepDetails = stepDetails; + LastError = lastError; + ExpiresAt = expiresAt; + CancelledAt = cancelledAt; + FailedAt = failedAt; + CompletedAt = completedAt; + Metadata = metadata; + Usage = usage; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunStepObject() + { + } + + /// The identifier of the run step, which can be referenced in API endpoints. + public string Id { get; } + /// The object type, which is always `thread.run.step`. + public RunStepObjectObject Object { get; } = RunStepObjectObject.ThreadRunStep; + + /// The Unix timestamp (in seconds) for when the run step was created. + public DateTimeOffset CreatedAt { get; } + /// The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + public string AssistantId { get; } + /// The ID of the [thread](/docs/api-reference/threads) that was run. 
+ public string ThreadId { get; } + /// The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + public string RunId { get; } + /// The type of run step, which can be either `message_creation` or `tool_calls`. + public RunStepObjectType Type { get; } + /// + /// The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, + /// `completed`, or `expired`. + /// + public RunStepObjectStatus Status { get; } + /// + /// The details of the run step. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData StepDetails { get; } + /// The last error associated with this run step. Will be `null` if there are no errors. + public RunStepObjectLastError LastError { get; } + /// + /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired + /// if the parent run is expired. + /// + public DateTimeOffset? ExpiresAt { get; } + /// The Unix timestamp (in seconds) for when the run step was cancelled. + public DateTimeOffset? CancelledAt { get; } + /// The Unix timestamp (in seconds) for when the run step failed. + public DateTimeOffset? FailedAt { get; } + /// T The Unix timestamp (in seconds) for when the run step completed.. + public DateTimeOffset? CompletedAt { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + public IReadOnlyDictionary Metadata { get; } + /// Gets the usage. + public RunCompletionUsage Usage { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs b/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs new file mode 100644 index 000000000..368f9006e --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs @@ -0,0 +1,140 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class RunStepObjectLastError : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepObjectLastError)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("code"u8); + writer.WriteStringValue(Code.ToString()); + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunStepObjectLastError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepObjectLastError)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunStepObjectLastError(document.RootElement, options); + } + + internal static RunStepObjectLastError DeserializeRunStepObjectLastError(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + RunStepObjectLastErrorCode code = default; + string message = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code"u8)) + { + code = new RunStepObjectLastErrorCode(property.Value.GetString()); + continue; + } + if 
(property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunStepObjectLastError(code, message, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunStepObjectLastError)} does not support '{options.Format}' format."); + } + } + + RunStepObjectLastError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunStepObjectLastError(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunStepObjectLastError)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunStepObjectLastError FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunStepObjectLastError(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObjectLastError.cs b/.dotnet/src/Generated/Models/RunStepObjectLastError.cs new file mode 100644 index 000000000..8742db0c0 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObjectLastError.cs @@ -0,0 +1,79 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The RunStepObjectLastError. + public partial class RunStepObjectLastError + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// One of `server_error` or `rate_limit_exceeded`. + /// A human-readable description of the error. + /// is null. + internal RunStepObjectLastError(RunStepObjectLastErrorCode code, string message) + { + ClientUtilities.AssertNotNull(message, nameof(message)); + + Code = code; + Message = message; + } + + /// Initializes a new instance of . + /// One of `server_error` or `rate_limit_exceeded`. + /// A human-readable description of the error. + /// Keeps track of any properties unknown to the library. 
+ internal RunStepObjectLastError(RunStepObjectLastErrorCode code, string message, IDictionary serializedAdditionalRawData) + { + Code = code; + Message = message; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunStepObjectLastError() + { + } + + /// One of `server_error` or `rate_limit_exceeded`. + public RunStepObjectLastErrorCode Code { get; } + /// A human-readable description of the error. + public string Message { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs b/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs new file mode 100644 index 000000000..e3b156259 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for code in RunStepObjectLastError. + public readonly partial struct RunStepObjectLastErrorCode : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunStepObjectLastErrorCode(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ServerErrorValue = "server_error"; + private const string RateLimitExceededValue = "rate_limit_exceeded"; + + /// server_error. + public static RunStepObjectLastErrorCode ServerError { get; } = new RunStepObjectLastErrorCode(ServerErrorValue); + /// rate_limit_exceeded. + public static RunStepObjectLastErrorCode RateLimitExceeded { get; } = new RunStepObjectLastErrorCode(RateLimitExceededValue); + /// Determines if two values are the same. + public static bool operator ==(RunStepObjectLastErrorCode left, RunStepObjectLastErrorCode right) => left.Equals(right); + /// Determines if two values are not the same. 
+ public static bool operator !=(RunStepObjectLastErrorCode left, RunStepObjectLastErrorCode right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunStepObjectLastErrorCode(string value) => new RunStepObjectLastErrorCode(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunStepObjectLastErrorCode other && Equals(other); + /// + public bool Equals(RunStepObjectLastErrorCode other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObjectObject.cs b/.dotnet/src/Generated/Models/RunStepObjectObject.cs new file mode 100644 index 000000000..3c774719f --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObjectObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The RunStepObject_object. + public readonly partial struct RunStepObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunStepObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ThreadRunStepValue = "thread.run.step"; + + /// thread.run.step. + public static RunStepObjectObject ThreadRunStep { get; } = new RunStepObjectObject(ThreadRunStepValue); + /// Determines if two values are the same. + public static bool operator ==(RunStepObjectObject left, RunStepObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunStepObjectObject left, RunStepObjectObject right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator RunStepObjectObject(string value) => new RunStepObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunStepObjectObject other && Equals(other); + /// + public bool Equals(RunStepObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObjectStatus.cs b/.dotnet/src/Generated/Models/RunStepObjectStatus.cs new file mode 100644 index 000000000..4b11e62a4 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObjectStatus.cs @@ -0,0 +1,57 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for status in RunStepObject. + public readonly partial struct RunStepObjectStatus : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunStepObjectStatus(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string InProgressValue = "in_progress"; + private const string CancelledValue = "cancelled"; + private const string FailedValue = "failed"; + private const string CompletedValue = "completed"; + private const string ExpiredValue = "expired"; + + /// in_progress. + public static RunStepObjectStatus InProgress { get; } = new RunStepObjectStatus(InProgressValue); + /// cancelled. + public static RunStepObjectStatus Cancelled { get; } = new RunStepObjectStatus(CancelledValue); + /// failed. + public static RunStepObjectStatus Failed { get; } = new RunStepObjectStatus(FailedValue); + /// completed. + public static RunStepObjectStatus Completed { get; } = new RunStepObjectStatus(CompletedValue); + /// expired. 
+ public static RunStepObjectStatus Expired { get; } = new RunStepObjectStatus(ExpiredValue); + /// Determines if two values are the same. + public static bool operator ==(RunStepObjectStatus left, RunStepObjectStatus right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunStepObjectStatus left, RunStepObjectStatus right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunStepObjectStatus(string value) => new RunStepObjectStatus(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunStepObjectStatus other && Equals(other); + /// + public bool Equals(RunStepObjectStatus other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObjectType.cs b/.dotnet/src/Generated/Models/RunStepObjectType.cs new file mode 100644 index 000000000..49adee903 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObjectType.cs @@ -0,0 +1,48 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for type in RunStepObject. + public readonly partial struct RunStepObjectType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunStepObjectType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string MessageCreationValue = "message_creation"; + private const string ToolCallsValue = "tool_calls"; + + /// message_creation. + public static RunStepObjectType MessageCreation { get; } = new RunStepObjectType(MessageCreationValue); + /// tool_calls. 
+ public static RunStepObjectType ToolCalls { get; } = new RunStepObjectType(ToolCallsValue); + /// Determines if two values are the same. + public static bool operator ==(RunStepObjectType left, RunStepObjectType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunStepObjectType left, RunStepObjectType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunStepObjectType(string value) => new RunStepObjectType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunStepObjectType other && Equals(other); + /// + public bool Equals(RunStepObjectType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs b/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs new file mode 100644 index 000000000..421bb3a02 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs @@ -0,0 +1,148 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class RunToolCallObject : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunToolCallObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("function"u8); + writer.WriteObjectValue(Function); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunToolCallObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunToolCallObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunToolCallObject(document.RootElement, options); + } + + internal static RunToolCallObject DeserializeRunToolCallObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + RunToolCallObjectType type = default; + RunToolCallObjectFunction function = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("type"u8)) + { + type = new RunToolCallObjectType(property.Value.GetString()); + continue; + } + if (property.NameEquals("function"u8)) + { + function = RunToolCallObjectFunction.DeserializeRunToolCallObjectFunction(property.Value); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunToolCallObject(id, type, function, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunToolCallObject)} does not support '{options.Format}' format."); + } + } + + RunToolCallObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunToolCallObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunToolCallObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunToolCallObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunToolCallObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunToolCallObject.cs b/.dotnet/src/Generated/Models/RunToolCallObject.cs new file mode 100644 index 000000000..b6f3694a3 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunToolCallObject.cs @@ -0,0 +1,94 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// Tool call objects. + public partial class RunToolCallObject + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The ID of the tool call. This ID must be referenced when you submit the tool outputs in using + /// the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + /// + /// The function definition. + /// or is null. + internal RunToolCallObject(string id, RunToolCallObjectFunction function) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + ClientUtilities.AssertNotNull(function, nameof(function)); + + Id = id; + Function = function; + } + + /// Initializes a new instance of . + /// + /// The ID of the tool call. This ID must be referenced when you submit the tool outputs in using + /// the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + /// + /// The type of tool call the output is required for. For now, this is always `function`. + /// The function definition. + /// Keeps track of any properties unknown to the library. + internal RunToolCallObject(string id, RunToolCallObjectType type, RunToolCallObjectFunction function, IDictionary serializedAdditionalRawData) + { + Id = id; + Type = type; + Function = function; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. 
+ internal RunToolCallObject() + { + } + + /// + /// The ID of the tool call. This ID must be referenced when you submit the tool outputs in using + /// the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + /// + public string Id { get; } + /// The type of tool call the output is required for. For now, this is always `function`. + public RunToolCallObjectType Type { get; } = RunToolCallObjectType.Function; + + /// The function definition. + public RunToolCallObjectFunction Function { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs new file mode 100644 index 000000000..ecef680d8 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs @@ -0,0 +1,140 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class RunToolCallObjectFunction : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunToolCallObjectFunction)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + writer.WritePropertyName("arguments"u8); + writer.WriteStringValue(Arguments); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunToolCallObjectFunction IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunToolCallObjectFunction)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunToolCallObjectFunction(document.RootElement, options); + } + + internal static RunToolCallObjectFunction DeserializeRunToolCallObjectFunction(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + string arguments = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("arguments"u8)) + { + arguments = 
property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunToolCallObjectFunction(name, arguments, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunToolCallObjectFunction)} does not support '{options.Format}' format."); + } + } + + RunToolCallObjectFunction IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunToolCallObjectFunction(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunToolCallObjectFunction)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunToolCallObjectFunction FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunToolCallObjectFunction(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs new file mode 100644 index 000000000..c94197dac --- /dev/null +++ b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs @@ -0,0 +1,80 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The RunToolCallObjectFunction. + public partial class RunToolCallObjectFunction + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the function. + /// The arguments that the model expects you to pass to the function. + /// or is null. + internal RunToolCallObjectFunction(string name, string arguments) + { + ClientUtilities.AssertNotNull(name, nameof(name)); + ClientUtilities.AssertNotNull(arguments, nameof(arguments)); + + Name = name; + Arguments = arguments; + } + + /// Initializes a new instance of . + /// The name of the function. + /// The arguments that the model expects you to pass to the function. + /// Keeps track of any properties unknown to the library. 
+ internal RunToolCallObjectFunction(string name, string arguments, IDictionary serializedAdditionalRawData) + { + Name = name; + Arguments = arguments; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunToolCallObjectFunction() + { + } + + /// The name of the function. + public string Name { get; } + /// The arguments that the model expects you to pass to the function. + public string Arguments { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunToolCallObjectType.cs b/.dotnet/src/Generated/Models/RunToolCallObjectType.cs new file mode 100644 index 000000000..fb277c525 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunToolCallObjectType.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The RunToolCallObject_type. + public readonly partial struct RunToolCallObjectType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunToolCallObjectType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FunctionValue = "function"; + + /// function. + public static RunToolCallObjectType Function { get; } = new RunToolCallObjectType(FunctionValue); + /// Determines if two values are the same. + public static bool operator ==(RunToolCallObjectType left, RunToolCallObjectType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunToolCallObjectType left, RunToolCallObjectType right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator RunToolCallObjectType(string value) => new RunToolCallObjectType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunToolCallObjectType other && Equals(other); + /// + public bool Equals(RunToolCallObjectType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs new file mode 100644 index 000000000..b1c71aec2 --- /dev/null +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs @@ -0,0 +1,132 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class SubmitToolOutputsRunRequest : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("tool_outputs"u8); + writer.WriteObjectValue(ToolOutputs); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + SubmitToolOutputsRunRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeSubmitToolOutputsRunRequest(document.RootElement, options); + } + + internal static SubmitToolOutputsRunRequest DeserializeSubmitToolOutputsRunRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + SubmitToolOutputsRunRequestToolOutputs toolOutputs = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("tool_outputs"u8)) + { + toolOutputs = SubmitToolOutputsRunRequestToolOutputs.DeserializeSubmitToolOutputsRunRequestToolOutputs(property.Value); + continue; + } + if (options.Format 
!= "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new SubmitToolOutputsRunRequest(toolOutputs, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequest)} does not support '{options.Format}' format."); + } + } + + SubmitToolOutputsRunRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeSubmitToolOutputsRunRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static SubmitToolOutputsRunRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeSubmitToolOutputsRunRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs new file mode 100644 index 000000000..47d39c5d4 --- /dev/null +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs @@ -0,0 +1,73 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The SubmitToolOutputsRunRequest. + public partial class SubmitToolOutputsRunRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A list of tools for which the outputs are being submitted. + /// is null. + public SubmitToolOutputsRunRequest(SubmitToolOutputsRunRequestToolOutputs toolOutputs) + { + ClientUtilities.AssertNotNull(toolOutputs, nameof(toolOutputs)); + + ToolOutputs = toolOutputs; + } + + /// Initializes a new instance of . + /// A list of tools for which the outputs are being submitted. + /// Keeps track of any properties unknown to the library. 
+ internal SubmitToolOutputsRunRequest(SubmitToolOutputsRunRequestToolOutputs toolOutputs, IDictionary serializedAdditionalRawData) + { + ToolOutputs = toolOutputs; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal SubmitToolOutputsRunRequest() + { + } + + /// A list of tools for which the outputs are being submitted. + public SubmitToolOutputsRunRequestToolOutputs ToolOutputs { get; } + } +} diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.Serialization.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.Serialization.cs new file mode 100644 index 000000000..dc7e3a12d --- /dev/null +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.Serialization.cs @@ -0,0 +1,146 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class SubmitToolOutputsRunRequestToolOutputs : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutputs)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (OptionalProperty.IsDefined(ToolCallId)) + { + writer.WritePropertyName("tool_call_id"u8); + writer.WriteStringValue(ToolCallId); + } + if (OptionalProperty.IsDefined(Output)) + { + writer.WritePropertyName("output"u8); + writer.WriteStringValue(Output); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + SubmitToolOutputsRunRequestToolOutputs IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutputs)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeSubmitToolOutputsRunRequestToolOutputs(document.RootElement, options); + } + + internal static SubmitToolOutputsRunRequestToolOutputs DeserializeSubmitToolOutputsRunRequestToolOutputs(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OptionalProperty toolCallId = default; + OptionalProperty output = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("tool_call_id"u8)) + { + toolCallId = property.Value.GetString(); + continue; + } + if (property.NameEquals("output"u8)) + { + output = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new SubmitToolOutputsRunRequestToolOutputs(toolCallId.Value, output.Value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutputs)} does not support '{options.Format}' format."); + } + } + + SubmitToolOutputsRunRequestToolOutputs IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeSubmitToolOutputsRunRequestToolOutputs(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutputs)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static SubmitToolOutputsRunRequestToolOutputs FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeSubmitToolOutputsRunRequestToolOutputs(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.cs new file mode 100644 index 000000000..45de2cc25 --- /dev/null +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.cs @@ -0,0 +1,72 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// The SubmitToolOutputsRunRequestToolOutputs. + public partial class SubmitToolOutputsRunRequestToolOutputs + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public SubmitToolOutputsRunRequestToolOutputs() + { + } + + /// Initializes a new instance of . + /// + /// The ID of the tool call in the `required_action` object within the run object the output is + /// being submitted for. + /// + /// The output of the tool call to be submitted to continue the run. + /// Keeps track of any properties unknown to the library. 
+ internal SubmitToolOutputsRunRequestToolOutputs(string toolCallId, string output, IDictionary serializedAdditionalRawData) + { + ToolCallId = toolCallId; + Output = output; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// The ID of the tool call in the `required_action` object within the run object the output is + /// being submitted for. + /// + public string ToolCallId { get; set; } + /// The output of the tool call to be submitted to continue the run. + public string Output { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs b/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs new file mode 100644 index 000000000..1d3b80be1 --- /dev/null +++ b/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs @@ -0,0 +1,179 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class ThreadObject : IUtf8JsonWriteable, IJsonModel + { + void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ThreadObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + if (Metadata != null && OptionalProperty.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ThreadObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ThreadObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeThreadObject(document.RootElement, options); + } + + internal static ThreadObject DeserializeThreadObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + ThreadObjectObject @object = default; + DateTimeOffset createdAt = default; + IReadOnlyDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new ThreadObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + metadata = new OptionalDictionary(); + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ThreadObject(id, @object, createdAt, metadata, serializedAdditionalRawData); + } + + BinaryData 
IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ThreadObject)} does not support '{options.Format}' format."); + } + } + + ThreadObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeThreadObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ThreadObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ThreadObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeThreadObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual RequestBody ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ThreadObject.cs b/.dotnet/src/Generated/Models/ThreadObject.cs new file mode 100644 index 000000000..600c5b250 --- /dev/null +++ b/.dotnet/src/Generated/Models/ThreadObject.cs @@ -0,0 +1,102 @@ +// + +#nullable disable + +using System; +using System.ClientModel.Internal; +using System.Collections.Generic; + +namespace OpenAI.Models +{ + /// Represents a thread that contains [messages](/docs/api-reference/messages). 
+ public partial class ThreadObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the thread was created. + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// is null. + internal ThreadObject(string id, DateTimeOffset createdAt, IReadOnlyDictionary metadata) + { + ClientUtilities.AssertNotNull(id, nameof(id)); + + Id = id; + CreatedAt = createdAt; + Metadata = metadata; + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `thread`. + /// The Unix timestamp (in seconds) for when the thread was created. + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. 
+ /// + /// Keeps track of any properties unknown to the library. + internal ThreadObject(string id, ThreadObjectObject @object, DateTimeOffset createdAt, IReadOnlyDictionary metadata, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ThreadObject() + { + } + + /// The identifier, which can be referenced in API endpoints. + public string Id { get; } + /// The object type, which is always `thread`. + public ThreadObjectObject Object { get; } = ThreadObjectObject.Thread; + + /// The Unix timestamp (in seconds) for when the thread was created. + public DateTimeOffset CreatedAt { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + public IReadOnlyDictionary Metadata { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ThreadObjectObject.cs b/.dotnet/src/Generated/Models/ThreadObjectObject.cs new file mode 100644 index 000000000..4fd5f5e05 --- /dev/null +++ b/.dotnet/src/Generated/Models/ThreadObjectObject.cs @@ -0,0 +1,45 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The ThreadObject_object. + public readonly partial struct ThreadObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ThreadObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ThreadValue = "thread"; + + /// thread. + public static ThreadObjectObject Thread { get; } = new ThreadObjectObject(ThreadValue); + /// Determines if two values are the same. 
+ public static bool operator ==(ThreadObjectObject left, ThreadObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ThreadObjectObject left, ThreadObjectObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ThreadObjectObject(string value) => new ThreadObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ThreadObjectObject other && Equals(other); + /// + public bool Equals(ThreadObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/ModelsOps.cs b/.dotnet/src/Generated/ModelsOps.cs new file mode 100644 index 000000000..5b9442920 --- /dev/null +++ b/.dotnet/src/Generated/ModelsOps.cs @@ -0,0 +1,419 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The ModelsOps sub-client. + public partial class ModelsOps + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. 
+ public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of ModelsOps for mocking. + protected ModelsOps() + { + } + + /// Initializes a new instance of ModelsOps. + /// The handler for diagnostic messaging in the client. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal ModelsOps(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + /// + /// Lists the currently available models, and provides basic information about each one such as the + /// owner and availability. + /// + /// The cancellation token to use. + public virtual async Task> GetModelsAsync(CancellationToken cancellationToken = default) + { + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetModelsAsync(context).ConfigureAwait(false); + return Result.FromValue(ListModelsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Lists the currently available models, and provides basic information about each one such as the + /// owner and availability. + /// + /// The cancellation token to use. + public virtual Result GetModels(CancellationToken cancellationToken = default) + { + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetModels(context); + return Result.FromValue(ListModelsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Lists the currently available models, and provides basic information about each one such as the + /// owner and availability. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetModelsAsync(RequestOptions context) + { + using var scope = ClientDiagnostics.CreateSpan("ModelsOps.GetModels"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetModelsRequest(context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Lists the currently available models, and provides basic information about each one such as the + /// owner and availability. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetModels(RequestOptions context) + { + using var scope = ClientDiagnostics.CreateSpan("ModelsOps.GetModels"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetModelsRequest(context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// Retrieves a model instance, providing basic information about the model such as the owner and + /// permissioning. + /// + /// The ID of the model to use for this request. + /// The cancellation token to use. + /// is null. 
+ /// is an empty string, and was expected to be non-empty. + public virtual async Task> RetrieveAsync(string model, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await RetrieveAsync(model, context).ConfigureAwait(false); + return Result.FromValue(Model.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Retrieves a model instance, providing basic information about the model such as the owner and + /// permissioning. + /// + /// The ID of the model to use for this request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result Retrieve(string model, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = Retrieve(model, context); + return Result.FromValue(Model.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves a model instance, providing basic information about the model such as the owner and + /// permissioning. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the model to use for this request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task RetrieveAsync(string model, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); + + using var scope = ClientDiagnostics.CreateSpan("ModelsOps.Retrieve"); + scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveRequest(model, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves a model instance, providing basic information about the model such as the owner and + /// permissioning. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the model to use for this request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result Retrieve(string model, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); + + using var scope = ClientDiagnostics.CreateSpan("ModelsOps.Retrieve"); + scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveRequest(model, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + /// The model to delete. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. 
+ public virtual async Task> DeleteAsync(string model, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await DeleteAsync(model, context).ConfigureAwait(false); + return Result.FromValue(DeleteModelResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + /// The model to delete. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result Delete(string model, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = Delete(model, context); + return Result.FromValue(DeleteModelResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The model to delete. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task DeleteAsync(string model, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); + + using var scope = ClientDiagnostics.CreateSpan("ModelsOps.Delete"); + scope.Start(); + try + { + using PipelineMessage message = CreateDeleteRequest(model, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The model to delete. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result Delete(string model, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); + + using var scope = ClientDiagnostics.CreateSpan("ModelsOps.Delete"); + scope.Start(); + try + { + using PipelineMessage message = CreateDeleteRequest(model, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateGetModelsRequest(RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/models", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateRetrieveRequest(string model, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/models/", false); + uri.AppendPath(model, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateDeleteRequest(string model, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("DELETE"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/models/", false); + uri.AppendPath(model, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if 
(!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Moderations.cs b/.dotnet/src/Generated/Moderations.cs new file mode 100644 index 000000000..7c4520943 --- /dev/null +++ b/.dotnet/src/Generated/Moderations.cs @@ -0,0 +1,183 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The Moderations sub-client. + public partial class Moderations + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of Moderations for mocking. + protected Moderations() + { + } + + /// Initializes a new instance of Moderations. + /// The handler for diagnostic messaging in the client. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. 
+ internal Moderations(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + /// Classifies if text violates OpenAI's Content Policy. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual async Task> CreateModerationAsync(CreateModerationRequest content, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content0 = content.ToRequestBody(); + Result result = await CreateModerationAsync(content0, context).ConfigureAwait(false); + return Result.FromValue(CreateModerationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Classifies if text violates OpenAI's Content Policy. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual Result CreateModeration(CreateModerationRequest content, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content0 = content.ToRequestBody(); + Result result = CreateModeration(content0, context); + return Result.FromValue(CreateModerationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Classifies if text violates OpenAI's Content Policy + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. 
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateModerationAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Moderations.CreateModeration"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateModerationRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Classifies if text violates OpenAI's Content Policy + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result CreateModeration(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Moderations.CreateModeration"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateModerationRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateModerationRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/moderations", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/OpenAIClient.cs b/.dotnet/src/Generated/OpenAIClient.cs new file mode 100644 index 000000000..ba4da1bc6 --- /dev/null +++ b/.dotnet/src/Generated/OpenAIClient.cs @@ -0,0 +1,158 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using 
System.ClientModel.Primitives.Pipeline; +using System.Threading; + +namespace OpenAI +{ + // Data plane generated client. + /// The OpenAI service client. + public partial class OpenAIClient + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of OpenAIClient for mocking. + protected OpenAIClient() + { + } + + /// Initializes a new instance of OpenAIClient. + /// A credential used to authenticate to an Azure Service. + /// is null. + public OpenAIClient(KeyCredential credential) : this(new Uri("https://api.openai.com/v1"), credential, new OpenAIClientOptions()) + { + } + + /// Initializes a new instance of OpenAIClient. + /// OpenAI Endpoint. + /// A credential used to authenticate to an Azure Service. + /// The options for configuring the client. + /// or is null. 
+ public OpenAIClient(Uri endpoint, KeyCredential credential, OpenAIClientOptions options) + { + ClientUtilities.AssertNotNull(endpoint, nameof(endpoint)); + ClientUtilities.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); + + ClientDiagnostics = new TelemetrySource(options, true); + _keyCredential = credential; + _pipeline = MessagePipeline.Create(options, new IPipelinePolicy[] { new KeyCredentialPolicy(_keyCredential, AuthorizationHeader, AuthorizationApiKeyPrefix) }, Array.Empty>()); + _endpoint = endpoint; + } + + private FineTuning _cachedFineTuning; + private Audio _cachedAudio; + private Assistants _cachedAssistants; + private Chat _cachedChat; + private Completions _cachedCompletions; + private Embeddings _cachedEmbeddings; + private Files _cachedFiles; + private FineTunes _cachedFineTunes; + private Images _cachedImages; + private Messages _cachedMessages; + private ModelsOps _cachedModelsOps; + private Moderations _cachedModerations; + private Runs _cachedRuns; + private Threads _cachedThreads; + + /// Initializes a new instance of FineTuning. + public virtual FineTuning GetFineTuningClient() + { + return Volatile.Read(ref _cachedFineTuning) ?? Interlocked.CompareExchange(ref _cachedFineTuning, new FineTuning(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedFineTuning; + } + + /// Initializes a new instance of Audio. + public virtual Audio GetAudioClient() + { + return Volatile.Read(ref _cachedAudio) ?? Interlocked.CompareExchange(ref _cachedAudio, new Audio(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedAudio; + } + + /// Initializes a new instance of Assistants. + public virtual Assistants GetAssistantsClient() + { + return Volatile.Read(ref _cachedAssistants) ?? Interlocked.CompareExchange(ref _cachedAssistants, new Assistants(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedAssistants; + } + + /// Initializes a new instance of Chat. 
+ public virtual Chat GetChatClient() + { + return Volatile.Read(ref _cachedChat) ?? Interlocked.CompareExchange(ref _cachedChat, new Chat(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedChat; + } + + /// Initializes a new instance of Completions. + public virtual Completions GetCompletionsClient() + { + return Volatile.Read(ref _cachedCompletions) ?? Interlocked.CompareExchange(ref _cachedCompletions, new Completions(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedCompletions; + } + + /// Initializes a new instance of Embeddings. + public virtual Embeddings GetEmbeddingsClient() + { + return Volatile.Read(ref _cachedEmbeddings) ?? Interlocked.CompareExchange(ref _cachedEmbeddings, new Embeddings(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedEmbeddings; + } + + /// Initializes a new instance of Files. + public virtual Files GetFilesClient() + { + return Volatile.Read(ref _cachedFiles) ?? Interlocked.CompareExchange(ref _cachedFiles, new Files(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedFiles; + } + + /// Initializes a new instance of FineTunes. + public virtual FineTunes GetFineTunesClient() + { + return Volatile.Read(ref _cachedFineTunes) ?? Interlocked.CompareExchange(ref _cachedFineTunes, new FineTunes(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedFineTunes; + } + + /// Initializes a new instance of Images. + public virtual Images GetImagesClient() + { + return Volatile.Read(ref _cachedImages) ?? Interlocked.CompareExchange(ref _cachedImages, new Images(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedImages; + } + + /// Initializes a new instance of Messages. + public virtual Messages GetMessagesClient() + { + return Volatile.Read(ref _cachedMessages) ?? Interlocked.CompareExchange(ref _cachedMessages, new Messages(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? 
_cachedMessages; + } + + /// Initializes a new instance of ModelsOps. + public virtual ModelsOps GetModelsOpsClient() + { + return Volatile.Read(ref _cachedModelsOps) ?? Interlocked.CompareExchange(ref _cachedModelsOps, new ModelsOps(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedModelsOps; + } + + /// Initializes a new instance of Moderations. + public virtual Moderations GetModerationsClient() + { + return Volatile.Read(ref _cachedModerations) ?? Interlocked.CompareExchange(ref _cachedModerations, new Moderations(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedModerations; + } + + /// Initializes a new instance of Runs. + public virtual Runs GetRunsClient() + { + return Volatile.Read(ref _cachedRuns) ?? Interlocked.CompareExchange(ref _cachedRuns, new Runs(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedRuns; + } + + /// Initializes a new instance of Threads. + public virtual Threads GetThreadsClient() + { + return Volatile.Read(ref _cachedThreads) ?? Interlocked.CompareExchange(ref _cachedThreads, new Threads(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedThreads; + } + } +} diff --git a/.dotnet/src/Generated/OpenAIClientOptions.cs b/.dotnet/src/Generated/OpenAIClientOptions.cs new file mode 100644 index 000000000..5b661878a --- /dev/null +++ b/.dotnet/src/Generated/OpenAIClientOptions.cs @@ -0,0 +1,13 @@ +// + +#nullable disable + +using System.ClientModel; + +namespace OpenAI +{ + /// Client options for OpenAIClient. 
+ public partial class OpenAIClientOptions : RequestOptions + { + } +} diff --git a/.dotnet/src/Generated/OpenAIModelFactory.cs b/.dotnet/src/Generated/OpenAIModelFactory.cs new file mode 100644 index 000000000..ae070eb8a --- /dev/null +++ b/.dotnet/src/Generated/OpenAIModelFactory.cs @@ -0,0 +1,1769 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Models +{ + /// Model factory for models. + public static partial class OpenAIModelFactory + { + /// Initializes a new instance of . + /// + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + /// the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// + /// The ID of an uploaded file that contains validation data. + /// + /// If you provide this file, the data is used to generate validation metrics periodically during + /// fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + /// not be present in both train and validation files. + /// + /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + /// `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// + /// The name of the model to fine-tune. You can select one of the + /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + /// + /// The hyperparameters used for the fine-tuning job. + /// + /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// + /// For example, a `suffix` of "custom-model-name" would produce a model name like + /// `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// + /// A new instance for mocking. 
+ public static CreateFineTuningJobRequest CreateFineTuningJobRequest(string trainingFile = null, string validationFile = null, CreateFineTuningJobRequestModel model = default, CreateFineTuningJobRequestHyperparameters hyperparameters = null, string suffix = null) + { + return new CreateFineTuningJobRequest(trainingFile, validationFile, model, hyperparameters, suffix, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The object identifier, which can be referenced in the API endpoints. + /// The object type, which is always "fine_tuning.job". + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + /// + /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + /// null if the fine-tuning job is still running. + /// + /// The base model that is being fine-tuned. + /// + /// The name of the fine-tuned model that is being created. The value will be null if the + /// fine-tuning job is still running. + /// + /// The organization that owns the fine-tuning job. + /// + /// The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, + /// `succeeded`, `failed`, or `cancelled`. + /// + /// + /// The hyperparameters used for the fine-tuning job. See the + /// [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// + /// The file ID used for training. You can retrieve the training data with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The file ID used for validation. You can retrieve the validation results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The total number of billable tokens processed by this fine tuning job. 
The value will be null + /// if the fine-tuning job is still running. + /// + /// + /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + /// failure. + /// + /// A new instance for mocking. + public static FineTuningJob FineTuningJob(string id = null, FineTuningJobObject @object = default, DateTimeOffset createdAt = default, DateTimeOffset? finishedAt = null, string model = null, string fineTunedModel = null, string organizationId = null, FineTuningJobStatus status = default, FineTuningJobHyperparameters hyperparameters = null, string trainingFile = null, string validationFile = null, IEnumerable resultFiles = null, long? trainedTokens = null, FineTuningJobError error = null) + { + resultFiles ??= new List(); + + return new FineTuningJob(id, @object, createdAt, finishedAt, model, fineTunedModel, organizationId, status, hyperparameters, trainingFile, validationFile, resultFiles?.ToList(), trainedTokens, error, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the + /// number manually, we support any number between 1 and 50 epochs. + /// + /// A new instance for mocking. + public static FineTuningJobHyperparameters FineTuningJobHyperparameters(BinaryData nEpochs = null) + { + return new FineTuningJobHyperparameters(nEpochs, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A human-readable error message. + /// A machine-readable error code. + /// + /// The parameter that was invalid, usually `training_file` or `validation_file`. This field + /// will be null if the failure was not parameter-specific. + /// + /// A new instance for mocking. 
+ public static FineTuningJobError FineTuningJobError(string message = null, string code = null, string param = null) + { + return new FineTuningJobError(message, code, param, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// A new instance for mocking. + public static ListPaginatedFineTuningJobsResponse ListPaginatedFineTuningJobsResponse(string @object = null, IEnumerable data = null, bool hasMore = default) + { + data ??= new List(); + + return new ListPaginatedFineTuningJobsResponse(@object, data?.ToList(), hasMore, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// A new instance for mocking. + public static ListFineTuningJobEventsResponse ListFineTuningJobEventsResponse(string @object = null, IEnumerable data = null) + { + data ??= new List(); + + return new ListFineTuningJobEventsResponse(@object, data?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// A new instance for mocking. + public static FineTuningJobEvent FineTuningJobEvent(string id = null, string @object = null, DateTimeOffset createdAt = default, FineTuningJobEventLevel level = default, string message = null) + { + return new FineTuningJobEvent(id, @object, createdAt, level, message, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`. + /// The text to generate audio for. The maximum length is 4096 characters. + /// + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, + /// `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the + /// [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// + /// The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. + /// The speed of the generated audio.
Select a value from `0.25` to `4.0`. `1.0` is the default. + /// A new instance for mocking. + public static CreateSpeechRequest CreateSpeechRequest(CreateSpeechRequestModel model = default, string input = null, CreateSpeechRequestVoice voice = default, CreateSpeechRequestResponseFormat? responseFormat = null, double? speed = null) + { + return new CreateSpeechRequest(model, input, voice, responseFormat, speed, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. + /// + /// ID of the model to use. Only `whisper-1` is currently available. + /// + /// The language of the input audio. Supplying the input language in + /// [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy + /// and latency. + /// + /// + /// An optional text to guide the model's style or continue a previous audio segment. The + /// [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// + /// + /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + /// vtt. + /// + /// + /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + /// random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + /// the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + /// automatically increase the temperature until certain thresholds are hit. + /// + /// A new instance for mocking. + public static CreateTranscriptionRequest CreateTranscriptionRequest(BinaryData file = null, CreateTranscriptionRequestModel model = default, string language = null, string prompt = null, CreateTranscriptionRequestResponseFormat? responseFormat = null, double? 
temperature = null) + { + return new CreateTranscriptionRequest(file, model, language, prompt, responseFormat, temperature, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The transcribed text for the provided audio data. + /// The label that describes which operation type generated the accompanying response data. + /// The spoken language that was detected in the audio data. + /// The total duration of the audio processed to produce accompanying transcription information. + /// + /// A collection of information about the timing, probabilities, and other detail of each processed + /// audio segment. + /// + /// A new instance for mocking. + public static CreateTranscriptionResponse CreateTranscriptionResponse(string text = null, CreateTranscriptionResponseTask? task = null, string language = null, TimeSpan? duration = null, IEnumerable segments = null) + { + segments ??= new List(); + + return new CreateTranscriptionResponse(text, task, language, duration, segments?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The zero-based index of this segment. + /// + /// The seek position associated with the processing of this audio segment. Seek positions are + /// expressed as hundredths of seconds. The model may process several segments from a single seek + /// position, so while the seek position will never represent a later time than the segment's + /// start, the segment's start may represent a significantly later time than the segment's + /// associated seek position. + /// + /// The time at which this segment started relative to the beginning of the audio. + /// The time at which this segment ended relative to the beginning of the audio. + /// The text that was part of this audio segment. + /// The token IDs matching the text in this audio segment. + /// The temperature score associated with this audio segment. + /// The average log probability associated with this audio segment. 
+ /// The compression ratio of this audio segment. + /// The probability of no speech detection within this audio segment. + /// A new instance for mocking. + public static AudioSegment AudioSegment(long id = default, long seek = default, TimeSpan start = default, TimeSpan end = default, string text = null, IEnumerable tokens = null, double temperature = default, double avgLogprob = default, double compressionRatio = default, double noSpeechProb = default) + { + tokens ??= new List(); + + return new AudioSegment(id, seek, start, end, text, tokens?.ToList(), temperature, avgLogprob, compressionRatio, noSpeechProb, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. + /// + /// ID of the model to use. Only `whisper-1` is currently available. + /// + /// An optional text to guide the model's style or continue a previous audio segment. The + /// [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// + /// + /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + /// vtt. + /// + /// + /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + /// random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + /// the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + /// automatically increase the temperature until certain thresholds are hit. + /// + /// A new instance for mocking. + public static CreateTranslationRequest CreateTranslationRequest(BinaryData file = null, CreateTranslationRequestModel model = default, string prompt = null, CreateTranslationRequestResponseFormat? responseFormat = null, double? 
temperature = null) + { + return new CreateTranslationRequest(file, model, prompt, responseFormat, temperature, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The translated text for the provided audio data. + /// The label that describes which operation type generated the accompanying response data. + /// The spoken language that was detected in the audio data. + /// The total duration of the audio processed to produce accompanying translation information. + /// + /// A collection of information about the timing, probabilities, and other detail of each processed + /// audio segment. + /// + /// A new instance for mocking. + public static CreateTranslationResponse CreateTranslationResponse(string text = null, CreateTranslationResponseTask? task = null, string language = null, TimeSpan? duration = null, IEnumerable segments = null) + { + segments ??= new List(); + + return new CreateTranslationResponse(text, task, language, duration, segments?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// The name of the assistant. The maximum length is 256 characters. + /// The description of the assistant. The maximum length is 512 characters. + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + /// + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. 
+ /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// A new instance for mocking. + public static CreateAssistantRequest CreateAssistantRequest(string model = null, string name = null, string description = null, string instructions = null, IEnumerable tools = null, IEnumerable fileIds = null, IDictionary metadata = null) + { + tools ??= new List(); + fileIds ??= new List(); + metadata ??= new Dictionary(); + + return new CreateAssistantRequest(model, name, description, instructions, tools?.ToList(), fileIds?.ToList(), metadata, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `assistant`. + /// The Unix timestamp (in seconds) for when the assistant was created. + /// The name of the assistant. The maximum length is 256 characters. + /// The description of the assistant. The maximum length is 512 characters. + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + /// + /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order.
+ /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// A new instance for mocking. + public static AssistantObject AssistantObject(string id = null, AssistantObjectObject @object = default, DateTimeOffset createdAt = default, string name = null, string description = null, string model = null, string instructions = null, IEnumerable tools = null, IEnumerable fileIds = null, IReadOnlyDictionary metadata = null) + { + tools ??= new List(); + fileIds ??= new List(); + metadata ??= new Dictionary(); + + return new AssistantObject(id, @object, createdAt, name, description, model, instructions, tools?.ToList(), fileIds?.ToList(), metadata, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// A new instance for mocking. + public static ListAssistantsResponse ListAssistantsResponse(ListAssistantsResponseObject @object = default, IEnumerable data = null, string firstId = null, string lastId = null, bool hasMore = default) + { + data ??= new List(); + + return new ListAssistantsResponse(@object, data?.ToList(), firstId, lastId, hasMore, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// A new instance for mocking. + public static DeleteAssistantResponse DeleteAssistantResponse(string id = null, bool deleted = default, DeleteAssistantResponseObject @object = default) + { + return new DeleteAssistantResponse(id, deleted, @object, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `assistant.file`. + /// The Unix timestamp (in seconds) for when the assistant file was created.
+ /// The assistant ID that the file is attached to. + /// A new instance for mocking. + public static AssistantFileObject AssistantFileObject(string id = null, AssistantFileObjectObject @object = default, DateTimeOffset createdAt = default, string assistantId = null) + { + return new AssistantFileObject(id, @object, createdAt, assistantId, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// A new instance for mocking. + public static ListAssistantFilesResponse ListAssistantFilesResponse(ListAssistantFilesResponseObject @object = default, IEnumerable data = null, string firstId = null, string lastId = null, bool hasMore = default) + { + data ??= new List(); + + return new ListAssistantFilesResponse(@object, data?.ToList(), firstId, lastId, hasMore, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// A new instance for mocking. + public static DeleteAssistantFileResponse DeleteAssistantFileResponse(string id = null, bool deleted = default, DeleteAssistantFileResponseObject @object = default) + { + return new DeleteAssistantFileResponse(id, deleted, @object, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). + /// + /// + /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. + /// + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + /// frequency in the text so far, decreasing the model's likelihood to repeat the same line + /// verbatim. 
+ /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an + /// associated bias value from -100 to 100. Mathematically, the bias is added to the logits + /// generated by the model prior to sampling. The exact effect will vary per model, but values + /// between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + /// should result in a ban or exclusive selection of the relevant token. + /// + /// + /// Whether to return log probabilities of the output tokens or not. If true, returns the log + /// probabilities of each output token returned in the `content` of `message`. This option is + /// currently not available on the `gpt-4-vision-preview` model. + /// + /// + /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token + /// position, each with an associated log probability. `logprobs` must be set to `true` if this + /// parameter is used. + /// + /// + /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + /// + /// The total length of input tokens and generated tokens is limited by the model's context length. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + /// for counting tokens. + /// + /// + /// How many chat completion choices to generate for each input message. Note that you will be + /// charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to + /// minimize costs. + /// + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + /// in the text so far, increasing the model's likelihood to talk about new topics. 
+ /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the + /// model generates is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + /// yourself via a system or user message. Without this, the model may generate an unending stream + /// of whitespace until the generation reaches the token limit, resulting in a long-running and + /// seemingly "stuck" request. Also note that the message content may be partially cut off if + /// `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the + /// conversation exceeded the max context length. + /// + /// + /// This feature is in Beta. + /// + /// If specified, our system will make a best effort to sample deterministically, such that + /// repeated requests with the same `seed` and parameters should return the same result. + /// + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + /// parameter to monitor changes in the backend. + /// + /// Up to 4 sequences where the API will stop generating further tokens. + /// + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available, with the stream terminated by a `data: [DONE]` message. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// + /// + /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output + /// more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + /// + /// + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers + /// the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + /// the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + /// + /// + /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this + /// to provide a list of functions the model may generate JSON inputs for. + /// + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// + /// Deprecated in favor of `tool_choice`. + /// + /// Controls which (if any) function is called by the model. `none` means the model will not call a + /// function and instead generates a message. `auto` means the model can pick between generating a + /// message or calling a function. Specifying a particular function via `{"name": "my_function"}` + /// forces the model to call that function. + /// + /// `none` is the default when no functions are present. `auto` is the default if functions are + /// present. + /// + /// + /// Deprecated in favor of `tools`. + /// + /// A list of functions the model may generate JSON inputs for. + /// + /// A new instance for mocking. + public static CreateChatCompletionRequest CreateChatCompletionRequest(IEnumerable messages = null, CreateChatCompletionRequestModel model = default, double? frequencyPenalty = null, IDictionary logitBias = null, bool? logprobs = null, long? topLogprobs = null, long? maxTokens = null, long? n = null, double? 
presencePenalty = null, CreateChatCompletionRequestResponseFormat responseFormat = null, long? seed = null, BinaryData stop = null, bool? stream = null, double? temperature = null, double? topP = null, IEnumerable tools = null, BinaryData toolChoice = null, string user = null, BinaryData functionCall = null, IEnumerable functions = null) + { + messages ??= new List(); + logitBias ??= new Dictionary(); + tools ??= new List(); + functions ??= new List(); + + return new CreateChatCompletionRequest(messages?.ToList(), model, frequencyPenalty, logitBias, logprobs, topLogprobs, maxTokens, n, presencePenalty, responseFormat, seed, stop, stream, temperature, topP, tools?.ToList(), toolChoice, user, functionCall, functions?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The ID of the tool call. + /// The type of the tool. Currently, only `function` is supported. + /// The function that the model called. + /// A new instance for mocking. + public static ChatCompletionMessageToolCall ChatCompletionMessageToolCall(string id = null, ChatCompletionMessageToolCallType type = default, ChatCompletionMessageToolCallFunction function = null) + { + return new ChatCompletionMessageToolCall(id, type, function, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The type of the tool. Currently, only `function` is supported. + /// + /// A new instance for mocking. + public static ChatCompletionTool ChatCompletionTool(ChatCompletionToolType type = default, FunctionObject function = null) + { + return new ChatCompletionTool(type, function, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// A description of what the function does, used by the model to choose when and how to call the + /// function. + /// + /// + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + /// dashes, with a maximum length of 64. 
+ /// + /// + /// A new instance for mocking. + public static ChatCompletionFunctions ChatCompletionFunctions(string description = null, string name = null, FunctionParameters parameters = null) + { + return new ChatCompletionFunctions(description, name, parameters, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A unique identifier for the chat completion. + /// A list of chat completion choices. Can be more than one if `n` is greater than 1. + /// The Unix timestamp (in seconds) of when the chat completion was created. + /// The model used for the chat completion. + /// + /// This fingerprint represents the backend configuration that the model runs with. + /// + /// Can be used in conjunction with the `seed` request parameter to understand when backend changes + /// have been made that might impact determinism. + /// + /// The object type, which is always `chat.completion`. + /// + /// A new instance for mocking. + public static CreateChatCompletionResponse CreateChatCompletionResponse(string id = null, IEnumerable choices = null, DateTimeOffset created = default, string model = null, string systemFingerprint = null, CreateChatCompletionResponseObject @object = default, CompletionUsage usage = null) + { + choices ??= new List(); + + return new CreateChatCompletionResponse(id, choices?.ToList(), created, model, systemFingerprint, @object, usage, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a + /// natural stop point or a provided stop sequence, `length` if the maximum number of tokens + /// specified in the request was reached, `content_filter` if content was omitted due to a flag + /// from our content filters, `tool_calls` if the model called a tool, or `function_call` + /// (deprecated) if the model called a function. + /// + /// The index of the choice in the list of choices. 
+ /// + /// Log probability information for the choice. + /// A new instance for mocking. + public static CreateChatCompletionResponseChoice CreateChatCompletionResponseChoice(CreateChatCompletionResponseChoiceFinishReason finishReason = default, long index = default, ChatCompletionResponseMessage message = null, CreateChatCompletionResponseChoiceLogprobs logprobs = null) + { + return new CreateChatCompletionResponseChoice(finishReason, index, message, logprobs, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The contents of the message. + /// + /// The role of the author of this message. + /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. + /// A new instance for mocking. + public static ChatCompletionResponseMessage ChatCompletionResponseMessage(string content = null, IEnumerable toolCalls = null, ChatCompletionResponseMessageRole role = default, ChatCompletionResponseMessageFunctionCall functionCall = null) + { + toolCalls ??= new List(); + + return new ChatCompletionResponseMessage(content, toolCalls?.ToList(), role, functionCall, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// The arguments to call the function with, as generated by the model in JSON format. Note that + /// the model does not always generate valid JSON, and may hallucinate parameters not defined by + /// your function schema. Validate the arguments in your code before calling your function. + /// + /// The name of the function to call. + /// A new instance for mocking. + public static ChatCompletionResponseMessageFunctionCall ChatCompletionResponseMessageFunctionCall(string arguments = null, string name = null) + { + return new ChatCompletionResponseMessageFunctionCall(arguments, name, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// A new instance for mocking. 
+ public static CreateChatCompletionResponseChoiceLogprobs CreateChatCompletionResponseChoiceLogprobs(IEnumerable content = null) + { + content ??= new List(); + + return new CreateChatCompletionResponseChoiceLogprobs(content?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The token. + /// The log probability of this token. + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. Can be `null` if there is no + /// bytes representation for the token. + /// + /// + /// List of the most likely tokens and their log probability, at this token position. In rare + /// cases, there may be fewer than the number of requested `top_logprobs` returned. + /// + /// A new instance for mocking. + public static ChatCompletionTokenLogprob ChatCompletionTokenLogprob(string token = null, double logprob = default, IEnumerable bytes = null, IEnumerable topLogprobs = null) + { + bytes ??= new List(); + topLogprobs ??= new List(); + + return new ChatCompletionTokenLogprob(token, logprob, bytes?.ToList(), topLogprobs?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The token. + /// The log probability of this token. + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. Can be `null` if there is no + /// bytes representation for the token. + /// + /// A new instance for mocking. 
+ public static ChatCompletionTokenLogprobTopLogprob ChatCompletionTokenLogprobTopLogprob(string token = null, double logprob = default, IEnumerable bytes = null) + { + bytes ??= new List(); + + return new ChatCompletionTokenLogprobTopLogprob(token, logprob, bytes?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// Number of tokens in the prompt. + /// Number of tokens in the generated completion. + /// Total number of tokens used in the request (prompt + completion). + /// A new instance for mocking. + public static CompletionUsage CompletionUsage(long promptTokens = default, long completionTokens = default, long totalTokens = default) + { + return new CompletionUsage(promptTokens, completionTokens, totalTokens, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// + /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of + /// tokens, or array of token arrays. + /// + /// Note that <|endoftext|> is the document separator that the model sees during training, so if a + /// prompt is not specified the model will generate as if from the beginning of a new document. + /// + /// + /// Generates `best_of` completions server-side and returns the "best" (the one with the highest + /// log probability per token). Results cannot be streamed. + /// + /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + /// how many to return – `best_of` must be greater than `n`. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token + /// quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
+ /// + /// Echo back the prompt in addition to the completion. + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + /// frequency in the text so far, decreasing the model's likelihood to repeat the same line + /// verbatim. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an + /// associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) + /// to convert text to token IDs. Mathematically, the bias is added to the logits generated by the + /// model prior to sampling. The exact effect will vary per model, but values between -1 and 1 + /// should decrease or increase likelihood of selection; values like -100 or 100 should result in a + /// ban or exclusive selection of the relevant token. + /// + /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being + /// generated. + /// + /// + /// Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. + /// For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The + /// API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + /// elements in the response. + /// + /// The maximum value for `logprobs` is 5. + /// + /// + /// The maximum number of [tokens](/tokenizer) to generate in the completion. + /// + /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + /// + /// How many completions to generate for each prompt. 
+ /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token + /// quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + /// + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + /// in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// If specified, our system will make a best effort to sample deterministically, such that + /// repeated requests with the same `seed` and parameters should return the same result. + /// + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + /// parameter to monitor changes in the backend. + /// + /// Up to 4 sequences where the API will stop generating further tokens. + /// + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available, with the stream terminated by a `data: [DONE]` message. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). + /// + /// The suffix that comes after a completion of inserted text. + /// + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + /// more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + /// + /// + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers + /// the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising + /// the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// A new instance for mocking. + public static CreateCompletionRequest CreateCompletionRequest(CreateCompletionRequestModel model = default, BinaryData prompt = null, long? bestOf = null, bool? echo = null, double? frequencyPenalty = null, IDictionary logitBias = null, long? logprobs = null, long? maxTokens = null, long? n = null, double? presencePenalty = null, long? seed = null, BinaryData stop = null, bool? stream = null, string suffix = null, double? temperature = null, double? topP = null, string user = null) + { + logitBias ??= new Dictionary(); + + return new CreateCompletionRequest(model, prompt, bestOf, echo, frequencyPenalty, logitBias, logprobs, maxTokens, n, presencePenalty, seed, stop, stream, suffix, temperature, topP, user, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A unique identifier for the completion. + /// The list of completion choices the model generated for the input. + /// The Unix timestamp (in seconds) of when the completion was created. + /// The model used for the completion. + /// + /// This fingerprint represents the backend configuration that the model runs with. + /// + /// Can be used in conjunction with the `seed` request parameter to understand when backend changes + /// have been made that might impact determinism. + /// + /// The object type, which is always `text_completion`. + /// Usage statistics for the completion request. + /// A new instance for mocking. 
+ public static CreateCompletionResponse CreateCompletionResponse(string id = null, IEnumerable choices = null, DateTimeOffset created = default, string model = null, string systemFingerprint = null, CreateCompletionResponseObject @object = default, CompletionUsage usage = null) + { + choices ??= new List(); + + return new CreateCompletionResponse(id, choices?.ToList(), created, model, systemFingerprint, @object, usage, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a + /// natural stop point or a provided stop sequence, or `content_filter` if content was omitted + /// due to a flag from our content filters, `length` if the maximum number of tokens specified + /// in the request was reached, or `content_filter` if content was omitted due to a flag from our + /// content filters. + /// + /// A new instance for mocking. + public static CreateCompletionResponseChoice CreateCompletionResponseChoice(long index = default, string text = null, CreateCompletionResponseChoiceLogprobs logprobs = null, CreateCompletionResponseChoiceFinishReason finishReason = default) + { + return new CreateCompletionResponseChoice(index, text, logprobs, finishReason, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// A new instance for mocking. 
+ public static CreateCompletionResponseChoiceLogprobs CreateCompletionResponseChoiceLogprobs(IEnumerable tokens = null, IEnumerable tokenLogprobs = null, IEnumerable> topLogprobs = null, IEnumerable textOffset = null) + { + tokens ??= new List(); + tokenLogprobs ??= new List(); + topLogprobs ??= new List>(); + textOffset ??= new List(); + + return new CreateCompletionResponseChoiceLogprobs(tokens?.ToList(), tokenLogprobs?.ToList(), topLogprobs?.ToList(), textOffset?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + /// single request, pass an array of strings or array of token arrays. Each input must not exceed + /// the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + /// empty string. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// + /// The format to return the embeddings in. Can be either `float` or + /// [`base64`](https://pypi.org/project/pybase64/). + /// + /// + /// The number of dimensions the resulting output embeddings should have. Only supported in + /// `text-embedding-3` and later models. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// A new instance for mocking. + public static CreateEmbeddingRequest CreateEmbeddingRequest(BinaryData input = null, CreateEmbeddingRequestModel model = default, CreateEmbeddingRequestEncodingFormat? encodingFormat = null, long? 
dimensions = null, string user = null) + { + return new CreateEmbeddingRequest(input, model, encodingFormat, dimensions, user, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The list of embeddings generated by the model. + /// The name of the model used to generate the embedding. + /// The object type, which is always "list". + /// The usage information for the request. + /// A new instance for mocking. + public static CreateEmbeddingResponse CreateEmbeddingResponse(IEnumerable data = null, string model = null, CreateEmbeddingResponseObject @object = default, CreateEmbeddingResponseUsage usage = null) + { + data ??= new List(); + + return new CreateEmbeddingResponse(data?.ToList(), model, @object, usage, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The index of the embedding in the list of embeddings. + /// + /// The embedding vector, which is a list of floats. The length of vector depends on the model as + /// listed in the [embedding guide](/docs/guides/embeddings). + /// + /// The object type, which is always "embedding". + /// A new instance for mocking. + public static Embedding Embedding(long index = default, BinaryData embeddingProperty = null, EmbeddingObject @object = default) + { + return new Embedding(index, embeddingProperty, @object, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The number of tokens used by the prompt. + /// The total number of tokens used by the request. + /// A new instance for mocking. + public static CreateEmbeddingResponseUsage CreateEmbeddingResponseUsage(long promptTokens = default, long totalTokens = default) + { + return new CreateEmbeddingResponseUsage(promptTokens, totalTokens, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The file identifier, which can be referenced in the API endpoints. + /// The size of the file, in bytes. 
+ /// The Unix timestamp (in seconds) for when the file was created. + /// The name of the file. + /// The object type, which is always "file". + /// + /// The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, + /// `assistants`, and `assistants_output`. + /// + /// + /// Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or + /// `error`. + /// + /// + /// Deprecated. For details on why a fine-tuning training file failed validation, see the `error` + /// field on `fine_tuning.job`. + /// + /// A new instance for mocking. + public static OpenAIFile OpenAIFile(string id = null, long bytes = default, DateTimeOffset createdAt = default, string filename = null, OpenAIFileObject @object = default, OpenAIFilePurpose purpose = default, OpenAIFileStatus status = default, string statusDetails = null) + { + return new OpenAIFile(id, bytes, createdAt, filename, @object, purpose, status, statusDetails, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// A new instance for mocking. + public static ListFilesResponse ListFilesResponse(IEnumerable data = null, ListFilesResponseObject @object = default) + { + data ??= new List(); + + return new ListFilesResponse(data?.ToList(), @object, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// A new instance for mocking. + public static DeleteFileResponse DeleteFileResponse(string id = null, DeleteFileResponseObject @object = default, bool deleted = default) + { + return new DeleteFileResponse(id, @object, deleted, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. 
+ /// + /// Your dataset must be formatted as a JSONL file, where each training example is a JSON object + /// with the keys "prompt" and "completion". Additionally, you must upload your file with the + /// purpose `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + /// details. + /// + /// + /// The ID of an uploaded file that contains validation data. + /// + /// If you provide this file, the data is used to generate validation metrics periodically during + /// fine-tuning. These metrics can be viewed in the + /// [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + /// Your train and validation data should be mutually exclusive. + /// + /// Your dataset must be formatted as a JSONL file, where each validation example is a JSON object + /// with the keys "prompt" and "completion". Additionally, you must upload your file with the + /// purpose `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + /// details. + /// + /// + /// The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", + /// "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more + /// about these models, see the [Models](/docs/models) documentation. + /// + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// + /// The batch size to use for training. The batch size is the number of training examples used to + /// train a single forward and backward pass. + /// + /// By default, the batch size will be dynamically configured to be ~0.2% of the number of examples + /// in the training set, capped at 256 - in general, we've found that larger batch sizes tend to + /// work better for larger datasets. + /// + /// + /// The learning rate multiplier to use for training. 
The fine-tuning learning rate is the original + /// learning rate used for pretraining multiplied by this value. + /// + /// By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final + /// `batch_size` (larger learning rates tend to perform better with larger batch sizes). We + /// recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best + /// results. + /// + /// + /// The weight to use for loss on the prompt tokens. This controls how much the model tries to + /// learn to generate the prompt (as compared to the completion which always has a weight of 1.0), + /// and can add a stabilizing effect to training when completions are short. + /// + /// If prompts are extremely long (relative to completions), it may make sense to reduce this + /// weight so as to avoid over-prioritizing learning the prompt. + /// + /// + /// If set, we calculate classification-specific metrics such as accuracy and F-1 score using the + /// validation set at the end of every epoch. These metrics can be viewed in the + /// [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + /// + /// In order to compute classification metrics, you must provide a `validation_file`. Additionally, + /// you must specify `classification_n_classes` for multiclass classification or + /// `classification_positive_class` for binary classification. + /// + /// + /// The number of classes in a classification task. + /// + /// This parameter is required for multiclass classification. + /// + /// + /// The positive class in binary classification. + /// + /// This parameter is needed to generate precision, recall, and F1 metrics when doing binary + /// classification. + /// + /// + /// If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score + /// is a generalization of F-1 score. This is only used for binary classification. + /// + /// With a beta of 1 (i.e. 
the F-1 score), precision and recall are given the same weight. A larger + /// beta score puts more weight on recall and less on precision. A smaller beta score puts more + /// weight on precision and less on recall. + /// + /// + /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// + /// For example, a `suffix` of "custom-model-name" would produce a model name like + /// `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. + /// + /// A new instance for mocking. + public static CreateFineTuneRequest CreateFineTuneRequest(string trainingFile = null, string validationFile = null, CreateFineTuneRequestModel? model = null, long? nEpochs = null, long? batchSize = null, double? learningRateMultiplier = null, double? promptLossRate = null, bool? computeClassificationMetrics = null, long? classificationNClasses = null, string classificationPositiveClass = null, IEnumerable classificationBetas = null, string suffix = null) + { + classificationBetas ??= new List(); + + return new CreateFineTuneRequest(trainingFile, validationFile, model, nEpochs, batchSize, learningRateMultiplier, promptLossRate, computeClassificationMetrics, classificationNClasses, classificationPositiveClass, classificationBetas?.ToList(), suffix, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The object identifier, which can be referenced in the API endpoints. + /// The object type, which is always "fine-tune". + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + /// The Unix timestamp (in seconds) for when the fine-tuning job was last updated. + /// The base model that is being fine-tuned. + /// The name of the fine-tuned model that is being created. + /// The organization that owns the fine-tuning job. + /// + /// The current status of the fine-tuning job, which can be either `created`, `running`, + /// `succeeded`, `failed`, or `cancelled`. 
+ /// + /// + /// The hyperparameters used for the fine-tuning job. See the + /// [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. + /// + /// The list of files used for training. + /// The list of files used for validation. + /// The compiled results files for the fine-tuning job. + /// The list of events that have been observed in the lifecycle of the FineTune job. + /// A new instance for mocking. + public static FineTune FineTune(string id = null, FineTuneObject @object = default, DateTimeOffset createdAt = default, DateTimeOffset updatedAt = default, string model = null, string fineTunedModel = null, string organizationId = null, FineTuneStatus status = default, FineTuneHyperparams hyperparams = null, IEnumerable trainingFiles = null, IEnumerable validationFiles = null, IEnumerable resultFiles = null, IEnumerable events = null) + { + trainingFiles ??= new List(); + validationFiles ??= new List(); + resultFiles ??= new List(); + events ??= new List(); + + return new FineTune(id, @object, createdAt, updatedAt, model, fineTunedModel, organizationId, status, hyperparams, trainingFiles?.ToList(), validationFiles?.ToList(), resultFiles?.ToList(), events?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// + /// The batch size to use for training. The batch size is the number of training examples used to + /// train a single forward and backward pass. + /// + /// The weight to use for loss on the prompt tokens. + /// The learning rate multiplier to use for training. + /// The classification metrics to compute using the validation dataset at the end of every epoch. + /// The positive class to use for computing classification metrics. + /// The number of classes to use for computing classification metrics. + /// A new instance for mocking. 
+ public static FineTuneHyperparams FineTuneHyperparams(long nEpochs = default, long batchSize = default, double promptLossWeight = default, double learningRateMultiplier = default, bool? computeClassificationMetrics = null, string classificationPositiveClass = null, long? classificationNClasses = null) + { + return new FineTuneHyperparams(nEpochs, batchSize, promptLossWeight, learningRateMultiplier, computeClassificationMetrics, classificationPositiveClass, classificationNClasses, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// A new instance for mocking. + public static FineTuneEvent FineTuneEvent(string @object = null, DateTimeOffset createdAt = default, string level = null, string message = null) + { + return new FineTuneEvent(@object, createdAt, level, message, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// A new instance for mocking. + public static ListFineTunesResponse ListFineTunesResponse(string @object = null, IEnumerable data = null) + { + data ??= new List(); + + return new ListFineTunesResponse(@object, data?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// A new instance for mocking. + public static ListFineTuneEventsResponse ListFineTuneEventsResponse(string @object = null, IEnumerable data = null) + { + data ??= new List(); + + return new ListFineTuneEventsResponse(@object, data?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// A text description of the desired image(s). The maximum length is 1000 characters for + /// `dall-e-2` and 4000 characters for `dall-e-3`. + /// + /// The model to use for image generation. + /// + /// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is + /// supported. + /// + /// + /// The quality of the image that will be generated. 
`hd` creates images with finer details and + /// greater consistency across the image. This param is only supported for `dall-e-3`. + /// + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + /// + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for + /// `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + /// + /// + /// The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model + /// to lean towards generating hyper-real and dramatic images. Natural causes the model to produce + /// more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// A new instance for mocking. + public static CreateImageRequest CreateImageRequest(string prompt = null, CreateImageRequestModel? model = null, long? n = null, CreateImageRequestQuality? quality = null, CreateImageRequestResponseFormat? responseFormat = null, CreateImageRequestSize? size = null, CreateImageRequestStyle? style = null, string user = null) + { + return new CreateImageRequest(prompt, model, n, quality, responseFormat, size, style, user, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// A new instance for mocking. + public static ImagesResponse ImagesResponse(DateTimeOffset created = default, IEnumerable data = null) + { + data ??= new List(); + + return new ImagesResponse(created, data?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + /// The URL of the generated image, if `response_format` is `url` (default). 
+ /// The prompt that was used to generate the image, if there was any revision to the prompt. + /// A new instance for mocking. + public static Image Image(BinaryData b64Json = null, Uri url = null, string revisedPrompt = null) + { + return new Image(b64Json, url, revisedPrompt, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + /// provided, image must have transparency, which will be used as the mask. + /// + /// A text description of the desired image(s). The maximum length is 1000 characters. + /// + /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where + /// `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + /// as `image`. + /// + /// The model to use for image generation. Only `dall-e-2` is supported at this time. + /// The number of images to generate. Must be between 1 and 10. + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// A new instance for mocking. + public static CreateImageEditRequest CreateImageEditRequest(BinaryData image = null, string prompt = null, BinaryData mask = null, CreateImageEditRequestModel? model = null, long? n = null, CreateImageEditRequestSize? size = null, CreateImageEditRequestResponseFormat? responseFormat = null, string user = null) + { + return new CreateImageEditRequest(image, prompt, mask, model, n, size, responseFormat, user, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// The image to use as the basis for the variation(s). 
Must be a valid PNG file, less than 4MB, + /// and square. + /// + /// The model to use for image generation. Only `dall-e-2` is supported at this time. + /// The number of images to generate. Must be between 1 and 10. + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// A new instance for mocking. + public static CreateImageVariationRequest CreateImageVariationRequest(BinaryData image = null, CreateImageVariationRequestModel? model = null, long? n = null, CreateImageVariationRequestResponseFormat? responseFormat = null, CreateImageVariationRequestSize? size = null, string user = null) + { + return new CreateImageVariationRequest(image, model, n, responseFormat, size, user, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The role of the entity that is creating the message. Currently only `user` is supported. + /// The content of the message. + /// + /// A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a + /// maximum of 10 files attached to a message. Useful for tools like `retrieval` and + /// `code_interpreter` that can access and use files. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// A new instance for mocking. 
+ public static CreateMessageRequest CreateMessageRequest(CreateMessageRequestRole role = default, string content = null, IEnumerable fileIds = null, IDictionary metadata = null) + { + fileIds ??= new List(); + metadata ??= new Dictionary(); + + return new CreateMessageRequest(role, content, fileIds?.ToList(), metadata, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `thread.message`. + /// The Unix timestamp (in seconds) for when the message was created. + /// The [thread](/docs/api-reference/threads) ID that this message belongs to. + /// The entity that produced the message. One of `user` or `assistant`. + /// The content of the message in array of text and/or images. + /// + /// If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this + /// message. + /// + /// + /// If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of + /// this message. + /// + /// + /// A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for + /// tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be + /// attached to a message. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// A new instance for mocking. 
+ public static MessageObject MessageObject(string id = null, MessageObjectObject @object = default, DateTimeOffset createdAt = default, string threadId = null, MessageObjectRole role = default, IEnumerable content = null, string assistantId = null, string runId = null, IEnumerable fileIds = null, IReadOnlyDictionary metadata = null)
+ {
+ content ??= new List();
+ fileIds ??= new List();
+ metadata ??= new Dictionary();
+
+ return new MessageObject(id, @object, createdAt, threadId, role, content?.ToList(), assistantId, runId, fileIds?.ToList(), metadata, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A new instance for mocking.
+ public static ListMessagesResponse ListMessagesResponse(ListMessagesResponseObject @object = default, IEnumerable data = null, string firstId = null, string lastId = null, bool hasMore = default)
+ {
+ data ??= new List();
+
+ return new ListMessagesResponse(@object, data?.ToList(), firstId, lastId, hasMore, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A new instance for mocking.
+ public static ListMessageFilesResponse ListMessageFilesResponse(ListMessageFilesResponseObject @object = default, IEnumerable data = null, string firstId = null, string lastId = null, bool hasMore = default)
+ {
+ data ??= new List();
+
+ return new ListMessageFilesResponse(@object, data?.ToList(), firstId, lastId, hasMore, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The identifier, which can be referenced in API endpoints.
+ /// The object type, which is always `thread.message.file`.
+ /// The Unix timestamp (in seconds) for when the message file was created.
+ /// The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to.
+ /// A new instance for mocking.
+ public static MessageFileObject MessageFileObject(string id = null, MessageFileObjectObject @object = default, DateTimeOffset createdAt = default, string messageId = null) + { + return new MessageFileObject(id, @object, createdAt, messageId, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// A new instance for mocking. + public static ListModelsResponse ListModelsResponse(ListModelsResponseObject @object = default, IEnumerable data = null) + { + data ??= new List(); + + return new ListModelsResponse(@object, data?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The model identifier, which can be referenced in the API endpoints. + /// The Unix timestamp (in seconds) when the model was created. + /// The object type, which is always "model". + /// The organization that owns the model. + /// A new instance for mocking. + public static Model Model(string id = null, DateTimeOffset created = default, ModelObject @object = default, string ownedBy = null) + { + return new Model(id, created, @object, ownedBy, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// A new instance for mocking. + public static DeleteModelResponse DeleteModelResponse(string id = null, bool deleted = default, DeleteModelResponseObject @object = default) + { + return new DeleteModelResponse(id, deleted, @object, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The input text to classify. + /// + /// Two content moderations models are available: `text-moderation-stable` and + /// `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically + /// upgraded over time. This ensures you are always using our most accurate model. If you use + /// `text-moderation-stable`, we will provide advanced notice before updating the model. 
Accuracy
+ /// of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
+ ///
+ /// A new instance for mocking.
+ public static CreateModerationRequest CreateModerationRequest(BinaryData input = null, CreateModerationRequestModel? model = null)
+ {
+ return new CreateModerationRequest(input, model, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The unique identifier for the moderation request.
+ /// The model used to generate the moderation results.
+ /// A list of moderation objects.
+ /// A new instance for mocking.
+ public static CreateModerationResponse CreateModerationResponse(string id = null, string model = null, IEnumerable results = null)
+ {
+ results ??= new List();
+
+ return new CreateModerationResponse(id, model, results?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// Whether the content violates [OpenAI's usage policies](/policies/usage-policies).
+ /// A list of the categories, and whether they are flagged or not.
+ /// A list of the categories along with their scores as predicted by model.
+ /// A new instance for mocking.
+ public static CreateModerationResponseResult CreateModerationResponseResult(bool flagged = default, CreateModerationResponseResultCategories categories = null, CreateModerationResponseResultCategoryScores categoryScores = null)
+ {
+ return new CreateModerationResponseResult(flagged, categories, categoryScores, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ ///
+ /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity,
+ /// religion, nationality, sexual orientation, disability status, or caste. Hateful content
+ /// aimed at non-protected groups (e.g., chess players) is harassment.
+ /// + /// + /// Hateful content that also includes violence or serious harm towards the targeted group + /// based on race, gender, ethnicity, religion, nationality, sexual orientation, disability + /// status, or caste. + /// + /// Content that expresses, incites, or promotes harassing language towards any target. + /// Harassment content that also includes violence or serious harm towards any target. + /// + /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, + /// and eating disorders. + /// + /// + /// Content where the speaker expresses that they are engaging or intend to engage in acts of + /// self-harm, such as suicide, cutting, and eating disorders. + /// + /// + /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating + /// disorders, or that gives instructions or advice on how to commit such acts. + /// + /// + /// Content meant to arouse sexual excitement, such as the description of sexual activity, or + /// that promotes sexual services (excluding sex education and wellness). + /// + /// Sexual content that includes an individual who is under 18 years old. + /// Content that depicts death, violence, or physical injury. + /// Content that depicts death, violence, or physical injury in graphic detail. + /// A new instance for mocking. 
+ public static CreateModerationResponseResultCategories CreateModerationResponseResultCategories(bool hate = default, bool hateThreatening = default, bool harassment = default, bool harassmentThreatening = default, bool selfHarm = default, bool selfHarmIntent = default, bool selfHarmInstructions = default, bool sexual = default, bool sexualMinors = default, bool violence = default, bool violenceGraphic = default)
+ {
+ return new CreateModerationResponseResultCategories(hate, hateThreatening, harassment, harassmentThreatening, selfHarm, selfHarmIntent, selfHarmInstructions, sexual, sexualMinors, violence, violenceGraphic, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The score for the category 'hate'.
+ /// The score for the category 'hate/threatening'.
+ /// The score for the category 'harassment'.
+ /// The score for the category 'harassment/threatening'.
+ /// The score for the category 'self-harm'.
+ /// The score for the category 'self-harm/intent'.
+ /// The score for the category 'self-harm/instructions'.
+ /// The score for the category 'sexual'.
+ /// The score for the category 'sexual/minors'.
+ /// The score for the category 'violence'.
+ /// The score for the category 'violence/graphic'.
+ /// A new instance for mocking.
+ public static CreateModerationResponseResultCategoryScores CreateModerationResponseResultCategoryScores(double hate = default, double hateThreatening = default, double harassment = default, double harassmentThreatening = default, double selfHarm = default, double selfHarmIntent = default, double selfHarmInstructions = default, double sexual = default, double sexualMinors = default, double violence = default, double violenceGraphic = default) + { + return new CreateModerationResponseResultCategoryScores(hate, hateThreatening, harassment, harassmentThreatening, selfHarm, selfHarmIntent, selfHarmInstructions, sexual, sexualMinors, violence, violenceGraphic, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + /// If no thread is provided, an empty thread will be created. + /// + /// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is + /// provided here, it will override the model associated with the assistant. If not, the model + /// associated with the assistant will be used. + /// + /// + /// Override the default system message of the assistant. This is useful for modifying the behavior + /// on a per-run basis. + /// + /// + /// Override the tools the assistant can use for this run. This is useful for modifying the + /// behavior on a per-run basis. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// A new instance for mocking. 
+ public static CreateThreadAndRunRequest CreateThreadAndRunRequest(string assistantId = null, CreateThreadRequest thread = null, string model = null, string instructions = null, IEnumerable tools = null, IDictionary metadata = null) + { + tools ??= new List(); + metadata ??= new Dictionary(); + + return new CreateThreadAndRunRequest(assistantId, thread, model, instructions, tools?.ToList(), metadata, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `thread.run`. + /// The Unix timestamp (in seconds) for when the run was created. + /// + /// The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this + /// run. + /// + /// The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + /// + /// The status of the run, which can be either `queued`, `in_progress`, `requires_action`, + /// `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. + /// + /// + /// Details on the action required to continue the run. Will be `null` if no action is + /// required. + /// + /// The last error associated with this run. Will be `null` if there are no errors. + /// The Unix timestamp (in seconds) for when the run will expire. + /// The Unix timestamp (in seconds) for when the run was started. + /// The Unix timestamp (in seconds) for when the run was cancelled. + /// The Unix timestamp (in seconds) for when the run failed. + /// The Unix timestamp (in seconds) for when the run was completed. + /// The model that the [assistant](/docs/api-reference/assistants) used for this run. + /// The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. 
+ /// + /// The list of [File](/docs/api-reference/files) IDs the + /// [assistant](/docs/api-reference/assistants) used for this run. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// + /// A new instance for mocking. + public static RunObject RunObject(string id = null, RunObjectObject @object = default, DateTimeOffset createdAt = default, string threadId = null, string assistantId = null, RunObjectStatus status = default, RunObjectRequiredAction requiredAction = null, RunObjectLastError lastError = null, DateTimeOffset expiresAt = default, DateTimeOffset? startedAt = null, DateTimeOffset? cancelledAt = null, DateTimeOffset? failedAt = null, DateTimeOffset? completedAt = null, string model = null, string instructions = null, IEnumerable tools = null, IEnumerable fileIds = null, IReadOnlyDictionary metadata = null, RunCompletionUsage usage = null) + { + tools ??= new List(); + fileIds ??= new List(); + metadata ??= new Dictionary(); + + return new RunObject(id, @object, createdAt, threadId, assistantId, status, requiredAction, lastError, expiresAt, startedAt, cancelledAt, failedAt, completedAt, model, instructions, tools?.ToList(), fileIds?.ToList(), metadata, usage, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// For now, this is always `submit_tool_outputs`. + /// Details on the tool outputs needed for this run to continue. + /// A new instance for mocking. + public static RunObjectRequiredAction RunObjectRequiredAction(RunObjectRequiredActionType type = default, RunObjectRequiredActionSubmitToolOutputs submitToolOutputs = null) + { + return new RunObjectRequiredAction(type, submitToolOutputs, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . 
+ /// A list of the relevant tool calls. + /// A new instance for mocking. + public static RunObjectRequiredActionSubmitToolOutputs RunObjectRequiredActionSubmitToolOutputs(IEnumerable toolCalls = null) + { + toolCalls ??= new List(); + + return new RunObjectRequiredActionSubmitToolOutputs(toolCalls?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// The ID of the tool call. This ID must be referenced when you submit the tool outputs in using + /// the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + /// + /// The type of tool call the output is required for. For now, this is always `function`. + /// The function definition. + /// A new instance for mocking. + public static RunToolCallObject RunToolCallObject(string id = null, RunToolCallObjectType type = default, RunToolCallObjectFunction function = null) + { + return new RunToolCallObject(id, type, function, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The name of the function. + /// The arguments that the model expects you to pass to the function. + /// A new instance for mocking. + public static RunToolCallObjectFunction RunToolCallObjectFunction(string name = null, string arguments = null) + { + return new RunToolCallObjectFunction(name, arguments, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// One of `server_error` or `rate_limit_exceeded`. + /// A human-readable description of the error. + /// A new instance for mocking. + public static RunObjectLastError RunObjectLastError(RunObjectLastErrorCode code = default, string message = null) + { + return new RunObjectLastError(code, message, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// Number of completion tokens used over the course of the run. + /// Number of prompt tokens used over the course of the run. 
+ /// Total number of tokens used (prompt + completion). + /// A new instance for mocking. + public static RunCompletionUsage RunCompletionUsage(long completionTokens = default, long promptTokens = default, long totalTokens = default) + { + return new RunCompletionUsage(completionTokens, promptTokens, totalTokens, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + /// + /// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value + /// is provided here, it will override the model associated with the assistant. If not, the model + /// associated with the assistant will be used. + /// + /// + /// Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. + /// This is useful for modifying the behavior on a per-run basis. + /// + /// + /// Appends additional instructions at the end of the instructions for the run. This is useful for + /// modifying the behavior on a per-run basis without overriding other instructions. + /// + /// + /// Override the tools the assistant can use for this run. This is useful for modifying the + /// behavior on a per-run basis. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// A new instance for mocking. 
+ public static CreateRunRequest CreateRunRequest(string assistantId = null, string model = null, string instructions = null, string additionalInstructions = null, IEnumerable tools = null, IDictionary metadata = null) + { + tools ??= new List(); + metadata ??= new Dictionary(); + + return new CreateRunRequest(assistantId, model, instructions, additionalInstructions, tools?.ToList(), metadata, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// A new instance for mocking. + public static ListRunsResponse ListRunsResponse(ListRunsResponseObject @object = default, IEnumerable data = null, string firstId = null, string lastId = null, bool hasMore = default) + { + data ??= new List(); + + return new ListRunsResponse(@object, data?.ToList(), firstId, lastId, hasMore, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// A new instance for mocking. + public static ListRunStepsResponse ListRunStepsResponse(ListRunStepsResponseObject @object = default, IEnumerable data = null, string firstId = null, string lastId = null, bool hasMore = default) + { + data ??= new List(); + + return new ListRunStepsResponse(@object, data?.ToList(), firstId, lastId, hasMore, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The identifier of the run step, which can be referenced in API endpoints. + /// The object type, which is always `thread.run.step`. + /// The Unix timestamp (in seconds) for when the run step was created. + /// The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + /// The type of run step, which can be either `message_creation` or `tool_calls`. 
+ ///
+ /// The status of the run step, which can be either `in_progress`, `cancelled`, `failed`,
+ /// `completed`, or `expired`.
+ ///
+ /// The details of the run step.
+ /// The last error associated with this run step. Will be `null` if there are no errors.
+ ///
+ /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired
+ /// if the parent run is expired.
+ ///
+ /// The Unix timestamp (in seconds) for when the run step was cancelled.
+ /// The Unix timestamp (in seconds) for when the run step failed.
+ /// The Unix timestamp (in seconds) for when the run step completed.
+ ///
+ /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ /// additional information about the object in a structured format. Keys can be a maximum of 64
+ /// characters long and values can be a maximum of 512 characters long.
+ ///
+ ///
+ /// A new instance for mocking.
+ public static RunStepObject RunStepObject(string id = null, RunStepObjectObject @object = default, DateTimeOffset createdAt = default, string assistantId = null, string threadId = null, string runId = null, RunStepObjectType type = default, RunStepObjectStatus status = default, BinaryData stepDetails = null, RunStepObjectLastError lastError = null, DateTimeOffset? expiresAt = null, DateTimeOffset? cancelledAt = null, DateTimeOffset? failedAt = null, DateTimeOffset? completedAt = null, IReadOnlyDictionary metadata = null, RunCompletionUsage usage = null)
+ {
+ metadata ??= new Dictionary();
+
+ return new RunStepObject(id, @object, createdAt, assistantId, threadId, runId, type, status, stepDetails, lastError, expiresAt, cancelledAt, failedAt, completedAt, metadata, usage, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// One of `server_error` or `rate_limit_exceeded`.
+ /// A human-readable description of the error.
+ /// A new instance for mocking.
+ public static RunStepObjectLastError RunStepObjectLastError(RunStepObjectLastErrorCode code = default, string message = null) + { + return new RunStepObjectLastError(code, message, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `thread`. + /// The Unix timestamp (in seconds) for when the thread was created. + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// A new instance for mocking. + public static ThreadObject ThreadObject(string id = null, ThreadObjectObject @object = default, DateTimeOffset createdAt = default, IReadOnlyDictionary metadata = null) + { + metadata ??= new Dictionary(); + + return new ThreadObject(id, @object, createdAt, metadata, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// A new instance for mocking. + public static DeleteThreadResponse DeleteThreadResponse(string id = null, bool deleted = default, DeleteThreadResponseObject @object = default) + { + return new DeleteThreadResponse(id, deleted, @object, serializedAdditionalRawData: null); + } + } +} diff --git a/.dotnet/src/Generated/Runs.cs b/.dotnet/src/Generated/Runs.cs new file mode 100644 index 000000000..fbe954204 --- /dev/null +++ b/.dotnet/src/Generated/Runs.cs @@ -0,0 +1,1442 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The Runs sub-client. 
// NOTE(review): this chunk was recovered from a patch whose angle-bracket content was stripped.
// Generic type arguments (e.g. Task<Result<RunObject>>) and XML doc tags are reconstructed from
// the method bodies and the Azure-generated-client conventions this file follows. The exception
// cref ClientResultException is presumed from the pipeline types in use — TODO confirm against
// the rest of the repository.
public partial class Runs
{
    private const string AuthorizationHeader = "Authorization";
    private readonly KeyCredential _keyCredential;
    private const string AuthorizationApiKeyPrefix = "Bearer";
    private readonly MessagePipeline _pipeline;
    private readonly Uri _endpoint;

    /// <summary> The ClientDiagnostics is used to provide tracing support for the client library. </summary>
    internal TelemetrySource ClientDiagnostics { get; }

    /// <summary> The HTTP pipeline for sending and receiving REST requests and responses. </summary>
    public virtual MessagePipeline Pipeline => _pipeline;

    /// <summary> Initializes a new instance of Runs for mocking. </summary>
    protected Runs()
    {
    }

    /// <summary> Initializes a new instance of Runs. </summary>
    /// <param name="clientDiagnostics"> The handler for diagnostic messaging in the client. </param>
    /// <param name="pipeline"> The HTTP pipeline for sending and receiving REST requests and responses. </param>
    /// <param name="keyCredential"> The key credential to copy. </param>
    /// <param name="endpoint"> OpenAI Endpoint. </param>
    internal Runs(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint)
    {
        ClientDiagnostics = clientDiagnostics;
        _pipeline = pipeline;
        _keyCredential = keyCredential;
        _endpoint = endpoint;
    }

    /// <summary> Create a thread and run it in one request. </summary>
    /// <param name="threadAndRun"> The <see cref="CreateThreadAndRunRequest"/> to use. </param>
    /// <param name="cancellationToken"> The cancellation token to use. </param>
    /// <exception cref="ArgumentNullException"> <paramref name="threadAndRun"/> is null. </exception>
    public virtual async Task<Result<RunObject>> CreateThreadAndRunAsync(CreateThreadAndRunRequest threadAndRun, CancellationToken cancellationToken = default)
    {
        ClientUtilities.AssertNotNull(threadAndRun, nameof(threadAndRun));

        RequestOptions context = FromCancellationToken(cancellationToken);
        using RequestBody content = threadAndRun.ToRequestBody();
        Result result = await CreateThreadAndRunAsync(content, context).ConfigureAwait(false);
        return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse());
    }

    /// <summary> Create a thread and run it in one request. </summary>
    /// <param name="threadAndRun"> The <see cref="CreateThreadAndRunRequest"/> to use. </param>
    /// <param name="cancellationToken"> The cancellation token to use. </param>
    /// <exception cref="ArgumentNullException"> <paramref name="threadAndRun"/> is null. </exception>
    public virtual Result<RunObject> CreateThreadAndRun(CreateThreadAndRunRequest threadAndRun, CancellationToken cancellationToken = default)
    {
        ClientUtilities.AssertNotNull(threadAndRun, nameof(threadAndRun));

        RequestOptions context = FromCancellationToken(cancellationToken);
        using RequestBody content = threadAndRun.ToRequestBody();
        Result result = CreateThreadAndRun(content, context);
        return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse());
    }

    /// <summary>
    /// [Protocol Method] Create a thread and run it in one request.
    /// <list type="bullet">
    /// <item><description> This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. </description></item>
    /// <item><description> Please try the simpler convenience overload with strongly typed models first. </description></item>
    /// </list>
    /// </summary>
    /// <param name="content"> The content to send as the body of the request. </param>
    /// <param name="context"> The request context, which can override default behaviors of the client pipeline on a per-call basis. </param>
    /// <exception cref="ArgumentNullException"> <paramref name="content"/> is null. </exception>
    /// <exception cref="ClientResultException"> Service returned a non-success status code. </exception>
    /// <returns> The response returned from the service. </returns>
    public virtual async Task<Result> CreateThreadAndRunAsync(RequestBody content, RequestOptions context = null)
    {
        ClientUtilities.AssertNotNull(content, nameof(content));

        using var scope = ClientDiagnostics.CreateSpan("Runs.CreateThreadAndRun");
        scope.Start();
        try
        {
            using PipelineMessage message = CreateCreateThreadAndRunRequest(content, context);
            return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false));
        }
        catch (Exception e)
        {
            scope.Failed(e);
            throw;
        }
    }

    /// <summary>
    /// [Protocol Method] Create a thread and run it in one request.
    /// <list type="bullet">
    /// <item><description> This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. </description></item>
    /// <item><description> Please try the simpler convenience overload with strongly typed models first. </description></item>
    /// </list>
    /// </summary>
    /// <param name="content"> The content to send as the body of the request. </param>
    /// <param name="context"> The request context, which can override default behaviors of the client pipeline on a per-call basis. </param>
    /// <exception cref="ArgumentNullException"> <paramref name="content"/> is null. </exception>
    /// <exception cref="ClientResultException"> Service returned a non-success status code. </exception>
    /// <returns> The response returned from the service. </returns>
    public virtual Result CreateThreadAndRun(RequestBody content, RequestOptions context = null)
    {
        ClientUtilities.AssertNotNull(content, nameof(content));

        using var scope = ClientDiagnostics.CreateSpan("Runs.CreateThreadAndRun");
        scope.Start();
        try
        {
            using PipelineMessage message = CreateCreateThreadAndRunRequest(content, context);
            return Result.FromResponse(_pipeline.ProcessMessage(message, context));
        }
        catch (Exception e)
        {
            scope.Failed(e);
            throw;
        }
    }

    /// <summary> Create a run. </summary>
    /// <param name="threadId"> The ID of the thread to run. </param>
    /// <param name="run"> The <see cref="CreateRunRequest"/> to use. </param>
    /// <param name="cancellationToken"> The cancellation token to use. </param>
    /// <exception cref="ArgumentNullException"> <paramref name="threadId"/> or <paramref name="run"/> is null. </exception>
    /// <exception cref="ArgumentException"> <paramref name="threadId"/> is an empty string, and was expected to be non-empty. </exception>
    public virtual async Task<Result<RunObject>> CreateRunAsync(string threadId, CreateRunRequest run, CancellationToken cancellationToken = default)
    {
        ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId));
        ClientUtilities.AssertNotNull(run, nameof(run));

        RequestOptions context = FromCancellationToken(cancellationToken);
        using RequestBody content = run.ToRequestBody();
        Result result = await CreateRunAsync(threadId, content, context).ConfigureAwait(false);
        return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse());
    }

    /// <summary> Create a run. </summary>
    /// <param name="threadId"> The ID of the thread to run. </param>
    /// <param name="run"> The <see cref="CreateRunRequest"/> to use. </param>
    /// <param name="cancellationToken"> The cancellation token to use. </param>
    /// <exception cref="ArgumentNullException"> <paramref name="threadId"/> or <paramref name="run"/> is null. </exception>
    /// <exception cref="ArgumentException"> <paramref name="threadId"/> is an empty string, and was expected to be non-empty. </exception>
+ public virtual Result CreateRun(string threadId, CreateRunRequest run, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(run, nameof(run)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = run.ToRequestBody(); + Result result = CreateRun(threadId, content, context); + return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Create a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to run. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateRunAsync(string threadId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.CreateRun"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateRunRequest(threadId, content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Create a run. 
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to run. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result CreateRun(string threadId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.CreateRun"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateRunRequest(threadId, content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Returns a list of runs belonging to a thread. + /// The ID of the thread the run belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> GetRunsAsync(string threadId, int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetRunsAsync(threadId, limit, order?.ToString(), after, before, context).ConfigureAwait(false); + return Result.FromValue(ListRunsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns a list of runs belonging to a thread. + /// The ID of the thread the run belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. 
+ public virtual Result GetRuns(string threadId, int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetRuns(threadId, limit, order?.ToString(), after, before, context); + return Result.FromValue(ListRunsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns a list of runs belonging to a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread the run belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. 
+ /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetRunsAsync(string threadId, int? limit, string order, string after, string before, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.GetRuns"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetRunsRequest(threadId, limit, order, after, before, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns a list of runs belonging to a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread the run belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetRuns(string threadId, int? limit, string order, string after, string before, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.GetRuns"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetRunsRequest(threadId, limit, order, after, before, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Retrieves a run. + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to retrieve. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> GetRunAsync(string threadId, string runId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetRunAsync(threadId, runId, context).ConfigureAwait(false); + return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieves a run. + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to retrieve. + /// The cancellation token to use. + /// or is null. 
+ /// or is an empty string, and was expected to be non-empty. + public virtual Result GetRun(string threadId, string runId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetRun(threadId, runId, context); + return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetRunAsync(string threadId, string runId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.GetRun"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetRunRequest(threadId, runId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves a run. 
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetRun(string threadId, string runId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.GetRun"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetRunRequest(threadId, runId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Modifies a run. + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to modify. + /// The to use. + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. 
+ public virtual async Task> ModifyRunAsync(string threadId, string runId, ModifyRunRequest run, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + ClientUtilities.AssertNotNull(run, nameof(run)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = run.ToRequestBody(); + Result result = await ModifyRunAsync(threadId, runId, content, context).ConfigureAwait(false); + return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Modifies a run. + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to modify. + /// The to use. + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual Result ModifyRun(string threadId, string runId, ModifyRunRequest run, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + ClientUtilities.AssertNotNull(run, nameof(run)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = run.ToRequestBody(); + Result result = ModifyRun(threadId, runId, content, context); + return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Modifies a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to modify. 
+ /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task ModifyRunAsync(string threadId, string runId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.ModifyRun"); + scope.Start(); + try + { + using PipelineMessage message = CreateModifyRunRequest(threadId, runId, content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Modifies a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to modify. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result ModifyRun(string threadId, string runId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.ModifyRun"); + scope.Start(); + try + { + using PipelineMessage message = CreateModifyRunRequest(threadId, runId, content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Cancels a run that is `in_progress`. + /// The ID of the thread to which this run belongs. + /// The ID of the run to cancel. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> CancelRunAsync(string threadId, string runId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await CancelRunAsync(threadId, runId, context).ConfigureAwait(false); + return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Cancels a run that is `in_progress`. + /// The ID of the thread to which this run belongs. + /// The ID of the run to cancel. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. 
+ public virtual Result CancelRun(string threadId, string runId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = CancelRun(threadId, runId, context); + return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Cancels a run that is `in_progress`. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which this run belongs. + /// The ID of the run to cancel. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CancelRunAsync(string threadId, string runId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.CancelRun"); + scope.Start(); + try + { + using PipelineMessage message = CreateCancelRunRequest(threadId, runId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Cancels a run that is `in_progress`. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which this run belongs. + /// The ID of the run to cancel. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result CancelRun(string threadId, string runId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.CancelRun"); + scope.Start(); + try + { + using PipelineMessage message = CreateCancelRunRequest(threadId, runId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// When a run has the `status: "requires_action"` and `required_action.type` is + /// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once + /// they're all completed. All outputs must be submitted in a single request. + /// + /// The ID of the [thread](/docs/api-reference/threads) to which this run belongs. + /// The ID of the run that requires the tool output submission. + /// The to use. + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. 
+ public virtual async Task> SubmitToolOuputsToRunAsync(string threadId, string runId, SubmitToolOutputsRunRequest submitToolOutputsRun, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + ClientUtilities.AssertNotNull(submitToolOutputsRun, nameof(submitToolOutputsRun)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = submitToolOutputsRun.ToRequestBody(); + Result result = await SubmitToolOuputsToRunAsync(threadId, runId, content, context).ConfigureAwait(false); + return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// When a run has the `status: "requires_action"` and `required_action.type` is + /// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once + /// they're all completed. All outputs must be submitted in a single request. + /// + /// The ID of the [thread](/docs/api-reference/threads) to which this run belongs. + /// The ID of the run that requires the tool output submission. + /// The to use. + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. 
+ public virtual Result SubmitToolOuputsToRun(string threadId, string runId, SubmitToolOutputsRunRequest submitToolOutputsRun, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + ClientUtilities.AssertNotNull(submitToolOutputsRun, nameof(submitToolOutputsRun)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = submitToolOutputsRun.ToRequestBody(); + Result result = SubmitToolOuputsToRun(threadId, runId, content, context); + return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] When a run has the `status: "requires_action"` and `required_action.type` is + /// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once + /// they're all completed. All outputs must be submitted in a single request. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) to which this run belongs. + /// The ID of the run that requires the tool output submission. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task SubmitToolOuputsToRunAsync(string threadId, string runId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.SubmitToolOuputsToRun"); + scope.Start(); + try + { + using PipelineMessage message = CreateSubmitToolOuputsToRunRequest(threadId, runId, content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] When a run has the `status: "requires_action"` and `required_action.type` is + /// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once + /// they're all completed. All outputs must be submitted in a single request. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) to which this run belongs. + /// The ID of the run that requires the tool output submission. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result SubmitToolOuputsToRun(string threadId, string runId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.SubmitToolOuputsToRun"); + scope.Start(); + try + { + using PipelineMessage message = CreateSubmitToolOuputsToRunRequest(threadId, runId, content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Returns a list of run steps belonging to a run. + /// The ID of the thread the run and run steps belong to. + /// The ID of the run the run steps belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> GetRunStepsAsync(string threadId, string runId, int? limit = null, ListOrder? 
order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetRunStepsAsync(threadId, runId, limit, order?.ToString(), after, before, context).ConfigureAwait(false); + return Result.FromValue(ListRunStepsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns a list of run steps belonging to a run. + /// The ID of the thread the run and run steps belong to. + /// The ID of the run the run steps belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual Result GetRunSteps(string threadId, string runId, int? limit = null, ListOrder? 
order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetRunSteps(threadId, runId, limit, order?.ToString(), after, before, context); + return Result.FromValue(ListRunStepsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns a list of run steps belonging to a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread the run and run steps belong to. + /// The ID of the run the run steps belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. 
+ /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetRunStepsAsync(string threadId, string runId, int? limit, string order, string after, string before, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.GetRunSteps"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetRunStepsRequest(threadId, runId, limit, order, after, before, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns a list of run steps belonging to a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread the run and run steps belong to. + /// The ID of the run the run steps belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetRunSteps(string threadId, string runId, int? limit, string order, string after, string before, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.GetRunSteps"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetRunStepsRequest(threadId, runId, limit, order, after, before, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Retrieves a run step. + /// The ID of the thread to which the run and run step belongs. + /// The ID of the run to which the run step belongs. + /// The ID of the run step to retrieve. + /// The cancellation token to use. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. 
+ public virtual async Task> GetRunStepAsync(string threadId, string runId, string stepId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + ClientUtilities.AssertNotNullOrEmpty(stepId, nameof(stepId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetRunStepAsync(threadId, runId, stepId, context).ConfigureAwait(false); + return Result.FromValue(RunStepObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieves a run step. + /// The ID of the thread to which the run and run step belongs. + /// The ID of the run to which the run step belongs. + /// The ID of the run step to retrieve. + /// The cancellation token to use. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + public virtual Result GetRunStep(string threadId, string runId, string stepId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + ClientUtilities.AssertNotNullOrEmpty(stepId, nameof(stepId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetRunStep(threadId, runId, stepId, context); + return Result.FromValue(RunStepObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves a run step. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which the run and run step belongs. + /// The ID of the run to which the run step belongs. + /// The ID of the run step to retrieve. 
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetRunStepAsync(string threadId, string runId, string stepId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + ClientUtilities.AssertNotNullOrEmpty(stepId, nameof(stepId)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.GetRunStep"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetRunStepRequest(threadId, runId, stepId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves a run step. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which the run and run step belongs. + /// The ID of the run to which the run step belongs. + /// The ID of the run step to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result GetRunStep(string threadId, string runId, string stepId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + ClientUtilities.AssertNotNullOrEmpty(stepId, nameof(stepId)); + + using var scope = ClientDiagnostics.CreateSpan("Runs.GetRunStep"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetRunStepRequest(threadId, runId, stepId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateThreadAndRunRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/runs", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateCreateRunRequest(string threadId, RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/runs", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateGetRunsRequest(string threadId, int? 
limit, string order, string after, string before, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/runs", false); + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + if (order != null) + { + uri.AppendQuery("order", order, true); + } + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (before != null) + { + uri.AppendQuery("before", before, true); + } + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetRunRequest(string threadId, string runId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/runs/", false); + uri.AppendPath(runId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateModifyRunRequest(string threadId, string runId, RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/runs/", false); + uri.AppendPath(runId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal 
PipelineMessage CreateCancelRunRequest(string threadId, string runId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/runs/", false); + uri.AppendPath(runId, true); + uri.AppendPath("/cancel", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateSubmitToolOuputsToRunRequest(string threadId, string runId, RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/runs/", false); + uri.AppendPath(runId, true); + uri.AppendPath("/submit_tool_outputs", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateGetRunStepsRequest(string threadId, string runId, int? 
limit, string order, string after, string before, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/runs/", false); + uri.AppendPath(runId, true); + uri.AppendPath("/steps", false); + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + if (order != null) + { + uri.AppendQuery("order", order, true); + } + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (before != null) + { + uri.AppendQuery("before", before, true); + } + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetRunStepRequest(string threadId, string runId, string stepId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + uri.AppendPath("/runs/", false); + uri.AppendPath(runId, true); + uri.AppendPath("/steps/", false); + uri.AppendPath(stepId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new 
StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Threads.cs b/.dotnet/src/Generated/Threads.cs new file mode 100644 index 000000000..b7ef0388d --- /dev/null +++ b/.dotnet/src/Generated/Threads.cs @@ -0,0 +1,555 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.ClientModel.Primitives; +using System.ClientModel.Primitives.Pipeline; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; + +namespace OpenAI +{ + // Data plane generated sub-client. + /// The Threads sub-client. + public partial class Threads + { + private const string AuthorizationHeader = "Authorization"; + private readonly KeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly MessagePipeline _pipeline; + private readonly Uri _endpoint; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal TelemetrySource ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual MessagePipeline Pipeline => _pipeline; + + /// Initializes a new instance of Threads for mocking. + protected Threads() + { + } + + /// Initializes a new instance of Threads. + /// The handler for diagnostic messaging in the client. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Threads(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + { + ClientDiagnostics = clientDiagnostics; + _pipeline = pipeline; + _keyCredential = keyCredential; + _endpoint = endpoint; + } + + /// Create a thread. + /// The to use. + /// The cancellation token to use. + /// is null. 
+ public virtual async Task> CreateThreadAsync(CreateThreadRequest thread, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(thread, nameof(thread)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = thread.ToRequestBody(); + Result result = await CreateThreadAsync(content, context).ConfigureAwait(false); + return Result.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Create a thread. + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual Result CreateThread(CreateThreadRequest thread, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(thread, nameof(thread)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = thread.ToRequestBody(); + Result result = CreateThread(content, context); + return Result.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Create a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task CreateThreadAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Threads.CreateThread"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateThreadRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Create a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result CreateThread(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Threads.CreateThread"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateThreadRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Retrieves a thread. + /// The ID of the thread to retrieve. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. 
+ public virtual async Task> GetThreadAsync(string threadId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetThreadAsync(threadId, context).ConfigureAwait(false); + return Result.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieves a thread. + /// The ID of the thread to retrieve. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result GetThread(string threadId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetThread(threadId, context); + return Result.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task GetThreadAsync(string threadId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + using var scope = ClientDiagnostics.CreateSpan("Threads.GetThread"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetThreadRequest(threadId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetThread(string threadId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + using var scope = ClientDiagnostics.CreateSpan("Threads.GetThread"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetThreadRequest(threadId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Modifies a thread. + /// The ID of the thread to modify. Only the `metadata` can be modified. + /// The to use. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. 
+ public virtual async Task> ModifyThreadAsync(string threadId, ModifyThreadRequest thread, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(thread, nameof(thread)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = thread.ToRequestBody(); + Result result = await ModifyThreadAsync(threadId, content, context).ConfigureAwait(false); + return Result.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Modifies a thread. + /// The ID of the thread to modify. Only the `metadata` can be modified. + /// The to use. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result ModifyThread(string threadId, ModifyThreadRequest thread, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(thread, nameof(thread)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = thread.ToRequestBody(); + Result result = ModifyThread(threadId, content, context); + return Result.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Modifies a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to modify. Only the `metadata` can be modified. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. 
+ /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task ModifyThreadAsync(string threadId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Threads.ModifyThread"); + scope.Start(); + try + { + using PipelineMessage message = CreateModifyThreadRequest(threadId, content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Modifies a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to modify. Only the `metadata` can be modified. + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result ModifyThread(string threadId, RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("Threads.ModifyThread"); + scope.Start(); + try + { + using PipelineMessage message = CreateModifyThreadRequest(threadId, content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Delete a thread. + /// The ID of the thread to delete. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> DeleteThreadAsync(string threadId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await DeleteThreadAsync(threadId, context).ConfigureAwait(false); + return Result.FromValue(DeleteThreadResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Delete a thread. + /// The ID of the thread to delete. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result DeleteThread(string threadId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = DeleteThread(threadId, context); + return Result.FromValue(DeleteThreadResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Delete a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to delete. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task DeleteThreadAsync(string threadId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + using var scope = ClientDiagnostics.CreateSpan("Threads.DeleteThread"); + scope.Start(); + try + { + using PipelineMessage message = CreateDeleteThreadRequest(threadId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Delete a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to delete. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual Result DeleteThread(string threadId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + using var scope = ClientDiagnostics.CreateSpan("Threads.DeleteThread"); + scope.Start(); + try + { + using PipelineMessage message = CreateDeleteThreadRequest(threadId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateThreadRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateGetThreadRequest(string threadId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateModifyThreadRequest(string threadId, RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", 
"application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateDeleteThreadRequest(string threadId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("DELETE"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/threads/", false); + uri.AppendPath(threadId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/OpenAI.csproj b/.dotnet/src/OpenAI.csproj new file mode 100644 index 000000000..c07146eaf --- /dev/null +++ b/.dotnet/src/OpenAI.csproj @@ -0,0 +1,16 @@ + + + This is the OpenAI client library for developing .NET applications with rich experience. 
+ SDK Code Generation OpenAI + 1.0.0-beta.1 + OpenAI + netstandard2.0 + latest + true + + + + + + + diff --git a/.dotnet/tests/Generated/Tests/AssistantsTests.cs b/.dotnet/tests/Generated/Tests/AssistantsTests.cs new file mode 100644 index 000000000..ea014b7fb --- /dev/null +++ b/.dotnet/tests/Generated/Tests/AssistantsTests.cs @@ -0,0 +1,22 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class AssistantsTests + { + [Test] + public void SmokeTest() + { + KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + Assistants client = new OpenAIClient(credential).GetAssistantsClient(); + Assert.IsNotNull(client); + } + } +} diff --git a/.dotnet/tests/Generated/Tests/AudioTests.cs b/.dotnet/tests/Generated/Tests/AudioTests.cs new file mode 100644 index 000000000..8fe314b48 --- /dev/null +++ b/.dotnet/tests/Generated/Tests/AudioTests.cs @@ -0,0 +1,22 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class AudioTests + { + [Test] + public void SmokeTest() + { + KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + Audio client = new OpenAIClient(credential).GetAudioClient(); + Assert.IsNotNull(client); + } + } +} diff --git a/.dotnet/tests/Generated/Tests/ChatTests.cs b/.dotnet/tests/Generated/Tests/ChatTests.cs new file mode 100644 index 000000000..eb5b40763 --- /dev/null +++ b/.dotnet/tests/Generated/Tests/ChatTests.cs @@ -0,0 +1,22 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class ChatTests + { + [Test] + public void SmokeTest() + { + KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + Chat client = 
new OpenAIClient(credential).GetChatClient(); + Assert.IsNotNull(client); + } + } +} diff --git a/.dotnet/tests/Generated/Tests/CompletionsTests.cs b/.dotnet/tests/Generated/Tests/CompletionsTests.cs new file mode 100644 index 000000000..3a0695403 --- /dev/null +++ b/.dotnet/tests/Generated/Tests/CompletionsTests.cs @@ -0,0 +1,22 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class CompletionsTests + { + [Test] + public void SmokeTest() + { + KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + Completions client = new OpenAIClient(credential).GetCompletionsClient(); + Assert.IsNotNull(client); + } + } +} diff --git a/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs b/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs new file mode 100644 index 000000000..8a6052960 --- /dev/null +++ b/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs @@ -0,0 +1,22 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class EmbeddingsTests + { + [Test] + public void SmokeTest() + { + KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + Embeddings client = new OpenAIClient(credential).GetEmbeddingsClient(); + Assert.IsNotNull(client); + } + } +} diff --git a/.dotnet/tests/Generated/Tests/FilesTests.cs b/.dotnet/tests/Generated/Tests/FilesTests.cs new file mode 100644 index 000000000..9372d75f4 --- /dev/null +++ b/.dotnet/tests/Generated/Tests/FilesTests.cs @@ -0,0 +1,22 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class FilesTests + { + [Test] + public void SmokeTest() + { + KeyCredential credential = new 
KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + Files client = new OpenAIClient(credential).GetFilesClient(); + Assert.IsNotNull(client); + } + } +} diff --git a/.dotnet/tests/Generated/Tests/FineTuningJobsTests.cs b/.dotnet/tests/Generated/Tests/FineTuningJobsTests.cs new file mode 100644 index 000000000..ea1fc2ad2 --- /dev/null +++ b/.dotnet/tests/Generated/Tests/FineTuningJobsTests.cs @@ -0,0 +1,22 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class FineTuningJobsTests + { + [Test] + public void SmokeTest() + { + KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + FineTuningJobs client = new OpenAIClient(credential).GetFineTuningClient().GetFineTuningJobsClient(); + Assert.IsNotNull(client); + } + } +} diff --git a/.dotnet/tests/Generated/Tests/ImagesTests.cs b/.dotnet/tests/Generated/Tests/ImagesTests.cs new file mode 100644 index 000000000..a48795229 --- /dev/null +++ b/.dotnet/tests/Generated/Tests/ImagesTests.cs @@ -0,0 +1,22 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class ImagesTests + { + [Test] + public void SmokeTest() + { + KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + Images client = new OpenAIClient(credential).GetImagesClient(); + Assert.IsNotNull(client); + } + } +} diff --git a/.dotnet/tests/Generated/Tests/MessagesTests.cs b/.dotnet/tests/Generated/Tests/MessagesTests.cs new file mode 100644 index 000000000..4b62bbf50 --- /dev/null +++ b/.dotnet/tests/Generated/Tests/MessagesTests.cs @@ -0,0 +1,22 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class MessagesTests + { + [Test] + public 
void SmokeTest() + { + KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + Messages client = new OpenAIClient(credential).GetMessagesClient(); + Assert.IsNotNull(client); + } + } +} diff --git a/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs b/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs new file mode 100644 index 000000000..24c9896c0 --- /dev/null +++ b/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs @@ -0,0 +1,22 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class ModelsOpsTests + { + [Test] + public void SmokeTest() + { + KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ModelsOps client = new OpenAIClient(credential).GetModelsOpsClient(); + Assert.IsNotNull(client); + } + } +} diff --git a/.dotnet/tests/Generated/Tests/ModerationsTests.cs b/.dotnet/tests/Generated/Tests/ModerationsTests.cs new file mode 100644 index 000000000..138b487d3 --- /dev/null +++ b/.dotnet/tests/Generated/Tests/ModerationsTests.cs @@ -0,0 +1,22 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class ModerationsTests + { + [Test] + public void SmokeTest() + { + KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + Moderations client = new OpenAIClient(credential).GetModerationsClient(); + Assert.IsNotNull(client); + } + } +} diff --git a/.dotnet/tests/Generated/Tests/RunsTests.cs b/.dotnet/tests/Generated/Tests/RunsTests.cs new file mode 100644 index 000000000..e3b8ee12b --- /dev/null +++ b/.dotnet/tests/Generated/Tests/RunsTests.cs @@ -0,0 +1,22 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class RunsTests 
+ { + [Test] + public void SmokeTest() + { + KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + Runs client = new OpenAIClient(credential).GetRunsClient(); + Assert.IsNotNull(client); + } + } +} diff --git a/.dotnet/tests/Generated/Tests/ThreadsTests.cs b/.dotnet/tests/Generated/Tests/ThreadsTests.cs new file mode 100644 index 000000000..9b7ef6f78 --- /dev/null +++ b/.dotnet/tests/Generated/Tests/ThreadsTests.cs @@ -0,0 +1,22 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class ThreadsTests + { + [Test] + public void SmokeTest() + { + KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + Threads client = new OpenAIClient(credential).GetThreadsClient(); + Assert.IsNotNull(client); + } + } +} diff --git a/.dotnet/tests/OpenAI.Tests.csproj b/.dotnet/tests/OpenAI.Tests.csproj new file mode 100644 index 000000000..a279ac212 --- /dev/null +++ b/.dotnet/tests/OpenAI.Tests.csproj @@ -0,0 +1,18 @@ + + + net7.0 + + $(NoWarn);CS1591 + + + + + + + + + + + + + diff --git a/.dotnet/tsp-output/@typespec/openapi3/openapi.yaml b/.dotnet/tsp-output/@typespec/openapi3/openapi.yaml new file mode 100644 index 000000000..cc299da11 --- /dev/null +++ b/.dotnet/tsp-output/@typespec/openapi3/openapi.yaml @@ -0,0 +1,6019 @@ +openapi: 3.0.0 +info: + title: OpenAI API + version: 2.0.0 + description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. +tags: + - name: Fine-tuning + - name: Audio + - name: Assistants + - name: Chat + - name: Completions + - name: Embeddings + - name: Files + - name: Images + - name: Models + - name: Moderations +paths: + /assistants: + post: + tags: + - Assistants + operationId: createAssistant + summary: Create an assistant with a model and instructions. 
+ parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/AssistantObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAssistantRequest' + get: + tags: + - Assistants + operationId: listAssistants + summary: Returns a list of assistants. + parameters: + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + $ref: '#/components/schemas/ListOrder' + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListAssistantsResponse' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /assistants/{assistant_id}: + get: + tags: + - Assistants + operationId: getAssistant + summary: Retrieves an assistant. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant to retrieve. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/AssistantObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + post: + tags: + - Assistants + operationId: modifyAssistant + summary: Modifies an assistant. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant to modify. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/AssistantObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ModifyAssistantRequest' + delete: + tags: + - Assistants + operationId: deleteAssistant + summary: Delete an assistant. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant to delete. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteAssistantResponse' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /assistants/{assistant_id}/files: + post: + tags: + - Assistants + operationId: createAssistantFile + summary: |- + Create an assistant file by attaching a [File](/docs/api-reference/files) to a + [assistant](/docs/api-reference/assistants). + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant for which to create a file. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/AssistantFileObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAssistantFileRequest' + get: + tags: + - Assistants + operationId: listAssistantFiles + summary: Returns a list of assistant files. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant the file belongs to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + $ref: '#/components/schemas/ListOrder' + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. 
+ schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListAssistantFilesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /assistants/{assistant_id}/files/{file_id}: + get: + tags: + - Assistants + operationId: getAssistantFile + summary: Retrieves an assistant file. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant the file belongs to. + schema: + type: string + - name: file_id + in: path + required: true + description: The ID of the file we're getting. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/AssistantFileObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - Assistants + operationId: deleteAssistantFile + summary: Delete an assistant file. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant the file belongs to. + schema: + type: string + - name: file_id + in: path + required: true + description: The ID of the file to delete. + schema: + type: string + responses: + '200': + description: The request has succeeded. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/DeleteAssistantFileResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /audio/speech: + post: + tags: + - Audio + operationId: createSpeech + summary: Generates audio from the input text. + parameters: [] + responses: + '200': + description: The request has succeeded. + headers: + Transfer-Encoding: + required: false + description: chunked + schema: + type: string + content: + application/octet-stream: + schema: + type: string + format: binary + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateSpeechRequest' + /audio/transcriptions: + post: + tags: + - Audio + operationId: createTranscription + summary: Transcribes audio into the input language. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/CreateTranscriptionResponse' + text/plain: + schema: + type: string + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateTranscriptionRequestMultiPart' + /audio/translations: + post: + tags: + - Audio + operationId: createTranslation + summary: Translates audio into English.. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/CreateTranslationResponse' + text/plain: + schema: + type: string + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateTranslationRequestMultiPart' + /chat/completions: + post: + tags: + - Chat + operationId: createChatCompletion + summary: Creates a model response for the given chat conversation. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/CreateChatCompletionResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateChatCompletionRequest' + /completions: + post: + tags: + - Completions + operationId: createCompletion + summary: Creates a completion for the provided prompt and parameters. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/CreateCompletionResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateCompletionRequest' + /embeddings: + post: + tags: + - Embeddings + operationId: createEmbedding + summary: Creates an embedding vector representing the input text. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEmbeddingResponse' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEmbeddingRequest' + /files: + post: + tags: + - Files + operationId: createFile + summary: |- + Upload a file that can be used across various endpoints. The size of all the files uploaded by + one organization can be up to 100 GB. + + The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + supported. The Fine-tuning API only supports `.jsonl` files. + + Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIFile' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateFileRequestMultiPart' + get: + tags: + - Files + operationId: listFiles + summary: Returns a list of files that belong to the user's organization. + parameters: + - name: purpose + in: query + required: false + description: Only return files with the given purpose. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListFilesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /files/{file_id}: + get: + tags: + - Files + operationId: retrieveFile + summary: Returns information about a specific file. 
+ parameters: + - name: file_id + in: path + required: true + description: The ID of the file to use for this request. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIFile' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - Files + operationId: deleteFile + summary: Delete a file + parameters: + - name: file_id + in: path + required: true + description: The ID of the file to use for this request. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteFileResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /files/{file_id}/content: + get: + tags: + - Files + operationId: downloadFile + summary: Returns the contents of the specified file. + parameters: + - name: file_id + in: path + required: true + description: The ID of the file to use for this request. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + type: string + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /fine-tunes: + post: + tags: + - Fine-tuning + operationId: createFineTune + summary: |- + Creates a job that fine-tunes a specified model from a given dataset. + + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + + [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + parameters: [] + responses: + '200': + description: The request has succeeded. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/FineTune' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateFineTuneRequest' + deprecated: true + get: + tags: + - Fine-tuning + operationId: listFineTunes + summary: List your organization's fine-tuning jobs + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListFineTunesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + deprecated: true + /fine-tunes/{fine_tune_id}: + get: + tags: + - Fine-tuning + operationId: retrieveFineTune + summary: |- + Gets info about the fine-tune job. + + [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + parameters: + - name: fine_tune_id + in: path + required: true + description: The ID of the fine-tune job + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/FineTune' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + deprecated: true + /fine-tunes/{fine_tune_id}/cancel: + post: + tags: + - Fine-tuning + operationId: cancelFineTune + summary: Immediately cancel a fine-tune job. + parameters: + - name: fine_tune_id + in: path + required: true + description: The ID of the fine-tune job to cancel + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/FineTune' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + deprecated: true + /fine-tunes/{fine_tune_id}/events: + get: + tags: + - Fine-tuning + operationId: listFineTuneEvents + summary: Get fine-grained status updates for a fine-tune job. + parameters: + - name: fine_tune_id + in: path + required: true + description: The ID of the fine-tune job to get events for. + schema: + type: string + - name: stream + in: query + required: false + description: |- + Whether to stream events for the fine-tune job. If set to true, events will be sent as + data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available. The stream will terminate with a `data: [DONE]` message when the + job is finished (succeeded, cancelled, or failed). + + If set to false, only events generated so far will be returned. + schema: + type: boolean + default: false + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListFineTuneEventsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + deprecated: true + /fine_tuning/jobs: + post: + tags: + - Fine-tuning + operationId: createFineTuningJob + description: |- + Creates a job that fine-tunes a specified model from a given dataset. + + Response includes details of the enqueued job including job status and the name of the + fine-tuned models once complete. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/FineTuningJob' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateFineTuningJobRequest' + get: + tags: + - Fine-tuning + operationId: listPaginatedFineTuningJobs + parameters: + - name: after + in: query + required: false + description: Identifier for the last job from the previous pagination request. + schema: + type: string + - name: limit + in: query + required: false + description: Number of fine-tuning jobs to retrieve. + schema: + type: integer + format: int64 + default: 20 + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListPaginatedFineTuningJobsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /fine_tuning/jobs/{fine_tuning_job_id}: + get: + tags: + - Fine-tuning + operationId: retrieveFineTuningJob + summary: |- + Get info about a fine-tuning job. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + parameters: + - name: fine_tuning_job_id + in: path + required: true + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/FineTuningJob' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /fine_tuning/jobs/{fine_tuning_job_id}/cancel: + post: + tags: + - Fine-tuning + operationId: cancelFineTuningJob + summary: Immediately cancel a fine-tune job. + parameters: + - name: fine_tuning_job_id + in: path + required: true + description: The ID of the fine-tuning job to cancel. + schema: + type: string + responses: + '200': + description: The request has succeeded. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/FineTuningJob' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /fine_tuning/jobs/{fine_tuning_job_id}/events: + get: + tags: + - Fine-tuning + operationId: listFineTuningEvents + summary: Get status updates for a fine-tuning job. + parameters: + - name: fine_tuning_job_id + in: path + required: true + description: The ID of the fine-tuning job to get events for. + schema: + type: string + - name: after + in: query + required: false + description: Identifier for the last event from the previous pagination request. + schema: + type: string + - name: limit + in: query + required: false + description: Number of events to retrieve. + schema: + type: integer + default: 20 + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListFineTuningJobEventsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /images/edits: + post: + tags: + - Images + operationId: createImageEdit + summary: Creates an edited or extended image given an original image and a prompt. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ImagesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateImageEditRequestMultiPart' + /images/generations: + post: + tags: + - Images + operationId: createImage + summary: Creates an image given a prompt + parameters: [] + responses: + '200': + description: The request has succeeded. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ImagesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateImageRequest' + /images/variations: + post: + tags: + - Images + operationId: createImageVariation + summary: Creates an edited or extended image given an original image and a prompt. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ImagesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateImageVariationRequestMultiPart' + /models: + get: + tags: + - Models + operationId: listModels + summary: |- + Lists the currently available models, and provides basic information about each one such as the + owner and availability. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListModelsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /models/{model}: + get: + tags: + - Models + operationId: retrieveModel + summary: |- + Retrieves a model instance, providing basic information about the model such as the owner and + permissioning. + parameters: + - name: model + in: path + required: true + description: The ID of the model to use for this request. + schema: + type: string + responses: + '200': + description: The request has succeeded. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/Model' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - Models + operationId: deleteModel + summary: Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + parameters: + - name: model + in: path + required: true + description: The model to delete + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteModelResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /moderations: + post: + tags: + - Moderations + operationId: createModeration + summary: Classifies if text violates OpenAI's Content Policy + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/CreateModerationResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateModerationRequest' + /threads: + post: + tags: + - Assistants + operationId: createThread + summary: Create a thread. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ThreadObject' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateThreadRequest' + /threads/runs: + post: + tags: + - Assistants + operationId: createThreadAndRun + summary: Create a thread and run it in one request. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateThreadAndRunRequest' + /threads/{thread_id}: + get: + tags: + - Assistants + operationId: getThread + summary: Retrieves a thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to retrieve. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ThreadObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + post: + tags: + - Assistants + operationId: modifyThread + summary: Modifies a thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to modify. Only the `metadata` can be modified. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ThreadObject' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ModifyThreadRequest' + delete: + tags: + - Assistants + operationId: deleteThread + summary: Delete a thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to delete. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteThreadResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/messages: + post: + tags: + - Assistants + operationId: createMessage + summary: Create a message. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) to create a message for. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/MessageObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateMessageRequest' + get: + tags: + - Assistants + operationId: listMessages + summary: Returns a list of messages for a given thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) the messages belong to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. 
+ schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + $ref: '#/components/schemas/ListOrder' + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListMessagesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/messages/{message_id}: + get: + tags: + - Assistants + operationId: getMessage + summary: Retrieve a message. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) to which this message belongs. + schema: + type: string + - name: message_id + in: path + required: true + description: The ID of the message to retrieve. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/MessageObject' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + post: + tags: + - Assistants + operationId: modifyMessage + summary: Modifies a message. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to which this message belongs. + schema: + type: string + - name: message_id + in: path + required: true + description: The ID of the message to modify. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/MessageObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ModifyMessageRequest' + /threads/{thread_id}/messages/{message_id}/files: + get: + tags: + - Assistants + operationId: listMessageFiles + summary: Returns a list of message files. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread that the message and files belong to. + schema: + type: string + - name: message_id + in: path + required: true + description: The ID of the message that the files belongs to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + $ref: '#/components/schemas/ListOrder' + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
+ For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListMessageFilesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/messages/{message_id}/files/{file_id}: + get: + tags: + - Assistants + operationId: getMessageFile + summary: Retrieves a message file. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to which the message and File belong. + schema: + type: string + - name: message_id + in: path + required: true + description: The ID of the message the file belongs to. + schema: + type: string + - name: file_id + in: path + required: true + description: The ID of the file being retrieved. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/MessageFileObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs: + post: + tags: + - Assistants + operationId: createRun + summary: Create a run. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to run. 
+ schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateRunRequest' + get: + tags: + - Assistants + operationId: listRuns + summary: Returns a list of runs belonging to a thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread the run belongs to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + $ref: '#/components/schemas/ListOrder' + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ListRunsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs/{run_id}: + get: + tags: + - Assistants + operationId: getRun + summary: Retrieves a run. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) that was run. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run to retrieve. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + post: + tags: + - Assistants + operationId: modifyRun + summary: Modifies a run. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) that was run. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run to modify. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ModifyRunRequest' + /threads/{thread_id}/runs/{run_id}/cancel: + post: + tags: + - Assistants + operationId: cancelRun + summary: Cancels a run that is `in_progress`. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to which this run belongs. 
+ schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run to cancel. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs/{run_id}/steps: + get: + tags: + - Assistants + operationId: listRunSteps + summary: Returns a list of run steps belonging to a run. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread the run and run steps belong to. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run the run steps belong to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + $ref: '#/components/schemas/ListOrder' + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
+ For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListRunStepsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs/{run_id}/steps/{step_id}: + get: + tags: + - Assistants + operationId: getRunStep + summary: Retrieves a run step. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to which the run and run step belongs. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run to which the run step belongs. + schema: + type: string + - name: step_id + in: path + required: true + description: The ID of the run step to retrieve. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunStepObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: + post: + tags: + - Assistants + operationId: submitToolOuputsToRun + summary: |- + When a run has the `status: "requires_action"` and `required_action.type` is + `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once + they're all completed. All outputs must be submitted in a single request. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) to which this run belongs. 
+ schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run that requires the tool output submission. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/SubmitToolOutputsRunRequest' +security: + - BearerAuth: [] +components: + schemas: + AssistantFileObject: + type: object + required: + - id + - object + - created_at + - assistant_id + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints. + object: + type: string + enum: + - assistant.file + description: The object type, which is always `assistant.file`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the assistant file was created. + assistant_id: + type: string + description: The assistant ID that the file is attached to. + description: A list of [Files](/docs/api-reference/files) attached to an `assistant`. + AssistantObject: + type: object + required: + - id + - object + - created_at + - name + - description + - model + - instructions + - tools + - file_ids + - metadata + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints. + object: + type: string + enum: + - assistant + description: The object type, which is always `assistant`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the assistant was created. + name: + type: string + nullable: true + maxLength: 256 + description: The name of the assistant. The maximum length is 256 characters. 
+ description: + type: string + nullable: true + maxLength: 512 + description: The description of the assistant. The maximum length is 512 characters. + model: + type: string + description: |- + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + instructions: + type: string + nullable: true + maxLength: 32768 + description: The system instructions that the assistant uses. The maximum length is 32768 characters. + tools: + allOf: + - $ref: '#/components/schemas/CreateAssistantRequestToolsItem' + description: |- + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + Tools can be of types `code_interpreter`, `retrieval`, or `function`. + default: [] + file_ids: + type: array + items: + type: string + maxItems: 20 + description: |- + A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + maximum of 20 files attached to the assistant. Files are ordered by their creation date in + ascending order. + default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + description: Represents an `assistant` that can call the model and use tools. 
+ AssistantToolsCode: + type: object + required: + - type + properties: + type: + type: string + enum: + - code_interpreter + description: 'The type of tool being defined: `code_interpreter`' + AssistantToolsFunction: + type: object + required: + - type + - function + properties: + type: + type: string + enum: + - function + description: 'The type of tool being defined: `function`' + function: + $ref: '#/components/schemas/FunctionObject' + AssistantToolsRetrieval: + type: object + required: + - type + properties: + type: + type: string + enum: + - retrieval + description: 'The type of tool being defined: `retrieval`' + AudioSegment: + type: object + required: + - id + - seek + - start + - end + - text + - tokens + - temperature + - avg_logprob + - compression_ratio + - no_speech_prob + properties: + id: + type: integer + format: int64 + description: The zero-based index of this segment. + seek: + type: integer + format: int64 + description: |- + The seek position associated with the processing of this audio segment. Seek positions are + expressed as hundredths of seconds. The model may process several segments from a single seek + position, so while the seek position will never represent a later time than the segment's + start, the segment's start may represent a significantly later time than the segment's + associated seek position. + start: + type: number + format: double + description: The time at which this segment started relative to the beginning of the audio. + end: + type: number + format: double + description: The time at which this segment ended relative to the beginning of the audio. + text: + type: string + description: The text that was part of this audio segment. + tokens: + allOf: + - $ref: '#/components/schemas/TokenArrayItem' + description: The token IDs matching the text in this audio segment. + temperature: + type: number + format: double + minimum: 0 + maximum: 1 + description: The temperature score associated with this audio segment. 
+ avg_logprob: + type: number + format: double + description: The average log probability associated with this audio segment. + compression_ratio: + type: number + format: double + description: The compression ratio of this audio segment. + no_speech_prob: + type: number + format: double + description: The probability of no speech detection within this audio segment. + ChatCompletionFunctionCallOption: + type: object + required: + - name + properties: + name: + type: string + description: The name of the function to call. + description: |- + Specifying a particular function via `{"name": "my_function"}` forces the model to call that + function. + ChatCompletionFunctions: + type: object + required: + - name + properties: + description: + type: string + description: |- + A description of what the function does, used by the model to choose when and how to call the + function. + name: + type: string + description: |- + The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + dashes, with a maximum length of 64. + parameters: + $ref: '#/components/schemas/FunctionParameters' + deprecated: true + ChatCompletionMessageToolCall: + type: object + required: + - id + - type + - function + properties: + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: + - function + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: |- + The arguments to call the function with, as generated by the model in JSON format. Note that + the model does not always generate valid JSON, and may hallucinate parameters not defined by + your function schema. Validate the arguments in your code before calling your function. + required: + - name + - arguments + description: The function that the model called. 
+    ChatCompletionMessageToolCallsItem:
+      type: array
+      items:
+        $ref: '#/components/schemas/ChatCompletionMessageToolCall'
+      description: The tool calls generated by the model, such as function calls.
+    ChatCompletionNamedToolChoice:
+      type: object
+      required:
+        - type
+        - function
+      properties:
+        type:
+          type: string
+          enum:
+            - function
+          description: The type of the tool. Currently, only `function` is supported.
+        function:
+          type: object
+          properties:
+            name:
+              type: string
+              description: The name of the function to call.
+          required:
+            - name
+      description: Specifies a tool the model should use. Use to force the model to call a specific function.
+    ChatCompletionRequestAssistantMessage:
+      type: object
+      required:
+        - role
+      properties:
+        content:
+          type: string
+          nullable: true
+          description: |-
+            The contents of the assistant message. Required unless `tool_calls` or `function_call` is
+            specified.
+        role:
+          type: string
+          enum:
+            - assistant
+          description: The role of the messages author, in this case `assistant`.
+        name:
+          type: string
+          description: |-
+            An optional name for the participant. Provides the model information to differentiate between
+            participants of the same role.
+        tool_calls:
+          $ref: '#/components/schemas/ChatCompletionMessageToolCallsItem'
+        function_call:
+          type: object
+          properties:
+            arguments:
+              type: string
+              description: |-
+                The arguments to call the function with, as generated by the model in JSON format. Note that
+                the model does not always generate valid JSON, and may hallucinate parameters not defined by
+                your function schema. Validate the arguments in your code before calling your function.
+            name:
+              type: string
+              description: The name of the function to call.
+          required:
+            - arguments
+            - name
+          description: |-
+            Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be
+            called, as generated by the model.
+          deprecated: true
+    ChatCompletionRequestFunctionMessage:
+      type: object
+      required:
+        - role
+        - content
+        - name
+      properties:
+        role:
+          type: string
+          enum:
+            - function
+          description: The role of the messages author, in this case `function`.
+        content:
+          type: string
+          nullable: true
+          description: The contents of the function message.
+        name:
+          type: string
+          description: The name of the function to call.
+    ChatCompletionRequestMessage:
+      oneOf:
+        - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage'
+        - $ref: '#/components/schemas/ChatCompletionRequestUserMessage'
+        - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage'
+        - $ref: '#/components/schemas/ChatCompletionRequestToolMessage'
+        - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage'
+      x-oaiExpandable: true
+    ChatCompletionRequestMessageContentPart:
+      oneOf:
+        - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText'
+        - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartImage'
+      x-oaiExpandable: true
+    ChatCompletionRequestMessageContentPartImage:
+      type: object
+      required:
+        - type
+        - image_url
+      properties:
+        type:
+          type: string
+          enum:
+            - image_url
+          description: The type of the content part.
+        image_url:
+          type: object
+          properties:
+            url:
+              anyOf:
+                - type: string
+                  format: uri
+                - type: string
+              description: Either a URL of the image or the base64 encoded image data.
+            detail:
+              type: string
+              enum:
+                - auto
+                - low
+                - high
+              description: |-
+                Specifies the detail level of the image. Learn more in the
+                [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).
+              default: auto
+          required:
+            - url
+    ChatCompletionRequestMessageContentPartText:
+      type: object
+      required:
+        - type
+        - text
+      properties:
+        type:
+          type: string
+          enum:
+            - text
+          description: The type of the content part.
+        text:
+          type: string
+          description: The text content.
+    ChatCompletionRequestMessageContentParts:
+      type: array
+      items:
+        $ref: '#/components/schemas/ChatCompletionRequestMessageContentPart'
+      minItems: 1
+    ChatCompletionRequestSystemMessage:
+      type: object
+      required:
+        - content
+        - role
+      properties:
+        content:
+          type: string
+          description: The contents of the system message.
+          x-oaiExpandable: true
+        role:
+          type: string
+          enum:
+            - system
+          description: The role of the messages author, in this case `system`.
+        name:
+          type: string
+          description: |-
+            An optional name for the participant. Provides the model information to differentiate between
+            participants of the same role.
+    ChatCompletionRequestToolMessage:
+      type: object
+      required:
+        - role
+        - content
+        - tool_call_id
+      properties:
+        role:
+          type: string
+          enum:
+            - tool
+          description: The role of the messages author, in this case `tool`.
+        content:
+          type: string
+          description: The contents of the tool message.
+        tool_call_id:
+          type: string
+          description: Tool call that this message is responding to.
+    ChatCompletionRequestUserMessage:
+      type: object
+      required:
+        - content
+        - role
+      properties:
+        content:
+          allOf:
+            - $ref: '#/components/schemas/ChatCompletionRequestUserMessageContent'
+          description: The contents of the user message.
+          x-oaiExpandable: true
+        role:
+          type: string
+          enum:
+            - user
+          description: The role of the messages author, in this case `user`.
+        name:
+          type: string
+          description: |-
+            An optional name for the participant. Provides the model information to differentiate between
+            participants of the same role.
+    ChatCompletionRequestUserMessageContent:
+      oneOf:
+        - type: string
+        - $ref: '#/components/schemas/ChatCompletionRequestMessageContentParts'
+    ChatCompletionResponseMessage:
+      type: object
+      required:
+        - content
+        - role
+      properties:
+        content:
+          type: string
+          nullable: true
+          description: The contents of the message.
+ tool_calls: + $ref: '#/components/schemas/ChatCompletionMessageToolCallsItem' + role: + type: string + enum: + - assistant + description: The role of the author of this message. + function_call: + type: object + properties: + arguments: + type: string + description: |- + The arguments to call the function with, as generated by the model in JSON format. Note that + the model does not always generate valid JSON, and may hallucinate parameters not defined by + your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - arguments + - name + description: Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. + deprecated: true + ChatCompletionTokenLogprob: + type: object + required: + - token + - logprob + - bytes + - top_logprobs + properties: + token: + type: string + description: The token. + logprob: + type: number + format: double + description: The log probability of this token. + bytes: + type: array + items: + type: integer + format: int64 + nullable: true + description: |- + A list of integers representing the UTF-8 bytes representation of the token. Useful in + instances where characters are represented by multiple tokens and their byte representations + must be combined to generate the correct text representation. Can be `null` if there is no + bytes representation for the token. + top_logprobs: + type: array + items: + type: object + properties: + token: + type: string + description: The token. + logprob: + type: number + format: double + description: The log probability of this token. + bytes: + type: array + items: + type: integer + format: int64 + nullable: true + description: |- + A list of integers representing the UTF-8 bytes representation of the token. 
Useful in
+                  instances where characters are represented by multiple tokens and their byte representations
+                  must be combined to generate the correct text representation. Can be `null` if there is no
+                  bytes representation for the token.
+            required:
+              - token
+              - logprob
+              - bytes
+          description: |-
+            List of the most likely tokens and their log probability, at this token position. In rare
+            cases, there may be fewer than the number of requested `top_logprobs` returned.
+    ChatCompletionTool:
+      type: object
+      required:
+        - type
+        - function
+      properties:
+        type:
+          type: string
+          enum:
+            - function
+          description: The type of the tool. Currently, only `function` is supported.
+        function:
+          $ref: '#/components/schemas/FunctionObject'
+    ChatCompletionToolChoiceOption:
+      oneOf:
+        - type: string
+          enum:
+            - none
+            - auto
+        - $ref: '#/components/schemas/ChatCompletionNamedToolChoice'
+      description: |-
+        Controls which (if any) function is called by the model. `none` means the model will not call a
+        function and instead generates a message. `auto` means the model can pick between generating a
+        message or calling a function. Specifying a particular function via
+        `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that
+        function.
+
+        `none` is the default when no functions are present. `auto` is the default if functions are
+        present.
+      x-oaiExpandable: true
+    CompletionUsage:
+      type: object
+      required:
+        - prompt_tokens
+        - completion_tokens
+        - total_tokens
+      properties:
+        prompt_tokens:
+          type: integer
+          format: int64
+          description: Number of tokens in the prompt.
+        completion_tokens:
+          type: integer
+          format: int64
+          description: Number of tokens in the generated completion.
+        total_tokens:
+          type: integer
+          format: int64
+          description: Total number of tokens used in the request (prompt + completion).
+      description: Usage statistics for the completion request.
+ CreateAssistantFileRequest: + type: object + required: + - file_id + properties: + file_id: + type: string + description: |- + A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should + use. Useful for tools like `retrieval` and `code_interpreter` that can access files. + CreateAssistantRequest: + type: object + required: + - model + properties: + model: + type: string + description: |- + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + name: + type: string + nullable: true + maxLength: 256 + description: The name of the assistant. The maximum length is 256 characters. + description: + type: string + nullable: true + maxLength: 512 + description: The description of the assistant. The maximum length is 512 characters. + instructions: + type: string + nullable: true + maxLength: 32768 + description: The system instructions that the assistant uses. The maximum length is 32768 characters. + tools: + allOf: + - $ref: '#/components/schemas/CreateAssistantRequestToolsItem' + description: |- + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + Tools can be of types `code_interpreter`, `retrieval`, or `function`. + default: [] + file_ids: + type: array + items: + type: string + maxItems: 20 + description: |- + A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + maximum of 20 files attached to the assistant. Files are ordered by their creation date in + ascending order. + default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. 
Keys can be a maximum of 64
+            characters long and values can be a maximum of 512 characters long.
+          x-oaiTypeLabel: map
+    CreateAssistantRequestTool:
+      oneOf:
+        - $ref: '#/components/schemas/AssistantToolsCode'
+        - $ref: '#/components/schemas/AssistantToolsRetrieval'
+        - $ref: '#/components/schemas/AssistantToolsFunction'
+      x-oaiExpandable: true
+    CreateAssistantRequestToolsItem:
+      type: array
+      items:
+        $ref: '#/components/schemas/CreateAssistantRequestTool'
+      maxItems: 128
+    CreateChatCompletionRequest:
+      type: object
+      required:
+        - messages
+        - model
+      properties:
+        messages:
+          type: array
+          items:
+            $ref: '#/components/schemas/ChatCompletionRequestMessage'
+          minItems: 1
+          description: |-
+            A list of messages comprising the conversation so far.
+            [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).
+        model:
+          anyOf:
+            - type: string
+            - type: string
+              enum:
+                - gpt-4-0125-preview
+                - gpt-4-turbo-preview
+                - gpt-4-1106-preview
+                - gpt-4-vision-preview
+                - gpt-4
+                - gpt-4-0314
+                - gpt-4-0613
+                - gpt-4-32k
+                - gpt-4-32k-0314
+                - gpt-4-32k-0613
+                - gpt-3.5-turbo
+                - gpt-3.5-turbo-16k
+                - gpt-3.5-turbo-0301
+                - gpt-3.5-turbo-0613
+                - gpt-3.5-turbo-1106
+                - gpt-3.5-turbo-16k-0613
+          description: |-
+            ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility)
+            table for details on which models work with the Chat API.
+          x-oaiTypeLabel: string
+        frequency_penalty:
+          type: number
+          format: double
+          nullable: true
+          minimum: -2
+          maximum: 2
+          description: |-
+            Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
+            frequency in the text so far, decreasing the model's likelihood to repeat the same line
+            verbatim.
+ + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + default: 0 + logit_bias: + type: object + additionalProperties: + type: integer + format: int64 + nullable: true + description: |- + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an + associated bias value from -100 to 100. Mathematically, the bias is added to the logits + generated by the model prior to sampling. The exact effect will vary per model, but values + between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + should result in a ban or exclusive selection of the relevant token. + x-oaiTypeLabel: map + default: null + logprobs: + type: boolean + nullable: true + description: |- + Whether to return log probabilities of the output tokens or not. If true, returns the log + probabilities of each output token returned in the `content` of `message`. This option is + currently not available on the `gpt-4-vision-preview` model. + default: false + top_logprobs: + type: integer + format: int64 + nullable: true + minimum: 0 + maximum: 5 + description: |- + An integer between 0 and 5 specifying the number of most likely tokens to return at each token + position, each with an associated log probability. `logprobs` must be set to `true` if this + parameter is used. + max_tokens: + type: integer + format: int64 + nullable: true + minimum: 0 + description: |- + The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. + [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + for counting tokens. 
+ default: 16 + n: + type: integer + format: int64 + nullable: true + minimum: 1 + maximum: 128 + description: |- + How many chat completion choices to generate for each input message. Note that you will be + charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to + minimize costs. + default: 1 + presence_penalty: + type: number + format: double + nullable: true + minimum: -2 + maximum: 2 + description: |- + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + in the text so far, increasing the model's likelihood to talk about new topics. + + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + default: 0 + response_format: + type: object + properties: + type: + type: string + enum: + - text + - json_object + description: Must be one of `text` or `json_object`. + default: text + description: |- + An object specifying the format that the model must output. Compatible with + [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the + model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + yourself via a system or user message. Without this, the model may generate an unending stream + of whitespace until the generation reaches the token limit, resulting in a long-running and + seemingly "stuck" request. Also note that the message content may be partially cut off if + `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the + conversation exceeded the max context length. + seed: + type: integer + format: int64 + nullable: true + minimum: -9223372036854776000 + maximum: 9223372036854776000 + description: |- + This feature is in Beta. 
+ + If specified, our system will make a best effort to sample deterministically, such that + repeated requests with the same `seed` and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + stop: + oneOf: + - $ref: '#/components/schemas/Stop' + nullable: true + description: Up to 4 sequences where the API will stop generating further tokens. + default: null + stream: + type: boolean + nullable: true + description: |- + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. + [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + default: false + temperature: + type: number + format: double + nullable: true + minimum: 0 + maximum: 2 + description: |- + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + default: 1 + top_p: + type: number + format: double + nullable: true + minimum: 0 + maximum: 1 + description: |- + An alternative to sampling with temperature, called nucleus sampling, where the model considers + the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + default: 1 + tools: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTool' + description: |- + A list of tools the model may call. Currently, only functions are supported as a tool. 
Use this
+            to provide a list of functions the model may generate JSON inputs for.
+        tool_choice:
+          $ref: '#/components/schemas/ChatCompletionToolChoiceOption'
+        user:
+          allOf:
+            - $ref: '#/components/schemas/User'
+          description: |-
+            A unique identifier representing your end-user, which can help OpenAI to monitor and detect
+            abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+        function_call:
+          anyOf:
+            - type: string
+              enum:
+                - none
+                - auto
+            - $ref: '#/components/schemas/ChatCompletionFunctionCallOption'
+          description: |-
+            Deprecated in favor of `tool_choice`.
+
+            Controls which (if any) function is called by the model. `none` means the model will not call a
+            function and instead generates a message. `auto` means the model can pick between generating a
+            message or calling a function. Specifying a particular function via `{"name": "my_function"}`
+            forces the model to call that function.
+
+            `none` is the default when no functions are present. `auto` is the default if functions are
+            present.
+          deprecated: true
+          x-oaiExpandable: true
+        functions:
+          type: array
+          items:
+            $ref: '#/components/schemas/ChatCompletionFunctions'
+          minItems: 1
+          maxItems: 128
+          description: |-
+            Deprecated in favor of `tools`.
+
+            A list of functions the model may generate JSON inputs for.
+          deprecated: true
+    CreateChatCompletionResponse:
+      type: object
+      required:
+        - id
+        - choices
+        - created
+        - model
+        - object
+      properties:
+        id:
+          type: string
+          description: A unique identifier for the chat completion.
+        choices:
+          type: array
+          items:
+            type: object
+            properties:
+              finish_reason:
+                type: string
+                enum:
+                  - stop
+                  - length
+                  - tool_calls
+                  - content_filter
+                  - function_call
+                description: |-
+                  The reason the model stopped generating tokens. 
This will be `stop` if the model hit a + natural stop point or a provided stop sequence, `length` if the maximum number of tokens + specified in the request was reached, `content_filter` if content was omitted due to a flag + from our content filters, `tool_calls` if the model called a tool, or `function_call` + (deprecated) if the model called a function. + index: + type: integer + format: int64 + description: The index of the choice in the list of choices. + message: + $ref: '#/components/schemas/ChatCompletionResponseMessage' + logprobs: + type: object + properties: + content: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTokenLogprob' + nullable: true + required: + - content + nullable: true + description: Log probability information for the choice. + required: + - finish_reason + - index + - message + - logprobs + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + created: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: + type: string + description: The model used for the chat completion. + system_fingerprint: + type: string + description: |- + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes + have been made that might impact determinism. + object: + type: string + enum: + - chat.completion + description: The object type, which is always `chat.completion`. + usage: + $ref: '#/components/schemas/CompletionUsage' + description: Represents a chat completion response returned by model, based on the provided input. + CreateCompletionRequest: + type: object + required: + - model + - prompt + properties: + model: + anyOf: + - type: string + - type: string + enum: + - gpt-3.5-turbo-instruct + - davinci-002 + - babbage-002 + description: |- + ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + x-oaiTypeLabel: string + prompt: + oneOf: + - $ref: '#/components/schemas/Prompt' + nullable: true + description: |- + The prompt(s) to generate completions for, encoded as a string, array of strings, array of + tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during training, so if a + prompt is not specified the model will generate as if from the beginning of a new document. + default: <|endoftext|> + best_of: + type: integer + format: int64 + nullable: true + minimum: 0 + maximum: 20 + description: |- + Generates `best_of` completions server-side and returns the "best" (the one with the highest + log probability per token). Results cannot be streamed. + + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly consume your token + quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + default: 1 + echo: + type: boolean + nullable: true + description: Echo back the prompt in addition to the completion + default: false + frequency_penalty: + type: number + format: double + nullable: true + minimum: -2 + maximum: 2 + description: |- + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + frequency in the text so far, decreasing the model's likelihood to repeat the same line + verbatim. 
+ + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + default: 0 + logit_bias: + type: object + additionalProperties: + type: integer + format: int64 + nullable: true + description: |- + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an + associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) + to convert text to token IDs. Mathematically, the bias is added to the logits generated by the + model prior to sampling. The exact effect will vary per model, but values between -1 and 1 + should decrease or increase likelihood of selection; values like -100 or 100 should result in a + ban or exclusive selection of the relevant token. + + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being + generated. + x-oaiTypeLabel: map + default: null + logprobs: + type: integer + format: int64 + nullable: true + minimum: 0 + maximum: 5 + description: |- + Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. + For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The + API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + elements in the response. + + The maximum value for `logprobs` is 5. + default: null + max_tokens: + type: integer + format: int64 + nullable: true + minimum: 0 + description: |- + The maximum number of [tokens](/tokenizer) to generate in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + for counting tokens. 
+ default: 16 + n: + type: integer + format: int64 + nullable: true + minimum: 1 + maximum: 128 + description: |- + How many completions to generate for each prompt. + + **Note:** Because this parameter generates many completions, it can quickly consume your token + quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + default: 1 + presence_penalty: + type: number + format: double + nullable: true + minimum: -2 + maximum: 2 + description: |- + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + in the text so far, increasing the model's likelihood to talk about new topics. + + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + default: 0 + seed: + type: integer + format: int64 + nullable: true + minimum: -9223372036854776000 + maximum: 9223372036854776000 + description: |- + If specified, our system will make a best effort to sample deterministically, such that + repeated requests with the same `seed` and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + stop: + oneOf: + - $ref: '#/components/schemas/Stop' + nullable: true + description: Up to 4 sequences where the API will stop generating further tokens. + default: null + stream: + type: boolean + nullable: true + description: |- + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. + [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). 
+ default: false + suffix: + type: string + nullable: true + description: The suffix that comes after a completion of inserted text. + default: null + temperature: + type: number + format: double + nullable: true + minimum: 0 + maximum: 2 + description: |- + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + default: 1 + top_p: + type: number + format: double + nullable: true + minimum: 0 + maximum: 1 + description: |- + An alternative to sampling with temperature, called nucleus sampling, where the model considers + the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + default: 1 + user: + allOf: + - $ref: '#/components/schemas/User' + description: |- + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + CreateCompletionResponse: + type: object + required: + - id + - choices + - created + - model + - object + properties: + id: + type: string + description: A unique identifier for the completion. 
+        choices:
+          type: array
+          items:
+            type: object
+            properties:
+              index:
+                type: integer
+                format: int64
+              text:
+                type: string
+              logprobs:
+                type: object
+                properties:
+                  tokens:
+                    type: array
+                    items:
+                      type: string
+                  token_logprobs:
+                    type: array
+                    items:
+                      type: number
+                      format: double
+                  top_logprobs:
+                    type: array
+                    items:
+                      type: object
+                      additionalProperties:
+                        type: integer
+                        format: int64
+                  text_offset:
+                    type: array
+                    items:
+                      type: integer
+                      format: int64
+                required:
+                  - tokens
+                  - token_logprobs
+                  - top_logprobs
+                  - text_offset
+                nullable: true
+              finish_reason:
+                type: string
+                enum:
+                  - stop
+                  - length
+                  - content_filter
+                description: |-
+                  The reason the model stopped generating tokens. This will be `stop` if the model hit a
+                  natural stop point or a provided stop sequence, `length` if the maximum number of tokens
+                  specified in the request was reached, or `content_filter` if content was omitted due to a
+                  flag from our content filters.
+            required:
+              - index
+              - text
+              - logprobs
+              - finish_reason
+          description: The list of completion choices the model generated for the input.
+        created:
+          type: integer
+          format: unixtime
+          description: The Unix timestamp (in seconds) of when the completion was created.
+        model:
+          type: string
+          description: The model used for the completion.
+        system_fingerprint:
+          type: string
+          description: |-
+            This fingerprint represents the backend configuration that the model runs with.
+
+            Can be used in conjunction with the `seed` request parameter to understand when backend changes
+            have been made that might impact determinism.
+        object:
+          type: string
+          enum:
+            - text_completion
+          description: The object type, which is always `text_completion`.
+        usage:
+          allOf:
+            - $ref: '#/components/schemas/CompletionUsage'
+          description: Usage statistics for the completion request.
+ description: |- + Represents a completion response from the API. Note: both the streamed and non-streamed response + objects share the same shape (unlike the chat endpoint). + CreateEmbeddingRequest: + type: object + required: + - input + - model + properties: + input: + allOf: + - $ref: '#/components/schemas/CreateEmbeddingRequestInput' + description: |- + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + single request, pass an array of strings or array of token arrays. Each input must not exceed + the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + empty string. + [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + for counting tokens. + x-oaiExpandable: true + model: + anyOf: + - type: string + - type: string + enum: + - text-embedding-ada-002 + - text-embedding-3-small + - text-embedding-3-large + description: |- + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + x-oaiTypeLabel: string + encoding_format: + type: string + enum: + - float + - base64 + description: |- + The format to return the embeddings in. Can be either `float` or + [`base64`](https://pypi.org/project/pybase64/). + default: float + dimensions: + type: integer + format: int64 + minimum: 1 + description: |- + The number of dimensions the resulting output embeddings should have. Only supported in + `text-embedding-3` and later models. + user: + allOf: + - $ref: '#/components/schemas/User' + description: |- + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). 
+ CreateEmbeddingRequestInput: + oneOf: + - type: string + - type: array + items: + type: string + - $ref: '#/components/schemas/TokenArrayItem' + - $ref: '#/components/schemas/TokenArrayArray' + CreateEmbeddingResponse: + type: object + required: + - data + - model + - object + - usage + properties: + data: + type: array + items: + $ref: '#/components/schemas/Embedding' + description: The list of embeddings generated by the model. + model: + type: string + description: The name of the model used to generate the embedding. + object: + type: string + enum: + - list + description: The object type, which is always "list". + usage: + type: object + properties: + prompt_tokens: + type: integer + format: int64 + description: The number of tokens used by the prompt. + total_tokens: + type: integer + format: int64 + description: The total number of tokens used by the request. + required: + - prompt_tokens + - total_tokens + description: The usage information for the request. + CreateFileRequestMultiPart: + type: object + required: + - file + - purpose + properties: + file: + type: string + format: binary + description: The file object (not file name) to be uploaded. + purpose: + type: string + enum: + - fine-tune + - assistants + description: |- + The intended purpose of the uploaded file. Use "fine-tune" for + [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for + [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This + allows us to validate the format of the uploaded file is correct for fine-tuning. + CreateFineTuneRequest: + type: object + required: + - training_file + properties: + training_file: + type: string + description: |- + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/upload) for how to upload a file. + + Your dataset must be formatted as a JSONL file, where each training example is a JSON object + with the keys "prompt" and "completion". 
Additionally, you must upload your file with the + purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + details. + validation_file: + type: string + nullable: true + description: |- + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation metrics periodically during + fine-tuning. These metrics can be viewed in the + [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + Your train and validation data should be mutually exclusive. + + Your dataset must be formatted as a JSONL file, where each validation example is a JSON object + with the keys "prompt" and "completion". Additionally, you must upload your file with the + purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + details. + model: + anyOf: + - type: string + - type: string + enum: + - ada + - babbage + - curie + - davinci + nullable: true + description: |- + The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", + "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more + about these models, see the [Models](/docs/models) documentation. + x-oaiTypeLabel: string + n_epochs: + type: integer + format: int64 + nullable: true + description: |- + The number of epochs to train the model for. An epoch refers to one full cycle through the + training dataset. + default: 4 + batch_size: + type: integer + format: int64 + nullable: true + description: |- + The batch size to use for training. The batch size is the number of training examples used to + train a single forward and backward pass. 
+ + By default, the batch size will be dynamically configured to be ~0.2% of the number of examples + in the training set, capped at 256 - in general, we've found that larger batch sizes tend to + work better for larger datasets. + default: null + learning_rate_multiplier: + type: number + format: double + nullable: true + description: |- + The learning rate multiplier to use for training. The fine-tuning learning rate is the original + learning rate used for pretraining multiplied by this value. + + By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final + `batch_size` (larger learning rates tend to perform better with larger batch sizes). We + recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best + results. + default: null + prompt_loss_rate: + type: number + format: double + nullable: true + description: |- + The weight to use for loss on the prompt tokens. This controls how much the model tries to + learn to generate the prompt (as compared to the completion which always has a weight of 1.0), + and can add a stabilizing effect to training when completions are short. + + If prompts are extremely long (relative to completions), it may make sense to reduce this + weight so as to avoid over-prioritizing learning the prompt. + default: 0.01 + compute_classification_metrics: + type: boolean + nullable: true + description: |- + If set, we calculate classification-specific metrics such as accuracy and F-1 score using the + validation set at the end of every epoch. These metrics can be viewed in the + [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + + In order to compute classification metrics, you must provide a `validation_file`. Additionally, + you must specify `classification_n_classes` for multiclass classification or + `classification_positive_class` for binary classification. 
+ default: false + classification_n_classes: + type: integer + format: int64 + nullable: true + description: |- + The number of classes in a classification task. + + This parameter is required for multiclass classification. + default: null + classification_positive_class: + type: string + nullable: true + description: |- + The positive class in binary classification. + + This parameter is needed to generate precision, recall, and F1 metrics when doing binary + classification. + default: null + classification_betas: + type: array + items: + type: number + format: double + nullable: true + description: |- + If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score + is a generalization of F-1 score. This is only used for binary classification. + + With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger + beta score puts more weight on recall and less on precision. A smaller beta score puts more + weight on precision and less on recall. + default: null + suffix: + oneOf: + - $ref: '#/components/schemas/SuffixString' + nullable: true + description: |- + A string of up to 18 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like + `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. + default: null + CreateFineTuningJobRequest: + type: object + required: + - training_file + - model + properties: + training_file: + type: string + description: |- + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/upload) for how to upload a file. + + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. 
+ validation_file: + type: string + nullable: true + description: |- + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation metrics periodically during + fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + not be present in both train and validation files. + + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + model: + anyOf: + - type: string + - type: string + enum: + - babbage-002 + - davinci-002 + - gpt-3.5-turbo + description: |- + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + x-oaiTypeLabel: string + hyperparameters: + type: object + properties: + n_epochs: + anyOf: + - type: string + enum: + - auto + - low + - high + - $ref: '#/components/schemas/NEpochs' + description: |- + The number of epochs to train the model for. An epoch refers to one full cycle through the + training dataset. + default: auto + description: The hyperparameters used for the fine-tuning job. + suffix: + oneOf: + - $ref: '#/components/schemas/SuffixString' + nullable: true + description: |- + A string of up to 18 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like + `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + default: null + CreateImageEditRequestMultiPart: + type: object + required: + - image + - prompt + properties: + image: + type: string + format: binary + description: |- + The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + provided, image must have transparency, which will be used as the mask. + prompt: + type: string + maxLength: 1000 + description: A text description of the desired image(s). 
The maximum length is 1000 characters. + mask: + type: string + format: binary + description: |- + An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where + `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + as `image`. + model: + anyOf: + - type: string + - type: string + enum: + - dall-e-2 + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + x-oaiTypeLabel: string + default: dall-e-2 + n: + oneOf: + - $ref: '#/components/schemas/ImagesN' + nullable: true + description: The number of images to generate. Must be between 1 and 10. + default: 1 + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + default: 1024x1024 + response_format: + type: string + enum: + - url + - b64_json + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. + default: url + user: + allOf: + - $ref: '#/components/schemas/User' + description: |- + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + CreateImageRequest: + type: object + required: + - prompt + properties: + prompt: + type: string + description: |- + A text description of the desired image(s). The maximum length is 1000 characters for + `dall-e-2` and 4000 characters for `dall-e-3`. + model: + anyOf: + - type: string + - type: string + enum: + - dall-e-2 + - dall-e-3 + description: The model to use for image generation. + x-oaiTypeLabel: string + default: dall-e-2 + n: + oneOf: + - $ref: '#/components/schemas/ImagesN' + nullable: true + description: |- + The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is + supported.
+ default: 1 + quality: + type: string + enum: + - standard + - hd + nullable: true + description: |- + The quality of the image that will be generated. `hd` creates images with finer details and + greater consistency across the image. This param is only supported for `dall-e-3`. + default: standard + response_format: + type: string + enum: + - url + - b64_json + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. + default: url + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + - 1792x1024 + - 1024x1792 + nullable: true + description: |- + The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for + `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + default: 1024x1024 + style: + type: string + enum: + - vivid + - natural + nullable: true + description: |- + The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model + to lean towards generating hyper-real and dramatic images. Natural causes the model to produce + more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + default: vivid + user: + allOf: + - $ref: '#/components/schemas/User' + description: |- + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + CreateImageVariationRequestMultiPart: + type: object + required: + - image + properties: + image: + type: string + format: binary + description: |- + The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, + and square. + model: + anyOf: + - type: string + - type: string + enum: + - dall-e-2 + description: The model to use for image generation. Only `dall-e-2` is supported at this time. 
+ x-oaiTypeLabel: string + default: dall-e-2 + n: + oneOf: + - $ref: '#/components/schemas/ImagesN' + nullable: true + description: The number of images to generate. Must be between 1 and 10. + default: 1 + response_format: + type: string + enum: + - url + - b64_json + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. + default: url + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + default: 1024x1024 + user: + allOf: + - $ref: '#/components/schemas/User' + description: |- + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + CreateMessageRequest: + type: object + required: + - role + - content + properties: + role: + type: string + enum: + - user + - assistant + description: The role of the entity that is creating the message. Currently only `user` is supported. + content: + type: string + minLength: 1 + maxLength: 32768 + description: The content of the message. + file_ids: + type: array + items: + type: string + minItems: 1 + maxItems: 10 + description: |- + A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a + maximum of 10 files attached to a message. Useful for tools like `retrieval` and + `code_interpreter` that can access and use files. + default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long.
+ x-oaiTypeLabel: map + CreateModerationRequest: + type: object + required: + - input + properties: + input: + allOf: + - $ref: '#/components/schemas/CreateModerationRequestInput' + description: The input text to classify + model: + anyOf: + - type: string + - type: string + enum: + - text-moderation-latest + - text-moderation-stable + description: |- + Two content moderations models are available: `text-moderation-stable` and + `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically + upgraded over time. This ensures you are always using our most accurate model. If you use + `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy + of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + x-oaiTypeLabel: string + default: text-moderation-latest + CreateModerationRequestInput: + oneOf: + - type: string + - type: array + items: + type: string + CreateModerationResponse: + type: object + required: + - id + - model + - results + properties: + id: + type: string + description: The unique identifier for the moderation request. + model: + type: string + description: The model used to generate the moderation results. + results: + type: array + items: + type: object + properties: + flagged: + type: boolean + description: Whether the content violates [OpenAI's usage policies](/policies/usage-policies). + categories: + type: object + properties: + hate: + type: boolean + description: |- + Content that expresses, incites, or promotes hate based on race, gender, ethnicity, + religion, nationality, sexual orientation, disability status, or caste. Hateful content + aimed at non-protected groups (e.g., chess players) is harrassment. 
+ hate/threatening: + type: boolean + description: |- + Hateful content that also includes violence or serious harm towards the targeted group + based on race, gender, ethnicity, religion, nationality, sexual orientation, disability + status, or caste. + harassment: + type: boolean + description: Content that expresses, incites, or promotes harassing language towards any target. + harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm towards any target. + self-harm: + type: boolean + description: |- + Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, + and eating disorders. + self-harm/intent: + type: boolean + description: |- + Content where the speaker expresses that they are engaging or intend to engage in acts of + self-harm, such as suicide, cutting, and eating disorders. + self-harm/instructions: + type: boolean + description: |- + Content that encourages performing acts of self-harm, such as suicide, cutting, and eating + disorders, or that gives instructions or advice on how to commit such acts. + sexual: + type: boolean + description: |- + Content meant to arouse sexual excitement, such as the description of sexual activity, or + that promotes sexual services (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic detail. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + description: A list of the categories, and whether they are flagged or not. 
+ category_scores: + type: object + properties: + hate: + type: number + format: double + description: The score for the category 'hate'. + hate/threatening: + type: number + format: double + description: The score for the category 'hate/threatening'. + harassment: + type: number + format: double + description: The score for the category 'harassment'. + harassment/threatening: + type: number + format: double + description: The score for the category 'harassment/threatening'. + self-harm: + type: number + format: double + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + format: double + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + format: double + description: The score for the category 'self-harm/instructions'. + sexual: + type: number + format: double + description: The score for the category 'sexual'. + sexual/minors: + type: number + format: double + description: The score for the category 'sexual/minors'. + violence: + type: number + format: double + description: The score for the category 'violence'. + violence/graphic: + type: number + format: double + description: The score for the category 'violence/graphic'. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + description: A list of the categories along with their scores as predicted by the model. + required: + - flagged + - categories + - category_scores + description: A list of moderation objects. + description: Represents policy compliance report by OpenAI's content moderation model against a given input. + CreateRunRequest: + type: object + required: + - assistant_id + properties: + assistant_id: + type: string + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.
+ model: + type: string + nullable: true + description: |- + The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value + is provided here, it will override the model associated with the assistant. If not, the model + associated with the assistant will be used. + instructions: + type: string + nullable: true + description: |- + Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. + This is useful for modifying the behavior on a per-run basis. + additional_instructions: + type: string + nullable: true + description: |- + Appends additional instructions at the end of the instructions for the run. This is useful for + modifying the behavior on a per-run basis without overriding other instructions. + tools: + type: object + allOf: + - $ref: '#/components/schemas/CreateRunRequestToolsItem' + nullable: true + description: |- + Override the tools the assistant can use for this run. This is useful for modifying the + behavior on a per-run basis. + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. 
+ x-oaiTypeLabel: map + CreateRunRequestTool: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsRetrieval' + - $ref: '#/components/schemas/AssistantToolsFunction' + x-oaiExpandable: true + CreateRunRequestToolsItem: + type: array + items: + $ref: '#/components/schemas/CreateRunRequestTool' + maxItems: 20 + CreateSpeechRequest: + type: object + required: + - model + - input + - voice + properties: + model: + anyOf: + - type: string + - type: string + enum: + - tts-1 + - tts-1-hd + description: 'One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`' + x-oaiTypeLabel: string + input: + type: string + maxLength: 4096 + description: The text to generate audio for. The maximum length is 4096 characters. + voice: + type: string + enum: + - alloy + - echo + - fable + - onyx + - nova + - shimmer + description: |- + The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, + `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the + [Text to speech guide](/docs/guides/text-to-speech/voice-options). + response_format: + type: string + enum: + - mp3 + - opus + - aac + - flac + description: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. + default: mp3 + speed: + type: number + format: double + minimum: 0.25 + maximum: 4 + description: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. + default: 1 + CreateThreadAndRunRequest: + type: object + required: + - assistant_id + properties: + assistant_id: + type: string + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + thread: + allOf: + - $ref: '#/components/schemas/CreateThreadRequest' + description: If no thread is provided, an empty thread will be created.
+ model: + type: string + nullable: true + description: |- + The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is + provided here, it will override the model associated with the assistant. If not, the model + associated with the assistant will be used. + instructions: + type: string + nullable: true + description: |- + Override the default system message of the assistant. This is useful for modifying the behavior + on a per-run basis. + tools: + type: object + allOf: + - $ref: '#/components/schemas/CreateRunRequestToolsItem' + nullable: true + description: |- + Override the tools the assistant can use for this run. This is useful for modifying the + behavior on a per-run basis. + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + CreateThreadRequest: + type: object + properties: + messages: + type: array + items: + $ref: '#/components/schemas/CreateMessageRequest' + description: A list of [messages](/docs/api-reference/messages) to start the thread with. + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + CreateTranscriptionRequestMultiPart: + type: object + required: + - file + - model + properties: + file: + type: string + format: binary + description: |- + The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, + mpeg, mpga, m4a, ogg, wav, or webm. 
+ x-oaiTypeLabel: file + model: + anyOf: + - type: string + - type: string + enum: + - whisper-1 + description: ID of the model to use. Only `whisper-1` is currently available. + x-oaiTypeLabel: string + language: + type: string + description: |- + The language of the input audio. Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy + and latency. + prompt: + type: string + description: |- + An optional text to guide the model's style or continue a previous audio segment. The + [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + response_format: + type: string + enum: + - json + - text + - srt + - verbose_json + - vtt + description: |- + The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + vtt. + default: json + temperature: + type: number + format: double + minimum: 0 + maximum: 1 + description: |- + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + automatically increase the temperature until certain thresholds are hit. + default: 0 + CreateTranscriptionResponse: + type: object + required: + - text + properties: + text: + type: string + description: The transcribed text for the provided audio data. + task: + type: string + enum: + - transcribe + description: The label that describes which operation type generated the accompanying response data. + language: + type: string + description: The spoken language that was detected in the audio data. + duration: + type: number + format: double + description: The total duration of the audio processed to produce accompanying transcription information.
+ segments: + type: array + items: + $ref: '#/components/schemas/AudioSegment' + description: |- + A collection of information about the timing, probabilities, and other detail of each processed + audio segment. + CreateTranslationRequestMultiPart: + type: object + required: + - file + - model + properties: + file: + type: string + format: binary + description: |- + The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, + mpeg, mpga, m4a, ogg, wav, or webm. + x-oaiTypeLabel: file + model: + anyOf: + - type: string + - type: string + enum: + - whisper-1 + description: ID of the model to use. Only `whisper-1` is currently available. + x-oaiTypeLabel: string + prompt: + type: string + description: |- + An optional text to guide the model's style or continue a previous audio segment. The + [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + response_format: + type: string + enum: + - json + - text + - srt + - verbose_json + - vtt + description: |- + The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + vtt. + default: json + temperature: + type: number + format: double + minimum: 0 + maximum: 1 + description: |- + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + automatically increase the temperature until certain thresholds are hit. + default: 0 + CreateTranslationResponse: + type: object + required: + - text + properties: + text: + type: string + description: The translated text for the provided audio data. + task: + type: string + enum: + - translate + description: The label that describes which operation type generated the accompanying response data.
+ language: + type: string + description: The spoken language that was detected in the audio data. + duration: + type: number + format: double + description: The total duration of the audio processed to produce accompanying translation information. + segments: + type: array + items: + $ref: '#/components/schemas/AudioSegment' + description: |- + A collection of information about the timing, probabilities, and other detail of each processed + audio segment. + DeleteAssistantFileResponse: + type: object + required: + - id + - deleted + - object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - assistant.file.deleted + description: |- + Deletes the association between the assistant and the file, but does not delete the + [File](/docs/api-reference/files) object itself. + DeleteAssistantResponse: + type: object + required: + - id + - deleted + - object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - assistant.deleted + DeleteFileResponse: + type: object + required: + - id + - object + - deleted + properties: + id: + type: string + object: + type: string + enum: + - file + deleted: + type: boolean + DeleteModelResponse: + type: object + required: + - id + - deleted + - object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - model + DeleteThreadResponse: + type: object + required: + - id + - deleted + - object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - thread.deleted + Embedding: + type: object + required: + - index + - embedding + - object + properties: + index: + type: integer + format: int64 + description: The index of the embedding in the list of embeddings. + embedding: + anyOf: + - type: array + items: + type: number + format: double + - type: string + description: |- + The embedding vector, which is a list of floats. 
The length of vector depends on the model as + listed in the [embedding guide](/docs/guides/embeddings). + object: + type: string + enum: + - embedding + description: The object type, which is always "embedding". + description: Represents an embedding vector returned by embedding endpoint. + Error: + type: object + required: + - type + - message + - param + - code + properties: + type: + type: string + message: + type: string + param: + type: string + nullable: true + code: + type: string + nullable: true + ErrorResponse: + type: object + required: + - error + properties: + error: + $ref: '#/components/schemas/Error' + FineTune: + type: object + required: + - id + - object + - created_at + - updated_at + - model + - fine_tuned_model + - organization_id + - status + - hyperparams + - training_files + - validation_files + - result_files + properties: + id: + type: string + description: The object identifier, which can be referenced in the API endpoints. + object: + type: string + enum: + - fine-tune + description: The object type, which is always "fine-tune". + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + updated_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the fine-tuning job was last updated. + model: + type: string + description: The base model that is being fine-tuned. + fine_tuned_model: + type: string + nullable: true + description: The name of the fine-tuned model that is being created. + organization_id: + type: string + description: The organization that owns the fine-tuning job.
+ status: + type: string + enum: + - created + - pending + - running + - succeeded + - failed + - cancelled + description: |- + The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, + `succeeded`, `failed`, or `cancelled`. + hyperparams: + type: object + properties: + n_epochs: + type: integer + format: int64 + description: |- + The number of epochs to train the model for. An epoch refers to one full cycle through the + training dataset. + batch_size: + type: integer + format: int64 + description: |- + The batch size to use for training. The batch size is the number of training examples used to + train a single forward and backward pass. + prompt_loss_weight: + type: number + format: double + description: The weight to use for loss on the prompt tokens. + learning_rate_multiplier: + type: number + format: double + description: The learning rate multiplier to use for training. + compute_classification_metrics: + type: boolean + description: The classification metrics to compute using the validation dataset at the end of every epoch. + classification_positive_class: + type: string + description: The positive class to use for computing classification metrics. + classification_n_classes: + type: integer + format: int64 + description: The number of classes to use for computing classification metrics. + required: + - n_epochs + - batch_size + - prompt_loss_weight + - learning_rate_multiplier + description: |- + The hyperparameters used for the fine-tuning job. See the + [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. + training_files: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + description: The list of files used for training. + validation_files: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + description: The list of files used for validation.
+ result_files: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + description: The compiled results files for the fine-tuning job. + events: + type: array + items: + $ref: '#/components/schemas/FineTuneEvent' + description: The list of events that have been observed in the lifecycle of the FineTune job. + description: The `FineTune` object represents a legacy fine-tune job that has been created through the API. + deprecated: true + FineTuneEvent: + type: object + required: + - object + - created_at + - level + - message + properties: + object: + type: string + created_at: + type: integer + format: unixtime + level: + type: string + message: + type: string + FineTuningEvent: + type: object + required: + - object + - created_at + - level + - message + properties: + object: + type: string + created_at: + type: integer + format: unixtime + level: + type: string + message: + type: string + data: + type: object + additionalProperties: {} + nullable: true + type: + type: string + enum: + - message + - metrics + FineTuningJob: + type: object + required: + - id + - object + - created_at + - finished_at + - model + - fine_tuned_model + - organization_id + - status + - hyperparameters + - training_file + - validation_file + - result_files + - trained_tokens + - error + properties: + id: + type: string + description: The object identifier, which can be referenced in the API endpoints. + object: + type: string + enum: + - fine_tuning.job + description: The object type, which is always "fine_tuning.job". + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + finished_at: + type: string + format: date-time + nullable: true + description: |- + The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + null if the fine-tuning job is still running. + model: + type: string + description: The base model that is being fine-tuned. 
+ fine_tuned_model: + type: string + nullable: true + description: |- + The name of the fine-tuned model that is being created. The value will be null if the + fine-tuning job is still running. + organization_id: + type: string + description: The organization that owns the fine-tuning job. + status: + type: string + enum: + - created + - pending + - running + - succeeded + - failed + - cancelled + description: |- + The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, + `succeeded`, `failed`, or `cancelled`. + hyperparameters: + type: object + properties: + n_epochs: + anyOf: + - type: string + enum: + - auto + - low + - high + - $ref: '#/components/schemas/NEpochs' + description: |- + The number of epochs to train the model for. An epoch refers to one full cycle through the + training dataset. + + "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the + number manually, we support any number between 1 and 50 epochs. + default: auto + description: |- + The hyperparameters used for the fine-tuning job. See the + [fine-tuning guide](/docs/guides/fine-tuning) for more details. + training_file: + type: string + description: |- + The file ID used for training. You can retrieve the training data with the + [Files API](/docs/api-reference/files/retrieve-contents). + validation_file: + type: string + nullable: true + description: |- + The file ID used for validation. You can retrieve the validation results with the + [Files API](/docs/api-reference/files/retrieve-contents). + result_files: + type: array + items: + type: string + description: |- + The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + [Files API](/docs/api-reference/files/retrieve-contents).
+ trained_tokens: + type: integer + format: int64 + nullable: true + description: |- + The total number of billable tokens processed by this fine tuning job. The value will be null + if the fine-tuning job is still running. + error: + type: object + properties: + message: + type: string + description: A human-readable error message. + code: + type: string + description: A machine-readable error code. + param: + type: string + nullable: true + description: |- + The parameter that was invalid, usually `training_file` or `validation_file`. This field + will be null if the failure was not parameter-specific. + nullable: true + description: |- + For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + failure. + FineTuningJobEvent: + type: object + required: + - id + - object + - created_at + - level + - message + properties: + id: + type: string + object: + type: string + created_at: + type: integer + format: unixtime + level: + type: string + enum: + - info + - warn + - error + message: + type: string + FunctionObject: + type: object + required: + - name + properties: + description: + type: string + description: |- + A description of what the function does, used by the model to choose when and how to call the + function. + name: + type: string + description: |- + The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + dashes, with a maximum length of 64. + parameters: + $ref: '#/components/schemas/FunctionParameters' + FunctionParameters: + type: object + additionalProperties: {} + description: |- + The parameters the functions accepts, described as a JSON Schema object. See the + [guide](/docs/guides/gpt/function-calling) for examples, and the + [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation + about the format.\n\nTo describe a function that accepts no parameters, provide the value + `{\"type\": \"object\", \"properties\": {}}`. 
+ Image: + type: object + properties: + b64_json: + type: string + format: base64 + description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + url: + type: string + format: uri + description: The URL of the generated image, if `response_format` is `url` (default). + revised_prompt: + type: string + description: The prompt that was used to generate the image, if there was any revision to the prompt. + description: Represents the url or the content of an image generated by the OpenAI API. + ImagesN: + type: integer + format: int64 + minimum: 1 + maximum: 10 + ImagesResponse: + type: object + required: + - created + - data + properties: + created: + type: integer + format: unixtime + data: + type: array + items: + $ref: '#/components/schemas/Image' + ListAssistantFilesResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/AssistantFileObject' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ListAssistantsResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/AssistantObject' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ListFilesResponse: + type: object + required: + - data + - object + properties: + data: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + object: + type: string + enum: + - list + ListFineTuneEventsResponse: + type: object + required: + - object + - data + properties: + object: + type: string + data: + type: array + items: + $ref: '#/components/schemas/FineTuneEvent' + ListFineTunesResponse: + type: object + required: + - object + - data + properties: + object: + type: string + data: + type: array + items: + $ref: 
'#/components/schemas/FineTune' + ListFineTuningJobEventsResponse: + type: object + required: + - object + - data + properties: + object: + type: string + data: + type: array + items: + $ref: '#/components/schemas/FineTuningJobEvent' + ListMessageFilesResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/MessageFileObject' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ListMessagesResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/MessageObject' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ListModelsResponse: + type: object + required: + - object + - data + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/Model' + ListOrder: + type: string + enum: + - asc + - desc + ListPaginatedFineTuningJobsResponse: + type: object + required: + - object + - data + - has_more + properties: + object: + type: string + data: + type: array + items: + $ref: '#/components/schemas/FineTuningJob' + has_more: + type: boolean + ListRunStepsResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/RunStepObject' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ListRunsResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/RunObject' + first_id: + type: string + last_id: + type: string + has_more: + 
type: boolean + MessageContentImageFileObject: + type: object + required: + - type + - image_file + properties: + type: + type: string + enum: + - image_file + description: Always `image_file`. + image_file: + type: object + properties: + file_id: + type: string + description: The [File](/docs/api-reference/files) ID of the image in the message content. + required: + - file_id + description: References an image [File](/docs/api-reference/files) in the content of a message. + MessageContentTextAnnotationsFileCitationObject: + type: object + required: + - type + - text + - file_citation + - start_index + - end_index + properties: + type: + type: string + enum: + - file_citation + description: Always `file_citation`. + text: + type: string + description: The text in the message content that needs to be replaced. + file_citation: + type: object + properties: + file_id: + type: string + description: The ID of the specific File the citation is from. + quote: + type: string + description: The specific quote in the file. + required: + - file_id + - quote + start_index: + type: integer + format: int64 + minimum: 0 + end_index: + type: integer + format: int64 + minimum: 0 + description: |- + A citation within the message that points to a specific quote from a specific File associated + with the assistant or the message. Generated when the assistant uses the "retrieval" tool to + search files. + MessageContentTextAnnotationsFilePathObject: + type: object + required: + - type + - text + - file_path + - start_index + - end_index + properties: + type: + type: string + enum: + - file_path + description: Always `file_path`. + text: + type: string + description: The text in the message content that needs to be replaced. + file_path: + type: object + properties: + file_id: + type: string + description: The ID of the file that was generated. 
+ required: + - file_id + start_index: + type: integer + format: int64 + minimum: 0 + end_index: + type: integer + format: int64 + minimum: 0 + description: |- + A URL for the file that's generated when the assistant used the `code_interpreter` tool to + generate a file. + MessageContentTextObject: + type: object + required: + - type + - text + properties: + type: + type: string + enum: + - text + description: Always `text`. + text: + type: object + properties: + value: + type: string + description: The data that makes up the text. + annotations: + type: array + items: + $ref: '#/components/schemas/MessageContentTextObjectAnnotations' + required: + - value + - annotations + description: The text content that is part of a message. + MessageContentTextObjectAnnotations: + oneOf: + - $ref: '#/components/schemas/MessageContentTextAnnotationsFileCitationObject' + - $ref: '#/components/schemas/MessageContentTextAnnotationsFilePathObject' + x-oaiExpandable: true + MessageFileObject: + type: object + required: + - id + - object + - created_at + - message_id + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints. + object: + type: string + enum: + - thread.message.file + description: The object type, which is always `thread.message.file`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the message file was created. + message_id: + type: string + description: The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. + description: A list of files attached to a `message`. + MessageObject: + type: object + required: + - id + - object + - created_at + - thread_id + - role + - content + - assistant_id + - run_id + - file_ids + - metadata + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints.
+ object: + type: string + enum: + - thread.message + description: The object type, which is always `thread.message`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the message was created. + thread_id: + type: string + description: The [thread](/docs/api-reference/threads) ID that this message belongs to. + role: + type: string + enum: + - user + - assistant + description: The entity that produced the message. One of `user` or `assistant`. + content: + type: array + items: + $ref: '#/components/schemas/MessageObjectContent' + description: The content of the message in array of text and/or images. + assistant_id: + type: string + nullable: true + description: |- + If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this + message. + run_id: + type: string + nullable: true + description: |- + If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of + this message. + file_ids: + type: array + items: + type: string + maxItems: 10 + description: |- + A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for + tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be + attached to a message. + default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. 
+ x-oaiTypeLabel: map + MessageObjectContent: + oneOf: + - $ref: '#/components/schemas/MessageContentImageFileObject' + - $ref: '#/components/schemas/MessageContentTextObject' + x-oaiExpandable: true + Model: + type: object + required: + - id + - created + - object + - owned_by + properties: + id: + type: string + description: The model identifier, which can be referenced in the API endpoints. + created: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) when the model was created. + object: + type: string + enum: + - model + description: The object type, which is always "model". + owned_by: + type: string + description: The organization that owns the model. + description: Describes an OpenAI model offering that can be used with the API. + ModifyAssistantRequest: + type: object + properties: + model: + type: string + description: |- + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + name: + type: string + nullable: true + maxLength: 256 + description: The name of the assistant. The maximum length is 256 characters. + description: + type: string + nullable: true + maxLength: 512 + description: The description of the assistant. The maximum length is 512 characters. + instructions: + type: string + nullable: true + maxLength: 32768 + description: The system instructions that the assistant uses. The maximum length is 32768 characters. + tools: + allOf: + - $ref: '#/components/schemas/CreateAssistantRequestToolsItem' + description: |- + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + Tools can be of types `code_interpreter`, `retrieval`, or `function`. + default: [] + file_ids: + type: array + items: + type: string + maxItems: 20 + description: |- + A list of [file](/docs/api-reference/files) IDs attached to this assistant. 
There can be a + maximum of 20 files attached to the assistant. Files are ordered by their creation date in + ascending order. + default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long. + x-oaiTypeLabel: map + ModifyMessageRequest: + type: object + properties: + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long. + x-oaiTypeLabel: map + ModifyRunRequest: + type: object + properties: + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long. + x-oaiTypeLabel: map + ModifyThreadRequest: + type: object + properties: + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long.
+ NEpochs: + type: integer + format: int64 + minimum: 1 + maximum: 50 + OpenAIFile: + type: object + required: + - id + - bytes + - created_at + - filename + - object + - purpose + - status + properties: + id: + type: string + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + format: int64 + description: The size of the file, in bytes. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the file was created. + filename: + type: string + description: The name of the file. + object: + type: string + enum: + - file + description: The object type, which is always "file". + purpose: + type: string + enum: + - fine-tune + - fine-tune-results + - assistants + - assistants_output + description: |- + The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, + `assistants`, and `assistants_output`. + status: + type: string + enum: + - uploaded + - processed + - error + description: |- + Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or + `error`. + deprecated: true + status_details: + type: string + description: |- + Deprecated. For details on why a fine-tuning training file failed validation, see the `error` + field on `fine_tuning.job`. + deprecated: true + description: The `File` object represents a document that has been uploaded to OpenAI. + Prompt: + oneOf: + - type: string + - type: array + items: + type: string + - $ref: '#/components/schemas/TokenArrayItem' + - $ref: '#/components/schemas/TokenArrayArray' + RunCompletionUsage: + type: object + required: + - completion_tokens + - prompt_tokens + - total_tokens + properties: + completion_tokens: + type: integer + format: int64 + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + format: int64 + description: Number of prompt tokens used over the course of the run. 
+ total_tokens: + type: integer + format: int64 + description: Total number of tokens used (prompt + completion). + description: |- + Usage statistics related to the run. This value will be `null` if the run is not in a terminal + state (i.e. `in_progress`, `queued`, etc.). + RunObject: + type: object + required: + - id + - object + - created_at + - thread_id + - assistant_id + - status + - required_action + - last_error + - expires_at + - started_at + - cancelled_at + - failed_at + - completed_at + - model + - instructions + - tools + - file_ids + - metadata + - usage + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints. + object: + type: string + enum: + - thread.run + description: The object type, which is always `thread.run`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the run was created. + thread_id: + type: string + description: |- + The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this + run. + assistant_id: + type: string + description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + status: + type: string + enum: + - queued + - in_progress + - requires_action + - cancelling + - cancelled + - failed + - completed + - expired + description: |- + The status of the run, which can be either `queued`, `in_progress`, `requires_action`, + `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. + required_action: + type: object + properties: + type: + type: string + enum: + - submit_tool_outputs + description: For now, this is always `submit_tool_outputs`. + submit_tool_outputs: + type: object + properties: + tool_calls: + type: array + items: + $ref: '#/components/schemas/RunToolCallObject' + description: A list of the relevant tool calls. + required: + - tool_calls + description: Details on the tool outputs needed for this run to continue. 
+ required: + - type + - submit_tool_outputs + nullable: true + description: |- + Details on the action required to continue the run. Will be `null` if no action is + required. + last_error: + type: object + properties: + code: + type: string + enum: + - server_error + - rate_limit_exceeded + description: One of `server_error` or `rate_limit_exceeded`. + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + nullable: true + description: The last error associated with this run. Will be `null` if there are no errors. + expires_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the run will expire. + started_at: + type: string + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run was started. + cancelled_at: + type: string + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run was cancelled. + failed_at: + type: string + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run failed. + completed_at: + type: string + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run was completed. + model: + type: string + description: The model that the [assistant](/docs/api-reference/assistants) used for this run. + instructions: + type: string + description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + tools: + allOf: + - $ref: '#/components/schemas/CreateRunRequestToolsItem' + description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + file_ids: + type: array + items: + type: string + description: |- + The list of [File](/docs/api-reference/files) IDs the + [assistant](/docs/api-reference/assistants) used for this run. 
+ default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long. + x-oaiTypeLabel: map + usage: + type: object + allOf: + - $ref: '#/components/schemas/RunCompletionUsage' + nullable: true + description: Represents an execution run on a [thread](/docs/api-reference/threads). + RunStepCompletionUsage: + type: object + required: + - completion_tokens + - prompt_tokens + - total_tokens + properties: + completion_tokens: + type: integer + format: int64 + description: Number of completion tokens used over the course of the run step. + prompt_tokens: + type: integer + format: int64 + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + format: int64 + description: Total number of tokens used (prompt + completion). + description: |- + Usage statistics related to the run step. This value will be `null` while the run step's status + is `in_progress`. + RunStepDetails: + oneOf: + - $ref: '#/components/schemas/RunStepDetailsMessageCreationObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsObject' + x-oaiExpandable: true + RunStepDetailsMessageCreationObject: + type: object + required: + - type + - message_creation + properties: + type: + type: string + enum: + - message_creation + description: Always `message_creation`. + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - message_id + description: Details of the message creation by the run step.
+ RunStepDetailsToolCallsCodeObject: + type: object + required: + - id + - type + - code_interpreter + properties: + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: + - code_interpreter + description: |- + The type of tool call. This is always going to be `code_interpreter` for this type of tool + call. + code_interpreter: + type: object + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + allOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputs' + description: |- + The outputs from the Code Interpreter tool call. Code Interpreter can output one or more + items, including text (`logs`) or images (`image`). Each of these are represented by a + different object type. + required: + - input + - outputs + description: The Code Interpreter tool call definition. + description: Details of the Code Interpreter tool call the run step was involved in. + RunStepDetailsToolCallsCodeOutput: + oneOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject' + x-oaiExpandable: true + RunStepDetailsToolCallsCodeOutputImageObject: + type: object + required: + - type + - image + properties: + type: + type: string + enum: + - image + description: Always `image`. + image: + type: object + properties: + file_id: + type: string + description: The [file](/docs/api-reference/files) ID of the image. + required: + - file_id + RunStepDetailsToolCallsCodeOutputLogsObject: + type: object + required: + - type + - logs + properties: + type: + type: string + enum: + - logs + description: Always `logs`. + logs: + type: string + description: The text output from the Code Interpreter tool call. + description: Text output from the Code Interpreter tool call as part of a run step. 
+ RunStepDetailsToolCallsCodeOutputs: + type: array + items: + $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutput' + RunStepDetailsToolCallsFunctionObject: + type: object + required: + - id + - type + - function + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + enum: + - function + description: The type of tool call. This is always going to be `function` for this type of tool call. + function: + type: object + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + nullable: true + description: |- + The output of the function. This will be `null` if the outputs have not been + [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + required: + - name + - arguments + - output + description: The definition of the function that was called. + RunStepDetailsToolCallsObject: + type: object + required: + - type + - tool_calls + properties: + type: + type: string + enum: + - tool_calls + description: Always `tool_calls`. + tool_calls: + allOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsObjectToolCallsItem' + description: |- + An array of tool calls the run step was involved in. These can be associated with one of three + types of tools: `code_interpreter`, `retrieval`, or `function`. + description: Details of the tool call. 
+ RunStepDetailsToolCallsObjectToolCall: + oneOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsRetrievalObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsFunctionObject' + x-oaiExpandable: true + RunStepDetailsToolCallsObjectToolCallsItem: + type: array + items: + $ref: '#/components/schemas/RunStepDetailsToolCallsObjectToolCall' + RunStepDetailsToolCallsRetrievalObject: + type: object + required: + - id + - type + - retrieval + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + enum: + - retrieval + description: The type of tool call. This is always going to be `retrieval` for this type of tool call. + retrieval: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + RunStepObject: + type: object + required: + - id + - object + - created_at + - assistant_id + - thread_id + - run_id + - type + - status + - step_details + - last_error + - expires_at + - cancelled_at + - failed_at + - completed_at + - metadata + - usage + properties: + id: + type: string + description: The identifier of the run step, which can be referenced in API endpoints. + object: + type: string + enum: + - thread.run.step + description: The object type, which is always `thread.run.step`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the run step was created. + assistant_id: + type: string + description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + thread_id: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. + run_id: + type: string + description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. 
+        type:
+          type: string
+          enum:
+            - message_creation
+            - tool_calls
+          description: The type of run step, which can be either `message_creation` or `tool_calls`.
+        status:
+          type: string
+          enum:
+            - in_progress
+            - cancelled
+            - failed
+            - completed
+            - expired
+          description: |-
+            The status of the run step, which can be either `in_progress`, `cancelled`, `failed`,
+            `completed`, or `expired`.
+        step_details:
+          allOf:
+            - $ref: '#/components/schemas/RunStepDetails'
+          description: The details of the run step.
+        last_error:
+          type: object
+          properties:
+            code:
+              type: string
+              enum:
+                - server_error
+                - rate_limit_exceeded
+              description: One of `server_error` or `rate_limit_exceeded`.
+            message:
+              type: string
+              description: A human-readable description of the error.
+          required:
+            - code
+            - message
+          nullable: true
+          description: The last error associated with this run step. Will be `null` if there are no errors.
+        expires_at:
+          type: string
+          format: date-time
+          nullable: true
+          description: |-
+            The Unix timestamp (in seconds) for when the run step expired. A step is considered expired
+            if the parent run is expired.
+        cancelled_at:
+          type: string
+          format: date-time
+          nullable: true
+          description: The Unix timestamp (in seconds) for when the run step was cancelled.
+        failed_at:
+          type: string
+          format: date-time
+          nullable: true
+          description: The Unix timestamp (in seconds) for when the run step failed.
+        completed_at:
+          type: string
+          format: date-time
+          nullable: true
+          description: The Unix timestamp (in seconds) for when the run step completed.
+        metadata:
+          type: object
+          additionalProperties:
+            type: string
+          nullable: true
+          description: |-
+            Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+            additional information about the object in a structured format. Keys can be a maximum of 64
+            characters long and values can be a maximum of 512 characters long.
+ x-oaiTypeLabel: map + usage: + type: object + allOf: + - $ref: '#/components/schemas/RunCompletionUsage' + nullable: true + description: Represents a step in execution of a run. + RunToolCallObject: + type: object + required: + - id + - type + - function + properties: + id: + type: string + description: |- + The ID of the tool call. This ID must be referenced when you submit the tool outputs in using + the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + type: + type: string + enum: + - function + description: The type of tool call the output is required for. For now, this is always `function`. + function: + type: object + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments that the model expects you to pass to the function. + required: + - name + - arguments + description: The function definition. + description: Tool call objects + Stop: + oneOf: + - type: string + - $ref: '#/components/schemas/StopSequences' + StopSequences: + type: array + items: + type: string + minItems: 1 + maxItems: 4 + SubmitToolOutputsRunRequest: + type: object + required: + - tool_outputs + properties: + tool_outputs: + type: object + properties: + tool_call_id: + type: string + description: |- + The ID of the tool call in the `required_action` object within the run object the output is + being submitted for. + output: + type: string + description: The output of the tool call to be submitted to continue the run. + description: A list of tools for which the outputs are being submitted. + SuffixString: + type: string + minLength: 1 + maxLength: 40 + ThreadObject: + type: object + required: + - id + - object + - created_at + - metadata + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints. + object: + type: string + enum: + - thread + description: The object type, which is always `thread`. 
+        created_at:
+          type: integer
+          format: unixtime
+          description: The Unix timestamp (in seconds) for when the thread was created.
+        metadata:
+          type: object
+          additionalProperties:
+            type: string
+          nullable: true
+          description: |-
+            Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+            additional information about the object in a structured format. Keys can be a maximum of 64
+            characters long and values can be a maximum of 512 characters long.
+          x-oaiTypeLabel: map
+      description: Represents a thread that contains [messages](/docs/api-reference/messages).
+    TokenArrayArray:
+      type: array
+      items:
+        $ref: '#/components/schemas/TokenArrayItem'
+      minItems: 1
+    TokenArrayItem:
+      type: array
+      items:
+        type: integer
+        format: int64
+      minItems: 1
+    User:
+      type: string
+  securitySchemes:
+    BearerAuth:
+      type: http
+      scheme: bearer
+servers:
+  - url: https://api.openai.com/v1
+    description: OpenAI Endpoint
+    variables: {}
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000..5a3c43f3f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,178 @@
+## Ignore Visual Studio temporary files, build results, and
+## files generated by popular Visual Studio add-ons.
+ +# User-specific files +*.suo +*.user +*.sln.docstates +.vs/ +*.lock.json +developer/ +launch.json +launchSettings.json + +# Default Assets restore directory +.assets + +# Build results +binaries/ +[Dd]ebug*/ +[Rr]elease/ +build/ +restoredPackages/ +PolicheckOutput/ +tools/net46/ +tools/SdkBuildTools/ +tools/Microsoft.WindowsAzure.Build.Tasks/packages/ +PublishedNugets/ +src/NuGet.Config +tools/7-zip/ +#tools/LocalNugetFeed/Microsoft.Internal.NetSdkBuild.Mgmt.Tools.*.nupkg + +[Tt]est[Rr]esult +[Bb]uild[Ll]og.* + +*_i.c +*_p.c +*.ilk +*.meta +*.obj +*.pch +*.pdb +*.pgc +*.pgd +*.rsp +*.sbr +*.tlb +*.tli +*.tlh +*.tmp +*.vspscc +*.vssscc +.builds + +*.pidb + +*.log +*.scc +# Visual C++ cache files +ipch/ +*.aps +*.ncb +*.opensdf +*.sdf + +# Visual Studio profiler +*.psess +*.vsp + +# VS Code +**/.vscode/* +!.vscode/cspell.json + +# Code analysis +*.CodeAnalysisLog.xml + +# Guidance Automation Toolkit +*.gpState + +# ReSharper is a .NET coding add-in +_ReSharper*/ + +*.[Rr]e[Ss]harper + +# Rider IDE +.idea + +# NCrunch +*.ncrunch* +.*crunch*.local.xml + +# Installshield output folder +[Ee]xpress + +# DocProject is a documentation generator add-in +DocProject/buildhelp/ +DocProject/Help/*.HxT +DocProject/Help/*.HxC +DocProject/Help/*.hhc +DocProject/Help/*.hhk +DocProject/Help/*.hhp +DocProject/Help/Html2 +DocProject/Help/html + +# Click-Once directory +publish + +# Publish Web Output +*.Publish.xml + +# Others +[Bb]in +[Oo]bj +TestResults +[Tt]est[Rr]esult* +*.Cache +ClientBin +~$* +*.dbmdl + +*.[Pp]ublish.xml + +Generated_Code #added for RIA/Silverlight projects + +# Build tasks +tools/*.dll + +# Sensitive files +*.keys +!Azure.Extensions.AspNetCore.DataProtection.Keys +!Azure.Security.KeyVault.Keys +*.pfx +TestConfigurations.xml +*.json.env +*.bicep.env + +# Backup & report files from converting an old project file to a newer +# Visual Studio version. 
Backup files are not needed, because we have git ;-) +_UpgradeReport_Files/ +Backup*/ +UpgradeLog*.XML + +# NuGet +packages +packages/repositories.config +testPackages + +# Mac development +.DS_Store + +# Specification DLLs +*.Specification.dll + +# Generated readme.txt files # +src/*/readme.txt + +build.out +.nuget/ + +# Azure Project +csx/ +*.GhostDoc.xml +pingme.txt + +# TS/Node files +dist/ +node_modules/ + +# MSBuild binary log files +msbuild.binlog + +# BenchmarkDotNet +BenchmarkDotNet.Artifacts + +artifacts +.assets + +# Temporary typespec folders for typespec generation +TempTypeSpecFiles/ diff --git a/assistants/operations.tsp b/assistants/operations.tsp index 350f68d7b..e462c6f5b 100644 --- a/assistants/operations.tsp +++ b/assistants/operations.tsp @@ -1,6 +1,7 @@ import "@typespec/http"; import "@typespec/openapi"; +import "../common/models.tsp"; import "../common/errors.tsp"; import "./models.tsp"; @@ -34,7 +35,7 @@ interface Assistants { * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` * for descending order. */ - @query order?: "asc" | "desc" = "desc"; + @query order?: ListOrder = ListOrder.desc; /** * A cursor for use in pagination. `after` is an object ID that defines your place in the list. @@ -116,7 +117,7 @@ interface Assistants { * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` * for descending order. */ - @query order?: "asc" | "desc" = "desc"; + @query order?: ListOrder = ListOrder.desc; /** * A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
diff --git a/common/models.tsp b/common/models.tsp index 611e53e7a..13ab11ca2 100644 --- a/common/models.tsp +++ b/common/models.tsp @@ -10,6 +10,11 @@ model TokenArray is safeint[]; @minItems(1) model TokenArrayArray is TokenArray[]; +enum ListOrder { + asc: "asc", + desc: "desc", +} + model FunctionObject { /** * A description of what the function does, used by the model to choose when and how to call the diff --git a/files/operations.tsp b/files/operations.tsp index dda5f95b8..94320fa3d 100644 --- a/files/operations.tsp +++ b/files/operations.tsp @@ -36,7 +36,7 @@ interface Files { listFiles( /** Only return files with the given purpose. */ // NOTE: This is just a string in the OpenAPI spec. - @query purpose?: FILE_PURPOSE, + @query purpose?: string, ): ListFilesResponse | ErrorResponse; @route("{file_id}") diff --git a/messages/operations.tsp b/messages/operations.tsp index 0c9843e0a..652b79007 100644 --- a/messages/operations.tsp +++ b/messages/operations.tsp @@ -1,6 +1,7 @@ import "@typespec/http"; import "@typespec/openapi"; +import "../common/models.tsp"; import "../common/errors.tsp"; import "./models.tsp"; @@ -40,7 +41,7 @@ interface Messages { * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` * for descending order. */ - @query order?: "asc" | "desc" = "desc"; + @query order?: ListOrder = ListOrder.desc; /** * A cursor for use in pagination. `after` is an object ID that defines your place in the list. @@ -107,7 +108,7 @@ interface Messages { * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` * for descending order. */ - @query order?: "asc" | "desc" = "desc"; + @query order?: ListOrder = ListOrder.desc; /** * A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
diff --git a/runs/operations.tsp b/runs/operations.tsp index 8bf1f0df1..69a96e03c 100644 --- a/runs/operations.tsp +++ b/runs/operations.tsp @@ -1,6 +1,7 @@ import "@typespec/http"; import "@typespec/openapi"; +import "../common/models.tsp"; import "../common/errors.tsp"; import "./models.tsp"; @@ -51,7 +52,7 @@ interface Runs { * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` * for descending order. */ - @query order?: "asc" | "desc" = "desc"; + @query order?: ListOrder = ListOrder.desc; /** * A cursor for use in pagination. `after` is an object ID that defines your place in the list. @@ -150,7 +151,7 @@ interface Runs { * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` * for descending order. */ - @query order?: "asc" | "desc" = "desc"; + @query order?: ListOrder = ListOrder.desc; /** * A cursor for use in pagination. `after` is an object ID that defines your place in the list. diff --git a/tsp-output/@typespec/openapi3/openapi.yaml b/tsp-output/@typespec/openapi3/openapi.yaml index 8f62d8667..346061391 100644 --- a/tsp-output/@typespec/openapi3/openapi.yaml +++ b/tsp-output/@typespec/openapi3/openapi.yaml @@ -64,15 +64,7 @@ paths: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` for descending order. schema: - type: string - enum: - - asc - - desc - - desc - - desc - - desc - - desc - - desc + $ref: '#/components/schemas/ListOrder' default: desc - name: after in: query @@ -250,15 +242,7 @@ paths: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` for descending order. schema: - type: string - enum: - - asc - - desc - - desc - - desc - - desc - - desc - - desc + $ref: '#/components/schemas/ListOrder' default: desc - name: after in: query @@ -563,11 +547,6 @@ paths: description: Only return files with the given purpose. 
schema: type: string - enum: - - fine-tune - - fine-tune-results - - assistants - - assistants_output responses: '200': description: The request has succeeded. @@ -1339,15 +1318,7 @@ paths: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` for descending order. schema: - type: string - enum: - - asc - - desc - - desc - - desc - - desc - - desc - - desc + $ref: '#/components/schemas/ListOrder' default: desc - name: after in: query @@ -1485,15 +1456,7 @@ paths: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` for descending order. schema: - type: string - enum: - - asc - - desc - - desc - - desc - - desc - - desc - - desc + $ref: '#/components/schemas/ListOrder' default: desc - name: after in: query @@ -1625,15 +1588,7 @@ paths: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` for descending order. schema: - type: string - enum: - - asc - - desc - - desc - - desc - - desc - - desc - - desc + $ref: '#/components/schemas/ListOrder' default: desc - name: after in: query @@ -1803,15 +1758,7 @@ paths: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` for descending order. 
schema: - type: string - enum: - - asc - - desc - - desc - - desc - - desc - - desc - - desc + $ref: '#/components/schemas/ListOrder' default: desc - name: after in: query @@ -4974,6 +4921,13 @@ components: type: array items: $ref: '#/components/schemas/Model' + ListOrder: + anyOf: + - type: string + - type: string + enum: + - asc + - desc ListPaginatedFineTuningJobsResponse: type: object required: diff --git a/tspconfig.yaml b/tspconfig.yaml new file mode 100644 index 000000000..3788a1428 --- /dev/null +++ b/tspconfig.yaml @@ -0,0 +1,4 @@ +options: + "@azure-tools/typespec-csharp": + branded: false + generate-test-project: true \ No newline at end of file From 43dd97bef97a8a995551d5362e54c7a73a8ccd6f Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Tue, 20 Feb 2024 01:29:27 -0800 Subject: [PATCH 10/18] Fix fine-tuning operations.tsp --- .dotnet/src/Generated/FineTunes.cs | 736 -- .dotnet/src/Generated/FineTuning.cs | 661 +- .dotnet/src/Generated/FineTuningJobs.cs | 710 -- .../CreateFineTuneRequest.Serialization.cs | 386 -- .../Generated/Models/CreateFineTuneRequest.cs | 295 - .../Models/CreateFineTuneRequestModel.cs | 54 - .../Models/FineTune.Serialization.cs | 287 - .dotnet/src/Generated/Models/FineTune.cs | 169 - .../Models/FineTuneEvent.Serialization.cs | 156 - .dotnet/src/Generated/Models/FineTuneEvent.cs | 93 - .../FineTuneHyperparams.Serialization.cs | 197 - .../Generated/Models/FineTuneHyperparams.cs | 117 - .../src/Generated/Models/FineTuneObject.cs | 45 - .../src/Generated/Models/FineTuneStatus.cs | 57 - ...istFineTuneEventsResponse.Serialization.cs | 150 - .../Models/ListFineTuneEventsResponse.cs | 81 - .../ListFineTunesResponse.Serialization.cs | 150 - .../Generated/Models/ListFineTunesResponse.cs | 81 - .dotnet/src/Generated/OpenAIClient.cs | 15 +- .dotnet/src/Generated/OpenAIModelFactory.cs | 374 +- ...eTuningJobsTests.cs => FineTuningTests.cs} | 4 +- .../@typespec/openapi3/openapi.yaml | 6019 ----------------- fine-tuning/operations.tsp | 
162 +- tsp-output/@typespec/openapi3/openapi.yaml | 170 +- 24 files changed, 809 insertions(+), 10360 deletions(-) delete mode 100644 .dotnet/src/Generated/FineTunes.cs delete mode 100644 .dotnet/src/Generated/FineTuningJobs.cs delete mode 100644 .dotnet/src/Generated/Models/CreateFineTuneRequest.Serialization.cs delete mode 100644 .dotnet/src/Generated/Models/CreateFineTuneRequest.cs delete mode 100644 .dotnet/src/Generated/Models/CreateFineTuneRequestModel.cs delete mode 100644 .dotnet/src/Generated/Models/FineTune.Serialization.cs delete mode 100644 .dotnet/src/Generated/Models/FineTune.cs delete mode 100644 .dotnet/src/Generated/Models/FineTuneEvent.Serialization.cs delete mode 100644 .dotnet/src/Generated/Models/FineTuneEvent.cs delete mode 100644 .dotnet/src/Generated/Models/FineTuneHyperparams.Serialization.cs delete mode 100644 .dotnet/src/Generated/Models/FineTuneHyperparams.cs delete mode 100644 .dotnet/src/Generated/Models/FineTuneObject.cs delete mode 100644 .dotnet/src/Generated/Models/FineTuneStatus.cs delete mode 100644 .dotnet/src/Generated/Models/ListFineTuneEventsResponse.Serialization.cs delete mode 100644 .dotnet/src/Generated/Models/ListFineTuneEventsResponse.cs delete mode 100644 .dotnet/src/Generated/Models/ListFineTunesResponse.Serialization.cs delete mode 100644 .dotnet/src/Generated/Models/ListFineTunesResponse.cs rename .dotnet/tests/Generated/Tests/{FineTuningJobsTests.cs => FineTuningTests.cs} (70%) delete mode 100644 .dotnet/tsp-output/@typespec/openapi3/openapi.yaml diff --git a/.dotnet/src/Generated/FineTunes.cs b/.dotnet/src/Generated/FineTunes.cs deleted file mode 100644 index 04c8bae74..000000000 --- a/.dotnet/src/Generated/FineTunes.cs +++ /dev/null @@ -1,736 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using System.ClientModel.Internal; -using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; -using System.Threading; -using System.Threading.Tasks; -using OpenAI.Models; 
- -namespace OpenAI -{ - // Data plane generated sub-client. - /// The FineTunes sub-client. - public partial class FineTunes - { - private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; - private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; - private readonly Uri _endpoint; - - /// The ClientDiagnostics is used to provide tracing support for the client library. - internal TelemetrySource ClientDiagnostics { get; } - - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; - - /// Initializes a new instance of FineTunes for mocking. - protected FineTunes() - { - } - - /// Initializes a new instance of FineTunes. - /// The handler for diagnostic messaging in the client. - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. - /// OpenAI Endpoint. - internal FineTunes(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) - { - ClientDiagnostics = clientDiagnostics; - _pipeline = pipeline; - _keyCredential = keyCredential; - _endpoint = endpoint; - } - - /// - /// Creates a job that fine-tunes a specified model from a given dataset. - /// - /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - /// - /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - /// - /// The to use. - /// The cancellation token to use. - /// is null. 
- [Obsolete("deprecated")] - public virtual async Task> CreateFineTuneAsync(CreateFineTuneRequest fineTune, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNull(fineTune, nameof(fineTune)); - - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = fineTune.ToRequestBody(); - Result result = await CreateFineTuneAsync(content, context).ConfigureAwait(false); - return Result.FromValue(FineTune.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// Creates a job that fine-tunes a specified model from a given dataset. - /// - /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - /// - /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - /// - /// The to use. - /// The cancellation token to use. - /// is null. - [Obsolete("deprecated")] - public virtual Result CreateFineTune(CreateFineTuneRequest fineTune, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNull(fineTune, nameof(fineTune)); - - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = fineTune.ToRequestBody(); - Result result = CreateFineTune(content, context); - return Result.FromValue(FineTune.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// [Protocol Method] Creates a job that fine-tunes a specified model from a given dataset. - /// - /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - /// - /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. 
- /// - /// - /// - /// - /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// Service returned a non-success status code. - /// The response returned from the service. - [Obsolete("deprecated")] - public virtual async Task CreateFineTuneAsync(RequestBody content, RequestOptions context = null) - { - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("FineTunes.CreateFineTune"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateFineTuneRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// [Protocol Method] Creates a job that fine-tunes a specified model from a given dataset. - /// - /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - /// - /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// Service returned a non-success status code. - /// The response returned from the service. 
- [Obsolete("deprecated")] - public virtual Result CreateFineTune(RequestBody content, RequestOptions context = null) - { - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("FineTunes.CreateFineTune"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateFineTuneRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// List your organization's fine-tuning jobs. - /// The cancellation token to use. - [Obsolete("deprecated")] - public virtual async Task> GetFineTunesAsync(CancellationToken cancellationToken = default) - { - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetFineTunesAsync(context).ConfigureAwait(false); - return Result.FromValue(ListFineTunesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// List your organization's fine-tuning jobs. - /// The cancellation token to use. - [Obsolete("deprecated")] - public virtual Result GetFineTunes(CancellationToken cancellationToken = default) - { - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetFineTunes(context); - return Result.FromValue(ListFineTunesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// [Protocol Method] List your organization's fine-tuning jobs - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. - /// The response returned from the service. 
- [Obsolete("deprecated")] - public virtual async Task GetFineTunesAsync(RequestOptions context) - { - using var scope = ClientDiagnostics.CreateSpan("FineTunes.GetFineTunes"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetFineTunesRequest(context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// [Protocol Method] List your organization's fine-tuning jobs - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. - /// The response returned from the service. - [Obsolete("deprecated")] - public virtual Result GetFineTunes(RequestOptions context) - { - using var scope = ClientDiagnostics.CreateSpan("FineTunes.GetFineTunes"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetFineTunesRequest(context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// Gets info about the fine-tune job. - /// - /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - /// - /// The ID of the fine-tune job. - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. 
- [Obsolete("deprecated")] - public virtual async Task> RetrieveFineTuneAsync(string fineTuneId, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await RetrieveFineTuneAsync(fineTuneId, context).ConfigureAwait(false); - return Result.FromValue(FineTune.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// Gets info about the fine-tune job. - /// - /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - /// - /// The ID of the fine-tune job. - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. - [Obsolete("deprecated")] - public virtual Result RetrieveFineTune(string fineTuneId, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = RetrieveFineTune(fineTuneId, context); - return Result.FromValue(FineTune.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// [Protocol Method] Gets info about the fine-tune job. - /// - /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The ID of the fine-tune job. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. 
- [Obsolete("deprecated")] - public virtual async Task RetrieveFineTuneAsync(string fineTuneId, RequestOptions context) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTunes.RetrieveFineTune"); - scope.Start(); - try - { - using PipelineMessage message = CreateRetrieveFineTuneRequest(fineTuneId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// [Protocol Method] Gets info about the fine-tune job. - /// - /// [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The ID of the fine-tune job. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - [Obsolete("deprecated")] - public virtual Result RetrieveFineTune(string fineTuneId, RequestOptions context) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTunes.RetrieveFineTune"); - scope.Start(); - try - { - using PipelineMessage message = CreateRetrieveFineTuneRequest(fineTuneId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// Get fine-grained status updates for a fine-tune job. - /// The ID of the fine-tune job to get events for. - /// - /// Whether to stream events for the fine-tune job. 
If set to true, events will be sent as - /// data-only - /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - /// as they become available. The stream will terminate with a `data: [DONE]` message when the - /// job is finished (succeeded, cancelled, or failed). - /// - /// If set to false, only events generated so far will be returned. - /// - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. - [Obsolete("deprecated")] - public virtual async Task> GetFineTuneEventsAsync(string fineTuneId, bool? stream = null, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetFineTuneEventsAsync(fineTuneId, stream, context).ConfigureAwait(false); - return Result.FromValue(ListFineTuneEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// Get fine-grained status updates for a fine-tune job. - /// The ID of the fine-tune job to get events for. - /// - /// Whether to stream events for the fine-tune job. If set to true, events will be sent as - /// data-only - /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - /// as they become available. The stream will terminate with a `data: [DONE]` message when the - /// job is finished (succeeded, cancelled, or failed). - /// - /// If set to false, only events generated so far will be returned. - /// - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. - [Obsolete("deprecated")] - public virtual Result GetFineTuneEvents(string fineTuneId, bool? 
stream = null, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetFineTuneEvents(fineTuneId, stream, context); - return Result.FromValue(ListFineTuneEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// [Protocol Method] Get fine-grained status updates for a fine-tune job. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The ID of the fine-tune job to get events for. - /// - /// Whether to stream events for the fine-tune job. If set to true, events will be sent as - /// data-only - /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - /// as they become available. The stream will terminate with a `data: [DONE]` message when the - /// job is finished (succeeded, cancelled, or failed). - /// - /// If set to false, only events generated so far will be returned. - /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - [Obsolete("deprecated")] - public virtual async Task GetFineTuneEventsAsync(string fineTuneId, bool? 
stream, RequestOptions context) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTunes.GetFineTuneEvents"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetFineTuneEventsRequest(fineTuneId, stream, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// [Protocol Method] Get fine-grained status updates for a fine-tune job. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The ID of the fine-tune job to get events for. - /// - /// Whether to stream events for the fine-tune job. If set to true, events will be sent as - /// data-only - /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - /// as they become available. The stream will terminate with a `data: [DONE]` message when the - /// job is finished (succeeded, cancelled, or failed). - /// - /// If set to false, only events generated so far will be returned. - /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - [Obsolete("deprecated")] - public virtual Result GetFineTuneEvents(string fineTuneId, bool? 
stream, RequestOptions context) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTunes.GetFineTuneEvents"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetFineTuneEventsRequest(fineTuneId, stream, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// Immediately cancel a fine-tune job. - /// The ID of the fine-tune job to cancel. - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. - [Obsolete("deprecated")] - public virtual async Task> CancelFineTuneAsync(string fineTuneId, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await CancelFineTuneAsync(fineTuneId, context).ConfigureAwait(false); - return Result.FromValue(FineTune.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// Immediately cancel a fine-tune job. - /// The ID of the fine-tune job to cancel. - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. - [Obsolete("deprecated")] - public virtual Result CancelFineTune(string fineTuneId, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = CancelFineTune(fineTuneId, context); - return Result.FromValue(FineTune.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// [Protocol Method] Immediately cancel a fine-tune job. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
- /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The ID of the fine-tune job to cancel. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - [Obsolete("deprecated")] - public virtual async Task CancelFineTuneAsync(string fineTuneId, RequestOptions context) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTunes.CancelFineTune"); - scope.Start(); - try - { - using PipelineMessage message = CreateCancelFineTuneRequest(fineTuneId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// [Protocol Method] Immediately cancel a fine-tune job. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The ID of the fine-tune job to cancel. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. 
- [Obsolete("deprecated")] - public virtual Result CancelFineTune(string fineTuneId, RequestOptions context) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuneId, nameof(fineTuneId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTunes.CancelFineTune"); - scope.Start(); - try - { - using PipelineMessage message = CreateCancelFineTuneRequest(fineTuneId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - internal PipelineMessage CreateCreateFineTuneRequest(RequestBody content, RequestOptions context) - { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine-tunes", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); - request.Content = content; - return message; - } - - internal PipelineMessage CreateGetFineTunesRequest(RequestOptions context) - { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine-tunes", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - return message; - } - - internal PipelineMessage CreateRetrieveFineTuneRequest(string fineTuneId, RequestOptions context) - { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine-tunes/", false); - uri.AppendPath(fineTuneId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - return message; - } - - internal PipelineMessage 
CreateGetFineTuneEventsRequest(string fineTuneId, bool? stream, RequestOptions context) - { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine-tunes/", false); - uri.AppendPath(fineTuneId, true); - uri.AppendPath("/events", false); - if (stream != null) - { - uri.AppendQuery("stream", stream.Value, true); - } - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - return message; - } - - internal PipelineMessage CreateCancelFineTuneRequest(string fineTuneId, RequestOptions context) - { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine-tunes/", false); - uri.AppendPath(fineTuneId, true); - uri.AppendPath("/cancel", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - return message; - } - - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); - } -} diff --git a/.dotnet/src/Generated/FineTuning.cs b/.dotnet/src/Generated/FineTuning.cs index 40f208a6e..a8038525b 100644 --- a/.dotnet/src/Generated/FineTuning.cs +++ b/.dotnet/src/Generated/FineTuning.cs @@ -4,9 +4,12 @@ using System; using System.ClientModel; +using System.ClientModel.Internal; using 
System.ClientModel.Primitives; using System.ClientModel.Primitives.Pipeline; using System.Threading; +using System.Threading.Tasks; +using OpenAI.Models; namespace OpenAI { @@ -44,12 +47,662 @@ internal FineTuning(TelemetrySource clientDiagnostics, MessagePipeline pipeline, _endpoint = endpoint; } - private FineTuningJobs _cachedFineTuningJobs; + /// + /// Creates a fine-tuning job which begins the process of creating a new model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// The to use. + /// The cancellation token to use. + /// is null. + public virtual async Task> CreateFineTuningJobAsync(CreateFineTuningJobRequest job, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(job, nameof(job)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = job.ToRequestBody(); + Result result = await CreateFineTuningJobAsync(content, context).ConfigureAwait(false); + return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Creates a fine-tuning job which begins the process of creating a new model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// The to use. + /// The cancellation token to use. + /// is null. 
+ public virtual Result CreateFineTuningJob(CreateFineTuningJobRequest job, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNull(job, nameof(job)); + + RequestOptions context = FromCancellationToken(cancellationToken); + using RequestBody content = job.ToRequestBody(); + Result result = CreateFineTuningJob(content, context); + return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates a fine-tuning job which begins the process of creating a new model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task CreateFineTuningJobAsync(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuning.CreateFineTuningJob"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateFineTuningJobRequest(content, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates a fine-tuning job which begins the process of creating a new model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result CreateFineTuningJob(RequestBody content, RequestOptions context = null) + { + ClientUtilities.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuning.CreateFineTuningJob"); + scope.Start(); + try + { + using PipelineMessage message = CreateCreateFineTuningJobRequest(content, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// List your organization's fine-tuning jobs. 
+ /// Identifier for the last job from the previous pagination request. + /// Number of fine-tuning jobs to retrieve. + /// The cancellation token to use. + public virtual async Task> GetPaginatedFineTuningJobsAsync(string after = null, long? limit = null, CancellationToken cancellationToken = default) + { + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetPaginatedFineTuningJobsAsync(after, limit, context).ConfigureAwait(false); + return Result.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// List your organization's fine-tuning jobs. + /// Identifier for the last job from the previous pagination request. + /// Number of fine-tuning jobs to retrieve. + /// The cancellation token to use. + public virtual Result GetPaginatedFineTuningJobs(string after = null, long? limit = null, CancellationToken cancellationToken = default) + { + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetPaginatedFineTuningJobs(after, limit, context); + return Result.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] List your organization's fine-tuning jobs + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// Identifier for the last job from the previous pagination request. + /// Number of fine-tuning jobs to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetPaginatedFineTuningJobsAsync(string after, long? 
limit, RequestOptions context) + { + using var scope = ClientDiagnostics.CreateSpan("FineTuning.GetPaginatedFineTuningJobs"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] List your organization's fine-tuning jobs + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// Identifier for the last job from the previous pagination request. + /// Number of fine-tuning jobs to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetPaginatedFineTuningJobs(string after, long? limit, RequestOptions context) + { + using var scope = ClientDiagnostics.CreateSpan("FineTuning.GetPaginatedFineTuningJobs"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// Get info about a fine-tuning job. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// The to use. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. 
+ public virtual async Task> RetrieveFineTuningJobAsync(string fineTuningJobId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await RetrieveFineTuningJobAsync(fineTuningJobId, context).ConfigureAwait(false); + return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Get info about a fine-tuning job. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// The to use. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result RetrieveFineTuningJob(string fineTuningJobId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = RetrieveFineTuningJob(fineTuningJobId, context); + return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Get info about a fine-tuning job. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The to use. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task RetrieveFineTuningJobAsync(string fineTuningJobId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuning.RetrieveFineTuningJob"); + scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveFineTuningJobRequest(fineTuningJobId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Get info about a fine-tuning job. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The to use. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result RetrieveFineTuningJob(string fineTuningJobId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuning.RetrieveFineTuningJob"); + scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveFineTuningJobRequest(fineTuningJobId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Immediately cancel a fine-tune job. + /// The ID of the fine-tuning job to cancel. + /// The cancellation token to use. + /// is null. 
+ /// is an empty string, and was expected to be non-empty. + public virtual async Task> CancelFineTuningJobAsync(string fineTuningJobId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await CancelFineTuningJobAsync(fineTuningJobId, context).ConfigureAwait(false); + return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Immediately cancel a fine-tune job. + /// The ID of the fine-tuning job to cancel. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result CancelFineTuningJob(string fineTuningJobId, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = CancelFineTuningJob(fineTuningJobId, context); + return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Immediately cancel a fine-tune job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job to cancel. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual async Task CancelFineTuningJobAsync(string fineTuningJobId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuning.CancelFineTuningJob"); + scope.Start(); + try + { + using PipelineMessage message = CreateCancelFineTuningJobRequest(fineTuningJobId, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Immediately cancel a fine-tune job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job to cancel. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result CancelFineTuningJob(string fineTuningJobId, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuning.CancelFineTuningJob"); + scope.Start(); + try + { + using PipelineMessage message = CreateCancelFineTuningJobRequest(fineTuningJobId, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Get status updates for a fine-tuning job. + /// The ID of the fine-tuning job to get events for. + /// Identifier for the last event from the previous pagination request. + /// Number of events to retrieve. 
+ /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> GetFineTuningEventsAsync(string fineTuningJobId, string after = null, int? limit = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = await GetFineTuningEventsAsync(fineTuningJobId, after, limit, context).ConfigureAwait(false); + return Result.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Get status updates for a fine-tuning job. + /// The ID of the fine-tuning job to get events for. + /// Identifier for the last event from the previous pagination request. + /// Number of events to retrieve. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual Result GetFineTuningEvents(string fineTuningJobId, string after = null, int? limit = null, CancellationToken cancellationToken = default) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + RequestOptions context = FromCancellationToken(cancellationToken); + Result result = GetFineTuningEvents(fineTuningJobId, after, limit, context); + return Result.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Get status updates for a fine-tuning job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job to get events for. + /// Identifier for the last event from the previous pagination request. 
+ /// Number of events to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetFineTuningEventsAsync(string fineTuningJobId, string after, int? limit, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - /// Initializes a new instance of FineTuningJobs. - public virtual FineTuningJobs GetFineTuningJobsClient() + using var scope = ClientDiagnostics.CreateSpan("FineTuning.GetFineTuningEvents"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetFineTuningEventsRequest(fineTuningJobId, after, limit, context); + return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Get status updates for a fine-tuning job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job to get events for. + /// Identifier for the last event from the previous pagination request. + /// Number of events to retrieve. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual Result GetFineTuningEvents(string fineTuningJobId, string after, int? 
limit, RequestOptions context) + { + ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + using var scope = ClientDiagnostics.CreateSpan("FineTuning.GetFineTuningEvents"); + scope.Start(); + try + { + using PipelineMessage message = CreateGetFineTuningEventsRequest(fineTuningJobId, after, limit, context); + return Result.FromResponse(_pipeline.ProcessMessage(message, context)); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateFineTuningJobRequest(RequestBody content, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine_tuning/jobs", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + request.SetHeaderValue("Content-Type", "application/json"); + request.Content = content; + return message; + } + + internal PipelineMessage CreateGetPaginatedFineTuningJobsRequest(string after, long? limit, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine_tuning/jobs", false); + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateRetrieveFineTuningJobRequest(string fineTuningJobId, RequestOptions context) { - return Volatile.Read(ref _cachedFineTuningJobs) ?? Interlocked.CompareExchange(ref _cachedFineTuningJobs, new FineTuningJobs(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? 
_cachedFineTuningJobs; + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine_tuning/jobs/", false); + uri.AppendPath(fineTuningJobId, true); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; } + + internal PipelineMessage CreateCancelFineTuningJobRequest(string fineTuningJobId, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("POST"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine_tuning/jobs/", false); + uri.AppendPath(fineTuningJobId, true); + uri.AppendPath("/cancel", false); + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetFineTuningEventsRequest(string fineTuningJobId, string after, int? 
limit, RequestOptions context) + { + var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); + var request = message.Request; + request.SetMethod("GET"); + var uri = new RequestUri(); + uri.Reset(_endpoint); + uri.AppendPath("/fine_tuning/jobs/", false); + uri.AppendPath(fineTuningJobId, true); + uri.AppendPath("/events", false); + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + request.Uri = uri.ToUri(); + request.SetHeaderValue("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestOptions() { CancellationToken = cancellationToken }; + } + + private static ResponseErrorClassifier _responseErrorClassifier200; + private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); } } diff --git a/.dotnet/src/Generated/FineTuningJobs.cs b/.dotnet/src/Generated/FineTuningJobs.cs deleted file mode 100644 index 669480a3d..000000000 --- a/.dotnet/src/Generated/FineTuningJobs.cs +++ /dev/null @@ -1,710 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using System.ClientModel.Internal; -using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; -using System.Threading; -using System.Threading.Tasks; -using OpenAI.Models; - -namespace OpenAI -{ - // Data plane generated sub-client. - /// The FineTuningJobs sub-client. 
- public partial class FineTuningJobs - { - private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; - private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; - private readonly Uri _endpoint; - - /// The ClientDiagnostics is used to provide tracing support for the client library. - internal TelemetrySource ClientDiagnostics { get; } - - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; - - /// Initializes a new instance of FineTuningJobs for mocking. - protected FineTuningJobs() - { - } - - /// Initializes a new instance of FineTuningJobs. - /// The handler for diagnostic messaging in the client. - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. - /// OpenAI Endpoint. - internal FineTuningJobs(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) - { - ClientDiagnostics = clientDiagnostics; - _pipeline = pipeline; - _keyCredential = keyCredential; - _endpoint = endpoint; - } - - /// - /// Creates a job that fine-tunes a specified model from a given dataset. - /// - /// Response includes details of the enqueued job including job status and the name of the - /// fine-tuned models once complete. - /// - /// [Learn more about fine-tuning](/docs/guides/fine-tuning) - /// - /// The to use. - /// The cancellation token to use. - /// is null. 
- public virtual async Task> CreateFineTuningJobAsync(CreateFineTuningJobRequest job, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNull(job, nameof(job)); - - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = job.ToRequestBody(); - Result result = await CreateFineTuningJobAsync(content, context).ConfigureAwait(false); - return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// Creates a job that fine-tunes a specified model from a given dataset. - /// - /// Response includes details of the enqueued job including job status and the name of the - /// fine-tuned models once complete. - /// - /// [Learn more about fine-tuning](/docs/guides/fine-tuning) - /// - /// The to use. - /// The cancellation token to use. - /// is null. - public virtual Result CreateFineTuningJob(CreateFineTuningJobRequest job, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNull(job, nameof(job)); - - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = job.ToRequestBody(); - Result result = CreateFineTuningJob(content, context); - return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// [Protocol Method] Creates a job that fine-tunes a specified model from a given dataset. - /// - /// Response includes details of the enqueued job including job status and the name of the - /// fine-tuned models once complete. - /// - /// [Learn more about fine-tuning](/docs/guides/fine-tuning) - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The content to send as the body of the request. 
- /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// Service returned a non-success status code. - /// The response returned from the service. - public virtual async Task CreateFineTuningJobAsync(RequestBody content, RequestOptions context = null) - { - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.CreateFineTuningJob"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateFineTuningJobRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// [Protocol Method] Creates a job that fine-tunes a specified model from a given dataset. - /// - /// Response includes details of the enqueued job including job status and the name of the - /// fine-tuned models once complete. - /// - /// [Learn more about fine-tuning](/docs/guides/fine-tuning) - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// Service returned a non-success status code. - /// The response returned from the service. 
- public virtual Result CreateFineTuningJob(RequestBody content, RequestOptions context = null) - { - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.CreateFineTuningJob"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateFineTuningJobRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// Identifier for the last job from the previous pagination request. - /// Number of fine-tuning jobs to retrieve. - /// The cancellation token to use. - public virtual async Task> GetPaginatedFineTuningJobsAsync(string after = null, long? limit = null, CancellationToken cancellationToken = default) - { - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetPaginatedFineTuningJobsAsync(after, limit, context).ConfigureAwait(false); - return Result.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// Identifier for the last job from the previous pagination request. - /// Number of fine-tuning jobs to retrieve. - /// The cancellation token to use. - public virtual Result GetPaginatedFineTuningJobs(string after = null, long? limit = null, CancellationToken cancellationToken = default) - { - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetPaginatedFineTuningJobs(after, limit, context); - return Result.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// [Protocol Method] - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. 
- /// - /// - /// - /// - /// Identifier for the last job from the previous pagination request. - /// Number of fine-tuning jobs to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. - /// The response returned from the service. - public virtual async Task GetPaginatedFineTuningJobsAsync(string after, long? limit, RequestOptions context) - { - using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.GetPaginatedFineTuningJobs"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// [Protocol Method] - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// Identifier for the last job from the previous pagination request. - /// Number of fine-tuning jobs to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. - /// The response returned from the service. - public virtual Result GetPaginatedFineTuningJobs(string after, long? limit, RequestOptions context) - { - using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.GetPaginatedFineTuningJobs"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// Get info about a fine-tuning job. 
- /// - /// [Learn more about fine-tuning](/docs/guides/fine-tuning) - /// - /// The to use. - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. - public virtual async Task> RetrieveFineTuningJobAsync(string fineTuningJobId, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await RetrieveFineTuningJobAsync(fineTuningJobId, context).ConfigureAwait(false); - return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// Get info about a fine-tuning job. - /// - /// [Learn more about fine-tuning](/docs/guides/fine-tuning) - /// - /// The to use. - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. - public virtual Result RetrieveFineTuningJob(string fineTuningJobId, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = RetrieveFineTuningJob(fineTuningJobId, context); - return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// [Protocol Method] Get info about a fine-tuning job. - /// - /// [Learn more about fine-tuning](/docs/guides/fine-tuning) - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The to use. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. 
- /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - public virtual async Task RetrieveFineTuningJobAsync(string fineTuningJobId, RequestOptions context) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.RetrieveFineTuningJob"); - scope.Start(); - try - { - using PipelineMessage message = CreateRetrieveFineTuningJobRequest(fineTuningJobId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// [Protocol Method] Get info about a fine-tuning job. - /// - /// [Learn more about fine-tuning](/docs/guides/fine-tuning) - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The to use. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. 
- public virtual Result RetrieveFineTuningJob(string fineTuningJobId, RequestOptions context) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.RetrieveFineTuningJob"); - scope.Start(); - try - { - using PipelineMessage message = CreateRetrieveFineTuningJobRequest(fineTuningJobId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// Get status updates for a fine-tuning job. - /// The ID of the fine-tuning job to get events for. - /// Identifier for the last event from the previous pagination request. - /// Number of events to retrieve. - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. - public virtual async Task> GetFineTuningEventsAsync(string fineTuningJobId, string after = null, int? limit = null, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetFineTuningEventsAsync(fineTuningJobId, after, limit, context).ConfigureAwait(false); - return Result.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// Get status updates for a fine-tuning job. - /// The ID of the fine-tuning job to get events for. - /// Identifier for the last event from the previous pagination request. - /// Number of events to retrieve. - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. - public virtual Result GetFineTuningEvents(string fineTuningJobId, string after = null, int? 
limit = null, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetFineTuningEvents(fineTuningJobId, after, limit, context); - return Result.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// [Protocol Method] Get status updates for a fine-tuning job. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The ID of the fine-tuning job to get events for. - /// Identifier for the last event from the previous pagination request. - /// Number of events to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - public virtual async Task GetFineTuningEventsAsync(string fineTuningJobId, string after, int? limit, RequestOptions context) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.GetFineTuningEvents"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetFineTuningEventsRequest(fineTuningJobId, after, limit, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// [Protocol Method] Get status updates for a fine-tuning job. 
- /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The ID of the fine-tuning job to get events for. - /// Identifier for the last event from the previous pagination request. - /// Number of events to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - public virtual Result GetFineTuningEvents(string fineTuningJobId, string after, int? limit, RequestOptions context) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.GetFineTuningEvents"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetFineTuningEventsRequest(fineTuningJobId, after, limit, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// Immediately cancel a fine-tune job. - /// The ID of the fine-tuning job to cancel. - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> CancelFineTuningJobAsync(string fineTuningJobId, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await CancelFineTuningJobAsync(fineTuningJobId, context).ConfigureAwait(false); - return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// Immediately cancel a fine-tune job. - /// The ID of the fine-tuning job to cancel. - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. - public virtual Result CancelFineTuningJob(string fineTuningJobId, CancellationToken cancellationToken = default) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = CancelFineTuningJob(fineTuningJobId, context); - return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); - } - - /// - /// [Protocol Method] Immediately cancel a fine-tune job. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The ID of the fine-tuning job to cancel. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. 
- public virtual async Task CancelFineTuningJobAsync(string fineTuningJobId, RequestOptions context) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.CancelFineTuningJob"); - scope.Start(); - try - { - using PipelineMessage message = CreateCancelFineTuningJobRequest(fineTuningJobId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// [Protocol Method] Immediately cancel a fine-tune job. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The ID of the fine-tuning job to cancel. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. 
- public virtual Result CancelFineTuningJob(string fineTuningJobId, RequestOptions context) - { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuningJobs.CancelFineTuningJob"); - scope.Start(); - try - { - using PipelineMessage message = CreateCancelFineTuningJobRequest(fineTuningJobId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - internal PipelineMessage CreateCreateFineTuningJobRequest(RequestBody content, RequestOptions context) - { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine_tuning/jobs", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); - request.Content = content; - return message; - } - - internal PipelineMessage CreateGetPaginatedFineTuningJobsRequest(string after, long? 
limit, RequestOptions context) - { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine_tuning/jobs", false); - if (after != null) - { - uri.AppendQuery("after", after, true); - } - if (limit != null) - { - uri.AppendQuery("limit", limit.Value, true); - } - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - return message; - } - - internal PipelineMessage CreateRetrieveFineTuningJobRequest(string fineTuningJobId, RequestOptions context) - { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine_tuning/jobs/", false); - uri.AppendPath(fineTuningJobId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - return message; - } - - internal PipelineMessage CreateGetFineTuningEventsRequest(string fineTuningJobId, string after, int? 
limit, RequestOptions context) - { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine_tuning/jobs/", false); - uri.AppendPath(fineTuningJobId, true); - uri.AppendPath("/events", false); - if (after != null) - { - uri.AppendQuery("after", after, true); - } - if (limit != null) - { - uri.AppendQuery("limit", limit.Value, true); - } - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - return message; - } - - internal PipelineMessage CreateCancelFineTuningJobRequest(string fineTuningJobId, RequestOptions context) - { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine_tuning/jobs/", false); - uri.AppendPath(fineTuningJobId, true); - uri.AppendPath("/cancel", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - return message; - } - - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); - } -} diff --git a/.dotnet/src/Generated/Models/CreateFineTuneRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateFineTuneRequest.Serialization.cs deleted file mode 100644 index c83a0246a..000000000 --- 
a/.dotnet/src/Generated/Models/CreateFineTuneRequest.Serialization.cs +++ /dev/null @@ -1,386 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel.Internal; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; - -namespace OpenAI.Models -{ - public partial class CreateFineTuneRequest : IUtf8JsonWriteable, IJsonModel - { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(CreateFineTuneRequest)} does not support '{format}' format."); - } - - writer.WriteStartObject(); - writer.WritePropertyName("training_file"u8); - writer.WriteStringValue(TrainingFile); - if (OptionalProperty.IsDefined(ValidationFile)) - { - if (ValidationFile != null) - { - writer.WritePropertyName("validation_file"u8); - writer.WriteStringValue(ValidationFile); - } - else - { - writer.WriteNull("validation_file"); - } - } - if (OptionalProperty.IsDefined(Model)) - { - writer.WritePropertyName("model"u8); - writer.WriteStringValue(Model.Value.ToString()); - } - if (OptionalProperty.IsDefined(NEpochs)) - { - if (NEpochs != null) - { - writer.WritePropertyName("n_epochs"u8); - writer.WriteNumberValue(NEpochs.Value); - } - else - { - writer.WriteNull("n_epochs"); - } - } - if (OptionalProperty.IsDefined(BatchSize)) - { - if (BatchSize != null) - { - writer.WritePropertyName("batch_size"u8); - writer.WriteNumberValue(BatchSize.Value); - } - else - { - writer.WriteNull("batch_size"); - } - } - if (OptionalProperty.IsDefined(LearningRateMultiplier)) - { - if (LearningRateMultiplier != null) - { - writer.WritePropertyName("learning_rate_multiplier"u8); - 
writer.WriteNumberValue(LearningRateMultiplier.Value); - } - else - { - writer.WriteNull("learning_rate_multiplier"); - } - } - if (OptionalProperty.IsDefined(PromptLossRate)) - { - if (PromptLossRate != null) - { - writer.WritePropertyName("prompt_loss_rate"u8); - writer.WriteNumberValue(PromptLossRate.Value); - } - else - { - writer.WriteNull("prompt_loss_rate"); - } - } - if (OptionalProperty.IsDefined(ComputeClassificationMetrics)) - { - if (ComputeClassificationMetrics != null) - { - writer.WritePropertyName("compute_classification_metrics"u8); - writer.WriteBooleanValue(ComputeClassificationMetrics.Value); - } - else - { - writer.WriteNull("compute_classification_metrics"); - } - } - if (OptionalProperty.IsDefined(ClassificationNClasses)) - { - if (ClassificationNClasses != null) - { - writer.WritePropertyName("classification_n_classes"u8); - writer.WriteNumberValue(ClassificationNClasses.Value); - } - else - { - writer.WriteNull("classification_n_classes"); - } - } - if (OptionalProperty.IsDefined(ClassificationPositiveClass)) - { - if (ClassificationPositiveClass != null) - { - writer.WritePropertyName("classification_positive_class"u8); - writer.WriteStringValue(ClassificationPositiveClass); - } - else - { - writer.WriteNull("classification_positive_class"); - } - } - if (OptionalProperty.IsCollectionDefined(ClassificationBetas)) - { - if (ClassificationBetas != null) - { - writer.WritePropertyName("classification_betas"u8); - writer.WriteStartArray(); - foreach (var item in ClassificationBetas) - { - writer.WriteNumberValue(item); - } - writer.WriteEndArray(); - } - else - { - writer.WriteNull("classification_betas"); - } - } - if (OptionalProperty.IsDefined(Suffix)) - { - if (Suffix != null) - { - writer.WritePropertyName("suffix"u8); - writer.WriteStringValue(Suffix); - } - else - { - writer.WriteNull("suffix"); - } - } - if (options.Format != "W" && _serializedAdditionalRawData != null) - { - foreach (var item in _serializedAdditionalRawData) - { - 
writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - writer.WriteEndObject(); - } - - CreateFineTuneRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(CreateFineTuneRequest)} does not support '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeCreateFineTuneRequest(document.RootElement, options); - } - - internal static CreateFineTuneRequest DeserializeCreateFineTuneRequest(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= new ModelReaderWriterOptions("W"); - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - string trainingFile = default; - OptionalProperty validationFile = default; - OptionalProperty model = default; - OptionalProperty nEpochs = default; - OptionalProperty batchSize = default; - OptionalProperty learningRateMultiplier = default; - OptionalProperty promptLossRate = default; - OptionalProperty computeClassificationMetrics = default; - OptionalProperty classificationNClasses = default; - OptionalProperty classificationPositiveClass = default; - OptionalProperty> classificationBetas = default; - OptionalProperty suffix = default; - IDictionary serializedAdditionalRawData = default; - Dictionary additionalPropertiesDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("training_file"u8)) - { - trainingFile = property.Value.GetString(); - continue; - } - if (property.NameEquals("validation_file"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - 
validationFile = null; - continue; - } - validationFile = property.Value.GetString(); - continue; - } - if (property.NameEquals("model"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - model = new CreateFineTuneRequestModel(property.Value.GetString()); - continue; - } - if (property.NameEquals("n_epochs"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - nEpochs = null; - continue; - } - nEpochs = property.Value.GetInt64(); - continue; - } - if (property.NameEquals("batch_size"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - batchSize = null; - continue; - } - batchSize = property.Value.GetInt64(); - continue; - } - if (property.NameEquals("learning_rate_multiplier"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - learningRateMultiplier = null; - continue; - } - learningRateMultiplier = property.Value.GetDouble(); - continue; - } - if (property.NameEquals("prompt_loss_rate"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - promptLossRate = null; - continue; - } - promptLossRate = property.Value.GetDouble(); - continue; - } - if (property.NameEquals("compute_classification_metrics"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - computeClassificationMetrics = null; - continue; - } - computeClassificationMetrics = property.Value.GetBoolean(); - continue; - } - if (property.NameEquals("classification_n_classes"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - classificationNClasses = null; - continue; - } - classificationNClasses = property.Value.GetInt64(); - continue; - } - if (property.NameEquals("classification_positive_class"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - classificationPositiveClass = null; - continue; - } - classificationPositiveClass = property.Value.GetString(); - continue; - } - if (property.NameEquals("classification_betas"u8)) - { - if (property.Value.ValueKind == 
JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(item.GetDouble()); - } - classificationBetas = array; - continue; - } - if (property.NameEquals("suffix"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - suffix = null; - continue; - } - suffix = property.Value.GetString(); - continue; - } - if (options.Format != "W") - { - additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = additionalPropertiesDictionary; - return new CreateFineTuneRequest(trainingFile, validationFile.Value, OptionalProperty.ToNullable(model), OptionalProperty.ToNullable(nEpochs), OptionalProperty.ToNullable(batchSize), OptionalProperty.ToNullable(learningRateMultiplier), OptionalProperty.ToNullable(promptLossRate), OptionalProperty.ToNullable(computeClassificationMetrics), OptionalProperty.ToNullable(classificationNClasses), classificationPositiveClass.Value, OptionalProperty.ToList(classificationBetas), suffix.Value, serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options); - default: - throw new FormatException($"The model {nameof(CreateFineTuneRequest)} does not support '{options.Format}' format."); - } - } - - CreateFineTuneRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data); - return DeserializeCreateFineTuneRequest(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(CreateFineTuneRequest)} does not support '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - /// Deserializes the model from a raw response. - /// The result to deserialize the model from. - internal static CreateFineTuneRequest FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeCreateFineTuneRequest(document.RootElement); - } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } - } -} diff --git a/.dotnet/src/Generated/Models/CreateFineTuneRequest.cs b/.dotnet/src/Generated/Models/CreateFineTuneRequest.cs deleted file mode 100644 index 31b410dcb..000000000 --- a/.dotnet/src/Generated/Models/CreateFineTuneRequest.cs +++ /dev/null @@ -1,295 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel.Internal; -using System.Collections.Generic; - -namespace OpenAI.Models -{ - /// The CreateFineTuneRequest. - public partial class CreateFineTuneRequest - { - /// - /// Keeps track of any properties unknown to the library. - /// - /// To assign an object to the value of this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". 
- /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - private IDictionary _serializedAdditionalRawData; - - /// Initializes a new instance of . - /// - /// The ID of an uploaded file that contains training data. - /// - /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. - /// - /// Your dataset must be formatted as a JSONL file, where each training example is a JSON object - /// with the keys "prompt" and "completion". Additionally, you must upload your file with the - /// purpose `fine-tune`. - /// - /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - /// details. - /// - /// is null. - public CreateFineTuneRequest(string trainingFile) - { - ClientUtilities.AssertNotNull(trainingFile, nameof(trainingFile)); - - TrainingFile = trainingFile; - ClassificationBetas = new OptionalList(); - } - - /// Initializes a new instance of . - /// - /// The ID of an uploaded file that contains training data. - /// - /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. - /// - /// Your dataset must be formatted as a JSONL file, where each training example is a JSON object - /// with the keys "prompt" and "completion". Additionally, you must upload your file with the - /// purpose `fine-tune`. - /// - /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - /// details. - /// - /// - /// The ID of an uploaded file that contains validation data. - /// - /// If you provide this file, the data is used to generate validation metrics periodically during - /// fine-tuning. These metrics can be viewed in the - /// [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). 
- /// Your train and validation data should be mutually exclusive. - /// - /// Your dataset must be formatted as a JSONL file, where each validation example is a JSON object - /// with the keys "prompt" and "completion". Additionally, you must upload your file with the - /// purpose `fine-tune`. - /// - /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - /// details. - /// - /// - /// The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", - /// "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more - /// about these models, see the [Models](/docs/models) documentation. - /// - /// - /// The number of epochs to train the model for. An epoch refers to one full cycle through the - /// training dataset. - /// - /// - /// The batch size to use for training. The batch size is the number of training examples used to - /// train a single forward and backward pass. - /// - /// By default, the batch size will be dynamically configured to be ~0.2% of the number of examples - /// in the training set, capped at 256 - in general, we've found that larger batch sizes tend to - /// work better for larger datasets. - /// - /// - /// The learning rate multiplier to use for training. The fine-tuning learning rate is the original - /// learning rate used for pretraining multiplied by this value. - /// - /// By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final - /// `batch_size` (larger learning rates tend to perform better with larger batch sizes). We - /// recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best - /// results. - /// - /// - /// The weight to use for loss on the prompt tokens. 
This controls how much the model tries to - /// learn to generate the prompt (as compared to the completion which always has a weight of 1.0), - /// and can add a stabilizing effect to training when completions are short. - /// - /// If prompts are extremely long (relative to completions), it may make sense to reduce this - /// weight so as to avoid over-prioritizing learning the prompt. - /// - /// - /// If set, we calculate classification-specific metrics such as accuracy and F-1 score using the - /// validation set at the end of every epoch. These metrics can be viewed in the - /// [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - /// - /// In order to compute classification metrics, you must provide a `validation_file`. Additionally, - /// you must specify `classification_n_classes` for multiclass classification or - /// `classification_positive_class` for binary classification. - /// - /// - /// The number of classes in a classification task. - /// - /// This parameter is required for multiclass classification. - /// - /// - /// The positive class in binary classification. - /// - /// This parameter is needed to generate precision, recall, and F1 metrics when doing binary - /// classification. - /// - /// - /// If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score - /// is a generalization of F-1 score. This is only used for binary classification. - /// - /// With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger - /// beta score puts more weight on recall and less on precision. A smaller beta score puts more - /// weight on precision and less on recall. - /// - /// - /// A string of up to 18 characters that will be added to your fine-tuned model name. - /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like - /// `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. 
- /// - /// Keeps track of any properties unknown to the library. - internal CreateFineTuneRequest(string trainingFile, string validationFile, CreateFineTuneRequestModel? model, long? nEpochs, long? batchSize, double? learningRateMultiplier, double? promptLossRate, bool? computeClassificationMetrics, long? classificationNClasses, string classificationPositiveClass, IList classificationBetas, string suffix, IDictionary serializedAdditionalRawData) - { - TrainingFile = trainingFile; - ValidationFile = validationFile; - Model = model; - NEpochs = nEpochs; - BatchSize = batchSize; - LearningRateMultiplier = learningRateMultiplier; - PromptLossRate = promptLossRate; - ComputeClassificationMetrics = computeClassificationMetrics; - ClassificationNClasses = classificationNClasses; - ClassificationPositiveClass = classificationPositiveClass; - ClassificationBetas = classificationBetas; - Suffix = suffix; - _serializedAdditionalRawData = serializedAdditionalRawData; - } - - /// Initializes a new instance of for deserialization. - internal CreateFineTuneRequest() - { - } - - /// - /// The ID of an uploaded file that contains training data. - /// - /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. - /// - /// Your dataset must be formatted as a JSONL file, where each training example is a JSON object - /// with the keys "prompt" and "completion". Additionally, you must upload your file with the - /// purpose `fine-tune`. - /// - /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - /// details. - /// - public string TrainingFile { get; } - /// - /// The ID of an uploaded file that contains validation data. - /// - /// If you provide this file, the data is used to generate validation metrics periodically during - /// fine-tuning. These metrics can be viewed in the - /// [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). 
- /// Your train and validation data should be mutually exclusive. - /// - /// Your dataset must be formatted as a JSONL file, where each validation example is a JSON object - /// with the keys "prompt" and "completion". Additionally, you must upload your file with the - /// purpose `fine-tune`. - /// - /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - /// details. - /// - public string ValidationFile { get; set; } - /// - /// The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", - /// "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more - /// about these models, see the [Models](/docs/models) documentation. - /// - public CreateFineTuneRequestModel? Model { get; set; } - /// - /// The number of epochs to train the model for. An epoch refers to one full cycle through the - /// training dataset. - /// - public long? NEpochs { get; set; } - /// - /// The batch size to use for training. The batch size is the number of training examples used to - /// train a single forward and backward pass. - /// - /// By default, the batch size will be dynamically configured to be ~0.2% of the number of examples - /// in the training set, capped at 256 - in general, we've found that larger batch sizes tend to - /// work better for larger datasets. - /// - public long? BatchSize { get; set; } - /// - /// The learning rate multiplier to use for training. The fine-tuning learning rate is the original - /// learning rate used for pretraining multiplied by this value. - /// - /// By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final - /// `batch_size` (larger learning rates tend to perform better with larger batch sizes). We - /// recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best - /// results. - /// - public double? 
LearningRateMultiplier { get; set; } - /// - /// The weight to use for loss on the prompt tokens. This controls how much the model tries to - /// learn to generate the prompt (as compared to the completion which always has a weight of 1.0), - /// and can add a stabilizing effect to training when completions are short. - /// - /// If prompts are extremely long (relative to completions), it may make sense to reduce this - /// weight so as to avoid over-prioritizing learning the prompt. - /// - public double? PromptLossRate { get; set; } - /// - /// If set, we calculate classification-specific metrics such as accuracy and F-1 score using the - /// validation set at the end of every epoch. These metrics can be viewed in the - /// [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - /// - /// In order to compute classification metrics, you must provide a `validation_file`. Additionally, - /// you must specify `classification_n_classes` for multiclass classification or - /// `classification_positive_class` for binary classification. - /// - public bool? ComputeClassificationMetrics { get; set; } - /// - /// The number of classes in a classification task. - /// - /// This parameter is required for multiclass classification. - /// - public long? ClassificationNClasses { get; set; } - /// - /// The positive class in binary classification. - /// - /// This parameter is needed to generate precision, recall, and F1 metrics when doing binary - /// classification. - /// - public string ClassificationPositiveClass { get; set; } - /// - /// If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score - /// is a generalization of F-1 score. This is only used for binary classification. - /// - /// With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger - /// beta score puts more weight on recall and less on precision. 
A smaller beta score puts more - /// weight on precision and less on recall. - /// - public IList ClassificationBetas { get; set; } - /// - /// A string of up to 18 characters that will be added to your fine-tuned model name. - /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like - /// `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. - /// - public string Suffix { get; set; } - } -} diff --git a/.dotnet/src/Generated/Models/CreateFineTuneRequestModel.cs b/.dotnet/src/Generated/Models/CreateFineTuneRequestModel.cs deleted file mode 100644 index de5b6f368..000000000 --- a/.dotnet/src/Generated/Models/CreateFineTuneRequestModel.cs +++ /dev/null @@ -1,54 +0,0 @@ -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace OpenAI.Models -{ - /// Enum for model in CreateFineTuneRequest. - public readonly partial struct CreateFineTuneRequestModel : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public CreateFineTuneRequestModel(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string AdaValue = "ada"; - private const string BabbageValue = "babbage"; - private const string CurieValue = "curie"; - private const string DavinciValue = "davinci"; - - /// ada. - public static CreateFineTuneRequestModel Ada { get; } = new CreateFineTuneRequestModel(AdaValue); - /// babbage. - public static CreateFineTuneRequestModel Babbage { get; } = new CreateFineTuneRequestModel(BabbageValue); - /// curie. - public static CreateFineTuneRequestModel Curie { get; } = new CreateFineTuneRequestModel(CurieValue); - /// davinci. - public static CreateFineTuneRequestModel Davinci { get; } = new CreateFineTuneRequestModel(DavinciValue); - /// Determines if two values are the same. 
- public static bool operator ==(CreateFineTuneRequestModel left, CreateFineTuneRequestModel right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(CreateFineTuneRequestModel left, CreateFineTuneRequestModel right) => !left.Equals(right); - /// Converts a string to a . - public static implicit operator CreateFineTuneRequestModel(string value) => new CreateFineTuneRequestModel(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is CreateFineTuneRequestModel other && Equals(other); - /// - public bool Equals(CreateFineTuneRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value?.GetHashCode() ?? 0; - /// - public override string ToString() => _value; - } -} diff --git a/.dotnet/src/Generated/Models/FineTune.Serialization.cs b/.dotnet/src/Generated/Models/FineTune.Serialization.cs deleted file mode 100644 index cdbb27bb0..000000000 --- a/.dotnet/src/Generated/Models/FineTune.Serialization.cs +++ /dev/null @@ -1,287 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel.Internal; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; - -namespace OpenAI.Models -{ - public partial class FineTune : IUtf8JsonWriteable, IJsonModel - { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(FineTune)} does not support '{format}' format."); - } - - writer.WriteStartObject(); - writer.WritePropertyName("id"u8); - writer.WriteStringValue(Id); - writer.WritePropertyName("object"u8); - writer.WriteStringValue(Object.ToString()); - writer.WritePropertyName("created_at"u8); - writer.WriteNumberValue(CreatedAt, "U"); - writer.WritePropertyName("updated_at"u8); - writer.WriteNumberValue(UpdatedAt, "U"); - writer.WritePropertyName("model"u8); - writer.WriteStringValue(Model); - if (FineTunedModel != null) - { - writer.WritePropertyName("fine_tuned_model"u8); - writer.WriteStringValue(FineTunedModel); - } - else - { - writer.WriteNull("fine_tuned_model"); - } - writer.WritePropertyName("organization_id"u8); - writer.WriteStringValue(OrganizationId); - writer.WritePropertyName("status"u8); - writer.WriteStringValue(Status.ToString()); - writer.WritePropertyName("hyperparams"u8); - writer.WriteObjectValue(Hyperparams); - writer.WritePropertyName("training_files"u8); - writer.WriteStartArray(); - foreach (var item in TrainingFiles) - { - writer.WriteObjectValue(item); - } - writer.WriteEndArray(); - writer.WritePropertyName("validation_files"u8); - writer.WriteStartArray(); - foreach (var item in ValidationFiles) - { - writer.WriteObjectValue(item); - } - writer.WriteEndArray(); - writer.WritePropertyName("result_files"u8); - writer.WriteStartArray(); - foreach (var item in ResultFiles) - { - writer.WriteObjectValue(item); - } - writer.WriteEndArray(); - if (OptionalProperty.IsCollectionDefined(Events)) - { - writer.WritePropertyName("events"u8); - writer.WriteStartArray(); - foreach (var item in Events) - { - writer.WriteObjectValue(item); - } - writer.WriteEndArray(); - } - if (options.Format != "W" && _serializedAdditionalRawData != null) - { - foreach (var item in _serializedAdditionalRawData) - { - 
writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - writer.WriteEndObject(); - } - - FineTune IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(FineTune)} does not support '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeFineTune(document.RootElement, options); - } - - internal static FineTune DeserializeFineTune(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= new ModelReaderWriterOptions("W"); - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - string id = default; - FineTuneObject @object = default; - DateTimeOffset createdAt = default; - DateTimeOffset updatedAt = default; - string model = default; - string fineTunedModel = default; - string organizationId = default; - FineTuneStatus status = default; - FineTuneHyperparams hyperparams = default; - IReadOnlyList trainingFiles = default; - IReadOnlyList validationFiles = default; - IReadOnlyList resultFiles = default; - OptionalProperty> events = default; - IDictionary serializedAdditionalRawData = default; - Dictionary additionalPropertiesDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("id"u8)) - { - id = property.Value.GetString(); - continue; - } - if (property.NameEquals("object"u8)) - { - @object = new FineTuneObject(property.Value.GetString()); - continue; - } - if (property.NameEquals("created_at"u8)) - { - createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); - continue; - } - if 
(property.NameEquals("updated_at"u8)) - { - updatedAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); - continue; - } - if (property.NameEquals("model"u8)) - { - model = property.Value.GetString(); - continue; - } - if (property.NameEquals("fine_tuned_model"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - fineTunedModel = null; - continue; - } - fineTunedModel = property.Value.GetString(); - continue; - } - if (property.NameEquals("organization_id"u8)) - { - organizationId = property.Value.GetString(); - continue; - } - if (property.NameEquals("status"u8)) - { - status = new FineTuneStatus(property.Value.GetString()); - continue; - } - if (property.NameEquals("hyperparams"u8)) - { - hyperparams = FineTuneHyperparams.DeserializeFineTuneHyperparams(property.Value); - continue; - } - if (property.NameEquals("training_files"u8)) - { - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(OpenAIFile.DeserializeOpenAIFile(item)); - } - trainingFiles = array; - continue; - } - if (property.NameEquals("validation_files"u8)) - { - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(OpenAIFile.DeserializeOpenAIFile(item)); - } - validationFiles = array; - continue; - } - if (property.NameEquals("result_files"u8)) - { - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(OpenAIFile.DeserializeOpenAIFile(item)); - } - resultFiles = array; - continue; - } - if (property.NameEquals("events"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(FineTuneEvent.DeserializeFineTuneEvent(item)); - } - events = array; - continue; - } - if (options.Format != "W") - { - additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - 
serializedAdditionalRawData = additionalPropertiesDictionary; - return new FineTune(id, @object, createdAt, updatedAt, model, fineTunedModel, organizationId, status, hyperparams, trainingFiles, validationFiles, resultFiles, OptionalProperty.ToList(events), serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options); - default: - throw new FormatException($"The model {nameof(FineTune)} does not support '{options.Format}' format."); - } - } - - FineTune IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data); - return DeserializeFineTune(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(FineTune)} does not support '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - /// Deserializes the model from a raw response. - /// The result to deserialize the model from. - internal static FineTune FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeFineTune(document.RootElement); - } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } - } -} diff --git a/.dotnet/src/Generated/Models/FineTune.cs b/.dotnet/src/Generated/Models/FineTune.cs deleted file mode 100644 index 6300b6a99..000000000 --- a/.dotnet/src/Generated/Models/FineTune.cs +++ /dev/null @@ -1,169 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel.Internal; -using System.Collections.Generic; -using System.Linq; - -namespace OpenAI.Models -{ - /// The `FineTune` object represents a legacy fine-tune job that has been created through the API. - [Obsolete("deprecated")] - public partial class FineTune - { - /// - /// Keeps track of any properties unknown to the library. - /// - /// To assign an object to the value of this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - private IDictionary _serializedAdditionalRawData; - - /// Initializes a new instance of . - /// The object identifier, which can be referenced in the API endpoints. - /// The Unix timestamp (in seconds) for when the fine-tuning job was created. - /// The Unix timestamp (in seconds) for when the fine-tuning job was last updated. - /// The base model that is being fine-tuned. - /// The name of the fine-tuned model that is being created. - /// The organization that owns the fine-tuning job. 
- /// - /// The current status of the fine-tuning job, which can be either `created`, `running`, - /// `succeeded`, `failed`, or `cancelled`. - /// - /// - /// The hyperparameters used for the fine-tuning job. See the - /// [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. - /// - /// The list of files used for training. - /// The list of files used for validation. - /// The compiled results files for the fine-tuning job. - /// , , , , , or is null. - internal FineTune(string id, DateTimeOffset createdAt, DateTimeOffset updatedAt, string model, string fineTunedModel, string organizationId, FineTuneStatus status, FineTuneHyperparams hyperparams, IEnumerable trainingFiles, IEnumerable validationFiles, IEnumerable resultFiles) - { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(model, nameof(model)); - ClientUtilities.AssertNotNull(organizationId, nameof(organizationId)); - ClientUtilities.AssertNotNull(hyperparams, nameof(hyperparams)); - ClientUtilities.AssertNotNull(trainingFiles, nameof(trainingFiles)); - ClientUtilities.AssertNotNull(validationFiles, nameof(validationFiles)); - ClientUtilities.AssertNotNull(resultFiles, nameof(resultFiles)); - - Id = id; - CreatedAt = createdAt; - UpdatedAt = updatedAt; - Model = model; - FineTunedModel = fineTunedModel; - OrganizationId = organizationId; - Status = status; - Hyperparams = hyperparams; - TrainingFiles = trainingFiles.ToList(); - ValidationFiles = validationFiles.ToList(); - ResultFiles = resultFiles.ToList(); - Events = new OptionalList(); - } - - /// Initializes a new instance of . - /// The object identifier, which can be referenced in the API endpoints. - /// The object type, which is always "fine-tune". - /// The Unix timestamp (in seconds) for when the fine-tuning job was created. - /// The Unix timestamp (in seconds) for when the fine-tuning job was last updated. - /// The base model that is being fine-tuned. 
- /// The name of the fine-tuned model that is being created. - /// The organization that owns the fine-tuning job. - /// - /// The current status of the fine-tuning job, which can be either `created`, `running`, - /// `succeeded`, `failed`, or `cancelled`. - /// - /// - /// The hyperparameters used for the fine-tuning job. See the - /// [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. - /// - /// The list of files used for training. - /// The list of files used for validation. - /// The compiled results files for the fine-tuning job. - /// The list of events that have been observed in the lifecycle of the FineTune job. - /// Keeps track of any properties unknown to the library. - internal FineTune(string id, FineTuneObject @object, DateTimeOffset createdAt, DateTimeOffset updatedAt, string model, string fineTunedModel, string organizationId, FineTuneStatus status, FineTuneHyperparams hyperparams, IReadOnlyList trainingFiles, IReadOnlyList validationFiles, IReadOnlyList resultFiles, IReadOnlyList events, IDictionary serializedAdditionalRawData) - { - Id = id; - Object = @object; - CreatedAt = createdAt; - UpdatedAt = updatedAt; - Model = model; - FineTunedModel = fineTunedModel; - OrganizationId = organizationId; - Status = status; - Hyperparams = hyperparams; - TrainingFiles = trainingFiles; - ValidationFiles = validationFiles; - ResultFiles = resultFiles; - Events = events; - _serializedAdditionalRawData = serializedAdditionalRawData; - } - - /// Initializes a new instance of for deserialization. - internal FineTune() - { - } - - /// The object identifier, which can be referenced in the API endpoints. - public string Id { get; } - /// The object type, which is always "fine-tune". - public FineTuneObject Object { get; } = FineTuneObject.FineTune; - - /// The Unix timestamp (in seconds) for when the fine-tuning job was created. 
- public DateTimeOffset CreatedAt { get; } - /// The Unix timestamp (in seconds) for when the fine-tuning job was last updated. - public DateTimeOffset UpdatedAt { get; } - /// The base model that is being fine-tuned. - public string Model { get; } - /// The name of the fine-tuned model that is being created. - public string FineTunedModel { get; } - /// The organization that owns the fine-tuning job. - public string OrganizationId { get; } - /// - /// The current status of the fine-tuning job, which can be either `created`, `running`, - /// `succeeded`, `failed`, or `cancelled`. - /// - public FineTuneStatus Status { get; } - /// - /// The hyperparameters used for the fine-tuning job. See the - /// [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. - /// - public FineTuneHyperparams Hyperparams { get; } - /// The list of files used for training. - public IReadOnlyList TrainingFiles { get; } - /// The list of files used for validation. - public IReadOnlyList ValidationFiles { get; } - /// The compiled results files for the fine-tuning job. - public IReadOnlyList ResultFiles { get; } - /// The list of events that have been observed in the lifecycle of the FineTune job. 
- public IReadOnlyList Events { get; } - } -} diff --git a/.dotnet/src/Generated/Models/FineTuneEvent.Serialization.cs b/.dotnet/src/Generated/Models/FineTuneEvent.Serialization.cs deleted file mode 100644 index 185b9830f..000000000 --- a/.dotnet/src/Generated/Models/FineTuneEvent.Serialization.cs +++ /dev/null @@ -1,156 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel.Internal; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; - -namespace OpenAI.Models -{ - public partial class FineTuneEvent : IUtf8JsonWriteable, IJsonModel - { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(FineTuneEvent)} does not support '{format}' format."); - } - - writer.WriteStartObject(); - writer.WritePropertyName("object"u8); - writer.WriteStringValue(Object); - writer.WritePropertyName("created_at"u8); - writer.WriteNumberValue(CreatedAt, "U"); - writer.WritePropertyName("level"u8); - writer.WriteStringValue(Level); - writer.WritePropertyName("message"u8); - writer.WriteStringValue(Message); - if (options.Format != "W" && _serializedAdditionalRawData != null) - { - foreach (var item in _serializedAdditionalRawData) - { - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - writer.WriteEndObject(); - } - - FineTuneEvent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(FineTuneEvent)} does not support '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeFineTuneEvent(document.RootElement, options); - } - - internal static FineTuneEvent DeserializeFineTuneEvent(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= new ModelReaderWriterOptions("W"); - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - string @object = default; - DateTimeOffset createdAt = default; - string level = default; - string message = default; - IDictionary serializedAdditionalRawData = default; - Dictionary additionalPropertiesDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("object"u8)) - { - @object = property.Value.GetString(); - continue; - } - if (property.NameEquals("created_at"u8)) - { - createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); - continue; - } - if (property.NameEquals("level"u8)) - { - level = property.Value.GetString(); - continue; - } - if (property.NameEquals("message"u8)) - { - message = property.Value.GetString(); - continue; - } - if (options.Format != "W") - { - additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = additionalPropertiesDictionary; - return new FineTuneEvent(@object, createdAt, level, message, serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options); - default: - throw new FormatException($"The model {nameof(FineTuneEvent)} does not support '{options.Format}' format."); - } - } - - FineTuneEvent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data); - return DeserializeFineTuneEvent(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(FineTuneEvent)} does not support '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - /// Deserializes the model from a raw response. - /// The result to deserialize the model from. - internal static FineTuneEvent FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeFineTuneEvent(document.RootElement); - } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } - } -} diff --git a/.dotnet/src/Generated/Models/FineTuneEvent.cs b/.dotnet/src/Generated/Models/FineTuneEvent.cs deleted file mode 100644 index dcf13faa4..000000000 --- a/.dotnet/src/Generated/Models/FineTuneEvent.cs +++ /dev/null @@ -1,93 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel.Internal; -using System.Collections.Generic; - -namespace OpenAI.Models -{ - /// The FineTuneEvent. - public partial class FineTuneEvent - { - /// - /// Keeps track of any properties unknown to the library. - /// - /// To assign an object to the value of this property use . 
- /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - private IDictionary _serializedAdditionalRawData; - - /// Initializes a new instance of . - /// - /// - /// - /// - /// , or is null. - internal FineTuneEvent(string @object, DateTimeOffset createdAt, string level, string message) - { - ClientUtilities.AssertNotNull(@object, nameof(@object)); - ClientUtilities.AssertNotNull(level, nameof(level)); - ClientUtilities.AssertNotNull(message, nameof(message)); - - Object = @object; - CreatedAt = createdAt; - Level = level; - Message = message; - } - - /// Initializes a new instance of . - /// - /// - /// - /// - /// Keeps track of any properties unknown to the library. - internal FineTuneEvent(string @object, DateTimeOffset createdAt, string level, string message, IDictionary serializedAdditionalRawData) - { - Object = @object; - CreatedAt = createdAt; - Level = level; - Message = message; - _serializedAdditionalRawData = serializedAdditionalRawData; - } - - /// Initializes a new instance of for deserialization. - internal FineTuneEvent() - { - } - - /// Gets the object. - public string Object { get; } - /// Gets the created at. - public DateTimeOffset CreatedAt { get; } - /// Gets the level. - public string Level { get; } - /// Gets the message. 
- public string Message { get; } - } -} diff --git a/.dotnet/src/Generated/Models/FineTuneHyperparams.Serialization.cs b/.dotnet/src/Generated/Models/FineTuneHyperparams.Serialization.cs deleted file mode 100644 index 8e6588b67..000000000 --- a/.dotnet/src/Generated/Models/FineTuneHyperparams.Serialization.cs +++ /dev/null @@ -1,197 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel.Internal; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; - -namespace OpenAI.Models -{ - public partial class FineTuneHyperparams : IUtf8JsonWriteable, IJsonModel - { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(FineTuneHyperparams)} does not support '{format}' format."); - } - - writer.WriteStartObject(); - writer.WritePropertyName("n_epochs"u8); - writer.WriteNumberValue(NEpochs); - writer.WritePropertyName("batch_size"u8); - writer.WriteNumberValue(BatchSize); - writer.WritePropertyName("prompt_loss_weight"u8); - writer.WriteNumberValue(PromptLossWeight); - writer.WritePropertyName("learning_rate_multiplier"u8); - writer.WriteNumberValue(LearningRateMultiplier); - if (OptionalProperty.IsDefined(ComputeClassificationMetrics)) - { - writer.WritePropertyName("compute_classification_metrics"u8); - writer.WriteBooleanValue(ComputeClassificationMetrics.Value); - } - if (OptionalProperty.IsDefined(ClassificationPositiveClass)) - { - writer.WritePropertyName("classification_positive_class"u8); - writer.WriteStringValue(ClassificationPositiveClass); - } - if (OptionalProperty.IsDefined(ClassificationNClasses)) - { - 
writer.WritePropertyName("classification_n_classes"u8); - writer.WriteNumberValue(ClassificationNClasses.Value); - } - if (options.Format != "W" && _serializedAdditionalRawData != null) - { - foreach (var item in _serializedAdditionalRawData) - { - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - writer.WriteEndObject(); - } - - FineTuneHyperparams IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(FineTuneHyperparams)} does not support '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeFineTuneHyperparams(document.RootElement, options); - } - - internal static FineTuneHyperparams DeserializeFineTuneHyperparams(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= new ModelReaderWriterOptions("W"); - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - long nEpochs = default; - long batchSize = default; - double promptLossWeight = default; - double learningRateMultiplier = default; - OptionalProperty computeClassificationMetrics = default; - OptionalProperty classificationPositiveClass = default; - OptionalProperty classificationNClasses = default; - IDictionary serializedAdditionalRawData = default; - Dictionary additionalPropertiesDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("n_epochs"u8)) - { - nEpochs = property.Value.GetInt64(); - continue; - } - if (property.NameEquals("batch_size"u8)) - { - batchSize = property.Value.GetInt64(); - continue; - } - if 
(property.NameEquals("prompt_loss_weight"u8)) - { - promptLossWeight = property.Value.GetDouble(); - continue; - } - if (property.NameEquals("learning_rate_multiplier"u8)) - { - learningRateMultiplier = property.Value.GetDouble(); - continue; - } - if (property.NameEquals("compute_classification_metrics"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - computeClassificationMetrics = property.Value.GetBoolean(); - continue; - } - if (property.NameEquals("classification_positive_class"u8)) - { - classificationPositiveClass = property.Value.GetString(); - continue; - } - if (property.NameEquals("classification_n_classes"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - classificationNClasses = property.Value.GetInt64(); - continue; - } - if (options.Format != "W") - { - additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = additionalPropertiesDictionary; - return new FineTuneHyperparams(nEpochs, batchSize, promptLossWeight, learningRateMultiplier, OptionalProperty.ToNullable(computeClassificationMetrics), classificationPositiveClass.Value, OptionalProperty.ToNullable(classificationNClasses), serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options); - default: - throw new FormatException($"The model {nameof(FineTuneHyperparams)} does not support '{options.Format}' format."); - } - } - - FineTuneHyperparams IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data); - return DeserializeFineTuneHyperparams(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(FineTuneHyperparams)} does not support '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - /// Deserializes the model from a raw response. - /// The result to deserialize the model from. - internal static FineTuneHyperparams FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeFineTuneHyperparams(document.RootElement); - } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } - } -} diff --git a/.dotnet/src/Generated/Models/FineTuneHyperparams.cs b/.dotnet/src/Generated/Models/FineTuneHyperparams.cs deleted file mode 100644 index f45f7fe7a..000000000 --- a/.dotnet/src/Generated/Models/FineTuneHyperparams.cs +++ /dev/null @@ -1,117 +0,0 @@ -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace OpenAI.Models -{ - /// The FineTuneHyperparams. - public partial class FineTuneHyperparams - { - /// - /// Keeps track of any properties unknown to the library. - /// - /// To assign an object to the value of this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. 
- /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - private IDictionary _serializedAdditionalRawData; - - /// Initializes a new instance of . - /// - /// The number of epochs to train the model for. An epoch refers to one full cycle through the - /// training dataset. - /// - /// - /// The batch size to use for training. The batch size is the number of training examples used to - /// train a single forward and backward pass. - /// - /// The weight to use for loss on the prompt tokens. - /// The learning rate multiplier to use for training. - internal FineTuneHyperparams(long nEpochs, long batchSize, double promptLossWeight, double learningRateMultiplier) - { - NEpochs = nEpochs; - BatchSize = batchSize; - PromptLossWeight = promptLossWeight; - LearningRateMultiplier = learningRateMultiplier; - } - - /// Initializes a new instance of . - /// - /// The number of epochs to train the model for. An epoch refers to one full cycle through the - /// training dataset. - /// - /// - /// The batch size to use for training. The batch size is the number of training examples used to - /// train a single forward and backward pass. - /// - /// The weight to use for loss on the prompt tokens. - /// The learning rate multiplier to use for training. - /// The classification metrics to compute using the validation dataset at the end of every epoch. - /// The positive class to use for computing classification metrics. - /// The number of classes to use for computing classification metrics. - /// Keeps track of any properties unknown to the library. - internal FineTuneHyperparams(long nEpochs, long batchSize, double promptLossWeight, double learningRateMultiplier, bool? computeClassificationMetrics, string classificationPositiveClass, long? 
classificationNClasses, IDictionary serializedAdditionalRawData) - { - NEpochs = nEpochs; - BatchSize = batchSize; - PromptLossWeight = promptLossWeight; - LearningRateMultiplier = learningRateMultiplier; - ComputeClassificationMetrics = computeClassificationMetrics; - ClassificationPositiveClass = classificationPositiveClass; - ClassificationNClasses = classificationNClasses; - _serializedAdditionalRawData = serializedAdditionalRawData; - } - - /// Initializes a new instance of for deserialization. - internal FineTuneHyperparams() - { - } - - /// - /// The number of epochs to train the model for. An epoch refers to one full cycle through the - /// training dataset. - /// - public long NEpochs { get; } - /// - /// The batch size to use for training. The batch size is the number of training examples used to - /// train a single forward and backward pass. - /// - public long BatchSize { get; } - /// The weight to use for loss on the prompt tokens. - public double PromptLossWeight { get; } - /// The learning rate multiplier to use for training. - public double LearningRateMultiplier { get; } - /// The classification metrics to compute using the validation dataset at the end of every epoch. - public bool? ComputeClassificationMetrics { get; } - /// The positive class to use for computing classification metrics. - public string ClassificationPositiveClass { get; } - /// The number of classes to use for computing classification metrics. - public long? ClassificationNClasses { get; } - } -} diff --git a/.dotnet/src/Generated/Models/FineTuneObject.cs b/.dotnet/src/Generated/Models/FineTuneObject.cs deleted file mode 100644 index 7c848fe16..000000000 --- a/.dotnet/src/Generated/Models/FineTuneObject.cs +++ /dev/null @@ -1,45 +0,0 @@ -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace OpenAI.Models -{ - /// The FineTune_object. 
- public readonly partial struct FineTuneObject : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public FineTuneObject(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string FineTuneValue = "fine-tune"; - - /// fine-tune. - public static FineTuneObject FineTune { get; } = new FineTuneObject(FineTuneValue); - /// Determines if two values are the same. - public static bool operator ==(FineTuneObject left, FineTuneObject right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(FineTuneObject left, FineTuneObject right) => !left.Equals(right); - /// Converts a string to a . - public static implicit operator FineTuneObject(string value) => new FineTuneObject(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is FineTuneObject other && Equals(other); - /// - public bool Equals(FineTuneObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value?.GetHashCode() ?? 0; - /// - public override string ToString() => _value; - } -} diff --git a/.dotnet/src/Generated/Models/FineTuneStatus.cs b/.dotnet/src/Generated/Models/FineTuneStatus.cs deleted file mode 100644 index 00121f091..000000000 --- a/.dotnet/src/Generated/Models/FineTuneStatus.cs +++ /dev/null @@ -1,57 +0,0 @@ -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace OpenAI.Models -{ - /// Enum for status in FineTune. - public readonly partial struct FineTuneStatus : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public FineTuneStatus(string value) - { - _value = value ?? 
throw new ArgumentNullException(nameof(value)); - } - - private const string CreatedValue = "created"; - private const string RunningValue = "running"; - private const string SucceededValue = "succeeded"; - private const string FailedValue = "failed"; - private const string CancelledValue = "cancelled"; - - /// created. - public static FineTuneStatus Created { get; } = new FineTuneStatus(CreatedValue); - /// running. - public static FineTuneStatus Running { get; } = new FineTuneStatus(RunningValue); - /// succeeded. - public static FineTuneStatus Succeeded { get; } = new FineTuneStatus(SucceededValue); - /// failed. - public static FineTuneStatus Failed { get; } = new FineTuneStatus(FailedValue); - /// cancelled. - public static FineTuneStatus Cancelled { get; } = new FineTuneStatus(CancelledValue); - /// Determines if two values are the same. - public static bool operator ==(FineTuneStatus left, FineTuneStatus right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(FineTuneStatus left, FineTuneStatus right) => !left.Equals(right); - /// Converts a string to a . - public static implicit operator FineTuneStatus(string value) => new FineTuneStatus(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is FineTuneStatus other && Equals(other); - /// - public bool Equals(FineTuneStatus other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value?.GetHashCode() ?? 
0; - /// - public override string ToString() => _value; - } -} diff --git a/.dotnet/src/Generated/Models/ListFineTuneEventsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListFineTuneEventsResponse.Serialization.cs deleted file mode 100644 index bd3431eed..000000000 --- a/.dotnet/src/Generated/Models/ListFineTuneEventsResponse.Serialization.cs +++ /dev/null @@ -1,150 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel.Internal; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; - -namespace OpenAI.Models -{ - public partial class ListFineTuneEventsResponse : IUtf8JsonWriteable, IJsonModel - { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(ListFineTuneEventsResponse)} does not support '{format}' format."); - } - - writer.WriteStartObject(); - writer.WritePropertyName("object"u8); - writer.WriteStringValue(Object); - writer.WritePropertyName("data"u8); - writer.WriteStartArray(); - foreach (var item in Data) - { - writer.WriteObjectValue(item); - } - writer.WriteEndArray(); - if (options.Format != "W" && _serializedAdditionalRawData != null) - { - foreach (var item in _serializedAdditionalRawData) - { - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - writer.WriteEndObject(); - } - - ListFineTuneEventsResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(ListFineTuneEventsResponse)} does not support '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeListFineTuneEventsResponse(document.RootElement, options); - } - - internal static ListFineTuneEventsResponse DeserializeListFineTuneEventsResponse(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= new ModelReaderWriterOptions("W"); - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - string @object = default; - IReadOnlyList data = default; - IDictionary serializedAdditionalRawData = default; - Dictionary additionalPropertiesDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("object"u8)) - { - @object = property.Value.GetString(); - continue; - } - if (property.NameEquals("data"u8)) - { - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(FineTuneEvent.DeserializeFineTuneEvent(item)); - } - data = array; - continue; - } - if (options.Format != "W") - { - additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = additionalPropertiesDictionary; - return new ListFineTuneEventsResponse(@object, data, serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options); - default: - throw new FormatException($"The model {nameof(ListFineTuneEventsResponse)} does not support '{options.Format}' format."); - } - } - - ListFineTuneEventsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data); - return DeserializeListFineTuneEventsResponse(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(ListFineTuneEventsResponse)} does not support '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - /// Deserializes the model from a raw response. - /// The result to deserialize the model from. - internal static ListFineTuneEventsResponse FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeListFineTuneEventsResponse(document.RootElement); - } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } - } -} diff --git a/.dotnet/src/Generated/Models/ListFineTuneEventsResponse.cs b/.dotnet/src/Generated/Models/ListFineTuneEventsResponse.cs deleted file mode 100644 index 743f4ee68..000000000 --- a/.dotnet/src/Generated/Models/ListFineTuneEventsResponse.cs +++ /dev/null @@ -1,81 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel.Internal; -using System.Collections.Generic; -using System.Linq; - -namespace OpenAI.Models -{ - /// The ListFineTuneEventsResponse. 
- public partial class ListFineTuneEventsResponse - { - /// - /// Keeps track of any properties unknown to the library. - /// - /// To assign an object to the value of this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - private IDictionary _serializedAdditionalRawData; - - /// Initializes a new instance of . - /// - /// - /// or is null. - internal ListFineTuneEventsResponse(string @object, IEnumerable data) - { - ClientUtilities.AssertNotNull(@object, nameof(@object)); - ClientUtilities.AssertNotNull(data, nameof(data)); - - Object = @object; - Data = data.ToList(); - } - - /// Initializes a new instance of . - /// - /// - /// Keeps track of any properties unknown to the library. - internal ListFineTuneEventsResponse(string @object, IReadOnlyList data, IDictionary serializedAdditionalRawData) - { - Object = @object; - Data = data; - _serializedAdditionalRawData = serializedAdditionalRawData; - } - - /// Initializes a new instance of for deserialization. - internal ListFineTuneEventsResponse() - { - } - - /// Gets the object. - public string Object { get; } - /// Gets the data. 
- public IReadOnlyList Data { get; } - } -} diff --git a/.dotnet/src/Generated/Models/ListFineTunesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListFineTunesResponse.Serialization.cs deleted file mode 100644 index bfd1f5cf9..000000000 --- a/.dotnet/src/Generated/Models/ListFineTunesResponse.Serialization.cs +++ /dev/null @@ -1,150 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel.Internal; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; - -namespace OpenAI.Models -{ - public partial class ListFineTunesResponse : IUtf8JsonWriteable, IJsonModel - { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(ListFineTunesResponse)} does not support '{format}' format."); - } - - writer.WriteStartObject(); - writer.WritePropertyName("object"u8); - writer.WriteStringValue(Object); - writer.WritePropertyName("data"u8); - writer.WriteStartArray(); - foreach (var item in Data) - { - writer.WriteObjectValue(item); - } - writer.WriteEndArray(); - if (options.Format != "W" && _serializedAdditionalRawData != null) - { - foreach (var item in _serializedAdditionalRawData) - { - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - writer.WriteEndObject(); - } - - ListFineTunesResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(ListFineTunesResponse)} does not support '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeListFineTunesResponse(document.RootElement, options); - } - - internal static ListFineTunesResponse DeserializeListFineTunesResponse(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= new ModelReaderWriterOptions("W"); - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - string @object = default; - IReadOnlyList data = default; - IDictionary serializedAdditionalRawData = default; - Dictionary additionalPropertiesDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("object"u8)) - { - @object = property.Value.GetString(); - continue; - } - if (property.NameEquals("data"u8)) - { - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(FineTune.DeserializeFineTune(item)); - } - data = array; - continue; - } - if (options.Format != "W") - { - additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = additionalPropertiesDictionary; - return new ListFineTunesResponse(@object, data, serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options); - default: - throw new FormatException($"The model {nameof(ListFineTunesResponse)} does not support '{options.Format}' format."); - } - } - - ListFineTunesResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data); - return DeserializeListFineTunesResponse(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(ListFineTunesResponse)} does not support '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - /// Deserializes the model from a raw response. - /// The result to deserialize the model from. - internal static ListFineTunesResponse FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeListFineTunesResponse(document.RootElement); - } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } - } -} diff --git a/.dotnet/src/Generated/Models/ListFineTunesResponse.cs b/.dotnet/src/Generated/Models/ListFineTunesResponse.cs deleted file mode 100644 index 1355ef3be..000000000 --- a/.dotnet/src/Generated/Models/ListFineTunesResponse.cs +++ /dev/null @@ -1,81 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel.Internal; -using System.Collections.Generic; -using System.Linq; - -namespace OpenAI.Models -{ - /// The ListFineTunesResponse. 
- public partial class ListFineTunesResponse - { - /// - /// Keeps track of any properties unknown to the library. - /// - /// To assign an object to the value of this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - private IDictionary _serializedAdditionalRawData; - - /// Initializes a new instance of . - /// - /// - /// or is null. - internal ListFineTunesResponse(string @object, IEnumerable data) - { - ClientUtilities.AssertNotNull(@object, nameof(@object)); - ClientUtilities.AssertNotNull(data, nameof(data)); - - Object = @object; - Data = data.ToList(); - } - - /// Initializes a new instance of . - /// - /// - /// Keeps track of any properties unknown to the library. - internal ListFineTunesResponse(string @object, IReadOnlyList data, IDictionary serializedAdditionalRawData) - { - Object = @object; - Data = data; - _serializedAdditionalRawData = serializedAdditionalRawData; - } - - /// Initializes a new instance of for deserialization. - internal ListFineTunesResponse() - { - } - - /// Gets the object. - public string Object { get; } - /// Gets the data. 
- public IReadOnlyList Data { get; } - } -} diff --git a/.dotnet/src/Generated/OpenAIClient.cs b/.dotnet/src/Generated/OpenAIClient.cs index ba4da1bc6..67ceeca49 100644 --- a/.dotnet/src/Generated/OpenAIClient.cs +++ b/.dotnet/src/Generated/OpenAIClient.cs @@ -56,14 +56,13 @@ public OpenAIClient(Uri endpoint, KeyCredential credential, OpenAIClientOptions _endpoint = endpoint; } - private FineTuning _cachedFineTuning; private Audio _cachedAudio; private Assistants _cachedAssistants; private Chat _cachedChat; private Completions _cachedCompletions; private Embeddings _cachedEmbeddings; private Files _cachedFiles; - private FineTunes _cachedFineTunes; + private FineTuning _cachedFineTuning; private Images _cachedImages; private Messages _cachedMessages; private ModelsOps _cachedModelsOps; @@ -71,12 +70,6 @@ public OpenAIClient(Uri endpoint, KeyCredential credential, OpenAIClientOptions private Runs _cachedRuns; private Threads _cachedThreads; - /// Initializes a new instance of FineTuning. - public virtual FineTuning GetFineTuningClient() - { - return Volatile.Read(ref _cachedFineTuning) ?? Interlocked.CompareExchange(ref _cachedFineTuning, new FineTuning(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedFineTuning; - } - /// Initializes a new instance of Audio. public virtual Audio GetAudioClient() { @@ -113,10 +106,10 @@ public virtual Files GetFilesClient() return Volatile.Read(ref _cachedFiles) ?? Interlocked.CompareExchange(ref _cachedFiles, new Files(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedFiles; } - /// Initializes a new instance of FineTunes. - public virtual FineTunes GetFineTunesClient() + /// Initializes a new instance of FineTuning. + public virtual FineTuning GetFineTuningClient() { - return Volatile.Read(ref _cachedFineTunes) ?? Interlocked.CompareExchange(ref _cachedFineTunes, new FineTunes(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? 
_cachedFineTunes; + return Volatile.Read(ref _cachedFineTuning) ?? Interlocked.CompareExchange(ref _cachedFineTuning, new FineTuning(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedFineTuning; } /// Initializes a new instance of Images. diff --git a/.dotnet/src/Generated/OpenAIModelFactory.cs b/.dotnet/src/Generated/OpenAIModelFactory.cs index ae070eb8a..494ab0f0a 100644 --- a/.dotnet/src/Generated/OpenAIModelFactory.cs +++ b/.dotnet/src/Generated/OpenAIModelFactory.cs @@ -11,158 +11,6 @@ namespace OpenAI.Models /// Model factory for models. public static partial class OpenAIModelFactory { - /// Initializes a new instance of . - /// - /// The ID of an uploaded file that contains training data. - /// - /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. - /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with - /// the purpose `fine-tune`. - /// - /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - /// - /// - /// The ID of an uploaded file that contains validation data. - /// - /// If you provide this file, the data is used to generate validation metrics periodically during - /// fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should - /// not be present in both train and validation files. - /// - /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose - /// `fine-tune`. - /// - /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - /// - /// - /// The name of the model to fine-tune. You can select one of the - /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - /// - /// The hyperparameters used for the fine-tuning job. - /// - /// A string of up to 18 characters that will be added to your fine-tuned model name. 
- /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like - /// `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. - /// - /// A new instance for mocking. - public static CreateFineTuningJobRequest CreateFineTuningJobRequest(string trainingFile = null, string validationFile = null, CreateFineTuningJobRequestModel model = default, CreateFineTuningJobRequestHyperparameters hyperparameters = null, string suffix = null) - { - return new CreateFineTuningJobRequest(trainingFile, validationFile, model, hyperparameters, suffix, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The object identifier, which can be referenced in the API endpoints. - /// The object type, which is always "fine_tuning.job". - /// The Unix timestamp (in seconds) for when the fine-tuning job was created. - /// - /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be - /// null if the fine-tuning job is still running. - /// - /// The base model that is being fine-tuned. - /// - /// The name of the fine-tuned model that is being created. The value will be null if the - /// fine-tuning job is still running. - /// - /// The organization that owns the fine-tuning job. - /// - /// The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, - /// `succeeded`, `failed`, or `cancelled`. - /// - /// - /// The hyperparameters used for the fine-tuning job. See the - /// [fine-tuning guide](/docs/guides/fine-tuning) for more details. - /// - /// - /// The file ID used for training. You can retrieve the training data with the - /// [Files API](/docs/api-reference/files/retrieve-contents). - /// - /// - /// The file ID used for validation. You can retrieve the validation results with the - /// [Files API](/docs/api-reference/files/retrieve-contents). - /// - /// - /// The compiled results file ID(s) for the fine-tuning job. 
You can retrieve the results with the - /// [Files API](/docs/api-reference/files/retrieve-contents). - /// - /// - /// The total number of billable tokens processed by this fine tuning job. The value will be null - /// if the fine-tuning job is still running. - /// - /// - /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the - /// failure. - /// - /// A new instance for mocking. - public static FineTuningJob FineTuningJob(string id = null, FineTuningJobObject @object = default, DateTimeOffset createdAt = default, DateTimeOffset? finishedAt = null, string model = null, string fineTunedModel = null, string organizationId = null, FineTuningJobStatus status = default, FineTuningJobHyperparameters hyperparameters = null, string trainingFile = null, string validationFile = null, IEnumerable resultFiles = null, long? trainedTokens = null, FineTuningJobError error = null) - { - resultFiles ??= new List(); - - return new FineTuningJob(id, @object, createdAt, finishedAt, model, fineTunedModel, organizationId, status, hyperparameters, trainingFile, validationFile, resultFiles?.ToList(), trainedTokens, error, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// The number of epochs to train the model for. An epoch refers to one full cycle through the - /// training dataset. - /// - /// "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the - /// number manually, we support any number between 1 and 50 epochs. - /// - /// A new instance for mocking. - public static FineTuningJobHyperparameters FineTuningJobHyperparameters(BinaryData nEpochs = null) - { - return new FineTuningJobHyperparameters(nEpochs, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// A human-readable error message. - /// A machine-readable error code. - /// - /// The parameter that was invalid, usually `training_file` or `validation_file`. 
This field - /// will be null if the failure was not parameter-specific. - /// - /// A new instance for mocking. - public static FineTuningJobError FineTuningJobError(string message = null, string code = null, string param = null) - { - return new FineTuningJobError(message, code, param, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// - /// - /// A new instance for mocking. - public static ListPaginatedFineTuningJobsResponse ListPaginatedFineTuningJobsResponse(string @object = null, IEnumerable data = null, bool hasMore = default) - { - data ??= new List(); - - return new ListPaginatedFineTuningJobsResponse(@object, data?.ToList(), hasMore, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// - /// A new instance for mocking. - public static ListFineTuningJobEventsResponse ListFineTuningJobEventsResponse(string @object = null, IEnumerable data = null) - { - data ??= new List(); - - return new ListFineTuningJobEventsResponse(@object, data?.ToList(), serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// - /// - /// - /// - /// A new instance for mocking. - public static FineTuningJobEvent FineTuningJobEvent(string id = null, string @object = null, DateTimeOffset createdAt = default, FineTuningJobEventLevel level = default, string message = null) - { - return new FineTuningJobEvent(id, @object, createdAt, level, message, serializedAdditionalRawData: null); - } - /// Initializes a new instance of . /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`. /// The text to generate audio for. The maximum length is 4096 characters. @@ -990,192 +838,156 @@ public static DeleteFileResponse DeleteFileResponse(string id = null, DeleteFile return new DeleteFileResponse(id, @object, deleted, serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . 
/// /// The ID of an uploaded file that contains training data. /// /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file, where each training example is a JSON object - /// with the keys "prompt" and "completion". Additionally, you must upload your file with the - /// purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + /// the purpose `fine-tune`. /// - /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - /// details. + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. /// /// /// The ID of an uploaded file that contains validation data. /// /// If you provide this file, the data is used to generate validation metrics periodically during - /// fine-tuning. These metrics can be viewed in the - /// [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - /// Your train and validation data should be mutually exclusive. + /// fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + /// not be present in both train and validation files. /// - /// Your dataset must be formatted as a JSONL file, where each validation example is a JSON object - /// with the keys "prompt" and "completion". Additionally, you must upload your file with the - /// purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + /// `fine-tune`. /// - /// See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - /// details. + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. /// /// - /// The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", - /// "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. 
To learn more - /// about these models, see the [Models](/docs/models) documentation. - /// - /// - /// The number of epochs to train the model for. An epoch refers to one full cycle through the - /// training dataset. - /// - /// - /// The batch size to use for training. The batch size is the number of training examples used to - /// train a single forward and backward pass. - /// - /// By default, the batch size will be dynamically configured to be ~0.2% of the number of examples - /// in the training set, capped at 256 - in general, we've found that larger batch sizes tend to - /// work better for larger datasets. - /// - /// - /// The learning rate multiplier to use for training. The fine-tuning learning rate is the original - /// learning rate used for pretraining multiplied by this value. - /// - /// By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final - /// `batch_size` (larger learning rates tend to perform better with larger batch sizes). We - /// recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best - /// results. - /// - /// - /// The weight to use for loss on the prompt tokens. This controls how much the model tries to - /// learn to generate the prompt (as compared to the completion which always has a weight of 1.0), - /// and can add a stabilizing effect to training when completions are short. - /// - /// If prompts are extremely long (relative to completions), it may make sense to reduce this - /// weight so as to avoid over-prioritizing learning the prompt. - /// - /// - /// If set, we calculate classification-specific metrics such as accuracy and F-1 score using the - /// validation set at the end of every epoch. These metrics can be viewed in the - /// [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - /// - /// In order to compute classification metrics, you must provide a `validation_file`. 
Additionally, - /// you must specify `classification_n_classes` for multiclass classification or - /// `classification_positive_class` for binary classification. - /// - /// - /// The number of classes in a classification task. - /// - /// This parameter is required for multiclass classification. - /// - /// - /// The positive class in binary classification. - /// - /// This parameter is needed to generate precision, recall, and F1 metrics when doing binary - /// classification. - /// - /// - /// If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score - /// is a generalization of F-1 score. This is only used for binary classification. - /// - /// With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger - /// beta score puts more weight on recall and less on precision. A smaller beta score puts more - /// weight on precision and less on recall. + /// The name of the model to fine-tune. You can select one of the + /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). /// + /// The hyperparameters used for the fine-tuning job. /// /// A string of up to 18 characters that will be added to your fine-tuned model name. /// /// For example, a `suffix` of "custom-model-name" would produce a model name like - /// `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. + /// `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. /// - /// A new instance for mocking. - public static CreateFineTuneRequest CreateFineTuneRequest(string trainingFile = null, string validationFile = null, CreateFineTuneRequestModel? model = null, long? nEpochs = null, long? batchSize = null, double? learningRateMultiplier = null, double? promptLossRate = null, bool? computeClassificationMetrics = null, long? classificationNClasses = null, string classificationPositiveClass = null, IEnumerable classificationBetas = null, string suffix = null) + /// A new instance for mocking. 
+ public static CreateFineTuningJobRequest CreateFineTuningJobRequest(string trainingFile = null, string validationFile = null, CreateFineTuningJobRequestModel model = default, CreateFineTuningJobRequestHyperparameters hyperparameters = null, string suffix = null) { - classificationBetas ??= new List(); - - return new CreateFineTuneRequest(trainingFile, validationFile, model, nEpochs, batchSize, learningRateMultiplier, promptLossRate, computeClassificationMetrics, classificationNClasses, classificationPositiveClass, classificationBetas?.ToList(), suffix, serializedAdditionalRawData: null); + return new CreateFineTuningJobRequest(trainingFile, validationFile, model, hyperparameters, suffix, serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The object identifier, which can be referenced in the API endpoints. - /// The object type, which is always "fine-tune". + /// The object type, which is always "fine_tuning.job". /// The Unix timestamp (in seconds) for when the fine-tuning job was created. - /// The Unix timestamp (in seconds) for when the fine-tuning job was last updated. + /// + /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + /// null if the fine-tuning job is still running. + /// /// The base model that is being fine-tuned. - /// The name of the fine-tuned model that is being created. + /// + /// The name of the fine-tuned model that is being created. The value will be null if the + /// fine-tuning job is still running. + /// /// The organization that owns the fine-tuning job. /// - /// The current status of the fine-tuning job, which can be either `created`, `running`, + /// The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, /// `succeeded`, `failed`, or `cancelled`. /// - /// + /// /// The hyperparameters used for the fine-tuning job. 
See the - /// [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. + /// [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// + /// The file ID used for training. You can retrieve the training data with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The file ID used for validation. You can retrieve the validation results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The total number of billable tokens processed by this fine tuning job. The value will be null + /// if the fine-tuning job is still running. + /// + /// + /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + /// failure. /// - /// The list of files used for training. - /// The list of files used for validation. - /// The compiled results files for the fine-tuning job. - /// The list of events that have been observed in the lifecycle of the FineTune job. - /// A new instance for mocking. - public static FineTune FineTune(string id = null, FineTuneObject @object = default, DateTimeOffset createdAt = default, DateTimeOffset updatedAt = default, string model = null, string fineTunedModel = null, string organizationId = null, FineTuneStatus status = default, FineTuneHyperparams hyperparams = null, IEnumerable trainingFiles = null, IEnumerable validationFiles = null, IEnumerable resultFiles = null, IEnumerable events = null) + /// A new instance for mocking. + public static FineTuningJob FineTuningJob(string id = null, FineTuningJobObject @object = default, DateTimeOffset createdAt = default, DateTimeOffset? 
finishedAt = null, string model = null, string fineTunedModel = null, string organizationId = null, FineTuningJobStatus status = default, FineTuningJobHyperparameters hyperparameters = null, string trainingFile = null, string validationFile = null, IEnumerable resultFiles = null, long? trainedTokens = null, FineTuningJobError error = null) { - trainingFiles ??= new List(); - validationFiles ??= new List(); - resultFiles ??= new List(); - events ??= new List(); + resultFiles ??= new List(); - return new FineTune(id, @object, createdAt, updatedAt, model, fineTunedModel, organizationId, status, hyperparams, trainingFiles?.ToList(), validationFiles?.ToList(), resultFiles?.ToList(), events?.ToList(), serializedAdditionalRawData: null); + return new FineTuningJob(id, @object, createdAt, finishedAt, model, fineTunedModel, organizationId, status, hyperparameters, trainingFile, validationFile, resultFiles?.ToList(), trainedTokens, error, serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// /// The number of epochs to train the model for. An epoch refers to one full cycle through the /// training dataset. + /// + /// "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the + /// number manually, we support any number between 1 and 50 epochs. /// - /// - /// The batch size to use for training. The batch size is the number of training examples used to - /// train a single forward and backward pass. - /// - /// The weight to use for loss on the prompt tokens. - /// The learning rate multiplier to use for training. - /// The classification metrics to compute using the validation dataset at the end of every epoch. - /// The positive class to use for computing classification metrics. - /// The number of classes to use for computing classification metrics. - /// A new instance for mocking. 
- public static FineTuneHyperparams FineTuneHyperparams(long nEpochs = default, long batchSize = default, double promptLossWeight = default, double learningRateMultiplier = default, bool? computeClassificationMetrics = null, string classificationPositiveClass = null, long? classificationNClasses = null) + /// A new instance for mocking. + public static FineTuningJobHyperparameters FineTuningJobHyperparameters(BinaryData nEpochs = null) { - return new FineTuneHyperparams(nEpochs, batchSize, promptLossWeight, learningRateMultiplier, computeClassificationMetrics, classificationPositiveClass, classificationNClasses, serializedAdditionalRawData: null); + return new FineTuningJobHyperparameters(nEpochs, serializedAdditionalRawData: null); } - /// Initializes a new instance of . - /// - /// - /// - /// - /// A new instance for mocking. - public static FineTuneEvent FineTuneEvent(string @object = null, DateTimeOffset createdAt = default, string level = null, string message = null) + /// Initializes a new instance of . + /// A human-readable error message. + /// A machine-readable error code. + /// + /// The parameter that was invalid, usually `training_file` or `validation_file`. This field + /// will be null if the failure was not parameter-specific. + /// + /// A new instance for mocking. + public static FineTuningJobError FineTuningJobError(string message = null, string code = null, string param = null) { - return new FineTuneEvent(@object, createdAt, level, message, serializedAdditionalRawData: null); + return new FineTuningJobError(message, code, param, serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// /// - /// A new instance for mocking. - public static ListFineTunesResponse ListFineTunesResponse(string @object = null, IEnumerable data = null) + /// + /// A new instance for mocking. 
+ public static ListPaginatedFineTuningJobsResponse ListPaginatedFineTuningJobsResponse(string @object = null, IEnumerable data = null, bool hasMore = default) { - data ??= new List(); + data ??= new List(); - return new ListFineTunesResponse(@object, data?.ToList(), serializedAdditionalRawData: null); + return new ListPaginatedFineTuningJobsResponse(@object, data?.ToList(), hasMore, serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// /// - /// A new instance for mocking. - public static ListFineTuneEventsResponse ListFineTuneEventsResponse(string @object = null, IEnumerable data = null) + /// A new instance for mocking. + public static ListFineTuningJobEventsResponse ListFineTuningJobEventsResponse(string @object = null, IEnumerable data = null) { - data ??= new List(); + data ??= new List(); + + return new ListFineTuningJobEventsResponse(@object, data?.ToList(), serializedAdditionalRawData: null); + } - return new ListFineTuneEventsResponse(@object, data?.ToList(), serializedAdditionalRawData: null); + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// A new instance for mocking. + public static FineTuningJobEvent FineTuningJobEvent(string id = null, string @object = null, DateTimeOffset createdAt = default, FineTuningJobEventLevel level = default, string message = null) + { + return new FineTuningJobEvent(id, @object, createdAt, level, message, serializedAdditionalRawData: null); } /// Initializes a new instance of . 
diff --git a/.dotnet/tests/Generated/Tests/FineTuningJobsTests.cs b/.dotnet/tests/Generated/Tests/FineTuningTests.cs similarity index 70% rename from .dotnet/tests/Generated/Tests/FineTuningJobsTests.cs rename to .dotnet/tests/Generated/Tests/FineTuningTests.cs index ea1fc2ad2..40abbe22d 100644 --- a/.dotnet/tests/Generated/Tests/FineTuningJobsTests.cs +++ b/.dotnet/tests/Generated/Tests/FineTuningTests.cs @@ -9,13 +9,13 @@ namespace OpenAI.Tests { - public partial class FineTuningJobsTests + public partial class FineTuningTests { [Test] public void SmokeTest() { KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - FineTuningJobs client = new OpenAIClient(credential).GetFineTuningClient().GetFineTuningJobsClient(); + FineTuning client = new OpenAIClient(credential).GetFineTuningClient(); Assert.IsNotNull(client); } } diff --git a/.dotnet/tsp-output/@typespec/openapi3/openapi.yaml b/.dotnet/tsp-output/@typespec/openapi3/openapi.yaml deleted file mode 100644 index cc299da11..000000000 --- a/.dotnet/tsp-output/@typespec/openapi3/openapi.yaml +++ /dev/null @@ -1,6019 +0,0 @@ -openapi: 3.0.0 -info: - title: OpenAI API - version: 2.0.0 - description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. -tags: - - name: Fine-tuning - - name: Audio - - name: Assistants - - name: Chat - - name: Completions - - name: Embeddings - - name: Files - - name: Images - - name: Models - - name: Moderations -paths: - /assistants: - post: - tags: - - Assistants - operationId: createAssistant - summary: Create an assistant with a model and instructions. - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/AssistantObject' - default: - description: An unexpected error response. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateAssistantRequest' - get: - tags: - - Assistants - operationId: listAssistants - summary: Returns a list of assistants. - parameters: - - name: limit - in: query - required: false - description: |- - A limit on the number of objects to be returned. Limit can range between 1 and 100, and the - default is 20. - schema: - type: integer - format: int32 - default: 20 - - name: order - in: query - required: false - description: |- - Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` - for descending order. - schema: - $ref: '#/components/schemas/ListOrder' - default: desc - - name: after - in: query - required: false - description: |- - A cursor for use in pagination. `after` is an object ID that defines your place in the list. - For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the list. - schema: - type: string - - name: before - in: query - required: false - description: |- - A cursor for use in pagination. `before` is an object ID that defines your place in the list. - For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page of the list. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListAssistantsResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /assistants/{assistant_id}: - get: - tags: - - Assistants - operationId: getAssistant - summary: Retrieves an assistant. 
- parameters: - - name: assistant_id - in: path - required: true - description: The ID of the assistant to retrieve. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/AssistantObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - post: - tags: - - Assistants - operationId: modifyAssistant - summary: Modifies an assistant. - parameters: - - name: assistant_id - in: path - required: true - description: The ID of the assistant to modify. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/AssistantObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyAssistantRequest' - delete: - tags: - - Assistants - operationId: deleteAssistant - summary: Delete an assistant. - parameters: - - name: assistant_id - in: path - required: true - description: The ID of the assistant to delete. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteAssistantResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /assistants/{assistant_id}/files: - post: - tags: - - Assistants - operationId: createAssistantFile - summary: |- - Create an assistant file by attaching a [File](/docs/api-reference/files) to a - [assistant](/docs/api-reference/assistants). 
- parameters: - - name: assistant_id - in: path - required: true - description: The ID of the assistant for which to create a file. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/AssistantFileObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateAssistantFileRequest' - get: - tags: - - Assistants - operationId: listAssistantFiles - summary: Returns a list of assistant files. - parameters: - - name: assistant_id - in: path - required: true - description: The ID of the assistant the file belongs to. - schema: - type: string - - name: limit - in: query - required: false - description: |- - A limit on the number of objects to be returned. Limit can range between 1 and 100, and the - default is 20. - schema: - type: integer - format: int32 - default: 20 - - name: order - in: query - required: false - description: |- - Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` - for descending order. - schema: - $ref: '#/components/schemas/ListOrder' - default: desc - - name: after - in: query - required: false - description: |- - A cursor for use in pagination. `after` is an object ID that defines your place in the list. - For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the list. - schema: - type: string - - name: before - in: query - required: false - description: |- - A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
- For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page of the list. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListAssistantFilesResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /assistants/{assistant_id}/files/{file_id}: - get: - tags: - - Assistants - operationId: getAssistantFile - summary: Retrieves an assistant file. - parameters: - - name: assistant_id - in: path - required: true - description: The ID of the assistant the file belongs to. - schema: - type: string - - name: file_id - in: path - required: true - description: The ID of the file we're getting. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/AssistantFileObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - delete: - tags: - - Assistants - operationId: deleteAssistantFile - summary: Delete an assistant file. - parameters: - - name: assistant_id - in: path - required: true - description: The ID of the assistant the file belongs to. - schema: - type: string - - name: file_id - in: path - required: true - description: The ID of the file to delete. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteAssistantFileResponse' - default: - description: An unexpected error response. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /audio/speech: - post: - tags: - - Audio - operationId: createSpeech - summary: Generates audio from the input text. - parameters: [] - responses: - '200': - description: The request has succeeded. - headers: - Transfer-Encoding: - required: false - description: chunked - schema: - type: string - content: - application/octet-stream: - schema: - type: string - format: binary - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateSpeechRequest' - /audio/transcriptions: - post: - tags: - - Audio - operationId: createTranscription - summary: Transcribes audio into the input language. - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/CreateTranscriptionResponse' - text/plain: - schema: - type: string - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateTranscriptionRequestMultiPart' - /audio/translations: - post: - tags: - - Audio - operationId: createTranslation - summary: Translates audio into English.. - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/CreateTranslationResponse' - text/plain: - schema: - type: string - default: - description: An unexpected error response. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateTranslationRequestMultiPart' - /chat/completions: - post: - tags: - - Chat - operationId: createChatCompletion - summary: Creates a model response for the given chat conversation. - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/CreateChatCompletionResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateChatCompletionRequest' - /completions: - post: - tags: - - Completions - operationId: createCompletion - summary: Creates a completion for the provided prompt and parameters. - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/CreateCompletionResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateCompletionRequest' - /embeddings: - post: - tags: - - Embeddings - operationId: createEmbedding - summary: Creates an embedding vector representing the input text. - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/CreateEmbeddingResponse' - default: - description: An unexpected error response. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateEmbeddingRequest' - /files: - post: - tags: - - Files - operationId: createFile - summary: |- - Upload a file that can be used across various endpoints. The size of all the files uploaded by - one organization can be up to 100 GB. - - The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See - the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files - supported. The Fine-tuning API only supports `.jsonl` files. - - Please [contact us](https://help.openai.com/) if you need to increase these storage limits. - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/OpenAIFile' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateFileRequestMultiPart' - get: - tags: - - Files - operationId: listFiles - summary: Returns a list of files that belong to the user's organization. - parameters: - - name: purpose - in: query - required: false - description: Only return files with the given purpose. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListFilesResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /files/{file_id}: - get: - tags: - - Files - operationId: retrieveFile - summary: Returns information about a specific file. 
- parameters: - - name: file_id - in: path - required: true - description: The ID of the file to use for this request. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/OpenAIFile' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - delete: - tags: - - Files - operationId: deleteFile - summary: Delete a file - parameters: - - name: file_id - in: path - required: true - description: The ID of the file to use for this request. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteFileResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /files/{file_id}/content: - get: - tags: - - Files - operationId: downloadFile - summary: Returns the contents of the specified file. - parameters: - - name: file_id - in: path - required: true - description: The ID of the file to use for this request. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - type: string - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /fine-tunes: - post: - tags: - - Fine-tuning - operationId: createFineTune - summary: |- - Creates a job that fine-tunes a specified model from a given dataset. - - Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - - [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - parameters: [] - responses: - '200': - description: The request has succeeded. 
- content: - application/json: - schema: - $ref: '#/components/schemas/FineTune' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateFineTuneRequest' - deprecated: true - get: - tags: - - Fine-tuning - operationId: listFineTunes - summary: List your organization's fine-tuning jobs - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListFineTunesResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - deprecated: true - /fine-tunes/{fine_tune_id}: - get: - tags: - - Fine-tuning - operationId: retrieveFineTune - summary: |- - Gets info about the fine-tune job. - - [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - parameters: - - name: fine_tune_id - in: path - required: true - description: The ID of the fine-tune job - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/FineTune' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - deprecated: true - /fine-tunes/{fine_tune_id}/cancel: - post: - tags: - - Fine-tuning - operationId: cancelFineTune - summary: Immediately cancel a fine-tune job. - parameters: - - name: fine_tune_id - in: path - required: true - description: The ID of the fine-tune job to cancel - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/FineTune' - default: - description: An unexpected error response. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - deprecated: true - /fine-tunes/{fine_tune_id}/events: - get: - tags: - - Fine-tuning - operationId: listFineTuneEvents - summary: Get fine-grained status updates for a fine-tune job. - parameters: - - name: fine_tune_id - in: path - required: true - description: The ID of the fine-tune job to get events for. - schema: - type: string - - name: stream - in: query - required: false - description: |- - Whether to stream events for the fine-tune job. If set to true, events will be sent as - data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available. The stream will terminate with a `data: [DONE]` message when the - job is finished (succeeded, cancelled, or failed). - - If set to false, only events generated so far will be returned. - schema: - type: boolean - default: false - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListFineTuneEventsResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - deprecated: true - /fine_tuning/jobs: - post: - tags: - - Fine-tuning - operationId: createFineTuningJob - description: |- - Creates a job that fine-tunes a specified model from a given dataset. - - Response includes details of the enqueued job including job status and the name of the - fine-tuned models once complete. - - [Learn more about fine-tuning](/docs/guides/fine-tuning) - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/FineTuningJob' - default: - description: An unexpected error response. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateFineTuningJobRequest' - get: - tags: - - Fine-tuning - operationId: listPaginatedFineTuningJobs - parameters: - - name: after - in: query - required: false - description: Identifier for the last job from the previous pagination request. - schema: - type: string - - name: limit - in: query - required: false - description: Number of fine-tuning jobs to retrieve. - schema: - type: integer - format: int64 - default: 20 - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListPaginatedFineTuningJobsResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /fine_tuning/jobs/{fine_tuning_job_id}: - get: - tags: - - Fine-tuning - operationId: retrieveFineTuningJob - summary: |- - Get info about a fine-tuning job. - - [Learn more about fine-tuning](/docs/guides/fine-tuning) - parameters: - - name: fine_tuning_job_id - in: path - required: true - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/FineTuningJob' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /fine_tuning/jobs/{fine_tuning_job_id}/cancel: - post: - tags: - - Fine-tuning - operationId: cancelFineTuningJob - summary: Immediately cancel a fine-tune job. - parameters: - - name: fine_tuning_job_id - in: path - required: true - description: The ID of the fine-tuning job to cancel. - schema: - type: string - responses: - '200': - description: The request has succeeded. 
- content: - application/json: - schema: - $ref: '#/components/schemas/FineTuningJob' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /fine_tuning/jobs/{fine_tuning_job_id}/events: - get: - tags: - - Fine-tuning - operationId: listFineTuningEvents - summary: Get status updates for a fine-tuning job. - parameters: - - name: fine_tuning_job_id - in: path - required: true - description: The ID of the fine-tuning job to get events for. - schema: - type: string - - name: after - in: query - required: false - description: Identifier for the last event from the previous pagination request. - schema: - type: string - - name: limit - in: query - required: false - description: Number of events to retrieve. - schema: - type: integer - default: 20 - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListFineTuningJobEventsResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /images/edits: - post: - tags: - - Images - operationId: createImageEdit - summary: Creates an edited or extended image given an original image and a prompt. - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ImagesResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateImageEditRequestMultiPart' - /images/generations: - post: - tags: - - Images - operationId: createImage - summary: Creates an image given a prompt - parameters: [] - responses: - '200': - description: The request has succeeded. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ImagesResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateImageRequest' - /images/variations: - post: - tags: - - Images - operationId: createImageVariation - summary: Creates an edited or extended image given an original image and a prompt. - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ImagesResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateImageVariationRequestMultiPart' - /models: - get: - tags: - - Models - operationId: listModels - summary: |- - Lists the currently available models, and provides basic information about each one such as the - owner and availability. - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListModelsResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /models/{model}: - get: - tags: - - Models - operationId: retrieveModel - summary: |- - Retrieves a model instance, providing basic information about the model such as the owner and - permissioning. - parameters: - - name: model - in: path - required: true - description: The ID of the model to use for this request. - schema: - type: string - responses: - '200': - description: The request has succeeded. 
- content: - application/json: - schema: - $ref: '#/components/schemas/Model' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - delete: - tags: - - Models - operationId: deleteModel - summary: Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. - parameters: - - name: model - in: path - required: true - description: The model to delete - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteModelResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /moderations: - post: - tags: - - Moderations - operationId: createModeration - summary: Classifies if text violates OpenAI's Content Policy - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/CreateModerationResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateModerationRequest' - /threads: - post: - tags: - - Assistants - operationId: createThread - summary: Create a thread. - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ThreadObject' - default: - description: An unexpected error response. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateThreadRequest' - /threads/runs: - post: - tags: - - Assistants - operationId: createThreadAndRun - summary: Create a thread and run it in one request. - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateThreadAndRunRequest' - /threads/{thread_id}: - get: - tags: - - Assistants - operationId: getThread - summary: Retrieves a thread. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the thread to retrieve. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ThreadObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - post: - tags: - - Assistants - operationId: modifyThread - summary: Modifies a thread. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the thread to modify. Only the `metadata` can be modified. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ThreadObject' - default: - description: An unexpected error response. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyThreadRequest' - delete: - tags: - - Assistants - operationId: deleteThread - summary: Delete a thread. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the thread to delete. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteThreadResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /threads/{thread_id}/messages: - post: - tags: - - Assistants - operationId: createMessage - summary: Create a message. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the [thread](/docs/api-reference/threads) to create a message for. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/MessageObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateMessageRequest' - get: - tags: - - Assistants - operationId: listMessages - summary: Returns a list of messages for a given thread. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the [thread](/docs/api-reference/threads) the messages belong to. - schema: - type: string - - name: limit - in: query - required: false - description: |- - A limit on the number of objects to be returned. Limit can range between 1 and 100, and the - default is 20. 
- schema: - type: integer - format: int32 - default: 20 - - name: order - in: query - required: false - description: |- - Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` - for descending order. - schema: - $ref: '#/components/schemas/ListOrder' - default: desc - - name: after - in: query - required: false - description: |- - A cursor for use in pagination. `after` is an object ID that defines your place in the list. - For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the list. - schema: - type: string - - name: before - in: query - required: false - description: |- - A cursor for use in pagination. `before` is an object ID that defines your place in the list. - For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page of the list. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListMessagesResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /threads/{thread_id}/messages/{message_id}: - get: - tags: - - Assistants - operationId: getMessage - summary: Retrieve a message. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the [thread](/docs/api-reference/threads) to which this message belongs. - schema: - type: string - - name: message_id - in: path - required: true - description: The ID of the message to retrieve. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/MessageObject' - default: - description: An unexpected error response. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - post: - tags: - - Assistants - operationId: modifyMessage - summary: Modifies a message. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the thread to which this message belongs. - schema: - type: string - - name: message_id - in: path - required: true - description: The ID of the message to modify. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/MessageObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyMessageRequest' - /threads/{thread_id}/messages/{message_id}/files: - get: - tags: - - Assistants - operationId: listMessageFiles - summary: Returns a list of message files. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the thread that the message and files belong to. - schema: - type: string - - name: message_id - in: path - required: true - description: The ID of the message that the files belongs to. - schema: - type: string - - name: limit - in: query - required: false - description: |- - A limit on the number of objects to be returned. Limit can range between 1 and 100, and the - default is 20. - schema: - type: integer - format: int32 - default: 20 - - name: order - in: query - required: false - description: |- - Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` - for descending order. - schema: - $ref: '#/components/schemas/ListOrder' - default: desc - - name: after - in: query - required: false - description: |- - A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
- For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the list. - schema: - type: string - - name: before - in: query - required: false - description: |- - A cursor for use in pagination. `before` is an object ID that defines your place in the list. - For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page of the list. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListMessageFilesResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /threads/{thread_id}/messages/{message_id}/files/{file_id}: - get: - tags: - - Assistants - operationId: getMessageFile - summary: Retrieves a message file. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the thread to which the message and File belong. - schema: - type: string - - name: message_id - in: path - required: true - description: The ID of the message the file belongs to. - schema: - type: string - - name: file_id - in: path - required: true - description: The ID of the file being retrieved. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/MessageFileObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /threads/{thread_id}/runs: - post: - tags: - - Assistants - operationId: createRun - summary: Create a run. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the thread to run. 
- schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateRunRequest' - get: - tags: - - Assistants - operationId: listRuns - summary: Returns a list of runs belonging to a thread. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the thread the run belongs to. - schema: - type: string - - name: limit - in: query - required: false - description: |- - A limit on the number of objects to be returned. Limit can range between 1 and 100, and the - default is 20. - schema: - type: integer - format: int32 - default: 20 - - name: order - in: query - required: false - description: |- - Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` - for descending order. - schema: - $ref: '#/components/schemas/ListOrder' - default: desc - - name: after - in: query - required: false - description: |- - A cursor for use in pagination. `after` is an object ID that defines your place in the list. - For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the list. - schema: - type: string - - name: before - in: query - required: false - description: |- - A cursor for use in pagination. `before` is an object ID that defines your place in the list. - For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page of the list. - schema: - type: string - responses: - '200': - description: The request has succeeded. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ListRunsResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /threads/{thread_id}/runs/{run_id}: - get: - tags: - - Assistants - operationId: getRun - summary: Retrieves a run. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the [thread](/docs/api-reference/threads) that was run. - schema: - type: string - - name: run_id - in: path - required: true - description: The ID of the run to retrieve. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - post: - tags: - - Assistants - operationId: modifyRun - summary: Modifies a run. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the [thread](/docs/api-reference/threads) that was run. - schema: - type: string - - name: run_id - in: path - required: true - description: The ID of the run to modify. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyRunRequest' - /threads/{thread_id}/runs/{run_id}/cancel: - post: - tags: - - Assistants - operationId: cancelRun - summary: Cancels a run that is `in_progress`. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the thread to which this run belongs. 
- schema: - type: string - - name: run_id - in: path - required: true - description: The ID of the run to cancel. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /threads/{thread_id}/runs/{run_id}/steps: - get: - tags: - - Assistants - operationId: listRunSteps - summary: Returns a list of run steps belonging to a run. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the thread the run and run steps belong to. - schema: - type: string - - name: run_id - in: path - required: true - description: The ID of the run the run steps belong to. - schema: - type: string - - name: limit - in: query - required: false - description: |- - A limit on the number of objects to be returned. Limit can range between 1 and 100, and the - default is 20. - schema: - type: integer - format: int32 - default: 20 - - name: order - in: query - required: false - description: |- - Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` - for descending order. - schema: - $ref: '#/components/schemas/ListOrder' - default: desc - - name: after - in: query - required: false - description: |- - A cursor for use in pagination. `after` is an object ID that defines your place in the list. - For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the list. - schema: - type: string - - name: before - in: query - required: false - description: |- - A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
- For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page of the list. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListRunStepsResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /threads/{thread_id}/runs/{run_id}/steps/{step_id}: - get: - tags: - - Assistants - operationId: getRunStep - summary: Retrieves a run step. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the thread to which the run and run step belongs. - schema: - type: string - - name: run_id - in: path - required: true - description: The ID of the run to which the run step belongs. - schema: - type: string - - name: step_id - in: path - required: true - description: The ID of the run step to retrieve. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/RunStepObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: - post: - tags: - - Assistants - operationId: submitToolOuputsToRun - summary: |- - When a run has the `status: "requires_action"` and `required_action.type` is - `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once - they're all completed. All outputs must be submitted in a single request. - parameters: - - name: thread_id - in: path - required: true - description: The ID of the [thread](/docs/api-reference/threads) to which this run belongs. 
- schema: - type: string - - name: run_id - in: path - required: true - description: The ID of the run that requires the tool output submission. - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/SubmitToolOutputsRunRequest' -security: - - BearerAuth: [] -components: - schemas: - AssistantFileObject: - type: object - required: - - id - - object - - created_at - - assistant_id - properties: - id: - type: string - description: The identifier, which can be referenced in API endpoints. - object: - type: string - enum: - - assistant.file - description: The object type, which is always `assistant.file`. - created_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the assistant file was created. - assistant_id: - type: string - description: The assistant ID that the file is attached to. - description: A list of [Files](/docs/api-reference/files) attached to an `assistant`. - AssistantObject: - type: object - required: - - id - - object - - created_at - - name - - description - - model - - instructions - - tools - - file_ids - - metadata - properties: - id: - type: string - description: The identifier, which can be referenced in API endpoints. - object: - type: string - enum: - - assistant - description: The object type, which is always `assistant`. - created_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the assistant was created. - name: - type: string - nullable: true - maxLength: 256 - description: The name of the assistant. The maximum length is 256 characters. 
- description:
- type: string
- nullable: true
- maxLength: 512
- description: The description of the assistant. The maximum length is 512 characters.
- model:
- type: string
- description: |-
- ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to
- see all of your available models, or see our [Model overview](/docs/models/overview) for
- descriptions of them.
- instructions:
- type: string
- nullable: true
- maxLength: 32768
- description: The system instructions that the assistant uses. The maximum length is 32768 characters.
- tools:
- allOf:
- - $ref: '#/components/schemas/CreateAssistantRequestToolsItem'
- description: |-
- A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant.
- Tools can be of types `code_interpreter`, `retrieval`, or `function`.
- default: []
- file_ids:
- type: array
- items:
- type: string
- maxItems: 20
- description: |-
- A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a
- maximum of 20 files attached to the assistant. Files are ordered by their creation date in
- ascending order.
- default: []
- metadata:
- type: object
- additionalProperties:
- type: string
- nullable: true
- description: |-
- Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
- additional information about the object in a structured format. Keys can be a maximum of 64
- characters long and values can be a maximum of 512 characters long.
- x-oaiTypeLabel: map
- description: Represents an `assistant` that can call the model and use tools.
- AssistantToolsCode: - type: object - required: - - type - properties: - type: - type: string - enum: - - code_interpreter - description: 'The type of tool being defined: `code_interpreter`' - AssistantToolsFunction: - type: object - required: - - type - - function - properties: - type: - type: string - enum: - - function - description: 'The type of tool being defined: `function`' - function: - $ref: '#/components/schemas/FunctionObject' - AssistantToolsRetrieval: - type: object - required: - - type - properties: - type: - type: string - enum: - - retrieval - description: 'The type of tool being defined: `retrieval`' - AudioSegment: - type: object - required: - - id - - seek - - start - - end - - text - - tokens - - temperature - - avg_logprob - - compression_ratio - - no_speech_prob - properties: - id: - type: integer - format: int64 - description: The zero-based index of this segment. - seek: - type: integer - format: int64 - description: |- - The seek position associated with the processing of this audio segment. Seek positions are - expressed as hundredths of seconds. The model may process several segments from a single seek - position, so while the seek position will never represent a later time than the segment's - start, the segment's start may represent a significantly later time than the segment's - associated seek position. - start: - type: number - format: double - description: The time at which this segment started relative to the beginning of the audio. - end: - type: number - format: double - description: The time at which this segment ended relative to the beginning of the audio. - text: - type: string - description: The text that was part of this audio segment. - tokens: - allOf: - - $ref: '#/components/schemas/TokenArrayItem' - description: The token IDs matching the text in this audio segment. - temperature: - type: number - format: double - minimum: 0 - maximum: 1 - description: The temperature score associated with this audio segment. 
- avg_logprob: - type: number - format: double - description: The average log probability associated with this audio segment. - compression_ratio: - type: number - format: double - description: The compression ratio of this audio segment. - no_speech_prob: - type: number - format: double - description: The probability of no speech detection within this audio segment. - ChatCompletionFunctionCallOption: - type: object - required: - - name - properties: - name: - type: string - description: The name of the function to call. - description: |- - Specifying a particular function via `{"name": "my_function"}` forces the model to call that - function. - ChatCompletionFunctions: - type: object - required: - - name - properties: - description: - type: string - description: |- - A description of what the function does, used by the model to choose when and how to call the - function. - name: - type: string - description: |- - The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and - dashes, with a maximum length of 64. - parameters: - $ref: '#/components/schemas/FunctionParameters' - deprecated: true - ChatCompletionMessageToolCall: - type: object - required: - - id - - type - - function - properties: - id: - type: string - description: The ID of the tool call. - type: - type: string - enum: - - function - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: |- - The arguments to call the function with, as generated by the model in JSON format. Note that - the model does not always generate valid JSON, and may hallucinate parameters not defined by - your function schema. Validate the arguments in your code before calling your function. - required: - - name - - arguments - description: The function that the model called. 
- ChatCompletionMessageToolCallsItem:
- type: array
- items:
- $ref: '#/components/schemas/ChatCompletionMessageToolCall'
- description: The tool calls generated by the model, such as function calls.
- ChatCompletionNamedToolChoice:
- type: object
- required:
- - type
- - function
- properties:
- type:
- type: string
- enum:
- - function
- description: The type of the tool. Currently, only `function` is supported.
- function:
- type: object
- properties:
- name:
- type: string
- description: The name of the function to call.
- required:
- - name
- description: Specifies a tool the model should use. Use to force the model to call a specific function.
- ChatCompletionRequestAssistantMessage:
- type: object
- required:
- - role
- properties:
- content:
- type: string
- nullable: true
- description: |-
- The contents of the assistant message. Required unless `tool_calls` or `function_call` is
- specified.
- role:
- type: string
- enum:
- - assistant
- description: The role of the messages author, in this case `assistant`.
- name:
- type: string
- description: |-
- An optional name for the participant. Provides the model information to differentiate between
- participants of the same role.
- tool_calls:
- $ref: '#/components/schemas/ChatCompletionMessageToolCallsItem'
- function_call:
- type: object
- properties:
- arguments:
- type: string
- description: |-
- The arguments to call the function with, as generated by the model in JSON format. Note that
- the model does not always generate valid JSON, and may hallucinate parameters not defined by
- your function schema. Validate the arguments in your code before calling your function.
- name:
- type: string
- description: The name of the function to call.
- required:
- - arguments
- - name
- description: |-
- Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be
- called, as generated by the model.
- deprecated: true - ChatCompletionRequestFunctionMessage: - type: object - required: - - role - - content - - name - properties: - role: - type: string - enum: - - function - description: The role of the messages author, in this case `function`. - content: - type: string - nullable: true - description: The contents of the function message. - name: - type: string - description: The name of the function to call. - ChatCompletionRequestMessage: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' - - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' - - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' - - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' - - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' - x-oaiExpandable: true - ChatCompletionRequestMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartImage' - x-oaiExpandable: true - ChatCompletionRequestMessageContentPartImage: - type: object - required: - - type - - image_url - properties: - type: - type: string - enum: - - image_url - description: The type of the content part. - image_url: - type: object - properties: - url: - anyOf: - - type: string - format: uri - - type: string - description: Either a URL of the image or the base64 encoded image data. - detail: - type: string - enum: - - auto - - low - - high - description: |- - Specifies the detail level of the image. Learn more in the - [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). - default: auto - required: - - url - ChatCompletionRequestMessageContentPartText: - type: object - required: - - type - - text - properties: - type: - type: string - enum: - - text - - json_object - description: The type of the content part. - text: - type: string - description: The text content. 
- ChatCompletionRequestMessageContentParts:
- type: array
- items:
- $ref: '#/components/schemas/ChatCompletionRequestMessageContentPart'
- minItems: 1
- ChatCompletionRequestSystemMessage:
- type: object
- required:
- - content
- - role
- properties:
- content:
- type: string
- description: The contents of the system message.
- x-oaiExpandable: true
- role:
- type: string
- enum:
- - system
- description: The role of the messages author, in this case `system`.
- name:
- type: string
- description: |-
- An optional name for the participant. Provides the model information to differentiate between
- participants of the same role.
- ChatCompletionRequestToolMessage:
- type: object
- required:
- - role
- - content
- - tool_call_id
- properties:
- role:
- type: string
- enum:
- - tool
- description: The role of the messages author, in this case `tool`.
- content:
- type: string
- description: The contents of the tool message.
- tool_call_id:
- type: string
- description: Tool call that this message is responding to.
- ChatCompletionRequestUserMessage:
- type: object
- required:
- - content
- - role
- properties:
- content:
- allOf:
- - $ref: '#/components/schemas/ChatCompletionRequestUserMessageContent'
- description: The contents of the user message.
- x-oaiExpandable: true
- role:
- type: string
- enum:
- - user
- description: The role of the messages author, in this case `user`.
- name:
- type: string
- description: |-
- An optional name for the participant. Provides the model information to differentiate between
- participants of the same role.
- ChatCompletionRequestUserMessageContent:
- oneOf:
- - type: string
- - $ref: '#/components/schemas/ChatCompletionRequestMessageContentParts'
- ChatCompletionResponseMessage:
- type: object
- required:
- - content
- - role
- properties:
- content:
- type: string
- nullable: true
- description: The contents of the message.
- tool_calls: - $ref: '#/components/schemas/ChatCompletionMessageToolCallsItem' - role: - type: string - enum: - - assistant - description: The role of the author of this message. - function_call: - type: object - properties: - arguments: - type: string - description: |- - The arguments to call the function with, as generated by the model in JSON format. Note that - the model does not always generate valid JSON, and may hallucinate parameters not defined by - your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - required: - - arguments - - name - description: Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. - deprecated: true - ChatCompletionTokenLogprob: - type: object - required: - - token - - logprob - - bytes - - top_logprobs - properties: - token: - type: string - description: The token. - logprob: - type: number - format: double - description: The log probability of this token. - bytes: - type: array - items: - type: integer - format: int64 - nullable: true - description: |- - A list of integers representing the UTF-8 bytes representation of the token. Useful in - instances where characters are represented by multiple tokens and their byte representations - must be combined to generate the correct text representation. Can be `null` if there is no - bytes representation for the token. - top_logprobs: - type: array - items: - type: object - properties: - token: - type: string - description: The token. - logprob: - type: number - format: double - description: The log probability of this token. - bytes: - type: array - items: - type: integer - format: int64 - nullable: true - description: |- - A list of integers representing the UTF-8 bytes representation of the token. 
Useful in
- instances where characters are represented by multiple tokens and their byte representations
- must be combined to generate the correct text representation. Can be `null` if there is no
- bytes representation for the token.
- required:
- - token
- - logprob
- - bytes
- description: |-
- List of the most likely tokens and their log probability, at this token position. In rare
- cases, there may be fewer than the number of requested `top_logprobs` returned.
- ChatCompletionTool:
- type: object
- required:
- - type
- - function
- properties:
- type:
- type: string
- enum:
- - function
- description: The type of the tool. Currently, only `function` is supported.
- function:
- $ref: '#/components/schemas/FunctionObject'
- ChatCompletionToolChoiceOption:
- oneOf:
- - type: string
- enum:
- - none
- - auto
- - $ref: '#/components/schemas/ChatCompletionNamedToolChoice'
- description: |-
- Controls which (if any) function is called by the model. `none` means the model will not call a
- function and instead generates a message. `auto` means the model can pick between generating a
- message or calling a function. Specifying a particular function via
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that
- function.
-
- `none` is the default when no functions are present. `auto` is the default if functions are
- present.
- x-oaiExpandable: true
- CompletionUsage:
- type: object
- required:
- - prompt_tokens
- - completion_tokens
- - total_tokens
- properties:
- prompt_tokens:
- type: integer
- format: int64
- description: Number of tokens in the prompt.
- completion_tokens:
- type: integer
- format: int64
- description: Number of tokens in the generated completion.
- total_tokens:
- type: integer
- format: int64
- description: Total number of tokens used in the request (prompt + completion).
- description: Usage statistics for the completion request.
- CreateAssistantFileRequest: - type: object - required: - - file_id - properties: - file_id: - type: string - description: |- - A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should - use. Useful for tools like `retrieval` and `code_interpreter` that can access files. - CreateAssistantRequest: - type: object - required: - - model - properties: - model: - type: string - description: |- - ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to - see all of your available models, or see our [Model overview](/docs/models/overview) for - descriptions of them. - name: - type: string - nullable: true - maxLength: 256 - description: The name of the assistant. The maximum length is 256 characters. - description: - type: string - nullable: true - maxLength: 512 - description: The description of the assistant. The maximum length is 512 characters. - instructions: - type: string - nullable: true - maxLength: 32768 - description: The system instructions that the assistant uses. The maximum length is 32768 characters. - tools: - allOf: - - $ref: '#/components/schemas/CreateAssistantRequestToolsItem' - description: |- - A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. - Tools can be of types `code_interpreter`, `retrieval`, or `function`. - default: [] - file_ids: - type: array - items: - type: string - maxItems: 20 - description: |- - A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a - maximum of 20 files attached to the assistant. Files are ordered by their creation date in - ascending order. - default: [] - metadata: - type: object - additionalProperties: - type: string - nullable: true - description: |- - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - additional information about the object in a structured format. 
Keys can be a maximum of 64 - characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - CreateAssistantRequestTool: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsRetrieval' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - CreateAssistantRequestToolsItem: - type: array - items: - $ref: '#/components/schemas/CreateAssistantRequestTool' - maxItems: 128 - CreateChatCompletionRequest: - type: object - required: - - messages - - model - properties: - messages: - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestMessage' - minItems: 1 - description: |- - A list of messages comprising the conversation so far. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). - model: - anyOf: - - type: string - - type: string - enum: - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0301 - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-16k-0613 - description: |- - ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) - table for details on which models work with the Chat API. - x-oaiTypeLabel: string - frequency_penalty: - type: number - format: double - nullable: true - minimum: -2 - maximum: 2 - description: |- - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing - frequency in the text so far, decreasing the model's likelihood to repeat the same line - verbatim. 
- - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - default: 0 - logit_bias: - type: object - additionalProperties: - type: integer - format: int64 - nullable: true - description: |- - Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an - associated bias value from -100 to 100. Mathematically, the bias is added to the logits - generated by the model prior to sampling. The exact effect will vary per model, but values - between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 - should result in a ban or exclusive selection of the relevant token. - x-oaiTypeLabel: map - default: null - logprobs: - type: boolean - nullable: true - description: |- - Whether to return log probabilities of the output tokens or not. If true, returns the log - probabilities of each output token returned in the `content` of `message`. This option is - currently not available on the `gpt-4-vision-preview` model. - default: false - top_logprobs: - type: integer - format: int64 - nullable: true - minimum: 0 - maximum: 5 - description: |- - An integer between 0 and 5 specifying the number of most likely tokens to return at each token - position, each with an associated log probability. `logprobs` must be set to `true` if this - parameter is used. - max_tokens: - type: integer - format: int64 - nullable: true - minimum: 0 - description: |- - The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. - - The total length of input tokens and generated tokens is limited by the model's context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. 
- default: 16 - n: - type: integer - format: int64 - nullable: true - minimum: 1 - maximum: 128 - description: |- - How many chat completion choices to generate for each input message. Note that you will be - charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to - minimize costs. - default: 1 - presence_penalty: - type: number - format: double - nullable: true - minimum: -2 - maximum: 2 - description: |- - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear - in the text so far, increasing the model's likelihood to talk about new topics. - - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - default: 0 - response_format: - type: object - properties: - type: - type: string - enum: - - text - - json_object - description: Must be one of `text` or `json_object`. - default: text - description: |- - An object specifying the format that the model must output. Compatible with - [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the - model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON - yourself via a system or user message. Without this, the model may generate an unending stream - of whitespace until the generation reaches the token limit, resulting in a long-running and - seemingly "stuck" request. Also note that the message content may be partially cut off if - `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the - conversation exceeded the max context length. - seed: - type: integer - format: int64 - nullable: true - minimum: -9223372036854776000 - maximum: 9223372036854776000 - description: |- - This feature is in Beta. 
- - If specified, our system will make a best effort to sample deterministically, such that - repeated requests with the same `seed` and parameters should return the same result. - - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response - parameter to monitor changes in the backend. - x-oaiMeta: - beta: true - stop: - oneOf: - - $ref: '#/components/schemas/Stop' - nullable: true - description: Up to 4 sequences where the API will stop generating further tokens. - default: null - stream: - type: boolean - nullable: true - description: |- - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. - [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). - default: false - temperature: - type: number - format: double - nullable: true - minimum: 0 - maximum: 2 - description: |- - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. - default: 1 - top_p: - type: number - format: double - nullable: true - minimum: 0 - maximum: 1 - description: |- - An alternative to sampling with temperature, called nucleus sampling, where the model considers - the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - default: 1 - tools: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTool' - description: |- - A list of tools the model may call. Currently, only functions are supported as a tool. 
Use this - to provide a list of functions the model may generate JSON inputs for. - tool_choice: - $ref: '#/components/schemas/ChatCompletionToolChoiceOption' - user: - allOf: - - $ref: '#/components/schemas/User' - description: |- - A unique identifier representing your end-user, which can help OpenAI to monitor and detect - abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - function_call: - anyOf: - - type: string - enum: - - none - - auto - - $ref: '#/components/schemas/ChatCompletionFunctionCallOption' - description: |- - Deprecated in favor of `tool_choice`. - - Controls which (if any) function is called by the model. `none` means the model will not call a - function and instead generates a message. `auto` means the model can pick between generating a - message or calling a function. Specifying a particular function via `{"name": "my_function"}` - forces the model to call that function. - - `none` is the default when no functions are present. `auto` is the default if functions are - present. - deprecated: true - x-oaiExpandable: true - functions: - type: array - items: - $ref: '#/components/schemas/ChatCompletionFunctions' - minItems: 1 - maxItems: 128 - description: |- - Deprecated in favor of `tools`. - - A list of functions the model may generate JSON inputs for. - deprecated: true - CreateChatCompletionResponse: - type: object - required: - - id - - choices - - created - - model - - object - properties: - id: - type: string - description: A unique identifier for the chat completion. - choices: - type: array - items: - type: object - properties: - finish_reason: - type: string - enum: - - stop - - length - - tool_calls - - content_filter - - function_call - description: |- - The reason the model stopped generating tokens. 
This will be `stop` if the model hit a - natural stop point or a provided stop sequence, `length` if the maximum number of tokens - specified in the request was reached, `content_filter` if content was omitted due to a flag - from our content filters, `tool_calls` if the model called a tool, or `function_call` - (deprecated) if the model called a function. - index: - type: integer - format: int64 - description: The index of the choice in the list of choices. - message: - $ref: '#/components/schemas/ChatCompletionResponseMessage' - logprobs: - type: object - properties: - content: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - nullable: true - required: - - content - nullable: true - description: Log probability information for the choice. - required: - - finish_reason - - index - - message - - logprobs - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. - created: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. - system_fingerprint: - type: string - description: |- - This fingerprint represents the backend configuration that the model runs with. - - Can be used in conjunction with the `seed` request parameter to understand when backend changes - have been made that might impact determinism. - object: - type: string - enum: - - chat.completion - description: The object type, which is always `chat.completion`. - usage: - $ref: '#/components/schemas/CompletionUsage' - description: Represents a chat completion response returned by model, based on the provided input. - CreateCompletionRequest: - type: object - required: - - model - - prompt - properties: - model: - anyOf: - - type: string - - type: string - enum: - - gpt-3.5-turbo-instruct - - davinci-002 - - babbage-002 - description: |- - ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to - see all of your available models, or see our [Model overview](/docs/models/overview) for - descriptions of them. - x-oaiTypeLabel: string - prompt: - oneOf: - - $ref: '#/components/schemas/Prompt' - nullable: true - description: |- - The prompt(s) to generate completions for, encoded as a string, array of strings, array of - tokens, or array of token arrays. - - Note that <|endoftext|> is the document separator that the model sees during training, so if a - prompt is not specified the model will generate as if from the beginning of a new document. - default: <|endoftext|> - best_of: - type: integer - format: int64 - nullable: true - minimum: 0 - maximum: 20 - description: |- - Generates `best_of` completions server-side and returns the "best" (the one with the highest - log probability per token). Results cannot be streamed. - - When used with `n`, `best_of` controls the number of candidate completions and `n` specifies - how many to return – `best_of` must be greater than `n`. - - **Note:** Because this parameter generates many completions, it can quickly consume your token - quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - default: 1 - echo: - type: boolean - nullable: true - description: Echo back the prompt in addition to the completion - default: false - frequency_penalty: - type: number - format: double - nullable: true - minimum: -2 - maximum: 2 - description: |- - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing - frequency in the text so far, decreasing the model's likelihood to repeat the same line - verbatim. 
- - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - default: 0 - logit_bias: - type: object - additionalProperties: - type: integer - format: int64 - nullable: true - description: |- - Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an - associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) - to convert text to token IDs. Mathematically, the bias is added to the logits generated by the - model prior to sampling. The exact effect will vary per model, but values between -1 and 1 - should decrease or increase likelihood of selection; values like -100 or 100 should result in a - ban or exclusive selection of the relevant token. - - As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being - generated. - x-oaiTypeLabel: map - default: null - logprobs: - type: integer - format: int64 - nullable: true - minimum: 0 - maximum: 5 - description: |- - Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. - For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The - API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` - elements in the response. - - The maximum value for `logprobs` is 5. - default: null - max_tokens: - type: integer - format: int64 - nullable: true - minimum: 0 - description: |- - The maximum number of [tokens](/tokenizer) to generate in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - for counting tokens. 
- default: 16 - n: - type: integer - format: int64 - nullable: true - minimum: 1 - maximum: 128 - description: |- - How many completions to generate for each prompt. - - **Note:** Because this parameter generates many completions, it can quickly consume your token - quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - default: 1 - presence_penalty: - type: number - format: double - nullable: true - minimum: -2 - maximum: 2 - description: |- - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear - in the text so far, increasing the model's likelihood to talk about new topics. - - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - default: 0 - seed: - type: integer - format: int64 - nullable: true - minimum: -9223372036854776000 - maximum: 9223372036854776000 - description: |- - If specified, our system will make a best effort to sample deterministically, such that - repeated requests with the same `seed` and parameters should return the same result. - - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response - parameter to monitor changes in the backend. - x-oaiMeta: - beta: true - stop: - oneOf: - - $ref: '#/components/schemas/Stop' - nullable: true - description: Up to 4 sequences where the API will stop generating further tokens. - default: null - stream: - type: boolean - nullable: true - description: |- - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). 
- default: false - suffix: - type: string - nullable: true - description: The suffix that comes after a completion of inserted text. - default: null - temperature: - type: number - format: double - nullable: true - minimum: 0 - maximum: 2 - description: |- - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. - default: 1 - top_p: - type: number - format: double - nullable: true - minimum: 0 - maximum: 1 - description: |- - An alternative to sampling with temperature, called nucleus sampling, where the model considers - the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - default: 1 - user: - allOf: - - $ref: '#/components/schemas/User' - description: |- - A unique identifier representing your end-user, which can help OpenAI to monitor and detect - abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - CreateCompletionResponse: - type: object - required: - - id - - choices - - created - - model - - object - properties: - id: - type: string - description: A unique identifier for the completion. 
- choices: - type: array - items: - type: object - properties: - index: - type: integer - format: int64 - text: - type: string - logprobs: - type: object - properties: - tokens: - type: array - items: - type: string - token_logprobs: - type: array - items: - type: number - format: double - top_logprobs: - type: array - items: - type: object - additionalProperties: - type: integer - format: int64 - text_offset: - type: array - items: - type: integer - format: int64 - required: - - tokens - - token_logprobs - - top_logprobs - - text_offset - nullable: true - finish_reason: - type: string - enum: - - stop - - length - - tool_calls - - content_filter - - function_call - description: |- - The reason the model stopped generating tokens. This will be `stop` if the model hit a - natural stop point or a provided stop sequence, `length` if the maximum number of tokens - specified in the request was reached, or `content_filter` if content was omitted due to a - flag from our content filters. - required: - - index - - text - - logprobs - - finish_reason - description: The list of completion choices the model generated for the input. - created: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) of when the completion was created. - model: - type: string - description: The model used for the completion. - system_fingerprint: - type: string - description: |- - This fingerprint represents the backend configuration that the model runs with. - - Can be used in conjunction with the `seed` request parameter to understand when backend changes - have been made that might impact determinism. - object: - type: string - enum: - - text_completion - description: The object type, which is always `text_completion`. - usage: - allOf: - - $ref: '#/components/schemas/CompletionUsage' - description: Usage statistics for the completion request. 
- description: |- - Represents a completion response from the API. Note: both the streamed and non-streamed response - objects share the same shape (unlike the chat endpoint). - CreateEmbeddingRequest: - type: object - required: - - input - - model - properties: - input: - allOf: - - $ref: '#/components/schemas/CreateEmbeddingRequestInput' - description: |- - Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a - single request, pass an array of strings or array of token arrays. Each input must not exceed - the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an - empty string. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - for counting tokens. - x-oaiExpandable: true - model: - anyOf: - - type: string - - type: string - enum: - - text-embedding-ada-002 - - text-embedding-3-small - - text-embedding-3-large - description: |- - ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to - see all of your available models, or see our [Model overview](/docs/models/overview) for - descriptions of them. - x-oaiTypeLabel: string - encoding_format: - type: string - enum: - - float - - base64 - description: |- - The format to return the embeddings in. Can be either `float` or - [`base64`](https://pypi.org/project/pybase64/). - default: float - dimensions: - type: integer - format: int64 - minimum: 1 - description: |- - The number of dimensions the resulting output embeddings should have. Only supported in - `text-embedding-3` and later models. - user: - allOf: - - $ref: '#/components/schemas/User' - description: |- - A unique identifier representing your end-user, which can help OpenAI to monitor and detect - abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). 
- CreateEmbeddingRequestInput: - oneOf: - - type: string - - type: array - items: - type: string - - $ref: '#/components/schemas/TokenArrayItem' - - $ref: '#/components/schemas/TokenArrayArray' - CreateEmbeddingResponse: - type: object - required: - - data - - model - - object - - usage - properties: - data: - type: array - items: - $ref: '#/components/schemas/Embedding' - description: The list of embeddings generated by the model. - model: - type: string - description: The name of the model used to generate the embedding. - object: - type: string - enum: - - list - description: The object type, which is always "list". - usage: - type: object - properties: - prompt_tokens: - type: integer - format: int64 - description: The number of tokens used by the prompt. - total_tokens: - type: integer - format: int64 - description: The total number of tokens used by the request. - required: - - prompt_tokens - - total_tokens - description: The usage information for the request. - CreateFileRequestMultiPart: - type: object - required: - - file - - purpose - properties: - file: - type: string - format: binary - description: The file object (not file name) to be uploaded. - purpose: - type: string - enum: - - fine-tune - - assistants - description: |- - The intended purpose of the uploaded file. Use "fine-tune" for - [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for - [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This - allows us to validate the format of the uploaded file is correct for fine-tuning. - CreateFineTuneRequest: - type: object - required: - - training_file - properties: - training_file: - type: string - description: |- - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/upload) for how to upload a file. - - Your dataset must be formatted as a JSONL file, where each training example is a JSON object - with the keys "prompt" and "completion". 
Additionally, you must upload your file with the - purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - details. - validation_file: - type: string - nullable: true - description: |- - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation metrics periodically during - fine-tuning. These metrics can be viewed in the - [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - Your train and validation data should be mutually exclusive. - - Your dataset must be formatted as a JSONL file, where each validation example is a JSON object - with the keys "prompt" and "completion". Additionally, you must upload your file with the - purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - details. - model: - anyOf: - - type: string - - type: string - enum: - - ada - - babbage - - curie - - davinci - nullable: true - description: |- - The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", - "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more - about these models, see the [Models](/docs/models) documentation. - x-oaiTypeLabel: string - n_epochs: - type: integer - format: int64 - nullable: true - description: |- - The number of epochs to train the model for. An epoch refers to one full cycle through the - training dataset. - default: 4 - batch_size: - type: integer - format: int64 - nullable: true - description: |- - The batch size to use for training. The batch size is the number of training examples used to - train a single forward and backward pass. 
- - By default, the batch size will be dynamically configured to be ~0.2% of the number of examples - in the training set, capped at 256 - in general, we've found that larger batch sizes tend to - work better for larger datasets. - default: null - learning_rate_multiplier: - type: number - format: double - nullable: true - description: |- - The learning rate multiplier to use for training. The fine-tuning learning rate is the original - learning rate used for pretraining multiplied by this value. - - By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final - `batch_size` (larger learning rates tend to perform better with larger batch sizes). We - recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best - results. - default: null - prompt_loss_rate: - type: number - format: double - nullable: true - description: |- - The weight to use for loss on the prompt tokens. This controls how much the model tries to - learn to generate the prompt (as compared to the completion which always has a weight of 1.0), - and can add a stabilizing effect to training when completions are short. - - If prompts are extremely long (relative to completions), it may make sense to reduce this - weight so as to avoid over-prioritizing learning the prompt. - default: 0.01 - compute_classification_metrics: - type: boolean - nullable: true - description: |- - If set, we calculate classification-specific metrics such as accuracy and F-1 score using the - validation set at the end of every epoch. These metrics can be viewed in the - [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - - In order to compute classification metrics, you must provide a `validation_file`. Additionally, - you must specify `classification_n_classes` for multiclass classification or - `classification_positive_class` for binary classification. 
- default: false - classification_n_classes: - type: integer - format: int64 - nullable: true - description: |- - The number of classes in a classification task. - - This parameter is required for multiclass classification. - default: null - classification_positive_class: - type: string - nullable: true - description: |- - The positive class in binary classification. - - This parameter is needed to generate precision, recall, and F1 metrics when doing binary - classification. - default: null - classification_betas: - type: array - items: - type: number - format: double - nullable: true - description: |- - If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score - is a generalization of F-1 score. This is only used for binary classification. - - With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger - beta score puts more weight on recall and less on precision. A smaller beta score puts more - weight on precision and less on recall. - default: null - suffix: - oneOf: - - $ref: '#/components/schemas/SuffixString' - nullable: true - description: |- - A string of up to 18 characters that will be added to your fine-tuned model name. - - For example, a `suffix` of "custom-model-name" would produce a model name like - `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. - default: null - CreateFineTuningJobRequest: - type: object - required: - - training_file - - model - properties: - training_file: - type: string - description: |- - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/upload) for how to upload a file. - - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with - the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. 
- validation_file: - type: string - nullable: true - description: |- - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation metrics periodically during - fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should - not be present in both train and validation files. - - Your dataset must be formatted as a JSONL file. You must upload your file with the purpose - `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - model: - anyOf: - - type: string - - type: string - enum: - - babbage-002 - - davinci-002 - - gpt-3.5-turbo - description: |- - The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - x-oaiTypeLabel: string - hyperparameters: - type: object - properties: - n_epochs: - anyOf: - - type: string - enum: - - auto - - low - - high - - $ref: '#/components/schemas/NEpochs' - description: |- - The number of epochs to train the model for. An epoch refers to one full cycle through the - training dataset. - default: auto - description: The hyperparameters used for the fine-tuning job. - suffix: - oneOf: - - $ref: '#/components/schemas/SuffixString' - nullable: true - description: |- - A string of up to 18 characters that will be added to your fine-tuned model name. - - For example, a `suffix` of "custom-model-name" would produce a model name like - `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. - default: null - CreateImageEditRequestMultiPart: - type: object - required: - - image - - prompt - properties: - image: - type: string - format: binary - description: |- - The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not - provided, image must have transparency, which will be used as the mask. - prompt: - type: string - maxLength: 1000 - description: A text description of the desired image(s). 
The maximum length is 1000 characters. - mask: - type: string - format: binary - description: |- - An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where - `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions - as `image`. - model: - anyOf: - - type: string - - type: string - enum: - - dall-e-2 - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - x-oaiTypeLabel: string - default: dall-e-2 - n: - oneOf: - - $ref: '#/components/schemas/ImagesN' - nullable: true - description: The number of images to generate. Must be between 1 and 10. - default: 1 - size: - type: string - enum: - - 256x256 - - 512x512 - - 1024x1024 - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - default: 1024x1024 - response_format: - type: string - enum: - - url - - b64_json - nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. - default: url - user: - allOf: - - $ref: '#/components/schemas/User' - description: |- - A unique identifier representing your end-user, which can help OpenAI to monitor and detect - abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - CreateImageRequest: - type: object - required: - - prompt - properties: - prompt: - type: string - description: |- - A text description of the desired image(s). The maximum length is 1000 characters for - `dall-e-2` and 4000 characters for `dall-e-3`. - model: - anyOf: - - type: string - - type: string - enum: - - dall-e-2 - - dall-e-3 - description: The model to use for image generation. - x-oaiTypeLabel: string - default: dall-e-2 - n: - oneOf: - - $ref: '#/components/schemas/ImagesN' - nullable: true - description: |- - The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is - supported. 
- default: 1 - quality: - type: string - enum: - - standard - - hd - nullable: true - description: |- - The quality of the image that will be generated. `hd` creates images with finer details and - greater consistency across the image. This param is only supported for `dall-e-3`. - default: standard - response_format: - type: string - enum: - - url - - b64_json - nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. - default: url - size: - type: string - enum: - - 256x256 - - 512x512 - - 1024x1024 - - 1792x1024 - - 1024x1792 - nullable: true - description: |- - The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for - `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. - default: 1024x1024 - style: - type: string - enum: - - vivid - - natural - nullable: true - description: |- - The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model - to lean towards generating hyper-real and dramatic images. Natural causes the model to produce - more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. - default: vivid - user: - allOf: - - $ref: '#/components/schemas/User' - description: |- - A unique identifier representing your end-user, which can help OpenAI to monitor and detect - abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - CreateImageVariationRequestMultiPart: - type: object - required: - - image - properties: - image: - type: string - format: binary - description: |- - The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, - and square. - model: - anyOf: - - type: string - - type: string - enum: - - dall-e-2 - description: The model to use for image generation. Only `dall-e-2` is supported at this time. 
- x-oaiTypeLabel: string - default: dall-e-2 - n: - oneOf: - - $ref: '#/components/schemas/ImagesN' - nullable: true - description: The number of images to generate. Must be between 1 and 10. - default: 1 - response_format: - type: string - enum: - - url - - b64_json - - b64_json - nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. - default: url - size: - type: string - enum: - - 256x256 - - 512x512 - - 1024x1024 - - 512x512 - - 1024x1024 - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - default: 1024x1024 - user: - allOf: - - $ref: '#/components/schemas/User' - description: |- - A unique identifier representing your end-user, which can help OpenAI to monitor and detect - abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - CreateMessageRequest: - type: object - required: - - role - - content - properties: - role: - type: string - enum: - - user - - assistant - description: The role of the entity that is creating the message. Currently only `user` is supported. - content: - type: string - minLength: 1 - maxLength: 32768 - description: The content of the message. - file_ids: - type: array - items: - type: string - minItems: 1 - maxItems: 10 - description: |- - A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a - maximum of 10 files attached to a message. Useful for tools like `retrieval` and - `code_interpreter` that can access and use files. - default: [] - metadata: - type: object - additionalProperties: - type: string - nullable: true - description: |- - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - additional information about the object in a structured format. Keys can be a maximum of 64 - characters long and values can be a maxium of 512 characters long. 
- x-oaiTypeLabel: map - CreateModerationRequest: - type: object - required: - - input - properties: - input: - allOf: - - $ref: '#/components/schemas/CreateModerationRequestInput' - description: The input text to classify - model: - anyOf: - - type: string - - type: string - enum: - - text-moderation-latest - - text-moderation-stable - description: |- - Two content moderations models are available: `text-moderation-stable` and - `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically - upgraded over time. This ensures you are always using our most accurate model. If you use - `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy - of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. - x-oaiTypeLabel: string - default: text-moderation-latest - CreateModerationRequestInput: - oneOf: - - type: string - - type: array - items: - type: string - CreateModerationResponse: - type: object - required: - - id - - model - - results - properties: - id: - type: string - description: The unique identifier for the moderation request. - model: - type: string - description: The model used to generate the moderation results. - results: - type: array - items: - type: object - properties: - flagged: - type: boolean - description: Whether the content violates [OpenAI's usage policies](/policies/usage-policies). - categories: - type: object - properties: - hate: - type: boolean - description: |- - Content that expresses, incites, or promotes hate based on race, gender, ethnicity, - religion, nationality, sexual orientation, disability status, or caste. Hateful content - aimed at non-protected groups (e.g., chess players) is harrassment. 
- hate/threatening: - type: boolean - description: |- - Hateful content that also includes violence or serious harm towards the targeted group - based on race, gender, ethnicity, religion, nationality, sexual orientation, disability - status, or caste. - harassment: - type: boolean - description: Content that expresses, incites, or promotes harassing language towards any target. - harassment/threatening: - type: boolean - description: Harassment content that also includes violence or serious harm towards any target. - self-harm: - type: boolean - description: |- - Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, - and eating disorders. - self-harm/intent: - type: boolean - description: |- - Content where the speaker expresses that they are engaging or intend to engage in acts of - self-harm, such as suicide, cutting, and eating disorders. - self-harm/instructions: - type: boolean - description: |- - Content that encourages performing acts of self-harm, such as suicide, cutting, and eating - disorders, or that gives instructions or advice on how to commit such acts. - sexual: - type: boolean - description: |- - Content meant to arouse sexual excitement, such as the description of sexual activity, or - that promotes sexual services (excluding sex education and wellness). - sexual/minors: - type: boolean - description: Sexual content that includes an individual who is under 18 years old. - violence: - type: boolean - description: Content that depicts death, violence, or physical injury. - violence/graphic: - type: boolean - description: Content that depicts death, violence, or physical injury in graphic detail. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - description: A list of the categories, and whether they are flagged or not. 
- category_scores: - type: object - properties: - hate: - type: number - format: double - description: The score for the category 'hate'. - hate/threatening: - type: number - format: double - description: The score for the category 'hate/threatening'. - harassment: - type: number - format: double - description: The score for the category 'harassment'. - harassment/threatening: - type: number - format: double - description: The score for the category 'harassment/threatening'. - self-harm: - type: number - format: double - description: The score for the category 'self-harm'. - self-harm/intent: - type: number - format: double - description: The score for the category 'self-harm/intent'. - self-harm/instructions: - type: number - format: double - description: The score for the category 'self-harm/instructive'. - sexual: - type: number - format: double - description: The score for the category 'sexual'. - sexual/minors: - type: number - format: double - description: The score for the category 'sexual/minors'. - violence: - type: number - format: double - description: The score for the category 'violence'. - violence/graphic: - type: number - format: double - description: The score for the category 'violence/graphic'. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - description: A list of the categories along with their scores as predicted by model. - required: - - flagged - - categories - - category_scores - description: A list of moderation objects. - description: Represents policy compliance report by OpenAI's content moderation model against a given input. - CreateRunRequest: - type: object - required: - - assistant_id - properties: - assistant_id: - type: string - description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. 
- model: - type: string - nullable: true - description: |- - The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value - is provided here, it will override the model associated with the assistant. If not, the model - associated with the assistant will be used. - instructions: - type: string - nullable: true - description: |- - Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. - This is useful for modifying the behavior on a per-run basis. - additional_instructions: - type: string - nullable: true - description: |- - Appends additional instructions at the end of the instructions for the run. This is useful for - modifying the behavior on a per-run basis without overriding other instructions. - tools: - type: object - allOf: - - $ref: '#/components/schemas/CreateRunRequestToolsItem' - nullable: true - description: |- - Override the tools the assistant can use for this run. This is useful for modifying the - behavior on a per-run basis. - metadata: - type: object - additionalProperties: - type: string - nullable: true - description: |- - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - additional information about the object in a structured format. Keys can be a maximum of 64 - characters long and values can be a maxium of 512 characters long. 
- x-oaiTypeLabel: map - CreateRunRequestTool: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsRetrieval' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - CreateRunRequestToolsItem: - type: array - items: - $ref: '#/components/schemas/CreateRunRequestTool' - maxItems: 20 - CreateSpeechRequest: - type: object - required: - - model - - input - - voice - properties: - model: - anyOf: - - type: string - - type: string - enum: - - tts-1 - - tts-1-hd - description: 'One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`' - x-oaiTypeLabel: string - input: - type: string - maxLength: 4096 - description: The text to generate audio for. The maximum length is 4096 characters. - voice: - type: string - enum: - - alloy - - echo - - fable - - onyx - - nova - - shimmer - description: |- - The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, - `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the - [Text to speech guide](/docs/guides/text-to-speech/voice-options). - response_format: - type: string - enum: - - mp3 - - opus - - aac - - flac - description: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. - default: mp3 - speed: - type: number - format: double - minimum: 0.25 - maximum: 4 - description: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. - default: 1 - CreateThreadAndRunRequest: - type: object - required: - - assistant_id - properties: - assistant_id: - type: string - description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. - thread: - allOf: - - $ref: '#/components/schemas/CreateThreadRequest' - description: If no thread is provided, an empty thread will be created. 
- model: - type: string - nullable: true - description: |- - The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is - provided here, it will override the model associated with the assistant. If not, the model - associated with the assistant will be used. - instructions: - type: string - nullable: true - description: |- - Override the default system message of the assistant. This is useful for modifying the behavior - on a per-run basis. - tools: - type: object - allOf: - - $ref: '#/components/schemas/CreateRunRequestToolsItem' - nullable: true - description: |- - Override the tools the assistant can use for this run. This is useful for modifying the - behavior on a per-run basis. - metadata: - type: object - additionalProperties: - type: string - nullable: true - description: |- - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - additional information about the object in a structured format. Keys can be a maximum of 64 - characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - CreateThreadRequest: - type: object - properties: - messages: - type: array - items: - $ref: '#/components/schemas/CreateMessageRequest' - description: A list of [messages](/docs/api-reference/messages) to start the thread with. - metadata: - type: object - additionalProperties: - type: string - nullable: true - description: |- - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - additional information about the object in a structured format. Keys can be a maximum of 64 - characters long and values can be a maxium of 512 characters long. - CreateTranscriptionRequestMultiPart: - type: object - required: - - file - - model - properties: - file: - type: string - format: binary - description: |- - The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, - mpeg, mpga, m4a, ogg, wav, or webm. 
- x-oaiTypeLabel: file - model: - anyOf: - - type: string - - type: string - enum: - - whisper-1 - description: ID of the model to use. Only `whisper-1` is currently available. - x-oaiTypeLabel: string - language: - type: string - description: |- - The language of the input audio. Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy - and latency. - prompt: - type: string - description: |- - An optional text to guide the model's style or continue a previous audio segment. The - [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - response_format: - type: string - enum: - - json - - text - - srt - - verbose_json - - vtt - - text - - srt - - verbose_json - - vtt - description: |- - The format of the transcript output, in one of these options: json, text, srt, verbose_json, or - vtt. - default: json - temperature: - type: number - format: double - minimum: 0 - maximum: 1 - description: |- - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more - random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, - the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to - automatically increase the temperature until certain thresholds are hit. - default: 0 - CreateTranscriptionResponse: - type: object - required: - - text - properties: - text: - type: string - description: The transcribed text for the provided audio data. - task: - type: string - enum: - - transcribe - description: The label that describes which operation type generated the accompanying response data. - language: - type: string - description: The spoken language that was detected in the audio data. - duration: - type: number - format: double - description: The total duration of the audio processed to produce accompanying transcription information. 
- segments: - type: array - items: - $ref: '#/components/schemas/AudioSegment' - description: |- - A collection of information about the timing, probabilities, and other detail of each processed - audio segment. - CreateTranslationRequestMultiPart: - type: object - required: - - file - - model - properties: - file: - type: string - format: binary - description: |- - The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, - mpeg, mpga, m4a, ogg, wav, or webm. - x-oaiTypeLabel: file - model: - anyOf: - - type: string - - type: string - enum: - - whisper-1 - description: ID of the model to use. Only `whisper-1` is currently available. - x-oaiTypeLabel: string - prompt: - type: string - description: |- - An optional text to guide the model's style or continue a previous audio segment. The - [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - response_format: - type: string - enum: - - json - - text - - srt - - verbose_json - - vtt - - text - - srt - - verbose_json - - vtt - description: |- - The format of the transcript output, in one of these options: json, text, srt, verbose_json, or - vtt. - default: json - temperature: - type: number - format: double - minimum: 0 - maximum: 1 - description: |- - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more - random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, - the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to - automatically increase the temperature until certain thresholds are hit. - default: 0 - CreateTranslationResponse: - type: object - required: - - text - properties: - text: - type: string - description: The translated text for the provided audio data. - task: - type: string - enum: - - translate - description: The label that describes which operation type generated the accompanying response data. 
- language: - type: string - description: The spoken language that was detected in the audio data. - duration: - type: number - format: double - description: The total duration of the audio processed to produce accompanying translation information. - segments: - type: array - items: - $ref: '#/components/schemas/AudioSegment' - description: |- - A collection of information about the timing, probabilities, and other detail of each processed - audio segment. - DeleteAssistantFileResponse: - type: object - required: - - id - - deleted - - object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - enum: - - assistant.file.deleted - description: |- - Deletes the association between the assistant and the file, but does not delete the - [File](/docs/api-reference/files) object itself. - DeleteAssistantResponse: - type: object - required: - - id - - deleted - - object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - enum: - - assistant.deleted - DeleteFileResponse: - type: object - required: - - id - - object - - deleted - properties: - id: - type: string - object: - type: string - enum: - - file - deleted: - type: boolean - DeleteModelResponse: - type: object - required: - - id - - deleted - - object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - enum: - - model - DeleteThreadResponse: - type: object - required: - - id - - deleted - - object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - enum: - - thread.deleted - Embedding: - type: object - required: - - index - - embedding - - object - properties: - index: - type: integer - format: int64 - description: The index of the embedding in the list of embeddings. - embedding: - anyOf: - - type: array - items: - type: number - format: double - - type: string - description: |- - The embedding vector, which is a list of floats. 
The length of vector depends on the model as - listed in the [embedding guide](/docs/guides/embeddings). - object: - type: string - enum: - - embedding - description: The object type, which is always "embedding". - description: Represents an embedding vector returned by embedding endpoint. - Error: - type: object - required: - - type - - message - - param - - code - properties: - type: - type: string - message: - type: string - param: - type: string - nullable: true - code: - type: string - nullable: true - ErrorResponse: - type: object - required: - - error - properties: - error: - $ref: '#/components/schemas/Error' - FineTune: - type: object - required: - - id - - object - - created_at - - updated_at - - model - - fine_tuned_model - - organization_id - - status - - hyperparams - - training_files - - validation_files - - result_files - properties: - id: - type: string - description: The object identifier, which can be referenced in the API endpoints. - object: - type: string - enum: - - fine-tune - - fine-tune-results - - assistants - - assistants_output - description: The object type, which is always "fine-tune". - created_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - updated_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the fine-tuning job was last updated. - model: - type: string - description: The base model that is being fine-tuned. - fine_tuned_model: - type: string - nullable: true - description: The name of the fine-tuned model that is being created. - organization_id: - type: string - description: The organization that owns the fine-tuning job. 
- status: - type: string - enum: - - created - - pending - - running - - succeeded - - failed - - cancelled - - running - - succeeded - - failed - - cancelled - description: |- - The current status of the fine-tuning job, which can be either `created`, `running`, - `succeeded`, `failed`, or `cancelled`. - hyperparams: - type: object - properties: - n_epochs: - type: integer - format: int64 - description: |- - The number of epochs to train the model for. An epoch refers to one full cycle through the - training dataset. - batch_size: - type: integer - format: int64 - description: |- - The batch size to use for training. The batch size is the number of training examples used to - train a single forward and backward pass. - prompt_loss_weight: - type: number - format: double - description: The weight to use for loss on the prompt tokens. - learning_rate_multiplier: - type: number - format: double - description: The learning rate multiplier to use for training. - compute_classification_metrics: - type: boolean - description: The classification metrics to compute using the validation dataset at the end of every epoch. - classification_positive_class: - type: string - description: The positive class to use for computing classification metrics. - classification_n_classes: - type: integer - format: int64 - description: The number of classes to use for computing classification metrics. - required: - - n_epochs - - batch_size - - prompt_loss_weight - - learning_rate_multiplier - description: |- - The hyperparameters used for the fine-tuning job. See the - [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. - training_files: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - description: The list of files used for training. - validation_files: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - description: The list of files used for validation. 
- result_files: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - description: The compiled results files for the fine-tuning job. - events: - type: array - items: - $ref: '#/components/schemas/FineTuneEvent' - description: The list of events that have been observed in the lifecycle of the FineTune job. - description: The `FineTune` object represents a legacy fine-tune job that has been created through the API. - deprecated: true - FineTuneEvent: - type: object - required: - - object - - created_at - - level - - message - properties: - object: - type: string - created_at: - type: integer - format: unixtime - level: - type: string - message: - type: string - FineTuningEvent: - type: object - required: - - object - - created_at - - level - - message - properties: - object: - type: string - created_at: - type: integer - format: unixtime - level: - type: string - message: - type: string - data: - type: object - additionalProperties: {} - nullable: true - type: - type: string - enum: - - message - - metrics - FineTuningJob: - type: object - required: - - id - - object - - created_at - - finished_at - - model - - fine_tuned_model - - organization_id - - status - - hyperparameters - - training_file - - validation_file - - result_files - - trained_tokens - - error - properties: - id: - type: string - description: The object identifier, which can be referenced in the API endpoints. - object: - type: string - enum: - - fine_tuning.job - description: The object type, which is always "fine_tuning.job". - created_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - finished_at: - type: string - format: date-time - nullable: true - description: |- - The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be - null if the fine-tuning job is still running. - model: - type: string - description: The base model that is being fine-tuned. 
- fine_tuned_model: - type: string - nullable: true - description: |- - The name of the fine-tuned model that is being created. The value will be null if the - fine-tuning job is still running. - organization_id: - type: string - description: The organization that owns the fine-tuning job. - status: - type: string - enum: - - created - - pending - - running - - succeeded - - failed - - cancelled - - running - - succeeded - - failed - - cancelled - description: |- - The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, - `succeeded`, `failed`, or `cancelled`. - hyperparameters: - type: object - properties: - n_epochs: - anyOf: - - type: string - enum: - - auto - - low - - high - - $ref: '#/components/schemas/NEpochs' - description: |- - The number of epochs to train the model for. An epoch refers to one full cycle through the - training dataset. - - "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the - number manually, we support any number between 1 and 50 epochs. - default: auto - description: |- - The hyperparameters used for the fine-tuning job. See the - [fine-tuning guide](/docs/guides/fine-tuning) for more details. - training_file: - type: string - description: |- - The file ID used for training. You can retrieve the training data with the - [Files API](/docs/api-reference/files/retrieve-contents). - validation_file: - type: string - nullable: true - description: |- - The file ID used for validation. You can retrieve the validation results with the - [Files API](/docs/api-reference/files/retrieve-contents). - result_files: - type: array - items: - type: string - description: |- - The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the - [Files API](/docs/api-reference/files/retrieve-contents). 
- trained_tokens: - type: integer - format: int64 - nullable: true - description: |- - The total number of billable tokens processed by this fine tuning job. The value will be null - if the fine-tuning job is still running. - error: - type: object - properties: - message: - type: string - description: A human-readable error message. - code: - type: string - description: A machine-readable error code. - param: - type: string - nullable: true - description: |- - The parameter that was invalid, usually `training_file` or `validation_file`. This field - will be null if the failure was not parameter-specific. - nullable: true - description: |- - For fine-tuning jobs that have `failed`, this will contain more information on the cause of the - failure. - FineTuningJobEvent: - type: object - required: - - id - - object - - created_at - - level - - message - properties: - id: - type: string - object: - type: string - created_at: - type: integer - format: unixtime - level: - type: string - enum: - - info - - warn - - error - message: - type: string - FunctionObject: - type: object - required: - - name - properties: - description: - type: string - description: |- - A description of what the function does, used by the model to choose when and how to call the - function. - name: - type: string - description: |- - The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and - dashes, with a maximum length of 64. - parameters: - $ref: '#/components/schemas/FunctionParameters' - FunctionParameters: - type: object - additionalProperties: {} - description: |- - The parameters the functions accepts, described as a JSON Schema object. See the - [guide](/docs/guides/gpt/function-calling) for examples, and the - [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation - about the format.\n\nTo describe a function that accepts no parameters, provide the value - `{\"type\": \"object\", \"properties\": {}}`. 
- Image: - type: object - properties: - b64_json: - type: string - format: base64 - description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. - url: - type: string - format: uri - description: The URL of the generated image, if `response_format` is `url` (default). - revised_prompt: - type: string - description: The prompt that was used to generate the image, if there was any revision to the prompt. - description: Represents the url or the content of an image generated by the OpenAI API. - ImagesN: - type: integer - format: int64 - minimum: 1 - maximum: 10 - ImagesResponse: - type: object - required: - - created - - data - properties: - created: - type: integer - format: unixtime - data: - type: array - items: - $ref: '#/components/schemas/Image' - ListAssistantFilesResponse: - type: object - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - enum: - - list - data: - type: array - items: - $ref: '#/components/schemas/AssistantFileObject' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ListAssistantsResponse: - type: object - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - enum: - - list - data: - type: array - items: - $ref: '#/components/schemas/AssistantObject' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ListFilesResponse: - type: object - required: - - data - - object - properties: - data: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - object: - type: string - enum: - - list - ListFineTuneEventsResponse: - type: object - required: - - object - - data - properties: - object: - type: string - data: - type: array - items: - $ref: '#/components/schemas/FineTuneEvent' - ListFineTunesResponse: - type: object - required: - - object - - data - properties: - object: - type: string - data: - type: array - items: - $ref: 
'#/components/schemas/FineTune' - ListFineTuningJobEventsResponse: - type: object - required: - - object - - data - properties: - object: - type: string - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJobEvent' - ListMessageFilesResponse: - type: object - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - enum: - - list - data: - type: array - items: - $ref: '#/components/schemas/MessageFileObject' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ListMessagesResponse: - type: object - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - enum: - - list - data: - type: array - items: - $ref: '#/components/schemas/MessageObject' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ListModelsResponse: - type: object - required: - - object - - data - properties: - object: - type: string - enum: - - list - data: - type: array - items: - $ref: '#/components/schemas/Model' - ListOrder: - type: string - enum: - - asc - - desc - ListPaginatedFineTuningJobsResponse: - type: object - required: - - object - - data - - has_more - properties: - object: - type: string - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJob' - has_more: - type: boolean - ListRunStepsResponse: - type: object - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - enum: - - list - data: - type: array - items: - $ref: '#/components/schemas/RunStepObject' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ListRunsResponse: - type: object - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - enum: - - list - data: - type: array - items: - $ref: '#/components/schemas/RunObject' - first_id: - type: string - last_id: - type: string - has_more: - 
type: boolean - MessageContentImageFileObject: - type: object - required: - - type - - image_file - properties: - type: - type: string - enum: - - image_file - description: Always `image_file`. - image_file: - type: object - properties: - file_id: - type: string - description: The [File](/docs/api-reference/files) ID of the image in the message content. - required: - - file_id - description: References an image [File](/docs/api-reference/files) in the content of a message. - MessageContentTextAnnotationsFileCitationObject: - type: object - required: - - type - - text - - file_citation - - start_index - - end_index - properties: - type: - type: string - enum: - - file_citation - description: Always `file_citation`. - text: - type: string - description: The text in the message content that needs to be replaced. - file_citation: - type: object - properties: - file_id: - type: string - description: The ID of the specific File the citation is from. - quote: - type: string - description: The specific quote in the file. - required: - - file_id - - quote - start_index: - type: integer - format: int64 - minimum: 0 - end_index: - type: integer - format: int64 - minimum: 0 - description: |- - A citation within the message that points to a specific quote from a specific File associated - with the assistant or the message. Generated when the assistant uses the "retrieval" tool to - search files. - MessageContentTextAnnotationsFilePathObject: - type: object - required: - - type - - text - - file_path - - start_index - - end_index - properties: - type: - type: string - enum: - - file_path - description: Always `file_path`. - text: - type: string - description: The text in the message content that needs to be replaced. - file_path: - type: object - properties: - file_id: - type: string - description: The ID of the file that was generated. 
- required: - - file_id - start_index: - type: integer - format: int64 - minimum: 0 - end_index: - type: integer - format: int64 - minimum: 0 - description: |- - A URL for the file that's generated when the assistant used the `code_interpreter` tool to - generate a file. - MessageContentTextObject: - type: object - required: - - type - - text - properties: - type: - type: string - enum: - - text - - json_object - description: Always `text`. - text: - type: object - properties: - value: - type: string - description: The data that makes up the text. - annotations: - type: array - items: - $ref: '#/components/schemas/MessageContentTextObjectAnnotations' - required: - - value - - annotations - description: The text content that is part of a message. - MessageContentTextObjectAnnotations: - oneOf: - - $ref: '#/components/schemas/MessageContentTextAnnotationsFileCitationObject' - - $ref: '#/components/schemas/MessageContentTextAnnotationsFilePathObject' - x-oaiExpandable: true - MessageFileObject: - type: object - required: - - id - - object - - created_at - - message_id - properties: - id: - type: string - description: TThe identifier, which can be referenced in API endpoints. - object: - type: string - enum: - - thread.message.file - description: The object type, which is always `thread.message.file`. - created_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the message file was created. - message_id: - type: string - description: The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. - description: A list of files attached to a `message`. - MessageObject: - type: object - required: - - id - - object - - created_at - - thread_id - - role - - content - - assistant_id - - run_id - - file_ids - - metadata - properties: - id: - type: string - description: The identifier, which can be referenced in API endpoints. 
- object: - type: string - enum: - - thread.message - description: The object type, which is always `thread.message`. - created_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the message was created. - thread_id: - type: string - description: The [thread](/docs/api-reference/threads) ID that this message belongs to. - role: - type: string - enum: - - user - - assistant - description: The entity that produced the message. One of `user` or `assistant`. - content: - type: array - items: - $ref: '#/components/schemas/MessageObjectContent' - description: The content of the message in array of text and/or images. - assistant_id: - type: string - nullable: true - description: |- - If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this - message. - run_id: - type: string - nullable: true - description: |- - If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of - this message. - file_ids: - type: array - items: - type: string - maxItems: 10 - description: |- - A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for - tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be - attached to a message. - default: [] - metadata: - type: object - additionalProperties: - type: string - nullable: true - description: |- - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - additional information about the object in a structured format. Keys can be a maximum of 64 - characters long and values can be a maxium of 512 characters long. 
- x-oaiTypeLabel: map - MessageObjectContent: - oneOf: - - $ref: '#/components/schemas/MessageContentImageFileObject' - - $ref: '#/components/schemas/MessageContentTextObject' - x-oaiExpandable: true - Model: - type: object - required: - - id - - created - - object - - owned_by - properties: - id: - type: string - description: The model identifier, which can be referenced in the API endpoints. - created: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) when the model was created. - object: - type: string - enum: - - model - description: The object type, which is always "model". - owned_by: - type: string - description: The organization that owns the model. - description: Describes an OpenAI model offering that can be used with the API. - ModifyAssistantRequest: - type: object - properties: - model: - type: string - description: |- - ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to - see all of your available models, or see our [Model overview](/docs/models/overview) for - descriptions of them. - name: - type: string - nullable: true - maxLength: 256 - description: The name of the assistant. The maximum length is 256 characters. - description: - type: string - nullable: true - maxLength: 512 - description: The description of the assistant. The maximum length is 512 characters. - instructions: - type: string - nullable: true - maxLength: 32768 - description: The system instructions that the assistant uses. The maximum length is 32768 characters. - tools: - allOf: - - $ref: '#/components/schemas/CreateAssistantRequestToolsItem' - description: |- - A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. - Tools can be of types `code_interpreter`, `retrieval`, or `function`. - default: [] - file_ids: - type: array - items: - type: string - maxItems: 20 - description: |- - A list of [file](/docs/api-reference/files) IDs attached to this assistant. 
There can be a - maximum of 20 files attached to the assistant. Files are ordered by their creation date in - ascending order. - default: [] - metadata: - type: object - additionalProperties: - type: string - nullable: true - description: |- - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - additional information about the object in a structured format. Keys can be a maximum of 64 - characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - ModifyMessageRequest: - type: object - properties: - metadata: - type: object - additionalProperties: - type: string - nullable: true - description: |- - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - additional information about the object in a structured format. Keys can be a maximum of 64 - characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - ModifyRunRequest: - type: object - properties: - metadata: - type: object - additionalProperties: - type: string - nullable: true - description: |- - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - additional information about the object in a structured format. Keys can be a maximum of 64 - characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - ModifyThreadRequest: - type: object - properties: - metadata: - type: object - additionalProperties: - type: string - nullable: true - description: |- - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - additional information about the object in a structured format. Keys can be a maximum of 64 - characters long and values can be a maxium of 512 characters long. 
- NEpochs: - type: integer - format: int64 - minimum: 1 - maximum: 50 - OpenAIFile: - type: object - required: - - id - - bytes - - created_at - - filename - - object - - purpose - - status - properties: - id: - type: string - description: The file identifier, which can be referenced in the API endpoints. - bytes: - type: integer - format: int64 - description: The size of the file, in bytes. - created_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the file was created. - filename: - type: string - description: The name of the file. - object: - type: string - enum: - - file - description: The object type, which is always "file". - purpose: - type: string - enum: - - fine-tune - - fine-tune-results - - assistants - - assistants_output - description: |- - The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, - `assistants`, and `assistants_output`. - status: - type: string - enum: - - uploaded - - processed - - error - description: |- - Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or - `error`. - deprecated: true - status_details: - type: string - description: |- - Deprecated. For details on why a fine-tuning training file failed validation, see the `error` - field on `fine_tuning.job`. - deprecated: true - description: The `File` object represents a document that has been uploaded to OpenAI. - Prompt: - oneOf: - - type: string - - type: array - items: - type: string - - $ref: '#/components/schemas/TokenArrayItem' - - $ref: '#/components/schemas/TokenArrayArray' - RunCompletionUsage: - type: object - required: - - completion_tokens - - prompt_tokens - - total_tokens - properties: - completion_tokens: - type: integer - format: int64 - description: Number of completion tokens used over the course of the run. - prompt_tokens: - type: integer - format: int64 - description: Number of prompt tokens used over the course of the run. 
- total_tokens: - type: integer - format: int64 - description: Total number of tokens used (prompt + completion). - description: |- - Usage statistics related to the run. This value will be `null` if the run is not in a terminal - state (i.e. `in_progress`, `queued`, etc.). - RunObject: - type: object - required: - - id - - object - - created_at - - thread_id - - assistant_id - - status - - required_action - - last_error - - expires_at - - started_at - - cancelled_at - - failed_at - - completed_at - - model - - instructions - - tools - - file_ids - - metadata - - usage - properties: - id: - type: string - description: The identifier, which can be referenced in API endpoints. - object: - type: string - enum: - - thread.run - description: The object type, which is always `thread.run`. - created_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the run was created. - thread_id: - type: string - description: |- - The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this - run. - assistant_id: - type: string - description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. - status: - type: string - enum: - - queued - - in_progress - - requires_action - - cancelling - - cancelled - - failed - - completed - - expired - description: |- - The status of the run, which can be either `queued`, `in_progress`, `requires_action`, - `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. - required_action: - type: object - properties: - type: - type: string - enum: - - submit_tool_outputs - description: For now, this is always `submit_tool_outputs`. - submit_tool_outputs: - type: object - properties: - tool_calls: - type: array - items: - $ref: '#/components/schemas/RunToolCallObject' - description: A list of the relevant tool calls. - required: - - tool_calls - description: Details on the tool outputs needed for this run to continue. 
- required: - - type - - submit_tool_outputs - nullable: true - description: |- - Details on the action required to continue the run. Will be `null` if no action is - required. - last_error: - type: object - properties: - code: - type: string - enum: - - server_error - - rate_limit_exceeded - description: One of `server_error` or `rate_limit_exceeded`. - message: - type: string - description: A human-readable description of the error. - required: - - code - - message - nullable: true - description: The last error associated with this run. Will be `null` if there are no errors. - expires_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the run will expire. - started_at: - type: string - format: date-time - nullable: true - description: The Unix timestamp (in seconds) for when the run was started. - cancelled_at: - type: string - format: date-time - nullable: true - description: The Unix timestamp (in seconds) for when the run was cancelled. - failed_at: - type: string - format: date-time - nullable: true - description: The Unix timestamp (in seconds) for when the run failed. - completed_at: - type: string - format: date-time - nullable: true - description: The Unix timestamp (in seconds) for when the run was completed. - model: - type: string - description: The model that the [assistant](/docs/api-reference/assistants) used for this run. - instructions: - type: string - description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. - tools: - allOf: - - $ref: '#/components/schemas/CreateRunRequestToolsItem' - description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. - file_ids: - type: array - items: - type: string - description: |- - The list of [File](/docs/api-reference/files) IDs the - [assistant](/docs/api-reference/assistants) used for this run. 
- default: [] - metadata: - type: object - additionalProperties: - type: string - nullable: true - description: |- - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - additional information about the object in a structured format. Keys can be a maximum of 64 - characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - usage: - type: object - allOf: - - $ref: '#/components/schemas/RunCompletionUsage' - nullable: true - description: Represents an execution run on a [thread](/docs/api-reference/threads). - RunStepCompletionUsage: - type: object - required: - - completion_tokens - - prompt_tokens - - total_tokens - properties: - completion_tokens: - type: integer - format: int64 - description: Number of completion tokens used over the course of the run step. - prompt_tokens: - type: integer - format: int64 - description: Number of prompt tokens used over the course of the run step. - total_tokens: - type: integer - format: int64 - description: Total number of tokens used (prompt + completion). - description: |- - Usage statistics related to the run step. This value will be `null` while the run step's status - is `in_progress`. - RunStepDetails: - oneOf: - - $ref: '#/components/schemas/RunStepDetailsMessageCreationObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsObject' - x-oaiExpandable: true - RunStepDetailsMessageCreationObject: - type: object - required: - - type - - message_creation - properties: - type: - type: string - enum: - - message_creation - description: Details of the message creation by the run step. - message_creation: - type: object - properties: - message_id: - type: string - description: The ID of the message that was created by this run step. - required: - - message_id - description: Details of the message creation by the run step. 
- RunStepDetailsToolCallsCodeObject: - type: object - required: - - id - - type - - code_interpreter - properties: - id: - type: string - description: The ID of the tool call. - type: - type: string - enum: - - code_interpreter - description: |- - The type of tool call. This is always going to be `code_interpreter` for this type of tool - call. - code_interpreter: - type: object - properties: - input: - type: string - description: The input to the Code Interpreter tool call. - outputs: - allOf: - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputs' - description: |- - The outputs from the Code Interpreter tool call. Code Interpreter can output one or more - items, including text (`logs`) or images (`image`). Each of these are represented by a - different object type. - required: - - input - - outputs - description: The Code Interpreter tool call definition. - description: Details of the Code Interpreter tool call the run step was involved in. - RunStepDetailsToolCallsCodeOutput: - oneOf: - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject' - x-oaiExpandable: true - RunStepDetailsToolCallsCodeOutputImageObject: - type: object - required: - - type - - image - properties: - type: - type: string - enum: - - image - description: Always `image`. - image: - type: object - properties: - file_id: - type: string - description: The [file](/docs/api-reference/files) ID of the image. - required: - - file_id - RunStepDetailsToolCallsCodeOutputLogsObject: - type: object - required: - - type - - logs - properties: - type: - type: string - enum: - - logs - description: Always `logs`. - logs: - type: string - description: The text output from the Code Interpreter tool call. - description: Text output from the Code Interpreter tool call as part of a run step. 
- RunStepDetailsToolCallsCodeOutputs: - type: array - items: - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutput' - RunStepDetailsToolCallsFunctionObject: - type: object - required: - - id - - type - - function - properties: - id: - type: string - description: The ID of the tool call object. - type: - type: string - enum: - - function - description: The type of tool call. This is always going to be `function` for this type of tool call. - function: - type: object - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - nullable: true - description: |- - The output of the function. This will be `null` if the outputs have not been - [submitted](/docs/api-reference/runs/submitToolOutputs) yet. - required: - - name - - arguments - - output - description: The definition of the function that was called. - RunStepDetailsToolCallsObject: - type: object - required: - - type - - tool_calls - properties: - type: - type: string - enum: - - tool_calls - description: Always `tool_calls`. - tool_calls: - allOf: - - $ref: '#/components/schemas/RunStepDetailsToolCallsObjectToolCallsItem' - description: |- - An array of tool calls the run step was involved in. These can be associated with one of three - types of tools: `code_interpreter`, `retrieval`, or `function`. - description: Details of the tool call. 
- RunStepDetailsToolCallsObjectToolCall: - oneOf: - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsRetrievalObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsFunctionObject' - x-oaiExpandable: true - RunStepDetailsToolCallsObjectToolCallsItem: - type: array - items: - $ref: '#/components/schemas/RunStepDetailsToolCallsObjectToolCall' - RunStepDetailsToolCallsRetrievalObject: - type: object - required: - - id - - type - - retrieval - properties: - id: - type: string - description: The ID of the tool call object. - type: - type: string - enum: - - retrieval - description: The type of tool call. This is always going to be `retrieval` for this type of tool call. - retrieval: - type: object - description: For now, this is always going to be an empty object. - x-oaiTypeLabel: map - RunStepObject: - type: object - required: - - id - - object - - created_at - - assistant_id - - thread_id - - run_id - - type - - status - - step_details - - last_error - - expires_at - - cancelled_at - - failed_at - - completed_at - - metadata - - usage - properties: - id: - type: string - description: The identifier of the run step, which can be referenced in API endpoints. - object: - type: string - enum: - - thread.run.step - description: The object type, which is always `thread.run.step`. - created_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the run step was created. - assistant_id: - type: string - description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. - thread_id: - type: string - description: The ID of the [thread](/docs/api-reference/threads) that was run. - run_id: - type: string - description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. 
- type: - type: string - enum: - - message_creation - - tool_calls - description: The type of run step, which can be either `message_creation` or `tool_calls`. - status: - type: string - enum: - - in_progress - - cancelled - - failed - - completed - - expired - description: |- - The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, - `completed`, or `expired`. - step_details: - allOf: - - $ref: '#/components/schemas/RunStepDetails' - description: The details of the run step. - last_error: - type: object - properties: - code: - type: string - enum: - - server_error - - rate_limit_exceeded - description: One of `server_error` or `rate_limit_exceeded`. - message: - type: string - description: A human-readable description of the error. - required: - - code - - message - nullable: true - description: The last error associated with this run step. Will be `null` if there are no errors. - expires_at: - type: string - format: date-time - nullable: true - description: |- - The Unix timestamp (in seconds) for when the run step expired. A step is considered expired - if the parent run is expired. - cancelled_at: - type: string - format: date-time - nullable: true - description: The Unix timestamp (in seconds) for when the run step was cancelled. - failed_at: - type: string - format: date-time - nullable: true - description: The Unix timestamp (in seconds) for when the run step failed. - completed_at: - type: string - format: date-time - nullable: true - description: T The Unix timestamp (in seconds) for when the run step completed.. - metadata: - type: object - additionalProperties: - type: string - nullable: true - description: |- - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - additional information about the object in a structured format. Keys can be a maximum of 64 - characters long and values can be a maxium of 512 characters long. 
- x-oaiTypeLabel: map - usage: - type: object - allOf: - - $ref: '#/components/schemas/RunCompletionUsage' - nullable: true - description: Represents a step in execution of a run. - RunToolCallObject: - type: object - required: - - id - - type - - function - properties: - id: - type: string - description: |- - The ID of the tool call. This ID must be referenced when you submit the tool outputs in using - the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. - type: - type: string - enum: - - function - description: The type of tool call the output is required for. For now, this is always `function`. - function: - type: object - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments that the model expects you to pass to the function. - required: - - name - - arguments - description: The function definition. - description: Tool call objects - Stop: - oneOf: - - type: string - - $ref: '#/components/schemas/StopSequences' - StopSequences: - type: array - items: - type: string - minItems: 1 - maxItems: 4 - SubmitToolOutputsRunRequest: - type: object - required: - - tool_outputs - properties: - tool_outputs: - type: object - properties: - tool_call_id: - type: string - description: |- - The ID of the tool call in the `required_action` object within the run object the output is - being submitted for. - output: - type: string - description: The output of the tool call to be submitted to continue the run. - description: A list of tools for which the outputs are being submitted. - SuffixString: - type: string - minLength: 1 - maxLength: 40 - ThreadObject: - type: object - required: - - id - - object - - created_at - - metadata - properties: - id: - type: string - description: The identifier, which can be referenced in API endpoints. - object: - type: string - enum: - - thread - description: The object type, which is always `thread`. 
- created_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the thread was created. - metadata: - type: object - additionalProperties: - type: string - nullable: true - description: |- - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - additional information about the object in a structured format. Keys can be a maximum of 64 - characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - description: Represents a thread that contains [messages](/docs/api-reference/messages). - TokenArrayArray: - type: array - items: - $ref: '#/components/schemas/TokenArrayItem' - minItems: 1 - TokenArrayItem: - type: array - items: - type: integer - format: int64 - minItems: 1 - User: - type: string - securitySchemes: - BearerAuth: - type: http - scheme: bearer -servers: - - url: https://api.openai.com/v1 - description: OpenAI Endpoint - variables: {} diff --git a/fine-tuning/operations.tsp b/fine-tuning/operations.tsp index 07f48e802..f37e90cc7 100644 --- a/fine-tuning/operations.tsp +++ b/fine-tuning/operations.tsp @@ -10,145 +10,71 @@ using TypeSpec.OpenAPI; namespace OpenAI; @route("/fine_tuning") -namespace FineTuning { +interface FineTuning { @route("jobs") - interface Jobs { - /** - * Creates a job that fine-tunes a specified model from a given dataset. - * - * Response includes details of the enqueued job including job status and the name of the - * fine-tuned models once complete. - * - * [Learn more about fine-tuning](/docs/guides/fine-tuning) - */ - @post - @tag("Fine-tuning") - @operationId("createFineTuningJob") - createFineTuningJob( - @body job: CreateFineTuningJobRequest, - ): FineTuningJob | ErrorResponse; - - @get - @tag("Fine-tuning") - @operationId("listPaginatedFineTuningJobs") - listPaginatedFineTuningJobs( - /** Identifier for the last job from the previous pagination request. 
*/ - @query after?: string, - - /** Number of fine-tuning jobs to retrieve. */ - @query limit?: safeint = 20, - ): ListPaginatedFineTuningJobsResponse | ErrorResponse; - - @summary(""" - Get info about a fine-tuning job. - - [Learn more about fine-tuning](/docs/guides/fine-tuning) - """) - @route("{fine_tuning_job_id}") - @tag("Fine-tuning") - @get - @operationId("retrieveFineTuningJob") - retrieveFineTuningJob( - @path fine_tuning_job_id: string, - ): FineTuningJob | ErrorResponse; - - @summary("Get status updates for a fine-tuning job.") - @tag("Fine-tuning") - @route("{fine_tuning_job_id}/events") - @get - @operationId("listFineTuningEvents") - listFineTuningEvents( - /** The ID of the fine-tuning job to get events for. */ - @path fine_tuning_job_id: string, - - /** Identifier for the last event from the previous pagination request. */ - @query after?: string, - - /** Number of events to retrieve. */ - @query limit?: integer = 20, - ): ListFineTuningJobEventsResponse | ErrorResponse; - - @summary("Immediately cancel a fine-tune job.") - @tag("Fine-tuning") - @route("{fine_tuning_job_id}/cancel") - @post - @operationId("cancelFineTuningJob") - cancelFineTuningJob( - /** The ID of the fine-tuning job to cancel. */ - @path fine_tuning_job_id: string, - ): FineTuningJob | ErrorResponse; - } -} - -@route("/fine-tunes") -interface FineTunes { - #deprecated "deprecated" @post + @operationId("createFineTuningJob") @tag("Fine-tuning") @summary(""" - Creates a job that fine-tunes a specified model from a given dataset. + Creates a fine-tuning job which begins the process of creating a new model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
- [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + [Learn more about fine-tuning](/docs/guides/fine-tuning) """) - @operationId("createFineTune") - createFineTune( - @body fine_tune: CreateFineTuneRequest, - ): FineTune | ErrorResponse; + createFineTuningJob( + @body job: CreateFineTuningJobRequest, + ): FineTuningJob | ErrorResponse; - #deprecated "deprecated" + @route("jobs") @get + @operationId("listPaginatedFineTuningJobs") @tag("Fine-tuning") @summary("List your organization's fine-tuning jobs") - @operationId("listFineTunes") - listFineTunes(): ListFineTunesResponse | ErrorResponse; + listPaginatedFineTuningJobs( + /** Identifier for the last job from the previous pagination request. */ + @query after?: string, - #deprecated "deprecated" + /** Number of fine-tuning jobs to retrieve. */ + @query limit?: safeint = 20, + ): ListPaginatedFineTuningJobsResponse | ErrorResponse; + + @route("jobs/{fine_tuning_job_id}") @get - @route("{fine_tune_id}") + @operationId("retrieveFineTuningJob") @tag("Fine-tuning") @summary(""" - Gets info about the fine-tune job. + Get info about a fine-tuning job. - [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + [Learn more about fine-tuning](/docs/guides/fine-tuning) """) - @operationId("retrieveFineTune") - retrieveFineTune( - /** The ID of the fine-tune job */ - @path fine_tune_id: string, - ): FineTune | ErrorResponse; + retrieveFineTuningJob( + @path fine_tuning_job_id: string, + ): FineTuningJob | ErrorResponse; + + @route("jobs/{fine_tuning_job_id}/cancel") + @post + @operationId("cancelFineTuningJob") + @tag("Fine-tuning") + @summary("Immediately cancel a fine-tune job.") + cancelFineTuningJob( + /** The ID of the fine-tuning job to cancel. 
*/ + @path fine_tuning_job_id: string, + ): FineTuningJob | ErrorResponse; - #deprecated "deprecated" - @route("{fine_tune_id}/events") + @route("jobs/{fine_tuning_job_id}/events") @get + @operationId("listFineTuningEvents") @tag("Fine-tuning") - @summary("Get fine-grained status updates for a fine-tune job.") - @operationId("listFineTuneEvents") - listFineTuneEvents( - /** The ID of the fine-tune job to get events for. */ - @path fine_tune_id: string, + @summary("Get status updates for a fine-tuning job.") + listFineTuningEvents( + /** The ID of the fine-tuning job to get events for. */ + @path fine_tuning_job_id: string, - /** - * Whether to stream events for the fine-tune job. If set to true, events will be sent as - * data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available. The stream will terminate with a `data: [DONE]` message when the - * job is finished (succeeded, cancelled, or failed). - * - * If set to false, only events generated so far will be returned. - */ - @query stream?: boolean = false, - ): ListFineTuneEventsResponse | ErrorResponse; + /** Identifier for the last event from the previous pagination request. */ + @query after?: string, - #deprecated "deprecated" - @route("{fine_tune_id}/cancel") - @post - @tag("Fine-tuning") - @summary("Immediately cancel a fine-tune job.") - @operationId("cancelFineTune") - cancelFineTune( - /** The ID of the fine-tune job to cancel */ - @path fine_tune_id: string, - ): FineTune | ErrorResponse; + /** Number of events to retrieve. 
*/ + @query limit?: integer = 20, + ): ListFineTuningJobEventsResponse | ErrorResponse; } \ No newline at end of file diff --git a/tsp-output/@typespec/openapi3/openapi.yaml b/tsp-output/@typespec/openapi3/openapi.yaml index 346061391..c96cc33ed 100644 --- a/tsp-output/@typespec/openapi3/openapi.yaml +++ b/tsp-output/@typespec/openapi3/openapi.yaml @@ -4,13 +4,13 @@ info: version: 2.0.0 description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. tags: - - name: Fine-tuning - name: Audio - name: Assistants - name: Chat - name: Completions - name: Embeddings - name: Files + - name: Fine-tuning - name: Images - name: Models - name: Moderations @@ -637,166 +637,15 @@ paths: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - /fine-tunes: - post: - tags: - - Fine-tuning - operationId: createFineTune - summary: |- - Creates a job that fine-tunes a specified model from a given dataset. - - Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - - [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/FineTune' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateFineTuneRequest' - deprecated: true - get: - tags: - - Fine-tuning - operationId: listFineTunes - summary: List your organization's fine-tuning jobs - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListFineTunesResponse' - default: - description: An unexpected error response. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - deprecated: true - /fine-tunes/{fine_tune_id}: - get: - tags: - - Fine-tuning - operationId: retrieveFineTune - summary: |- - Gets info about the fine-tune job. - - [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - parameters: - - name: fine_tune_id - in: path - required: true - description: The ID of the fine-tune job - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/FineTune' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - deprecated: true - /fine-tunes/{fine_tune_id}/cancel: - post: - tags: - - Fine-tuning - operationId: cancelFineTune - summary: Immediately cancel a fine-tune job. - parameters: - - name: fine_tune_id - in: path - required: true - description: The ID of the fine-tune job to cancel - schema: - type: string - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/FineTune' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - deprecated: true - /fine-tunes/{fine_tune_id}/events: - get: - tags: - - Fine-tuning - operationId: listFineTuneEvents - summary: Get fine-grained status updates for a fine-tune job. - parameters: - - name: fine_tune_id - in: path - required: true - description: The ID of the fine-tune job to get events for. - schema: - type: string - - name: stream - in: query - required: false - description: |- - Whether to stream events for the fine-tune job. If set to true, events will be sent as - data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available. 
The stream will terminate with a `data: [DONE]` message when the - job is finished (succeeded, cancelled, or failed). - - If set to false, only events generated so far will be returned. - schema: - type: boolean - default: false - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListFineTuneEventsResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - deprecated: true /fine_tuning/jobs: post: tags: - Fine-tuning operationId: createFineTuningJob - description: |- - Creates a job that fine-tunes a specified model from a given dataset. + summary: |- + Creates a fine-tuning job which begins the process of creating a new model from a given dataset. - Response includes details of the enqueued job including job status and the name of the - fine-tuned models once complete. + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
[Learn more about fine-tuning](/docs/guides/fine-tuning) parameters: [] @@ -823,6 +672,7 @@ paths: tags: - Fine-tuning operationId: listPaginatedFineTuningJobs + summary: List your organization's fine-tuning jobs parameters: - name: after in: query @@ -4922,12 +4772,10 @@ components: items: $ref: '#/components/schemas/Model' ListOrder: - anyOf: - - type: string - - type: string - enum: - - asc - - desc + type: string + enum: + - asc + - desc ListPaginatedFineTuningJobsResponse: type: object required: From b16017af6338b477e80a2e9c92283d868bdfbbf6 Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Tue, 20 Feb 2024 02:03:08 -0800 Subject: [PATCH 11/18] Update System.ClientModel --- .dotnet/scripts/Update-ClientModel.ps1 | 197 +++ .../ModelReaderWriterExtensions.cs | 264 ++++ .../ClientShared/ModelReaderWriterHelper.cs | 31 + .../src/ClientShared/OptionalDictionary.cs | 214 +++ .dotnet/src/ClientShared/OptionalList.cs | 191 +++ .dotnet/src/ClientShared/OptionalProperty.cs | 117 ++ .dotnet/src/ClientShared/TypeFormatters.cs | 158 ++ .dotnet/src/Generated/Assistants.cs | 1124 ++++++++------- .dotnet/src/Generated/Audio.cs | 347 ++--- .dotnet/src/Generated/Chat.cs | 141 +- .dotnet/src/Generated/Completions.cs | 141 +- .dotnet/src/Generated/Embeddings.cs | 141 +- .dotnet/src/Generated/Files.cs | 564 ++++---- .dotnet/src/Generated/FineTuning.cs | 594 ++++---- .dotnet/src/Generated/Images.cs | 347 ++--- .dotnet/src/Generated/Messages.cs | 889 ++++++------ .../AssistantFileObject.Serialization.cs | 17 +- .../Generated/Models/AssistantFileObject.cs | 9 +- .../Models/AssistantFileObjectObject.cs | 3 +- .../Models/AssistantObject.Serialization.cs | 17 +- .../src/Generated/Models/AssistantObject.cs | 13 +- .../Generated/Models/AssistantObjectObject.cs | 3 +- .../Models/AudioSegment.Serialization.cs | 17 +- .dotnet/src/Generated/Models/AudioSegment.cs | 9 +- ...pletionFunctionCallOption.Serialization.cs | 17 +- .../ChatCompletionFunctionCallOption.cs | 7 +- 
.../ChatCompletionFunctions.Serialization.cs | 17 +- .../Models/ChatCompletionFunctions.cs | 7 +- ...CompletionMessageToolCall.Serialization.cs | 17 +- .../Models/ChatCompletionMessageToolCall.cs | 9 +- ...onMessageToolCallFunction.Serialization.cs | 17 +- .../ChatCompletionMessageToolCallFunction.cs | 9 +- .../ChatCompletionMessageToolCallType.cs | 3 +- ...CompletionNamedToolChoice.Serialization.cs | 17 +- .../Models/ChatCompletionNamedToolChoice.cs | 7 +- ...onNamedToolChoiceFunction.Serialization.cs | 17 +- .../ChatCompletionNamedToolChoiceFunction.cs | 7 +- .../ChatCompletionNamedToolChoiceType.cs | 3 +- ...CompletionResponseMessage.Serialization.cs | 17 +- .../Models/ChatCompletionResponseMessage.cs | 5 +- ...sponseMessageFunctionCall.Serialization.cs | 17 +- ...atCompletionResponseMessageFunctionCall.cs | 9 +- .../ChatCompletionResponseMessageRole.cs | 3 +- ...hatCompletionTokenLogprob.Serialization.cs | 17 +- .../Models/ChatCompletionTokenLogprob.cs | 9 +- ...ionTokenLogprobTopLogprob.Serialization.cs | 17 +- .../ChatCompletionTokenLogprobTopLogprob.cs | 7 +- .../ChatCompletionTool.Serialization.cs | 17 +- .../Generated/Models/ChatCompletionTool.cs | 7 +- .../Models/ChatCompletionToolType.cs | 3 +- .../Models/CompletionUsage.Serialization.cs | 17 +- .../src/Generated/Models/CompletionUsage.cs | 3 +- ...reateAssistantFileRequest.Serialization.cs | 17 +- .../Models/CreateAssistantFileRequest.cs | 7 +- .../CreateAssistantRequest.Serialization.cs | 17 +- .../Models/CreateAssistantRequest.cs | 7 +- ...eateChatCompletionRequest.Serialization.cs | 17 +- .../Models/CreateChatCompletionRequest.cs | 7 +- .../CreateChatCompletionRequestModel.cs | 3 +- ...tionRequestResponseFormat.Serialization.cs | 17 +- ...eateChatCompletionRequestResponseFormat.cs | 3 +- ...ChatCompletionRequestResponseFormatType.cs | 3 +- ...ateChatCompletionResponse.Serialization.cs | 17 +- .../Models/CreateChatCompletionResponse.cs | 11 +- ...tCompletionResponseChoice.Serialization.cs | 17 +- 
.../CreateChatCompletionResponseChoice.cs | 7 +- ...hatCompletionResponseChoiceFinishReason.cs | 3 +- ...ionResponseChoiceLogprobs.Serialization.cs | 17 +- ...ateChatCompletionResponseChoiceLogprobs.cs | 3 +- .../CreateChatCompletionResponseObject.cs | 3 +- .../CreateCompletionRequest.Serialization.cs | 17 +- .../Models/CreateCompletionRequest.cs | 5 +- .../Models/CreateCompletionRequestModel.cs | 3 +- .../CreateCompletionResponse.Serialization.cs | 17 +- .../Models/CreateCompletionResponse.cs | 11 +- ...eCompletionResponseChoice.Serialization.cs | 17 +- .../Models/CreateCompletionResponseChoice.cs | 7 +- ...ateCompletionResponseChoiceFinishReason.cs | 3 +- ...ionResponseChoiceLogprobs.Serialization.cs | 17 +- .../CreateCompletionResponseChoiceLogprobs.cs | 13 +- .../Models/CreateCompletionResponseObject.cs | 3 +- .../CreateEmbeddingRequest.Serialization.cs | 17 +- .../Models/CreateEmbeddingRequest.cs | 7 +- .../CreateEmbeddingRequestEncodingFormat.cs | 3 +- .../Models/CreateEmbeddingRequestModel.cs | 3 +- .../CreateEmbeddingResponse.Serialization.cs | 17 +- .../Models/CreateEmbeddingResponse.cs | 11 +- .../Models/CreateEmbeddingResponseObject.cs | 3 +- ...ateEmbeddingResponseUsage.Serialization.cs | 17 +- .../Models/CreateEmbeddingResponseUsage.cs | 3 +- .../Models/CreateFileRequest.Serialization.cs | 17 +- .../src/Generated/Models/CreateFileRequest.cs | 7 +- .../Models/CreateFileRequestPurpose.cs | 3 +- ...reateFineTuningJobRequest.Serialization.cs | 17 +- .../Models/CreateFineTuningJobRequest.cs | 7 +- ...JobRequestHyperparameters.Serialization.cs | 17 +- ...eateFineTuningJobRequestHyperparameters.cs | 3 +- .../Models/CreateFineTuningJobRequestModel.cs | 3 +- .../CreateImageEditRequest.Serialization.cs | 17 +- .../Models/CreateImageEditRequest.cs | 9 +- .../Models/CreateImageEditRequestModel.cs | 3 +- .../CreateImageEditRequestResponseFormat.cs | 3 +- .../Models/CreateImageEditRequestSize.cs | 3 +- .../CreateImageRequest.Serialization.cs | 17 +- 
.../Generated/Models/CreateImageRequest.cs | 7 +- .../Models/CreateImageRequestModel.cs | 3 +- .../Models/CreateImageRequestQuality.cs | 3 +- .../CreateImageRequestResponseFormat.cs | 3 +- .../Models/CreateImageRequestSize.cs | 3 +- .../Models/CreateImageRequestStyle.cs | 3 +- ...eateImageVariationRequest.Serialization.cs | 17 +- .../Models/CreateImageVariationRequest.cs | 7 +- .../CreateImageVariationRequestModel.cs | 3 +- ...eateImageVariationRequestResponseFormat.cs | 3 +- .../Models/CreateImageVariationRequestSize.cs | 3 +- .../CreateMessageRequest.Serialization.cs | 17 +- .../Generated/Models/CreateMessageRequest.cs | 7 +- .../Models/CreateMessageRequestRole.cs | 3 +- .../CreateModerationRequest.Serialization.cs | 17 +- .../Models/CreateModerationRequest.cs | 7 +- .../Models/CreateModerationRequestModel.cs | 3 +- .../CreateModerationResponse.Serialization.cs | 17 +- .../Models/CreateModerationResponse.cs | 11 +- ...eModerationResponseResult.Serialization.cs | 17 +- .../Models/CreateModerationResponseResult.cs | 9 +- ...nResponseResultCategories.Serialization.cs | 17 +- ...reateModerationResponseResultCategories.cs | 3 +- ...ponseResultCategoryScores.Serialization.cs | 17 +- ...eModerationResponseResultCategoryScores.cs | 3 +- .../Models/CreateRunRequest.Serialization.cs | 17 +- .../src/Generated/Models/CreateRunRequest.cs | 7 +- .../CreateSpeechRequest.Serialization.cs | 17 +- .../Generated/Models/CreateSpeechRequest.cs | 7 +- .../Models/CreateSpeechRequestModel.cs | 3 +- .../CreateSpeechRequestResponseFormat.cs | 3 +- .../Models/CreateSpeechRequestVoice.cs | 3 +- ...CreateThreadAndRunRequest.Serialization.cs | 17 +- .../Models/CreateThreadAndRunRequest.cs | 7 +- .../CreateThreadRequest.Serialization.cs | 17 +- .../Generated/Models/CreateThreadRequest.cs | 5 +- ...reateTranscriptionRequest.Serialization.cs | 17 +- .../Models/CreateTranscriptionRequest.cs | 7 +- .../Models/CreateTranscriptionRequestModel.cs | 3 +- ...reateTranscriptionRequestResponseFormat.cs | 
3 +- ...eateTranscriptionResponse.Serialization.cs | 17 +- .../Models/CreateTranscriptionResponse.cs | 7 +- .../Models/CreateTranscriptionResponseTask.cs | 3 +- .../CreateTranslationRequest.Serialization.cs | 17 +- .../Models/CreateTranslationRequest.cs | 7 +- .../Models/CreateTranslationRequestModel.cs | 3 +- .../CreateTranslationRequestResponseFormat.cs | 3 +- ...CreateTranslationResponse.Serialization.cs | 17 +- .../Models/CreateTranslationResponse.cs | 7 +- .../Models/CreateTranslationResponseTask.cs | 3 +- ...leteAssistantFileResponse.Serialization.cs | 17 +- .../Models/DeleteAssistantFileResponse.cs | 7 +- .../DeleteAssistantFileResponseObject.cs | 3 +- .../DeleteAssistantResponse.Serialization.cs | 17 +- .../Models/DeleteAssistantResponse.cs | 7 +- .../Models/DeleteAssistantResponseObject.cs | 3 +- .../DeleteFileResponse.Serialization.cs | 17 +- .../Generated/Models/DeleteFileResponse.cs | 7 +- .../Models/DeleteFileResponseObject.cs | 3 +- .../DeleteModelResponse.Serialization.cs | 17 +- .../Generated/Models/DeleteModelResponse.cs | 7 +- .../Models/DeleteModelResponseObject.cs | 3 +- .../DeleteThreadResponse.Serialization.cs | 17 +- .../Generated/Models/DeleteThreadResponse.cs | 7 +- .../Models/DeleteThreadResponseObject.cs | 3 +- .../Models/Embedding.Serialization.cs | 17 +- .dotnet/src/Generated/Models/Embedding.cs | 7 +- .../src/Generated/Models/EmbeddingObject.cs | 3 +- .../Models/FineTuningJob.Serialization.cs | 17 +- .dotnet/src/Generated/Models/FineTuningJob.cs | 17 +- .../FineTuningJobError.Serialization.cs | 17 +- .../Generated/Models/FineTuningJobError.cs | 3 +- .../FineTuningJobEvent.Serialization.cs | 17 +- .../Generated/Models/FineTuningJobEvent.cs | 11 +- .../Models/FineTuningJobEventLevel.cs | 3 +- ...eTuningJobHyperparameters.Serialization.cs | 17 +- .../Models/FineTuningJobHyperparameters.cs | 3 +- .../Generated/Models/FineTuningJobObject.cs | 3 +- .../Generated/Models/FineTuningJobStatus.cs | 3 +- .../Models/FunctionObject.Serialization.cs 
| 17 +- .../src/Generated/Models/FunctionObject.cs | 7 +- .../FunctionParameters.Serialization.cs | 17 +- .../Generated/Models/FunctionParameters.cs | 5 +- .../Generated/Models/Image.Serialization.cs | 17 +- .dotnet/src/Generated/Models/Image.cs | 3 +- .../Models/ImagesResponse.Serialization.cs | 17 +- .../src/Generated/Models/ImagesResponse.cs | 7 +- ...istAssistantFilesResponse.Serialization.cs | 17 +- .../Models/ListAssistantFilesResponse.cs | 11 +- .../ListAssistantFilesResponseObject.cs | 3 +- .../ListAssistantsResponse.Serialization.cs | 17 +- .../Models/ListAssistantsResponse.cs | 11 +- .../Models/ListAssistantsResponseObject.cs | 3 +- .../Models/ListFilesResponse.Serialization.cs | 17 +- .../src/Generated/Models/ListFilesResponse.cs | 7 +- .../Models/ListFilesResponseObject.cs | 3 +- ...neTuningJobEventsResponse.Serialization.cs | 17 +- .../Models/ListFineTuningJobEventsResponse.cs | 9 +- .../ListMessageFilesResponse.Serialization.cs | 17 +- .../Models/ListMessageFilesResponse.cs | 11 +- .../Models/ListMessageFilesResponseObject.cs | 3 +- .../ListMessagesResponse.Serialization.cs | 17 +- .../Generated/Models/ListMessagesResponse.cs | 11 +- .../Models/ListMessagesResponseObject.cs | 3 +- .../ListModelsResponse.Serialization.cs | 17 +- .../Generated/Models/ListModelsResponse.cs | 7 +- .../Models/ListModelsResponseObject.cs | 3 +- .dotnet/src/Generated/Models/ListOrder.cs | 3 +- ...tedFineTuningJobsResponse.Serialization.cs | 17 +- .../ListPaginatedFineTuningJobsResponse.cs | 9 +- .../ListRunStepsResponse.Serialization.cs | 17 +- .../Generated/Models/ListRunStepsResponse.cs | 11 +- .../Models/ListRunStepsResponseObject.cs | 3 +- .../Models/ListRunsResponse.Serialization.cs | 17 +- .../src/Generated/Models/ListRunsResponse.cs | 11 +- .../Models/ListRunsResponseObject.cs | 3 +- .../Models/MessageFileObject.Serialization.cs | 17 +- .../src/Generated/Models/MessageFileObject.cs | 9 +- .../Models/MessageFileObjectObject.cs | 3 +- 
.../Models/MessageObject.Serialization.cs | 17 +- .dotnet/src/Generated/Models/MessageObject.cs | 13 +- .../Generated/Models/MessageObjectObject.cs | 3 +- .../src/Generated/Models/MessageObjectRole.cs | 3 +- .../Generated/Models/Model.Serialization.cs | 17 +- .dotnet/src/Generated/Models/Model.cs | 9 +- .dotnet/src/Generated/Models/ModelObject.cs | 3 +- .../ModifyAssistantRequest.Serialization.cs | 17 +- .../Models/ModifyAssistantRequest.cs | 5 +- .../ModifyMessageRequest.Serialization.cs | 17 +- .../Generated/Models/ModifyMessageRequest.cs | 5 +- .../Models/ModifyRunRequest.Serialization.cs | 17 +- .../src/Generated/Models/ModifyRunRequest.cs | 5 +- .../ModifyThreadRequest.Serialization.cs | 17 +- .../Generated/Models/ModifyThreadRequest.cs | 5 +- .../Models/OpenAIFile.Serialization.cs | 17 +- .dotnet/src/Generated/Models/OpenAIFile.cs | 9 +- .../src/Generated/Models/OpenAIFileObject.cs | 3 +- .../src/Generated/Models/OpenAIFilePurpose.cs | 3 +- .../src/Generated/Models/OpenAIFileStatus.cs | 3 +- .../RunCompletionUsage.Serialization.cs | 17 +- .../Generated/Models/RunCompletionUsage.cs | 3 +- .../Models/RunObject.Serialization.cs | 17 +- .dotnet/src/Generated/Models/RunObject.cs | 19 +- .../RunObjectLastError.Serialization.cs | 17 +- .../Generated/Models/RunObjectLastError.cs | 7 +- .../Models/RunObjectLastErrorCode.cs | 3 +- .../src/Generated/Models/RunObjectObject.cs | 3 +- .../RunObjectRequiredAction.Serialization.cs | 17 +- .../Models/RunObjectRequiredAction.cs | 7 +- ...edActionSubmitToolOutputs.Serialization.cs | 17 +- ...unObjectRequiredActionSubmitToolOutputs.cs | 7 +- .../Models/RunObjectRequiredActionType.cs | 3 +- .../src/Generated/Models/RunObjectStatus.cs | 3 +- ...ailsMessageCreationObject.Serialization.cs | 17 +- .../RunStepDetailsMessageCreationObject.cs | 7 +- ...tionObjectMessageCreation.Serialization.cs | 17 +- ...ilsMessageCreationObjectMessageCreation.cs | 7 +- ...RunStepDetailsMessageCreationObjectType.cs | 3 +- 
...tepDetailsToolCallsObject.Serialization.cs | 17 +- .../Models/RunStepDetailsToolCallsObject.cs | 7 +- .../RunStepDetailsToolCallsObjectType.cs | 3 +- .../Models/RunStepObject.Serialization.cs | 17 +- .dotnet/src/Generated/Models/RunStepObject.cs | 15 +- .../RunStepObjectLastError.Serialization.cs | 17 +- .../Models/RunStepObjectLastError.cs | 7 +- .../Models/RunStepObjectLastErrorCode.cs | 3 +- .../Generated/Models/RunStepObjectObject.cs | 3 +- .../Generated/Models/RunStepObjectStatus.cs | 3 +- .../src/Generated/Models/RunStepObjectType.cs | 3 +- .../Models/RunToolCallObject.Serialization.cs | 17 +- .../src/Generated/Models/RunToolCallObject.cs | 9 +- ...RunToolCallObjectFunction.Serialization.cs | 17 +- .../Models/RunToolCallObjectFunction.cs | 9 +- .../Generated/Models/RunToolCallObjectType.cs | 3 +- ...bmitToolOutputsRunRequest.Serialization.cs | 17 +- .../Models/SubmitToolOutputsRunRequest.cs | 7 +- ...putsRunRequestToolOutputs.Serialization.cs | 17 +- .../SubmitToolOutputsRunRequestToolOutputs.cs | 3 +- .../Models/ThreadObject.Serialization.cs | 17 +- .dotnet/src/Generated/Models/ThreadObject.cs | 7 +- .../Generated/Models/ThreadObjectObject.cs | 3 +- .dotnet/src/Generated/ModelsOps.cs | 338 ++--- .dotnet/src/Generated/Moderations.cs | 141 +- .dotnet/src/Generated/OpenAIClient.cs | 58 +- .dotnet/src/Generated/OpenAIClientOptions.cs | 7 +- .dotnet/src/Generated/OpenAIModelFactory.cs | 5 +- .dotnet/src/Generated/Runs.cs | 1267 +++++++++-------- .dotnet/src/Generated/Threads.cs | 477 +++---- .dotnet/src/OpenAI.csproj | 2 +- .../tests/Generated/Tests/AssistantsTests.cs | 3 +- .dotnet/tests/Generated/Tests/AudioTests.cs | 3 +- .dotnet/tests/Generated/Tests/ChatTests.cs | 3 +- .../tests/Generated/Tests/CompletionsTests.cs | 3 +- .../tests/Generated/Tests/EmbeddingsTests.cs | 3 +- .dotnet/tests/Generated/Tests/FilesTests.cs | 3 +- .../tests/Generated/Tests/FineTuningTests.cs | 3 +- .dotnet/tests/Generated/Tests/ImagesTests.cs | 3 +- 
.../tests/Generated/Tests/MessagesTests.cs | 3 +- .../tests/Generated/Tests/ModelsOpsTests.cs | 3 +- .../tests/Generated/Tests/ModerationsTests.cs | 3 +- .dotnet/tests/Generated/Tests/RunsTests.cs | 3 +- .dotnet/tests/Generated/Tests/ThreadsTests.cs | 3 +- 306 files changed, 5144 insertions(+), 5243 deletions(-) create mode 100644 .dotnet/scripts/Update-ClientModel.ps1 create mode 100644 .dotnet/src/ClientShared/ModelReaderWriterExtensions.cs create mode 100644 .dotnet/src/ClientShared/ModelReaderWriterHelper.cs create mode 100644 .dotnet/src/ClientShared/OptionalDictionary.cs create mode 100644 .dotnet/src/ClientShared/OptionalList.cs create mode 100644 .dotnet/src/ClientShared/OptionalProperty.cs create mode 100644 .dotnet/src/ClientShared/TypeFormatters.cs diff --git a/.dotnet/scripts/Update-ClientModel.ps1 b/.dotnet/scripts/Update-ClientModel.ps1 new file mode 100644 index 000000000..b8a9a590f --- /dev/null +++ b/.dotnet/scripts/Update-ClientModel.ps1 @@ -0,0 +1,197 @@ +function Update-ClientModelPackage { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "src" + + $current = Get-Location + Set-Location -Path $directory + + dotnet build + dotnet remove "OpenAI.csproj" package "System.ClientModel" + dotnet add "OpenAI.csproj" package "System.ClientModel" --version "1.1.0-alpha.20240215.1" --source "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-net/nuget/v3/index.json" + + Set-Location -Path $current +} + +function Update-OpenAIClient { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "src\Generated" + $file = Get-ChildItem -Path $directory -Filter "OpenAIClient.cs" + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "\s+#nullable disable", "" + $content = $content -creplace "\s+using System\.ClientModel\.Internal;", "" + $content = $content -creplace "\s+using 
System\.ClientModel\.Primitives\.Pipeline;", "" + $content = $content -creplace " KeyCredential ", " ApiKeyCredential " + $content = $content -creplace " _keyCredential", " _credential" + $content = $content -creplace " MessagePipeline ", " ClientPipeline " + $content = $content -creplace "\s+\/\/\/ The ClientDiagnostics is used to provide tracing support for the client library. ", "" + $content = $content -creplace "\s+internal TelemetrySource ClientDiagnostics { get; }", "" + $content = $content -creplace "\(KeyCredential", "(ApiKeyCredential" + $content = $content -creplace "ClientUtilities.AssertNotNull\((?\w+), nameof\((\w+)\)\);", "if (`${var} is null) throw new ArgumentNullException(nameof(`${var}));" + $content = $content -creplace "\s+ClientDiagnostics = new TelemetrySource\(options, true\);", "" + $content = $content -creplace "_pipeline = MessagePipeline\.Create\(options, new IPipelinePolicy\[\] \{ new KeyCredentialPolicy\(_keyCredential, AuthorizationHeader, AuthorizationApiKeyPrefix\) \}, Array\.Empty>\(\)\);", "var authenticationPolicy = ApiKeyAuthenticationPolicy.CreateBearerAuthorizationPolicy(_credential);`r`n _pipeline = ClientPipeline.Create(options,`r`n perCallPolicies: ReadOnlySpan.Empty,`r`n perTryPolicies: new PipelinePolicy[] { authenticationPolicy },`r`n beforeTransportPolicies: ReadOnlySpan.Empty);" + $content = $content -creplace "\(ClientDiagnostics, ", "(" + + $content | Set-Content -Path $file.FullName +} + +function Update-OpenAIClientOptions { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "src\Generated" + $file = Get-ChildItem -Path $directory -Filter "OpenAIClientOptions.cs" + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "\s+#nullable disable", "" + $content = $content -creplace "using System\.ClientModel;", "using System.ClientModel.Primitives;" + $content = $content -creplace ": RequestOptions", ": 
ClientPipelineOptions" + + $content | Set-Content -Path $file.FullName +} + +function Update-Subclients { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "src\Generated" + $files = Get-ChildItem -Path $($directory + "\*") -Include "*.cs" -Exclude "OpenAIClient.cs", "OpenAIClientOptions.cs" + + foreach ($file in $files) { + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + # Delete #nullable + $content = $content -creplace "\s+#nullable disable", "" + + # Fix using statements + $content = $content -creplace "\s+using System.ClientModel.Internal;", "" + $content = $content -creplace "\s+using System.ClientModel.Primitives.Pipeline;", "" + $content = $content -creplace "using System.ClientModel.Primitives;", "using System.ClientModel.Primitives;`r`nusing System.Text;" + + # Fix ClientUtilities + $content = $content -creplace "ClientUtilities.AssertNotNull\((?\w+), nameof\((\w+)\)\);", "if (`${var} is null) throw new ArgumentNullException(nameof(`${var}));" + $content = $content -creplace "ClientUtilities.AssertNotNullOrEmpty\((?\w+), nameof\((\w+)\)\);", "if (`${var} is null) throw new ArgumentNullException(nameof(`${var}));`r`n if (string.IsNullOrEmpty(`${var})) throw new ArgumentException(nameof(`${var}));" + + # Delete TelemetrySource + $content = $content -creplace "\s+\/\/\/ The ClientDiagnostics is used to provide tracing support for the client library. ", "" + $content = $content -creplace "\s+internal TelemetrySource ClientDiagnostics { get; }", "" + + # Modify constructor + $content = $content -creplace "\s+\/\/\/ The handler for diagnostic messaging in the client. 
", "" + $content = $content -creplace "", "" + $content = $content -creplace "internal (?\w+)\(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint\)", "internal `${name}(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint)" + $content = $content -creplace "\s+ClientDiagnostics = clientDiagnostics;", "" + + # # Modify convenience methods + $content = $content -creplace "\s+\/\/\/ The cancellation token to use. ", "" + $content = $content -creplace ", CancellationToken cancellationToken = default\)", ")" + $content = $content -creplace "RequestOptions context = FromCancellationToken\(cancellationToken\);\s+", "" + $content = $content -creplace "using RequestBody content = (?\w+)\.ToRequestBody\(\);", "using BinaryContent content = BinaryContent.Create(`${var});" + $content = $content -creplace "using RequestBody content0 = (?\w+)\.ToRequestBody\(\);", "using BinaryContent content0 = BinaryContent.Create(`${var});" + $content = $content -creplace "Result result = await (?\w+)\(context\)\.ConfigureAwait\(false\);", "ClientResult result = await `${method}().ConfigureAwait(false);" + $content = $content -creplace "Result result = (?\w+)\(context\);", "ClientResult result = `${method}();" + $content = $content -creplace "Result result = await (?\w+)\((?[(\w+)(\?.ToString\(\)*)(,\s\w+)]*), context\)\.ConfigureAwait\(false\);", "ClientResult result = await `${method}(`${params}).ConfigureAwait(false);" + $content = $content -creplace "Result result = (?\w+)\((?[(\w+)(\?.ToString\(\)*)(,\s\w+)]*), context\);", "ClientResult result = `${method}(`${params});" + + # Modify protocol methods + $content = $content -creplace "\/\/\/ Please try the simpler \w+)\((?[(\w+)(\?*)(,\s\w+)]*),CancellationToken\)`"/> convenience overload with strongly typed models first.", "/// Please try the simpler convenience overload with strongly typed models first." 
+ $content = $content -creplace "\/\/\/ The request context, which can override default behaviors of the client pipeline on a per-call basis. ", "/// The request options, which can override default behaviors of the client pipeline on a per-call basis. " + $content = $content -creplace "\/\/\/ ", "/// " + $content = $content -creplace " Task ", " Task " + $content = $content -creplace " Result ", " ClientResult " + $content = $content -creplace "\(RequestBody content", "(BinaryContent content" + $content = $content -creplace " RequestBody content", " BinaryContent content" + $content = $content -creplace "\(RequestOptions context", "(RequestOptions options" + $content = $content -creplace " RequestOptions context", " RequestOptions options" + $content = $content -creplace "\s+using var scope = ClientDiagnostics\.CreateSpan\(`"(\w+\.\w+)`"\);", "" + $content = $content -creplace "\s+scope\.Start\(\);", "" + $content = $content -creplace "(?s)\s+try\s+\{\s+using PipelineMessage message = (?\w+)\((?[(\w+)(,\s\w+)]*)context\);\s+return Result\.FromResponse\(await _pipeline\.ProcessMessageAsync\(message, context\)\.ConfigureAwait\(false\)\);\s+\}", "`r`n options ??= new RequestOptions();`r`n using PipelineMessage message = `${method}(`${params}options);`r`n await _pipeline.SendAsync(message).ConfigureAwait(false);`r`n PipelineResponse response = message.Response!;`r`n`r`n if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default)`r`n {`r`n throw await ClientResultException.CreateAsync(response).ConfigureAwait(false);`r`n }`r`n`r`n return ClientResult.FromResponse(response);" + $content = $content -creplace "(?s)\s+try\s+\{\s+using PipelineMessage message = (?\w+)\((?[(\w+)(,\s\w+)]*)context\);\s+return Result\.FromResponse\(_pipeline.ProcessMessage\(message, context\)\);\s+\}", "`r`n options ??= new RequestOptions();`r`n using PipelineMessage message = `${method}(`${params}options);`r`n _pipeline.Send(message);`r`n PipelineResponse response = 
message.Response!;`r`n`r`n if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default)`r`n {`r`n throw new ClientResultException(response);`r`n }`r`n`r`n return ClientResult.FromResponse(response);" + $content = $content -creplace "(?s)\s+catch \(Exception e\)\s+\{\s+scope\.Failed\(e\);\s+throw;\s+\}", "" + + # Create request + $content = $content -creplace "\(RequestBody content", "(BinaryContent content" + $content = $content -creplace " RequestBody content", " BinaryContent content" + $content = $content -creplace " RequestOptions context", " RequestOptions options" + $content = $content -creplace "var message = _pipeline\.CreateMessage\(context, ResponseErrorClassifier200\);", "PipelineMessage message = _pipeline.CreateMessage();`r`n message.ResponseClassifier = ResponseErrorClassifier200;" + $content = $content -creplace "var request = message\.Request;", "PipelineRequest request = message.Request;" + $content = $content -creplace "request\.SetMethod\(`"(?[\w\/]+)`"\);", "request.Method = `"`${name}`";" + $content = $content -creplace "var uri = new RequestUri\(\);", "UriBuilder uriBuilder = new(_endpoint.ToString());" + $content = $content -creplace "uri\.Reset\(_endpoint\);", "StringBuilder path = new();" + $content = $content -creplace "uri\.AppendPath\((?`"?[\w\/]+`"?), (\w+)\);", "path.Append(`${path});`r`n uriBuilder.Path += path.ToString();" + $content = $content -creplace "uri\.AppendQuery\(`"(?\w+)`", (?\w+(\.Value)?), (\w+)\);", "if (uriBuilder.Query != null && uriBuilder.Query.Length > 1)`r`n {`r`n uriBuilder.Query += $`"&`${key}={`${value}}`";`r`n }`r`n else`r`n {`r`n uriBuilder.Query = $`"`${key}={`${value}}`";`r`n }" + $content = $content -creplace "request\.Uri = uri\.ToUri\(\);", "request.Uri = uriBuilder.Uri;" + $content = $content -creplace "request\.SetHeaderValue", "request.Headers.Set" + $content = $content -creplace "request\.Content = content;", "request.Content = content;`r`n message.Apply(options);" + + # Delete 
DefaultRequestContext + $content = $content -creplace "\s+private static RequestOptions DefaultRequestContext = new RequestOptions\(\);", "" + + # Delete FromCancellationToken + $content = $content -creplace "(?s)\s+internal static RequestOptions FromCancellationToken\(CancellationToken cancellationToken = default\).*?return new RequestOptions\(\) \{ CancellationToken = cancellationToken \};.*?\}", "" + + # Clean up ApiKeyCredential + $content = $content -creplace " KeyCredential", " ApiKeyCredential" + $content = $content -creplace "_keyCredential", "_credential" + $content = $content -creplace " keyCredential", " credential" + + # Clean up ClientPipeline + $content = $content -creplace " MessagePipeline ", " ClientPipeline " + + # Clean up ClientResult + $content = $content -creplace " Result", " ClientResult" + $content = $content -creplace "Task _responseErrorClassifier200 \?\?= new StatusResponseClassifier\(stackalloc ushort\[\] \{ 200 \}\);", "private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 });" + + $content | Set-Content -Path $file.FullName + } +} + +function Update-Models { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "src\Generated\Models" + $files = Get-ChildItem -Path $directory -Filter "*.cs" + + foreach ($file in $files) { + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "\s+#nullable disable", "" + $content = $content -creplace "ClientUtilities.AssertNotNull\((?@?\w+), nameof\((@?\w+)\)\);", "if (`${var} is null) throw new ArgumentNullException(nameof(`${var}));" + $content = $content -creplace "using System.ClientModel.Internal;", "using OpenAI.ClientShared.Internal;" + $content = $content -creplace ": IUtf8JsonWriteable,", ":" + $content = $content -creplace "\s+void IUtf8JsonWriteable\.Write\(Utf8JsonWriter writer\) => 
\(\(IJsonModel<(\w+)>\)this\)\.Write\(writer, new ModelReaderWriterOptions\(`"W`"\)\);`r`n", "" + $content = $content -creplace "(?s)\s+\/\/\/ Convert into a Utf8JsonRequestBody\. .*?return content;.*?\}", "" + + $content | Set-Content -Path $file.FullName + } +} + +function Update-Tests { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "tests\Generated\Tests" + $files = Get-ChildItem -Path $directory -Filter "*.cs" + + foreach ($file in $files) { + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace " KeyCredential", " ApiKeyCredential" + + $content | Set-Content -Path $file.FullName + } +} + +Update-ClientModelPackage +Update-OpenAIClient +Update-OpenAIClientOptions +Update-Subclients +Update-Models +Update-Tests \ No newline at end of file diff --git a/.dotnet/src/ClientShared/ModelReaderWriterExtensions.cs b/.dotnet/src/ClientShared/ModelReaderWriterExtensions.cs new file mode 100644 index 000000000..25be6cbb5 --- /dev/null +++ b/.dotnet/src/ClientShared/ModelReaderWriterExtensions.cs @@ -0,0 +1,264 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Diagnostics; +using System.Globalization; +using System.Linq; +using System.Text.Json; + +namespace OpenAI.ClientShared.Internal; + +internal static class ModelReaderWriterExtensions +{ + // TODO: These are copied from shared source files. If they become + // public we need to refactor and consolidate to a single place. + + #region JsonElement + + public static object? 
GetObject(in this JsonElement element) + { + switch (element.ValueKind) + { + case JsonValueKind.String: + return element.GetString(); + case JsonValueKind.Number: + if (element.TryGetInt32(out int intValue)) + { + return intValue; + } + if (element.TryGetInt64(out long longValue)) + { + return longValue; + } + return element.GetDouble(); + case JsonValueKind.True: + return true; + case JsonValueKind.False: + return false; + case JsonValueKind.Undefined: + case JsonValueKind.Null: + return null; + case JsonValueKind.Object: + var dictionary = new Dictionary(); + foreach (JsonProperty jsonProperty in element.EnumerateObject()) + { + dictionary.Add(jsonProperty.Name, jsonProperty.Value.GetObject()); + } + return dictionary; + case JsonValueKind.Array: + var list = new List(); + foreach (JsonElement item in element.EnumerateArray()) + { + list.Add(item.GetObject()); + } + return list.ToArray(); + default: + throw new NotSupportedException("Not supported value kind " + element.ValueKind); + } + } + + public static byte[]? 
GetBytesFromBase64(in this JsonElement element, string format) + { + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + return format switch + { + "U" => TypeFormatters.FromBase64UrlString(element.GetRequiredString()), + "D" => element.GetBytesFromBase64(), + _ => throw new ArgumentException($"Format is not supported: '{format}'", nameof(format)) + }; + } + + public static DateTimeOffset GetDateTimeOffset(in this JsonElement element, string format) => format switch + { + "U" when element.ValueKind == JsonValueKind.Number => DateTimeOffset.FromUnixTimeSeconds(element.GetInt64()), + // relying on the param check of the inner call to throw ArgumentNullException if GetString() returns null + _ => TypeFormatters.ParseDateTimeOffset(element.GetString()!, format) + }; + + public static TimeSpan GetTimeSpan(in this JsonElement element, string format) => + // relying on the param check of the inner call to throw ArgumentNullException if GetString() returns null + TypeFormatters.ParseTimeSpan(element.GetString()!, format); + + public static char GetChar(this in JsonElement element) + { + if (element.ValueKind == JsonValueKind.String) + { + var text = element.GetString(); + if (text == null || text.Length != 1) + { + throw new NotSupportedException($"Cannot convert \"{text}\" to a Char"); + } + return text[0]; + } + else + { + throw new NotSupportedException($"Cannot convert {element.ValueKind} to a Char"); + } + } + + [Conditional("DEBUG")] + public static void ThrowNonNullablePropertyIsNull(this JsonProperty property) + { + throw new JsonException($"A property '{property.Name}' defined as non-nullable but received as null from the service. 
" + + $"This exception only happens in DEBUG builds of the library and would be ignored in the release build"); + } + + public static string GetRequiredString(in this JsonElement element) + { + var value = element.GetString(); + if (value == null) + throw new InvalidOperationException($"The requested operation requires an element of type 'String', but the target element has type '{element.ValueKind}'."); + + return value; + } + + #endregion + + #region Utf8JsonWriter + public static void WriteStringValue(this Utf8JsonWriter writer, DateTimeOffset value, string format) => + writer.WriteStringValue(TypeFormatters.ToString(value, format)); + + public static void WriteStringValue(this Utf8JsonWriter writer, DateTime value, string format) => + writer.WriteStringValue(TypeFormatters.ToString(value, format)); + + public static void WriteStringValue(this Utf8JsonWriter writer, TimeSpan value, string format) => + writer.WriteStringValue(TypeFormatters.ToString(value, format)); + + public static void WriteStringValue(this Utf8JsonWriter writer, char value) => + writer.WriteStringValue(value.ToString(CultureInfo.InvariantCulture)); + + public static void WriteNonEmptyArray(this Utf8JsonWriter writer, string name, IReadOnlyList values) + { + if (values.Any()) + { + writer.WriteStartArray(name); + foreach (var s in values) + { + writer.WriteStringValue(s); + } + + writer.WriteEndArray(); + } + } + + public static void WriteBase64StringValue(this Utf8JsonWriter writer, byte[] value, string format) + { + if (value == null) + { + writer.WriteNullValue(); + return; + } + + switch (format) + { + case "U": + writer.WriteStringValue(TypeFormatters.ToBase64UrlString(value)); + break; + case "D": + writer.WriteBase64StringValue(value); + break; + default: + throw new ArgumentException($"Format is not supported: '{format}'", nameof(format)); + } + } + + public static void WriteNumberValue(this Utf8JsonWriter writer, DateTimeOffset value, string format) + { + if (format != "U") throw new 
ArgumentOutOfRangeException(format, "Only 'U' format is supported when writing a DateTimeOffset as a Number."); + + writer.WriteNumberValue(value.ToUnixTimeSeconds()); + } + + public static void WriteObjectValue(this Utf8JsonWriter writer, object? value) + { + switch (value) + { + case null: + writer.WriteNullValue(); + break; + case IJsonModel writeable: + writeable.Write(writer, ModelReaderWriterHelper.WireOptions); + break; + case byte[] bytes: + writer.WriteBase64StringValue(bytes); + break; + case BinaryData bytes: + writer.WriteBase64StringValue(bytes); + break; + case JsonElement json: + json.WriteTo(writer); + break; + case int i: + writer.WriteNumberValue(i); + break; + case decimal d: + writer.WriteNumberValue(d); + break; + case double d: + if (double.IsNaN(d)) + { + writer.WriteStringValue("NaN"); + } + else + { + writer.WriteNumberValue(d); + } + break; + case float f: + writer.WriteNumberValue(f); + break; + case long l: + writer.WriteNumberValue(l); + break; + case string s: + writer.WriteStringValue(s); + break; + case bool b: + writer.WriteBooleanValue(b); + break; + case Guid g: + writer.WriteStringValue(g); + break; + case DateTimeOffset dateTimeOffset: + writer.WriteStringValue(dateTimeOffset, "O"); + break; + case DateTime dateTime: + writer.WriteStringValue(dateTime, "O"); + break; + case IEnumerable> enumerable: + writer.WriteStartObject(); + foreach (KeyValuePair pair in enumerable) + { + writer.WritePropertyName(pair.Key); + writer.WriteObjectValue(pair.Value); + } + writer.WriteEndObject(); + break; + case IEnumerable objectEnumerable: + writer.WriteStartArray(); + foreach (object item in objectEnumerable) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + break; + case TimeSpan timeSpan: + writer.WriteStringValue(timeSpan, "P"); + break; + + default: + throw new NotSupportedException("Not supported type " + value.GetType()); + } + } + + #endregion +} \ No newline at end of file diff --git 
a/.dotnet/src/ClientShared/ModelReaderWriterHelper.cs b/.dotnet/src/ClientShared/ModelReaderWriterHelper.cs new file mode 100644 index 000000000..21181d6d1 --- /dev/null +++ b/.dotnet/src/ClientShared/ModelReaderWriterHelper.cs @@ -0,0 +1,31 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; +using System.ClientModel.Primitives; +using System.Runtime.CompilerServices; + +namespace OpenAI.ClientShared.Internal; + +internal static class ModelReaderWriterHelper +{ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void ValidateFormat(IPersistableModel model, string format) + { + bool implementsJson = model is IJsonModel; + bool isValid = (format == "J" && implementsJson) || format == "W"; + if (!isValid) + { + throw new FormatException($"The model {model.GetType().Name} does not support '{format}' format."); + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void ValidateFormat(IPersistableModel model, string format) + => ValidateFormat(model, format); + + private static ModelReaderWriterOptions? _wireOptions; + public static ModelReaderWriterOptions WireOptions => _wireOptions ??= new ModelReaderWriterOptions("W"); +} \ No newline at end of file diff --git a/.dotnet/src/ClientShared/OptionalDictionary.cs b/.dotnet/src/ClientShared/OptionalDictionary.cs new file mode 100644 index 000000000..138a936da --- /dev/null +++ b/.dotnet/src/ClientShared/OptionalDictionary.cs @@ -0,0 +1,214 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; +using System.Collections; +using System.Collections.Generic; + +namespace OpenAI.ClientShared.Internal; + +internal class OptionalDictionary : IDictionary, IReadOnlyDictionary where TKey : notnull +{ + private IDictionary? 
_innerDictionary; + + public OptionalDictionary() + { + } + + public OptionalDictionary(OptionalProperty> optionalDictionary) : this(optionalDictionary.Value) + { + } + + public OptionalDictionary(OptionalProperty> optionalDictionary) : this(optionalDictionary.Value) + { + } + + private OptionalDictionary(IDictionary? dictionary) + { + if (dictionary == null) return; + + _innerDictionary = new Dictionary(dictionary); + } + + private OptionalDictionary(IReadOnlyDictionary? dictionary) + { + if (dictionary == null) return; + + _innerDictionary = new Dictionary(); + foreach (KeyValuePair pair in dictionary) + { + _innerDictionary.Add(pair); + } + } + + public bool IsUndefined => _innerDictionary == null; + + public IEnumerator> GetEnumerator() + { + if (IsUndefined) + { + IEnumerator> GetEmptyEnumerator() + { + yield break; + } + return GetEmptyEnumerator(); + } + return EnsureDictionary().GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + public void Add(KeyValuePair item) + { + EnsureDictionary().Add(item); + } + + public void Clear() + { + EnsureDictionary().Clear(); + } + + public bool Contains(KeyValuePair item) + { + if (IsUndefined) + { + return false; + } + + return EnsureDictionary().Contains(item); + } + + public void CopyTo(KeyValuePair[] array, int arrayIndex) + { + if (IsUndefined) + { + return; + } + + EnsureDictionary().CopyTo(array, arrayIndex); + } + + public bool Remove(KeyValuePair item) + { + if (IsUndefined) + { + return false; + } + + return EnsureDictionary().Remove(item); + } + + public int Count + { + get + { + if (IsUndefined) + { + return 0; + } + + return EnsureDictionary().Count; + } + } + + public bool IsReadOnly + { + get + { + if (IsUndefined) + { + return false; + } + return EnsureDictionary().IsReadOnly; + } + } + + public void Add(TKey key, TValue value) + { + EnsureDictionary().Add(key, value); + } + + public bool ContainsKey(TKey key) + { + if (IsUndefined) + { + return false; + } 
+ + return EnsureDictionary().ContainsKey(key); + } + + public bool Remove(TKey key) + { + if (IsUndefined) + { + return false; + } + + return EnsureDictionary().Remove(key); + } + + public bool TryGetValue(TKey key, out TValue value) + { + if (IsUndefined) + { + value = default!; + return false; + } + return EnsureDictionary().TryGetValue(key, out value!); + } + + public TValue this[TKey key] + { + get + { + if (IsUndefined) + { + throw new KeyNotFoundException(nameof(key)); + } + + return EnsureDictionary()[key]; + } + set => EnsureDictionary()[key] = value; + } + + IEnumerable IReadOnlyDictionary.Keys => Keys; + + IEnumerable IReadOnlyDictionary.Values => Values; + + public ICollection Keys + { + get + { + if (IsUndefined) + { + return Array.Empty(); + } + + return EnsureDictionary().Keys; + } + } + + public ICollection Values + { + get + { + if (IsUndefined) + { + return Array.Empty(); + } + + return EnsureDictionary().Values; + } + } + + private IDictionary EnsureDictionary() + { + return _innerDictionary ??= new Dictionary(); + } +} \ No newline at end of file diff --git a/.dotnet/src/ClientShared/OptionalList.cs b/.dotnet/src/ClientShared/OptionalList.cs new file mode 100644 index 000000000..cc95a17ea --- /dev/null +++ b/.dotnet/src/ClientShared/OptionalList.cs @@ -0,0 +1,191 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.ClientShared.Internal; + +internal class OptionalList : IList, IReadOnlyList +{ + private IList? _innerList; + + public OptionalList() + { + } + + public OptionalList(OptionalProperty> optionalList) : this(optionalList.Value) + { + } + + public OptionalList(OptionalProperty> optionalList) : this(optionalList.Value) + { + } + + private OptionalList(IEnumerable? 
innerList) + { + if (innerList == null) + { + return; + } + + _innerList = innerList.ToList(); + } + + private OptionalList(IList? innerList) + { + if (innerList == null) + { + return; + } + + _innerList = innerList; + } + + public bool IsUndefined => _innerList == null; + + public void Reset() + { + _innerList = null; + } + + public IEnumerator GetEnumerator() + { + if (IsUndefined) + { + IEnumerator EnumerateEmpty() + { + yield break; + } + + return EnumerateEmpty(); + } + return EnsureList().GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + public void Add(T item) + { + EnsureList().Add(item); + } + + public void Clear() + { + EnsureList().Clear(); + } + + public bool Contains(T item) + { + if (IsUndefined) + { + return false; + } + + return EnsureList().Contains(item); + } + + public void CopyTo(T[] array, int arrayIndex) + { + if (IsUndefined) + { + return; + } + + EnsureList().CopyTo(array, arrayIndex); + } + + public bool Remove(T item) + { + if (IsUndefined) + { + return false; + } + + return EnsureList().Remove(item); + } + + public int Count + { + get + { + if (IsUndefined) + { + return 0; + } + return EnsureList().Count; + } + } + + public bool IsReadOnly + { + get + { + if (IsUndefined) + { + return false; + } + + return EnsureList().IsReadOnly; + } + } + + public int IndexOf(T item) + { + if (IsUndefined) + { + return -1; + } + + return EnsureList().IndexOf(item); + } + + public void Insert(int index, T item) + { + EnsureList().Insert(index, item); + } + + public void RemoveAt(int index) + { + if (IsUndefined) + { + throw new ArgumentOutOfRangeException(nameof(index)); + } + + EnsureList().RemoveAt(index); + } + + public T this[int index] + { + get + { + if (IsUndefined) + { + throw new ArgumentOutOfRangeException(nameof(index)); + } + + return EnsureList()[index]; + } + set + { + if (IsUndefined) + { + throw new ArgumentOutOfRangeException(nameof(index)); + } + + EnsureList()[index] = value; + } + } 
+ + private IList EnsureList() + { + return _innerList ??= new List(); + } +} \ No newline at end of file diff --git a/.dotnet/src/ClientShared/OptionalProperty.cs b/.dotnet/src/ClientShared/OptionalProperty.cs new file mode 100644 index 000000000..9ad960789 --- /dev/null +++ b/.dotnet/src/ClientShared/OptionalProperty.cs @@ -0,0 +1,117 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.ClientShared.Internal; + +internal static class OptionalProperty +{ + public static bool IsCollectionDefined(IEnumerable collection) + { + return !(collection is OptionalList changeTrackingList && changeTrackingList.IsUndefined); + } + + public static bool IsCollectionDefined(IReadOnlyDictionary collection) + where TKey : notnull + { + return !(collection is OptionalDictionary changeTrackingList && changeTrackingList.IsUndefined); + } + + public static bool IsCollectionDefined(IDictionary? collection) + where TKey : notnull + { + if (collection is null) + return false; + + return !(collection is OptionalDictionary changeTrackingList && changeTrackingList.IsUndefined); + } + + public static bool IsDefined(T? value) where T : struct + { + return value.HasValue; + } + public static bool IsDefined(object value) + { + return value != null; + } + public static bool IsDefined(string? 
value) + { + return value != null; + } + + public static bool IsDefined(JsonElement value) + { + return value.ValueKind != JsonValueKind.Undefined; + } + + public static IReadOnlyDictionary ToDictionary(OptionalProperty> optional) + where TKey : notnull + { + if (optional.HasValue) + { + return optional.Value!; + } + return new OptionalDictionary(optional); + } + + public static IDictionary ToDictionary(OptionalProperty> optional) + where TKey : notnull + { + if (optional.HasValue) + { + return optional.Value!; + } + return new OptionalDictionary(optional); + } + + public static IReadOnlyList ToList(OptionalProperty> optional) + { + if (optional.HasValue) + { + return optional.Value!; + } + return new OptionalList(optional); + } + + public static IList ToList(OptionalProperty> optional) + { + if (optional.HasValue) + { + return optional.Value!; + } + return new OptionalList(optional); + } + + public static T? ToNullable(OptionalProperty optional) where T : struct + { + if (optional.HasValue) + { + return optional.Value; + } + return default; + } + + public static T? ToNullable(OptionalProperty optional) where T : struct + { + return optional.Value; + } +} + +internal readonly struct OptionalProperty +{ + public OptionalProperty(T? value) : this() + { + Value = value; + HasValue = value is not null; + } + + public T? Value { get; } + public bool HasValue { get; } + + public static implicit operator OptionalProperty(T? value) => new OptionalProperty(value); + public static implicit operator T?(OptionalProperty optional) => optional.Value; +} \ No newline at end of file diff --git a/.dotnet/src/ClientShared/TypeFormatters.cs b/.dotnet/src/ClientShared/TypeFormatters.cs new file mode 100644 index 000000000..9a00edaf3 --- /dev/null +++ b/.dotnet/src/ClientShared/TypeFormatters.cs @@ -0,0 +1,158 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#nullable enable + +using System; +using System.Collections.Generic; +using System.Globalization; +using System.Xml; + +namespace OpenAI.ClientShared.Internal; + +internal class TypeFormatters +{ + private const string RoundtripZFormat = "yyyy-MM-ddTHH:mm:ss.fffffffZ"; + public static string DefaultNumberFormat { get; } = "G"; + + public static string ToString(bool value) => value ? "true" : "false"; + + public static string ToString(DateTime value, string format) => value.Kind switch + { + DateTimeKind.Utc => ToString((DateTimeOffset)value, format), + _ => throw new NotSupportedException($"DateTime {value} has a Kind of {value.Kind}. Azure SDK requires it to be UTC. You can call DateTime.SpecifyKind to change Kind property value to DateTimeKind.Utc.") + }; + + public static string ToString(DateTimeOffset value, string format) => format switch + { + "D" => value.ToString("yyyy-MM-dd", CultureInfo.InvariantCulture), + "U" => value.ToUnixTimeSeconds().ToString(CultureInfo.InvariantCulture), + "O" => value.ToUniversalTime().ToString(RoundtripZFormat, CultureInfo.InvariantCulture), + "o" => value.ToUniversalTime().ToString(RoundtripZFormat, CultureInfo.InvariantCulture), + "R" => value.ToString("r", CultureInfo.InvariantCulture), + _ => value.ToString(format, CultureInfo.InvariantCulture) + }; + + public static string ToString(TimeSpan value, string format) => format switch + { + "P" => XmlConvert.ToString(value), + _ => value.ToString(format, CultureInfo.InvariantCulture) + }; + + public static string ToString(byte[] value, string format) => format switch + { + "U" => ToBase64UrlString(value), + "D" => Convert.ToBase64String(value), + _ => throw new ArgumentException($"Format is not supported: '{format}'", nameof(format)) + }; + + public static string ToBase64UrlString(byte[] value) + { + var numWholeOrPartialInputBlocks = checked(value.Length + 2) / 3; + var size = checked(numWholeOrPartialInputBlocks * 4); + var output = new char[size]; + + var numBase64Chars = 
Convert.ToBase64CharArray(value, 0, value.Length, output, 0); + + // Fix up '+' -> '-' and '/' -> '_'. Drop padding characters. + int i = 0; + for (; i < numBase64Chars; i++) + { + var ch = output[i]; + if (ch == '+') + { + output[i] = '-'; + } + else if (ch == '/') + { + output[i] = '_'; + } + else if (ch == '=') + { + // We've reached a padding character; truncate the remainder. + break; + } + } + + return new string(output, 0, i); + } + + public static byte[] FromBase64UrlString(string value) + { + var paddingCharsToAdd = GetNumBase64PaddingCharsToAddForDecode(value.Length); + + var output = new char[value.Length + paddingCharsToAdd]; + + int i; + for (i = 0; i < value.Length; i++) + { + var ch = value[i]; + if (ch == '-') + { + output[i] = '+'; + } + else if (ch == '_') + { + output[i] = '/'; + } + else + { + output[i] = ch; + } + } + + for (; i < output.Length; i++) + { + output[i] = '='; + } + + return Convert.FromBase64CharArray(output, 0, output.Length); + } + + private static int GetNumBase64PaddingCharsToAddForDecode(int inputLength) + { + switch (inputLength % 4) + { + case 0: + return 0; + case 2: + return 2; + case 3: + return 1; + default: + throw new InvalidOperationException("Malformed input"); + } + } + + public static DateTimeOffset ParseDateTimeOffset(string value, string format) + { + return format switch + { + "U" => DateTimeOffset.FromUnixTimeSeconds(long.Parse(value, CultureInfo.InvariantCulture)), + _ => DateTimeOffset.Parse(value, CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal) + }; + } + + public static TimeSpan ParseTimeSpan(string value, string format) => format switch + { + "P" => XmlConvert.ToTimeSpan(value), + _ => TimeSpan.ParseExact(value, format, CultureInfo.InvariantCulture) + }; + + public static string ConvertToString(object? value, string? 
format = null) + => value switch + { + null => "null", + string s => s, + bool b => ToString(b), + int or float or double or long or decimal => ((IFormattable)value).ToString(DefaultNumberFormat, CultureInfo.InvariantCulture), + byte[] b when format != null => ToString(b, format), + IEnumerable s => string.Join(",", s), + DateTimeOffset dateTime when format != null => ToString(dateTime, format), + TimeSpan timeSpan when format != null => ToString(timeSpan, format), + TimeSpan timeSpan => XmlConvert.ToString(timeSpan), + Guid guid => guid.ToString(), + BinaryData binaryData => TypeFormatters.ConvertToString(binaryData.ToArray(), format), + _ => value.ToString()! + }; +} \ No newline at end of file diff --git a/.dotnet/src/Generated/Assistants.cs b/.dotnet/src/Generated/Assistants.cs index 748820476..afaac60ee 100644 --- a/.dotnet/src/Generated/Assistants.cs +++ b/.dotnet/src/Generated/Assistants.cs @@ -1,12 +1,9 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; +using System.Text; using System.Threading; using System.Threading.Tasks; using OpenAI.Models; @@ -18,16 +15,13 @@ namespace OpenAI public partial class Assistants { private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; + private readonly ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing support for the client library. - internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. 
- public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of Assistants for mocking. protected Assistants() @@ -35,44 +29,38 @@ protected Assistants() } /// Initializes a new instance of Assistants. - /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. + /// The key credential to copy. /// OpenAI Endpoint. - internal Assistants(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + internal Assistants(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) { - ClientDiagnostics = clientDiagnostics; _pipeline = pipeline; - _keyCredential = keyCredential; + _credential = credential; _endpoint = endpoint; } /// Create an assistant with a model and instructions. /// The to use. - /// The cancellation token to use. /// is null. - public virtual async Task> CreateAssistantAsync(CreateAssistantRequest assistant, CancellationToken cancellationToken = default) + public virtual async Task> CreateAssistantAsync(CreateAssistantRequest assistant) { - ClientUtilities.AssertNotNull(assistant, nameof(assistant)); + if (assistant is null) throw new ArgumentNullException(nameof(assistant)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = assistant.ToRequestBody(); - Result result = await CreateAssistantAsync(content, context).ConfigureAwait(false); - return Result.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(assistant); + ClientResult result = await CreateAssistantAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Create an assistant with a model and instructions. 
/// The to use. - /// The cancellation token to use. /// is null. - public virtual Result CreateAssistant(CreateAssistantRequest assistant, CancellationToken cancellationToken = default) + public virtual ClientResult CreateAssistant(CreateAssistantRequest assistant) { - ClientUtilities.AssertNotNull(assistant, nameof(assistant)); + if (assistant is null) throw new ArgumentNullException(nameof(assistant)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = assistant.ToRequestBody(); - Result result = CreateAssistant(content, context); - return Result.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(assistant); + ClientResult result = CreateAssistant(content); + return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -85,32 +73,30 @@ public virtual Result CreateAssistant(CreateAssistantRequest as /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateAssistantAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateAssistantAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateAssistantRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Assistants.CreateAssistant"); - scope.Start(); - try + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateAssistantRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -123,32 +109,30 @@ public virtual async Task CreateAssistantAsync(RequestBody content, Requ /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateAssistant(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateAssistant(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateAssistantRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Assistants.CreateAssistant"); - scope.Start(); - try + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateAssistantRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Returns a list of assistants. @@ -170,12 +154,10 @@ public virtual Result CreateAssistant(RequestBody content, RequestOptions contex /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The cancellation token to use. - public virtual async Task> GetAssistantsAsync(int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + public virtual async Task> GetAssistantsAsync(int? limit = null, ListOrder? 
order = null, string after = null, string before = null) { - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetAssistantsAsync(limit, order?.ToString(), after, before, context).ConfigureAwait(false); - return Result.FromValue(ListAssistantsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetAssistantsAsync(limit, order?.ToString(), after, before).ConfigureAwait(false); + return ClientResult.FromValue(ListAssistantsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Returns a list of assistants. @@ -197,12 +179,10 @@ public virtual async Task> GetAssistantsAsync(int /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The cancellation token to use. - public virtual Result GetAssistants(int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + public virtual ClientResult GetAssistants(int? limit = null, ListOrder? order = null, string after = null, string before = null) { - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetAssistants(limit, order?.ToString(), after, before, context); - return Result.FromValue(ListAssistantsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetAssistants(limit, order?.ToString(), after, before); + return ClientResult.FromValue(ListAssistantsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -215,7 +195,7 @@ public virtual Result GetAssistants(int? limit = null, L /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. 
/// /// /// @@ -238,23 +218,22 @@ public virtual Result GetAssistants(int? limit = null, L /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task GetAssistantsAsync(int? limit, string order, string after, string before, RequestOptions context) + public virtual async Task GetAssistantsAsync(int? limit, string order, string after, string before, RequestOptions options) { - using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistants"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetAssistantsRequest(limit, order, after, before, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetAssistantsRequest(limit, order, after, before, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -267,7 +246,7 @@ public virtual async Task GetAssistantsAsync(int? limit, string order, s /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. 
+ /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -290,51 +269,48 @@ public virtual async Task GetAssistantsAsync(int? limit, string order, s /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result GetAssistants(int? limit, string order, string after, string before, RequestOptions context) + public virtual ClientResult GetAssistants(int? limit, string order, string after, string before, RequestOptions options) { - using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistants"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetAssistantsRequest(limit, order, after, before, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetAssistantsRequest(limit, order, after, before, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Retrieves an assistant. /// The ID of the assistant to retrieve. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> GetAssistantAsync(string assistantId, CancellationToken cancellationToken = default) + public virtual async Task> GetAssistantAsync(string assistantId) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetAssistantAsync(assistantId, context).ConfigureAwait(false); - return Result.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetAssistantAsync(assistantId).ConfigureAwait(false); + return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Retrieves an assistant. /// The ID of the assistant to retrieve. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual Result GetAssistant(string assistantId, CancellationToken cancellationToken = default) + public virtual ClientResult GetAssistant(string assistantId) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetAssistant(assistantId, context); - return Result.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetAssistant(assistantId); + return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -347,33 +323,32 @@ public virtual Result GetAssistant(string assistantId, Cancella /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the assistant to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task GetAssistantAsync(string assistantId, RequestOptions context) + public virtual async Task GetAssistantAsync(string assistantId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistant"); - scope.Start(); - try + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetAssistantRequest(assistantId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateGetAssistantRequest(assistantId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -386,67 +361,64 @@ public virtual async Task GetAssistantAsync(string assistantId, RequestO /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the assistant to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result GetAssistant(string assistantId, RequestOptions context) + public virtual ClientResult GetAssistant(string assistantId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistant"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetAssistantRequest(assistantId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetAssistantRequest(assistantId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Modifies an assistant. /// The ID of the assistant to modify. /// The to use. - /// The cancellation token to use. /// or is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> ModifyAssistantAsync(string assistantId, ModifyAssistantRequest assistant, CancellationToken cancellationToken = default) + public virtual async Task> ModifyAssistantAsync(string assistantId, ModifyAssistantRequest assistant) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNull(assistant, nameof(assistant)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (assistant is null) throw new ArgumentNullException(nameof(assistant)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = assistant.ToRequestBody(); - Result result = await ModifyAssistantAsync(assistantId, content, context).ConfigureAwait(false); - return Result.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(assistant); + ClientResult result = await ModifyAssistantAsync(assistantId, content).ConfigureAwait(false); + return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Modifies an assistant. /// The ID of the assistant to modify. /// The to use. - /// The cancellation token to use. /// or is null. /// is an empty string, and was expected to be non-empty. 
- public virtual Result ModifyAssistant(string assistantId, ModifyAssistantRequest assistant, CancellationToken cancellationToken = default) + public virtual ClientResult ModifyAssistant(string assistantId, ModifyAssistantRequest assistant) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNull(assistant, nameof(assistant)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (assistant is null) throw new ArgumentNullException(nameof(assistant)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = assistant.ToRequestBody(); - Result result = ModifyAssistant(assistantId, content, context); - return Result.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(assistant); + ClientResult result = ModifyAssistant(assistantId, content); + return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -459,35 +431,34 @@ public virtual Result ModifyAssistant(string assistantId, Modif /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the assistant to modify. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task ModifyAssistantAsync(string assistantId, RequestBody content, RequestOptions context = null) + public virtual async Task ModifyAssistantAsync(string assistantId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.ModifyAssistant"); - scope.Start(); - try + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateModifyAssistantRequest(assistantId, content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateModifyAssistantRequest(assistantId, content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -500,63 +471,60 @@ public virtual async Task ModifyAssistantAsync(string assistantId, Reque /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the assistant to modify. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. 
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result ModifyAssistant(string assistantId, RequestBody content, RequestOptions context = null) + public virtual ClientResult ModifyAssistant(string assistantId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.ModifyAssistant"); - scope.Start(); - try - { - using PipelineMessage message = CreateModifyAssistantRequest(assistantId, content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateModifyAssistantRequest(assistantId, content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Delete an assistant. /// The ID of the assistant to delete. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> DeleteAssistantAsync(string assistantId, CancellationToken cancellationToken = default) + public virtual async Task> DeleteAssistantAsync(string assistantId) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await DeleteAssistantAsync(assistantId, context).ConfigureAwait(false); - return Result.FromValue(DeleteAssistantResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await DeleteAssistantAsync(assistantId).ConfigureAwait(false); + return ClientResult.FromValue(DeleteAssistantResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Delete an assistant. /// The ID of the assistant to delete. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual Result DeleteAssistant(string assistantId, CancellationToken cancellationToken = default) + public virtual ClientResult DeleteAssistant(string assistantId) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = DeleteAssistant(assistantId, context); - return Result.FromValue(DeleteAssistantResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = DeleteAssistant(assistantId); + return ClientResult.FromValue(DeleteAssistantResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -569,33 +537,32 @@ public virtual Result DeleteAssistant(string assistantI /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the assistant to delete. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task DeleteAssistantAsync(string assistantId, RequestOptions context) + public virtual async Task DeleteAssistantAsync(string assistantId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.DeleteAssistant"); - scope.Start(); - try - { - using PipelineMessage message = CreateDeleteAssistantRequest(assistantId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateDeleteAssistantRequest(assistantId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -608,33 +575,32 @@ public virtual async Task DeleteAssistantAsync(string assistantId, Reque /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the assistant to delete. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. 
/// The response returned from the service. - public virtual Result DeleteAssistant(string assistantId, RequestOptions context) + public virtual ClientResult DeleteAssistant(string assistantId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.DeleteAssistant"); - scope.Start(); - try + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateDeleteAssistantRequest(assistantId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateDeleteAssistantRequest(assistantId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// @@ -643,18 +609,17 @@ public virtual Result DeleteAssistant(string assistantId, RequestOptions context /// /// The ID of the assistant for which to create a file. /// The to use. - /// The cancellation token to use. /// or is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> CreateAssistantFileAsync(string assistantId, CreateAssistantFileRequest file, CancellationToken cancellationToken = default) + public virtual async Task> CreateAssistantFileAsync(string assistantId, CreateAssistantFileRequest file) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNull(file, nameof(file)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (file is null) throw new ArgumentNullException(nameof(file)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = file.ToRequestBody(); - Result result = await CreateAssistantFileAsync(assistantId, content, context).ConfigureAwait(false); - return Result.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(file); + ClientResult result = await CreateAssistantFileAsync(assistantId, content).ConfigureAwait(false); + return ClientResult.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -663,18 +628,17 @@ public virtual async Task> CreateAssistantFileAsync( /// /// The ID of the assistant for which to create a file. /// The to use. - /// The cancellation token to use. /// or is null. /// is an empty string, and was expected to be non-empty. 
- public virtual Result CreateAssistantFile(string assistantId, CreateAssistantFileRequest file, CancellationToken cancellationToken = default) + public virtual ClientResult CreateAssistantFile(string assistantId, CreateAssistantFileRequest file) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNull(file, nameof(file)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (file is null) throw new ArgumentNullException(nameof(file)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = file.ToRequestBody(); - Result result = CreateAssistantFile(assistantId, content, context); - return Result.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(file); + ClientResult result = CreateAssistantFile(assistantId, content); + return ClientResult.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -688,35 +652,34 @@ public virtual Result CreateAssistantFile(string assistantI /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the assistant for which to create a file. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateAssistantFileAsync(string assistantId, RequestBody content, RequestOptions context = null) + public virtual async Task CreateAssistantFileAsync(string assistantId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.CreateAssistantFile"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateAssistantFileRequest(assistantId, content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateAssistantFileRequest(assistantId, content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -730,35 +693,34 @@ public virtual async Task CreateAssistantFileAsync(string assistantId, R /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the assistant for which to create a file. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. 
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result CreateAssistantFile(string assistantId, RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateAssistantFile(string assistantId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.CreateAssistantFile"); - scope.Start(); - try + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateAssistantFileRequest(assistantId, content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateAssistantFileRequest(assistantId, content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Returns a list of assistant files. 
@@ -781,16 +743,15 @@ public virtual Result CreateAssistantFile(string assistantId, RequestBody conten /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual async Task> GetAssistantFilesAsync(string assistantId, int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + public virtual async Task> GetAssistantFilesAsync(string assistantId, int? limit = null, ListOrder? order = null, string after = null, string before = null) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetAssistantFilesAsync(assistantId, limit, order?.ToString(), after, before, context).ConfigureAwait(false); - return Result.FromValue(ListAssistantFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetAssistantFilesAsync(assistantId, limit, order?.ToString(), after, before).ConfigureAwait(false); + return ClientResult.FromValue(ListAssistantFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Returns a list of assistant files. @@ -813,16 +774,15 @@ public virtual async Task> GetAssistantFilesA /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The cancellation token to use. /// is null. 
/// is an empty string, and was expected to be non-empty. - public virtual Result GetAssistantFiles(string assistantId, int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + public virtual ClientResult GetAssistantFiles(string assistantId, int? limit = null, ListOrder? order = null, string after = null, string before = null) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetAssistantFiles(assistantId, limit, order?.ToString(), after, before, context); - return Result.FromValue(ListAssistantFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetAssistantFiles(assistantId, limit, order?.ToString(), after, before); + return ClientResult.FromValue(ListAssistantFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -835,7 +795,7 @@ public virtual Result GetAssistantFiles(string assis /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -859,27 +819,26 @@ public virtual Result GetAssistantFiles(string assis /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. 
- /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task GetAssistantFilesAsync(string assistantId, int? limit, string order, string after, string before, RequestOptions context) + public virtual async Task GetAssistantFilesAsync(string assistantId, int? limit, string order, string after, string before, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistantFiles"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetAssistantFilesRequest(assistantId, limit, order, after, before, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetAssistantFilesRequest(assistantId, limit, order, after, before, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -892,7 +851,7 @@ public virtual async Task GetAssistantFilesAsync(string assistantId, int /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. 
/// /// /// @@ -916,59 +875,58 @@ public virtual async Task GetAssistantFilesAsync(string assistantId, int /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result GetAssistantFiles(string assistantId, int? limit, string order, string after, string before, RequestOptions context) + public virtual ClientResult GetAssistantFiles(string assistantId, int? limit, string order, string after, string before, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistantFiles"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetAssistantFilesRequest(assistantId, limit, order, after, before, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetAssistantFilesRequest(assistantId, limit, order, after, before, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return 
ClientResult.FromResponse(response); } /// Retrieves an assistant file. /// The ID of the assistant the file belongs to. /// The ID of the file we're getting. - /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - public virtual async Task> GetAssistantFileAsync(string assistantId, string fileId, CancellationToken cancellationToken = default) + public virtual async Task> GetAssistantFileAsync(string assistantId, string fileId) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetAssistantFileAsync(assistantId, fileId, context).ConfigureAwait(false); - return Result.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetAssistantFileAsync(assistantId, fileId).ConfigureAwait(false); + return ClientResult.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Retrieves an assistant file. /// The ID of the assistant the file belongs to. /// The ID of the file we're getting. - /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. 
- public virtual Result GetAssistantFile(string assistantId, string fileId, CancellationToken cancellationToken = default) + public virtual ClientResult GetAssistantFile(string assistantId, string fileId) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetAssistantFile(assistantId, fileId, context); - return Result.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetAssistantFile(assistantId, fileId); + return ClientResult.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -981,35 +939,35 @@ public virtual Result GetAssistantFile(string assistantId, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the assistant the file belongs to. /// The ID of the file we're getting. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task GetAssistantFileAsync(string assistantId, string fileId, RequestOptions context) + public virtual async Task GetAssistantFileAsync(string assistantId, string fileId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistantFile"); - scope.Start(); - try + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetAssistantFileRequest(assistantId, fileId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateGetAssistantFileRequest(assistantId, fileId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -1022,67 +980,67 @@ public virtual async Task GetAssistantFileAsync(string assistantId, stri /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the assistant the file belongs to. /// The ID of the file we're getting. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. 
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result GetAssistantFile(string assistantId, string fileId, RequestOptions context) + public virtual ClientResult GetAssistantFile(string assistantId, string fileId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistantFile"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetAssistantFileRequest(assistantId, fileId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetAssistantFileRequest(assistantId, fileId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Delete an assistant file. /// The ID of the assistant the file belongs to. /// The ID of the file to delete. - /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. 
- public virtual async Task> DeleteAssistantFileAsync(string assistantId, string fileId, CancellationToken cancellationToken = default) + public virtual async Task> DeleteAssistantFileAsync(string assistantId, string fileId) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await DeleteAssistantFileAsync(assistantId, fileId, context).ConfigureAwait(false); - return Result.FromValue(DeleteAssistantFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await DeleteAssistantFileAsync(assistantId, fileId).ConfigureAwait(false); + return ClientResult.FromValue(DeleteAssistantFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Delete an assistant file. /// The ID of the assistant the file belongs to. /// The ID of the file to delete. - /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. 
- public virtual Result DeleteAssistantFile(string assistantId, string fileId, CancellationToken cancellationToken = default) + public virtual ClientResult DeleteAssistantFile(string assistantId, string fileId) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = DeleteAssistantFile(assistantId, fileId, context); - return Result.FromValue(DeleteAssistantFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = DeleteAssistantFile(assistantId, fileId); + return ClientResult.FromValue(DeleteAssistantFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -1095,35 +1053,35 @@ public virtual Result DeleteAssistantFile(string as /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the assistant the file belongs to. /// The ID of the file to delete. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task DeleteAssistantFileAsync(string assistantId, string fileId, RequestOptions context) + public virtual async Task DeleteAssistantFileAsync(string assistantId, string fileId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.DeleteAssistantFile"); - scope.Start(); - try + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateDeleteAssistantFileRequest(assistantId, fileId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateDeleteAssistantFileRequest(assistantId, fileId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -1136,217 +1094,297 @@ public virtual async Task DeleteAssistantFileAsync(string assistantId, s /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the assistant the file belongs to. /// The ID of the file to delete. 
- /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result DeleteAssistantFile(string assistantId, string fileId, RequestOptions context) + public virtual ClientResult DeleteAssistantFile(string assistantId, string fileId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - using var scope = ClientDiagnostics.CreateSpan("Assistants.DeleteAssistantFile"); - scope.Start(); - try + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateDeleteAssistantFileRequest(assistantId, fileId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateDeleteAssistantFileRequest(assistantId, fileId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } - internal PipelineMessage CreateCreateAssistantRequest(RequestBody content, RequestOptions context) + internal PipelineMessage 
CreateCreateAssistantRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/assistants", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateGetAssistantsRequest(int? limit, string order, string after, string before, RequestOptions context) + internal PipelineMessage CreateGetAssistantsRequest(int? 
limit, string order, string after, string before, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/assistants", false); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants"); + uriBuilder.Path += path.ToString(); if (limit != null) { - uri.AppendQuery("limit", limit.Value, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } } if (order != null) { - uri.AppendQuery("order", order, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&order={order}"; + } + else + { + uriBuilder.Query = $"order={order}"; + } } if (after != null) { - uri.AppendQuery("after", after, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } } if (before != null) { - uri.AppendQuery("before", before, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&before={before}"; + } + else + { + uriBuilder.Query = $"before={before}"; + } } - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateGetAssistantRequest(string assistantId, RequestOptions context) + internal PipelineMessage CreateGetAssistantRequest(string assistantId, RequestOptions options) { - var message = 
_pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/assistants/", false); - uri.AppendPath(assistantId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + uriBuilder.Path += path.ToString(); + path.Append(assistantId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateModifyAssistantRequest(string assistantId, RequestBody content, RequestOptions context) + internal PipelineMessage CreateModifyAssistantRequest(string assistantId, BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/assistants/", false); - uri.AppendPath(assistantId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + uriBuilder.Path += path.ToString(); + path.Append(assistantId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); 
+ request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateDeleteAssistantRequest(string assistantId, RequestOptions context) + internal PipelineMessage CreateDeleteAssistantRequest(string assistantId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("DELETE"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/assistants/", false); - uri.AppendPath(assistantId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "DELETE"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + uriBuilder.Path += path.ToString(); + path.Append(assistantId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateCreateAssistantFileRequest(string assistantId, RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateAssistantFileRequest(string assistantId, BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/assistants/", false); - uri.AppendPath(assistantId, true); - uri.AppendPath("/files", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = 
ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + uriBuilder.Path += path.ToString(); + path.Append(assistantId); + uriBuilder.Path += path.ToString(); + path.Append("/files"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateGetAssistantFilesRequest(string assistantId, int? limit, string order, string after, string before, RequestOptions context) + internal PipelineMessage CreateGetAssistantFilesRequest(string assistantId, int? limit, string order, string after, string before, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/assistants/", false); - uri.AppendPath(assistantId, true); - uri.AppendPath("/files", false); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + uriBuilder.Path += path.ToString(); + path.Append(assistantId); + uriBuilder.Path += path.ToString(); + path.Append("/files"); + uriBuilder.Path += path.ToString(); if (limit != null) { - uri.AppendQuery("limit", limit.Value, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } } if (order != null) { - uri.AppendQuery("order", order, true); + if 
(uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&order={order}"; + } + else + { + uriBuilder.Query = $"order={order}"; + } } if (after != null) { - uri.AppendQuery("after", after, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } } if (before != null) { - uri.AppendQuery("before", before, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&before={before}"; + } + else + { + uriBuilder.Query = $"before={before}"; + } } - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateGetAssistantFileRequest(string assistantId, string fileId, RequestOptions context) + internal PipelineMessage CreateGetAssistantFileRequest(string assistantId, string fileId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/assistants/", false); - uri.AppendPath(assistantId, true); - uri.AppendPath("/files/", false); - uri.AppendPath(fileId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + uriBuilder.Path += path.ToString(); + path.Append(assistantId); + uriBuilder.Path += path.ToString(); + path.Append("/files/"); + uriBuilder.Path += path.ToString(); + path.Append(fileId); + uriBuilder.Path += 
path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateDeleteAssistantFileRequest(string assistantId, string fileId, RequestOptions context) + internal PipelineMessage CreateDeleteAssistantFileRequest(string assistantId, string fileId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("DELETE"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/assistants/", false); - uri.AppendPath(assistantId, true); - uri.AppendPath("/files/", false); - uri.AppendPath(fileId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "DELETE"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + uriBuilder.Path += path.ToString(); + path.Append(assistantId); + uriBuilder.Path += path.ToString(); + path.Append("/files/"); + uriBuilder.Path += path.ToString(); + path.Append(fileId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + 
private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } + diff --git a/.dotnet/src/Generated/Audio.cs b/.dotnet/src/Generated/Audio.cs index 6acfba501..d2bd5db96 100644 --- a/.dotnet/src/Generated/Audio.cs +++ b/.dotnet/src/Generated/Audio.cs @@ -1,12 +1,9 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; +using System.Text; using System.Threading; using System.Threading.Tasks; using OpenAI.Models; @@ -18,16 +15,13 @@ namespace OpenAI public partial class Audio { private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; + private readonly ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing support for the client library. - internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of Audio for mocking. protected Audio() @@ -35,44 +29,38 @@ protected Audio() } /// Initializes a new instance of Audio. - /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. + /// The key credential to copy. /// OpenAI Endpoint. 
- internal Audio(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + internal Audio(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) { - ClientDiagnostics = clientDiagnostics; _pipeline = pipeline; - _keyCredential = keyCredential; + _credential = credential; _endpoint = endpoint; } /// Generates audio from the input text. /// The to use. - /// The cancellation token to use. /// is null. - public virtual async Task> CreateSpeechAsync(CreateSpeechRequest speech, CancellationToken cancellationToken = default) + public virtual async Task> CreateSpeechAsync(CreateSpeechRequest speech) { - ClientUtilities.AssertNotNull(speech, nameof(speech)); + if (speech is null) throw new ArgumentNullException(nameof(speech)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = speech.ToRequestBody(); - Result result = await CreateSpeechAsync(content, context).ConfigureAwait(false); - return Result.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(speech); + ClientResult result = await CreateSpeechAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); } /// Generates audio from the input text. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual Result CreateSpeech(CreateSpeechRequest speech, CancellationToken cancellationToken = default) + public virtual ClientResult CreateSpeech(CreateSpeechRequest speech) { - ClientUtilities.AssertNotNull(speech, nameof(speech)); + if (speech is null) throw new ArgumentNullException(nameof(speech)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = speech.ToRequestBody(); - Result result = CreateSpeech(content, context); - return Result.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(speech); + ClientResult result = CreateSpeech(content); + return ClientResult.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); } /// @@ -85,32 +73,30 @@ public virtual Result CreateSpeech(CreateSpeechRequest speech, Cance /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateSpeechAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateSpeechAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateSpeechRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Audio.CreateSpeech"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateSpeechRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -123,60 +109,54 @@ public virtual async Task CreateSpeechAsync(RequestBody content, Request /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateSpeech(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateSpeech(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateSpeechRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Audio.CreateSpeech"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateSpeechRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Transcribes audio into the input language. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual async Task> CreateTranscriptionAsync(CreateTranscriptionRequest audio, CancellationToken cancellationToken = default) + public virtual async Task> CreateTranscriptionAsync(CreateTranscriptionRequest audio) { - ClientUtilities.AssertNotNull(audio, nameof(audio)); + if (audio is null) throw new ArgumentNullException(nameof(audio)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = audio.ToRequestBody(); - Result result = await CreateTranscriptionAsync(content, context).ConfigureAwait(false); - return Result.FromValue(CreateTranscriptionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(audio); + ClientResult result = await CreateTranscriptionAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(CreateTranscriptionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Transcribes audio into the input language. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual Result CreateTranscription(CreateTranscriptionRequest audio, CancellationToken cancellationToken = default) + public virtual ClientResult CreateTranscription(CreateTranscriptionRequest audio) { - ClientUtilities.AssertNotNull(audio, nameof(audio)); + if (audio is null) throw new ArgumentNullException(nameof(audio)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = audio.ToRequestBody(); - Result result = CreateTranscription(content, context); - return Result.FromValue(CreateTranscriptionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(audio); + ClientResult result = CreateTranscription(content); + return ClientResult.FromValue(CreateTranscriptionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -189,32 +169,30 @@ public virtual Result CreateTranscription(CreateTra /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateTranscriptionAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateTranscriptionAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateTranscriptionRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Audio.CreateTranscription"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateTranscriptionRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -227,60 +205,54 @@ public virtual async Task CreateTranscriptionAsync(RequestBody content, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateTranscription(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateTranscription(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateTranscriptionRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Audio.CreateTranscription"); - scope.Start(); - try + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateTranscriptionRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Translates audio into English.. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual async Task> CreateTranslationAsync(CreateTranslationRequest audio, CancellationToken cancellationToken = default) + public virtual async Task> CreateTranslationAsync(CreateTranslationRequest audio) { - ClientUtilities.AssertNotNull(audio, nameof(audio)); + if (audio is null) throw new ArgumentNullException(nameof(audio)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = audio.ToRequestBody(); - Result result = await CreateTranslationAsync(content, context).ConfigureAwait(false); - return Result.FromValue(CreateTranslationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(audio); + ClientResult result = await CreateTranslationAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(CreateTranslationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Translates audio into English.. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual Result CreateTranslation(CreateTranslationRequest audio, CancellationToken cancellationToken = default) + public virtual ClientResult CreateTranslation(CreateTranslationRequest audio) { - ClientUtilities.AssertNotNull(audio, nameof(audio)); + if (audio is null) throw new ArgumentNullException(nameof(audio)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = audio.ToRequestBody(); - Result result = CreateTranslation(content, context); - return Result.FromValue(CreateTranslationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(audio); + ClientResult result = CreateTranslation(content); + return ClientResult.FromValue(CreateTranslationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -293,32 +265,30 @@ public virtual Result CreateTranslation(CreateTransla /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateTranslationAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateTranslationAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateTranslationRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Audio.CreateTranslation"); - scope.Start(); - try + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateTranslationRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -331,91 +301,88 @@ public virtual async Task CreateTranslationAsync(RequestBody content, Re /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateTranslation(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateTranslation(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateTranslationRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Audio.CreateTranslation"); - scope.Start(); - try + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateTranslationRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } - internal PipelineMessage CreateCreateSpeechRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateSpeechRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/audio/speech", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/octet-stream"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/audio/speech"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + 
request.Headers.Set("Accept", "application/octet-stream"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateCreateTranscriptionRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateTranscriptionRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/audio/transcriptions", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("content-type", "multipart/form-data"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/audio/transcriptions"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("content-type", "multipart/form-data"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateCreateTranslationRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateTranslationRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/audio/translations", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("content-type", "multipart/form-data"); + PipelineMessage message = 
_pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/audio/translations"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("content-type", "multipart/form-data"); request.Content = content; + message.Apply(options); return message; } - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } + diff --git a/.dotnet/src/Generated/Chat.cs b/.dotnet/src/Generated/Chat.cs index 8c8e0546e..3fa357d90 100644 --- a/.dotnet/src/Generated/Chat.cs +++ b/.dotnet/src/Generated/Chat.cs @@ -1,12 +1,9 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; +using System.Text; using System.Threading; using System.Threading.Tasks; using OpenAI.Models; @@ -18,16 +15,13 @@ namespace OpenAI public partial class Chat { private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential 
_keyCredential; + private readonly ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing support for the client library. - internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of Chat for mocking. protected Chat() @@ -35,44 +29,38 @@ protected Chat() } /// Initializes a new instance of Chat. - /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. + /// The key credential to copy. /// OpenAI Endpoint. - internal Chat(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + internal Chat(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) { - ClientDiagnostics = clientDiagnostics; _pipeline = pipeline; - _keyCredential = keyCredential; + _credential = credential; _endpoint = endpoint; } /// Creates a model response for the given chat conversation. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual async Task> CreateChatCompletionAsync(CreateChatCompletionRequest createChatCompletionRequest, CancellationToken cancellationToken = default) + public virtual async Task> CreateChatCompletionAsync(CreateChatCompletionRequest createChatCompletionRequest) { - ClientUtilities.AssertNotNull(createChatCompletionRequest, nameof(createChatCompletionRequest)); + if (createChatCompletionRequest is null) throw new ArgumentNullException(nameof(createChatCompletionRequest)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = createChatCompletionRequest.ToRequestBody(); - Result result = await CreateChatCompletionAsync(content, context).ConfigureAwait(false); - return Result.FromValue(CreateChatCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(createChatCompletionRequest); + ClientResult result = await CreateChatCompletionAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(CreateChatCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Creates a model response for the given chat conversation. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual Result CreateChatCompletion(CreateChatCompletionRequest createChatCompletionRequest, CancellationToken cancellationToken = default) + public virtual ClientResult CreateChatCompletion(CreateChatCompletionRequest createChatCompletionRequest) { - ClientUtilities.AssertNotNull(createChatCompletionRequest, nameof(createChatCompletionRequest)); + if (createChatCompletionRequest is null) throw new ArgumentNullException(nameof(createChatCompletionRequest)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = createChatCompletionRequest.ToRequestBody(); - Result result = CreateChatCompletion(content, context); - return Result.FromValue(CreateChatCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(createChatCompletionRequest); + ClientResult result = CreateChatCompletion(content); + return ClientResult.FromValue(CreateChatCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -85,32 +73,30 @@ public virtual Result CreateChatCompletion(CreateC /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateChatCompletionAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateChatCompletionAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateChatCompletionRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Chat.CreateChatCompletion"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateChatCompletionRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -123,61 +109,52 @@ public virtual async Task CreateChatCompletionAsync(RequestBody content, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateChatCompletion(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateChatCompletion(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateChatCompletionRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Chat.CreateChatCompletion"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateChatCompletionRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } - internal PipelineMessage CreateCreateChatCompletionRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateChatCompletionRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/chat/completions", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/chat/completions"); + uriBuilder.Path += path.ToString(); + request.Uri = 
uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } + diff --git a/.dotnet/src/Generated/Completions.cs b/.dotnet/src/Generated/Completions.cs index aaa1f585d..779fa66c0 100644 --- a/.dotnet/src/Generated/Completions.cs +++ b/.dotnet/src/Generated/Completions.cs @@ -1,12 +1,9 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; +using System.Text; using System.Threading; using System.Threading.Tasks; using OpenAI.Models; @@ -18,16 +15,13 @@ namespace OpenAI public partial class Completions { private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; + private readonly ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing 
support for the client library. - internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of Completions for mocking. protected Completions() @@ -35,44 +29,38 @@ protected Completions() } /// Initializes a new instance of Completions. - /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. + /// The key credential to copy. /// OpenAI Endpoint. - internal Completions(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + internal Completions(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) { - ClientDiagnostics = clientDiagnostics; _pipeline = pipeline; - _keyCredential = keyCredential; + _credential = credential; _endpoint = endpoint; } /// Creates a completion for the provided prompt and parameters. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual async Task> CreateCompletionAsync(CreateCompletionRequest createCompletionRequest, CancellationToken cancellationToken = default) + public virtual async Task> CreateCompletionAsync(CreateCompletionRequest createCompletionRequest) { - ClientUtilities.AssertNotNull(createCompletionRequest, nameof(createCompletionRequest)); + if (createCompletionRequest is null) throw new ArgumentNullException(nameof(createCompletionRequest)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = createCompletionRequest.ToRequestBody(); - Result result = await CreateCompletionAsync(content, context).ConfigureAwait(false); - return Result.FromValue(CreateCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(createCompletionRequest); + ClientResult result = await CreateCompletionAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(CreateCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Creates a completion for the provided prompt and parameters. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual Result CreateCompletion(CreateCompletionRequest createCompletionRequest, CancellationToken cancellationToken = default) + public virtual ClientResult CreateCompletion(CreateCompletionRequest createCompletionRequest) { - ClientUtilities.AssertNotNull(createCompletionRequest, nameof(createCompletionRequest)); + if (createCompletionRequest is null) throw new ArgumentNullException(nameof(createCompletionRequest)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = createCompletionRequest.ToRequestBody(); - Result result = CreateCompletion(content, context); - return Result.FromValue(CreateCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(createCompletionRequest); + ClientResult result = CreateCompletion(content); + return ClientResult.FromValue(CreateCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -85,32 +73,30 @@ public virtual Result CreateCompletion(CreateCompletio /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateCompletionAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateCompletionAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateCompletionRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Completions.CreateCompletion"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateCompletionRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -123,61 +109,52 @@ public virtual async Task CreateCompletionAsync(RequestBody content, Req /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateCompletion(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateCompletion(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateCompletionRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Completions.CreateCompletion"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateCompletionRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } - internal PipelineMessage CreateCreateCompletionRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateCompletionRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/completions", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/completions"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + 
request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } + diff --git a/.dotnet/src/Generated/Embeddings.cs b/.dotnet/src/Generated/Embeddings.cs index d42141a8f..f935d7640 100644 --- a/.dotnet/src/Generated/Embeddings.cs +++ b/.dotnet/src/Generated/Embeddings.cs @@ -1,12 +1,9 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; +using System.Text; using System.Threading; using System.Threading.Tasks; using OpenAI.Models; @@ -18,16 +15,13 @@ namespace OpenAI public partial class Embeddings { private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; + private readonly ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing support for the client 
library. - internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of Embeddings for mocking. protected Embeddings() @@ -35,44 +29,38 @@ protected Embeddings() } /// Initializes a new instance of Embeddings. - /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. + /// The key credential to copy. /// OpenAI Endpoint. - internal Embeddings(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + internal Embeddings(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) { - ClientDiagnostics = clientDiagnostics; _pipeline = pipeline; - _keyCredential = keyCredential; + _credential = credential; _endpoint = endpoint; } /// Creates an embedding vector representing the input text. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual async Task> CreateEmbeddingAsync(CreateEmbeddingRequest embedding, CancellationToken cancellationToken = default) + public virtual async Task> CreateEmbeddingAsync(CreateEmbeddingRequest embedding) { - ClientUtilities.AssertNotNull(embedding, nameof(embedding)); + if (embedding is null) throw new ArgumentNullException(nameof(embedding)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = embedding.ToRequestBody(); - Result result = await CreateEmbeddingAsync(content, context).ConfigureAwait(false); - return Result.FromValue(CreateEmbeddingResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(embedding); + ClientResult result = await CreateEmbeddingAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(CreateEmbeddingResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Creates an embedding vector representing the input text. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual Result CreateEmbedding(CreateEmbeddingRequest embedding, CancellationToken cancellationToken = default) + public virtual ClientResult CreateEmbedding(CreateEmbeddingRequest embedding) { - ClientUtilities.AssertNotNull(embedding, nameof(embedding)); + if (embedding is null) throw new ArgumentNullException(nameof(embedding)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = embedding.ToRequestBody(); - Result result = CreateEmbedding(content, context); - return Result.FromValue(CreateEmbeddingResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(embedding); + ClientResult result = CreateEmbedding(content); + return ClientResult.FromValue(CreateEmbeddingResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -85,32 +73,30 @@ public virtual Result CreateEmbedding(CreateEmbeddingRe /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateEmbeddingAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateEmbeddingAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateEmbeddingRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Embeddings.CreateEmbedding"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateEmbeddingRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -123,61 +109,52 @@ public virtual async Task CreateEmbeddingAsync(RequestBody content, Requ /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateEmbedding(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateEmbedding(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateEmbeddingRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Embeddings.CreateEmbedding"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateEmbeddingRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } - internal PipelineMessage CreateCreateEmbeddingRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateEmbeddingRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/embeddings", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/embeddings"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + 
request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } + diff --git a/.dotnet/src/Generated/Files.cs b/.dotnet/src/Generated/Files.cs index 05aa7d834..10258b608 100644 --- a/.dotnet/src/Generated/Files.cs +++ b/.dotnet/src/Generated/Files.cs @@ -1,12 +1,9 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; +using System.Text; using System.Threading; using System.Threading.Tasks; using OpenAI.Models; @@ -18,16 +15,13 @@ namespace OpenAI public partial class Files { private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; + private readonly ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing support for the client library. 
- internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of Files for mocking. protected Files() @@ -35,15 +29,13 @@ protected Files() } /// Initializes a new instance of Files. - /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. + /// The key credential to copy. /// OpenAI Endpoint. - internal Files(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + internal Files(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) { - ClientDiagnostics = clientDiagnostics; _pipeline = pipeline; - _keyCredential = keyCredential; + _credential = credential; _endpoint = endpoint; } @@ -58,16 +50,14 @@ internal Files(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyC /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. /// /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual async Task> CreateFileAsync(CreateFileRequest file, CancellationToken cancellationToken = default) + public virtual async Task> CreateFileAsync(CreateFileRequest file) { - ClientUtilities.AssertNotNull(file, nameof(file)); + if (file is null) throw new ArgumentNullException(nameof(file)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = file.ToRequestBody(); - Result result = await CreateFileAsync(content, context).ConfigureAwait(false); - return Result.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(file); + ClientResult result = await CreateFileAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -81,16 +71,14 @@ public virtual async Task> CreateFileAsync(CreateFileRequest /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. /// /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual Result CreateFile(CreateFileRequest file, CancellationToken cancellationToken = default) + public virtual ClientResult CreateFile(CreateFileRequest file) { - ClientUtilities.AssertNotNull(file, nameof(file)); + if (file is null) throw new ArgumentNullException(nameof(file)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = file.ToRequestBody(); - Result result = CreateFile(content, context); - return Result.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(file); + ClientResult result = CreateFile(content); + return ClientResult.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -110,32 +98,30 @@ public virtual Result CreateFile(CreateFileRequest file, Cancellatio /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateFileAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateFileAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateFileRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Files.CreateFile"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateFileRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -155,52 +141,46 @@ public virtual async Task CreateFileAsync(RequestBody content, RequestOp /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateFile(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateFile(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateFileRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Files.CreateFile"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateFileRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Returns a list of files that belong to the user's organization. /// Only return files with the given purpose. - /// The cancellation token to use. - public virtual async Task> GetFilesAsync(string purpose = null, CancellationToken cancellationToken = default) + public virtual async Task> GetFilesAsync(string purpose = null) { - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetFilesAsync(purpose, context).ConfigureAwait(false); - return Result.FromValue(ListFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetFilesAsync(purpose).ConfigureAwait(false); + return ClientResult.FromValue(ListFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Returns a list of files that belong to the user's organization. /// Only return files with the given purpose. - /// The cancellation token to use. 
- public virtual Result GetFiles(string purpose = null, CancellationToken cancellationToken = default) + public virtual ClientResult GetFiles(string purpose = null) { - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetFiles(purpose, context); - return Result.FromValue(ListFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetFiles(purpose); + return ClientResult.FromValue(ListFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -213,29 +193,28 @@ public virtual Result GetFiles(string purpose = null, Cancell /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// Only return files with the given purpose. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task GetFilesAsync(string purpose, RequestOptions context) + public virtual async Task GetFilesAsync(string purpose, RequestOptions options) { - using var scope = ClientDiagnostics.CreateSpan("Files.GetFiles"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetFilesRequest(purpose, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetFilesRequest(purpose, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -248,57 +227,54 @@ public virtual async Task GetFilesAsync(string purpose, RequestOptions c /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// Only return files with the given purpose. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result GetFiles(string purpose, RequestOptions context) + public virtual ClientResult GetFiles(string purpose, RequestOptions options) { - using var scope = ClientDiagnostics.CreateSpan("Files.GetFiles"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetFilesRequest(purpose, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetFilesRequest(purpose, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Returns information about a specific file. /// The ID of the file to use for this request. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual async Task> RetrieveFileAsync(string fileId, CancellationToken cancellationToken = default) + public virtual async Task> RetrieveFileAsync(string fileId) { - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await RetrieveFileAsync(fileId, context).ConfigureAwait(false); - return Result.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await RetrieveFileAsync(fileId).ConfigureAwait(false); + return ClientResult.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Returns information about a specific file. /// The ID of the file to use for this request. - /// The cancellation token to use. /// is null. 
/// is an empty string, and was expected to be non-empty. - public virtual Result RetrieveFile(string fileId, CancellationToken cancellationToken = default) + public virtual ClientResult RetrieveFile(string fileId) { - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = RetrieveFile(fileId, context); - return Result.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = RetrieveFile(fileId); + return ClientResult.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -311,33 +287,32 @@ public virtual Result RetrieveFile(string fileId, CancellationToken /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the file to use for this request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task RetrieveFileAsync(string fileId, RequestOptions context) + public virtual async Task RetrieveFileAsync(string fileId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - using var scope = ClientDiagnostics.CreateSpan("Files.RetrieveFile"); - scope.Start(); - try - { - using PipelineMessage message = CreateRetrieveFileRequest(fileId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateRetrieveFileRequest(fileId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -350,61 +325,58 @@ public virtual async Task RetrieveFileAsync(string fileId, RequestOption /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the file to use for this request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result RetrieveFile(string fileId, RequestOptions context) + public virtual ClientResult RetrieveFile(string fileId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - using var scope = ClientDiagnostics.CreateSpan("Files.RetrieveFile"); - scope.Start(); - try + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateRetrieveFileRequest(fileId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateRetrieveFileRequest(fileId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Delete a file. /// The ID of the file to use for this request. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> DeleteFileAsync(string fileId, CancellationToken cancellationToken = default) + public virtual async Task> DeleteFileAsync(string fileId) { - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await DeleteFileAsync(fileId, context).ConfigureAwait(false); - return Result.FromValue(DeleteFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await DeleteFileAsync(fileId).ConfigureAwait(false); + return ClientResult.FromValue(DeleteFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Delete a file. /// The ID of the file to use for this request. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual Result DeleteFile(string fileId, CancellationToken cancellationToken = default) + public virtual ClientResult DeleteFile(string fileId) { - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = DeleteFile(fileId, context); - return Result.FromValue(DeleteFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = DeleteFile(fileId); + return ClientResult.FromValue(DeleteFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -417,33 +389,32 @@ public virtual Result DeleteFile(string fileId, Cancellation /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. 
+ /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the file to use for this request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task DeleteFileAsync(string fileId, RequestOptions context) + public virtual async Task DeleteFileAsync(string fileId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - using var scope = ClientDiagnostics.CreateSpan("Files.DeleteFile"); - scope.Start(); - try - { - using PipelineMessage message = CreateDeleteFileRequest(fileId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateDeleteFileRequest(fileId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -456,61 +427,58 @@ public virtual async Task DeleteFileAsync(string fileId, RequestOptions /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. 
+ /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the file to use for this request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result DeleteFile(string fileId, RequestOptions context) + public virtual ClientResult DeleteFile(string fileId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - using var scope = ClientDiagnostics.CreateSpan("Files.DeleteFile"); - scope.Start(); - try - { - using PipelineMessage message = CreateDeleteFileRequest(fileId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateDeleteFileRequest(fileId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Returns the contents of the specified file. /// The ID of the file to use for this request. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> DownloadFileAsync(string fileId, CancellationToken cancellationToken = default) + public virtual async Task> DownloadFileAsync(string fileId) { - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await DownloadFileAsync(fileId, context).ConfigureAwait(false); - return Result.FromValue(result.GetRawResponse().Content.ToObjectFromJson(), result.GetRawResponse()); + ClientResult result = await DownloadFileAsync(fileId).ConfigureAwait(false); + return ClientResult.FromValue(result.GetRawResponse().Content.ToObjectFromJson(), result.GetRawResponse()); } /// Returns the contents of the specified file. /// The ID of the file to use for this request. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual Result DownloadFile(string fileId, CancellationToken cancellationToken = default) + public virtual ClientResult DownloadFile(string fileId) { - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = DownloadFile(fileId, context); - return Result.FromValue(result.GetRawResponse().Content.ToObjectFromJson(), result.GetRawResponse()); + ClientResult result = DownloadFile(fileId); + return ClientResult.FromValue(result.GetRawResponse().Content.ToObjectFromJson(), result.GetRawResponse()); } /// @@ -523,33 +491,32 @@ public virtual Result DownloadFile(string fileId, CancellationToken canc /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. 
+ /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the file to use for this request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task DownloadFileAsync(string fileId, RequestOptions context) + public virtual async Task DownloadFileAsync(string fileId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - using var scope = ClientDiagnostics.CreateSpan("Files.DownloadFile"); - scope.Start(); - try + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateDownloadFileRequest(fileId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateDownloadFileRequest(fileId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -562,122 +529,133 @@ public virtual async Task DownloadFileAsync(string fileId, RequestOption /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. 
+ /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the file to use for this request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result DownloadFile(string fileId, RequestOptions context) + public virtual ClientResult DownloadFile(string fileId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - using var scope = ClientDiagnostics.CreateSpan("Files.DownloadFile"); - scope.Start(); - try - { - using PipelineMessage message = CreateDownloadFileRequest(fileId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateDownloadFileRequest(fileId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } - internal PipelineMessage CreateCreateFileRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateFileRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); 
- uri.Reset(_endpoint); - uri.AppendPath("/files", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("content-type", "multipart/form-data"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/files"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("content-type", "multipart/form-data"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateGetFilesRequest(string purpose, RequestOptions context) + internal PipelineMessage CreateGetFilesRequest(string purpose, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/files", false); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/files"); + uriBuilder.Path += path.ToString(); if (purpose != null) { - uri.AppendQuery("purpose", purpose, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&purpose={purpose}"; + } + else + { + uriBuilder.Query = $"purpose={purpose}"; + } } - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateRetrieveFileRequest(string fileId, 
RequestOptions context) + internal PipelineMessage CreateRetrieveFileRequest(string fileId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/files/", false); - uri.AppendPath(fileId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/files/"); + uriBuilder.Path += path.ToString(); + path.Append(fileId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateDeleteFileRequest(string fileId, RequestOptions context) + internal PipelineMessage CreateDeleteFileRequest(string fileId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("DELETE"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/files/", false); - uri.AppendPath(fileId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "DELETE"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/files/"); + uriBuilder.Path += path.ToString(); + path.Append(fileId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - 
internal PipelineMessage CreateDownloadFileRequest(string fileId, RequestOptions context) + internal PipelineMessage CreateDownloadFileRequest(string fileId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/files/", false); - uri.AppendPath(fileId, true); - uri.AppendPath("/content", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/files/"); + uriBuilder.Path += path.ToString(); + path.Append(fileId); + uriBuilder.Path += path.ToString(); + path.Append("/content"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } + diff --git a/.dotnet/src/Generated/FineTuning.cs 
b/.dotnet/src/Generated/FineTuning.cs index a8038525b..63c8b2f63 100644 --- a/.dotnet/src/Generated/FineTuning.cs +++ b/.dotnet/src/Generated/FineTuning.cs @@ -1,12 +1,9 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; +using System.Text; using System.Threading; using System.Threading.Tasks; using OpenAI.Models; @@ -18,16 +15,13 @@ namespace OpenAI public partial class FineTuning { private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; + private readonly ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing support for the client library. - internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of FineTuning for mocking. protected FineTuning() @@ -35,15 +29,13 @@ protected FineTuning() } /// Initializes a new instance of FineTuning. - /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. + /// The key credential to copy. /// OpenAI Endpoint. 
- internal FineTuning(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + internal FineTuning(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) { - ClientDiagnostics = clientDiagnostics; _pipeline = pipeline; - _keyCredential = keyCredential; + _credential = credential; _endpoint = endpoint; } @@ -55,16 +47,14 @@ internal FineTuning(TelemetrySource clientDiagnostics, MessagePipeline pipeline, /// [Learn more about fine-tuning](/docs/guides/fine-tuning) /// /// The to use. - /// The cancellation token to use. /// is null. - public virtual async Task> CreateFineTuningJobAsync(CreateFineTuningJobRequest job, CancellationToken cancellationToken = default) + public virtual async Task> CreateFineTuningJobAsync(CreateFineTuningJobRequest job) { - ClientUtilities.AssertNotNull(job, nameof(job)); + if (job is null) throw new ArgumentNullException(nameof(job)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = job.ToRequestBody(); - Result result = await CreateFineTuningJobAsync(content, context).ConfigureAwait(false); - return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(job); + ClientResult result = await CreateFineTuningJobAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -75,16 +65,14 @@ public virtual async Task> CreateFineTuningJobAsync(Create /// [Learn more about fine-tuning](/docs/guides/fine-tuning) /// /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual Result CreateFineTuningJob(CreateFineTuningJobRequest job, CancellationToken cancellationToken = default) + public virtual ClientResult CreateFineTuningJob(CreateFineTuningJobRequest job) { - ClientUtilities.AssertNotNull(job, nameof(job)); + if (job is null) throw new ArgumentNullException(nameof(job)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = job.ToRequestBody(); - Result result = CreateFineTuningJob(content, context); - return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(job); + ClientResult result = CreateFineTuningJob(content); + return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -101,32 +89,30 @@ public virtual Result CreateFineTuningJob(CreateFineTuningJobRequ /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateFineTuningJobAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateFineTuningJobAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateFineTuningJobRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("FineTuning.CreateFineTuningJob"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateFineTuningJobRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -143,54 +129,48 @@ public virtual async Task CreateFineTuningJobAsync(RequestBody content, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateFineTuningJob(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateFineTuningJob(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateFineTuningJobRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("FineTuning.CreateFineTuningJob"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateFineTuningJobRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// List your organization's fine-tuning jobs. /// Identifier for the last job from the previous pagination request. /// Number of fine-tuning jobs to retrieve. - /// The cancellation token to use. - public virtual async Task> GetPaginatedFineTuningJobsAsync(string after = null, long? limit = null, CancellationToken cancellationToken = default) + public virtual async Task> GetPaginatedFineTuningJobsAsync(string after = null, long? 
limit = null) { - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetPaginatedFineTuningJobsAsync(after, limit, context).ConfigureAwait(false); - return Result.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetPaginatedFineTuningJobsAsync(after, limit).ConfigureAwait(false); + return ClientResult.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// List your organization's fine-tuning jobs. /// Identifier for the last job from the previous pagination request. /// Number of fine-tuning jobs to retrieve. - /// The cancellation token to use. - public virtual Result GetPaginatedFineTuningJobs(string after = null, long? limit = null, CancellationToken cancellationToken = default) + public virtual ClientResult GetPaginatedFineTuningJobs(string after = null, long? limit = null) { - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetPaginatedFineTuningJobs(after, limit, context); - return Result.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetPaginatedFineTuningJobs(after, limit); + return ClientResult.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -203,30 +183,29 @@ public virtual Result GetPaginatedFineTunin /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// Identifier for the last job from the previous pagination request. /// Number of fine-tuning jobs to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. 
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task GetPaginatedFineTuningJobsAsync(string after, long? limit, RequestOptions context) + public virtual async Task GetPaginatedFineTuningJobsAsync(string after, long? limit, RequestOptions options) { - using var scope = ClientDiagnostics.CreateSpan("FineTuning.GetPaginatedFineTuningJobs"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -239,30 +218,29 @@ public virtual async Task GetPaginatedFineTuningJobsAsync(string after, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// Identifier for the last job from the previous pagination request. /// Number of fine-tuning jobs to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. 
/// The response returned from the service. - public virtual Result GetPaginatedFineTuningJobs(string after, long? limit, RequestOptions context) + public virtual ClientResult GetPaginatedFineTuningJobs(string after, long? limit, RequestOptions options) { - using var scope = ClientDiagnostics.CreateSpan("FineTuning.GetPaginatedFineTuningJobs"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// @@ -271,16 +249,15 @@ public virtual Result GetPaginatedFineTuningJobs(string after, long? limit, Requ /// [Learn more about fine-tuning](/docs/guides/fine-tuning) /// /// The to use. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> RetrieveFineTuningJobAsync(string fineTuningJobId, CancellationToken cancellationToken = default) + public virtual async Task> RetrieveFineTuningJobAsync(string fineTuningJobId) { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); + if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await RetrieveFineTuningJobAsync(fineTuningJobId, context).ConfigureAwait(false); - return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await RetrieveFineTuningJobAsync(fineTuningJobId).ConfigureAwait(false); + return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -289,16 +266,15 @@ public virtual async Task> RetrieveFineTuningJobAsync(stri /// [Learn more about fine-tuning](/docs/guides/fine-tuning) /// /// The to use. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual Result RetrieveFineTuningJob(string fineTuningJobId, CancellationToken cancellationToken = default) + public virtual ClientResult RetrieveFineTuningJob(string fineTuningJobId) { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); + if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = RetrieveFineTuningJob(fineTuningJobId, context); - return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = RetrieveFineTuningJob(fineTuningJobId); + return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -313,33 +289,32 @@ public virtual Result RetrieveFineTuningJob(string fineTuningJobI /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The to use. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task RetrieveFineTuningJobAsync(string fineTuningJobId, RequestOptions context) + public virtual async Task RetrieveFineTuningJobAsync(string fineTuningJobId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuning.RetrieveFineTuningJob"); - scope.Start(); - try - { - using PipelineMessage message = CreateRetrieveFineTuningJobRequest(fineTuningJobId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); + if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateRetrieveFineTuningJobRequest(fineTuningJobId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -354,61 +329,58 @@ public virtual async Task RetrieveFineTuningJobAsync(string fineTuningJo /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The to use. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. 
+ /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result RetrieveFineTuningJob(string fineTuningJobId, RequestOptions context) + public virtual ClientResult RetrieveFineTuningJob(string fineTuningJobId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuning.RetrieveFineTuningJob"); - scope.Start(); - try + if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); + if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateRetrieveFineTuningJobRequest(fineTuningJobId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateRetrieveFineTuningJobRequest(fineTuningJobId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Immediately cancel a fine-tune job. /// The ID of the fine-tuning job to cancel. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> CancelFineTuningJobAsync(string fineTuningJobId, CancellationToken cancellationToken = default) + public virtual async Task> CancelFineTuningJobAsync(string fineTuningJobId) { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); + if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await CancelFineTuningJobAsync(fineTuningJobId, context).ConfigureAwait(false); - return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await CancelFineTuningJobAsync(fineTuningJobId).ConfigureAwait(false); + return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Immediately cancel a fine-tune job. /// The ID of the fine-tuning job to cancel. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual Result CancelFineTuningJob(string fineTuningJobId, CancellationToken cancellationToken = default) + public virtual ClientResult CancelFineTuningJob(string fineTuningJobId) { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); + if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = CancelFineTuningJob(fineTuningJobId, context); - return Result.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = CancelFineTuningJob(fineTuningJobId); + return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -421,33 +393,32 @@ public virtual Result CancelFineTuningJob(string fineTuningJobId, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the fine-tuning job to cancel. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CancelFineTuningJobAsync(string fineTuningJobId, RequestOptions context) + public virtual async Task CancelFineTuningJobAsync(string fineTuningJobId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuning.CancelFineTuningJob"); - scope.Start(); - try - { - using PipelineMessage message = CreateCancelFineTuningJobRequest(fineTuningJobId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); + if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCancelFineTuningJobRequest(fineTuningJobId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -460,65 +431,62 @@ public virtual async Task CancelFineTuningJobAsync(string fineTuningJobI /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the fine-tuning job to cancel. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. 
+ /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result CancelFineTuningJob(string fineTuningJobId, RequestOptions context) + public virtual ClientResult CancelFineTuningJob(string fineTuningJobId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuning.CancelFineTuningJob"); - scope.Start(); - try - { - using PipelineMessage message = CreateCancelFineTuningJobRequest(fineTuningJobId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); + if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCancelFineTuningJobRequest(fineTuningJobId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Get status updates for a fine-tuning job. /// The ID of the fine-tuning job to get events for. /// Identifier for the last event from the previous pagination request. /// Number of events to retrieve. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual async Task> GetFineTuningEventsAsync(string fineTuningJobId, string after = null, int? limit = null, CancellationToken cancellationToken = default) + public virtual async Task> GetFineTuningEventsAsync(string fineTuningJobId, string after = null, int? 
limit = null) { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); + if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetFineTuningEventsAsync(fineTuningJobId, after, limit, context).ConfigureAwait(false); - return Result.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetFineTuningEventsAsync(fineTuningJobId, after, limit).ConfigureAwait(false); + return ClientResult.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Get status updates for a fine-tuning job. /// The ID of the fine-tuning job to get events for. /// Identifier for the last event from the previous pagination request. /// Number of events to retrieve. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual Result GetFineTuningEvents(string fineTuningJobId, string after = null, int? limit = null, CancellationToken cancellationToken = default) + public virtual ClientResult GetFineTuningEvents(string fineTuningJobId, string after = null, int? 
limit = null) { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); + if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetFineTuningEvents(fineTuningJobId, after, limit, context); - return Result.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetFineTuningEvents(fineTuningJobId, after, limit); + return ClientResult.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -531,7 +499,7 @@ public virtual Result GetFineTuningEvents(strin /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -539,27 +507,26 @@ public virtual Result GetFineTuningEvents(strin /// The ID of the fine-tuning job to get events for. /// Identifier for the last event from the previous pagination request. /// Number of events to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task GetFineTuningEventsAsync(string fineTuningJobId, string after, int? limit, RequestOptions context) + public virtual async Task GetFineTuningEventsAsync(string fineTuningJobId, string after, int? 
limit, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuning.GetFineTuningEvents"); - scope.Start(); - try + if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); + if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetFineTuningEventsRequest(fineTuningJobId, after, limit, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateGetFineTuningEventsRequest(fineTuningJobId, after, limit, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -572,7 +539,7 @@ public virtual async Task GetFineTuningEventsAsync(string fineTuningJobI /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -580,129 +547,162 @@ public virtual async Task GetFineTuningEventsAsync(string fineTuningJobI /// The ID of the fine-tuning job to get events for. /// Identifier for the last event from the previous pagination request. /// Number of events to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. 
- /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result GetFineTuningEvents(string fineTuningJobId, string after, int? limit, RequestOptions context) + public virtual ClientResult GetFineTuningEvents(string fineTuningJobId, string after, int? limit, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); - - using var scope = ClientDiagnostics.CreateSpan("FineTuning.GetFineTuningEvents"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetFineTuningEventsRequest(fineTuningJobId, after, limit, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); + if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetFineTuningEventsRequest(fineTuningJobId, after, limit, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } - internal PipelineMessage CreateCreateFineTuningJobRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateFineTuningJobRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine_tuning/jobs", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", 
"application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/fine_tuning/jobs"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateGetPaginatedFineTuningJobsRequest(string after, long? limit, RequestOptions context) + internal PipelineMessage CreateGetPaginatedFineTuningJobsRequest(string after, long? limit, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine_tuning/jobs", false); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/fine_tuning/jobs"); + uriBuilder.Path += path.ToString(); if (after != null) { - uri.AppendQuery("after", after, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } } if (limit != null) { - uri.AppendQuery("limit", limit.Value, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } } - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + request.Uri = uriBuilder.Uri; + 
request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateRetrieveFineTuningJobRequest(string fineTuningJobId, RequestOptions context) + internal PipelineMessage CreateRetrieveFineTuningJobRequest(string fineTuningJobId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine_tuning/jobs/", false); - uri.AppendPath(fineTuningJobId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/fine_tuning/jobs/"); + uriBuilder.Path += path.ToString(); + path.Append(fineTuningJobId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateCancelFineTuningJobRequest(string fineTuningJobId, RequestOptions context) + internal PipelineMessage CreateCancelFineTuningJobRequest(string fineTuningJobId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine_tuning/jobs/", false); - uri.AppendPath(fineTuningJobId, true); - uri.AppendPath("/cancel", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder 
uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/fine_tuning/jobs/"); + uriBuilder.Path += path.ToString(); + path.Append(fineTuningJobId); + uriBuilder.Path += path.ToString(); + path.Append("/cancel"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateGetFineTuningEventsRequest(string fineTuningJobId, string after, int? limit, RequestOptions context) + internal PipelineMessage CreateGetFineTuningEventsRequest(string fineTuningJobId, string after, int? limit, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/fine_tuning/jobs/", false); - uri.AppendPath(fineTuningJobId, true); - uri.AppendPath("/events", false); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/fine_tuning/jobs/"); + uriBuilder.Path += path.ToString(); + path.Append(fineTuningJobId); + uriBuilder.Path += path.ToString(); + path.Append("/events"); + uriBuilder.Path += path.ToString(); if (after != null) { - uri.AppendQuery("after", after, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } } if (limit != null) { - uri.AppendQuery("limit", limit.Value, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } } - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", 
"application/json"); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } + diff --git a/.dotnet/src/Generated/Images.cs b/.dotnet/src/Generated/Images.cs index a3de80e0b..f2777b7ad 100644 --- a/.dotnet/src/Generated/Images.cs +++ b/.dotnet/src/Generated/Images.cs @@ -1,12 +1,9 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; +using System.Text; using System.Threading; using System.Threading.Tasks; using OpenAI.Models; @@ -18,16 +15,13 @@ namespace OpenAI public partial class Images { private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; + private readonly ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing support for the client library. 
- internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of Images for mocking. protected Images() @@ -35,44 +29,38 @@ protected Images() } /// Initializes a new instance of Images. - /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. + /// The key credential to copy. /// OpenAI Endpoint. - internal Images(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + internal Images(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) { - ClientDiagnostics = clientDiagnostics; _pipeline = pipeline; - _keyCredential = keyCredential; + _credential = credential; _endpoint = endpoint; } /// Creates an image given a prompt. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual async Task> CreateImageAsync(CreateImageRequest image, CancellationToken cancellationToken = default) + public virtual async Task> CreateImageAsync(CreateImageRequest image) { - ClientUtilities.AssertNotNull(image, nameof(image)); + if (image is null) throw new ArgumentNullException(nameof(image)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = image.ToRequestBody(); - Result result = await CreateImageAsync(content, context).ConfigureAwait(false); - return Result.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(image); + ClientResult result = await CreateImageAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Creates an image given a prompt. /// The to use. - /// The cancellation token to use. /// is null. - public virtual Result CreateImage(CreateImageRequest image, CancellationToken cancellationToken = default) + public virtual ClientResult CreateImage(CreateImageRequest image) { - ClientUtilities.AssertNotNull(image, nameof(image)); + if (image is null) throw new ArgumentNullException(nameof(image)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = image.ToRequestBody(); - Result result = CreateImage(content, context); - return Result.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(image); + ClientResult result = CreateImage(content); + return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -85,32 +73,30 @@ public virtual Result CreateImage(CreateImageRequest image, Canc /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. 
+ /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task CreateImageAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateImageAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateImageRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Images.CreateImage"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateImageRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -123,60 +109,54 @@ public virtual async Task CreateImageAsync(RequestBody content, RequestO /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. 
- /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result CreateImage(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateImage(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateImageRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Images.CreateImage"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateImageRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Creates an edited or extended image given an original image and a prompt. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual async Task> CreateImageEditAsync(CreateImageEditRequest image, CancellationToken cancellationToken = default) + public virtual async Task> CreateImageEditAsync(CreateImageEditRequest image) { - ClientUtilities.AssertNotNull(image, nameof(image)); + if (image is null) throw new ArgumentNullException(nameof(image)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = image.ToRequestBody(); - Result result = await CreateImageEditAsync(content, context).ConfigureAwait(false); - return Result.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(image); + ClientResult result = await CreateImageEditAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Creates an edited or extended image given an original image and a prompt. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual Result CreateImageEdit(CreateImageEditRequest image, CancellationToken cancellationToken = default) + public virtual ClientResult CreateImageEdit(CreateImageEditRequest image) { - ClientUtilities.AssertNotNull(image, nameof(image)); + if (image is null) throw new ArgumentNullException(nameof(image)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = image.ToRequestBody(); - Result result = CreateImageEdit(content, context); - return Result.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(image); + ClientResult result = CreateImageEdit(content); + return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -189,32 +169,30 @@ public virtual Result CreateImageEdit(CreateImageEditRequest ima /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateImageEditAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateImageEditAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateImageEditRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Images.CreateImageEdit"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateImageEditRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -227,60 +205,54 @@ public virtual async Task CreateImageEditAsync(RequestBody content, Requ /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateImageEdit(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateImageEdit(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateImageEditRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Images.CreateImageEdit"); - scope.Start(); - try + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateImageEditRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Creates an edited or extended image given an original image and a prompt. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual async Task> CreateImageVariationAsync(CreateImageVariationRequest image, CancellationToken cancellationToken = default) + public virtual async Task> CreateImageVariationAsync(CreateImageVariationRequest image) { - ClientUtilities.AssertNotNull(image, nameof(image)); + if (image is null) throw new ArgumentNullException(nameof(image)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = image.ToRequestBody(); - Result result = await CreateImageVariationAsync(content, context).ConfigureAwait(false); - return Result.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(image); + ClientResult result = await CreateImageVariationAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Creates an edited or extended image given an original image and a prompt. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual Result CreateImageVariation(CreateImageVariationRequest image, CancellationToken cancellationToken = default) + public virtual ClientResult CreateImageVariation(CreateImageVariationRequest image) { - ClientUtilities.AssertNotNull(image, nameof(image)); + if (image is null) throw new ArgumentNullException(nameof(image)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = image.ToRequestBody(); - Result result = CreateImageVariation(content, context); - return Result.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(image); + ClientResult result = CreateImageVariation(content); + return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -293,32 +265,30 @@ public virtual Result CreateImageVariation(CreateImageVariationR /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateImageVariationAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateImageVariationAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateImageVariationRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Images.CreateImageVariation"); - scope.Start(); - try + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateImageVariationRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -331,91 +301,88 @@ public virtual async Task CreateImageVariationAsync(RequestBody content, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateImageVariation(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateImageVariation(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateImageVariationRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Images.CreateImageVariation"); - scope.Start(); - try + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateImageVariationRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } - internal PipelineMessage CreateCreateImageRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateImageRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/images/generations", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/images/generations"); + uriBuilder.Path += path.ToString(); + request.Uri = 
uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateCreateImageEditRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateImageEditRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/images/edits", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("content-type", "multipart/form-data"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/images/edits"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("content-type", "multipart/form-data"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateCreateImageVariationRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateImageVariationRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/images/variations", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("content-type", "multipart/form-data"); + PipelineMessage message = 
_pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/images/variations"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("content-type", "multipart/form-data"); request.Content = content; + message.Apply(options); return message; } - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } + diff --git a/.dotnet/src/Generated/Messages.cs b/.dotnet/src/Generated/Messages.cs index 1ca608801..28ac36351 100644 --- a/.dotnet/src/Generated/Messages.cs +++ b/.dotnet/src/Generated/Messages.cs @@ -1,12 +1,9 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; +using System.Text; using System.Threading; using System.Threading.Tasks; using OpenAI.Models; @@ -18,16 +15,13 @@ namespace OpenAI public partial class Messages { private const string AuthorizationHeader = "Authorization"; - private readonly 
KeyCredential _keyCredential; + private readonly ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing support for the client library. - internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of Messages for mocking. protected Messages() @@ -35,50 +29,46 @@ protected Messages() } /// Initializes a new instance of Messages. - /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. + /// The key credential to copy. /// OpenAI Endpoint. - internal Messages(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + internal Messages(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) { - ClientDiagnostics = clientDiagnostics; _pipeline = pipeline; - _keyCredential = keyCredential; + _credential = credential; _endpoint = endpoint; } /// Create a message. /// The ID of the [thread](/docs/api-reference/threads) to create a message for. /// The to use. - /// The cancellation token to use. /// or is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> CreateMessageAsync(string threadId, CreateMessageRequest message, CancellationToken cancellationToken = default) + public virtual async Task> CreateMessageAsync(string threadId, CreateMessageRequest message) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(message, nameof(message)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (message is null) throw new ArgumentNullException(nameof(message)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = message.ToRequestBody(); - Result result = await CreateMessageAsync(threadId, content, context).ConfigureAwait(false); - return Result.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(message); + ClientResult result = await CreateMessageAsync(threadId, content).ConfigureAwait(false); + return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Create a message. /// The ID of the [thread](/docs/api-reference/threads) to create a message for. /// The to use. - /// The cancellation token to use. /// or is null. /// is an empty string, and was expected to be non-empty. 
- public virtual Result CreateMessage(string threadId, CreateMessageRequest message, CancellationToken cancellationToken = default) + public virtual ClientResult CreateMessage(string threadId, CreateMessageRequest message) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(message, nameof(message)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (message is null) throw new ArgumentNullException(nameof(message)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = message.ToRequestBody(); - Result result = CreateMessage(threadId, content, context); - return Result.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(message); + ClientResult result = CreateMessage(threadId, content); + return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -91,35 +81,34 @@ public virtual Result CreateMessage(string threadId, CreateMessag /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the [thread](/docs/api-reference/threads) to create a message for. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateMessageAsync(string threadId, RequestBody content, RequestOptions context = null) + public virtual async Task CreateMessageAsync(string threadId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Messages.CreateMessage"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateMessageRequest(threadId, content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateMessageRequest(threadId, content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -132,35 +121,34 @@ public virtual async Task CreateMessageAsync(string threadId, RequestBod /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the [thread](/docs/api-reference/threads) to create a message for. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. 
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result CreateMessage(string threadId, RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateMessage(string threadId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Messages.CreateMessage"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateMessageRequest(threadId, content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateMessageRequest(threadId, content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Returns a list of messages for a given thread. @@ -183,16 +171,15 @@ public virtual Result CreateMessage(string threadId, RequestBody content, Reques /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. 
/// - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual async Task> GetMessagesAsync(string threadId, int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + public virtual async Task> GetMessagesAsync(string threadId, int? limit = null, ListOrder? order = null, string after = null, string before = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetMessagesAsync(threadId, limit, order?.ToString(), after, before, context).ConfigureAwait(false); - return Result.FromValue(ListMessagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetMessagesAsync(threadId, limit, order?.ToString(), after, before).ConfigureAwait(false); + return ClientResult.FromValue(ListMessagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Returns a list of messages for a given thread. @@ -215,16 +202,15 @@ public virtual async Task> GetMessagesAsync(string /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual Result GetMessages(string threadId, int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + public virtual ClientResult GetMessages(string threadId, int? limit = null, ListOrder? 
order = null, string after = null, string before = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetMessages(threadId, limit, order?.ToString(), after, before, context); - return Result.FromValue(ListMessagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetMessages(threadId, limit, order?.ToString(), after, before); + return ClientResult.FromValue(ListMessagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -237,7 +223,7 @@ public virtual Result GetMessages(string threadId, int? li /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -261,27 +247,26 @@ public virtual Result GetMessages(string threadId, int? li /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task GetMessagesAsync(string threadId, int? limit, string order, string after, string before, RequestOptions context) + public virtual async Task GetMessagesAsync(string threadId, int? 
limit, string order, string after, string before, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - - using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessages"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetMessagesRequest(threadId, limit, order, after, before, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateGetMessagesRequest(threadId, limit, order, after, before, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -294,7 +279,7 @@ public virtual async Task GetMessagesAsync(string threadId, int? limit, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -318,59 +303,58 @@ public virtual async Task GetMessagesAsync(string threadId, int? limit, /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. 
/// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result GetMessages(string threadId, int? limit, string order, string after, string before, RequestOptions context) + public virtual ClientResult GetMessages(string threadId, int? limit, string order, string after, string before, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - - using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessages"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetMessagesRequest(threadId, limit, order, after, before, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateGetMessagesRequest(threadId, limit, order, after, before, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Retrieve a message. /// The ID of the [thread](/docs/api-reference/threads) to which this message belongs. /// The ID of the message to retrieve. - /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. 
- public virtual async Task> GetMessageAsync(string threadId, string messageId, CancellationToken cancellationToken = default) + public virtual async Task> GetMessageAsync(string threadId, string messageId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetMessageAsync(threadId, messageId, context).ConfigureAwait(false); - return Result.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetMessageAsync(threadId, messageId).ConfigureAwait(false); + return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Retrieve a message. /// The ID of the [thread](/docs/api-reference/threads) to which this message belongs. /// The ID of the message to retrieve. - /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. 
- public virtual Result GetMessage(string threadId, string messageId, CancellationToken cancellationToken = default) + public virtual ClientResult GetMessage(string threadId, string messageId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetMessage(threadId, messageId, context); - return Result.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetMessage(threadId, messageId); + return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -383,35 +367,35 @@ public virtual Result GetMessage(string threadId, string messageI /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the [thread](/docs/api-reference/threads) to which this message belongs. /// The ID of the message to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task GetMessageAsync(string threadId, string messageId, RequestOptions context) + public virtual async Task GetMessageAsync(string threadId, string messageId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); - - using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessage"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetMessageRequest(threadId, messageId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateGetMessageRequest(threadId, messageId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -424,73 +408,73 @@ public virtual async Task GetMessageAsync(string threadId, string messag /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the [thread](/docs/api-reference/threads) to which this message belongs. /// The ID of the message to retrieve. 
- /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result GetMessage(string threadId, string messageId, RequestOptions context) + public virtual ClientResult GetMessage(string threadId, string messageId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); - - using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessage"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetMessageRequest(threadId, messageId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateGetMessageRequest(threadId, messageId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Modifies a message. /// The ID of the thread to which this message belongs. /// The ID of the message to modify. /// The to use. - /// The cancellation token to use. /// , or is null. 
/// or is an empty string, and was expected to be non-empty. - public virtual async Task> ModifyMessageAsync(string threadId, string messageId, ModifyMessageRequest message, CancellationToken cancellationToken = default) + public virtual async Task> ModifyMessageAsync(string threadId, string messageId, ModifyMessageRequest message) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); - ClientUtilities.AssertNotNull(message, nameof(message)); - - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = message.ToRequestBody(); - Result result = await ModifyMessageAsync(threadId, messageId, content, context).ConfigureAwait(false); - return Result.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); + if (message is null) throw new ArgumentNullException(nameof(message)); + + using BinaryContent content = BinaryContent.Create(message); + ClientResult result = await ModifyMessageAsync(threadId, messageId, content).ConfigureAwait(false); + return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Modifies a message. /// The ID of the thread to which this message belongs. /// The ID of the message to modify. /// The to use. - /// The cancellation token to use. /// , or is null. /// or is an empty string, and was expected to be non-empty. 
- public virtual Result ModifyMessage(string threadId, string messageId, ModifyMessageRequest message, CancellationToken cancellationToken = default) + public virtual ClientResult ModifyMessage(string threadId, string messageId, ModifyMessageRequest message) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); - ClientUtilities.AssertNotNull(message, nameof(message)); - - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = message.ToRequestBody(); - Result result = ModifyMessage(threadId, messageId, content, context); - return Result.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); + if (message is null) throw new ArgumentNullException(nameof(message)); + + using BinaryContent content = BinaryContent.Create(message); + ClientResult result = ModifyMessage(threadId, messageId, content); + return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -503,7 +487,7 @@ public virtual Result ModifyMessage(string threadId, string messa /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -511,29 +495,29 @@ public virtual Result ModifyMessage(string threadId, string messa /// The ID of the thread to which this message belongs. /// The ID of the message to modify. /// The content to send as the body of the request. 
- /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// , or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task ModifyMessageAsync(string threadId, string messageId, RequestBody content, RequestOptions context = null) + public virtual async Task ModifyMessageAsync(string threadId, string messageId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Messages.ModifyMessage"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateModifyMessageRequest(threadId, messageId, content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateModifyMessageRequest(threadId, messageId, content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - 
throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -546,7 +530,7 @@ public virtual async Task ModifyMessageAsync(string threadId, string mes /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -554,29 +538,29 @@ public virtual async Task ModifyMessageAsync(string threadId, string mes /// The ID of the thread to which this message belongs. /// The ID of the message to modify. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// , or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result ModifyMessage(string threadId, string messageId, RequestBody content, RequestOptions context = null) + public virtual ClientResult ModifyMessage(string threadId, string messageId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Messages.ModifyMessage"); - scope.Start(); - try - { - using PipelineMessage message = CreateModifyMessageRequest(threadId, messageId, content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateModifyMessageRequest(threadId, messageId, content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Returns a list of message files. @@ -600,17 +584,17 @@ public virtual Result ModifyMessage(string threadId, string messageId, RequestBo /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The cancellation token to use. /// or is null. 
/// or is an empty string, and was expected to be non-empty. - public virtual async Task> GetMessageFilesAsync(string threadId, string messageId, int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + public virtual async Task> GetMessageFilesAsync(string threadId, string messageId, int? limit = null, ListOrder? order = null, string after = null, string before = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetMessageFilesAsync(threadId, messageId, limit, order?.ToString(), after, before, context).ConfigureAwait(false); - return Result.FromValue(ListMessageFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetMessageFilesAsync(threadId, messageId, limit, order?.ToString(), after, before).ConfigureAwait(false); + return ClientResult.FromValue(ListMessageFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Returns a list of message files. @@ -634,17 +618,17 @@ public virtual async Task> GetMessageFilesAsync /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - public virtual Result GetMessageFiles(string threadId, string messageId, int? 
limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + public virtual ClientResult GetMessageFiles(string threadId, string messageId, int? limit = null, ListOrder? order = null, string after = null, string before = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetMessageFiles(threadId, messageId, limit, order?.ToString(), after, before, context); - return Result.FromValue(ListMessageFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetMessageFiles(threadId, messageId, limit, order?.ToString(), after, before); + return ClientResult.FromValue(ListMessageFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -657,7 +641,7 @@ public virtual Result GetMessageFiles(string threadId, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -682,28 +666,28 @@ public virtual Result GetMessageFiles(string threadId, /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. 
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task GetMessageFilesAsync(string threadId, string messageId, int? limit, string order, string after, string before, RequestOptions context) + public virtual async Task GetMessageFilesAsync(string threadId, string messageId, int? limit, string order, string after, string before, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); - - using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessageFiles"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetMessageFilesRequest(threadId, messageId, limit, order, after, before, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetMessageFilesRequest(threadId, messageId, limit, order, after, before, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// 
@@ -716,7 +700,7 @@ public virtual async Task GetMessageFilesAsync(string threadId, string m /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -741,64 +725,66 @@ public virtual async Task GetMessageFilesAsync(string threadId, string m /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result GetMessageFiles(string threadId, string messageId, int? limit, string order, string after, string before, RequestOptions context) + public virtual ClientResult GetMessageFiles(string threadId, string messageId, int? 
limit, string order, string after, string before, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); - - using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessageFiles"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetMessageFilesRequest(threadId, messageId, limit, order, after, before, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetMessageFilesRequest(threadId, messageId, limit, order, after, before, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Retrieves a message file. /// The ID of the thread to which the message and File belong. /// The ID of the message the file belongs to. /// The ID of the file being retrieved. - /// The cancellation token to use. /// , or is null. /// , or is an empty string, and was expected to be non-empty. 
- public virtual async Task> GetMessageFileAsync(string threadId, string messageId, string fileId, CancellationToken cancellationToken = default) + public virtual async Task> GetMessageFileAsync(string threadId, string messageId, string fileId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetMessageFileAsync(threadId, messageId, fileId, context).ConfigureAwait(false); - return Result.FromValue(MessageFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + + ClientResult result = await GetMessageFileAsync(threadId, messageId, fileId).ConfigureAwait(false); + return ClientResult.FromValue(MessageFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Retrieves a message file. /// The ID of the thread to which the message and File belong. /// The ID of the message the file belongs to. /// The ID of the file being retrieved. - /// The cancellation token to use. /// , or is null. /// , or is an empty string, and was expected to be non-empty. 
- public virtual Result GetMessageFile(string threadId, string messageId, string fileId, CancellationToken cancellationToken = default) + public virtual ClientResult GetMessageFile(string threadId, string messageId, string fileId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetMessageFile(threadId, messageId, fileId, context); - return Result.FromValue(MessageFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + + ClientResult result = GetMessageFile(threadId, messageId, fileId); + return ClientResult.FromValue(MessageFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -811,7 +797,7 @@ public virtual Result GetMessageFile(string threadId, string /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -819,29 +805,30 @@ public virtual Result GetMessageFile(string threadId, string /// The ID of the thread to which the message and File belong. /// The ID of the message the file belongs to. /// The ID of the file being retrieved. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. 
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// , or is null. /// , or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task GetMessageFileAsync(string threadId, string messageId, string fileId, RequestOptions context) + public virtual async Task GetMessageFileAsync(string threadId, string messageId, string fileId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessageFile"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetMessageFileRequest(threadId, messageId, fileId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateGetMessageFileRequest(threadId, messageId, fileId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await 
ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -854,7 +841,7 @@ public virtual async Task GetMessageFileAsync(string threadId, string me /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -862,176 +849,256 @@ public virtual async Task GetMessageFileAsync(string threadId, string me /// The ID of the thread to which the message and File belong. /// The ID of the message the file belongs to. /// The ID of the file being retrieved. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// , or is null. /// , or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result GetMessageFile(string threadId, string messageId, string fileId, RequestOptions context) + public virtual ClientResult GetMessageFile(string threadId, string messageId, string fileId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(messageId, nameof(messageId)); - ClientUtilities.AssertNotNullOrEmpty(fileId, nameof(fileId)); - - using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessageFile"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); + if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); + if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetMessageFileRequest(threadId, messageId, fileId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateGetMessageFileRequest(threadId, messageId, fileId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } - internal PipelineMessage CreateCreateMessageRequest(string threadId, RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateMessageRequest(string threadId, BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - 
request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - uri.AppendPath("/messages", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/messages"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateGetMessagesRequest(string threadId, int? limit, string order, string after, string before, RequestOptions context) + internal PipelineMessage CreateGetMessagesRequest(string threadId, int? 
limit, string order, string after, string before, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - uri.AppendPath("/messages", false); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/messages"); + uriBuilder.Path += path.ToString(); if (limit != null) { - uri.AppendQuery("limit", limit.Value, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } } if (order != null) { - uri.AppendQuery("order", order, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&order={order}"; + } + else + { + uriBuilder.Query = $"order={order}"; + } } if (after != null) { - uri.AppendQuery("after", after, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } } if (before != null) { - uri.AppendQuery("before", before, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&before={before}"; + } + else + { + uriBuilder.Query = $"before={before}"; + } } - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal 
PipelineMessage CreateGetMessageRequest(string threadId, string messageId, RequestOptions context) + internal PipelineMessage CreateGetMessageRequest(string threadId, string messageId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - uri.AppendPath("/messages/", false); - uri.AppendPath(messageId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/messages/"); + uriBuilder.Path += path.ToString(); + path.Append(messageId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateModifyMessageRequest(string threadId, string messageId, RequestBody content, RequestOptions context) + internal PipelineMessage CreateModifyMessageRequest(string threadId, string messageId, BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - uri.AppendPath("/messages/", false); - uri.AppendPath(messageId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - 
request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/messages/"); + uriBuilder.Path += path.ToString(); + path.Append(messageId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateGetMessageFilesRequest(string threadId, string messageId, int? limit, string order, string after, string before, RequestOptions context) + internal PipelineMessage CreateGetMessageFilesRequest(string threadId, string messageId, int? 
limit, string order, string after, string before, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - uri.AppendPath("/messages/", false); - uri.AppendPath(messageId, true); - uri.AppendPath("/files", false); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/messages/"); + uriBuilder.Path += path.ToString(); + path.Append(messageId); + uriBuilder.Path += path.ToString(); + path.Append("/files"); + uriBuilder.Path += path.ToString(); if (limit != null) { - uri.AppendQuery("limit", limit.Value, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } } if (order != null) { - uri.AppendQuery("order", order, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&order={order}"; + } + else + { + uriBuilder.Query = $"order={order}"; + } } if (after != null) { - uri.AppendQuery("after", after, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } } if (before != null) { - uri.AppendQuery("before", before, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&before={before}"; + } + else + { + uriBuilder.Query = $"before={before}"; + } } - request.Uri 
= uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateGetMessageFileRequest(string threadId, string messageId, string fileId, RequestOptions context) + internal PipelineMessage CreateGetMessageFileRequest(string threadId, string messageId, string fileId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - uri.AppendPath("/messages/", false); - uri.AppendPath(messageId, true); - uri.AppendPath("/files/", false); - uri.AppendPath(fileId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/messages/"); + uriBuilder.Path += path.ToString(); + path.Append(messageId); + uriBuilder.Path += path.ToString(); + path.Append("/files/"); + uriBuilder.Path += path.ToString(); + path.Append(fileId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = 
cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } + diff --git a/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs b/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs index 74ad1f256..c2b95887f 100644 --- a/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class AssistantFileObject : IUtf8JsonWriteable, IJsonModel + public partial class AssistantFileObject : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -144,13 +140,6 @@ internal static AssistantFileObject FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeAssistantFileObject(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/AssistantFileObject.cs b/.dotnet/src/Generated/Models/AssistantFileObject.cs index 21aef1f7b..ee5c47c27 100644 --- a/.dotnet/src/Generated/Models/AssistantFileObject.cs +++ b/.dotnet/src/Generated/Models/AssistantFileObject.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -50,8 +48,8 @@ public partial class AssistantFileObject /// or is null. internal AssistantFileObject(string id, DateTimeOffset createdAt, string assistantId) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(assistantId, nameof(assistantId)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); Id = id; CreatedAt = createdAt; @@ -89,3 +87,4 @@ internal AssistantFileObject() public string AssistantId { get; } } } + diff --git a/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs b/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs index d9b017682..7883e84f3 100644 --- a/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs +++ b/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public AssistantFileObjectObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs b/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs index 3676fb26d..c4e51bcf9 100644 --- a/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable 
disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class AssistantObject : IUtf8JsonWriteable, IJsonModel + public partial class AssistantObject : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -290,13 +286,6 @@ internal static AssistantObject FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeAssistantObject(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/AssistantObject.cs b/.dotnet/src/Generated/Models/AssistantObject.cs index 4816bc339..3b6d4b131 100644 --- a/.dotnet/src/Generated/Models/AssistantObject.cs +++ b/.dotnet/src/Generated/Models/AssistantObject.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -72,10 +70,10 @@ public partial class AssistantObject /// , , or is null. 
internal AssistantObject(string id, DateTimeOffset createdAt, string name, string description, string model, string instructions, IEnumerable tools, IEnumerable fileIds, IReadOnlyDictionary metadata) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(model, nameof(model)); - ClientUtilities.AssertNotNull(tools, nameof(tools)); - ClientUtilities.AssertNotNull(fileIds, nameof(fileIds)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (model is null) throw new ArgumentNullException(nameof(model)); + if (tools is null) throw new ArgumentNullException(nameof(tools)); + if (fileIds is null) throw new ArgumentNullException(nameof(fileIds)); Id = id; CreatedAt = createdAt; @@ -200,3 +198,4 @@ internal AssistantObject() public IReadOnlyDictionary Metadata { get; } } } + diff --git a/.dotnet/src/Generated/Models/AssistantObjectObject.cs b/.dotnet/src/Generated/Models/AssistantObjectObject.cs index 9c8eb972f..f7a7cb21d 100644 --- a/.dotnet/src/Generated/Models/AssistantObjectObject.cs +++ b/.dotnet/src/Generated/Models/AssistantObjectObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public AssistantObjectObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs b/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs index e114a09c0..5b4ef9b7a 100644 --- a/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs +++ b/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class AudioSegment : IUtf8JsonWriteable, IJsonModel + public partial class AudioSegment : IJsonModel { - void 
IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -202,13 +198,6 @@ internal static AudioSegment FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeAudioSegment(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/AudioSegment.cs b/.dotnet/src/Generated/Models/AudioSegment.cs index d44bf8dc7..f25b206ff 100644 --- a/.dotnet/src/Generated/Models/AudioSegment.cs +++ b/.dotnet/src/Generated/Models/AudioSegment.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -64,8 +62,8 @@ public partial class AudioSegment /// or is null. 
internal AudioSegment(long id, long seek, TimeSpan start, TimeSpan end, string text, IEnumerable tokens, double temperature, double avgLogprob, double compressionRatio, double noSpeechProb) { - ClientUtilities.AssertNotNull(text, nameof(text)); - ClientUtilities.AssertNotNull(tokens, nameof(tokens)); + if (text is null) throw new ArgumentNullException(nameof(text)); + if (tokens is null) throw new ArgumentNullException(nameof(tokens)); Id = id; Seek = seek; @@ -145,3 +143,4 @@ internal AudioSegment() public double NoSpeechProb { get; } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs index c85718f98..d82b8d8e5 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - internal partial class ChatCompletionFunctionCallOption : IUtf8JsonWriteable, IJsonModel + internal partial class ChatCompletionFunctionCallOption : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -120,13 +116,6 @@ internal static ChatCompletionFunctionCallOption FromResponse(PipelineResponse r using var document = JsonDocument.Parse(response.Content); return DeserializeChatCompletionFunctionCallOption(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs index 268efaf1a..40c76aacd 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -51,7 +49,7 @@ internal partial class ChatCompletionFunctionCallOption /// is null. public ChatCompletionFunctionCallOption(string name) { - ClientUtilities.AssertNotNull(name, nameof(name)); + if (name is null) throw new ArgumentNullException(nameof(name)); Name = name; } @@ -74,3 +72,4 @@ internal ChatCompletionFunctionCallOption() public string Name { get; } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs index 119be9a51..5558f49c3 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ChatCompletionFunctions : IUtf8JsonWriteable, IJsonModel + public partial class ChatCompletionFunctions : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format 
= options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -146,13 +142,6 @@ internal static ChatCompletionFunctions FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeChatCompletionFunctions(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs index d1bbfa718..7055cc173 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -52,7 +50,7 @@ public partial class ChatCompletionFunctions /// is null. 
public ChatCompletionFunctions(string name) { - ClientUtilities.AssertNotNull(name, nameof(name)); + if (name is null) throw new ArgumentNullException(nameof(name)); Name = name; } @@ -95,3 +93,4 @@ internal ChatCompletionFunctions() public FunctionParameters Parameters { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs index e3d872da2..4806e429e 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ChatCompletionMessageToolCall : IUtf8JsonWriteable, IJsonModel + public partial class ChatCompletionMessageToolCall : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -136,13 +132,6 @@ internal static ChatCompletionMessageToolCall FromResponse(PipelineResponse resp using var document = JsonDocument.Parse(response.Content); return DeserializeChatCompletionMessageToolCall(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs index e5249dafc..1d5bbc882 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -49,8 +47,8 @@ public partial class ChatCompletionMessageToolCall /// or is null. public ChatCompletionMessageToolCall(string id, ChatCompletionMessageToolCallFunction function) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(function, nameof(function)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (function is null) throw new ArgumentNullException(nameof(function)); Id = id; Function = function; @@ -83,3 +81,4 @@ internal ChatCompletionMessageToolCall() public ChatCompletionMessageToolCallFunction Function { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs index ca4e609d1..57c8334c1 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ChatCompletionMessageToolCallFunction : IUtf8JsonWriteable, IJsonModel + 
public partial class ChatCompletionMessageToolCallFunction : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -128,13 +124,6 @@ internal static ChatCompletionMessageToolCallFunction FromResponse(PipelineRespo using var document = JsonDocument.Parse(response.Content); return DeserializeChatCompletionMessageToolCallFunction(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs index 92b945f4c..892e74010 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -53,8 +51,8 @@ public partial class ChatCompletionMessageToolCallFunction /// or is null. 
public ChatCompletionMessageToolCallFunction(string name, string arguments) { - ClientUtilities.AssertNotNull(name, nameof(name)); - ClientUtilities.AssertNotNull(arguments, nameof(arguments)); + if (name is null) throw new ArgumentNullException(nameof(name)); + if (arguments is null) throw new ArgumentNullException(nameof(arguments)); Name = name; Arguments = arguments; @@ -90,3 +88,4 @@ internal ChatCompletionMessageToolCallFunction() public string Arguments { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs index e6c9dff53..bdf80b599 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ChatCompletionMessageToolCallType(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs index 7ec5a3aeb..a5cb5af93 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - internal partial class ChatCompletionNamedToolChoice : IUtf8JsonWriteable, IJsonModel + internal partial class ChatCompletionNamedToolChoice : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { 
var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -128,13 +124,6 @@ internal static ChatCompletionNamedToolChoice FromResponse(PipelineResponse resp using var document = JsonDocument.Parse(response.Content); return DeserializeChatCompletionNamedToolChoice(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs index da8b10edb..63c912acc 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -48,7 +46,7 @@ internal partial class ChatCompletionNamedToolChoice /// is null. 
public ChatCompletionNamedToolChoice(ChatCompletionNamedToolChoiceFunction function) { - ClientUtilities.AssertNotNull(function, nameof(function)); + if (function is null) throw new ArgumentNullException(nameof(function)); Function = function; } @@ -76,3 +74,4 @@ internal ChatCompletionNamedToolChoice() public ChatCompletionNamedToolChoiceFunction Function { get; } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs index 654aa4f90..cb372631e 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - internal partial class ChatCompletionNamedToolChoiceFunction : IUtf8JsonWriteable, IJsonModel + internal partial class ChatCompletionNamedToolChoiceFunction : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -120,13 +116,6 @@ internal static ChatCompletionNamedToolChoiceFunction FromResponse(PipelineRespo using var document = JsonDocument.Parse(response.Content); return DeserializeChatCompletionNamedToolChoiceFunction(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs index 383fceef2..44be066fd 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -48,7 +46,7 @@ internal partial class ChatCompletionNamedToolChoiceFunction /// is null. public ChatCompletionNamedToolChoiceFunction(string name) { - ClientUtilities.AssertNotNull(name, nameof(name)); + if (name is null) throw new ArgumentNullException(nameof(name)); Name = name; } @@ -71,3 +69,4 @@ internal ChatCompletionNamedToolChoiceFunction() public string Name { get; } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs index 8f94a2674..166411eac 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ChatCompletionNamedToolChoiceType(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs index b467eb421..b819876dc 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs @@ -1,19 +1,15 @@ // 
-#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ChatCompletionResponseMessage : IUtf8JsonWriteable, IJsonModel + public partial class ChatCompletionResponseMessage : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -180,13 +176,6 @@ internal static ChatCompletionResponseMessage FromResponse(PipelineResponse resp using var document = JsonDocument.Parse(response.Content); return DeserializeChatCompletionResponseMessage(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs index 709a705ff..a27e26e91 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -82,3 +80,4 @@ internal ChatCompletionResponseMessage() public ChatCompletionResponseMessageFunctionCall FunctionCall { get; } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs index 98aac4cd5..6fd3d045a 
100644 --- a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ChatCompletionResponseMessageFunctionCall : IUtf8JsonWriteable, IJsonModel + public partial class ChatCompletionResponseMessageFunctionCall : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -128,13 +124,6 @@ internal static ChatCompletionResponseMessageFunctionCall FromResponse(PipelineR using var document = JsonDocument.Parse(response.Content); return DeserializeChatCompletionResponseMessageFunctionCall(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs index 7f9e8549c..8016f2af6 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -53,8 +51,8 @@ public partial class ChatCompletionResponseMessageFunctionCall /// or is null. internal ChatCompletionResponseMessageFunctionCall(string arguments, string name) { - ClientUtilities.AssertNotNull(arguments, nameof(arguments)); - ClientUtilities.AssertNotNull(name, nameof(name)); + if (arguments is null) throw new ArgumentNullException(nameof(arguments)); + if (name is null) throw new ArgumentNullException(nameof(name)); Arguments = arguments; Name = name; @@ -90,3 +88,4 @@ internal ChatCompletionResponseMessageFunctionCall() public string Name { get; } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs index ee43ccb40..08bcd42a2 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ChatCompletionResponseMessageRole(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs index 
b80e2c44a..b60cb9ec5 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ChatCompletionTokenLogprob : IUtf8JsonWriteable, IJsonModel + public partial class ChatCompletionTokenLogprob : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -176,13 +172,6 @@ internal static ChatCompletionTokenLogprob FromResponse(PipelineResponse respons using var document = JsonDocument.Parse(response.Content); return DeserializeChatCompletionTokenLogprob(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs index 6e487ac5c..bd303c7a2 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -60,8 +58,8 @@ public partial class ChatCompletionTokenLogprob /// or is null. 
internal ChatCompletionTokenLogprob(string token, double logprob, IEnumerable bytes, IEnumerable topLogprobs) { - ClientUtilities.AssertNotNull(token, nameof(token)); - ClientUtilities.AssertNotNull(topLogprobs, nameof(topLogprobs)); + if (token is null) throw new ArgumentNullException(nameof(token)); + if (topLogprobs is null) throw new ArgumentNullException(nameof(topLogprobs)); Token = token; Logprob = logprob; @@ -115,3 +113,4 @@ internal ChatCompletionTokenLogprob() public IReadOnlyList TopLogprobs { get; } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs index ab6b52872..b71b5ace6 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ChatCompletionTokenLogprobTopLogprob : IUtf8JsonWriteable, IJsonModel + public partial class ChatCompletionTokenLogprobTopLogprob : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -158,13 +154,6 @@ internal static ChatCompletionTokenLogprobTopLogprob FromResponse(PipelineRespon using var document = JsonDocument.Parse(response.Content); return DeserializeChatCompletionTokenLogprobTopLogprob(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs index f0a7c191b..10e36b3a6 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -56,7 +54,7 @@ public partial class ChatCompletionTokenLogprobTopLogprob /// is null. internal ChatCompletionTokenLogprobTopLogprob(string token, double logprob, IEnumerable bytes) { - ClientUtilities.AssertNotNull(token, nameof(token)); + if (token is null) throw new ArgumentNullException(nameof(token)); Token = token; Logprob = logprob; @@ -99,3 +97,4 @@ internal ChatCompletionTokenLogprobTopLogprob() public IReadOnlyList Bytes { get; } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs index 4f0233034..b5913cfcd 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ChatCompletionTool : IUtf8JsonWriteable, IJsonModel + public partial class ChatCompletionTool : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void 
IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -128,13 +124,6 @@ internal static ChatCompletionTool FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeChatCompletionTool(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionTool.cs b/.dotnet/src/Generated/Models/ChatCompletionTool.cs index c03dc4647..28feedefd 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionTool.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionTool.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -48,7 +46,7 @@ public partial class ChatCompletionTool /// is null. 
public ChatCompletionTool(FunctionObject function) { - ClientUtilities.AssertNotNull(function, nameof(function)); + if (function is null) throw new ArgumentNullException(nameof(function)); Function = function; } @@ -76,3 +74,4 @@ internal ChatCompletionTool() public FunctionObject Function { get; } } } + diff --git a/.dotnet/src/Generated/Models/ChatCompletionToolType.cs b/.dotnet/src/Generated/Models/ChatCompletionToolType.cs index f1a96376c..b1dae6880 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionToolType.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionToolType.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ChatCompletionToolType(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs b/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs index ffaa5adcb..5f4745e70 100644 --- a/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs +++ b/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CompletionUsage : IUtf8JsonWriteable, IJsonModel + public partial class CompletionUsage : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -136,13 +132,6 @@ internal static CompletionUsage FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCompletionUsage(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CompletionUsage.cs b/.dotnet/src/Generated/Models/CompletionUsage.cs index 07f38ee8b..440f8ef74 100644 --- a/.dotnet/src/Generated/Models/CompletionUsage.cs +++ b/.dotnet/src/Generated/Models/CompletionUsage.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.Collections.Generic; @@ -79,3 +77,4 @@ internal CompletionUsage() public long TotalTokens { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs index ae5a3b32e..e4d8ef38b 100644 --- a/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateAssistantFileRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateAssistantFileRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -120,13 +116,6 @@ internal static CreateAssistantFileRequest FromResponse(PipelineResponse respons using var document = JsonDocument.Parse(response.Content); return DeserializeCreateAssistantFileRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs index 8c11246d1..7a7c067c9 100644 --- a/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs +++ b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -51,7 +49,7 @@ public partial class CreateAssistantFileRequest /// is null. 
public CreateAssistantFileRequest(string fileId) { - ClientUtilities.AssertNotNull(fileId, nameof(fileId)); + if (fileId is null) throw new ArgumentNullException(nameof(fileId)); FileId = fileId; } @@ -80,3 +78,4 @@ internal CreateAssistantFileRequest() public string FileId { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs index 8aaa22372..6f034bc1e 100644 --- a/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateAssistantRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateAssistantRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -291,13 +287,6 @@ internal static CreateAssistantRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateAssistantRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateAssistantRequest.cs b/.dotnet/src/Generated/Models/CreateAssistantRequest.cs index 115fbc0d9..f99c14f37 100644 --- a/.dotnet/src/Generated/Models/CreateAssistantRequest.cs +++ b/.dotnet/src/Generated/Models/CreateAssistantRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -52,7 +50,7 @@ public partial class CreateAssistantRequest /// is null. public CreateAssistantRequest(string model) { - ClientUtilities.AssertNotNull(model, nameof(model)); + if (model is null) throw new ArgumentNullException(nameof(model)); Model = model; Tools = new OptionalList(); @@ -159,3 +157,4 @@ internal CreateAssistantRequest() public IDictionary Metadata { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs index 66c50785a..4310faf5b 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateChatCompletionRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateChatCompletionRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var 
format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -570,13 +566,6 @@ internal static CreateChatCompletionRequest FromResponse(PipelineResponse respon using var document = JsonDocument.Parse(response.Content); return DeserializeCreateChatCompletionRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs index c773e9297..80c7d9be5 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -56,7 +54,7 @@ public partial class CreateChatCompletionRequest /// is null. 
public CreateChatCompletionRequest(IEnumerable messages, CreateChatCompletionRequestModel model) { - ClientUtilities.AssertNotNull(messages, nameof(messages)); + if (messages is null) throw new ArgumentNullException(nameof(messages)); Messages = messages.ToList(); Model = model; @@ -510,3 +508,4 @@ internal CreateChatCompletionRequest() public IList Functions { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs index a0c497c47..5110350bd 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -88,3 +86,4 @@ public CreateChatCompletionRequestModel(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs index ecd0ac9b1..8a13529c4 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateChatCompletionRequestResponseFormat : IUtf8JsonWriteable, IJsonModel + public partial class CreateChatCompletionRequestResponseFormat : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -127,13 +123,6 @@ internal static CreateChatCompletionRequestResponseFormat FromResponse(PipelineR using var document = JsonDocument.Parse(response.Content); return DeserializeCreateChatCompletionRequestResponseFormat(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs index 5c8f4c2c0..a1dd64235 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.Collections.Generic; @@ -60,3 +58,4 @@ internal CreateChatCompletionRequestResponseFormat(CreateChatCompletionRequestRe public CreateChatCompletionRequestResponseFormatType? 
Type { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs index d0332868f..894f322de 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public CreateChatCompletionRequestResponseFormatType(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs index c560aa105..f9c94d1dc 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateChatCompletionResponse : IUtf8JsonWriteable, IJsonModel + public partial class CreateChatCompletionResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -188,13 +184,6 @@ internal static CreateChatCompletionResponse FromResponse(PipelineResponse respo using var document = JsonDocument.Parse(response.Content); return DeserializeCreateChatCompletionResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs index cc16880d8..6476530ae 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -52,9 +50,9 @@ public partial class CreateChatCompletionResponse /// , or is null. internal CreateChatCompletionResponse(string id, IEnumerable choices, DateTimeOffset created, string model) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(choices, nameof(choices)); - ClientUtilities.AssertNotNull(model, nameof(model)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (choices is null) throw new ArgumentNullException(nameof(choices)); + if (model is null) throw new ArgumentNullException(nameof(model)); Id = id; Choices = choices.ToList(); @@ -115,3 +113,4 @@ internal CreateChatCompletionResponse() public CompletionUsage Usage { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs index 8663dced6..5cd85183e 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - 
public partial class CreateChatCompletionResponseChoice : IUtf8JsonWriteable, IJsonModel + public partial class CreateChatCompletionResponseChoice : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -156,13 +152,6 @@ internal static CreateChatCompletionResponseChoice FromResponse(PipelineResponse using var document = JsonDocument.Parse(response.Content); return DeserializeCreateChatCompletionResponseChoice(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs index 7a5b8c5c9..17342fc9d 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -57,7 +55,7 @@ public partial class CreateChatCompletionResponseChoice /// is null. 
internal CreateChatCompletionResponseChoice(CreateChatCompletionResponseChoiceFinishReason finishReason, long index, ChatCompletionResponseMessage message, CreateChatCompletionResponseChoiceLogprobs logprobs) { - ClientUtilities.AssertNotNull(message, nameof(message)); + if (message is null) throw new ArgumentNullException(nameof(message)); FinishReason = finishReason; Index = index; @@ -107,3 +105,4 @@ internal CreateChatCompletionResponseChoice() public CreateChatCompletionResponseChoiceLogprobs Logprobs { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs index 65e41895b..e1e180f28 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -55,3 +53,4 @@ public CreateChatCompletionResponseChoiceFinishReason(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs index 8893ed290..6e7b9371a 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateChatCompletionResponseChoiceLogprobs : IUtf8JsonWriteable, IJsonModel + public partial class CreateChatCompletionResponseChoiceLogprobs : IJsonModel { - void 
IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -142,13 +138,6 @@ internal static CreateChatCompletionResponseChoiceLogprobs FromResponse(Pipeline using var document = JsonDocument.Parse(response.Content); return DeserializeCreateChatCompletionResponseChoiceLogprobs(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.cs index 36cc66621..db7e2ef1d 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.Collections.Generic; using System.Linq; @@ -68,3 +66,4 @@ internal CreateChatCompletionResponseChoiceLogprobs() public IReadOnlyList Content { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs index 3e2747865..e95f7fff6 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public CreateChatCompletionResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs 
b/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs index 9d1be276a..e729592b7 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateCompletionRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateCompletionRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -497,13 +493,6 @@ internal static CreateCompletionRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateCompletionRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateCompletionRequest.cs b/.dotnet/src/Generated/Models/CreateCompletionRequest.cs index 7797fb2a3..65ce00804 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionRequest.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -399,3 +397,4 @@ internal CreateCompletionRequest() public string User { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs b/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs index 885dde0a4..f0afae0c4 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -49,3 +47,4 @@ public CreateCompletionRequestModel(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs index 3afa5cb8c..17ea98286 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateCompletionResponse : IUtf8JsonWriteable, IJsonModel + public partial class CreateCompletionResponse : IJsonModel { - void 
IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -188,13 +184,6 @@ internal static CreateCompletionResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateCompletionResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponse.cs b/.dotnet/src/Generated/Models/CreateCompletionResponse.cs index 0d47cab50..944f32b56 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponse.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -55,9 +53,9 @@ public partial class CreateCompletionResponse /// , or is null. 
internal CreateCompletionResponse(string id, IEnumerable choices, DateTimeOffset created, string model) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(choices, nameof(choices)); - ClientUtilities.AssertNotNull(model, nameof(model)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (choices is null) throw new ArgumentNullException(nameof(choices)); + if (model is null) throw new ArgumentNullException(nameof(model)); Id = id; Choices = choices.ToList(); @@ -118,3 +116,4 @@ internal CreateCompletionResponse() public CompletionUsage Usage { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs index d9f0caf93..0f9206ba2 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateCompletionResponseChoice : IUtf8JsonWriteable, IJsonModel + public partial class CreateCompletionResponseChoice : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -156,13 +152,6 @@ internal static CreateCompletionResponseChoice FromResponse(PipelineResponse res using var document = JsonDocument.Parse(response.Content); return DeserializeCreateCompletionResponseChoice(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs index b38455976..a0f883635 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -57,7 +55,7 @@ public partial class CreateCompletionResponseChoice /// is null. internal CreateCompletionResponseChoice(long index, string text, CreateCompletionResponseChoiceLogprobs logprobs, CreateCompletionResponseChoiceFinishReason finishReason) { - ClientUtilities.AssertNotNull(text, nameof(text)); + if (text is null) throw new ArgumentNullException(nameof(text)); Index = index; Text = text; @@ -107,3 +105,4 @@ internal CreateCompletionResponseChoice() public CreateCompletionResponseChoiceFinishReason FinishReason { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs index 5071c4a7a..d4ee35578 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -49,3 +47,4 @@ public CreateCompletionResponseChoiceFinishReason(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs index 89dcafec8..b3ba1559f 
100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateCompletionResponseChoiceLogprobs : IUtf8JsonWriteable, IJsonModel + public partial class CreateCompletionResponseChoiceLogprobs : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -207,13 +203,6 @@ internal static CreateCompletionResponseChoiceLogprobs FromResponse(PipelineResp using var document = JsonDocument.Parse(response.Content); return DeserializeCreateCompletionResponseChoiceLogprobs(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs index 5b88d866b..598ec5916 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -52,10 +50,10 @@ public partial class CreateCompletionResponseChoiceLogprobs /// , , or is null. internal CreateCompletionResponseChoiceLogprobs(IEnumerable tokens, IEnumerable tokenLogprobs, IEnumerable> topLogprobs, IEnumerable textOffset) { - ClientUtilities.AssertNotNull(tokens, nameof(tokens)); - ClientUtilities.AssertNotNull(tokenLogprobs, nameof(tokenLogprobs)); - ClientUtilities.AssertNotNull(topLogprobs, nameof(topLogprobs)); - ClientUtilities.AssertNotNull(textOffset, nameof(textOffset)); + if (tokens is null) throw new ArgumentNullException(nameof(tokens)); + if (tokenLogprobs is null) throw new ArgumentNullException(nameof(tokenLogprobs)); + if (topLogprobs is null) throw new ArgumentNullException(nameof(topLogprobs)); + if (textOffset is null) throw new ArgumentNullException(nameof(textOffset)); Tokens = tokens.ToList(); TokenLogprobs = tokenLogprobs.ToList(); @@ -93,3 +91,4 @@ internal CreateCompletionResponseChoiceLogprobs() public IReadOnlyList TextOffset { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs index 3185d279a..24c670201 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs +++ 
b/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public CreateCompletionResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs index 4601bcc63..7a8ff235b 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateEmbeddingRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateEmbeddingRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -176,13 +172,6 @@ internal static CreateEmbeddingRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateEmbeddingRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs index 770cc70db..c2a286d4c 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -60,7 +58,7 @@ public partial class CreateEmbeddingRequest /// is null. public CreateEmbeddingRequest(BinaryData input, CreateEmbeddingRequestModel model) { - ClientUtilities.AssertNotNull(input, nameof(input)); + if (input is null) throw new ArgumentNullException(nameof(input)); Input = input; Model = model; @@ -184,3 +182,4 @@ internal CreateEmbeddingRequest() public string User { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs index 23debf648..e252744e4 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public CreateEmbeddingRequestEncodingFormat(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs index 1b65ef4e0..628de9b7b 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -49,3 +47,4 @@ public 
CreateEmbeddingRequestModel(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs index 78595eb7e..84e854405 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateEmbeddingResponse : IUtf8JsonWriteable, IJsonModel + public partial class CreateEmbeddingResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -154,13 +150,6 @@ internal static CreateEmbeddingResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateEmbeddingResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs index bf7e741ca..ce1b35c17 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -51,9 +49,9 @@ public partial class CreateEmbeddingResponse /// , or is null. internal CreateEmbeddingResponse(IEnumerable data, string model, CreateEmbeddingResponseUsage usage) { - ClientUtilities.AssertNotNull(data, nameof(data)); - ClientUtilities.AssertNotNull(model, nameof(model)); - ClientUtilities.AssertNotNull(usage, nameof(usage)); + if (data is null) throw new ArgumentNullException(nameof(data)); + if (model is null) throw new ArgumentNullException(nameof(model)); + if (usage is null) throw new ArgumentNullException(nameof(usage)); Data = data.ToList(); Model = model; @@ -91,3 +89,4 @@ internal CreateEmbeddingResponse() public CreateEmbeddingResponseUsage Usage { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs index 82ba92eec..b98b10317 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public CreateEmbeddingResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.Serialization.cs 
b/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.Serialization.cs index c662893ed..2b10317c0 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateEmbeddingResponseUsage : IUtf8JsonWriteable, IJsonModel + public partial class CreateEmbeddingResponseUsage : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -128,13 +124,6 @@ internal static CreateEmbeddingResponseUsage FromResponse(PipelineResponse respo using var document = JsonDocument.Parse(response.Content); return DeserializeCreateEmbeddingResponseUsage(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.cs index d21b5b9e9..bafd0284c 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.Collections.Generic; @@ -73,3 +71,4 @@ internal CreateEmbeddingResponseUsage() public long TotalTokens { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs index 6f2012c20..f7d3ba503 100644 --- a/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateFileRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateFileRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -128,13 +124,6 @@ internal static CreateFileRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateFileRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateFileRequest.cs b/.dotnet/src/Generated/Models/CreateFileRequest.cs index 9e10579a6..9475c30af 100644 --- a/.dotnet/src/Generated/Models/CreateFileRequest.cs +++ b/.dotnet/src/Generated/Models/CreateFileRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -54,7 +52,7 @@ public partial class CreateFileRequest /// is null. public CreateFileRequest(BinaryData file, CreateFileRequestPurpose purpose) { - ClientUtilities.AssertNotNull(file, nameof(file)); + if (file is null) throw new ArgumentNullException(nameof(file)); File = file; Purpose = purpose; @@ -107,3 +105,4 @@ internal CreateFileRequest() public CreateFileRequestPurpose Purpose { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs b/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs index 866087c3b..51a939757 100644 --- a/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs +++ b/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public CreateFileRequestPurpose(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs index 6d26b67fc..08d63808e 100644 --- a/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using 
System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateFineTuningJobRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateFineTuningJobRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -189,13 +185,6 @@ internal static CreateFineTuningJobRequest FromResponse(PipelineResponse respons using var document = JsonDocument.Parse(response.Content); return DeserializeCreateFineTuningJobRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs index 7157e813c..618e6069d 100644 --- a/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -61,7 +59,7 @@ public partial class CreateFineTuningJobRequest /// is null. 
public CreateFineTuningJobRequest(string trainingFile, CreateFineTuningJobRequestModel model) { - ClientUtilities.AssertNotNull(trainingFile, nameof(trainingFile)); + if (trainingFile is null) throw new ArgumentNullException(nameof(trainingFile)); TrainingFile = trainingFile; Model = model; @@ -157,3 +155,4 @@ internal CreateFineTuningJobRequest() public string Suffix { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs index de057600c..98b42074d 100644 --- a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateFineTuningJobRequestHyperparameters : IUtf8JsonWriteable, IJsonModel + public partial class CreateFineTuningJobRequestHyperparameters : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -134,13 +130,6 @@ internal static CreateFineTuningJobRequestHyperparameters FromResponse(PipelineR using var document = JsonDocument.Parse(response.Content); return DeserializeCreateFineTuningJobRequestHyperparameters(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs index 258881cb0..8094fc378 100644 --- a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.Collections.Generic; @@ -104,3 +102,4 @@ internal CreateFineTuningJobRequestHyperparameters(BinaryData nEpochs, IDictiona public BinaryData NEpochs { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs index ef3c6ec0c..e36868cc4 100644 --- a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -49,3 +47,4 @@ public CreateFineTuningJobRequestModel(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs index cc8938c1f..9de7cccac 100644 --- a/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateImageEditRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateImageEditRequest : 
IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -222,13 +218,6 @@ internal static CreateImageEditRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateImageEditRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequest.cs b/.dotnet/src/Generated/Models/CreateImageEditRequest.cs index 12f245f91..5b78b294b 100644 --- a/.dotnet/src/Generated/Models/CreateImageEditRequest.cs +++ b/.dotnet/src/Generated/Models/CreateImageEditRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -52,8 +50,8 @@ public partial class CreateImageEditRequest /// or is null. 
public CreateImageEditRequest(BinaryData image, string prompt) { - ClientUtilities.AssertNotNull(image, nameof(image)); - ClientUtilities.AssertNotNull(prompt, nameof(prompt)); + if (image is null) throw new ArgumentNullException(nameof(image)); + if (prompt is null) throw new ArgumentNullException(nameof(prompt)); Image = image; Prompt = prompt; @@ -151,3 +149,4 @@ internal CreateImageEditRequest() public string User { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs b/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs index 152466ce3..946936edb 100644 --- a/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public CreateImageEditRequestModel(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs index 2dfc96644..b398ca63d 100644 --- a/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public CreateImageEditRequestResponseFormat(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs b/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs index 01c9a4c45..8e644e852 100644 --- a/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs +++ b/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -49,3 +47,4 @@ public CreateImageEditRequestSize(string value) public override string ToString() => _value; } } + diff --git 
a/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs index f790166ff..74b4555fc 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateImageRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateImageRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -229,13 +225,6 @@ internal static CreateImageRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateImageRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateImageRequest.cs b/.dotnet/src/Generated/Models/CreateImageRequest.cs index bde1c78ba..0ddf94adb 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequest.cs +++ b/.dotnet/src/Generated/Models/CreateImageRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -51,7 +49,7 @@ public partial class CreateImageRequest /// is null. 
public CreateImageRequest(string prompt) { - ClientUtilities.AssertNotNull(prompt, nameof(prompt)); + if (prompt is null) throw new ArgumentNullException(nameof(prompt)); Prompt = prompt; } @@ -140,3 +138,4 @@ internal CreateImageRequest() public string User { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateImageRequestModel.cs b/.dotnet/src/Generated/Models/CreateImageRequestModel.cs index 46d224028..e6401986c 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateImageRequestModel.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public CreateImageRequestModel(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs b/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs index e3a737c23..236be9ebf 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs +++ b/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public CreateImageRequestQuality(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs index 5990ab315..e7f95d5ee 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public CreateImageRequestResponseFormat(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateImageRequestSize.cs b/.dotnet/src/Generated/Models/CreateImageRequestSize.cs index df73f6aac..091e4d3b0 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequestSize.cs 
+++ b/.dotnet/src/Generated/Models/CreateImageRequestSize.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -55,3 +53,4 @@ public CreateImageRequestSize(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs b/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs index a711e0635..98f67425f 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs +++ b/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public CreateImageRequestStyle(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs index bd3a103fb..642a6c439 100644 --- a/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateImageVariationRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateImageVariationRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -199,13 +195,6 @@ internal static CreateImageVariationRequest FromResponse(PipelineResponse respon using var document = JsonDocument.Parse(response.Content); return DeserializeCreateImageVariationRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs index f1c52762f..a034e2faa 100644 --- a/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -51,7 +49,7 @@ public partial class CreateImageVariationRequest /// is null. 
public CreateImageVariationRequest(BinaryData image) { - ClientUtilities.AssertNotNull(image, nameof(image)); + if (image is null) throw new ArgumentNullException(nameof(image)); Image = image; } @@ -119,3 +117,4 @@ internal CreateImageVariationRequest() public string User { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs index e1fb70037..d40886648 100644 --- a/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public CreateImageVariationRequestModel(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs index 2c384bb1f..94f353a25 100644 --- a/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public CreateImageVariationRequestResponseFormat(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs index 50f61d60c..0374c3214 100644 --- a/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -49,3 +47,4 @@ public CreateImageVariationRequestSize(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs 
b/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs index f8cdabd45..3386db660 100644 --- a/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateMessageRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateMessageRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -186,13 +182,6 @@ internal static CreateMessageRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateMessageRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateMessageRequest.cs b/.dotnet/src/Generated/Models/CreateMessageRequest.cs index 63200e048..7d2855a9f 100644 --- a/.dotnet/src/Generated/Models/CreateMessageRequest.cs +++ b/.dotnet/src/Generated/Models/CreateMessageRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -48,7 +46,7 @@ public partial class CreateMessageRequest /// is null. 
public CreateMessageRequest(string content) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); Content = content; FileIds = new OptionalList(); @@ -102,3 +100,4 @@ internal CreateMessageRequest() public IDictionary Metadata { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs b/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs index 2f7ae075a..f5be49786 100644 --- a/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs +++ b/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public CreateMessageRequestRole(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs index d7d7cac14..50c0cf4f7 100644 --- a/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateModerationRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateModerationRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -142,13 +138,6 @@ internal static CreateModerationRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateModerationRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateModerationRequest.cs b/.dotnet/src/Generated/Models/CreateModerationRequest.cs index 3192ceea7..b92114f96 100644 --- a/.dotnet/src/Generated/Models/CreateModerationRequest.cs +++ b/.dotnet/src/Generated/Models/CreateModerationRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -48,7 +46,7 @@ public partial class CreateModerationRequest /// is null. public CreateModerationRequest(BinaryData input) { - ClientUtilities.AssertNotNull(input, nameof(input)); + if (input is null) throw new ArgumentNullException(nameof(input)); Input = input; } @@ -127,3 +125,4 @@ internal CreateModerationRequest() public CreateModerationRequestModel? 
Model { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs b/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs index fcc067e02..af410f3a0 100644 --- a/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public CreateModerationRequestModel(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs index 95902ce14..1d3fd1188 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateModerationResponse : IUtf8JsonWriteable, IJsonModel + public partial class CreateModerationResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -146,13 +142,6 @@ internal static CreateModerationResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateModerationResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateModerationResponse.cs b/.dotnet/src/Generated/Models/CreateModerationResponse.cs index 7fe06f011..caf0c4858 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponse.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -51,9 +49,9 @@ public partial class CreateModerationResponse /// , or is null. internal CreateModerationResponse(string id, string model, IEnumerable results) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(model, nameof(model)); - ClientUtilities.AssertNotNull(results, nameof(results)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (model is null) throw new ArgumentNullException(nameof(model)); + if (results is null) throw new ArgumentNullException(nameof(results)); Id = id; Model = model; @@ -86,3 +84,4 @@ internal CreateModerationResponse() public IReadOnlyList Results { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs index 6c56b94e2..edabb3d30 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateModerationResponseResult : IUtf8JsonWriteable, IJsonModel + 
public partial class CreateModerationResponseResult : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -136,13 +132,6 @@ internal static CreateModerationResponseResult FromResponse(PipelineResponse res using var document = JsonDocument.Parse(response.Content); return DeserializeCreateModerationResponseResult(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs index 3842b8365..d3abebbc3 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -50,8 +48,8 @@ public partial class CreateModerationResponseResult /// or is null. 
internal CreateModerationResponseResult(bool flagged, CreateModerationResponseResultCategories categories, CreateModerationResponseResultCategoryScores categoryScores) { - ClientUtilities.AssertNotNull(categories, nameof(categories)); - ClientUtilities.AssertNotNull(categoryScores, nameof(categoryScores)); + if (categories is null) throw new ArgumentNullException(nameof(categories)); + if (categoryScores is null) throw new ArgumentNullException(nameof(categoryScores)); Flagged = flagged; Categories = categories; @@ -84,3 +82,4 @@ internal CreateModerationResponseResult() public CreateModerationResponseResultCategoryScores CategoryScores { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs index 53595dc4a..5dce74b53 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateModerationResponseResultCategories : IUtf8JsonWriteable, IJsonModel + public partial class CreateModerationResponseResultCategories : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -200,13 +196,6 @@ internal static CreateModerationResponseResultCategories FromResponse(PipelineRe using var document = JsonDocument.Parse(response.Content); return DeserializeCreateModerationResponseResultCategories(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs index fdb93fb6c..8af6aa519 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.Collections.Generic; @@ -187,3 +185,4 @@ internal CreateModerationResponseResultCategories() public bool ViolenceGraphic { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs index 2c169bdec..50b22a9d6 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateModerationResponseResultCategoryScores : IUtf8JsonWriteable, IJsonModel + public partial class CreateModerationResponseResultCategoryScores : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) 
=> ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -200,13 +196,6 @@ internal static CreateModerationResponseResultCategoryScores FromResponse(Pipeli using var document = JsonDocument.Parse(response.Content); return DeserializeCreateModerationResponseResultCategoryScores(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs index 2e5247ddb..0ee6e8d5a 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.Collections.Generic; @@ -127,3 +125,4 @@ internal CreateModerationResponseResultCategoryScores() public double ViolenceGraphic { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs index 33e243bc6..4eda947a7 100644 --- a/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateRunRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateRunRequest : IJsonModel { - 
void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -273,13 +269,6 @@ internal static CreateRunRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateRunRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateRunRequest.cs b/.dotnet/src/Generated/Models/CreateRunRequest.cs index 25e22310d..ea2ac5123 100644 --- a/.dotnet/src/Generated/Models/CreateRunRequest.cs +++ b/.dotnet/src/Generated/Models/CreateRunRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -48,7 +46,7 @@ public partial class CreateRunRequest /// is null. 
public CreateRunRequest(string assistantId) { - ClientUtilities.AssertNotNull(assistantId, nameof(assistantId)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); AssistantId = assistantId; Tools = new OptionalList(); @@ -154,3 +152,4 @@ internal CreateRunRequest() public IDictionary Metadata { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs index 299ac0668..f2216a6e1 100644 --- a/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateSpeechRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateSpeechRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -166,13 +162,6 @@ internal static CreateSpeechRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateSpeechRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequest.cs b/.dotnet/src/Generated/Models/CreateSpeechRequest.cs index ac765143c..eb2c71ae0 100644 --- a/.dotnet/src/Generated/Models/CreateSpeechRequest.cs +++ b/.dotnet/src/Generated/Models/CreateSpeechRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -54,7 +52,7 @@ public partial class CreateSpeechRequest /// is null. public CreateSpeechRequest(CreateSpeechRequestModel model, string input, CreateSpeechRequestVoice voice) { - ClientUtilities.AssertNotNull(input, nameof(input)); + if (input is null) throw new ArgumentNullException(nameof(input)); Model = model; Input = input; @@ -103,3 +101,4 @@ internal CreateSpeechRequest() public double? 
Speed { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs b/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs index 0db564d08..590078a92 100644 --- a/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public CreateSpeechRequestModel(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs index ca5b8462f..4694fe9d7 100644 --- a/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -52,3 +50,4 @@ public CreateSpeechRequestResponseFormat(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs b/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs index db9af9d9d..e764eed85 100644 --- a/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs +++ b/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -58,3 +56,4 @@ public CreateSpeechRequestVoice(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs index 8710e7c9a..384147a67 100644 --- a/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using 
OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateThreadAndRunRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateThreadAndRunRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -265,13 +261,6 @@ internal static CreateThreadAndRunRequest FromResponse(PipelineResponse response using var document = JsonDocument.Parse(response.Content); return DeserializeCreateThreadAndRunRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs index cffca99a7..b212c90c5 100644 --- a/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs +++ b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -48,7 +46,7 @@ public partial class CreateThreadAndRunRequest /// is null. 
public CreateThreadAndRunRequest(string assistantId) { - ClientUtilities.AssertNotNull(assistantId, nameof(assistantId)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); AssistantId = assistantId; Tools = new OptionalList(); @@ -148,3 +146,4 @@ internal CreateThreadAndRunRequest() public IDictionary Metadata { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs index e321794d2..b899f1a9d 100644 --- a/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateThreadRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateThreadRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -170,13 +166,6 @@ internal static CreateThreadRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateThreadRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateThreadRequest.cs b/.dotnet/src/Generated/Models/CreateThreadRequest.cs index b59545b62..a31999cb7 100644 --- a/.dotnet/src/Generated/Models/CreateThreadRequest.cs +++ b/.dotnet/src/Generated/Models/CreateThreadRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -75,3 +73,4 @@ internal CreateThreadRequest(IList messages, IDictionary Metadata { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs index 8b89df137..8aa3b8a97 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateTranscriptionRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateTranscriptionRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -180,13 +176,6 @@ internal static CreateTranscriptionRequest FromResponse(PipelineResponse respons using var document = JsonDocument.Parse(response.Content); return DeserializeCreateTranscriptionRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs index 74d9d5ebd..7cccd2c15 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -52,7 +50,7 @@ public partial class CreateTranscriptionRequest /// is null. public CreateTranscriptionRequest(BinaryData file, CreateTranscriptionRequestModel model) { - ClientUtilities.AssertNotNull(file, nameof(file)); + if (file is null) throw new ArgumentNullException(nameof(file)); File = file; Model = model; @@ -145,3 +143,4 @@ internal CreateTranscriptionRequest() public double? 
Temperature { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs index d3c46e425..c4639fe72 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public CreateTranscriptionRequestModel(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs index 354321879..1eb2b8b48 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -55,3 +53,4 @@ public CreateTranscriptionRequestResponseFormat(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs index 7ebe36e6c..0052a0f0b 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateTranscriptionResponse : IUtf8JsonWriteable, IJsonModel + public partial class CreateTranscriptionResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new 
ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -186,13 +182,6 @@ internal static CreateTranscriptionResponse FromResponse(PipelineResponse respon using var document = JsonDocument.Parse(response.Content); return DeserializeCreateTranscriptionResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs index c526f24a1..47f9428ca 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -48,7 +46,7 @@ public partial class CreateTranscriptionResponse /// is null. 
internal CreateTranscriptionResponse(string text) { - ClientUtilities.AssertNotNull(text, nameof(text)); + if (text is null) throw new ArgumentNullException(nameof(text)); Text = text; Segments = new OptionalList(); @@ -94,3 +92,4 @@ internal CreateTranscriptionResponse() public IReadOnlyList Segments { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs b/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs index 2cf79c9ae..45bb38166 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public CreateTranscriptionResponseTask(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs index 2e29df2c7..2b415a177 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateTranslationRequest : IUtf8JsonWriteable, IJsonModel + public partial class CreateTranslationRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -169,13 +165,6 @@ internal static CreateTranslationRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeCreateTranslationRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequest.cs b/.dotnet/src/Generated/Models/CreateTranslationRequest.cs index 6ee7db49a..fa5224a3e 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationRequest.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -52,7 +50,7 @@ public partial class CreateTranslationRequest /// is null. public CreateTranslationRequest(BinaryData file, CreateTranslationRequestModel model) { - ClientUtilities.AssertNotNull(file, nameof(file)); + if (file is null) throw new ArgumentNullException(nameof(file)); File = file; Model = model; @@ -133,3 +131,4 @@ internal CreateTranslationRequest() public double? 
Temperature { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs b/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs index b9d4c7076..10536d253 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public CreateTranslationRequestModel(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs index b24279e3c..7c9a6ff50 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -55,3 +53,4 @@ public CreateTranslationRequestResponseFormat(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs index 917fbfd99..10254dee9 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class CreateTranslationResponse : IUtf8JsonWriteable, IJsonModel + public partial class CreateTranslationResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void 
IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -186,13 +182,6 @@ internal static CreateTranslationResponse FromResponse(PipelineResponse response using var document = JsonDocument.Parse(response.Content); return DeserializeCreateTranslationResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/CreateTranslationResponse.cs b/.dotnet/src/Generated/Models/CreateTranslationResponse.cs index c6c9ea458..bc4f03097 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationResponse.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -48,7 +46,7 @@ public partial class CreateTranslationResponse /// is null. 
internal CreateTranslationResponse(string text) { - ClientUtilities.AssertNotNull(text, nameof(text)); + if (text is null) throw new ArgumentNullException(nameof(text)); Text = text; Segments = new OptionalList(); @@ -94,3 +92,4 @@ internal CreateTranslationResponse() public IReadOnlyList Segments { get; } } } + diff --git a/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs b/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs index 4104c9b1c..fe24f6343 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public CreateTranslationResponseTask(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs index 2f92de865..997b39d80 100644 --- a/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class DeleteAssistantFileResponse : IUtf8JsonWriteable, IJsonModel + public partial class DeleteAssistantFileResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -136,13 +132,6 @@ internal static DeleteAssistantFileResponse FromResponse(PipelineResponse respon using var document = JsonDocument.Parse(response.Content); return DeserializeDeleteAssistantFileResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs index 478d175d2..2172613e2 100644 --- a/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs +++ b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -52,7 +50,7 @@ public partial class DeleteAssistantFileResponse /// is null. 
internal DeleteAssistantFileResponse(string id, bool deleted) { - ClientUtilities.AssertNotNull(id, nameof(id)); + if (id is null) throw new ArgumentNullException(nameof(id)); Id = id; Deleted = deleted; @@ -84,3 +82,4 @@ internal DeleteAssistantFileResponse() public DeleteAssistantFileResponseObject Object { get; } = DeleteAssistantFileResponseObject.AssistantFileDeleted; } } + diff --git a/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs b/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs index 8f9d146db..df0a1e5a6 100644 --- a/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs +++ b/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public DeleteAssistantFileResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs index da95808b9..7b575f777 100644 --- a/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class DeleteAssistantResponse : IUtf8JsonWriteable, IJsonModel + public partial class DeleteAssistantResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -136,13 +132,6 @@ internal static DeleteAssistantResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeDeleteAssistantResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs b/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs index 059769a28..4ce652ffd 100644 --- a/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs +++ b/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -49,7 +47,7 @@ public partial class DeleteAssistantResponse /// is null. 
internal DeleteAssistantResponse(string id, bool deleted) { - ClientUtilities.AssertNotNull(id, nameof(id)); + if (id is null) throw new ArgumentNullException(nameof(id)); Id = id; Deleted = deleted; @@ -81,3 +79,4 @@ internal DeleteAssistantResponse() public DeleteAssistantResponseObject Object { get; } = DeleteAssistantResponseObject.AssistantDeleted; } } + diff --git a/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs b/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs index 7c80077ca..4ce063190 100644 --- a/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs +++ b/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public DeleteAssistantResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs index 6ed78cf28..21416fc19 100644 --- a/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class DeleteFileResponse : IUtf8JsonWriteable, IJsonModel + public partial class DeleteFileResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -136,13 +132,6 @@ internal static DeleteFileResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeDeleteFileResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/DeleteFileResponse.cs b/.dotnet/src/Generated/Models/DeleteFileResponse.cs index 202cb2637..06777ae53 100644 --- a/.dotnet/src/Generated/Models/DeleteFileResponse.cs +++ b/.dotnet/src/Generated/Models/DeleteFileResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -49,7 +47,7 @@ public partial class DeleteFileResponse /// is null. 
internal DeleteFileResponse(string id, bool deleted) { - ClientUtilities.AssertNotNull(id, nameof(id)); + if (id is null) throw new ArgumentNullException(nameof(id)); Id = id; Deleted = deleted; @@ -82,3 +80,4 @@ internal DeleteFileResponse() public bool Deleted { get; } } } + diff --git a/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs b/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs index 8b8080a6a..3f25d59f3 100644 --- a/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs +++ b/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public DeleteFileResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs index a16d26d8b..e1865ef13 100644 --- a/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class DeleteModelResponse : IUtf8JsonWriteable, IJsonModel + public partial class DeleteModelResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -136,13 +132,6 @@ internal static DeleteModelResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeDeleteModelResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/DeleteModelResponse.cs b/.dotnet/src/Generated/Models/DeleteModelResponse.cs index fc8a2790b..96f122d77 100644 --- a/.dotnet/src/Generated/Models/DeleteModelResponse.cs +++ b/.dotnet/src/Generated/Models/DeleteModelResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -49,7 +47,7 @@ public partial class DeleteModelResponse /// is null. 
internal DeleteModelResponse(string id, bool deleted) { - ClientUtilities.AssertNotNull(id, nameof(id)); + if (id is null) throw new ArgumentNullException(nameof(id)); Id = id; Deleted = deleted; @@ -81,3 +79,4 @@ internal DeleteModelResponse() public DeleteModelResponseObject Object { get; } = DeleteModelResponseObject.Model; } } + diff --git a/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs b/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs index ec25f197b..25f957c69 100644 --- a/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs +++ b/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public DeleteModelResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs index f287add9c..b6c04e508 100644 --- a/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class DeleteThreadResponse : IUtf8JsonWriteable, IJsonModel + public partial class DeleteThreadResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -136,13 +132,6 @@ internal static DeleteThreadResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeDeleteThreadResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/DeleteThreadResponse.cs b/.dotnet/src/Generated/Models/DeleteThreadResponse.cs index 740de89a5..368b9ff80 100644 --- a/.dotnet/src/Generated/Models/DeleteThreadResponse.cs +++ b/.dotnet/src/Generated/Models/DeleteThreadResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -49,7 +47,7 @@ public partial class DeleteThreadResponse /// is null. 
internal DeleteThreadResponse(string id, bool deleted) { - ClientUtilities.AssertNotNull(id, nameof(id)); + if (id is null) throw new ArgumentNullException(nameof(id)); Id = id; Deleted = deleted; @@ -81,3 +79,4 @@ internal DeleteThreadResponse() public DeleteThreadResponseObject Object { get; } = DeleteThreadResponseObject.ThreadDeleted; } } + diff --git a/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs b/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs index 87838dd02..7e4b34167 100644 --- a/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs +++ b/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public DeleteThreadResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/Embedding.Serialization.cs b/.dotnet/src/Generated/Models/Embedding.Serialization.cs index cdb14b425..b3506e51c 100644 --- a/.dotnet/src/Generated/Models/Embedding.Serialization.cs +++ b/.dotnet/src/Generated/Models/Embedding.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class Embedding : IUtf8JsonWriteable, IJsonModel + public partial class Embedding : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -143,13 +139,6 @@ internal static Embedding FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeEmbedding(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/Embedding.cs b/.dotnet/src/Generated/Models/Embedding.cs index 40eba7486..ecabc7df9 100644 --- a/.dotnet/src/Generated/Models/Embedding.cs +++ b/.dotnet/src/Generated/Models/Embedding.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -52,7 +50,7 @@ public partial class Embedding /// is null. internal Embedding(long index, BinaryData embeddingProperty) { - ClientUtilities.AssertNotNull(embeddingProperty, nameof(embeddingProperty)); + if (embeddingProperty is null) throw new ArgumentNullException(nameof(embeddingProperty)); Index = index; EmbeddingProperty = embeddingProperty; @@ -128,3 +126,4 @@ internal Embedding() public EmbeddingObject Object { get; } = EmbeddingObject.Embedding; } } + diff --git a/.dotnet/src/Generated/Models/EmbeddingObject.cs b/.dotnet/src/Generated/Models/EmbeddingObject.cs index 1a6df67a6..2cc2f012c 100644 --- a/.dotnet/src/Generated/Models/EmbeddingObject.cs +++ b/.dotnet/src/Generated/Models/EmbeddingObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public EmbeddingObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs index a7b9c02b7..930aa5fc5 100644 --- 
a/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs +++ b/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class FineTuningJob : IUtf8JsonWriteable, IJsonModel + public partial class FineTuningJob : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -294,13 +290,6 @@ internal static FineTuningJob FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeFineTuningJob(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/FineTuningJob.cs b/.dotnet/src/Generated/Models/FineTuningJob.cs index 0203658aa..773d90651 100644 --- a/.dotnet/src/Generated/Models/FineTuningJob.cs +++ b/.dotnet/src/Generated/Models/FineTuningJob.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -88,12 +86,12 @@ public partial class FineTuningJob /// , , , , or is null. internal FineTuningJob(string id, DateTimeOffset createdAt, DateTimeOffset? 
finishedAt, string model, string fineTunedModel, string organizationId, FineTuningJobStatus status, FineTuningJobHyperparameters hyperparameters, string trainingFile, string validationFile, IEnumerable resultFiles, long? trainedTokens, FineTuningJobError error) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(model, nameof(model)); - ClientUtilities.AssertNotNull(organizationId, nameof(organizationId)); - ClientUtilities.AssertNotNull(hyperparameters, nameof(hyperparameters)); - ClientUtilities.AssertNotNull(trainingFile, nameof(trainingFile)); - ClientUtilities.AssertNotNull(resultFiles, nameof(resultFiles)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (model is null) throw new ArgumentNullException(nameof(model)); + if (organizationId is null) throw new ArgumentNullException(nameof(organizationId)); + if (hyperparameters is null) throw new ArgumentNullException(nameof(hyperparameters)); + if (trainingFile is null) throw new ArgumentNullException(nameof(trainingFile)); + if (resultFiles is null) throw new ArgumentNullException(nameof(resultFiles)); Id = id; CreatedAt = createdAt; @@ -235,3 +233,4 @@ internal FineTuningJob() public FineTuningJobError Error { get; } } } + diff --git a/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs index e586868e7..6bfe0c908 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class FineTuningJobError : IUtf8JsonWriteable, IJsonModel + public partial class FineTuningJobError : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter 
writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -157,13 +153,6 @@ internal static FineTuningJobError FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeFineTuningJobError(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/FineTuningJobError.cs b/.dotnet/src/Generated/Models/FineTuningJobError.cs index 9b5b27e34..98268009c 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobError.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobError.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.Collections.Generic; @@ -74,3 +72,4 @@ internal FineTuningJobError(string message, string code, string param, IDictiona public string Param { get; } } } + diff --git a/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs index 20bb7a0fa..f61e577d3 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class FineTuningJobEvent : IUtf8JsonWriteable, IJsonModel + public partial class FineTuningJobEvent : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new 
ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -152,13 +148,6 @@ internal static FineTuningJobEvent FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeFineTuningJobEvent(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/FineTuningJobEvent.cs b/.dotnet/src/Generated/Models/FineTuningJobEvent.cs index a65270a76..1ed3dacf3 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobEvent.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobEvent.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -52,9 +50,9 @@ public partial class FineTuningJobEvent /// , or is null. 
internal FineTuningJobEvent(string id, string @object, DateTimeOffset createdAt, FineTuningJobEventLevel level, string message) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(@object, nameof(@object)); - ClientUtilities.AssertNotNull(message, nameof(message)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (@object is null) throw new ArgumentNullException(nameof(@object)); + if (message is null) throw new ArgumentNullException(nameof(message)); Id = id; Object = @object; @@ -97,3 +95,4 @@ internal FineTuningJobEvent() public string Message { get; } } } + diff --git a/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs b/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs index 7ac7da4c5..1e0ae3a85 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -49,3 +47,4 @@ public FineTuningJobEventLevel(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs index 8e4802d8a..3b4a5c3aa 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class FineTuningJobHyperparameters : IUtf8JsonWriteable, IJsonModel + public partial class FineTuningJobHyperparameters : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void 
IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -134,13 +130,6 @@ internal static FineTuningJobHyperparameters FromResponse(PipelineResponse respo using var document = JsonDocument.Parse(response.Content); return DeserializeFineTuningJobHyperparameters(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs index efcb91d25..830aef443 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.Collections.Generic; @@ -110,3 +108,4 @@ internal FineTuningJobHyperparameters(BinaryData nEpochs, IDictionary -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public FineTuningJobObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/FineTuningJobStatus.cs b/.dotnet/src/Generated/Models/FineTuningJobStatus.cs index 23924df4f..f9715b5b6 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobStatus.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobStatus.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -58,3 +56,4 @@ public FineTuningJobStatus(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs b/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs index 99d391fea..fec24f853 100644 --- a/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs +++ 
b/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class FunctionObject : IUtf8JsonWriteable, IJsonModel + public partial class FunctionObject : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -146,13 +142,6 @@ internal static FunctionObject FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeFunctionObject(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/FunctionObject.cs b/.dotnet/src/Generated/Models/FunctionObject.cs index 00e9c8cdb..ddd65ebfa 100644 --- a/.dotnet/src/Generated/Models/FunctionObject.cs +++ b/.dotnet/src/Generated/Models/FunctionObject.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -51,7 +49,7 @@ public partial class FunctionObject /// is null. 
public FunctionObject(string name) { - ClientUtilities.AssertNotNull(name, nameof(name)); + if (name is null) throw new ArgumentNullException(nameof(name)); Name = name; } @@ -94,3 +92,4 @@ internal FunctionObject() public FunctionParameters Parameters { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs b/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs index e6f167ed4..3d473765a 100644 --- a/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs +++ b/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class FunctionParameters : IUtf8JsonWriteable, IJsonModel + public partial class FunctionParameters : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -106,13 +102,6 @@ internal static FunctionParameters FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeFunctionParameters(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/FunctionParameters.cs b/.dotnet/src/Generated/Models/FunctionParameters.cs index ae3100e0d..8490bdc5f 100644 --- a/.dotnet/src/Generated/Models/FunctionParameters.cs +++ b/.dotnet/src/Generated/Models/FunctionParameters.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -63,3 +61,4 @@ internal FunctionParameters(IDictionary additionalProperties public IDictionary AdditionalProperties { get; } } } + diff --git a/.dotnet/src/Generated/Models/Image.Serialization.cs b/.dotnet/src/Generated/Models/Image.Serialization.cs index 209416ec1..dc41f82a8 100644 --- a/.dotnet/src/Generated/Models/Image.Serialization.cs +++ b/.dotnet/src/Generated/Models/Image.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class Image : IUtf8JsonWriteable, IJsonModel + public partial class Image : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -153,13 +149,6 @@ internal static Image FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeImage(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/Image.cs b/.dotnet/src/Generated/Models/Image.cs index a813097a2..7d6c304f2 100644 --- a/.dotnet/src/Generated/Models/Image.cs +++ b/.dotnet/src/Generated/Models/Image.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.Collections.Generic; @@ -83,3 +81,4 @@ internal Image(BinaryData b64Json, Uri url, string revisedPrompt, IDictionary -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ImagesResponse : IUtf8JsonWriteable, IJsonModel + public partial class ImagesResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -138,13 +134,6 @@ internal static ImagesResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeImagesResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ImagesResponse.cs b/.dotnet/src/Generated/Models/ImagesResponse.cs index d3c377be9..44b649f3f 100644 --- a/.dotnet/src/Generated/Models/ImagesResponse.cs +++ b/.dotnet/src/Generated/Models/ImagesResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -50,7 +48,7 @@ public partial class ImagesResponse /// is null. internal ImagesResponse(DateTimeOffset created, IEnumerable data) { - ClientUtilities.AssertNotNull(data, nameof(data)); + if (data is null) throw new ArgumentNullException(nameof(data)); Created = created; Data = data.ToList(); @@ -78,3 +76,4 @@ internal ImagesResponse() public IReadOnlyList Data { get; } } } + diff --git a/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs index d1bb15a51..6e6f9758b 100644 --- a/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ListAssistantFilesResponse : IUtf8JsonWriteable, IJsonModel + public partial class ListAssistantFilesResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -162,13 +158,6 @@ internal static ListAssistantFilesResponse FromResponse(PipelineResponse respons using var document = JsonDocument.Parse(response.Content); return DeserializeListAssistantFilesResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs index d155bf3e6..4537942a4 100644 --- a/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs +++ b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -52,9 +50,9 @@ public partial class ListAssistantFilesResponse /// , or is null. 
internal ListAssistantFilesResponse(IEnumerable data, string firstId, string lastId, bool hasMore) { - ClientUtilities.AssertNotNull(data, nameof(data)); - ClientUtilities.AssertNotNull(firstId, nameof(firstId)); - ClientUtilities.AssertNotNull(lastId, nameof(lastId)); + if (data is null) throw new ArgumentNullException(nameof(data)); + if (firstId is null) throw new ArgumentNullException(nameof(firstId)); + if (lastId is null) throw new ArgumentNullException(nameof(lastId)); Data = data.ToList(); FirstId = firstId; @@ -97,3 +95,4 @@ internal ListAssistantFilesResponse() public bool HasMore { get; } } } + diff --git a/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs b/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs index 5038ede26..4b8e72c7e 100644 --- a/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ListAssistantFilesResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs index f9c0b12b6..573a35240 100644 --- a/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ListAssistantsResponse : IUtf8JsonWriteable, IJsonModel + public partial class ListAssistantsResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - 
void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -162,13 +158,6 @@ internal static ListAssistantsResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeListAssistantsResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ListAssistantsResponse.cs b/.dotnet/src/Generated/Models/ListAssistantsResponse.cs index 002b27f3f..bf553f319 100644 --- a/.dotnet/src/Generated/Models/ListAssistantsResponse.cs +++ b/.dotnet/src/Generated/Models/ListAssistantsResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -52,9 +50,9 @@ public partial class ListAssistantsResponse /// , or is null. 
internal ListAssistantsResponse(IEnumerable data, string firstId, string lastId, bool hasMore) { - ClientUtilities.AssertNotNull(data, nameof(data)); - ClientUtilities.AssertNotNull(firstId, nameof(firstId)); - ClientUtilities.AssertNotNull(lastId, nameof(lastId)); + if (data is null) throw new ArgumentNullException(nameof(data)); + if (firstId is null) throw new ArgumentNullException(nameof(firstId)); + if (lastId is null) throw new ArgumentNullException(nameof(lastId)); Data = data.ToList(); FirstId = firstId; @@ -97,3 +95,4 @@ internal ListAssistantsResponse() public bool HasMore { get; } } } + diff --git a/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs b/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs index 5bbc18a2b..207fe09fa 100644 --- a/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ListAssistantsResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs index a580904af..24134e771 100644 --- a/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ListFilesResponse : IUtf8JsonWriteable, IJsonModel + public partial class ListFilesResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, 
ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -138,13 +134,6 @@ internal static ListFilesResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeListFilesResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ListFilesResponse.cs b/.dotnet/src/Generated/Models/ListFilesResponse.cs index 0738efe8d..7447294ac 100644 --- a/.dotnet/src/Generated/Models/ListFilesResponse.cs +++ b/.dotnet/src/Generated/Models/ListFilesResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -49,7 +47,7 @@ public partial class ListFilesResponse /// is null. 
internal ListFilesResponse(IEnumerable data) { - ClientUtilities.AssertNotNull(data, nameof(data)); + if (data is null) throw new ArgumentNullException(nameof(data)); Data = data.ToList(); } @@ -76,3 +74,4 @@ internal ListFilesResponse() public ListFilesResponseObject Object { get; } = ListFilesResponseObject.List; } } + diff --git a/.dotnet/src/Generated/Models/ListFilesResponseObject.cs b/.dotnet/src/Generated/Models/ListFilesResponseObject.cs index ef68e3dda..1c8be15c3 100644 --- a/.dotnet/src/Generated/Models/ListFilesResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListFilesResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ListFilesResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs index b141b423a..588d06c98 100644 --- a/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ListFineTuningJobEventsResponse : IUtf8JsonWriteable, IJsonModel + public partial class ListFineTuningJobEventsResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -138,13 +134,6 @@ internal static ListFineTuningJobEventsResponse FromResponse(PipelineResponse re using var document = JsonDocument.Parse(response.Content); return DeserializeListFineTuningJobEventsResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs index 94573da09..b79f9a929 100644 --- a/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs +++ b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -50,8 +48,8 @@ public partial class ListFineTuningJobEventsResponse /// or is null. 
internal ListFineTuningJobEventsResponse(string @object, IEnumerable data) { - ClientUtilities.AssertNotNull(@object, nameof(@object)); - ClientUtilities.AssertNotNull(data, nameof(data)); + if (@object is null) throw new ArgumentNullException(nameof(@object)); + if (data is null) throw new ArgumentNullException(nameof(data)); Object = @object; Data = data.ToList(); @@ -79,3 +77,4 @@ internal ListFineTuningJobEventsResponse() public IReadOnlyList Data { get; } } } + diff --git a/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs index 516a772d1..c8585eaf5 100644 --- a/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ListMessageFilesResponse : IUtf8JsonWriteable, IJsonModel + public partial class ListMessageFilesResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -162,13 +158,6 @@ internal static ListMessageFilesResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeListMessageFilesResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs b/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs index e4476c13a..be35d9455 100644 --- a/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs +++ b/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -52,9 +50,9 @@ public partial class ListMessageFilesResponse /// , or is null. internal ListMessageFilesResponse(IEnumerable data, string firstId, string lastId, bool hasMore) { - ClientUtilities.AssertNotNull(data, nameof(data)); - ClientUtilities.AssertNotNull(firstId, nameof(firstId)); - ClientUtilities.AssertNotNull(lastId, nameof(lastId)); + if (data is null) throw new ArgumentNullException(nameof(data)); + if (firstId is null) throw new ArgumentNullException(nameof(firstId)); + if (lastId is null) throw new ArgumentNullException(nameof(lastId)); Data = data.ToList(); FirstId = firstId; @@ -97,3 +95,4 @@ internal ListMessageFilesResponse() public bool HasMore { get; } } } + diff --git a/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs b/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs index da8016c77..01cd0514a 100644 --- a/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ListMessageFilesResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs index 
b68c46995..fd1dad634 100644 --- a/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ListMessagesResponse : IUtf8JsonWriteable, IJsonModel + public partial class ListMessagesResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -162,13 +158,6 @@ internal static ListMessagesResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeListMessagesResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ListMessagesResponse.cs b/.dotnet/src/Generated/Models/ListMessagesResponse.cs index 41193e112..56ef1f2ec 100644 --- a/.dotnet/src/Generated/Models/ListMessagesResponse.cs +++ b/.dotnet/src/Generated/Models/ListMessagesResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -52,9 +50,9 @@ public partial class ListMessagesResponse /// , or is null. 
internal ListMessagesResponse(IEnumerable data, string firstId, string lastId, bool hasMore) { - ClientUtilities.AssertNotNull(data, nameof(data)); - ClientUtilities.AssertNotNull(firstId, nameof(firstId)); - ClientUtilities.AssertNotNull(lastId, nameof(lastId)); + if (data is null) throw new ArgumentNullException(nameof(data)); + if (firstId is null) throw new ArgumentNullException(nameof(firstId)); + if (lastId is null) throw new ArgumentNullException(nameof(lastId)); Data = data.ToList(); FirstId = firstId; @@ -97,3 +95,4 @@ internal ListMessagesResponse() public bool HasMore { get; } } } + diff --git a/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs b/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs index ff1303bf4..01ad83164 100644 --- a/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ListMessagesResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs index 8a6dbd3a7..36da18e24 100644 --- a/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ListModelsResponse : IUtf8JsonWriteable, IJsonModel + public partial class ListModelsResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions 
options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -138,13 +134,6 @@ internal static ListModelsResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeListModelsResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ListModelsResponse.cs b/.dotnet/src/Generated/Models/ListModelsResponse.cs index bc2ddf1a7..ee8f8e350 100644 --- a/.dotnet/src/Generated/Models/ListModelsResponse.cs +++ b/.dotnet/src/Generated/Models/ListModelsResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -49,7 +47,7 @@ public partial class ListModelsResponse /// is null. 
internal ListModelsResponse(IEnumerable data) { - ClientUtilities.AssertNotNull(data, nameof(data)); + if (data is null) throw new ArgumentNullException(nameof(data)); Data = data.ToList(); } @@ -77,3 +75,4 @@ internal ListModelsResponse() public IReadOnlyList Data { get; } } } + diff --git a/.dotnet/src/Generated/Models/ListModelsResponseObject.cs b/.dotnet/src/Generated/Models/ListModelsResponseObject.cs index bb0127059..081da58be 100644 --- a/.dotnet/src/Generated/Models/ListModelsResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListModelsResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ListModelsResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/ListOrder.cs b/.dotnet/src/Generated/Models/ListOrder.cs index 0343ac0f3..95b097763 100644 --- a/.dotnet/src/Generated/Models/ListOrder.cs +++ b/.dotnet/src/Generated/Models/ListOrder.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -45,3 +43,4 @@ public ListOrder(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs index dd8425fb7..16a324034 100644 --- a/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ListPaginatedFineTuningJobsResponse : IUtf8JsonWriteable, IJsonModel + public partial class ListPaginatedFineTuningJobsResponse : IJsonModel { - void 
IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -146,13 +142,6 @@ internal static ListPaginatedFineTuningJobsResponse FromResponse(PipelineRespons using var document = JsonDocument.Parse(response.Content); return DeserializeListPaginatedFineTuningJobsResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs index db1bf28f5..3bb6a7d15 100644 --- a/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs +++ b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -51,8 +49,8 @@ public partial class ListPaginatedFineTuningJobsResponse /// or is null. 
internal ListPaginatedFineTuningJobsResponse(string @object, IEnumerable data, bool hasMore) { - ClientUtilities.AssertNotNull(@object, nameof(@object)); - ClientUtilities.AssertNotNull(data, nameof(data)); + if (@object is null) throw new ArgumentNullException(nameof(@object)); + if (data is null) throw new ArgumentNullException(nameof(data)); Object = @object; Data = data.ToList(); @@ -85,3 +83,4 @@ internal ListPaginatedFineTuningJobsResponse() public bool HasMore { get; } } } + diff --git a/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs index 3aa87a8a1..a91044a98 100644 --- a/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ListRunStepsResponse : IUtf8JsonWriteable, IJsonModel + public partial class ListRunStepsResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -162,13 +158,6 @@ internal static ListRunStepsResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeListRunStepsResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ListRunStepsResponse.cs b/.dotnet/src/Generated/Models/ListRunStepsResponse.cs index 1df4309d3..641a91e04 100644 --- a/.dotnet/src/Generated/Models/ListRunStepsResponse.cs +++ b/.dotnet/src/Generated/Models/ListRunStepsResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -52,9 +50,9 @@ public partial class ListRunStepsResponse /// , or is null. internal ListRunStepsResponse(IEnumerable data, string firstId, string lastId, bool hasMore) { - ClientUtilities.AssertNotNull(data, nameof(data)); - ClientUtilities.AssertNotNull(firstId, nameof(firstId)); - ClientUtilities.AssertNotNull(lastId, nameof(lastId)); + if (data is null) throw new ArgumentNullException(nameof(data)); + if (firstId is null) throw new ArgumentNullException(nameof(firstId)); + if (lastId is null) throw new ArgumentNullException(nameof(lastId)); Data = data.ToList(); FirstId = firstId; @@ -97,3 +95,4 @@ internal ListRunStepsResponse() public bool HasMore { get; } } } + diff --git a/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs b/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs index 043709495..2b613b3e4 100644 --- a/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ListRunStepsResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs index d0705cd68..a934e890e 100644 --- 
a/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ListRunsResponse : IUtf8JsonWriteable, IJsonModel + public partial class ListRunsResponse : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -162,13 +158,6 @@ internal static ListRunsResponse FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeListRunsResponse(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ListRunsResponse.cs b/.dotnet/src/Generated/Models/ListRunsResponse.cs index efd6de920..f3fb538c7 100644 --- a/.dotnet/src/Generated/Models/ListRunsResponse.cs +++ b/.dotnet/src/Generated/Models/ListRunsResponse.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -52,9 +50,9 @@ public partial class ListRunsResponse /// , or is null. 
internal ListRunsResponse(IEnumerable data, string firstId, string lastId, bool hasMore) { - ClientUtilities.AssertNotNull(data, nameof(data)); - ClientUtilities.AssertNotNull(firstId, nameof(firstId)); - ClientUtilities.AssertNotNull(lastId, nameof(lastId)); + if (data is null) throw new ArgumentNullException(nameof(data)); + if (firstId is null) throw new ArgumentNullException(nameof(firstId)); + if (lastId is null) throw new ArgumentNullException(nameof(lastId)); Data = data.ToList(); FirstId = firstId; @@ -97,3 +95,4 @@ internal ListRunsResponse() public bool HasMore { get; } } } + diff --git a/.dotnet/src/Generated/Models/ListRunsResponseObject.cs b/.dotnet/src/Generated/Models/ListRunsResponseObject.cs index 548feb288..f8a80ed2d 100644 --- a/.dotnet/src/Generated/Models/ListRunsResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListRunsResponseObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ListRunsResponseObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs b/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs index 01d813ec5..62d4e0d8e 100644 --- a/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class MessageFileObject : IUtf8JsonWriteable, IJsonModel + public partial class MessageFileObject : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = 
options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -144,13 +140,6 @@ internal static MessageFileObject FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeMessageFileObject(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/MessageFileObject.cs b/.dotnet/src/Generated/Models/MessageFileObject.cs index 0afed8c95..1f4f67e75 100644 --- a/.dotnet/src/Generated/Models/MessageFileObject.cs +++ b/.dotnet/src/Generated/Models/MessageFileObject.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -50,8 +48,8 @@ public partial class MessageFileObject /// or is null. 
internal MessageFileObject(string id, DateTimeOffset createdAt, string messageId) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(messageId, nameof(messageId)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); Id = id; CreatedAt = createdAt; @@ -89,3 +87,4 @@ internal MessageFileObject() public string MessageId { get; } } } + diff --git a/.dotnet/src/Generated/Models/MessageFileObjectObject.cs b/.dotnet/src/Generated/Models/MessageFileObjectObject.cs index 55f4cc6f8..f1185adf5 100644 --- a/.dotnet/src/Generated/Models/MessageFileObjectObject.cs +++ b/.dotnet/src/Generated/Models/MessageFileObjectObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public MessageFileObjectObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/MessageObject.Serialization.cs b/.dotnet/src/Generated/Models/MessageObject.Serialization.cs index 5f9db4673..bc6f566b7 100644 --- a/.dotnet/src/Generated/Models/MessageObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/MessageObject.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class MessageObject : IUtf8JsonWriteable, IJsonModel + public partial class MessageObject : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -278,13 +274,6 @@ internal static MessageObject FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeMessageObject(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/MessageObject.cs b/.dotnet/src/Generated/Models/MessageObject.cs index 54ea98399..0ed781f8d 100644 --- a/.dotnet/src/Generated/Models/MessageObject.cs +++ b/.dotnet/src/Generated/Models/MessageObject.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -71,10 +69,10 @@ public partial class MessageObject /// , , or is null. internal MessageObject(string id, DateTimeOffset createdAt, string threadId, MessageObjectRole role, IEnumerable content, string assistantId, string runId, IEnumerable fileIds, IReadOnlyDictionary metadata) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - ClientUtilities.AssertNotNull(fileIds, nameof(fileIds)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + if (fileIds is null) throw new ArgumentNullException(nameof(fileIds)); Id = id; CreatedAt = createdAt; @@ -199,3 +197,4 @@ internal MessageObject() public IReadOnlyDictionary Metadata { get; } } } + diff --git a/.dotnet/src/Generated/Models/MessageObjectObject.cs b/.dotnet/src/Generated/Models/MessageObjectObject.cs index 5f835f436..978ab0a6a 100644 --- 
a/.dotnet/src/Generated/Models/MessageObjectObject.cs +++ b/.dotnet/src/Generated/Models/MessageObjectObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public MessageObjectObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/MessageObjectRole.cs b/.dotnet/src/Generated/Models/MessageObjectRole.cs index a62512cd2..53ee3cafe 100644 --- a/.dotnet/src/Generated/Models/MessageObjectRole.cs +++ b/.dotnet/src/Generated/Models/MessageObjectRole.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public MessageObjectRole(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/Model.Serialization.cs b/.dotnet/src/Generated/Models/Model.Serialization.cs index b3a797b7b..4b0cb8d8a 100644 --- a/.dotnet/src/Generated/Models/Model.Serialization.cs +++ b/.dotnet/src/Generated/Models/Model.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class Model : IUtf8JsonWriteable, IJsonModel + public partial class Model : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -144,13 +140,6 @@ internal static Model FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeModel(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/Model.cs b/.dotnet/src/Generated/Models/Model.cs index 70edf9a14..66fe38632 100644 --- a/.dotnet/src/Generated/Models/Model.cs +++ b/.dotnet/src/Generated/Models/Model.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -50,8 +48,8 @@ public partial class Model /// or is null. internal Model(string id, DateTimeOffset created, string ownedBy) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(ownedBy, nameof(ownedBy)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (ownedBy is null) throw new ArgumentNullException(nameof(ownedBy)); Id = id; Created = created; @@ -89,3 +87,4 @@ internal Model() public string OwnedBy { get; } } } + diff --git a/.dotnet/src/Generated/Models/ModelObject.cs b/.dotnet/src/Generated/Models/ModelObject.cs index 063012b5a..70fd47f04 100644 --- a/.dotnet/src/Generated/Models/ModelObject.cs +++ b/.dotnet/src/Generated/Models/ModelObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ModelObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs index 7e9027ef1..7635f00f3 100644 --- a/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; 
using System.Text.Json; namespace OpenAI.Models { - public partial class ModifyAssistantRequest : IUtf8JsonWriteable, IJsonModel + public partial class ModifyAssistantRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -294,13 +290,6 @@ internal static ModifyAssistantRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeModifyAssistantRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs b/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs index abc79d17a..9882b6462 100644 --- a/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs +++ b/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -145,3 +143,4 @@ internal ModifyAssistantRequest(string model, string name, string description, s public IDictionary Metadata { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs index 35ee9a46e..2c5c37dec 100644 --- a/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using 
OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ModifyMessageRequest : IUtf8JsonWriteable, IJsonModel + public partial class ModifyMessageRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -145,13 +141,6 @@ internal static ModifyMessageRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeModifyMessageRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ModifyMessageRequest.cs b/.dotnet/src/Generated/Models/ModifyMessageRequest.cs index 3a200b238..a087c656d 100644 --- a/.dotnet/src/Generated/Models/ModifyMessageRequest.cs +++ b/.dotnet/src/Generated/Models/ModifyMessageRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -70,3 +68,4 @@ internal ModifyMessageRequest(IDictionary metadata, IDictionary< public IDictionary Metadata { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs index 75c891191..1976fd93e 100644 --- a/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; 
-using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ModifyRunRequest : IUtf8JsonWriteable, IJsonModel + public partial class ModifyRunRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -145,13 +141,6 @@ internal static ModifyRunRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeModifyRunRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ModifyRunRequest.cs b/.dotnet/src/Generated/Models/ModifyRunRequest.cs index 8a8a14588..1be502ea5 100644 --- a/.dotnet/src/Generated/Models/ModifyRunRequest.cs +++ b/.dotnet/src/Generated/Models/ModifyRunRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -70,3 +68,4 @@ internal ModifyRunRequest(IDictionary metadata, IDictionary Metadata { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs index 2e8149f8e..e5bc72917 100644 --- a/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; 
-using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ModifyThreadRequest : IUtf8JsonWriteable, IJsonModel + public partial class ModifyThreadRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -145,13 +141,6 @@ internal static ModifyThreadRequest FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeModifyThreadRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ModifyThreadRequest.cs b/.dotnet/src/Generated/Models/ModifyThreadRequest.cs index 635172be4..084d68686 100644 --- a/.dotnet/src/Generated/Models/ModifyThreadRequest.cs +++ b/.dotnet/src/Generated/Models/ModifyThreadRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -70,3 +68,4 @@ internal ModifyThreadRequest(IDictionary metadata, IDictionary Metadata { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs b/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs index 53c7116dd..fe73a6958 100644 --- a/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs +++ b/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using 
System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class OpenAIFile : IUtf8JsonWriteable, IJsonModel + public partial class OpenAIFile : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -179,13 +175,6 @@ internal static OpenAIFile FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeOpenAIFile(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/OpenAIFile.cs b/.dotnet/src/Generated/Models/OpenAIFile.cs index 7d3adf563..05b42f6de 100644 --- a/.dotnet/src/Generated/Models/OpenAIFile.cs +++ b/.dotnet/src/Generated/Models/OpenAIFile.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -59,8 +57,8 @@ public partial class OpenAIFile /// or is null. 
internal OpenAIFile(string id, long bytes, DateTimeOffset createdAt, string filename, OpenAIFilePurpose purpose, OpenAIFileStatus status) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(filename, nameof(filename)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (filename is null) throw new ArgumentNullException(nameof(filename)); Id = id; Bytes = bytes; @@ -135,3 +133,4 @@ internal OpenAIFile() public string StatusDetails { get; } } } + diff --git a/.dotnet/src/Generated/Models/OpenAIFileObject.cs b/.dotnet/src/Generated/Models/OpenAIFileObject.cs index 79e03b84c..268a0888e 100644 --- a/.dotnet/src/Generated/Models/OpenAIFileObject.cs +++ b/.dotnet/src/Generated/Models/OpenAIFileObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public OpenAIFileObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs b/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs index 0f3c83bc5..98a369e91 100644 --- a/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs +++ b/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -52,3 +50,4 @@ public OpenAIFilePurpose(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/OpenAIFileStatus.cs b/.dotnet/src/Generated/Models/OpenAIFileStatus.cs index c35731688..cddc7cf60 100644 --- a/.dotnet/src/Generated/Models/OpenAIFileStatus.cs +++ b/.dotnet/src/Generated/Models/OpenAIFileStatus.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -49,3 +47,4 @@ public OpenAIFileStatus(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs b/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs index 
e857ef3b6..b730fa9c8 100644 --- a/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class RunCompletionUsage : IUtf8JsonWriteable, IJsonModel + public partial class RunCompletionUsage : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -136,13 +132,6 @@ internal static RunCompletionUsage FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeRunCompletionUsage(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/RunCompletionUsage.cs b/.dotnet/src/Generated/Models/RunCompletionUsage.cs index be9401604..a1bd07938 100644 --- a/.dotnet/src/Generated/Models/RunCompletionUsage.cs +++ b/.dotnet/src/Generated/Models/RunCompletionUsage.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.Collections.Generic; @@ -82,3 +80,4 @@ internal RunCompletionUsage() public long TotalTokens { get; } } } + diff --git a/.dotnet/src/Generated/Models/RunObject.Serialization.cs b/.dotnet/src/Generated/Models/RunObject.Serialization.cs index 7128918dc..1c7779a88 100644 --- a/.dotnet/src/Generated/Models/RunObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunObject.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class RunObject : IUtf8JsonWriteable, IJsonModel + public partial class RunObject : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -410,13 +406,6 @@ internal static RunObject FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeRunObject(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/RunObject.cs b/.dotnet/src/Generated/Models/RunObject.cs index 8222d1eec..a1baf8de2 100644 --- a/.dotnet/src/Generated/Models/RunObject.cs +++ b/.dotnet/src/Generated/Models/RunObject.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -82,13 +80,13 @@ public partial class RunObject /// , , , , , or is null. internal RunObject(string id, DateTimeOffset createdAt, string threadId, string assistantId, RunObjectStatus status, RunObjectRequiredAction requiredAction, RunObjectLastError lastError, DateTimeOffset expiresAt, DateTimeOffset? startedAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? completedAt, string model, string instructions, IEnumerable tools, IEnumerable fileIds, IReadOnlyDictionary metadata, RunCompletionUsage usage) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNull(model, nameof(model)); - ClientUtilities.AssertNotNull(instructions, nameof(instructions)); - ClientUtilities.AssertNotNull(tools, nameof(tools)); - ClientUtilities.AssertNotNull(fileIds, nameof(fileIds)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (model is null) throw new ArgumentNullException(nameof(model)); + if (instructions is null) throw new ArgumentNullException(nameof(instructions)); + if (tools is null) throw new ArgumentNullException(nameof(tools)); + if (fileIds is null) throw new 
ArgumentNullException(nameof(fileIds)); Id = id; CreatedAt = createdAt; @@ -262,3 +260,4 @@ internal RunObject() public RunCompletionUsage Usage { get; } } } + diff --git a/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs b/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs index f40c2aa0a..f3486aafe 100644 --- a/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class RunObjectLastError : IUtf8JsonWriteable, IJsonModel + public partial class RunObjectLastError : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -128,13 +124,6 @@ internal static RunObjectLastError FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeRunObjectLastError(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/RunObjectLastError.cs b/.dotnet/src/Generated/Models/RunObjectLastError.cs index f2e26a107..b46274c65 100644 --- a/.dotnet/src/Generated/Models/RunObjectLastError.cs +++ b/.dotnet/src/Generated/Models/RunObjectLastError.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -49,7 +47,7 @@ public partial class RunObjectLastError /// is null. internal RunObjectLastError(RunObjectLastErrorCode code, string message) { - ClientUtilities.AssertNotNull(message, nameof(message)); + if (message is null) throw new ArgumentNullException(nameof(message)); Code = code; Message = message; @@ -77,3 +75,4 @@ internal RunObjectLastError() public string Message { get; } } } + diff --git a/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs b/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs index abcb19758..d851f2397 100644 --- a/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs +++ b/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public RunObjectLastErrorCode(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/RunObjectObject.cs b/.dotnet/src/Generated/Models/RunObjectObject.cs index c9b970b98..19925055a 100644 --- a/.dotnet/src/Generated/Models/RunObjectObject.cs +++ b/.dotnet/src/Generated/Models/RunObjectObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public RunObjectObject(string value) public override string ToString() => _value; } } + diff --git 
a/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs b/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs index 8b354c03e..98fb7bbc1 100644 --- a/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class RunObjectRequiredAction : IUtf8JsonWriteable, IJsonModel + public partial class RunObjectRequiredAction : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -128,13 +124,6 @@ internal static RunObjectRequiredAction FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeRunObjectRequiredAction(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs b/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs index fa11171c8..034fedcca 100644 --- a/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs +++ b/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -48,7 +46,7 @@ public partial class RunObjectRequiredAction /// is null. internal RunObjectRequiredAction(RunObjectRequiredActionSubmitToolOutputs submitToolOutputs) { - ClientUtilities.AssertNotNull(submitToolOutputs, nameof(submitToolOutputs)); + if (submitToolOutputs is null) throw new ArgumentNullException(nameof(submitToolOutputs)); SubmitToolOutputs = submitToolOutputs; } @@ -76,3 +74,4 @@ internal RunObjectRequiredAction() public RunObjectRequiredActionSubmitToolOutputs SubmitToolOutputs { get; } } } + diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs index 6f7c1b4e5..0f374b56a 100644 --- a/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class RunObjectRequiredActionSubmitToolOutputs : IUtf8JsonWriteable, IJsonModel + public partial class RunObjectRequiredActionSubmitToolOutputs : IJsonModel { - void 
IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -130,13 +126,6 @@ internal static RunObjectRequiredActionSubmitToolOutputs FromResponse(PipelineRe using var document = JsonDocument.Parse(response.Content); return DeserializeRunObjectRequiredActionSubmitToolOutputs(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs index e9f7b6a58..ffdc5ff0f 100644 --- a/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs +++ b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -49,7 +47,7 @@ public partial class RunObjectRequiredActionSubmitToolOutputs /// is null. 
internal RunObjectRequiredActionSubmitToolOutputs(IEnumerable toolCalls) { - ClientUtilities.AssertNotNull(toolCalls, nameof(toolCalls)); + if (toolCalls is null) throw new ArgumentNullException(nameof(toolCalls)); ToolCalls = toolCalls.ToList(); } @@ -72,3 +70,4 @@ internal RunObjectRequiredActionSubmitToolOutputs() public IReadOnlyList ToolCalls { get; } } } + diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs b/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs index eb11f17ac..d8d1c8f07 100644 --- a/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs +++ b/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public RunObjectRequiredActionType(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/RunObjectStatus.cs b/.dotnet/src/Generated/Models/RunObjectStatus.cs index 52f7c1603..cd13d9b49 100644 --- a/.dotnet/src/Generated/Models/RunObjectStatus.cs +++ b/.dotnet/src/Generated/Models/RunObjectStatus.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -64,3 +62,4 @@ public RunObjectStatus(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs index 9bfd95a67..38408c8d1 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - internal partial class 
RunStepDetailsMessageCreationObject : IUtf8JsonWriteable, IJsonModel + internal partial class RunStepDetailsMessageCreationObject : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -128,13 +124,6 @@ internal static RunStepDetailsMessageCreationObject FromResponse(PipelineRespons using var document = JsonDocument.Parse(response.Content); return DeserializeRunStepDetailsMessageCreationObject(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs index d0db44dad..ffac3bdb1 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -48,7 +46,7 @@ internal partial class RunStepDetailsMessageCreationObject /// is null. 
internal RunStepDetailsMessageCreationObject(RunStepDetailsMessageCreationObjectMessageCreation messageCreation) { - ClientUtilities.AssertNotNull(messageCreation, nameof(messageCreation)); + if (messageCreation is null) throw new ArgumentNullException(nameof(messageCreation)); MessageCreation = messageCreation; } @@ -76,3 +74,4 @@ internal RunStepDetailsMessageCreationObject() public RunStepDetailsMessageCreationObjectMessageCreation MessageCreation { get; } } } + diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs index aadaece5e..1d02bc66a 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - internal partial class RunStepDetailsMessageCreationObjectMessageCreation : IUtf8JsonWriteable, IJsonModel + internal partial class RunStepDetailsMessageCreationObjectMessageCreation : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -120,13 +116,6 @@ internal static RunStepDetailsMessageCreationObjectMessageCreation FromResponse( using var document = JsonDocument.Parse(response.Content); return DeserializeRunStepDetailsMessageCreationObjectMessageCreation(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs index 3163ffe5c..447962ae3 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -48,7 +46,7 @@ internal partial class RunStepDetailsMessageCreationObjectMessageCreation /// is null. internal RunStepDetailsMessageCreationObjectMessageCreation(string messageId) { - ClientUtilities.AssertNotNull(messageId, nameof(messageId)); + if (messageId is null) throw new ArgumentNullException(nameof(messageId)); MessageId = messageId; } @@ -71,3 +69,4 @@ internal RunStepDetailsMessageCreationObjectMessageCreation() public string MessageId { get; } } } + diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs index 007fd9eed..bf0a1f637 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public RunStepDetailsMessageCreationObjectType(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs index 5fa47d28b..ebf127b7d 100644 --- 
a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - internal partial class RunStepDetailsToolCallsObject : IUtf8JsonWriteable, IJsonModel + internal partial class RunStepDetailsToolCallsObject : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -157,13 +153,6 @@ internal static RunStepDetailsToolCallsObject FromResponse(PipelineResponse resp using var document = JsonDocument.Parse(response.Content); return DeserializeRunStepDetailsToolCallsObject(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs index 76cdc1d4c..f290db55e 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; using System.Linq; @@ -52,7 +50,7 @@ internal partial class RunStepDetailsToolCallsObject /// is null. 
internal RunStepDetailsToolCallsObject(IEnumerable toolCalls) { - ClientUtilities.AssertNotNull(toolCalls, nameof(toolCalls)); + if (toolCalls is null) throw new ArgumentNullException(nameof(toolCalls)); ToolCalls = toolCalls.ToList(); } @@ -113,3 +111,4 @@ internal RunStepDetailsToolCallsObject() public IReadOnlyList ToolCalls { get; } } } + diff --git a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs index 56b168ac3..a307fc4c4 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public RunStepDetailsToolCallsObjectType(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs b/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs index 74f027391..a73966402 100644 --- a/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class RunStepObject : IUtf8JsonWriteable, IJsonModel + public partial class RunStepObject : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -342,13 +338,6 @@ internal static RunStepObject FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeRunStepObject(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/RunStepObject.cs b/.dotnet/src/Generated/Models/RunStepObject.cs index 801dc2367..24cbfec6e 100644 --- a/.dotnet/src/Generated/Models/RunStepObject.cs +++ b/.dotnet/src/Generated/Models/RunStepObject.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -72,11 +70,11 @@ public partial class RunStepObject /// , , , or is null. internal RunStepObject(string id, DateTimeOffset createdAt, string assistantId, string threadId, string runId, RunStepObjectType type, RunStepObjectStatus status, BinaryData stepDetails, RunStepObjectLastError lastError, DateTimeOffset? expiresAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? 
completedAt, IReadOnlyDictionary metadata, RunCompletionUsage usage) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(assistantId, nameof(assistantId)); - ClientUtilities.AssertNotNull(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(runId, nameof(runId)); - ClientUtilities.AssertNotNull(stepDetails, nameof(stepDetails)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (stepDetails is null) throw new ArgumentNullException(nameof(stepDetails)); Id = id; CreatedAt = createdAt; @@ -234,3 +232,4 @@ internal RunStepObject() public RunCompletionUsage Usage { get; } } } + diff --git a/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs b/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs index 368f9006e..71a0e8a2f 100644 --- a/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class RunStepObjectLastError : IUtf8JsonWriteable, IJsonModel + public partial class RunStepObjectLastError : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -128,13 +124,6 @@ internal static RunStepObjectLastError FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeRunStepObjectLastError(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/RunStepObjectLastError.cs b/.dotnet/src/Generated/Models/RunStepObjectLastError.cs index 8742db0c0..2bf3b1db8 100644 --- a/.dotnet/src/Generated/Models/RunStepObjectLastError.cs +++ b/.dotnet/src/Generated/Models/RunStepObjectLastError.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -49,7 +47,7 @@ public partial class RunStepObjectLastError /// is null. 
internal RunStepObjectLastError(RunStepObjectLastErrorCode code, string message) { - ClientUtilities.AssertNotNull(message, nameof(message)); + if (message is null) throw new ArgumentNullException(nameof(message)); Code = code; Message = message; @@ -77,3 +75,4 @@ internal RunStepObjectLastError() public string Message { get; } } } + diff --git a/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs b/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs index e3b156259..9bf035ea1 100644 --- a/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs +++ b/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public RunStepObjectLastErrorCode(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/RunStepObjectObject.cs b/.dotnet/src/Generated/Models/RunStepObjectObject.cs index 3c774719f..21477c60d 100644 --- a/.dotnet/src/Generated/Models/RunStepObjectObject.cs +++ b/.dotnet/src/Generated/Models/RunStepObjectObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public RunStepObjectObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/RunStepObjectStatus.cs b/.dotnet/src/Generated/Models/RunStepObjectStatus.cs index 4b11e62a4..21abb0c10 100644 --- a/.dotnet/src/Generated/Models/RunStepObjectStatus.cs +++ b/.dotnet/src/Generated/Models/RunStepObjectStatus.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -55,3 +53,4 @@ public RunStepObjectStatus(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/RunStepObjectType.cs b/.dotnet/src/Generated/Models/RunStepObjectType.cs index 49adee903..432a90099 100644 --- a/.dotnet/src/Generated/Models/RunStepObjectType.cs +++ 
b/.dotnet/src/Generated/Models/RunStepObjectType.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -46,3 +44,4 @@ public RunStepObjectType(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs b/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs index 421bb3a02..47b58bfd9 100644 --- a/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class RunToolCallObject : IUtf8JsonWriteable, IJsonModel + public partial class RunToolCallObject : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -136,13 +132,6 @@ internal static RunToolCallObject FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeRunToolCallObject(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/RunToolCallObject.cs b/.dotnet/src/Generated/Models/RunToolCallObject.cs index b6f3694a3..d70655724 100644 --- a/.dotnet/src/Generated/Models/RunToolCallObject.cs +++ b/.dotnet/src/Generated/Models/RunToolCallObject.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -52,8 +50,8 @@ public partial class RunToolCallObject /// or is null. internal RunToolCallObject(string id, RunToolCallObjectFunction function) { - ClientUtilities.AssertNotNull(id, nameof(id)); - ClientUtilities.AssertNotNull(function, nameof(function)); + if (id is null) throw new ArgumentNullException(nameof(id)); + if (function is null) throw new ArgumentNullException(nameof(function)); Id = id; Function = function; @@ -92,3 +90,4 @@ internal RunToolCallObject() public RunToolCallObjectFunction Function { get; } } } + diff --git a/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs index ecef680d8..a62970151 100644 --- a/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class RunToolCallObjectFunction : IUtf8JsonWriteable, IJsonModel + public partial class RunToolCallObjectFunction : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new 
ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -128,13 +124,6 @@ internal static RunToolCallObjectFunction FromResponse(PipelineResponse response using var document = JsonDocument.Parse(response.Content); return DeserializeRunToolCallObjectFunction(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs index c94197dac..1fab15eac 100644 --- a/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs +++ b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -49,8 +47,8 @@ public partial class RunToolCallObjectFunction /// or is null. 
internal RunToolCallObjectFunction(string name, string arguments) { - ClientUtilities.AssertNotNull(name, nameof(name)); - ClientUtilities.AssertNotNull(arguments, nameof(arguments)); + if (name is null) throw new ArgumentNullException(nameof(name)); + if (arguments is null) throw new ArgumentNullException(nameof(arguments)); Name = name; Arguments = arguments; @@ -78,3 +76,4 @@ internal RunToolCallObjectFunction() public string Arguments { get; } } } + diff --git a/.dotnet/src/Generated/Models/RunToolCallObjectType.cs b/.dotnet/src/Generated/Models/RunToolCallObjectType.cs index fb277c525..d882bc7f3 100644 --- a/.dotnet/src/Generated/Models/RunToolCallObjectType.cs +++ b/.dotnet/src/Generated/Models/RunToolCallObjectType.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public RunToolCallObjectType(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs index b1c71aec2..a5bf6fda2 100644 --- a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class SubmitToolOutputsRunRequest : IUtf8JsonWriteable, IJsonModel + public partial class SubmitToolOutputsRunRequest : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -120,13 +116,6 @@ internal static SubmitToolOutputsRunRequest FromResponse(PipelineResponse respon using var document = JsonDocument.Parse(response.Content); return DeserializeSubmitToolOutputsRunRequest(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. - internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs index 47d39c5d4..abdcfb9e3 100644 --- a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -48,7 +46,7 @@ public partial class SubmitToolOutputsRunRequest /// is null. 
public SubmitToolOutputsRunRequest(SubmitToolOutputsRunRequestToolOutputs toolOutputs) { - ClientUtilities.AssertNotNull(toolOutputs, nameof(toolOutputs)); + if (toolOutputs is null) throw new ArgumentNullException(nameof(toolOutputs)); ToolOutputs = toolOutputs; } @@ -71,3 +69,4 @@ internal SubmitToolOutputsRunRequest() public SubmitToolOutputsRunRequestToolOutputs ToolOutputs { get; } } } + diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.Serialization.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.Serialization.cs index dc7e3a12d..f7c09dcf1 100644 --- a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.Serialization.cs +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class SubmitToolOutputsRunRequestToolOutputs : IUtf8JsonWriteable, IJsonModel + public partial class SubmitToolOutputsRunRequestToolOutputs : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -134,13 +130,6 @@ internal static SubmitToolOutputsRunRequestToolOutputs FromResponse(PipelineResp using var document = JsonDocument.Parse(response.Content); return DeserializeSubmitToolOutputsRunRequestToolOutputs(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.cs index 45de2cc25..8d8a8109c 100644 --- a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.cs +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.Collections.Generic; @@ -70,3 +68,4 @@ internal SubmitToolOutputsRunRequestToolOutputs(string toolCallId, string output public string Output { get; set; } } } + diff --git a/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs b/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs index 1d3b80be1..4644d1aa9 100644 --- a/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs @@ -1,19 +1,15 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models { - public partial class ThreadObject : IUtf8JsonWriteable, IJsonModel + public partial class ThreadObject : IJsonModel { - void IUtf8JsonWriteable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W")); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -167,13 +163,6 @@ internal static ThreadObject FromResponse(PipelineResponse response) using var document = JsonDocument.Parse(response.Content); return DeserializeThreadObject(document.RootElement); } - - /// Convert into a Utf8JsonRequestBody. 
- internal virtual RequestBody ToRequestBody() - { - var content = new Utf8JsonRequestBody(); - content.JsonWriter.WriteObjectValue(this); - return content; - } } } + diff --git a/.dotnet/src/Generated/Models/ThreadObject.cs b/.dotnet/src/Generated/Models/ThreadObject.cs index 600c5b250..22d754976 100644 --- a/.dotnet/src/Generated/Models/ThreadObject.cs +++ b/.dotnet/src/Generated/Models/ThreadObject.cs @@ -1,9 +1,7 @@ // -#nullable disable - using System; -using System.ClientModel.Internal; +using OpenAI.ClientShared.Internal; using System.Collections.Generic; namespace OpenAI.Models @@ -54,7 +52,7 @@ public partial class ThreadObject /// is null. internal ThreadObject(string id, DateTimeOffset createdAt, IReadOnlyDictionary metadata) { - ClientUtilities.AssertNotNull(id, nameof(id)); + if (id is null) throw new ArgumentNullException(nameof(id)); Id = id; CreatedAt = createdAt; @@ -100,3 +98,4 @@ internal ThreadObject() public IReadOnlyDictionary Metadata { get; } } } + diff --git a/.dotnet/src/Generated/Models/ThreadObjectObject.cs b/.dotnet/src/Generated/Models/ThreadObjectObject.cs index 4fd5f5e05..8eb382a0b 100644 --- a/.dotnet/src/Generated/Models/ThreadObjectObject.cs +++ b/.dotnet/src/Generated/Models/ThreadObjectObject.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.ComponentModel; @@ -43,3 +41,4 @@ public ThreadObjectObject(string value) public override string ToString() => _value; } } + diff --git a/.dotnet/src/Generated/ModelsOps.cs b/.dotnet/src/Generated/ModelsOps.cs index 5b9442920..e806f3cef 100644 --- a/.dotnet/src/Generated/ModelsOps.cs +++ b/.dotnet/src/Generated/ModelsOps.cs @@ -1,12 +1,9 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; +using System.Text; using System.Threading; using System.Threading.Tasks; using OpenAI.Models; @@ -18,16 +15,13 @@ namespace OpenAI public partial class 
ModelsOps { private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; + private readonly ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing support for the client library. - internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of ModelsOps for mocking. protected ModelsOps() @@ -35,15 +29,13 @@ protected ModelsOps() } /// Initializes a new instance of ModelsOps. - /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. + /// The key credential to copy. /// OpenAI Endpoint. - internal ModelsOps(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + internal ModelsOps(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) { - ClientDiagnostics = clientDiagnostics; _pipeline = pipeline; - _keyCredential = keyCredential; + _credential = credential; _endpoint = endpoint; } @@ -51,24 +43,20 @@ internal ModelsOps(TelemetrySource clientDiagnostics, MessagePipeline pipeline, /// Lists the currently available models, and provides basic information about each one such as the /// owner and availability. /// - /// The cancellation token to use. 
- public virtual async Task> GetModelsAsync(CancellationToken cancellationToken = default) + public virtual async Task> GetModelsAsync(CancellationToken cancellationToken = default) { - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetModelsAsync(context).ConfigureAwait(false); - return Result.FromValue(ListModelsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetModelsAsync().ConfigureAwait(false); + return ClientResult.FromValue(ListModelsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// /// Lists the currently available models, and provides basic information about each one such as the /// owner and availability. /// - /// The cancellation token to use. - public virtual Result GetModels(CancellationToken cancellationToken = default) + public virtual ClientResult GetModels(CancellationToken cancellationToken = default) { - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetModels(context); - return Result.FromValue(ListModelsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetModels(); + return ClientResult.FromValue(ListModelsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -87,23 +75,22 @@ public virtual Result GetModels(CancellationToken cancellati /// /// /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task GetModelsAsync(RequestOptions context) + public virtual async Task GetModelsAsync(RequestOptions options) { - using var scope = ClientDiagnostics.CreateSpan("ModelsOps.GetModels"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetModelsRequest(context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetModelsRequest(options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -122,23 +109,22 @@ public virtual async Task GetModelsAsync(RequestOptions context) /// /// /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result GetModels(RequestOptions context) + public virtual ClientResult GetModels(RequestOptions options) { - using var scope = ClientDiagnostics.CreateSpan("ModelsOps.GetModels"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetModelsRequest(context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetModelsRequest(options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// @@ -146,16 +132,15 @@ public virtual Result GetModels(RequestOptions context) /// permissioning. /// /// The ID of the model to use for this request. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual async Task> RetrieveAsync(string model, CancellationToken cancellationToken = default) + public virtual async Task> RetrieveAsync(string model) { - ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); + if (model is null) throw new ArgumentNullException(nameof(model)); + if (string.IsNullOrEmpty(model)) throw new ArgumentException(nameof(model)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await RetrieveAsync(model, context).ConfigureAwait(false); - return Result.FromValue(Model.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await RetrieveAsync(model).ConfigureAwait(false); + return ClientResult.FromValue(Model.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -163,16 +148,15 @@ public virtual async Task> RetrieveAsync(string model, Cancellatio /// permissioning. 
/// /// The ID of the model to use for this request. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual Result Retrieve(string model, CancellationToken cancellationToken = default) + public virtual ClientResult Retrieve(string model) { - ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); + if (model is null) throw new ArgumentNullException(nameof(model)); + if (string.IsNullOrEmpty(model)) throw new ArgumentException(nameof(model)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = Retrieve(model, context); - return Result.FromValue(Model.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = Retrieve(model); + return ClientResult.FromValue(Model.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -186,33 +170,32 @@ public virtual Result Retrieve(string model, CancellationToken cancellati /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the model to use for this request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task RetrieveAsync(string model, RequestOptions context) + public virtual async Task RetrieveAsync(string model, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); - - using var scope = ClientDiagnostics.CreateSpan("ModelsOps.Retrieve"); - scope.Start(); - try - { - using PipelineMessage message = CreateRetrieveRequest(model, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (model is null) throw new ArgumentNullException(nameof(model)); + if (string.IsNullOrEmpty(model)) throw new ArgumentException(nameof(model)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateRetrieveRequest(model, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -226,61 +209,58 @@ public virtual async Task RetrieveAsync(string model, RequestOptions con /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the model to use for this request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result Retrieve(string model, RequestOptions context) + public virtual ClientResult Retrieve(string model, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); - - using var scope = ClientDiagnostics.CreateSpan("ModelsOps.Retrieve"); - scope.Start(); - try - { - using PipelineMessage message = CreateRetrieveRequest(model, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (model is null) throw new ArgumentNullException(nameof(model)); + if (string.IsNullOrEmpty(model)) throw new ArgumentException(nameof(model)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateRetrieveRequest(model, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. /// The model to delete. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> DeleteAsync(string model, CancellationToken cancellationToken = default) + public virtual async Task> DeleteAsync(string model) { - ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); + if (model is null) throw new ArgumentNullException(nameof(model)); + if (string.IsNullOrEmpty(model)) throw new ArgumentException(nameof(model)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await DeleteAsync(model, context).ConfigureAwait(false); - return Result.FromValue(DeleteModelResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await DeleteAsync(model).ConfigureAwait(false); + return ClientResult.FromValue(DeleteModelResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. /// The model to delete. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual Result Delete(string model, CancellationToken cancellationToken = default) + public virtual ClientResult Delete(string model) { - ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); + if (model is null) throw new ArgumentNullException(nameof(model)); + if (string.IsNullOrEmpty(model)) throw new ArgumentException(nameof(model)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = Delete(model, context); - return Result.FromValue(DeleteModelResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = Delete(model); + return ClientResult.FromValue(DeleteModelResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -293,33 +273,32 @@ public virtual Result Delete(string model, CancellationToke /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. 
+ /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The model to delete. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task DeleteAsync(string model, RequestOptions context) + public virtual async Task DeleteAsync(string model, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); - - using var scope = ClientDiagnostics.CreateSpan("ModelsOps.Delete"); - scope.Start(); - try - { - using PipelineMessage message = CreateDeleteRequest(model, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (model is null) throw new ArgumentNullException(nameof(model)); + if (string.IsNullOrEmpty(model)) throw new ArgumentException(nameof(model)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateDeleteRequest(model, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -332,88 +311,85 @@ public virtual async Task DeleteAsync(string model, RequestOptions conte /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. 
/// /// /// /// /// The model to delete. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result Delete(string model, RequestOptions context) + public virtual ClientResult Delete(string model, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(model, nameof(model)); - - using var scope = ClientDiagnostics.CreateSpan("ModelsOps.Delete"); - scope.Start(); - try - { - using PipelineMessage message = CreateDeleteRequest(model, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (model is null) throw new ArgumentNullException(nameof(model)); + if (string.IsNullOrEmpty(model)) throw new ArgumentException(nameof(model)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateDeleteRequest(model, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } - } - internal PipelineMessage CreateGetModelsRequest(RequestOptions context) - { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/models", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - return message; + return ClientResult.FromResponse(response); } - internal PipelineMessage CreateRetrieveRequest(string model, RequestOptions context) + internal 
PipelineMessage CreateGetModelsRequest(RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/models/", false); - uri.AppendPath(model, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/models"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateDeleteRequest(string model, RequestOptions context) + internal PipelineMessage CreateRetrieveRequest(string model, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("DELETE"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/models/", false); - uri.AppendPath(model, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/models/"); + uriBuilder.Path += path.ToString(); + path.Append(model); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions 
FromCancellationToken(CancellationToken cancellationToken = default) + internal PipelineMessage CreateDeleteRequest(string model, RequestOptions options) { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "DELETE"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/models/"); + uriBuilder.Path += path.ToString(); + path.Append(model); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; } - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } + diff --git a/.dotnet/src/Generated/Moderations.cs b/.dotnet/src/Generated/Moderations.cs index 7c4520943..e0127be76 100644 --- a/.dotnet/src/Generated/Moderations.cs +++ b/.dotnet/src/Generated/Moderations.cs @@ -1,12 +1,9 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; +using System.Text; using System.Threading; using System.Threading.Tasks; using OpenAI.Models; @@ -18,16 +15,13 @@ namespace OpenAI public partial class Moderations { private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; + private readonly 
ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing support for the client library. - internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of Moderations for mocking. protected Moderations() @@ -35,44 +29,38 @@ protected Moderations() } /// Initializes a new instance of Moderations. - /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. + /// The key credential to copy. /// OpenAI Endpoint. - internal Moderations(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + internal Moderations(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) { - ClientDiagnostics = clientDiagnostics; _pipeline = pipeline; - _keyCredential = keyCredential; + _credential = credential; _endpoint = endpoint; } /// Classifies if text violates OpenAI's Content Policy. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual async Task> CreateModerationAsync(CreateModerationRequest content, CancellationToken cancellationToken = default) + public virtual async Task> CreateModerationAsync(CreateModerationRequest content) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content0 = content.ToRequestBody(); - Result result = await CreateModerationAsync(content0, context).ConfigureAwait(false); - return Result.FromValue(CreateModerationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content0 = BinaryContent.Create(content); + ClientResult result = await CreateModerationAsync(content0).ConfigureAwait(false); + return ClientResult.FromValue(CreateModerationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Classifies if text violates OpenAI's Content Policy. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual Result CreateModeration(CreateModerationRequest content, CancellationToken cancellationToken = default) + public virtual ClientResult CreateModeration(CreateModerationRequest content) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content0 = content.ToRequestBody(); - Result result = CreateModeration(content0, context); - return Result.FromValue(CreateModerationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content0 = BinaryContent.Create(content); + ClientResult result = CreateModeration(content0); + return ClientResult.FromValue(CreateModerationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -85,32 +73,30 @@ public virtual Result CreateModeration(CreateModeratio /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateModerationAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateModerationAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateModerationRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Moderations.CreateModeration"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateModerationRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -123,61 +109,52 @@ public virtual async Task CreateModerationAsync(RequestBody content, Req /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateModeration(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateModeration(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateModerationRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Moderations.CreateModeration"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateModerationRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } - internal PipelineMessage CreateCreateModerationRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateModerationRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/moderations", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/moderations"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + 
request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } + diff --git a/.dotnet/src/Generated/OpenAIClient.cs b/.dotnet/src/Generated/OpenAIClient.cs index 67ceeca49..29785453c 100644 --- a/.dotnet/src/Generated/OpenAIClient.cs +++ b/.dotnet/src/Generated/OpenAIClient.cs @@ -1,12 +1,8 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; using System.Threading; namespace OpenAI @@ -16,16 +12,13 @@ namespace OpenAI public partial class OpenAIClient { private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; + private readonly ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing support for the client library. 
- internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of OpenAIClient for mocking. protected OpenAIClient() @@ -35,7 +28,7 @@ protected OpenAIClient() /// Initializes a new instance of OpenAIClient. /// A credential used to authenticate to an Azure Service. /// is null. - public OpenAIClient(KeyCredential credential) : this(new Uri("https://api.openai.com/v1"), credential, new OpenAIClientOptions()) + public OpenAIClient(ApiKeyCredential credential) : this(new Uri("https://api.openai.com/v1"), credential, new OpenAIClientOptions()) { } @@ -44,15 +37,17 @@ protected OpenAIClient() /// A credential used to authenticate to an Azure Service. /// The options for configuring the client. /// or is null. - public OpenAIClient(Uri endpoint, KeyCredential credential, OpenAIClientOptions options) + public OpenAIClient(Uri endpoint, ApiKeyCredential credential, OpenAIClientOptions options) { - ClientUtilities.AssertNotNull(endpoint, nameof(endpoint)); - ClientUtilities.AssertNotNull(credential, nameof(credential)); + if (endpoint is null) throw new ArgumentNullException(nameof(endpoint)); + if (credential is null) throw new ArgumentNullException(nameof(credential)); options ??= new OpenAIClientOptions(); - - ClientDiagnostics = new TelemetrySource(options, true); - _keyCredential = credential; - _pipeline = MessagePipeline.Create(options, new IPipelinePolicy[] { new KeyCredentialPolicy(_keyCredential, AuthorizationHeader, AuthorizationApiKeyPrefix) }, Array.Empty>()); + _credential = credential; + var authenticationPolicy = ApiKeyAuthenticationPolicy.CreateBearerAuthorizationPolicy(_credential); + _pipeline = ClientPipeline.Create(options, + perCallPolicies: ReadOnlySpan.Empty, + perTryPolicies: new PipelinePolicy[] { authenticationPolicy }, + 
beforeTransportPolicies: ReadOnlySpan.Empty); _endpoint = endpoint; } @@ -73,79 +68,80 @@ public OpenAIClient(Uri endpoint, KeyCredential credential, OpenAIClientOptions /// Initializes a new instance of Audio. public virtual Audio GetAudioClient() { - return Volatile.Read(ref _cachedAudio) ?? Interlocked.CompareExchange(ref _cachedAudio, new Audio(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedAudio; + return Volatile.Read(ref _cachedAudio) ?? Interlocked.CompareExchange(ref _cachedAudio, new Audio(_pipeline, _credential, _endpoint), null) ?? _cachedAudio; } /// Initializes a new instance of Assistants. public virtual Assistants GetAssistantsClient() { - return Volatile.Read(ref _cachedAssistants) ?? Interlocked.CompareExchange(ref _cachedAssistants, new Assistants(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedAssistants; + return Volatile.Read(ref _cachedAssistants) ?? Interlocked.CompareExchange(ref _cachedAssistants, new Assistants(_pipeline, _credential, _endpoint), null) ?? _cachedAssistants; } /// Initializes a new instance of Chat. public virtual Chat GetChatClient() { - return Volatile.Read(ref _cachedChat) ?? Interlocked.CompareExchange(ref _cachedChat, new Chat(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedChat; + return Volatile.Read(ref _cachedChat) ?? Interlocked.CompareExchange(ref _cachedChat, new Chat(_pipeline, _credential, _endpoint), null) ?? _cachedChat; } /// Initializes a new instance of Completions. public virtual Completions GetCompletionsClient() { - return Volatile.Read(ref _cachedCompletions) ?? Interlocked.CompareExchange(ref _cachedCompletions, new Completions(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedCompletions; + return Volatile.Read(ref _cachedCompletions) ?? Interlocked.CompareExchange(ref _cachedCompletions, new Completions(_pipeline, _credential, _endpoint), null) ?? 
_cachedCompletions; } /// Initializes a new instance of Embeddings. public virtual Embeddings GetEmbeddingsClient() { - return Volatile.Read(ref _cachedEmbeddings) ?? Interlocked.CompareExchange(ref _cachedEmbeddings, new Embeddings(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedEmbeddings; + return Volatile.Read(ref _cachedEmbeddings) ?? Interlocked.CompareExchange(ref _cachedEmbeddings, new Embeddings(_pipeline, _credential, _endpoint), null) ?? _cachedEmbeddings; } /// Initializes a new instance of Files. public virtual Files GetFilesClient() { - return Volatile.Read(ref _cachedFiles) ?? Interlocked.CompareExchange(ref _cachedFiles, new Files(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedFiles; + return Volatile.Read(ref _cachedFiles) ?? Interlocked.CompareExchange(ref _cachedFiles, new Files(_pipeline, _credential, _endpoint), null) ?? _cachedFiles; } /// Initializes a new instance of FineTuning. public virtual FineTuning GetFineTuningClient() { - return Volatile.Read(ref _cachedFineTuning) ?? Interlocked.CompareExchange(ref _cachedFineTuning, new FineTuning(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedFineTuning; + return Volatile.Read(ref _cachedFineTuning) ?? Interlocked.CompareExchange(ref _cachedFineTuning, new FineTuning(_pipeline, _credential, _endpoint), null) ?? _cachedFineTuning; } /// Initializes a new instance of Images. public virtual Images GetImagesClient() { - return Volatile.Read(ref _cachedImages) ?? Interlocked.CompareExchange(ref _cachedImages, new Images(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedImages; + return Volatile.Read(ref _cachedImages) ?? Interlocked.CompareExchange(ref _cachedImages, new Images(_pipeline, _credential, _endpoint), null) ?? _cachedImages; } /// Initializes a new instance of Messages. public virtual Messages GetMessagesClient() { - return Volatile.Read(ref _cachedMessages) ?? 
Interlocked.CompareExchange(ref _cachedMessages, new Messages(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedMessages; + return Volatile.Read(ref _cachedMessages) ?? Interlocked.CompareExchange(ref _cachedMessages, new Messages(_pipeline, _credential, _endpoint), null) ?? _cachedMessages; } /// Initializes a new instance of ModelsOps. public virtual ModelsOps GetModelsOpsClient() { - return Volatile.Read(ref _cachedModelsOps) ?? Interlocked.CompareExchange(ref _cachedModelsOps, new ModelsOps(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedModelsOps; + return Volatile.Read(ref _cachedModelsOps) ?? Interlocked.CompareExchange(ref _cachedModelsOps, new ModelsOps(_pipeline, _credential, _endpoint), null) ?? _cachedModelsOps; } /// Initializes a new instance of Moderations. public virtual Moderations GetModerationsClient() { - return Volatile.Read(ref _cachedModerations) ?? Interlocked.CompareExchange(ref _cachedModerations, new Moderations(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedModerations; + return Volatile.Read(ref _cachedModerations) ?? Interlocked.CompareExchange(ref _cachedModerations, new Moderations(_pipeline, _credential, _endpoint), null) ?? _cachedModerations; } /// Initializes a new instance of Runs. public virtual Runs GetRunsClient() { - return Volatile.Read(ref _cachedRuns) ?? Interlocked.CompareExchange(ref _cachedRuns, new Runs(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? _cachedRuns; + return Volatile.Read(ref _cachedRuns) ?? Interlocked.CompareExchange(ref _cachedRuns, new Runs(_pipeline, _credential, _endpoint), null) ?? _cachedRuns; } /// Initializes a new instance of Threads. public virtual Threads GetThreadsClient() { - return Volatile.Read(ref _cachedThreads) ?? Interlocked.CompareExchange(ref _cachedThreads, new Threads(ClientDiagnostics, _pipeline, _keyCredential, _endpoint), null) ?? 
_cachedThreads; +        return Volatile.Read(ref _cachedThreads) ?? Interlocked.CompareExchange(ref _cachedThreads, new Threads(_pipeline, _credential, _endpoint), null) ?? _cachedThreads;     } } } + diff --git a/.dotnet/src/Generated/OpenAIClientOptions.cs b/.dotnet/src/Generated/OpenAIClientOptions.cs index 5b661878a..31df0818d 100644 --- a/.dotnet/src/Generated/OpenAIClientOptions.cs +++ b/.dotnet/src/Generated/OpenAIClientOptions.cs @@ -1,13 +1,12 @@ // -#nullable disable - -using System.ClientModel; +using System.ClientModel.Primitives; namespace OpenAI { /// Client options for OpenAIClient. -    public partial class OpenAIClientOptions : RequestOptions +    public partial class OpenAIClientOptions : ClientPipelineOptions     { } } + diff --git a/.dotnet/src/Generated/OpenAIModelFactory.cs b/.dotnet/src/Generated/OpenAIModelFactory.cs index 494ab0f0a..c8de3f61a 100644 --- a/.dotnet/src/Generated/OpenAIModelFactory.cs +++ b/.dotnet/src/Generated/OpenAIModelFactory.cs @@ -1,7 +1,5 @@ // -#nullable disable - using System; using System.Collections.Generic; using System.Linq; @@ -576,7 +574,7 @@ public static CompletionUsage CompletionUsage(long promptTokens = default, long /// /// /// Generates `best_of` completions server-side and returns the "best" (the one with the highest -    /// log probability per token). Results cannot be streamed. +    /// log probability per token). Results cannot be streamed. /// /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies /// how many to return – `best_of` must be greater than `n`. 
@@ -1579,3 +1577,4 @@ public static DeleteThreadResponse DeleteThreadResponse(string id = null, bool d } } } + diff --git a/.dotnet/src/Generated/Runs.cs b/.dotnet/src/Generated/Runs.cs index fbe954204..05661caa5 100644 --- a/.dotnet/src/Generated/Runs.cs +++ b/.dotnet/src/Generated/Runs.cs @@ -1,12 +1,9 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; +using System.Text; using System.Threading; using System.Threading.Tasks; using OpenAI.Models; @@ -18,16 +15,13 @@ namespace OpenAI public partial class Runs { private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; + private readonly ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing support for the client library. - internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of Runs for mocking. protected Runs() @@ -35,44 +29,38 @@ protected Runs() } /// Initializes a new instance of Runs. - /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. + /// The key credential to copy. /// OpenAI Endpoint. 
- internal Runs(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + internal Runs(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) { - ClientDiagnostics = clientDiagnostics; _pipeline = pipeline; - _keyCredential = keyCredential; + _credential = credential; _endpoint = endpoint; } /// Create a thread and run it in one request. /// The to use. - /// The cancellation token to use. /// is null. - public virtual async Task> CreateThreadAndRunAsync(CreateThreadAndRunRequest threadAndRun, CancellationToken cancellationToken = default) + public virtual async Task> CreateThreadAndRunAsync(CreateThreadAndRunRequest threadAndRun) { - ClientUtilities.AssertNotNull(threadAndRun, nameof(threadAndRun)); + if (threadAndRun is null) throw new ArgumentNullException(nameof(threadAndRun)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = threadAndRun.ToRequestBody(); - Result result = await CreateThreadAndRunAsync(content, context).ConfigureAwait(false); - return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(threadAndRun); + ClientResult result = await CreateThreadAndRunAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Create a thread and run it in one request. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual Result CreateThreadAndRun(CreateThreadAndRunRequest threadAndRun, CancellationToken cancellationToken = default) + public virtual ClientResult CreateThreadAndRun(CreateThreadAndRunRequest threadAndRun) { - ClientUtilities.AssertNotNull(threadAndRun, nameof(threadAndRun)); + if (threadAndRun is null) throw new ArgumentNullException(nameof(threadAndRun)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = threadAndRun.ToRequestBody(); - Result result = CreateThreadAndRun(content, context); - return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(threadAndRun); + ClientResult result = CreateThreadAndRun(content); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -85,32 +73,30 @@ public virtual Result CreateThreadAndRun(CreateThreadAndRunRequest th /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateThreadAndRunAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateThreadAndRunAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateThreadAndRunRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Runs.CreateThreadAndRun"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateThreadAndRunRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -123,66 +109,62 @@ public virtual async Task CreateThreadAndRunAsync(RequestBody content, R /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateThreadAndRun(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateThreadAndRun(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateThreadAndRunRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Runs.CreateThreadAndRun"); - scope.Start(); - try + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateThreadAndRunRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Create a run. /// The ID of the thread to run. /// The to use. - /// The cancellation token to use. /// or is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> CreateRunAsync(string threadId, CreateRunRequest run, CancellationToken cancellationToken = default) + public virtual async Task> CreateRunAsync(string threadId, CreateRunRequest run) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(run, nameof(run)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (run is null) throw new ArgumentNullException(nameof(run)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = run.ToRequestBody(); - Result result = await CreateRunAsync(threadId, content, context).ConfigureAwait(false); - return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(run); + ClientResult result = await CreateRunAsync(threadId, content).ConfigureAwait(false); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Create a run. /// The ID of the thread to run. /// The to use. - /// The cancellation token to use. /// or is null. /// is an empty string, and was expected to be non-empty. 
- public virtual Result CreateRun(string threadId, CreateRunRequest run, CancellationToken cancellationToken = default) + public virtual ClientResult CreateRun(string threadId, CreateRunRequest run) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(run, nameof(run)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (run is null) throw new ArgumentNullException(nameof(run)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = run.ToRequestBody(); - Result result = CreateRun(threadId, content, context); - return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(run); + ClientResult result = CreateRun(threadId, content); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -195,35 +177,34 @@ public virtual Result CreateRun(string threadId, CreateRunRequest run /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the thread to run. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateRunAsync(string threadId, RequestBody content, RequestOptions context = null) + public virtual async Task CreateRunAsync(string threadId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.CreateRun"); - scope.Start(); - try - { - using PipelineMessage message = CreateCreateRunRequest(threadId, content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateRunRequest(threadId, content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -236,35 +217,34 @@ public virtual async Task CreateRunAsync(string threadId, RequestBody co /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the thread to run. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. 
/// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result CreateRun(string threadId, RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateRun(string threadId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.CreateRun"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateRunRequest(threadId, content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateRunRequest(threadId, content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Returns a list of runs belonging to a thread. @@ -287,16 +267,15 @@ public virtual Result CreateRun(string threadId, RequestBody content, RequestOpt /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual async Task> GetRunsAsync(string threadId, int? 
limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + public virtual async Task> GetRunsAsync(string threadId, int? limit = null, ListOrder? order = null, string after = null, string before = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetRunsAsync(threadId, limit, order?.ToString(), after, before, context).ConfigureAwait(false); - return Result.FromValue(ListRunsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetRunsAsync(threadId, limit, order?.ToString(), after, before).ConfigureAwait(false); + return ClientResult.FromValue(ListRunsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Returns a list of runs belonging to a thread. @@ -319,16 +298,15 @@ public virtual async Task> GetRunsAsync(string threadId /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual Result GetRuns(string threadId, int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + public virtual ClientResult GetRuns(string threadId, int? limit = null, ListOrder? 
order = null, string after = null, string before = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetRuns(threadId, limit, order?.ToString(), after, before, context); - return Result.FromValue(ListRunsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetRuns(threadId, limit, order?.ToString(), after, before); + return ClientResult.FromValue(ListRunsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -341,7 +319,7 @@ public virtual Result GetRuns(string threadId, int? limit = nu /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -365,27 +343,26 @@ public virtual Result GetRuns(string threadId, int? limit = nu /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task GetRunsAsync(string threadId, int? limit, string order, string after, string before, RequestOptions context) + public virtual async Task GetRunsAsync(string threadId, int? 
limit, string order, string after, string before, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.GetRuns"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetRunsRequest(threadId, limit, order, after, before, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetRunsRequest(threadId, limit, order, after, before, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -398,7 +375,7 @@ public virtual async Task GetRunsAsync(string threadId, int? limit, stri /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -422,59 +399,58 @@ public virtual async Task GetRunsAsync(string threadId, int? limit, stri /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. 
- /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result GetRuns(string threadId, int? limit, string order, string after, string before, RequestOptions context) + public virtual ClientResult GetRuns(string threadId, int? limit, string order, string after, string before, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.GetRuns"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetRunsRequest(threadId, limit, order, after, before, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateGetRunsRequest(threadId, limit, order, after, before, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Retrieves a run. /// The ID of the [thread](/docs/api-reference/threads) that was run. /// The ID of the run to retrieve. - /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. 
- public virtual async Task> GetRunAsync(string threadId, string runId, CancellationToken cancellationToken = default) + public virtual async Task> GetRunAsync(string threadId, string runId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetRunAsync(threadId, runId, context).ConfigureAwait(false); - return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetRunAsync(threadId, runId).ConfigureAwait(false); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Retrieves a run. /// The ID of the [thread](/docs/api-reference/threads) that was run. /// The ID of the run to retrieve. - /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. 
- public virtual Result GetRun(string threadId, string runId, CancellationToken cancellationToken = default) + public virtual ClientResult GetRun(string threadId, string runId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetRun(threadId, runId, context); - return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetRun(threadId, runId); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -487,35 +463,35 @@ public virtual Result GetRun(string threadId, string runId, Cancellat /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the [thread](/docs/api-reference/threads) that was run. /// The ID of the run to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task GetRunAsync(string threadId, string runId, RequestOptions context) + public virtual async Task GetRunAsync(string threadId, string runId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.GetRun"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetRunRequest(threadId, runId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetRunRequest(threadId, runId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -528,73 +504,73 @@ public virtual async Task GetRunAsync(string threadId, string runId, Req /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the [thread](/docs/api-reference/threads) that was run. /// The ID of the run to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. 
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result GetRun(string threadId, string runId, RequestOptions context) + public virtual ClientResult GetRun(string threadId, string runId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.GetRun"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetRunRequest(threadId, runId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetRunRequest(threadId, runId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Modifies a run. /// The ID of the [thread](/docs/api-reference/threads) that was run. /// The ID of the run to modify. /// The to use. - /// The cancellation token to use. /// , or is null. /// or is an empty string, and was expected to be non-empty. 
- public virtual async Task> ModifyRunAsync(string threadId, string runId, ModifyRunRequest run, CancellationToken cancellationToken = default) + public virtual async Task> ModifyRunAsync(string threadId, string runId, ModifyRunRequest run) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - ClientUtilities.AssertNotNull(run, nameof(run)); - - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = run.ToRequestBody(); - Result result = await ModifyRunAsync(threadId, runId, content, context).ConfigureAwait(false); - return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + if (run is null) throw new ArgumentNullException(nameof(run)); + + using BinaryContent content = BinaryContent.Create(run); + ClientResult result = await ModifyRunAsync(threadId, runId, content).ConfigureAwait(false); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Modifies a run. /// The ID of the [thread](/docs/api-reference/threads) that was run. /// The ID of the run to modify. /// The to use. - /// The cancellation token to use. /// , or is null. /// or is an empty string, and was expected to be non-empty. 
- public virtual Result ModifyRun(string threadId, string runId, ModifyRunRequest run, CancellationToken cancellationToken = default) + public virtual ClientResult ModifyRun(string threadId, string runId, ModifyRunRequest run) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - ClientUtilities.AssertNotNull(run, nameof(run)); - - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = run.ToRequestBody(); - Result result = ModifyRun(threadId, runId, content, context); - return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + if (run is null) throw new ArgumentNullException(nameof(run)); + + using BinaryContent content = BinaryContent.Create(run); + ClientResult result = ModifyRun(threadId, runId, content); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -607,7 +583,7 @@ public virtual Result ModifyRun(string threadId, string runId, Modify /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -615,29 +591,29 @@ public virtual Result ModifyRun(string threadId, string runId, Modify /// The ID of the [thread](/docs/api-reference/threads) that was run. /// The ID of the run to modify. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. 
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// , or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task ModifyRunAsync(string threadId, string runId, RequestBody content, RequestOptions context = null) + public virtual async Task ModifyRunAsync(string threadId, string runId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.ModifyRun"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateModifyRunRequest(threadId, runId, content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateModifyRunRequest(threadId, runId, content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -650,7 +626,7 @@ public 
virtual async Task ModifyRunAsync(string threadId, string runId, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -658,61 +634,61 @@ public virtual async Task ModifyRunAsync(string threadId, string runId, /// The ID of the [thread](/docs/api-reference/threads) that was run. /// The ID of the run to modify. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// , or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result ModifyRun(string threadId, string runId, RequestBody content, RequestOptions context = null) + public virtual ClientResult ModifyRun(string threadId, string runId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.ModifyRun"); - scope.Start(); - try - { - using PipelineMessage message = CreateModifyRunRequest(threadId, runId, content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + if (content is null) 
throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateModifyRunRequest(threadId, runId, content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Cancels a run that is `in_progress`. /// The ID of the thread to which this run belongs. /// The ID of the run to cancel. - /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - public virtual async Task> CancelRunAsync(string threadId, string runId, CancellationToken cancellationToken = default) + public virtual async Task> CancelRunAsync(string threadId, string runId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await CancelRunAsync(threadId, runId, context).ConfigureAwait(false); - return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await CancelRunAsync(threadId, runId).ConfigureAwait(false); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Cancels a run that is `in_progress`. /// The ID of the thread to which this run belongs. /// The ID of the run to cancel. - /// The cancellation token to use. /// or is null. 
/// or is an empty string, and was expected to be non-empty. - public virtual Result CancelRun(string threadId, string runId, CancellationToken cancellationToken = default) + public virtual ClientResult CancelRun(string threadId, string runId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = CancelRun(threadId, runId, context); - return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = CancelRun(threadId, runId); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -725,35 +701,35 @@ public virtual Result CancelRun(string threadId, string runId, Cancel /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the thread to which this run belongs. /// The ID of the run to cancel. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CancelRunAsync(string threadId, string runId, RequestOptions context) + public virtual async Task CancelRunAsync(string threadId, string runId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.CancelRun"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCancelRunRequest(threadId, runId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCancelRunRequest(threadId, runId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -766,35 +742,35 @@ public virtual async Task CancelRunAsync(string threadId, string runId, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the thread to which this run belongs. /// The ID of the run to cancel. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. 
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result CancelRun(string threadId, string runId, RequestOptions context) + public virtual ClientResult CancelRun(string threadId, string runId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.CancelRun"); - scope.Start(); - try - { - using PipelineMessage message = CreateCancelRunRequest(threadId, runId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCancelRunRequest(threadId, runId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// @@ -805,19 +781,19 @@ public virtual Result CancelRun(string threadId, string runId, RequestOptions co /// The ID of the [thread](/docs/api-reference/threads) to which this run belongs. /// The ID of the run that requires the tool output submission. /// The to use. - /// The cancellation token to use. /// , or is null. 
/// or is an empty string, and was expected to be non-empty. - public virtual async Task> SubmitToolOuputsToRunAsync(string threadId, string runId, SubmitToolOutputsRunRequest submitToolOutputsRun, CancellationToken cancellationToken = default) + public virtual async Task> SubmitToolOuputsToRunAsync(string threadId, string runId, SubmitToolOutputsRunRequest submitToolOutputsRun) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - ClientUtilities.AssertNotNull(submitToolOutputsRun, nameof(submitToolOutputsRun)); - - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = submitToolOutputsRun.ToRequestBody(); - Result result = await SubmitToolOuputsToRunAsync(threadId, runId, content, context).ConfigureAwait(false); - return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + if (submitToolOutputsRun is null) throw new ArgumentNullException(nameof(submitToolOutputsRun)); + + using BinaryContent content = BinaryContent.Create(submitToolOutputsRun); + ClientResult result = await SubmitToolOuputsToRunAsync(threadId, runId, content).ConfigureAwait(false); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -828,19 +804,19 @@ public virtual async Task> SubmitToolOuputsToRunAsync(string t /// The ID of the [thread](/docs/api-reference/threads) to which this run belongs. /// The ID of the run that requires the tool output submission. /// The to use. - /// The cancellation token to use. /// , or is null. 
/// or is an empty string, and was expected to be non-empty. - public virtual Result SubmitToolOuputsToRun(string threadId, string runId, SubmitToolOutputsRunRequest submitToolOutputsRun, CancellationToken cancellationToken = default) + public virtual ClientResult SubmitToolOuputsToRun(string threadId, string runId, SubmitToolOutputsRunRequest submitToolOutputsRun) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - ClientUtilities.AssertNotNull(submitToolOutputsRun, nameof(submitToolOutputsRun)); - - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = submitToolOutputsRun.ToRequestBody(); - Result result = SubmitToolOuputsToRun(threadId, runId, content, context); - return Result.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + if (submitToolOutputsRun is null) throw new ArgumentNullException(nameof(submitToolOutputsRun)); + + using BinaryContent content = BinaryContent.Create(submitToolOutputsRun); + ClientResult result = SubmitToolOuputsToRun(threadId, runId, content); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -855,7 +831,7 @@ public virtual Result SubmitToolOuputsToRun(string threadId, string r /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. 
/// /// /// @@ -863,29 +839,29 @@ public virtual Result SubmitToolOuputsToRun(string threadId, string r /// The ID of the [thread](/docs/api-reference/threads) to which this run belongs. /// The ID of the run that requires the tool output submission. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// , or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task SubmitToolOuputsToRunAsync(string threadId, string runId, RequestBody content, RequestOptions context = null) + public virtual async Task SubmitToolOuputsToRunAsync(string threadId, string runId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.SubmitToolOuputsToRun"); - scope.Start(); - try - { - using PipelineMessage message = CreateSubmitToolOuputsToRunRequest(threadId, runId, content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage 
message = CreateSubmitToolOuputsToRunRequest(threadId, runId, content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -900,7 +876,7 @@ public virtual async Task SubmitToolOuputsToRunAsync(string threadId, st /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -908,29 +884,29 @@ public virtual async Task SubmitToolOuputsToRunAsync(string threadId, st /// The ID of the [thread](/docs/api-reference/threads) to which this run belongs. /// The ID of the run that requires the tool output submission. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// , or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result SubmitToolOuputsToRun(string threadId, string runId, RequestBody content, RequestOptions context = null) + public virtual ClientResult SubmitToolOuputsToRun(string threadId, string runId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.SubmitToolOuputsToRun"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateSubmitToolOuputsToRunRequest(threadId, runId, content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateSubmitToolOuputsToRunRequest(threadId, runId, content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Returns a list of run steps belonging to a run. @@ -954,17 +930,17 @@ public virtual Result SubmitToolOuputsToRun(string threadId, string runId, Reque /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The cancellation token to use. /// or is null. 
/// or is an empty string, and was expected to be non-empty. - public virtual async Task> GetRunStepsAsync(string threadId, string runId, int? limit = null, ListOrder? order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + public virtual async Task> GetRunStepsAsync(string threadId, string runId, int? limit = null, ListOrder? order = null, string after = null, string before = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetRunStepsAsync(threadId, runId, limit, order?.ToString(), after, before, context).ConfigureAwait(false); - return Result.FromValue(ListRunStepsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetRunStepsAsync(threadId, runId, limit, order?.ToString(), after, before).ConfigureAwait(false); + return ClientResult.FromValue(ListRunStepsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Returns a list of run steps belonging to a run. @@ -988,17 +964,17 @@ public virtual async Task> GetRunStepsAsync(string /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - public virtual Result GetRunSteps(string threadId, string runId, int? limit = null, ListOrder? 
order = null, string after = null, string before = null, CancellationToken cancellationToken = default) + public virtual ClientResult GetRunSteps(string threadId, string runId, int? limit = null, ListOrder? order = null, string after = null, string before = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetRunSteps(threadId, runId, limit, order?.ToString(), after, before, context); - return Result.FromValue(ListRunStepsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetRunSteps(threadId, runId, limit, order?.ToString(), after, before); + return ClientResult.FromValue(ListRunStepsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -1011,7 +987,7 @@ public virtual Result GetRunSteps(string threadId, string /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -1036,28 +1012,28 @@ public virtual Result GetRunSteps(string threadId, string /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. 
/// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task GetRunStepsAsync(string threadId, string runId, int? limit, string order, string after, string before, RequestOptions context) + public virtual async Task GetRunStepsAsync(string threadId, string runId, int? limit, string order, string after, string before, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.GetRunSteps"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetRunStepsRequest(threadId, runId, limit, order, after, before, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetRunStepsRequest(threadId, runId, limit, order, after, before, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -1070,7 +1046,7 @@ public virtual async Task GetRunStepsAsync(string threadId, string runId /// /// /// - /// Please try the simpler convenience overload with strongly typed models 
first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -1095,64 +1071,66 @@ public virtual async Task GetRunStepsAsync(string threadId, string runId /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result GetRunSteps(string threadId, string runId, int? limit, string order, string after, string before, RequestOptions context) + public virtual ClientResult GetRunSteps(string threadId, string runId, int? 
limit, string order, string after, string before, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.GetRunSteps"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetRunStepsRequest(threadId, runId, limit, order, after, before, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateGetRunStepsRequest(threadId, runId, limit, order, after, before, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Retrieves a run step. /// The ID of the thread to which the run and run step belongs. /// The ID of the run to which the run step belongs. /// The ID of the run step to retrieve. - /// The cancellation token to use. /// , or is null. /// , or is an empty string, and was expected to be non-empty. 
- public virtual async Task> GetRunStepAsync(string threadId, string runId, string stepId, CancellationToken cancellationToken = default) + public virtual async Task> GetRunStepAsync(string threadId, string runId, string stepId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - ClientUtilities.AssertNotNullOrEmpty(stepId, nameof(stepId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetRunStepAsync(threadId, runId, stepId, context).ConfigureAwait(false); - return Result.FromValue(RunStepObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + if (stepId is null) throw new ArgumentNullException(nameof(stepId)); + if (string.IsNullOrEmpty(stepId)) throw new ArgumentException(nameof(stepId)); + + ClientResult result = await GetRunStepAsync(threadId, runId, stepId).ConfigureAwait(false); + return ClientResult.FromValue(RunStepObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Retrieves a run step. /// The ID of the thread to which the run and run step belongs. /// The ID of the run to which the run step belongs. /// The ID of the run step to retrieve. - /// The cancellation token to use. /// , or is null. /// , or is an empty string, and was expected to be non-empty. 
- public virtual Result GetRunStep(string threadId, string runId, string stepId, CancellationToken cancellationToken = default) + public virtual ClientResult GetRunStep(string threadId, string runId, string stepId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - ClientUtilities.AssertNotNullOrEmpty(stepId, nameof(stepId)); - - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetRunStep(threadId, runId, stepId, context); - return Result.FromValue(RunStepObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + if (stepId is null) throw new ArgumentNullException(nameof(stepId)); + if (string.IsNullOrEmpty(stepId)) throw new ArgumentException(nameof(stepId)); + + ClientResult result = GetRunStep(threadId, runId, stepId); + return ClientResult.FromValue(RunStepObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -1165,7 +1143,7 @@ public virtual Result GetRunStep(string threadId, string runId, s /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -1173,29 +1151,30 @@ public virtual Result GetRunStep(string threadId, string runId, s /// The ID of the thread to which the run and run step belongs. /// The ID of the run to which the run step belongs. /// The ID of the run step to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. 
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// , or is null. /// , or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task GetRunStepAsync(string threadId, string runId, string stepId, RequestOptions context) + public virtual async Task GetRunStepAsync(string threadId, string runId, string stepId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - ClientUtilities.AssertNotNullOrEmpty(stepId, nameof(stepId)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.GetRunStep"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetRunStepRequest(threadId, runId, stepId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + if (stepId is null) throw new ArgumentNullException(nameof(stepId)); + if (string.IsNullOrEmpty(stepId)) throw new ArgumentException(nameof(stepId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetRunStepRequest(threadId, runId, stepId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return 
ClientResult.FromResponse(response); } /// @@ -1208,7 +1187,7 @@ public virtual async Task GetRunStepAsync(string threadId, string runId, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -1216,227 +1195,323 @@ public virtual async Task GetRunStepAsync(string threadId, string runId, /// The ID of the thread to which the run and run step belongs. /// The ID of the run to which the run step belongs. /// The ID of the run step to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// , or is null. /// , or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result GetRunStep(string threadId, string runId, string stepId, RequestOptions context) + public virtual ClientResult GetRunStep(string threadId, string runId, string stepId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNullOrEmpty(runId, nameof(runId)); - ClientUtilities.AssertNotNullOrEmpty(stepId, nameof(stepId)); - - using var scope = ClientDiagnostics.CreateSpan("Runs.GetRunStep"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (runId is null) throw new ArgumentNullException(nameof(runId)); + if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); + if (stepId is null) throw new ArgumentNullException(nameof(stepId)); + if (string.IsNullOrEmpty(stepId)) throw new ArgumentException(nameof(stepId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetRunStepRequest(threadId, runId, stepId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateGetRunStepRequest(threadId, runId, stepId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } - internal PipelineMessage CreateCreateThreadAndRunRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateThreadAndRunRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - 
uri.AppendPath("/threads/runs", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/runs"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateCreateRunRequest(string threadId, RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateRunRequest(string threadId, BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - uri.AppendPath("/runs", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/runs"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", 
"application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateGetRunsRequest(string threadId, int? limit, string order, string after, string before, RequestOptions context) + internal PipelineMessage CreateGetRunsRequest(string threadId, int? limit, string order, string after, string before, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - uri.AppendPath("/runs", false); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/runs"); + uriBuilder.Path += path.ToString(); if (limit != null) { - uri.AppendQuery("limit", limit.Value, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } } if (order != null) { - uri.AppendQuery("order", order, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&order={order}"; + } + else + { + uriBuilder.Query = $"order={order}"; + } } if (after != null) { - uri.AppendQuery("after", after, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } } if (before != null) { - uri.AppendQuery("before", before, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query 
+= $"&before={before}"; + } + else + { + uriBuilder.Query = $"before={before}"; + } } - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateGetRunRequest(string threadId, string runId, RequestOptions context) + internal PipelineMessage CreateGetRunRequest(string threadId, string runId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - uri.AppendPath("/runs/", false); - uri.AppendPath(runId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/runs/"); + uriBuilder.Path += path.ToString(); + path.Append(runId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateModifyRunRequest(string threadId, string runId, RequestBody content, RequestOptions context) + internal PipelineMessage CreateModifyRunRequest(string threadId, string runId, BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", 
false); - uri.AppendPath(threadId, true); - uri.AppendPath("/runs/", false); - uri.AppendPath(runId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/runs/"); + uriBuilder.Path += path.ToString(); + path.Append(runId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateCancelRunRequest(string threadId, string runId, RequestOptions context) + internal PipelineMessage CreateCancelRunRequest(string threadId, string runId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - uri.AppendPath("/runs/", false); - uri.AppendPath(runId, true); - uri.AppendPath("/cancel", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + 
path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/runs/"); + uriBuilder.Path += path.ToString(); + path.Append(runId); + uriBuilder.Path += path.ToString(); + path.Append("/cancel"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateSubmitToolOuputsToRunRequest(string threadId, string runId, RequestBody content, RequestOptions context) + internal PipelineMessage CreateSubmitToolOuputsToRunRequest(string threadId, string runId, BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - uri.AppendPath("/runs/", false); - uri.AppendPath(runId, true); - uri.AppendPath("/submit_tool_outputs", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/runs/"); + uriBuilder.Path += path.ToString(); + path.Append(runId); + uriBuilder.Path += path.ToString(); + path.Append("/submit_tool_outputs"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage 
CreateGetRunStepsRequest(string threadId, string runId, int? limit, string order, string after, string before, RequestOptions context) + internal PipelineMessage CreateGetRunStepsRequest(string threadId, string runId, int? limit, string order, string after, string before, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - uri.AppendPath("/runs/", false); - uri.AppendPath(runId, true); - uri.AppendPath("/steps", false); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/runs/"); + uriBuilder.Path += path.ToString(); + path.Append(runId); + uriBuilder.Path += path.ToString(); + path.Append("/steps"); + uriBuilder.Path += path.ToString(); if (limit != null) { - uri.AppendQuery("limit", limit.Value, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } } if (order != null) { - uri.AppendQuery("order", order, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&order={order}"; + } + else + { + uriBuilder.Query = $"order={order}"; + } } if (after != null) { - uri.AppendQuery("after", after, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } } if (before != null) { - 
uri.AppendQuery("before", before, true); + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&before={before}"; + } + else + { + uriBuilder.Query = $"before={before}"; + } } - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateGetRunStepRequest(string threadId, string runId, string stepId, RequestOptions context) + internal PipelineMessage CreateGetRunStepRequest(string threadId, string runId, string stepId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - uri.AppendPath("/runs/", false); - uri.AppendPath(runId, true); - uri.AppendPath("/steps/", false); - uri.AppendPath(stepId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + path.Append("/runs/"); + uriBuilder.Path += path.ToString(); + path.Append(runId); + uriBuilder.Path += path.ToString(); + path.Append("/steps/"); + uriBuilder.Path += path.ToString(); + path.Append(stepId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions 
FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } + diff --git a/.dotnet/src/Generated/Threads.cs b/.dotnet/src/Generated/Threads.cs index b7ef0388d..462f88718 100644 --- a/.dotnet/src/Generated/Threads.cs +++ b/.dotnet/src/Generated/Threads.cs @@ -1,12 +1,9 @@ // -#nullable disable - using System; using System.ClientModel; -using System.ClientModel.Internal; using System.ClientModel.Primitives; -using System.ClientModel.Primitives.Pipeline; +using System.Text; using System.Threading; using System.Threading.Tasks; using OpenAI.Models; @@ -18,16 +15,13 @@ namespace OpenAI public partial class Threads { private const string AuthorizationHeader = "Authorization"; - private readonly KeyCredential _keyCredential; + private readonly ApiKeyCredential _credential; private const string AuthorizationApiKeyPrefix = "Bearer"; - private readonly MessagePipeline _pipeline; + private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - /// The ClientDiagnostics is used to provide tracing support for the client library. - internal TelemetrySource ClientDiagnostics { get; } - /// The HTTP pipeline for sending and receiving REST requests and responses. - public virtual MessagePipeline Pipeline => _pipeline; + public virtual ClientPipeline Pipeline => _pipeline; /// Initializes a new instance of Threads for mocking. 
protected Threads() @@ -35,44 +29,38 @@ protected Threads() } /// Initializes a new instance of Threads. - /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. + /// The key credential to copy. /// OpenAI Endpoint. - internal Threads(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint) + internal Threads(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) { - ClientDiagnostics = clientDiagnostics; _pipeline = pipeline; - _keyCredential = keyCredential; + _credential = credential; _endpoint = endpoint; } /// Create a thread. /// The to use. - /// The cancellation token to use. /// is null. - public virtual async Task> CreateThreadAsync(CreateThreadRequest thread, CancellationToken cancellationToken = default) + public virtual async Task> CreateThreadAsync(CreateThreadRequest thread) { - ClientUtilities.AssertNotNull(thread, nameof(thread)); + if (thread is null) throw new ArgumentNullException(nameof(thread)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = thread.ToRequestBody(); - Result result = await CreateThreadAsync(content, context).ConfigureAwait(false); - return Result.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(thread); + ClientResult result = await CreateThreadAsync(content).ConfigureAwait(false); + return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Create a thread. /// The to use. - /// The cancellation token to use. /// is null. 
- public virtual Result CreateThread(CreateThreadRequest thread, CancellationToken cancellationToken = default) + public virtual ClientResult CreateThread(CreateThreadRequest thread) { - ClientUtilities.AssertNotNull(thread, nameof(thread)); + if (thread is null) throw new ArgumentNullException(nameof(thread)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = thread.ToRequestBody(); - Result result = CreateThread(content, context); - return Result.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(thread); + ClientResult result = CreateThread(content); + return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -85,32 +73,30 @@ public virtual Result CreateThread(CreateThreadRequest thread, Can /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task CreateThreadAsync(RequestBody content, RequestOptions context = null) + public virtual async Task CreateThreadAsync(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateThreadRequest(content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Threads.CreateThread"); - scope.Start(); - try + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateThreadRequest(content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -123,60 +109,56 @@ public virtual async Task CreateThreadAsync(RequestBody content, Request /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result CreateThread(RequestBody content, RequestOptions context = null) + public virtual ClientResult CreateThread(BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNull(content, nameof(content)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateCreateThreadRequest(content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; - using var scope = ClientDiagnostics.CreateSpan("Threads.CreateThread"); - scope.Start(); - try + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateCreateThreadRequest(content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Retrieves a thread. /// The ID of the thread to retrieve. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> GetThreadAsync(string threadId, CancellationToken cancellationToken = default) + public virtual async Task> GetThreadAsync(string threadId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await GetThreadAsync(threadId, context).ConfigureAwait(false); - return Result.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await GetThreadAsync(threadId).ConfigureAwait(false); + return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Retrieves a thread. /// The ID of the thread to retrieve. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - public virtual Result GetThread(string threadId, CancellationToken cancellationToken = default) + public virtual ClientResult GetThread(string threadId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = GetThread(threadId, context); - return Result.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = GetThread(threadId); + return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -189,33 +171,32 @@ public virtual Result GetThread(string threadId, CancellationToken /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. 
+ /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the thread to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task GetThreadAsync(string threadId, RequestOptions context) + public virtual async Task GetThreadAsync(string threadId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - - using var scope = ClientDiagnostics.CreateSpan("Threads.GetThread"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetThreadRequest(threadId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetThreadRequest(threadId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -228,67 +209,64 @@ public virtual async Task GetThreadAsync(string threadId, RequestOptions /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. 
+ /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the thread to retrieve. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result GetThread(string threadId, RequestOptions context) + public virtual ClientResult GetThread(string threadId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - - using var scope = ClientDiagnostics.CreateSpan("Threads.GetThread"); - scope.Start(); - try - { - using PipelineMessage message = CreateGetThreadRequest(threadId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateGetThreadRequest(threadId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Modifies a thread. /// The ID of the thread to modify. Only the `metadata` can be modified. /// The to use. - /// The cancellation token to use. /// or is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> ModifyThreadAsync(string threadId, ModifyThreadRequest thread, CancellationToken cancellationToken = default) + public virtual async Task> ModifyThreadAsync(string threadId, ModifyThreadRequest thread) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(thread, nameof(thread)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (thread is null) throw new ArgumentNullException(nameof(thread)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = thread.ToRequestBody(); - Result result = await ModifyThreadAsync(threadId, content, context).ConfigureAwait(false); - return Result.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(thread); + ClientResult result = await ModifyThreadAsync(threadId, content).ConfigureAwait(false); + return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Modifies a thread. /// The ID of the thread to modify. Only the `metadata` can be modified. /// The to use. - /// The cancellation token to use. /// or is null. /// is an empty string, and was expected to be non-empty. 
- public virtual Result ModifyThread(string threadId, ModifyThreadRequest thread, CancellationToken cancellationToken = default) + public virtual ClientResult ModifyThread(string threadId, ModifyThreadRequest thread) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(thread, nameof(thread)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (thread is null) throw new ArgumentNullException(nameof(thread)); - RequestOptions context = FromCancellationToken(cancellationToken); - using RequestBody content = thread.ToRequestBody(); - Result result = ModifyThread(threadId, content, context); - return Result.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + using BinaryContent content = BinaryContent.Create(thread); + ClientResult result = ModifyThread(threadId, content); + return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -301,35 +279,34 @@ public virtual Result ModifyThread(string threadId, ModifyThreadRe /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the thread to modify. Only the `metadata` can be modified. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task ModifyThreadAsync(string threadId, RequestBody content, RequestOptions context = null) + public virtual async Task ModifyThreadAsync(string threadId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Threads.ModifyThread"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateModifyThreadRequest(threadId, content, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateModifyThreadRequest(threadId, content, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -342,63 +319,60 @@ public virtual async Task ModifyThreadAsync(string threadId, RequestBody /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the thread to modify. Only the `metadata` can be modified. /// The content to send as the body of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. 
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. - public virtual Result ModifyThread(string threadId, RequestBody content, RequestOptions context = null) + public virtual ClientResult ModifyThread(string threadId, BinaryContent content, RequestOptions options = null) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - ClientUtilities.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateSpan("Threads.ModifyThread"); - scope.Start(); - try - { - using PipelineMessage message = CreateModifyThreadRequest(threadId, content, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + if (content is null) throw new ArgumentNullException(nameof(content)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateModifyThreadRequest(threadId, content, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } /// Delete a thread. /// The ID of the thread to delete. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual async Task> DeleteThreadAsync(string threadId, CancellationToken cancellationToken = default) + public virtual async Task> DeleteThreadAsync(string threadId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = await DeleteThreadAsync(threadId, context).ConfigureAwait(false); - return Result.FromValue(DeleteThreadResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = await DeleteThreadAsync(threadId).ConfigureAwait(false); + return ClientResult.FromValue(DeleteThreadResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Delete a thread. /// The ID of the thread to delete. - /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. 
- public virtual Result DeleteThread(string threadId, CancellationToken cancellationToken = default) + public virtual ClientResult DeleteThread(string threadId) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - RequestOptions context = FromCancellationToken(cancellationToken); - Result result = DeleteThread(threadId, context); - return Result.FromValue(DeleteThreadResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + ClientResult result = DeleteThread(threadId); + return ClientResult.FromValue(DeleteThreadResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -411,33 +385,32 @@ public virtual Result DeleteThread(string threadId, Cancel /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the thread to delete. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual async Task DeleteThreadAsync(string threadId, RequestOptions context) + public virtual async Task DeleteThreadAsync(string threadId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - - using var scope = ClientDiagnostics.CreateSpan("Threads.DeleteThread"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateDeleteThreadRequest(threadId, options); + await _pipeline.SendAsync(message).ConfigureAwait(false); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateDeleteThreadRequest(threadId, context); - return Result.FromResponse(await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); } + + return ClientResult.FromResponse(response); } /// @@ -450,106 +423,108 @@ public virtual async Task DeleteThreadAsync(string threadId, RequestOpti /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the thread to delete. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. + /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual Result DeleteThread(string threadId, RequestOptions context) + public virtual ClientResult DeleteThread(string threadId, RequestOptions options) { - ClientUtilities.AssertNotNullOrEmpty(threadId, nameof(threadId)); - - using var scope = ClientDiagnostics.CreateSpan("Threads.DeleteThread"); - scope.Start(); - try + if (threadId is null) throw new ArgumentNullException(nameof(threadId)); + if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); + options ??= new RequestOptions(); + using PipelineMessage message = CreateDeleteThreadRequest(threadId, options); + _pipeline.Send(message); + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) { - using PipelineMessage message = CreateDeleteThreadRequest(threadId, context); - return Result.FromResponse(_pipeline.ProcessMessage(message, context)); - } - catch (Exception e) - { - scope.Failed(e); - throw; + throw new ClientResultException(response); } + + return ClientResult.FromResponse(response); } - internal PipelineMessage CreateCreateThreadRequest(RequestBody content, RequestOptions context) + internal PipelineMessage CreateCreateThreadRequest(BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads", false); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads"); + uriBuilder.Path += path.ToString(); + 
request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateGetThreadRequest(string threadId, RequestOptions context) + internal PipelineMessage CreateGetThreadRequest(string threadId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("GET"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - internal PipelineMessage CreateModifyThreadRequest(string threadId, RequestBody content, RequestOptions context) + internal PipelineMessage CreateModifyThreadRequest(string threadId, BinaryContent content, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("POST"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); - request.SetHeaderValue("Content-Type", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = 
ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); request.Content = content; + message.Apply(options); return message; } - internal PipelineMessage CreateDeleteThreadRequest(string threadId, RequestOptions context) + internal PipelineMessage CreateDeleteThreadRequest(string threadId, RequestOptions options) { - var message = _pipeline.CreateMessage(context, ResponseErrorClassifier200); - var request = message.Request; - request.SetMethod("DELETE"); - var uri = new RequestUri(); - uri.Reset(_endpoint); - uri.AppendPath("/threads/", false); - uri.AppendPath(threadId, true); - request.Uri = uri.ToUri(); - request.SetHeaderValue("Accept", "application/json"); + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "DELETE"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + uriBuilder.Path += path.ToString(); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); return message; } - private static RequestOptions DefaultRequestContext = new RequestOptions(); - internal static RequestOptions FromCancellationToken(CancellationToken cancellationToken = default) - { - if (!cancellationToken.CanBeCanceled) - { - return DefaultRequestContext; - } - - return new RequestOptions() { CancellationToken = cancellationToken }; - } - - private static ResponseErrorClassifier _responseErrorClassifier200; - private 
static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= new StatusResponseClassifier(stackalloc ushort[] { 200 }); + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } + diff --git a/.dotnet/src/OpenAI.csproj b/.dotnet/src/OpenAI.csproj index c07146eaf..c53137241 100644 --- a/.dotnet/src/OpenAI.csproj +++ b/.dotnet/src/OpenAI.csproj @@ -10,7 +10,7 @@ - + diff --git a/.dotnet/tests/Generated/Tests/AssistantsTests.cs b/.dotnet/tests/Generated/Tests/AssistantsTests.cs index ea014b7fb..6c1ba99d2 100644 --- a/.dotnet/tests/Generated/Tests/AssistantsTests.cs +++ b/.dotnet/tests/Generated/Tests/AssistantsTests.cs @@ -14,9 +14,10 @@ public partial class AssistantsTests [Test] public void SmokeTest() { - KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); Assistants client = new OpenAIClient(credential).GetAssistantsClient(); Assert.IsNotNull(client); } } } + diff --git a/.dotnet/tests/Generated/Tests/AudioTests.cs b/.dotnet/tests/Generated/Tests/AudioTests.cs index 8fe314b48..6ff9cd6f5 100644 --- a/.dotnet/tests/Generated/Tests/AudioTests.cs +++ b/.dotnet/tests/Generated/Tests/AudioTests.cs @@ -14,9 +14,10 @@ public partial class AudioTests [Test] public void SmokeTest() { - KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); Audio client = new OpenAIClient(credential).GetAudioClient(); Assert.IsNotNull(client); } } } + diff --git a/.dotnet/tests/Generated/Tests/ChatTests.cs b/.dotnet/tests/Generated/Tests/ChatTests.cs index 
eb5b40763..17a9746da 100644 --- a/.dotnet/tests/Generated/Tests/ChatTests.cs +++ b/.dotnet/tests/Generated/Tests/ChatTests.cs @@ -14,9 +14,10 @@ public partial class ChatTests [Test] public void SmokeTest() { - KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); Chat client = new OpenAIClient(credential).GetChatClient(); Assert.IsNotNull(client); } } } + diff --git a/.dotnet/tests/Generated/Tests/CompletionsTests.cs b/.dotnet/tests/Generated/Tests/CompletionsTests.cs index 3a0695403..159a4444d 100644 --- a/.dotnet/tests/Generated/Tests/CompletionsTests.cs +++ b/.dotnet/tests/Generated/Tests/CompletionsTests.cs @@ -14,9 +14,10 @@ public partial class CompletionsTests [Test] public void SmokeTest() { - KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); Completions client = new OpenAIClient(credential).GetCompletionsClient(); Assert.IsNotNull(client); } } } + diff --git a/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs b/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs index 8a6052960..b4deac0fc 100644 --- a/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs +++ b/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs @@ -14,9 +14,10 @@ public partial class EmbeddingsTests [Test] public void SmokeTest() { - KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); Embeddings client = new OpenAIClient(credential).GetEmbeddingsClient(); Assert.IsNotNull(client); } } } + diff --git a/.dotnet/tests/Generated/Tests/FilesTests.cs b/.dotnet/tests/Generated/Tests/FilesTests.cs index 9372d75f4..c08f7aee8 100644 --- 
a/.dotnet/tests/Generated/Tests/FilesTests.cs +++ b/.dotnet/tests/Generated/Tests/FilesTests.cs @@ -14,9 +14,10 @@ public partial class FilesTests [Test] public void SmokeTest() { - KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); Files client = new OpenAIClient(credential).GetFilesClient(); Assert.IsNotNull(client); } } } + diff --git a/.dotnet/tests/Generated/Tests/FineTuningTests.cs b/.dotnet/tests/Generated/Tests/FineTuningTests.cs index 40abbe22d..7504035a2 100644 --- a/.dotnet/tests/Generated/Tests/FineTuningTests.cs +++ b/.dotnet/tests/Generated/Tests/FineTuningTests.cs @@ -14,9 +14,10 @@ public partial class FineTuningTests [Test] public void SmokeTest() { - KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); FineTuning client = new OpenAIClient(credential).GetFineTuningClient(); Assert.IsNotNull(client); } } } + diff --git a/.dotnet/tests/Generated/Tests/ImagesTests.cs b/.dotnet/tests/Generated/Tests/ImagesTests.cs index a48795229..097054471 100644 --- a/.dotnet/tests/Generated/Tests/ImagesTests.cs +++ b/.dotnet/tests/Generated/Tests/ImagesTests.cs @@ -14,9 +14,10 @@ public partial class ImagesTests [Test] public void SmokeTest() { - KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); Images client = new OpenAIClient(credential).GetImagesClient(); Assert.IsNotNull(client); } } } + diff --git a/.dotnet/tests/Generated/Tests/MessagesTests.cs b/.dotnet/tests/Generated/Tests/MessagesTests.cs index 4b62bbf50..ab0223685 100644 --- a/.dotnet/tests/Generated/Tests/MessagesTests.cs +++ 
b/.dotnet/tests/Generated/Tests/MessagesTests.cs @@ -14,9 +14,10 @@ public partial class MessagesTests [Test] public void SmokeTest() { - KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); Messages client = new OpenAIClient(credential).GetMessagesClient(); Assert.IsNotNull(client); } } } + diff --git a/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs b/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs index 24c9896c0..0560956dc 100644 --- a/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs +++ b/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs @@ -14,9 +14,10 @@ public partial class ModelsOpsTests [Test] public void SmokeTest() { - KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); ModelsOps client = new OpenAIClient(credential).GetModelsOpsClient(); Assert.IsNotNull(client); } } } + diff --git a/.dotnet/tests/Generated/Tests/ModerationsTests.cs b/.dotnet/tests/Generated/Tests/ModerationsTests.cs index 138b487d3..7fc5a1d0b 100644 --- a/.dotnet/tests/Generated/Tests/ModerationsTests.cs +++ b/.dotnet/tests/Generated/Tests/ModerationsTests.cs @@ -14,9 +14,10 @@ public partial class ModerationsTests [Test] public void SmokeTest() { - KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); Moderations client = new OpenAIClient(credential).GetModerationsClient(); Assert.IsNotNull(client); } } } + diff --git a/.dotnet/tests/Generated/Tests/RunsTests.cs b/.dotnet/tests/Generated/Tests/RunsTests.cs index e3b8ee12b..bd4de7947 100644 --- a/.dotnet/tests/Generated/Tests/RunsTests.cs +++ 
b/.dotnet/tests/Generated/Tests/RunsTests.cs @@ -14,9 +14,10 @@ public partial class RunsTests [Test] public void SmokeTest() { - KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); Runs client = new OpenAIClient(credential).GetRunsClient(); Assert.IsNotNull(client); } } } + diff --git a/.dotnet/tests/Generated/Tests/ThreadsTests.cs b/.dotnet/tests/Generated/Tests/ThreadsTests.cs index 9b7ef6f78..b5ea58e10 100644 --- a/.dotnet/tests/Generated/Tests/ThreadsTests.cs +++ b/.dotnet/tests/Generated/Tests/ThreadsTests.cs @@ -14,9 +14,10 @@ public partial class ThreadsTests [Test] public void SmokeTest() { - KeyCredential credential = new KeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); Threads client = new OpenAIClient(credential).GetThreadsClient(); Assert.IsNotNull(client); } } } + From a1ba42ca112a4e482a1ccfbf8f98d83f85dee582 Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Tue, 20 Feb 2024 11:52:21 -0800 Subject: [PATCH 12/18] Fix Update-ClientModel.ps1 script --- .dotnet/scripts/Update-ClientModel.ps1 | 1 - 1 file changed, 1 deletion(-) diff --git a/.dotnet/scripts/Update-ClientModel.ps1 b/.dotnet/scripts/Update-ClientModel.ps1 index b8a9a590f..1e533dde4 100644 --- a/.dotnet/scripts/Update-ClientModel.ps1 +++ b/.dotnet/scripts/Update-ClientModel.ps1 @@ -5,7 +5,6 @@ function Update-ClientModelPackage { $current = Get-Location Set-Location -Path $directory - dotnet build dotnet remove "OpenAI.csproj" package "System.ClientModel" dotnet add "OpenAI.csproj" package "System.ClientModel" --version "1.1.0-alpha.20240215.1" --source "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-net/nuget/v3/index.json" From 54851e2b0617f56ed59ebbde546f6e8aad2ba3bd Mon 
Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Tue, 20 Feb 2024 12:50:02 -0800 Subject: [PATCH 13/18] Add embeddings example --- .dotnet/src/Custom/Embeddings/Embeddings.cs | 93 ++++++++++++++++ .../Models/Embedding.Serialization.cs | 67 ++++++++++++ .../src/Custom/Embeddings/Models/Embedding.cs | 68 ++++++++++++ .../Embeddings/Models/EmbeddingObject.cs | 10 ++ .../Models/GenerateEmbeddingsOptions.cs | 13 +++ .dotnet/src/Custom/OpenAIClient.cs | 22 ++++ .dotnet/src/Custom/OpenAIModelFactory.cs | 36 +++++++ .dotnet/src/Generated/Embeddings.cs | 20 ++-- .../CreateEmbeddingRequestEncodingFormat.cs | 47 -------- .../Models/CreateEmbeddingRequestModel.cs | 50 --------- .../Models/CreateEmbeddingResponseObject.cs | 44 -------- .../Models/Embedding.Serialization.cs | 40 ------- .dotnet/src/Generated/Models/Embedding.cs | 45 -------- ...s => EmbeddingCollection.Serialization.cs} | 48 ++++----- ...dingResponse.cs => EmbeddingCollection.cs} | 20 ++-- .../Models/EmbeddingCollectionObject.cs | 44 ++++++++ .../src/Generated/Models/EmbeddingObject.cs | 2 +- ...s => EmbeddingTokenUsage.Serialization.cs} | 40 +++---- ...esponseUsage.cs => EmbeddingTokenUsage.cs} | 16 +-- ...enerateEmbeddingsOptions.Serialization.cs} | 48 ++++----- ...equest.cs => GenerateEmbeddingsOptions.cs} | 78 ++------------ ...GenerateEmbeddingsOptionsEncodingFormat.cs | 47 ++++++++ .../Models/GenerateEmbeddingsOptionsModel.cs | 50 +++++++++ .dotnet/src/Generated/OpenAIClient.cs | 6 -- .dotnet/src/Generated/OpenAIModelFactory.cs | 61 ++--------- .dotnet/tests/EmbeddingsTests.cs | 101 ++++++++++++++++++ .../tests/Generated/Tests/EmbeddingsTests.cs | 23 ---- embeddings/models.tsp | 19 ++-- main.tsp | 11 ++ tsp-output/@typespec/openapi3/openapi.yaml | 29 ++--- 30 files changed, 702 insertions(+), 496 deletions(-) create mode 100644 .dotnet/src/Custom/Embeddings/Embeddings.cs create mode 100644 .dotnet/src/Custom/Embeddings/Models/Embedding.Serialization.cs create mode 100644 
.dotnet/src/Custom/Embeddings/Models/Embedding.cs create mode 100644 .dotnet/src/Custom/Embeddings/Models/EmbeddingObject.cs create mode 100644 .dotnet/src/Custom/Embeddings/Models/GenerateEmbeddingsOptions.cs create mode 100644 .dotnet/src/Custom/OpenAIClient.cs create mode 100644 .dotnet/src/Custom/OpenAIModelFactory.cs delete mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs delete mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs delete mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs rename .dotnet/src/Generated/Models/{CreateEmbeddingResponse.Serialization.cs => EmbeddingCollection.Serialization.cs} (64%) rename .dotnet/src/Generated/Models/{CreateEmbeddingResponse.cs => EmbeddingCollection.cs} (77%) create mode 100644 .dotnet/src/Generated/Models/EmbeddingCollectionObject.cs rename .dotnet/src/Generated/Models/{CreateEmbeddingResponseUsage.Serialization.cs => EmbeddingTokenUsage.Serialization.cs} (62%) rename .dotnet/src/Generated/Models/{CreateEmbeddingResponseUsage.cs => EmbeddingTokenUsage.cs} (77%) rename .dotnet/src/Generated/Models/{CreateEmbeddingRequest.Serialization.cs => GenerateEmbeddingsOptions.Serialization.cs} (67%) rename .dotnet/src/Generated/Models/{CreateEmbeddingRequest.cs => GenerateEmbeddingsOptions.cs} (58%) create mode 100644 .dotnet/src/Generated/Models/GenerateEmbeddingsOptionsEncodingFormat.cs create mode 100644 .dotnet/src/Generated/Models/GenerateEmbeddingsOptionsModel.cs create mode 100644 .dotnet/tests/EmbeddingsTests.cs delete mode 100644 .dotnet/tests/Generated/Tests/EmbeddingsTests.cs diff --git a/.dotnet/src/Custom/Embeddings/Embeddings.cs b/.dotnet/src/Custom/Embeddings/Embeddings.cs new file mode 100644 index 000000000..1dad5bf62 --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/Embeddings.cs @@ -0,0 +1,93 @@ +using OpenAI.Models; +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; 
+using System.Linq; +using System.Threading.Tasks; + +namespace OpenAI +{ + public partial class Embeddings + { + private readonly string _model; + + /// Initializes a new instance of Embeddings. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Embeddings(string model, ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) + : this(pipeline, credential, endpoint) + { + _model = model; + } + + public virtual async Task> GenerateEmbeddingAsync(string input, GenerateEmbeddingsOptions options = null) + { + options ??= new GenerateEmbeddingsOptions(); + options.Model = new GenerateEmbeddingsOptionsModel(_model); + options.Input = BinaryData.FromString(input); + ClientResult result = await CreateEmbeddingAsync(options).ConfigureAwait(false); + return ClientResult.FromValue(result.Value.Data[0], result.GetRawResponse()); + } + + public virtual ClientResult GenerateEmbedding(string input, GenerateEmbeddingsOptions options = null) + { + options ??= new GenerateEmbeddingsOptions(); + options.Model = new GenerateEmbeddingsOptionsModel(_model); + options.Input = BinaryData.FromObjectAsJson(input); + ClientResult result = CreateEmbedding(options); + return ClientResult.FromValue(result.Value.Data[0], result.GetRawResponse()); + } + + public virtual async Task> GenerateEmbeddingAsync(IEnumerable input, GenerateEmbeddingsOptions options = null) + { + options ??= new GenerateEmbeddingsOptions(); + options.Model = new GenerateEmbeddingsOptionsModel(_model); + options.Input = BinaryData.FromObjectAsJson(input.ToArray()); + ClientResult result = await CreateEmbeddingAsync(options).ConfigureAwait(false); + return ClientResult.FromValue(result.Value.Data[0], result.GetRawResponse()); + } + + public virtual ClientResult GenerateEmbedding(IEnumerable input, GenerateEmbeddingsOptions options = null) + { + options ??= new GenerateEmbeddingsOptions(); + options.Model = new 
GenerateEmbeddingsOptionsModel(_model); + options.Input = BinaryData.FromObjectAsJson(input.ToArray()); + ClientResult result = CreateEmbedding(options); + return ClientResult.FromValue(result.Value.Data[0], result.GetRawResponse()); + } + + public virtual async Task> GenerateEmbeddingsAsync(IEnumerable inputs, GenerateEmbeddingsOptions options = null) + { + options ??= new GenerateEmbeddingsOptions(); + options.Model = new GenerateEmbeddingsOptionsModel(_model); + options.Input = BinaryData.FromObjectAsJson(inputs.ToArray()); + return await CreateEmbeddingAsync(options).ConfigureAwait(false); + } + + public virtual ClientResult GenerateEmbeddings(IEnumerable inputs, GenerateEmbeddingsOptions options = null) + { + options ??= new GenerateEmbeddingsOptions(); + options.Model = new GenerateEmbeddingsOptionsModel(_model); + options.Input = BinaryData.FromObjectAsJson(inputs.ToArray()); + return CreateEmbedding(options); + } + + public virtual async Task> GenerateEmbeddingsAsync(IEnumerable> inputs, GenerateEmbeddingsOptions options = null) + { + options ??= new GenerateEmbeddingsOptions(); + options.Model = new GenerateEmbeddingsOptionsModel(_model); + options.Input = BinaryData.FromObjectAsJson(inputs.ToArray()); + return await CreateEmbeddingAsync(options).ConfigureAwait(false); + } + + public virtual ClientResult GenerateEmbeddings(IEnumerable> inputs, GenerateEmbeddingsOptions options = null) + { + options ??= new GenerateEmbeddingsOptions(); + options.Model = new GenerateEmbeddingsOptionsModel(_model); + options.Input = BinaryData.FromObjectAsJson(inputs.ToArray()); + return CreateEmbedding(options); + } + } +} diff --git a/.dotnet/src/Custom/Embeddings/Models/Embedding.Serialization.cs b/.dotnet/src/Custom/Embeddings/Models/Embedding.Serialization.cs new file mode 100644 index 000000000..602547f22 --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/Models/Embedding.Serialization.cs @@ -0,0 +1,67 @@ +using System; +using System.ClientModel.Primitives; +using 
System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Models +{ + public partial class Embedding + { + internal static Embedding DeserializeEmbedding(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long index = default; + BinaryData embedding = default; + EmbeddingObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("index"u8)) + { + index = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("embedding"u8)) + { + embedding = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new EmbeddingObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + + ReadOnlyMemory? 
embeddingAsFloats = default; + BinaryData embeddingAsBase64Data = default; + JsonDocument doc = JsonDocument.Parse(embedding); + + if (doc.RootElement.ValueKind == JsonValueKind.Array) + { + List floats = new(); + foreach (var item in doc.RootElement.EnumerateArray()) + { + floats.Add(item.GetSingle()); + } + embeddingAsFloats = new ReadOnlyMemory(floats.ToArray()); + } + else if (doc.RootElement.ValueKind == JsonValueKind.String) + { + embeddingAsBase64Data = embedding; + } + + return new(index, embedding, @object, serializedAdditionalRawData, embeddingAsFloats, embeddingAsBase64Data); + } + } +} diff --git a/.dotnet/src/Custom/Embeddings/Models/Embedding.cs b/.dotnet/src/Custom/Embeddings/Models/Embedding.cs new file mode 100644 index 000000000..0cf994146 --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/Models/Embedding.cs @@ -0,0 +1,68 @@ +using System; +using System.Collections.Generic; + +#nullable disable + +namespace OpenAI.Models +{ + public partial class Embedding + { + /// + /// The embedding vector, which is a list of floats. The length of vector depends on the model as + /// listed in the [embedding guide](/docs/guides/embeddings). + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// where T is of type + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + internal BinaryData EmbeddingProperty { get; } + /// The object type, which is always "embedding". 
+ internal EmbeddingObject Object { get; } = EmbeddingObject.Embedding; + + internal Embedding(long index, BinaryData embeddingProperty, EmbeddingObject @object, IDictionary serializedAdditionalRawData, ReadOnlyMemory? embeddingAsFloats, BinaryData embeddingAsBase64Data) + : this(index, embeddingProperty, @object, serializedAdditionalRawData) + { + EmbeddingAsFloats = embeddingAsFloats; + EmbeddingAsBase64Data = embeddingAsBase64Data; + } + + /// The embedding represented as a vector of floats. + public ReadOnlyMemory? EmbeddingAsFloats { get; } + /// The embedding represented as a Base64-encoded string. + public BinaryData EmbeddingAsBase64Data { get; } + } +} diff --git a/.dotnet/src/Custom/Embeddings/Models/EmbeddingObject.cs b/.dotnet/src/Custom/Embeddings/Models/EmbeddingObject.cs new file mode 100644 index 000000000..a2bd2f70e --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/Models/EmbeddingObject.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace OpenAI.Models +{ + internal readonly partial struct EmbeddingObject + { + } +} diff --git a/.dotnet/src/Custom/Embeddings/Models/GenerateEmbeddingsOptions.cs b/.dotnet/src/Custom/Embeddings/Models/GenerateEmbeddingsOptions.cs new file mode 100644 index 000000000..64500d31a --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/Models/GenerateEmbeddingsOptions.cs @@ -0,0 +1,13 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace OpenAI.Models +{ + public partial class GenerateEmbeddingsOptions + { + internal BinaryData Input { get; set; } + + internal GenerateEmbeddingsOptionsModel Model { get; set; } + } +} diff --git a/.dotnet/src/Custom/OpenAIClient.cs b/.dotnet/src/Custom/OpenAIClient.cs new file mode 100644 index 000000000..2c7ed2700 --- /dev/null +++ b/.dotnet/src/Custom/OpenAIClient.cs @@ -0,0 +1,22 @@ +using System; +using System.Collections.Generic; +using System.Text; +using System.Threading; + +namespace OpenAI +{ + public 
partial class OpenAIClient + { + // TODO: This needs to be suppressed. + internal virtual Embeddings GetEmbeddingsClient() + { + return Volatile.Read(ref _cachedEmbeddings) ?? Interlocked.CompareExchange(ref _cachedEmbeddings, new Embeddings(_pipeline, _credential, _endpoint), null) ?? _cachedEmbeddings; + } + + /// Initializes a new instance of Embeddings. + public virtual Embeddings GetEmbeddingsClient(string model) + { + return Volatile.Read(ref _cachedEmbeddings) ?? Interlocked.CompareExchange(ref _cachedEmbeddings, new Embeddings(model, _pipeline, _credential, _endpoint), null) ?? _cachedEmbeddings; + } + } +} diff --git a/.dotnet/src/Custom/OpenAIModelFactory.cs b/.dotnet/src/Custom/OpenAIModelFactory.cs new file mode 100644 index 000000000..895344070 --- /dev/null +++ b/.dotnet/src/Custom/OpenAIModelFactory.cs @@ -0,0 +1,36 @@ +using OpenAI.Models; +using System; + +namespace OpenAI +{ + /// Model factory for models. + public static partial class OpenAIModelFactory + { + /// Initializes a new instance of . + /// The index of the embedding in the list of embeddings. + /// + /// The embedding vector, which is a list of floats. The length of vector depends on the model as + /// listed in the [embedding guide](/docs/guides/embeddings). + /// + /// The object type, which is always "embedding". + /// A new instance for mocking. + public static Embedding Embedding(long index = default, ReadOnlyMemory? embeddingAsFloats = null) + { + // TODO: We need to populate the embedding property from the embeddingAsFloats parameter. + return new Embedding(index, embeddingProperty: null, EmbeddingObject.Embedding, serializedAdditionalRawData: null, embeddingAsFloats, embeddingAsBase64Data: null); + } + + /// Initializes a new instance of . + /// The index of the embedding in the list of embeddings. + /// + /// The embedding vector, which is a list of floats. The length of vector depends on the model as + /// listed in the [embedding guide](/docs/guides/embeddings). 
+ /// + /// The object type, which is always "embedding". + /// A new instance for mocking. + public static Embedding Embedding(long index = default, BinaryData embeddingAsBase64Data = default) + { + return new Embedding(index, embeddingAsBase64Data, EmbeddingObject.Embedding, serializedAdditionalRawData: null, embeddingAsFloats: null, embeddingAsBase64Data); + } + } +} diff --git a/.dotnet/src/Generated/Embeddings.cs b/.dotnet/src/Generated/Embeddings.cs index f935d7640..d6b7802a6 100644 --- a/.dotnet/src/Generated/Embeddings.cs +++ b/.dotnet/src/Generated/Embeddings.cs @@ -40,27 +40,27 @@ internal Embeddings(ClientPipeline pipeline, ApiKeyCredential credential, Uri en } /// Creates an embedding vector representing the input text. - /// The to use. + /// The to use. /// is null. - public virtual async Task> CreateEmbeddingAsync(CreateEmbeddingRequest embedding) + internal virtual async Task> CreateEmbeddingAsync(GenerateEmbeddingsOptions embedding) { if (embedding is null) throw new ArgumentNullException(nameof(embedding)); using BinaryContent content = BinaryContent.Create(embedding); ClientResult result = await CreateEmbeddingAsync(content).ConfigureAwait(false); - return ClientResult.FromValue(CreateEmbeddingResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + return ClientResult.FromValue(EmbeddingCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Creates an embedding vector representing the input text. - /// The to use. + /// The to use. /// is null. 
- public virtual ClientResult CreateEmbedding(CreateEmbeddingRequest embedding) + internal virtual ClientResult CreateEmbedding(GenerateEmbeddingsOptions embedding) { if (embedding is null) throw new ArgumentNullException(nameof(embedding)); using BinaryContent content = BinaryContent.Create(embedding); ClientResult result = CreateEmbedding(content); - return ClientResult.FromValue(CreateEmbeddingResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + return ClientResult.FromValue(EmbeddingCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -73,7 +73,7 @@ public virtual ClientResult CreateEmbedding(CreateEmbed /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -83,7 +83,7 @@ public virtual ClientResult CreateEmbedding(CreateEmbed /// is null. /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task CreateEmbeddingAsync(BinaryContent content, RequestOptions options = null) + internal virtual async Task CreateEmbeddingAsync(BinaryContent content, RequestOptions options = null) { if (content is null) throw new ArgumentNullException(nameof(content)); options ??= new RequestOptions(); @@ -109,7 +109,7 @@ public virtual async Task CreateEmbeddingAsync(BinaryContent conte /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -119,7 +119,7 @@ public virtual async Task CreateEmbeddingAsync(BinaryContent conte /// is null. /// Service returned a non-success status code. /// The response returned from the service. 
- public virtual ClientResult CreateEmbedding(BinaryContent content, RequestOptions options = null) + internal virtual ClientResult CreateEmbedding(BinaryContent content, RequestOptions options = null) { if (content is null) throw new ArgumentNullException(nameof(content)); options ??= new RequestOptions(); diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs deleted file mode 100644 index e252744e4..000000000 --- a/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs +++ /dev/null @@ -1,47 +0,0 @@ -// - -using System; -using System.ComponentModel; - -namespace OpenAI.Models -{ - /// Enum for encoding_format in CreateEmbeddingRequest. - public readonly partial struct CreateEmbeddingRequestEncodingFormat : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public CreateEmbeddingRequestEncodingFormat(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string FloatValue = "float"; - private const string Base64Value = "base64"; - - /// float. - public static CreateEmbeddingRequestEncodingFormat Float { get; } = new CreateEmbeddingRequestEncodingFormat(FloatValue); - /// base64. - public static CreateEmbeddingRequestEncodingFormat Base64 { get; } = new CreateEmbeddingRequestEncodingFormat(Base64Value); - /// Determines if two values are the same. - public static bool operator ==(CreateEmbeddingRequestEncodingFormat left, CreateEmbeddingRequestEncodingFormat right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(CreateEmbeddingRequestEncodingFormat left, CreateEmbeddingRequestEncodingFormat right) => !left.Equals(right); - /// Converts a string to a . 
- public static implicit operator CreateEmbeddingRequestEncodingFormat(string value) => new CreateEmbeddingRequestEncodingFormat(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is CreateEmbeddingRequestEncodingFormat other && Equals(other); - /// - public bool Equals(CreateEmbeddingRequestEncodingFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value?.GetHashCode() ?? 0; - /// - public override string ToString() => _value; - } -} - diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs deleted file mode 100644 index 628de9b7b..000000000 --- a/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs +++ /dev/null @@ -1,50 +0,0 @@ -// - -using System; -using System.ComponentModel; - -namespace OpenAI.Models -{ - /// Enum for model in CreateEmbeddingRequest. - public readonly partial struct CreateEmbeddingRequestModel : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public CreateEmbeddingRequestModel(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string TextEmbeddingAda002Value = "text-embedding-ada-002"; - private const string TextEmbedding3SmallValue = "text-embedding-3-small"; - private const string TextEmbedding3LargeValue = "text-embedding-3-large"; - - /// text-embedding-ada-002. - public static CreateEmbeddingRequestModel TextEmbeddingAda002 { get; } = new CreateEmbeddingRequestModel(TextEmbeddingAda002Value); - /// text-embedding-3-small. - public static CreateEmbeddingRequestModel TextEmbedding3Small { get; } = new CreateEmbeddingRequestModel(TextEmbedding3SmallValue); - /// text-embedding-3-large. 
- public static CreateEmbeddingRequestModel TextEmbedding3Large { get; } = new CreateEmbeddingRequestModel(TextEmbedding3LargeValue); - /// Determines if two values are the same. - public static bool operator ==(CreateEmbeddingRequestModel left, CreateEmbeddingRequestModel right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(CreateEmbeddingRequestModel left, CreateEmbeddingRequestModel right) => !left.Equals(right); - /// Converts a string to a . - public static implicit operator CreateEmbeddingRequestModel(string value) => new CreateEmbeddingRequestModel(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is CreateEmbeddingRequestModel other && Equals(other); - /// - public bool Equals(CreateEmbeddingRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value?.GetHashCode() ?? 0; - /// - public override string ToString() => _value; - } -} - diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs deleted file mode 100644 index b98b10317..000000000 --- a/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs +++ /dev/null @@ -1,44 +0,0 @@ -// - -using System; -using System.ComponentModel; - -namespace OpenAI.Models -{ - /// The CreateEmbeddingResponse_object. - public readonly partial struct CreateEmbeddingResponseObject : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public CreateEmbeddingResponseObject(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string ListValue = "list"; - - /// list. 
- public static CreateEmbeddingResponseObject List { get; } = new CreateEmbeddingResponseObject(ListValue); - /// Determines if two values are the same. - public static bool operator ==(CreateEmbeddingResponseObject left, CreateEmbeddingResponseObject right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(CreateEmbeddingResponseObject left, CreateEmbeddingResponseObject right) => !left.Equals(right); - /// Converts a string to a . - public static implicit operator CreateEmbeddingResponseObject(string value) => new CreateEmbeddingResponseObject(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is CreateEmbeddingResponseObject other && Equals(other); - /// - public bool Equals(CreateEmbeddingResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value?.GetHashCode() ?? 
0; - /// - public override string ToString() => _value; - } -} - diff --git a/.dotnet/src/Generated/Models/Embedding.Serialization.cs b/.dotnet/src/Generated/Models/Embedding.Serialization.cs index b3506e51c..0f9790a19 100644 --- a/.dotnet/src/Generated/Models/Embedding.Serialization.cs +++ b/.dotnet/src/Generated/Models/Embedding.Serialization.cs @@ -3,7 +3,6 @@ using System; using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models @@ -62,45 +61,6 @@ Embedding IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWri return DeserializeEmbedding(document.RootElement, options); } - internal static Embedding DeserializeEmbedding(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= new ModelReaderWriterOptions("W"); - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - long index = default; - BinaryData embedding = default; - EmbeddingObject @object = default; - IDictionary serializedAdditionalRawData = default; - Dictionary additionalPropertiesDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("index"u8)) - { - index = property.Value.GetInt64(); - continue; - } - if (property.NameEquals("embedding"u8)) - { - embedding = BinaryData.FromString(property.Value.GetRawText()); - continue; - } - if (property.NameEquals("object"u8)) - { - @object = new EmbeddingObject(property.Value.GetString()); - continue; - } - if (options.Format != "W") - { - additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = additionalPropertiesDictionary; - return new Embedding(index, embedding, @object, serializedAdditionalRawData); - } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; diff --git a/.dotnet/src/Generated/Models/Embedding.cs b/.dotnet/src/Generated/Models/Embedding.cs index ecabc7df9..9a3e895f8 100644 --- a/.dotnet/src/Generated/Models/Embedding.cs +++ b/.dotnet/src/Generated/Models/Embedding.cs @@ -79,51 +79,6 @@ internal Embedding() /// The index of the embedding in the list of embeddings. public long Index { get; } - /// - /// The embedding vector, which is a list of floats. The length of vector depends on the model as - /// listed in the [embedding guide](/docs/guides/embeddings). - /// - /// To assign an object to this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// - /// Supported types: - /// - /// - /// where T is of type - /// - /// - /// - /// - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - public BinaryData EmbeddingProperty { get; } - /// The object type, which is always "embedding". 
- public EmbeddingObject Object { get; } = EmbeddingObject.Embedding; } } diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs b/.dotnet/src/Generated/Models/EmbeddingCollection.Serialization.cs similarity index 64% rename from .dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs rename to .dotnet/src/Generated/Models/EmbeddingCollection.Serialization.cs index 84e854405..c4a8aa240 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/EmbeddingCollection.Serialization.cs @@ -8,14 +8,14 @@ namespace OpenAI.Models { - public partial class CreateEmbeddingResponse : IJsonModel + public partial class EmbeddingCollection : IJsonModel { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(EmbeddingCollection)} does not support '{format}' format."); } writer.WriteStartObject(); @@ -50,19 +50,19 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReade writer.WriteEndObject(); } - CreateEmbeddingResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + EmbeddingCollection IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(EmbeddingCollection)} does not support '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeCreateEmbeddingResponse(document.RootElement, options); + return DeserializeEmbeddingCollection(document.RootElement, options); } - internal static CreateEmbeddingResponse DeserializeCreateEmbeddingResponse(JsonElement element, ModelReaderWriterOptions options = null) + internal static EmbeddingCollection DeserializeEmbeddingCollection(JsonElement element, ModelReaderWriterOptions options = null) { options ??= new ModelReaderWriterOptions("W"); @@ -72,8 +72,8 @@ internal static CreateEmbeddingResponse DeserializeCreateEmbeddingResponse(JsonE } IReadOnlyList data = default; string model = default; - CreateEmbeddingResponseObject @object = default; - CreateEmbeddingResponseUsage usage = default; + EmbeddingCollectionObject @object = default; + EmbeddingTokenUsage usage = default; IDictionary serializedAdditionalRawData = default; Dictionary additionalPropertiesDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -95,12 +95,12 @@ internal static CreateEmbeddingResponse DeserializeCreateEmbeddingResponse(JsonE } if (property.NameEquals("object"u8)) { - @object = new CreateEmbeddingResponseObject(property.Value.GetString()); + @object = new EmbeddingCollectionObject(property.Value.GetString()); continue; } if (property.NameEquals("usage"u8)) { - usage = CreateEmbeddingResponseUsage.DeserializeCreateEmbeddingResponseUsage(property.Value); + usage = EmbeddingTokenUsage.DeserializeEmbeddingTokenUsage(property.Value); continue; } if (options.Format != "W") @@ -109,46 +109,46 @@ internal static CreateEmbeddingResponse 
DeserializeCreateEmbeddingResponse(JsonE } } serializedAdditionalRawData = additionalPropertiesDictionary; - return new CreateEmbeddingResponse(data, model, @object, usage, serializedAdditionalRawData); + return new EmbeddingCollection(data, model, @object, usage, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options); default: - throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(EmbeddingCollection)} does not support '{options.Format}' format."); } } - CreateEmbeddingResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + EmbeddingCollection IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data); - return DeserializeCreateEmbeddingResponse(document.RootElement, options); + return DeserializeEmbeddingCollection(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(EmbeddingCollection)} does not support '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The result to deserialize the model from. - internal static CreateEmbeddingResponse FromResponse(PipelineResponse response) + internal static EmbeddingCollection FromResponse(PipelineResponse response) { using var document = JsonDocument.Parse(response.Content); - return DeserializeCreateEmbeddingResponse(document.RootElement); + return DeserializeEmbeddingCollection(document.RootElement); } } } diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs b/.dotnet/src/Generated/Models/EmbeddingCollection.cs similarity index 77% rename from .dotnet/src/Generated/Models/CreateEmbeddingResponse.cs rename to .dotnet/src/Generated/Models/EmbeddingCollection.cs index ce1b35c17..d7dd3ec73 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs +++ b/.dotnet/src/Generated/Models/EmbeddingCollection.cs @@ -7,8 +7,8 @@ namespace OpenAI.Models { - /// The CreateEmbeddingResponse. - public partial class CreateEmbeddingResponse + /// The EmbeddingCollection. + public partial class EmbeddingCollection { /// /// Keeps track of any properties unknown to the library. 
@@ -42,12 +42,12 @@ public partial class CreateEmbeddingResponse /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// The list of embeddings generated by the model. /// The name of the model used to generate the embedding. /// The usage information for the request. /// , or is null. - internal CreateEmbeddingResponse(IEnumerable data, string model, CreateEmbeddingResponseUsage usage) + internal EmbeddingCollection(IEnumerable data, string model, EmbeddingTokenUsage usage) { if (data is null) throw new ArgumentNullException(nameof(data)); if (model is null) throw new ArgumentNullException(nameof(model)); @@ -58,13 +58,13 @@ internal CreateEmbeddingResponse(IEnumerable data, string model, Crea Usage = usage; } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The list of embeddings generated by the model. /// The name of the model used to generate the embedding. /// The object type, which is always "list". /// The usage information for the request. /// Keeps track of any properties unknown to the library. - internal CreateEmbeddingResponse(IReadOnlyList data, string model, CreateEmbeddingResponseObject @object, CreateEmbeddingResponseUsage usage, IDictionary serializedAdditionalRawData) + internal EmbeddingCollection(IReadOnlyList data, string model, EmbeddingCollectionObject @object, EmbeddingTokenUsage usage, IDictionary serializedAdditionalRawData) { Data = data; Model = model; @@ -73,8 +73,8 @@ internal CreateEmbeddingResponse(IReadOnlyList data, string model, Cr _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal CreateEmbeddingResponse() + /// Initializes a new instance of for deserialization. + internal EmbeddingCollection() { } @@ -83,10 +83,10 @@ internal CreateEmbeddingResponse() /// The name of the model used to generate the embedding. 
public string Model { get; } /// The object type, which is always "list". - public CreateEmbeddingResponseObject Object { get; } = CreateEmbeddingResponseObject.List; + public EmbeddingCollectionObject Object { get; } = EmbeddingCollectionObject.List; /// The usage information for the request. - public CreateEmbeddingResponseUsage Usage { get; } + public EmbeddingTokenUsage Usage { get; } } } diff --git a/.dotnet/src/Generated/Models/EmbeddingCollectionObject.cs b/.dotnet/src/Generated/Models/EmbeddingCollectionObject.cs new file mode 100644 index 000000000..b045cf349 --- /dev/null +++ b/.dotnet/src/Generated/Models/EmbeddingCollectionObject.cs @@ -0,0 +1,44 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The EmbeddingCollection_object. + public readonly partial struct EmbeddingCollectionObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public EmbeddingCollectionObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static EmbeddingCollectionObject List { get; } = new EmbeddingCollectionObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(EmbeddingCollectionObject left, EmbeddingCollectionObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(EmbeddingCollectionObject left, EmbeddingCollectionObject right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator EmbeddingCollectionObject(string value) => new EmbeddingCollectionObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is EmbeddingCollectionObject other && Equals(other); + /// + public bool Equals(EmbeddingCollectionObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} + diff --git a/.dotnet/src/Generated/Models/EmbeddingObject.cs b/.dotnet/src/Generated/Models/EmbeddingObject.cs index 2cc2f012c..c6088b162 100644 --- a/.dotnet/src/Generated/Models/EmbeddingObject.cs +++ b/.dotnet/src/Generated/Models/EmbeddingObject.cs @@ -6,7 +6,7 @@ namespace OpenAI.Models { /// The Embedding_object. - public readonly partial struct EmbeddingObject : IEquatable + internal readonly partial struct EmbeddingObject : IEquatable { private readonly string _value; diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.Serialization.cs b/.dotnet/src/Generated/Models/EmbeddingTokenUsage.Serialization.cs similarity index 62% rename from .dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.Serialization.cs rename to .dotnet/src/Generated/Models/EmbeddingTokenUsage.Serialization.cs index 2b10317c0..8026af740 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.Serialization.cs +++ b/.dotnet/src/Generated/Models/EmbeddingTokenUsage.Serialization.cs @@ -8,14 +8,14 @@ namespace OpenAI.Models { - public partial class CreateEmbeddingResponseUsage : IJsonModel + public partial class EmbeddingTokenUsage : IJsonModel { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(CreateEmbeddingResponseUsage)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(EmbeddingTokenUsage)} does not support '{format}' format."); } writer.WriteStartObject(); @@ -41,19 +41,19 @@ void IJsonModel.Write(Utf8JsonWriter writer, Model writer.WriteEndObject(); } - CreateEmbeddingResponseUsage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + EmbeddingTokenUsage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(CreateEmbeddingResponseUsage)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(EmbeddingTokenUsage)} does not support '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeCreateEmbeddingResponseUsage(document.RootElement, options); + return DeserializeEmbeddingTokenUsage(document.RootElement, options); } - internal static CreateEmbeddingResponseUsage DeserializeCreateEmbeddingResponseUsage(JsonElement element, ModelReaderWriterOptions options = null) + internal static EmbeddingTokenUsage DeserializeEmbeddingTokenUsage(JsonElement element, ModelReaderWriterOptions options = null) { options ??= new ModelReaderWriterOptions("W"); @@ -83,46 +83,46 @@ internal static CreateEmbeddingResponseUsage DeserializeCreateEmbeddingResponseU } } serializedAdditionalRawData = additionalPropertiesDictionary; - return new 
CreateEmbeddingResponseUsage(promptTokens, totalTokens, serializedAdditionalRawData); + return new EmbeddingTokenUsage(promptTokens, totalTokens, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options); default: - throw new FormatException($"The model {nameof(CreateEmbeddingResponseUsage)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(EmbeddingTokenUsage)} does not support '{options.Format}' format."); } } - CreateEmbeddingResponseUsage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + EmbeddingTokenUsage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data); - return DeserializeCreateEmbeddingResponseUsage(document.RootElement, options); + return DeserializeEmbeddingTokenUsage(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(CreateEmbeddingResponseUsage)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(EmbeddingTokenUsage)} does not support '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The result to deserialize the model from. - internal static CreateEmbeddingResponseUsage FromResponse(PipelineResponse response) + internal static EmbeddingTokenUsage FromResponse(PipelineResponse response) { using var document = JsonDocument.Parse(response.Content); - return DeserializeCreateEmbeddingResponseUsage(document.RootElement); + return DeserializeEmbeddingTokenUsage(document.RootElement); } } } diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.cs b/.dotnet/src/Generated/Models/EmbeddingTokenUsage.cs similarity index 77% rename from .dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.cs rename to .dotnet/src/Generated/Models/EmbeddingTokenUsage.cs index bafd0284c..e235e4d4b 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingResponseUsage.cs +++ b/.dotnet/src/Generated/Models/EmbeddingTokenUsage.cs @@ -5,8 +5,8 @@ namespace OpenAI.Models { - /// The CreateEmbeddingResponseUsage. - public partial class CreateEmbeddingResponseUsage + /// The EmbeddingTokenUsage. + public partial class EmbeddingTokenUsage { /// /// Keeps track of any properties unknown to the library. 
@@ -40,28 +40,28 @@ public partial class CreateEmbeddingResponseUsage /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// The number of tokens used by the prompt. /// The total number of tokens used by the request. - internal CreateEmbeddingResponseUsage(long promptTokens, long totalTokens) + internal EmbeddingTokenUsage(long promptTokens, long totalTokens) { PromptTokens = promptTokens; TotalTokens = totalTokens; } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The number of tokens used by the prompt. /// The total number of tokens used by the request. /// Keeps track of any properties unknown to the library. - internal CreateEmbeddingResponseUsage(long promptTokens, long totalTokens, IDictionary serializedAdditionalRawData) + internal EmbeddingTokenUsage(long promptTokens, long totalTokens, IDictionary serializedAdditionalRawData) { PromptTokens = promptTokens; TotalTokens = totalTokens; _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal CreateEmbeddingResponseUsage() + /// Initializes a new instance of for deserialization. 
+ internal EmbeddingTokenUsage() { } diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs b/.dotnet/src/Generated/Models/GenerateEmbeddingsOptions.Serialization.cs similarity index 67% rename from .dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs rename to .dotnet/src/Generated/Models/GenerateEmbeddingsOptions.Serialization.cs index 7a8ff235b..5d6ec0838 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/GenerateEmbeddingsOptions.Serialization.cs @@ -8,14 +8,14 @@ namespace OpenAI.Models { - public partial class CreateEmbeddingRequest : IJsonModel + public partial class GenerateEmbeddingsOptions : IJsonModel { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(GenerateEmbeddingsOptions)} does not support '{format}' format."); } writer.WriteStartObject(); @@ -63,19 +63,19 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReader writer.WriteEndObject(); } - CreateEmbeddingRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + GenerateEmbeddingsOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(GenerateEmbeddingsOptions)} does not support '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeCreateEmbeddingRequest(document.RootElement, options); + return DeserializeGenerateEmbeddingsOptions(document.RootElement, options); } - internal static CreateEmbeddingRequest DeserializeCreateEmbeddingRequest(JsonElement element, ModelReaderWriterOptions options = null) + internal static GenerateEmbeddingsOptions DeserializeGenerateEmbeddingsOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= new ModelReaderWriterOptions("W"); @@ -84,8 +84,8 @@ internal static CreateEmbeddingRequest DeserializeCreateEmbeddingRequest(JsonEle return null; } BinaryData input = default; - CreateEmbeddingRequestModel model = default; - OptionalProperty encodingFormat = default; + GenerateEmbeddingsOptionsModel model = default; + OptionalProperty encodingFormat = default; OptionalProperty dimensions = default; OptionalProperty user = default; IDictionary serializedAdditionalRawData = default; @@ -99,7 +99,7 @@ internal static CreateEmbeddingRequest DeserializeCreateEmbeddingRequest(JsonEle } if (property.NameEquals("model"u8)) { - model = new CreateEmbeddingRequestModel(property.Value.GetString()); + model = new GenerateEmbeddingsOptionsModel(property.Value.GetString()); continue; } if (property.NameEquals("encoding_format"u8)) @@ -108,7 +108,7 @@ internal static CreateEmbeddingRequest DeserializeCreateEmbeddingRequest(JsonEle { continue; } - encodingFormat = new CreateEmbeddingRequestEncodingFormat(property.Value.GetString()); + encodingFormat = new GenerateEmbeddingsOptionsEncodingFormat(property.Value.GetString()); continue; } if 
(property.NameEquals("dimensions"u8)) @@ -131,46 +131,46 @@ internal static CreateEmbeddingRequest DeserializeCreateEmbeddingRequest(JsonEle } } serializedAdditionalRawData = additionalPropertiesDictionary; - return new CreateEmbeddingRequest(input, model, OptionalProperty.ToNullable(encodingFormat), OptionalProperty.ToNullable(dimensions), user.Value, serializedAdditionalRawData); + return new GenerateEmbeddingsOptions(input, model, OptionalProperty.ToNullable(encodingFormat), OptionalProperty.ToNullable(dimensions), user.Value, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options); default: - throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(GenerateEmbeddingsOptions)} does not support '{options.Format}' format."); } } - CreateEmbeddingRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + GenerateEmbeddingsOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data); - return DeserializeCreateEmbeddingRequest(document.RootElement, options); + return DeserializeGenerateEmbeddingsOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(GenerateEmbeddingsOptions)} does not support '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The result to deserialize the model from. - internal static CreateEmbeddingRequest FromResponse(PipelineResponse response) + internal static GenerateEmbeddingsOptions FromResponse(PipelineResponse response) { using var document = JsonDocument.Parse(response.Content); - return DeserializeCreateEmbeddingRequest(document.RootElement); + return DeserializeGenerateEmbeddingsOptions(document.RootElement); } } } diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs b/.dotnet/src/Generated/Models/GenerateEmbeddingsOptions.cs similarity index 58% rename from .dotnet/src/Generated/Models/CreateEmbeddingRequest.cs rename to .dotnet/src/Generated/Models/GenerateEmbeddingsOptions.cs index c2a286d4c..d8a90823c 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs +++ b/.dotnet/src/Generated/Models/GenerateEmbeddingsOptions.cs @@ -6,8 +6,8 @@ namespace OpenAI.Models { - /// The CreateEmbeddingRequest. - public partial class CreateEmbeddingRequest + /// The GenerateEmbeddingsOptions. + public partial class GenerateEmbeddingsOptions { /// /// Keeps track of any properties unknown to the library. 
@@ -41,7 +41,7 @@ public partial class CreateEmbeddingRequest /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a /// single request, pass an array of strings or array of token arrays. Each input must not exceed @@ -56,7 +56,7 @@ public partial class CreateEmbeddingRequest /// descriptions of them. /// /// is null. - public CreateEmbeddingRequest(BinaryData input, CreateEmbeddingRequestModel model) + public GenerateEmbeddingsOptions(BinaryData input, GenerateEmbeddingsOptionsModel model) { if (input is null) throw new ArgumentNullException(nameof(input)); @@ -64,7 +64,7 @@ public CreateEmbeddingRequest(BinaryData input, CreateEmbeddingRequestModel mode Model = model; } - /// Initializes a new instance of . + /// Initializes a new instance of . /// /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a /// single request, pass an array of strings or array of token arrays. Each input must not exceed @@ -91,7 +91,7 @@ public CreateEmbeddingRequest(BinaryData input, CreateEmbeddingRequestModel mode /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). /// /// Keeps track of any properties unknown to the library. - internal CreateEmbeddingRequest(BinaryData input, CreateEmbeddingRequestModel model, CreateEmbeddingRequestEncodingFormat? encodingFormat, long? dimensions, string user, IDictionary serializedAdditionalRawData) + internal GenerateEmbeddingsOptions(BinaryData input, GenerateEmbeddingsOptionsModel model, GenerateEmbeddingsOptionsEncodingFormat? encodingFormat, long? 
dimensions, string user, IDictionary serializedAdditionalRawData) { Input = input; Model = model; @@ -101,75 +101,15 @@ internal CreateEmbeddingRequest(BinaryData input, CreateEmbeddingRequestModel mo _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal CreateEmbeddingRequest() + /// Initializes a new instance of for deserialization. + internal GenerateEmbeddingsOptions() { } - - /// - /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a - /// single request, pass an array of strings or array of token arrays. Each input must not exceed - /// the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an - /// empty string. - /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - /// for counting tokens. - /// - /// To assign an object to this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// - /// Supported types: - /// - /// - /// - /// - /// - /// where T is of type - /// - /// - /// where T is of type - /// - /// - /// where T is of type IList{long} - /// - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - public BinaryData Input { get; } - /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to - /// see all of your available models, or see our [Model overview](/docs/models/overview) for - /// descriptions of them. 
- /// - public CreateEmbeddingRequestModel Model { get; } /// /// The format to return the embeddings in. Can be either `float` or /// [`base64`](https://pypi.org/project/pybase64/). /// - public CreateEmbeddingRequestEncodingFormat? EncodingFormat { get; set; } + public GenerateEmbeddingsOptionsEncodingFormat? EncodingFormat { get; set; } /// /// The number of dimensions the resulting output embeddings should have. Only supported in /// `text-embedding-3` and later models. diff --git a/.dotnet/src/Generated/Models/GenerateEmbeddingsOptionsEncodingFormat.cs b/.dotnet/src/Generated/Models/GenerateEmbeddingsOptionsEncodingFormat.cs new file mode 100644 index 000000000..805a38b93 --- /dev/null +++ b/.dotnet/src/Generated/Models/GenerateEmbeddingsOptionsEncodingFormat.cs @@ -0,0 +1,47 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for encoding_format in GenerateEmbeddingsOptions. + public readonly partial struct GenerateEmbeddingsOptionsEncodingFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public GenerateEmbeddingsOptionsEncodingFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FloatValue = "float"; + private const string Base64Value = "base64"; + + /// float. + public static GenerateEmbeddingsOptionsEncodingFormat Float { get; } = new GenerateEmbeddingsOptionsEncodingFormat(FloatValue); + /// base64. + public static GenerateEmbeddingsOptionsEncodingFormat Base64 { get; } = new GenerateEmbeddingsOptionsEncodingFormat(Base64Value); + /// Determines if two values are the same. + public static bool operator ==(GenerateEmbeddingsOptionsEncodingFormat left, GenerateEmbeddingsOptionsEncodingFormat right) => left.Equals(right); + /// Determines if two values are not the same. 
+ public static bool operator !=(GenerateEmbeddingsOptionsEncodingFormat left, GenerateEmbeddingsOptionsEncodingFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator GenerateEmbeddingsOptionsEncodingFormat(string value) => new GenerateEmbeddingsOptionsEncodingFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is GenerateEmbeddingsOptionsEncodingFormat other && Equals(other); + /// + public bool Equals(GenerateEmbeddingsOptionsEncodingFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} + diff --git a/.dotnet/src/Generated/Models/GenerateEmbeddingsOptionsModel.cs b/.dotnet/src/Generated/Models/GenerateEmbeddingsOptionsModel.cs new file mode 100644 index 000000000..e1a9c9135 --- /dev/null +++ b/.dotnet/src/Generated/Models/GenerateEmbeddingsOptionsModel.cs @@ -0,0 +1,50 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in GenerateEmbeddingsOptions. + public readonly partial struct GenerateEmbeddingsOptionsModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public GenerateEmbeddingsOptionsModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TextEmbeddingAda002Value = "text-embedding-ada-002"; + private const string TextEmbedding3SmallValue = "text-embedding-3-small"; + private const string TextEmbedding3LargeValue = "text-embedding-3-large"; + + /// text-embedding-ada-002. + public static GenerateEmbeddingsOptionsModel TextEmbeddingAda002 { get; } = new GenerateEmbeddingsOptionsModel(TextEmbeddingAda002Value); + /// text-embedding-3-small. 
+ public static GenerateEmbeddingsOptionsModel TextEmbedding3Small { get; } = new GenerateEmbeddingsOptionsModel(TextEmbedding3SmallValue); + /// text-embedding-3-large. + public static GenerateEmbeddingsOptionsModel TextEmbedding3Large { get; } = new GenerateEmbeddingsOptionsModel(TextEmbedding3LargeValue); + /// Determines if two values are the same. + public static bool operator ==(GenerateEmbeddingsOptionsModel left, GenerateEmbeddingsOptionsModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(GenerateEmbeddingsOptionsModel left, GenerateEmbeddingsOptionsModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator GenerateEmbeddingsOptionsModel(string value) => new GenerateEmbeddingsOptionsModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is GenerateEmbeddingsOptionsModel other && Equals(other); + /// + public bool Equals(GenerateEmbeddingsOptionsModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} + diff --git a/.dotnet/src/Generated/OpenAIClient.cs b/.dotnet/src/Generated/OpenAIClient.cs index 29785453c..7565c4dd1 100644 --- a/.dotnet/src/Generated/OpenAIClient.cs +++ b/.dotnet/src/Generated/OpenAIClient.cs @@ -89,12 +89,6 @@ public virtual Completions GetCompletionsClient() return Volatile.Read(ref _cachedCompletions) ?? Interlocked.CompareExchange(ref _cachedCompletions, new Completions(_pipeline, _credential, _endpoint), null) ?? _cachedCompletions; } - /// Initializes a new instance of Embeddings. - public virtual Embeddings GetEmbeddingsClient() - { - return Volatile.Read(ref _cachedEmbeddings) ?? 
Interlocked.CompareExchange(ref _cachedEmbeddings, new Embeddings(_pipeline, _credential, _endpoint), null) ?? _cachedEmbeddings; - } - /// Initializes a new instance of Files. public virtual Files GetFilesClient() { diff --git a/.dotnet/src/Generated/OpenAIModelFactory.cs b/.dotnet/src/Generated/OpenAIModelFactory.cs index c8de3f61a..dd7118fc0 100644 --- a/.dotnet/src/Generated/OpenAIModelFactory.cs +++ b/.dotnet/src/Generated/OpenAIModelFactory.cs @@ -724,71 +724,26 @@ public static CreateCompletionResponseChoiceLogprobs CreateCompletionResponseCho return new CreateCompletionResponseChoiceLogprobs(tokens?.ToList(), tokenLogprobs?.ToList(), topLogprobs?.ToList(), textOffset?.ToList(), serializedAdditionalRawData: null); } - /// Initializes a new instance of . - /// - /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a - /// single request, pass an array of strings or array of token arrays. Each input must not exceed - /// the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an - /// empty string. - /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - /// for counting tokens. - /// - /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to - /// see all of your available models, or see our [Model overview](/docs/models/overview) for - /// descriptions of them. - /// - /// - /// The format to return the embeddings in. Can be either `float` or - /// [`base64`](https://pypi.org/project/pybase64/). - /// - /// - /// The number of dimensions the resulting output embeddings should have. Only supported in - /// `text-embedding-3` and later models. - /// - /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect - /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - /// - /// A new instance for mocking. 
- public static CreateEmbeddingRequest CreateEmbeddingRequest(BinaryData input = null, CreateEmbeddingRequestModel model = default, CreateEmbeddingRequestEncodingFormat? encodingFormat = null, long? dimensions = null, string user = null) - { - return new CreateEmbeddingRequest(input, model, encodingFormat, dimensions, user, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . + /// Initializes a new instance of . /// The list of embeddings generated by the model. /// The name of the model used to generate the embedding. /// The object type, which is always "list". /// The usage information for the request. - /// A new instance for mocking. - public static CreateEmbeddingResponse CreateEmbeddingResponse(IEnumerable data = null, string model = null, CreateEmbeddingResponseObject @object = default, CreateEmbeddingResponseUsage usage = null) + /// A new instance for mocking. + public static EmbeddingCollection EmbeddingCollection(IEnumerable data = null, string model = null, EmbeddingCollectionObject @object = default, EmbeddingTokenUsage usage = null) { data ??= new List(); - return new CreateEmbeddingResponse(data?.ToList(), model, @object, usage, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The index of the embedding in the list of embeddings. - /// - /// The embedding vector, which is a list of floats. The length of vector depends on the model as - /// listed in the [embedding guide](/docs/guides/embeddings). - /// - /// The object type, which is always "embedding". - /// A new instance for mocking. - public static Embedding Embedding(long index = default, BinaryData embeddingProperty = null, EmbeddingObject @object = default) - { - return new Embedding(index, embeddingProperty, @object, serializedAdditionalRawData: null); + return new EmbeddingCollection(data?.ToList(), model, @object, usage, serializedAdditionalRawData: null); } - /// Initializes a new instance of . 
+ /// Initializes a new instance of . /// The number of tokens used by the prompt. /// The total number of tokens used by the request. - /// A new instance for mocking. - public static CreateEmbeddingResponseUsage CreateEmbeddingResponseUsage(long promptTokens = default, long totalTokens = default) + /// A new instance for mocking. + public static EmbeddingTokenUsage EmbeddingTokenUsage(long promptTokens = default, long totalTokens = default) { - return new CreateEmbeddingResponseUsage(promptTokens, totalTokens, serializedAdditionalRawData: null); + return new EmbeddingTokenUsage(promptTokens, totalTokens, serializedAdditionalRawData: null); } /// Initializes a new instance of . diff --git a/.dotnet/tests/EmbeddingsTests.cs b/.dotnet/tests/EmbeddingsTests.cs new file mode 100644 index 000000000..ecf35be7d --- /dev/null +++ b/.dotnet/tests/EmbeddingsTests.cs @@ -0,0 +1,101 @@ +using NUnit.Framework; +using OpenAI.Models; +using System; +using System.ClientModel; +using System.Collections.Generic; +using System.Linq; +using System.Numerics; +using System.Text; +using System.Threading.Tasks; + +namespace OpenAI.Tests +{ + public partial class EmbeddingsTests + { + [Test] + public static void GetEmbeddingFromString() + { + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + OpenAIClient client = new(credential); + Embeddings embeddingsClient = client.GetEmbeddingsClient("text-embedding-3-small"); + + string input = "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa," + + " and a really helpful concierge. The location is perfect -- right downtown, close to all " + + " the tourist attractions. 
We highly recommend this hotel."; + + ClientResult result = embeddingsClient.GenerateEmbedding(input); + Embedding embedding = result.Value; + + ReadOnlyMemory vector = embedding.EmbeddingAsFloats.Value; + Assert.IsTrue(vector.Length == 1536); + } + + [Test] + public static void GetEmbeddingFromArrayOfTokens() + { + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + OpenAIClient client = new(credential); + Embeddings embeddingsClient = client.GetEmbeddingsClient("text-embedding-3-small"); + + List input = new() { 14809, 9689, 304, 6424, 422, 499, 1093, 19913, 25325, 13, 2435, 617, 459, 8056, + 56010, 7463, 11, 264, 31493, 11, 323, 264, 2216, 11190, 3613, 87103, 13, 578, 3813, 374, 4832, 1198, + 1314, 19441, 11, 3345, 311, 682, 279, 31070, 39591, 13, 1226, 7701, 7079, 420, 9689, 13 }; + + ClientResult result = embeddingsClient.GenerateEmbedding(input); + Embedding embedding = result.Value; + + ReadOnlyMemory vector = embedding.EmbeddingAsFloats.Value; + Assert.IsTrue(vector.Length == 1536); + } + + [Test] + public static void GetEmbeddingsFromArrayOfStrings() + { + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + OpenAIClient client = new(credential); + Embeddings embeddingsClient = client.GetEmbeddingsClient("text-embedding-3-small"); + + List inputs = new() { + "Luxury", + "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa," + + " and a really helpful concierge. The location is perfect -- right downtown, close to all " + + " the tourist attractions. We highly recommend this hotel." + }; + + ClientResult result = embeddingsClient.GenerateEmbeddings(inputs); + EmbeddingCollection collection = result.Value; // TODO: Make EmbeddingCollection inherit from ReadOnlyCollection. 
+ IReadOnlyList data = collection.Data; + + ReadOnlyMemory vector0 = data[0].EmbeddingAsFloats.Value; + Assert.IsTrue(vector0.Length == 1536); + + ReadOnlyMemory vector1 = data[1].EmbeddingAsFloats.Value; + Assert.IsTrue(vector1.Length == 1536); + } + + [Test] + public static void GetEmbeddingsFromArrayOfArraysOfTokens() + { + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + OpenAIClient client = new(credential); + Embeddings embeddingsClient = client.GetEmbeddingsClient("text-embedding-3-small"); + + List> inputs = new() { + new List { 78379, 3431 }, + new List { 14809, 9689, 304, 6424, 422, 499, 1093, 19913, 25325, 13, 2435, 617, 459, 8056, + 56010, 7463, 11, 264, 31493, 11, 323, 264, 2216, 11190, 3613, 87103, 13, 578, 3813, 374, 4832, 1198, + 1314, 19441, 11, 3345, 311, 682, 279, 31070, 39591, 13, 1226, 7701, 7079, 420, 9689, 13 } + }; + + ClientResult result = embeddingsClient.GenerateEmbeddings(inputs); + EmbeddingCollection collection = result.Value; // TODO: Make EmbeddingCollection inherit from ReadOnlyCollection. 
+ IReadOnlyList data = collection.Data; + + ReadOnlyMemory vector0 = data[0].EmbeddingAsFloats.Value; + Assert.IsTrue(vector0.Length == 1536); + + ReadOnlyMemory vector1 = data[1].EmbeddingAsFloats.Value; + Assert.IsTrue(vector1.Length == 1536); + } + } +} diff --git a/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs b/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs deleted file mode 100644 index b4deac0fc..000000000 --- a/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs +++ /dev/null @@ -1,23 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - public partial class EmbeddingsTests - { - [Test] - public void SmokeTest() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - Embeddings client = new OpenAIClient(credential).GetEmbeddingsClient(); - Assert.IsNotNull(client); - } - } -} - diff --git a/embeddings/models.tsp b/embeddings/models.tsp index fd9c13fde..fb6ab394a 100644 --- a/embeddings/models.tsp +++ b/embeddings/models.tsp @@ -55,13 +55,7 @@ model CreateEmbeddingResponse { object: "list"; /** The usage information for the request. */ - usage: { - /** The number of tokens used by the prompt. */ - prompt_tokens: safeint; - - /** The total number of tokens used by the request. */ - total_tokens: safeint; - }; + usage: EmbeddingUsage; } alias EMBEDDINGS_MODELS = @@ -70,8 +64,7 @@ alias EMBEDDINGS_MODELS = | "text-embedding-3-large"; @oneOf -union CreateEmbeddingRequestInput -{ +union CreateEmbeddingRequestInput { /** The string that will be turned into an embedding. */ string, @@ -99,3 +92,11 @@ model Embedding { /** The object type, which is always "embedding". */ object: "embedding"; } + +model EmbeddingUsage { + /** The number of tokens used by the prompt. */ + prompt_tokens: safeint; + + /** The total number of tokens used by the request. 
*/ + total_tokens: safeint; +}; \ No newline at end of file diff --git a/main.tsp b/main.tsp index 604a75f8c..e46a4467c 100644 --- a/main.tsp +++ b/main.tsp @@ -1,6 +1,7 @@ import "@typespec/http"; import "@typespec/openapi3"; import "@typespec/openapi"; +import "@azure-tools/typespec-client-generator-core"; import "./audio"; import "./assistants"; @@ -17,6 +18,7 @@ import "./runs"; import "./threads"; using TypeSpec.Http; +using Azure.ClientGenerator.Core; /** The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. */ @service({ @@ -35,3 +37,12 @@ using TypeSpec.Http; @server("https://api.openai.com/v1", "OpenAI Endpoint") @useAuth(BearerAuth) namespace OpenAI; + +@@projectedName(OpenAI.CreateEmbeddingRequest, "csharp", "GenerateEmbeddingsOptions"); +@@projectedName(OpenAI.CreateEmbeddingResponse, "csharp", "EmbeddingCollection"); +@@projectedName(OpenAI.EmbeddingUsage, "csharp", "EmbeddingTokenUsage"); + +@@access(OpenAI.Embeddings.createEmbedding, Access.internal, "csharp"); +@@access(OpenAI.CreateEmbeddingRequest, Access.public, "csharp"); +@@access(OpenAI.CreateEmbeddingResponse, Access.public, "csharp"); +@@access(OpenAI.EmbeddingUsage, Access.public, "csharp"); \ No newline at end of file diff --git a/tsp-output/@typespec/openapi3/openapi.yaml b/tsp-output/@typespec/openapi3/openapi.yaml index c96cc33ed..e27e57ba4 100644 --- a/tsp-output/@typespec/openapi3/openapi.yaml +++ b/tsp-output/@typespec/openapi3/openapi.yaml @@ -3112,19 +3112,8 @@ components: - list description: The object type, which is always "list". usage: - type: object - properties: - prompt_tokens: - type: integer - format: int64 - description: The number of tokens used by the prompt. - total_tokens: - type: integer - format: int64 - description: The total number of tokens used by the request. - required: - - prompt_tokens - - total_tokens + allOf: + - $ref: '#/components/schemas/EmbeddingUsage' description: The usage information for the request. 
CreateFileRequestMultiPart: type: object @@ -4210,6 +4199,20 @@ components: - embedding description: The object type, which is always "embedding". description: Represents an embedding vector returned by embedding endpoint. + EmbeddingUsage: + type: object + required: + - prompt_tokens + - total_tokens + properties: + prompt_tokens: + type: integer + format: int64 + description: The number of tokens used by the prompt. + total_tokens: + type: integer + format: int64 + description: The total number of tokens used by the request. Error: type: object required: From 03664d5818b57ac6fd4a751913c900b3d02dde2c Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Tue, 20 Feb 2024 17:15:32 -0800 Subject: [PATCH 14/18] Fix Update-ClientModel.ps1 script: DefaultRequestContext and URI path fixes --- .dotnet/scripts/Update-ClientModel.ps1 | 14 +++--- .dotnet/src/Generated/Assistants.cs | 55 ++++++++------------ .dotnet/src/Generated/Audio.cs | 14 +++--- .dotnet/src/Generated/Chat.cs | 6 ++- .dotnet/src/Generated/Completions.cs | 6 ++- .dotnet/src/Generated/Embeddings.cs | 6 ++- .dotnet/src/Generated/Files.cs | 28 +++++------ .dotnet/src/Generated/FineTuning.cs | 31 ++++++------ .dotnet/src/Generated/Images.cs | 14 +++--- .dotnet/src/Generated/Messages.cs | 49 ++++++------------ .dotnet/src/Generated/ModelsOps.cs | 16 +++--- .dotnet/src/Generated/Moderations.cs | 6 ++- .dotnet/src/Generated/Runs.cs | 69 ++++++++------------------ .dotnet/src/Generated/Threads.cs | 21 ++++---- 14 files changed, 144 insertions(+), 191 deletions(-) diff --git a/.dotnet/scripts/Update-ClientModel.ps1 b/.dotnet/scripts/Update-ClientModel.ps1 index 1e533dde4..203cdd80c 100644 --- a/.dotnet/scripts/Update-ClientModel.ps1 +++ b/.dotnet/scripts/Update-ClientModel.ps1 @@ -89,10 +89,10 @@ function Update-Subclients { $content = $content -creplace "RequestOptions context = FromCancellationToken\(cancellationToken\);\s+", "" $content = $content -creplace "using RequestBody content = 
(?\w+)\.ToRequestBody\(\);", "using BinaryContent content = BinaryContent.Create(`${var});" $content = $content -creplace "using RequestBody content0 = (?\w+)\.ToRequestBody\(\);", "using BinaryContent content0 = BinaryContent.Create(`${var});" - $content = $content -creplace "Result result = await (?\w+)\(context\)\.ConfigureAwait\(false\);", "ClientResult result = await `${method}().ConfigureAwait(false);" - $content = $content -creplace "Result result = (?\w+)\(context\);", "ClientResult result = `${method}();" - $content = $content -creplace "Result result = await (?\w+)\((?[(\w+)(\?.ToString\(\)*)(,\s\w+)]*), context\)\.ConfigureAwait\(false\);", "ClientResult result = await `${method}(`${params}).ConfigureAwait(false);" - $content = $content -creplace "Result result = (?\w+)\((?[(\w+)(\?.ToString\(\)*)(,\s\w+)]*), context\);", "ClientResult result = `${method}(`${params});" + $content = $content -creplace "Result result = await (?\w+)\(context\)\.ConfigureAwait\(false\);", "ClientResult result = await `${method}(DefaultRequestContext).ConfigureAwait(false);" + $content = $content -creplace "Result result = (?\w+)\(context\);", "ClientResult result = `${method}(DefaultRequestContext);" + $content = $content -creplace "Result result = await (?\w+)\((?[(\w+)(\?.ToString\(\)*)(,\s\w+)]*), context\)\.ConfigureAwait\(false\);", "ClientResult result = await `${method}(`${params}, DefaultRequestContext).ConfigureAwait(false);" + $content = $content -creplace "Result result = (?\w+)\((?[(\w+)(\?.ToString\(\)*)(,\s\w+)]*), context\);", "ClientResult result = `${method}(`${params}, DefaultRequestContext);" # Modify protocol methods $content = $content -creplace "\/\/\/ Please try the simpler \w+)\((?[(\w+)(\?*)(,\s\w+)]*),CancellationToken\)`"/> convenience overload with strongly typed models first.", "/// Please try the simpler convenience overload with strongly typed models first." 
@@ -119,14 +119,14 @@ function Update-Subclients { $content = $content -creplace "request\.SetMethod\(`"(?[\w\/]+)`"\);", "request.Method = `"`${name}`";" $content = $content -creplace "var uri = new RequestUri\(\);", "UriBuilder uriBuilder = new(_endpoint.ToString());" $content = $content -creplace "uri\.Reset\(_endpoint\);", "StringBuilder path = new();" - $content = $content -creplace "uri\.AppendPath\((?`"?[\w\/]+`"?), (\w+)\);", "path.Append(`${path});`r`n uriBuilder.Path += path.ToString();" + $content = $content -creplace "uri\.AppendPath\((?`"?[\w\/]+`"?), (\w+)\);", "path.Append(`${path});" $content = $content -creplace "uri\.AppendQuery\(`"(?\w+)`", (?\w+(\.Value)?), (\w+)\);", "if (uriBuilder.Query != null && uriBuilder.Query.Length > 1)`r`n {`r`n uriBuilder.Query += $`"&`${key}={`${value}}`";`r`n }`r`n else`r`n {`r`n uriBuilder.Query = $`"`${key}={`${value}}`";`r`n }" - $content = $content -creplace "request\.Uri = uri\.ToUri\(\);", "request.Uri = uriBuilder.Uri;" + $content = $content -creplace "request\.Uri = uri\.ToUri\(\);", "uriBuilder.Path += path.ToString();`r`n request.Uri = uriBuilder.Uri;" $content = $content -creplace "request\.SetHeaderValue", "request.Headers.Set" $content = $content -creplace "request\.Content = content;", "request.Content = content;`r`n message.Apply(options);" # Delete DefaultRequestContext - $content = $content -creplace "\s+private static RequestOptions DefaultRequestContext = new RequestOptions\(\);", "" + # $content = $content -creplace "\s+private static RequestOptions DefaultRequestContext = new RequestOptions\(\);", "" # Delete FromCancellationToken $content = $content -creplace "(?s)\s+internal static RequestOptions FromCancellationToken\(CancellationToken cancellationToken = default\).*?return new RequestOptions\(\) \{ CancellationToken = cancellationToken \};.*?\}", "" diff --git a/.dotnet/src/Generated/Assistants.cs b/.dotnet/src/Generated/Assistants.cs index afaac60ee..e725cfe7e 100644 --- 
a/.dotnet/src/Generated/Assistants.cs +++ b/.dotnet/src/Generated/Assistants.cs @@ -47,7 +47,7 @@ public virtual async Task> CreateAssistantAsync(Cr if (assistant is null) throw new ArgumentNullException(nameof(assistant)); using BinaryContent content = BinaryContent.Create(assistant); - ClientResult result = await CreateAssistantAsync(content).ConfigureAwait(false); + ClientResult result = await CreateAssistantAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -59,7 +59,7 @@ public virtual ClientResult CreateAssistant(CreateAssistantRequ if (assistant is null) throw new ArgumentNullException(nameof(assistant)); using BinaryContent content = BinaryContent.Create(assistant); - ClientResult result = CreateAssistant(content); + ClientResult result = CreateAssistant(content, DefaultRequestContext); return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -156,7 +156,7 @@ public virtual ClientResult CreateAssistant(BinaryContent content, RequestOption /// public virtual async Task> GetAssistantsAsync(int? limit = null, ListOrder? order = null, string after = null, string before = null) { - ClientResult result = await GetAssistantsAsync(limit, order?.ToString(), after, before).ConfigureAwait(false); + ClientResult result = await GetAssistantsAsync(limit, order?.ToString(), after, before, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ListAssistantsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -181,7 +181,7 @@ public virtual async Task> GetAssistantsAsy /// public virtual ClientResult GetAssistants(int? limit = null, ListOrder? 
order = null, string after = null, string before = null) { - ClientResult result = GetAssistants(limit, order?.ToString(), after, before); + ClientResult result = GetAssistants(limit, order?.ToString(), after, before, DefaultRequestContext); return ClientResult.FromValue(ListAssistantsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -296,7 +296,7 @@ public virtual async Task> GetAssistantAsync(strin if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); - ClientResult result = await GetAssistantAsync(assistantId).ConfigureAwait(false); + ClientResult result = await GetAssistantAsync(assistantId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -309,7 +309,7 @@ public virtual ClientResult GetAssistant(string assistantId) if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); - ClientResult result = GetAssistant(assistantId); + ClientResult result = GetAssistant(assistantId, DefaultRequestContext); return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -401,7 +401,7 @@ public virtual async Task> ModifyAssistantAsync(st if (assistant is null) throw new ArgumentNullException(nameof(assistant)); using BinaryContent content = BinaryContent.Create(assistant); - ClientResult result = await ModifyAssistantAsync(assistantId, content).ConfigureAwait(false); + ClientResult result = await ModifyAssistantAsync(assistantId, content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -417,7 +417,7 @@ public virtual ClientResult ModifyAssistant(string 
assistantId, if (assistant is null) throw new ArgumentNullException(nameof(assistant)); using BinaryContent content = BinaryContent.Create(assistant); - ClientResult result = ModifyAssistant(assistantId, content); + ClientResult result = ModifyAssistant(assistantId, content, DefaultRequestContext); return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -510,7 +510,7 @@ public virtual async Task> DeleteAssistant if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); - ClientResult result = await DeleteAssistantAsync(assistantId).ConfigureAwait(false); + ClientResult result = await DeleteAssistantAsync(assistantId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(DeleteAssistantResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -523,7 +523,7 @@ public virtual ClientResult DeleteAssistant(string assi if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); - ClientResult result = DeleteAssistant(assistantId); + ClientResult result = DeleteAssistant(assistantId, DefaultRequestContext); return ClientResult.FromValue(DeleteAssistantResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -618,7 +618,7 @@ public virtual async Task> CreateAssistantFile if (file is null) throw new ArgumentNullException(nameof(file)); using BinaryContent content = BinaryContent.Create(file); - ClientResult result = await CreateAssistantFileAsync(assistantId, content).ConfigureAwait(false); + ClientResult result = await CreateAssistantFileAsync(assistantId, content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -637,7 
+637,7 @@ public virtual ClientResult CreateAssistantFile(string assi if (file is null) throw new ArgumentNullException(nameof(file)); using BinaryContent content = BinaryContent.Create(file); - ClientResult result = CreateAssistantFile(assistantId, content); + ClientResult result = CreateAssistantFile(assistantId, content, DefaultRequestContext); return ClientResult.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -750,7 +750,7 @@ public virtual async Task> GetAssistant if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); - ClientResult result = await GetAssistantFilesAsync(assistantId, limit, order?.ToString(), after, before).ConfigureAwait(false); + ClientResult result = await GetAssistantFilesAsync(assistantId, limit, order?.ToString(), after, before, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ListAssistantFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -781,7 +781,7 @@ public virtual ClientResult GetAssistantFiles(string if (assistantId is null) throw new ArgumentNullException(nameof(assistantId)); if (string.IsNullOrEmpty(assistantId)) throw new ArgumentException(nameof(assistantId)); - ClientResult result = GetAssistantFiles(assistantId, limit, order?.ToString(), after, before); + ClientResult result = GetAssistantFiles(assistantId, limit, order?.ToString(), after, before, DefaultRequestContext); return ClientResult.FromValue(ListAssistantFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -909,7 +909,7 @@ public virtual async Task> GetAssistantFileAsy if (fileId is null) throw new ArgumentNullException(nameof(fileId)); if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - ClientResult result = await GetAssistantFileAsync(assistantId, fileId).ConfigureAwait(false); + 
ClientResult result = await GetAssistantFileAsync(assistantId, fileId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -925,7 +925,7 @@ public virtual ClientResult GetAssistantFile(string assista if (fileId is null) throw new ArgumentNullException(nameof(fileId)); if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - ClientResult result = GetAssistantFile(assistantId, fileId); + ClientResult result = GetAssistantFile(assistantId, fileId, DefaultRequestContext); return ClientResult.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -1023,7 +1023,7 @@ public virtual async Task> DeleteAssis if (fileId is null) throw new ArgumentNullException(nameof(fileId)); if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - ClientResult result = await DeleteAssistantFileAsync(assistantId, fileId).ConfigureAwait(false); + ClientResult result = await DeleteAssistantFileAsync(assistantId, fileId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(DeleteAssistantFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -1039,7 +1039,7 @@ public virtual ClientResult DeleteAssistantFile(str if (fileId is null) throw new ArgumentNullException(nameof(fileId)); if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - ClientResult result = DeleteAssistantFile(assistantId, fileId); + ClientResult result = DeleteAssistantFile(assistantId, fileId, DefaultRequestContext); return ClientResult.FromValue(DeleteAssistantFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -1152,7 +1152,6 @@ internal PipelineMessage CreateGetAssistantsRequest(int? 
limit, string order, st UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/assistants"); - uriBuilder.Path += path.ToString(); if (limit != null) { if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) @@ -1197,6 +1196,7 @@ internal PipelineMessage CreateGetAssistantsRequest(int? limit, string order, st uriBuilder.Query = $"before={before}"; } } + uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; request.Headers.Set("Accept", "application/json"); return message; @@ -1211,7 +1211,6 @@ internal PipelineMessage CreateGetAssistantRequest(string assistantId, RequestOp UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/assistants/"); - uriBuilder.Path += path.ToString(); path.Append(assistantId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1228,7 +1227,6 @@ internal PipelineMessage CreateModifyAssistantRequest(string assistantId, Binary UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/assistants/"); - uriBuilder.Path += path.ToString(); path.Append(assistantId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1248,7 +1246,6 @@ internal PipelineMessage CreateDeleteAssistantRequest(string assistantId, Reques UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/assistants/"); - uriBuilder.Path += path.ToString(); path.Append(assistantId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1265,9 +1262,7 @@ internal PipelineMessage CreateCreateAssistantFileRequest(string assistantId, Bi UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/assistants/"); - uriBuilder.Path += path.ToString(); path.Append(assistantId); - uriBuilder.Path += path.ToString(); path.Append("/files"); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1287,11 +1282,8 @@ internal 
PipelineMessage CreateGetAssistantFilesRequest(string assistantId, int? UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/assistants/"); - uriBuilder.Path += path.ToString(); path.Append(assistantId); - uriBuilder.Path += path.ToString(); path.Append("/files"); - uriBuilder.Path += path.ToString(); if (limit != null) { if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) @@ -1336,6 +1328,7 @@ internal PipelineMessage CreateGetAssistantFilesRequest(string assistantId, int? uriBuilder.Query = $"before={before}"; } } + uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; request.Headers.Set("Accept", "application/json"); return message; @@ -1350,11 +1343,8 @@ internal PipelineMessage CreateGetAssistantFileRequest(string assistantId, strin UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/assistants/"); - uriBuilder.Path += path.ToString(); path.Append(assistantId); - uriBuilder.Path += path.ToString(); path.Append("/files/"); - uriBuilder.Path += path.ToString(); path.Append(fileId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1371,11 +1361,8 @@ internal PipelineMessage CreateDeleteAssistantFileRequest(string assistantId, st UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/assistants/"); - uriBuilder.Path += path.ToString(); path.Append(assistantId); - uriBuilder.Path += path.ToString(); path.Append("/files/"); - uriBuilder.Path += path.ToString(); path.Append(fileId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1383,6 +1370,8 @@ internal PipelineMessage CreateDeleteAssistantFileRequest(string assistantId, st return message; } + private static RequestOptions DefaultRequestContext = new RequestOptions(); + private static PipelineMessageClassifier _responseErrorClassifier200; private static PipelineMessageClassifier ResponseErrorClassifier200 => 
_responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } diff --git a/.dotnet/src/Generated/Audio.cs b/.dotnet/src/Generated/Audio.cs index d2bd5db96..963cfc0f2 100644 --- a/.dotnet/src/Generated/Audio.cs +++ b/.dotnet/src/Generated/Audio.cs @@ -47,7 +47,7 @@ public virtual async Task> CreateSpeechAsync(CreateSpee if (speech is null) throw new ArgumentNullException(nameof(speech)); using BinaryContent content = BinaryContent.Create(speech); - ClientResult result = await CreateSpeechAsync(content).ConfigureAwait(false); + ClientResult result = await CreateSpeechAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); } @@ -59,7 +59,7 @@ public virtual ClientResult CreateSpeech(CreateSpeechRequest speech) if (speech is null) throw new ArgumentNullException(nameof(speech)); using BinaryContent content = BinaryContent.Create(speech); - ClientResult result = CreateSpeech(content); + ClientResult result = CreateSpeech(content, DefaultRequestContext); return ClientResult.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); } @@ -143,7 +143,7 @@ public virtual async Task> CreateTrans if (audio is null) throw new ArgumentNullException(nameof(audio)); using BinaryContent content = BinaryContent.Create(audio); - ClientResult result = await CreateTranscriptionAsync(content).ConfigureAwait(false); + ClientResult result = await CreateTranscriptionAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(CreateTranscriptionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -155,7 +155,7 @@ public virtual ClientResult CreateTranscription(Cre if (audio is null) throw new ArgumentNullException(nameof(audio)); using BinaryContent content = BinaryContent.Create(audio); - ClientResult result = CreateTranscription(content); + ClientResult result = CreateTranscription(content, 
DefaultRequestContext); return ClientResult.FromValue(CreateTranscriptionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -239,7 +239,7 @@ public virtual async Task> CreateTransla if (audio is null) throw new ArgumentNullException(nameof(audio)); using BinaryContent content = BinaryContent.Create(audio); - ClientResult result = await CreateTranslationAsync(content).ConfigureAwait(false); + ClientResult result = await CreateTranslationAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(CreateTranslationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -251,7 +251,7 @@ public virtual ClientResult CreateTranslation(CreateT if (audio is null) throw new ArgumentNullException(nameof(audio)); using BinaryContent content = BinaryContent.Create(audio); - ClientResult result = CreateTranslation(content); + ClientResult result = CreateTranslation(content, DefaultRequestContext); return ClientResult.FromValue(CreateTranslationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -381,6 +381,8 @@ internal PipelineMessage CreateCreateTranslationRequest(BinaryContent content, R return message; } + private static RequestOptions DefaultRequestContext = new RequestOptions(); + private static PipelineMessageClassifier _responseErrorClassifier200; private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } diff --git a/.dotnet/src/Generated/Chat.cs b/.dotnet/src/Generated/Chat.cs index 3fa357d90..bb3bb2cc3 100644 --- a/.dotnet/src/Generated/Chat.cs +++ b/.dotnet/src/Generated/Chat.cs @@ -47,7 +47,7 @@ public virtual async Task> CreateChat if (createChatCompletionRequest is null) throw new ArgumentNullException(nameof(createChatCompletionRequest)); using BinaryContent content = BinaryContent.Create(createChatCompletionRequest); - ClientResult result = await 
CreateChatCompletionAsync(content).ConfigureAwait(false); + ClientResult result = await CreateChatCompletionAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(CreateChatCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -59,7 +59,7 @@ public virtual ClientResult CreateChatCompletion(C if (createChatCompletionRequest is null) throw new ArgumentNullException(nameof(createChatCompletionRequest)); using BinaryContent content = BinaryContent.Create(createChatCompletionRequest); - ClientResult result = CreateChatCompletion(content); + ClientResult result = CreateChatCompletion(content, DefaultRequestContext); return ClientResult.FromValue(CreateChatCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -153,6 +153,8 @@ internal PipelineMessage CreateCreateChatCompletionRequest(BinaryContent content return message; } + private static RequestOptions DefaultRequestContext = new RequestOptions(); + private static PipelineMessageClassifier _responseErrorClassifier200; private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } diff --git a/.dotnet/src/Generated/Completions.cs b/.dotnet/src/Generated/Completions.cs index 779fa66c0..f55f7c729 100644 --- a/.dotnet/src/Generated/Completions.cs +++ b/.dotnet/src/Generated/Completions.cs @@ -47,7 +47,7 @@ public virtual async Task> CreateCompleti if (createCompletionRequest is null) throw new ArgumentNullException(nameof(createCompletionRequest)); using BinaryContent content = BinaryContent.Create(createCompletionRequest); - ClientResult result = await CreateCompletionAsync(content).ConfigureAwait(false); + ClientResult result = await CreateCompletionAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(CreateCompletionResponse.FromResponse(result.GetRawResponse()), 
result.GetRawResponse()); } @@ -59,7 +59,7 @@ public virtual ClientResult CreateCompletion(CreateCom if (createCompletionRequest is null) throw new ArgumentNullException(nameof(createCompletionRequest)); using BinaryContent content = BinaryContent.Create(createCompletionRequest); - ClientResult result = CreateCompletion(content); + ClientResult result = CreateCompletion(content, DefaultRequestContext); return ClientResult.FromValue(CreateCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -153,6 +153,8 @@ internal PipelineMessage CreateCreateCompletionRequest(BinaryContent content, Re return message; } + private static RequestOptions DefaultRequestContext = new RequestOptions(); + private static PipelineMessageClassifier _responseErrorClassifier200; private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } diff --git a/.dotnet/src/Generated/Embeddings.cs b/.dotnet/src/Generated/Embeddings.cs index d6b7802a6..0788abad9 100644 --- a/.dotnet/src/Generated/Embeddings.cs +++ b/.dotnet/src/Generated/Embeddings.cs @@ -47,7 +47,7 @@ internal virtual async Task> CreateEmbeddingAs if (embedding is null) throw new ArgumentNullException(nameof(embedding)); using BinaryContent content = BinaryContent.Create(embedding); - ClientResult result = await CreateEmbeddingAsync(content).ConfigureAwait(false); + ClientResult result = await CreateEmbeddingAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(EmbeddingCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -59,7 +59,7 @@ internal virtual ClientResult CreateEmbedding(GenerateEmbed if (embedding is null) throw new ArgumentNullException(nameof(embedding)); using BinaryContent content = BinaryContent.Create(embedding); - ClientResult result = CreateEmbedding(content); + ClientResult result = CreateEmbedding(content, 
DefaultRequestContext); return ClientResult.FromValue(EmbeddingCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -153,6 +153,8 @@ internal PipelineMessage CreateCreateEmbeddingRequest(BinaryContent content, Req return message; } + private static RequestOptions DefaultRequestContext = new RequestOptions(); + private static PipelineMessageClassifier _responseErrorClassifier200; private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } diff --git a/.dotnet/src/Generated/Files.cs b/.dotnet/src/Generated/Files.cs index 10258b608..815ae85ce 100644 --- a/.dotnet/src/Generated/Files.cs +++ b/.dotnet/src/Generated/Files.cs @@ -56,7 +56,7 @@ public virtual async Task> CreateFileAsync(CreateFileRe if (file is null) throw new ArgumentNullException(nameof(file)); using BinaryContent content = BinaryContent.Create(file); - ClientResult result = await CreateFileAsync(content).ConfigureAwait(false); + ClientResult result = await CreateFileAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -77,7 +77,7 @@ public virtual ClientResult CreateFile(CreateFileRequest file) if (file is null) throw new ArgumentNullException(nameof(file)); using BinaryContent content = BinaryContent.Create(file); - ClientResult result = CreateFile(content); + ClientResult result = CreateFile(content, DefaultRequestContext); return ClientResult.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -171,7 +171,7 @@ public virtual ClientResult CreateFile(BinaryContent content, RequestOptions opt /// Only return files with the given purpose. 
public virtual async Task> GetFilesAsync(string purpose = null) { - ClientResult result = await GetFilesAsync(purpose).ConfigureAwait(false); + ClientResult result = await GetFilesAsync(purpose, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ListFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -179,7 +179,7 @@ public virtual async Task> GetFilesAsync(string /// Only return files with the given purpose. public virtual ClientResult GetFiles(string purpose = null) { - ClientResult result = GetFiles(purpose); + ClientResult result = GetFiles(purpose, DefaultRequestContext); return ClientResult.FromValue(ListFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -260,7 +260,7 @@ public virtual async Task> RetrieveFileAsync(string fil if (fileId is null) throw new ArgumentNullException(nameof(fileId)); if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - ClientResult result = await RetrieveFileAsync(fileId).ConfigureAwait(false); + ClientResult result = await RetrieveFileAsync(fileId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -273,7 +273,7 @@ public virtual ClientResult RetrieveFile(string fileId) if (fileId is null) throw new ArgumentNullException(nameof(fileId)); if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - ClientResult result = RetrieveFile(fileId); + ClientResult result = RetrieveFile(fileId, DefaultRequestContext); return ClientResult.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -362,7 +362,7 @@ public virtual async Task> DeleteFileAsync(stri if (fileId is null) throw new ArgumentNullException(nameof(fileId)); if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - ClientResult result = await 
DeleteFileAsync(fileId).ConfigureAwait(false); + ClientResult result = await DeleteFileAsync(fileId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(DeleteFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -375,7 +375,7 @@ public virtual ClientResult DeleteFile(string fileId) if (fileId is null) throw new ArgumentNullException(nameof(fileId)); if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - ClientResult result = DeleteFile(fileId); + ClientResult result = DeleteFile(fileId, DefaultRequestContext); return ClientResult.FromValue(DeleteFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -464,7 +464,7 @@ public virtual async Task> DownloadFileAsync(string fileId) if (fileId is null) throw new ArgumentNullException(nameof(fileId)); if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - ClientResult result = await DownloadFileAsync(fileId).ConfigureAwait(false); + ClientResult result = await DownloadFileAsync(fileId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(result.GetRawResponse().Content.ToObjectFromJson(), result.GetRawResponse()); } @@ -477,7 +477,7 @@ public virtual ClientResult DownloadFile(string fileId) if (fileId is null) throw new ArgumentNullException(nameof(fileId)); if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - ClientResult result = DownloadFile(fileId); + ClientResult result = DownloadFile(fileId, DefaultRequestContext); return ClientResult.FromValue(result.GetRawResponse().Content.ToObjectFromJson(), result.GetRawResponse()); } @@ -584,7 +584,6 @@ internal PipelineMessage CreateGetFilesRequest(string purpose, RequestOptions op UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/files"); - uriBuilder.Path += path.ToString(); if (purpose != null) { if (uriBuilder.Query != null && uriBuilder.Query.Length 
> 1) @@ -596,6 +595,7 @@ internal PipelineMessage CreateGetFilesRequest(string purpose, RequestOptions op uriBuilder.Query = $"purpose={purpose}"; } } + uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; request.Headers.Set("Accept", "application/json"); return message; @@ -610,7 +610,6 @@ internal PipelineMessage CreateRetrieveFileRequest(string fileId, RequestOptions UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/files/"); - uriBuilder.Path += path.ToString(); path.Append(fileId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -627,7 +626,6 @@ internal PipelineMessage CreateDeleteFileRequest(string fileId, RequestOptions o UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/files/"); - uriBuilder.Path += path.ToString(); path.Append(fileId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -644,9 +642,7 @@ internal PipelineMessage CreateDownloadFileRequest(string fileId, RequestOptions UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/files/"); - uriBuilder.Path += path.ToString(); path.Append(fileId); - uriBuilder.Path += path.ToString(); path.Append("/content"); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -654,6 +650,8 @@ internal PipelineMessage CreateDownloadFileRequest(string fileId, RequestOptions return message; } + private static RequestOptions DefaultRequestContext = new RequestOptions(); + private static PipelineMessageClassifier _responseErrorClassifier200; private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } diff --git a/.dotnet/src/Generated/FineTuning.cs b/.dotnet/src/Generated/FineTuning.cs index 63c8b2f63..827f7f18f 100644 --- a/.dotnet/src/Generated/FineTuning.cs +++ b/.dotnet/src/Generated/FineTuning.cs @@ -53,7 +53,7 @@ 
public virtual async Task> CreateFineTuningJobAsync( if (job is null) throw new ArgumentNullException(nameof(job)); using BinaryContent content = BinaryContent.Create(job); - ClientResult result = await CreateFineTuningJobAsync(content).ConfigureAwait(false); + ClientResult result = await CreateFineTuningJobAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -71,7 +71,7 @@ public virtual ClientResult CreateFineTuningJob(CreateFineTuningJ if (job is null) throw new ArgumentNullException(nameof(job)); using BinaryContent content = BinaryContent.Create(job); - ClientResult result = CreateFineTuningJob(content); + ClientResult result = CreateFineTuningJob(content, DefaultRequestContext); return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -160,7 +160,7 @@ public virtual ClientResult CreateFineTuningJob(BinaryContent content, RequestOp /// Number of fine-tuning jobs to retrieve. public virtual async Task> GetPaginatedFineTuningJobsAsync(string after = null, long? limit = null) { - ClientResult result = await GetPaginatedFineTuningJobsAsync(after, limit).ConfigureAwait(false); + ClientResult result = await GetPaginatedFineTuningJobsAsync(after, limit, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -169,7 +169,7 @@ public virtual async Task> Get /// Number of fine-tuning jobs to retrieve. public virtual ClientResult GetPaginatedFineTuningJobs(string after = null, long? 
limit = null) { - ClientResult result = GetPaginatedFineTuningJobs(after, limit); + ClientResult result = GetPaginatedFineTuningJobs(after, limit, DefaultRequestContext); return ClientResult.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -256,7 +256,7 @@ public virtual async Task> RetrieveFineTuningJobAsyn if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); - ClientResult result = await RetrieveFineTuningJobAsync(fineTuningJobId).ConfigureAwait(false); + ClientResult result = await RetrieveFineTuningJobAsync(fineTuningJobId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -273,7 +273,7 @@ public virtual ClientResult RetrieveFineTuningJob(string fineTuni if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); - ClientResult result = RetrieveFineTuningJob(fineTuningJobId); + ClientResult result = RetrieveFineTuningJob(fineTuningJobId, DefaultRequestContext); return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -366,7 +366,7 @@ public virtual async Task> CancelFineTuningJobAsync( if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); - ClientResult result = await CancelFineTuningJobAsync(fineTuningJobId).ConfigureAwait(false); + ClientResult result = await CancelFineTuningJobAsync(fineTuningJobId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); 
} @@ -379,7 +379,7 @@ public virtual ClientResult CancelFineTuningJob(string fineTuning if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); - ClientResult result = CancelFineTuningJob(fineTuningJobId); + ClientResult result = CancelFineTuningJob(fineTuningJobId, DefaultRequestContext); return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -470,7 +470,7 @@ public virtual async Task> GetFine if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); - ClientResult result = await GetFineTuningEventsAsync(fineTuningJobId, after, limit).ConfigureAwait(false); + ClientResult result = await GetFineTuningEventsAsync(fineTuningJobId, after, limit, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -485,7 +485,7 @@ public virtual ClientResult GetFineTuningEvents if (fineTuningJobId is null) throw new ArgumentNullException(nameof(fineTuningJobId)); if (string.IsNullOrEmpty(fineTuningJobId)) throw new ArgumentException(nameof(fineTuningJobId)); - ClientResult result = GetFineTuningEvents(fineTuningJobId, after, limit); + ClientResult result = GetFineTuningEvents(fineTuningJobId, after, limit, DefaultRequestContext); return ClientResult.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -596,7 +596,6 @@ internal PipelineMessage CreateGetPaginatedFineTuningJobsRequest(string after, l UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/fine_tuning/jobs"); - uriBuilder.Path += path.ToString(); if (after != null) { if (uriBuilder.Query != null && 
uriBuilder.Query.Length > 1) @@ -619,6 +618,7 @@ internal PipelineMessage CreateGetPaginatedFineTuningJobsRequest(string after, l uriBuilder.Query = $"limit={limit.Value}"; } } + uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; request.Headers.Set("Accept", "application/json"); return message; @@ -633,7 +633,6 @@ internal PipelineMessage CreateRetrieveFineTuningJobRequest(string fineTuningJob UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/fine_tuning/jobs/"); - uriBuilder.Path += path.ToString(); path.Append(fineTuningJobId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -650,9 +649,7 @@ internal PipelineMessage CreateCancelFineTuningJobRequest(string fineTuningJobId UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/fine_tuning/jobs/"); - uriBuilder.Path += path.ToString(); path.Append(fineTuningJobId); - uriBuilder.Path += path.ToString(); path.Append("/cancel"); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -669,11 +666,8 @@ internal PipelineMessage CreateGetFineTuningEventsRequest(string fineTuningJobId UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/fine_tuning/jobs/"); - uriBuilder.Path += path.ToString(); path.Append(fineTuningJobId); - uriBuilder.Path += path.ToString(); path.Append("/events"); - uriBuilder.Path += path.ToString(); if (after != null) { if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) @@ -696,11 +690,14 @@ internal PipelineMessage CreateGetFineTuningEventsRequest(string fineTuningJobId uriBuilder.Query = $"limit={limit.Value}"; } } + uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; request.Headers.Set("Accept", "application/json"); return message; } + private static RequestOptions DefaultRequestContext = new RequestOptions(); + private static PipelineMessageClassifier _responseErrorClassifier200; private static 
PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } diff --git a/.dotnet/src/Generated/Images.cs b/.dotnet/src/Generated/Images.cs index f2777b7ad..fb99a3319 100644 --- a/.dotnet/src/Generated/Images.cs +++ b/.dotnet/src/Generated/Images.cs @@ -47,7 +47,7 @@ public virtual async Task> CreateImageAsync(CreateI if (image is null) throw new ArgumentNullException(nameof(image)); using BinaryContent content = BinaryContent.Create(image); - ClientResult result = await CreateImageAsync(content).ConfigureAwait(false); + ClientResult result = await CreateImageAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -59,7 +59,7 @@ public virtual ClientResult CreateImage(CreateImageRequest image if (image is null) throw new ArgumentNullException(nameof(image)); using BinaryContent content = BinaryContent.Create(image); - ClientResult result = CreateImage(content); + ClientResult result = CreateImage(content, DefaultRequestContext); return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -143,7 +143,7 @@ public virtual async Task> CreateImageEditAsync(Cre if (image is null) throw new ArgumentNullException(nameof(image)); using BinaryContent content = BinaryContent.Create(image); - ClientResult result = await CreateImageEditAsync(content).ConfigureAwait(false); + ClientResult result = await CreateImageEditAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -155,7 +155,7 @@ public virtual ClientResult CreateImageEdit(CreateImageEditReque if (image is null) throw new ArgumentNullException(nameof(image)); using BinaryContent content = BinaryContent.Create(image); - ClientResult result 
= CreateImageEdit(content); + ClientResult result = CreateImageEdit(content, DefaultRequestContext); return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -239,7 +239,7 @@ public virtual async Task> CreateImageVariationAsyn if (image is null) throw new ArgumentNullException(nameof(image)); using BinaryContent content = BinaryContent.Create(image); - ClientResult result = await CreateImageVariationAsync(content).ConfigureAwait(false); + ClientResult result = await CreateImageVariationAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -251,7 +251,7 @@ public virtual ClientResult CreateImageVariation(CreateImageVari if (image is null) throw new ArgumentNullException(nameof(image)); using BinaryContent content = BinaryContent.Create(image); - ClientResult result = CreateImageVariation(content); + ClientResult result = CreateImageVariation(content, DefaultRequestContext); return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -381,6 +381,8 @@ internal PipelineMessage CreateCreateImageVariationRequest(BinaryContent content return message; } + private static RequestOptions DefaultRequestContext = new RequestOptions(); + private static PipelineMessageClassifier _responseErrorClassifier200; private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } diff --git a/.dotnet/src/Generated/Messages.cs b/.dotnet/src/Generated/Messages.cs index 28ac36351..f3abd4bad 100644 --- a/.dotnet/src/Generated/Messages.cs +++ b/.dotnet/src/Generated/Messages.cs @@ -51,7 +51,7 @@ public virtual async Task> CreateMessageAsync(string if (message is null) throw new ArgumentNullException(nameof(message)); using BinaryContent content = 
BinaryContent.Create(message); - ClientResult result = await CreateMessageAsync(threadId, content).ConfigureAwait(false); + ClientResult result = await CreateMessageAsync(threadId, content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -67,7 +67,7 @@ public virtual ClientResult CreateMessage(string threadId, Create if (message is null) throw new ArgumentNullException(nameof(message)); using BinaryContent content = BinaryContent.Create(message); - ClientResult result = CreateMessage(threadId, content); + ClientResult result = CreateMessage(threadId, content, DefaultRequestContext); return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -178,7 +178,7 @@ public virtual async Task> GetMessagesAsync(s if (threadId is null) throw new ArgumentNullException(nameof(threadId)); if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - ClientResult result = await GetMessagesAsync(threadId, limit, order?.ToString(), after, before).ConfigureAwait(false); + ClientResult result = await GetMessagesAsync(threadId, limit, order?.ToString(), after, before, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ListMessagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -209,7 +209,7 @@ public virtual ClientResult GetMessages(string threadId, i if (threadId is null) throw new ArgumentNullException(nameof(threadId)); if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - ClientResult result = GetMessages(threadId, limit, order?.ToString(), after, before); + ClientResult result = GetMessages(threadId, limit, order?.ToString(), after, before, DefaultRequestContext); return ClientResult.FromValue(ListMessagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -337,7 +337,7 @@ public 
virtual async Task> GetMessageAsync(string th if (messageId is null) throw new ArgumentNullException(nameof(messageId)); if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); - ClientResult result = await GetMessageAsync(threadId, messageId).ConfigureAwait(false); + ClientResult result = await GetMessageAsync(threadId, messageId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -353,7 +353,7 @@ public virtual ClientResult GetMessage(string threadId, string me if (messageId is null) throw new ArgumentNullException(nameof(messageId)); if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); - ClientResult result = GetMessage(threadId, messageId); + ClientResult result = GetMessage(threadId, messageId, DefaultRequestContext); return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -454,7 +454,7 @@ public virtual async Task> ModifyMessageAsync(string if (message is null) throw new ArgumentNullException(nameof(message)); using BinaryContent content = BinaryContent.Create(message); - ClientResult result = await ModifyMessageAsync(threadId, messageId, content).ConfigureAwait(false); + ClientResult result = await ModifyMessageAsync(threadId, messageId, content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -473,7 +473,7 @@ public virtual ClientResult ModifyMessage(string threadId, string if (message is null) throw new ArgumentNullException(nameof(message)); using BinaryContent content = BinaryContent.Create(message); - ClientResult result = ModifyMessage(threadId, messageId, content); + ClientResult result = ModifyMessage(threadId, messageId, content, DefaultRequestContext); return 
ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -593,7 +593,7 @@ public virtual async Task> GetMessageFile if (messageId is null) throw new ArgumentNullException(nameof(messageId)); if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); - ClientResult result = await GetMessageFilesAsync(threadId, messageId, limit, order?.ToString(), after, before).ConfigureAwait(false); + ClientResult result = await GetMessageFilesAsync(threadId, messageId, limit, order?.ToString(), after, before, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ListMessageFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -627,7 +627,7 @@ public virtual ClientResult GetMessageFiles(string thr if (messageId is null) throw new ArgumentNullException(nameof(messageId)); if (string.IsNullOrEmpty(messageId)) throw new ArgumentException(nameof(messageId)); - ClientResult result = GetMessageFiles(threadId, messageId, limit, order?.ToString(), after, before); + ClientResult result = GetMessageFiles(threadId, messageId, limit, order?.ToString(), after, before, DefaultRequestContext); return ClientResult.FromValue(ListMessageFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -764,7 +764,7 @@ public virtual async Task> GetMessageFileAsync(s if (fileId is null) throw new ArgumentNullException(nameof(fileId)); if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - ClientResult result = await GetMessageFileAsync(threadId, messageId, fileId).ConfigureAwait(false); + ClientResult result = await GetMessageFileAsync(threadId, messageId, fileId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(MessageFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -783,7 +783,7 @@ public virtual ClientResult GetMessageFile(string threadId, s if (fileId is null) throw new 
ArgumentNullException(nameof(fileId)); if (string.IsNullOrEmpty(fileId)) throw new ArgumentException(nameof(fileId)); - ClientResult result = GetMessageFile(threadId, messageId, fileId); + ClientResult result = GetMessageFile(threadId, messageId, fileId, DefaultRequestContext); return ClientResult.FromValue(MessageFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -884,9 +884,7 @@ internal PipelineMessage CreateCreateMessageRequest(string threadId, BinaryConte UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/messages"); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -906,11 +904,8 @@ internal PipelineMessage CreateGetMessagesRequest(string threadId, int? limit, s UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/messages"); - uriBuilder.Path += path.ToString(); if (limit != null) { if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) @@ -955,6 +950,7 @@ internal PipelineMessage CreateGetMessagesRequest(string threadId, int? 
limit, s uriBuilder.Query = $"before={before}"; } } + uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; request.Headers.Set("Accept", "application/json"); return message; @@ -969,11 +965,8 @@ internal PipelineMessage CreateGetMessageRequest(string threadId, string message UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/messages/"); - uriBuilder.Path += path.ToString(); path.Append(messageId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -990,11 +983,8 @@ internal PipelineMessage CreateModifyMessageRequest(string threadId, string mess UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/messages/"); - uriBuilder.Path += path.ToString(); path.Append(messageId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1014,15 +1004,10 @@ internal PipelineMessage CreateGetMessageFilesRequest(string threadId, string me UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/messages/"); - uriBuilder.Path += path.ToString(); path.Append(messageId); - uriBuilder.Path += path.ToString(); path.Append("/files"); - uriBuilder.Path += path.ToString(); if (limit != null) { if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) @@ -1067,6 +1052,7 @@ internal PipelineMessage CreateGetMessageFilesRequest(string threadId, string me uriBuilder.Query = $"before={before}"; } } + uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; request.Headers.Set("Accept", "application/json"); return message; @@ -1081,15 +1067,10 @@ 
internal PipelineMessage CreateGetMessageFileRequest(string threadId, string mes UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/messages/"); - uriBuilder.Path += path.ToString(); path.Append(messageId); - uriBuilder.Path += path.ToString(); path.Append("/files/"); - uriBuilder.Path += path.ToString(); path.Append(fileId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1097,6 +1078,8 @@ internal PipelineMessage CreateGetMessageFileRequest(string threadId, string mes return message; } + private static RequestOptions DefaultRequestContext = new RequestOptions(); + private static PipelineMessageClassifier _responseErrorClassifier200; private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } diff --git a/.dotnet/src/Generated/ModelsOps.cs b/.dotnet/src/Generated/ModelsOps.cs index e806f3cef..528bd4d2f 100644 --- a/.dotnet/src/Generated/ModelsOps.cs +++ b/.dotnet/src/Generated/ModelsOps.cs @@ -45,7 +45,7 @@ internal ModelsOps(ClientPipeline pipeline, ApiKeyCredential credential, Uri end /// public virtual async Task> GetModelsAsync(CancellationToken cancellationToken = default) { - ClientResult result = await GetModelsAsync().ConfigureAwait(false); + ClientResult result = await GetModelsAsync(DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ListModelsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -55,7 +55,7 @@ public virtual async Task> GetModelsAsync(Cance /// public virtual ClientResult GetModels(CancellationToken cancellationToken = default) { - ClientResult result = GetModels(); + ClientResult result = GetModels(DefaultRequestContext); return 
ClientResult.FromValue(ListModelsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -139,7 +139,7 @@ public virtual async Task> RetrieveAsync(string model) if (model is null) throw new ArgumentNullException(nameof(model)); if (string.IsNullOrEmpty(model)) throw new ArgumentException(nameof(model)); - ClientResult result = await RetrieveAsync(model).ConfigureAwait(false); + ClientResult result = await RetrieveAsync(model, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(Model.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -155,7 +155,7 @@ public virtual ClientResult Retrieve(string model) if (model is null) throw new ArgumentNullException(nameof(model)); if (string.IsNullOrEmpty(model)) throw new ArgumentException(nameof(model)); - ClientResult result = Retrieve(model); + ClientResult result = Retrieve(model, DefaultRequestContext); return ClientResult.FromValue(Model.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -246,7 +246,7 @@ public virtual async Task> DeleteAsync(string if (model is null) throw new ArgumentNullException(nameof(model)); if (string.IsNullOrEmpty(model)) throw new ArgumentException(nameof(model)); - ClientResult result = await DeleteAsync(model).ConfigureAwait(false); + ClientResult result = await DeleteAsync(model, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(DeleteModelResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -259,7 +259,7 @@ public virtual ClientResult Delete(string model) if (model is null) throw new ArgumentNullException(nameof(model)); if (string.IsNullOrEmpty(model)) throw new ArgumentException(nameof(model)); - ClientResult result = Delete(model); + ClientResult result = Delete(model, DefaultRequestContext); return ClientResult.FromValue(DeleteModelResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -363,7 +363,6 @@ internal PipelineMessage 
CreateRetrieveRequest(string model, RequestOptions opti UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/models/"); - uriBuilder.Path += path.ToString(); path.Append(model); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -380,7 +379,6 @@ internal PipelineMessage CreateDeleteRequest(string model, RequestOptions option UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/models/"); - uriBuilder.Path += path.ToString(); path.Append(model); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -388,6 +386,8 @@ internal PipelineMessage CreateDeleteRequest(string model, RequestOptions option return message; } + private static RequestOptions DefaultRequestContext = new RequestOptions(); + private static PipelineMessageClassifier _responseErrorClassifier200; private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } diff --git a/.dotnet/src/Generated/Moderations.cs b/.dotnet/src/Generated/Moderations.cs index e0127be76..3c728d50a 100644 --- a/.dotnet/src/Generated/Moderations.cs +++ b/.dotnet/src/Generated/Moderations.cs @@ -47,7 +47,7 @@ public virtual async Task> CreateModerati if (content is null) throw new ArgumentNullException(nameof(content)); using BinaryContent content0 = BinaryContent.Create(content); - ClientResult result = await CreateModerationAsync(content0).ConfigureAwait(false); + ClientResult result = await CreateModerationAsync(content0, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(CreateModerationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -59,7 +59,7 @@ public virtual ClientResult CreateModeration(CreateMod if (content is null) throw new ArgumentNullException(nameof(content)); using BinaryContent content0 = BinaryContent.Create(content); - ClientResult result = 
CreateModeration(content0); + ClientResult result = CreateModeration(content0, DefaultRequestContext); return ClientResult.FromValue(CreateModerationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -153,6 +153,8 @@ internal PipelineMessage CreateCreateModerationRequest(BinaryContent content, Re return message; } + private static RequestOptions DefaultRequestContext = new RequestOptions(); + private static PipelineMessageClassifier _responseErrorClassifier200; private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } diff --git a/.dotnet/src/Generated/Runs.cs b/.dotnet/src/Generated/Runs.cs index 05661caa5..5d79922ec 100644 --- a/.dotnet/src/Generated/Runs.cs +++ b/.dotnet/src/Generated/Runs.cs @@ -47,7 +47,7 @@ public virtual async Task> CreateThreadAndRunAsync(Creat if (threadAndRun is null) throw new ArgumentNullException(nameof(threadAndRun)); using BinaryContent content = BinaryContent.Create(threadAndRun); - ClientResult result = await CreateThreadAndRunAsync(content).ConfigureAwait(false); + ClientResult result = await CreateThreadAndRunAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -59,7 +59,7 @@ public virtual ClientResult CreateThreadAndRun(CreateThreadAndRunRequ if (threadAndRun is null) throw new ArgumentNullException(nameof(threadAndRun)); using BinaryContent content = BinaryContent.Create(threadAndRun); - ClientResult result = CreateThreadAndRun(content); + ClientResult result = CreateThreadAndRun(content, DefaultRequestContext); return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -147,7 +147,7 @@ public virtual async Task> CreateRunAsync(string threadI if (run is null) throw new ArgumentNullException(nameof(run)); using BinaryContent 
content = BinaryContent.Create(run); - ClientResult result = await CreateRunAsync(threadId, content).ConfigureAwait(false); + ClientResult result = await CreateRunAsync(threadId, content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -163,7 +163,7 @@ public virtual ClientResult CreateRun(string threadId, CreateRunReque if (run is null) throw new ArgumentNullException(nameof(run)); using BinaryContent content = BinaryContent.Create(run); - ClientResult result = CreateRun(threadId, content); + ClientResult result = CreateRun(threadId, content, DefaultRequestContext); return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -274,7 +274,7 @@ public virtual async Task> GetRunsAsync(string th if (threadId is null) throw new ArgumentNullException(nameof(threadId)); if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - ClientResult result = await GetRunsAsync(threadId, limit, order?.ToString(), after, before).ConfigureAwait(false); + ClientResult result = await GetRunsAsync(threadId, limit, order?.ToString(), after, before, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ListRunsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -305,7 +305,7 @@ public virtual ClientResult GetRuns(string threadId, int? 
limi if (threadId is null) throw new ArgumentNullException(nameof(threadId)); if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - ClientResult result = GetRuns(threadId, limit, order?.ToString(), after, before); + ClientResult result = GetRuns(threadId, limit, order?.ToString(), after, before, DefaultRequestContext); return ClientResult.FromValue(ListRunsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -433,7 +433,7 @@ public virtual async Task> GetRunAsync(string threadId, if (runId is null) throw new ArgumentNullException(nameof(runId)); if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); - ClientResult result = await GetRunAsync(threadId, runId).ConfigureAwait(false); + ClientResult result = await GetRunAsync(threadId, runId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -449,7 +449,7 @@ public virtual ClientResult GetRun(string threadId, string runId) if (runId is null) throw new ArgumentNullException(nameof(runId)); if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); - ClientResult result = GetRun(threadId, runId); + ClientResult result = GetRun(threadId, runId, DefaultRequestContext); return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -550,7 +550,7 @@ public virtual async Task> ModifyRunAsync(string threadI if (run is null) throw new ArgumentNullException(nameof(run)); using BinaryContent content = BinaryContent.Create(run); - ClientResult result = await ModifyRunAsync(threadId, runId, content).ConfigureAwait(false); + ClientResult result = await ModifyRunAsync(threadId, runId, content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -569,7 +569,7 @@ public virtual 
ClientResult ModifyRun(string threadId, string runId, if (run is null) throw new ArgumentNullException(nameof(run)); using BinaryContent content = BinaryContent.Create(run); - ClientResult result = ModifyRun(threadId, runId, content); + ClientResult result = ModifyRun(threadId, runId, content, DefaultRequestContext); return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -671,7 +671,7 @@ public virtual async Task> CancelRunAsync(string threadI if (runId is null) throw new ArgumentNullException(nameof(runId)); if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); - ClientResult result = await CancelRunAsync(threadId, runId).ConfigureAwait(false); + ClientResult result = await CancelRunAsync(threadId, runId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -687,7 +687,7 @@ public virtual ClientResult CancelRun(string threadId, string runId) if (runId is null) throw new ArgumentNullException(nameof(runId)); if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); - ClientResult result = CancelRun(threadId, runId); + ClientResult result = CancelRun(threadId, runId, DefaultRequestContext); return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -792,7 +792,7 @@ public virtual async Task> SubmitToolOuputsToRunAsync(st if (submitToolOutputsRun is null) throw new ArgumentNullException(nameof(submitToolOutputsRun)); using BinaryContent content = BinaryContent.Create(submitToolOutputsRun); - ClientResult result = await SubmitToolOuputsToRunAsync(threadId, runId, content).ConfigureAwait(false); + ClientResult result = await SubmitToolOuputsToRunAsync(threadId, runId, content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), 
result.GetRawResponse()); } @@ -815,7 +815,7 @@ public virtual ClientResult SubmitToolOuputsToRun(string threadId, st if (submitToolOutputsRun is null) throw new ArgumentNullException(nameof(submitToolOutputsRun)); using BinaryContent content = BinaryContent.Create(submitToolOutputsRun); - ClientResult result = SubmitToolOuputsToRun(threadId, runId, content); + ClientResult result = SubmitToolOuputsToRun(threadId, runId, content, DefaultRequestContext); return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -939,7 +939,7 @@ public virtual async Task> GetRunStepsAsync(s if (runId is null) throw new ArgumentNullException(nameof(runId)); if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); - ClientResult result = await GetRunStepsAsync(threadId, runId, limit, order?.ToString(), after, before).ConfigureAwait(false); + ClientResult result = await GetRunStepsAsync(threadId, runId, limit, order?.ToString(), after, before, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ListRunStepsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -973,7 +973,7 @@ public virtual ClientResult GetRunSteps(string threadId, s if (runId is null) throw new ArgumentNullException(nameof(runId)); if (string.IsNullOrEmpty(runId)) throw new ArgumentException(nameof(runId)); - ClientResult result = GetRunSteps(threadId, runId, limit, order?.ToString(), after, before); + ClientResult result = GetRunSteps(threadId, runId, limit, order?.ToString(), after, before, DefaultRequestContext); return ClientResult.FromValue(ListRunStepsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -1110,7 +1110,7 @@ public virtual async Task> GetRunStepAsync(string th if (stepId is null) throw new ArgumentNullException(nameof(stepId)); if (string.IsNullOrEmpty(stepId)) throw new ArgumentException(nameof(stepId)); - ClientResult result = await 
GetRunStepAsync(threadId, runId, stepId).ConfigureAwait(false); + ClientResult result = await GetRunStepAsync(threadId, runId, stepId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(RunStepObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -1129,7 +1129,7 @@ public virtual ClientResult GetRunStep(string threadId, string ru if (stepId is null) throw new ArgumentNullException(nameof(stepId)); if (string.IsNullOrEmpty(stepId)) throw new ArgumentException(nameof(stepId)); - ClientResult result = GetRunStep(threadId, runId, stepId); + ClientResult result = GetRunStep(threadId, runId, stepId, DefaultRequestContext); return ClientResult.FromValue(RunStepObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -1248,9 +1248,7 @@ internal PipelineMessage CreateCreateRunRequest(string threadId, BinaryContent c UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/runs"); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1270,11 +1268,8 @@ internal PipelineMessage CreateGetRunsRequest(string threadId, int? limit, strin UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/runs"); - uriBuilder.Path += path.ToString(); if (limit != null) { if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) @@ -1319,6 +1314,7 @@ internal PipelineMessage CreateGetRunsRequest(string threadId, int? 
limit, strin uriBuilder.Query = $"before={before}"; } } + uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; request.Headers.Set("Accept", "application/json"); return message; @@ -1333,11 +1329,8 @@ internal PipelineMessage CreateGetRunRequest(string threadId, string runId, Requ UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/runs/"); - uriBuilder.Path += path.ToString(); path.Append(runId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1354,11 +1347,8 @@ internal PipelineMessage CreateModifyRunRequest(string threadId, string runId, B UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/runs/"); - uriBuilder.Path += path.ToString(); path.Append(runId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1378,13 +1368,9 @@ internal PipelineMessage CreateCancelRunRequest(string threadId, string runId, R UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/runs/"); - uriBuilder.Path += path.ToString(); path.Append(runId); - uriBuilder.Path += path.ToString(); path.Append("/cancel"); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1401,13 +1387,9 @@ internal PipelineMessage CreateSubmitToolOuputsToRunRequest(string threadId, str UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/runs/"); - uriBuilder.Path += path.ToString(); 
path.Append(runId); - uriBuilder.Path += path.ToString(); path.Append("/submit_tool_outputs"); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1427,15 +1409,10 @@ internal PipelineMessage CreateGetRunStepsRequest(string threadId, string runId, UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/runs/"); - uriBuilder.Path += path.ToString(); path.Append(runId); - uriBuilder.Path += path.ToString(); path.Append("/steps"); - uriBuilder.Path += path.ToString(); if (limit != null) { if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) @@ -1480,6 +1457,7 @@ internal PipelineMessage CreateGetRunStepsRequest(string threadId, string runId, uriBuilder.Query = $"before={before}"; } } + uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; request.Headers.Set("Accept", "application/json"); return message; @@ -1494,15 +1472,10 @@ internal PipelineMessage CreateGetRunStepRequest(string threadId, string runId, UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); - uriBuilder.Path += path.ToString(); path.Append("/runs/"); - uriBuilder.Path += path.ToString(); path.Append(runId); - uriBuilder.Path += path.ToString(); path.Append("/steps/"); - uriBuilder.Path += path.ToString(); path.Append(stepId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -1510,6 +1483,8 @@ internal PipelineMessage CreateGetRunStepRequest(string threadId, string runId, return message; } + private static RequestOptions DefaultRequestContext = new RequestOptions(); + private static PipelineMessageClassifier _responseErrorClassifier200; private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= 
PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } diff --git a/.dotnet/src/Generated/Threads.cs b/.dotnet/src/Generated/Threads.cs index 462f88718..6332bef17 100644 --- a/.dotnet/src/Generated/Threads.cs +++ b/.dotnet/src/Generated/Threads.cs @@ -47,7 +47,7 @@ public virtual async Task> CreateThreadAsync(CreateTh if (thread is null) throw new ArgumentNullException(nameof(thread)); using BinaryContent content = BinaryContent.Create(thread); - ClientResult result = await CreateThreadAsync(content).ConfigureAwait(false); + ClientResult result = await CreateThreadAsync(content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -59,7 +59,7 @@ public virtual ClientResult CreateThread(CreateThreadRequest threa if (thread is null) throw new ArgumentNullException(nameof(thread)); using BinaryContent content = BinaryContent.Create(thread); - ClientResult result = CreateThread(content); + ClientResult result = CreateThread(content, DefaultRequestContext); return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -144,7 +144,7 @@ public virtual async Task> GetThreadAsync(string thre if (threadId is null) throw new ArgumentNullException(nameof(threadId)); if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - ClientResult result = await GetThreadAsync(threadId).ConfigureAwait(false); + ClientResult result = await GetThreadAsync(threadId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -157,7 +157,7 @@ public virtual ClientResult GetThread(string threadId) if (threadId is null) throw new ArgumentNullException(nameof(threadId)); if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - ClientResult result = GetThread(threadId); + 
ClientResult result = GetThread(threadId, DefaultRequestContext); return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -249,7 +249,7 @@ public virtual async Task> ModifyThreadAsync(string t if (thread is null) throw new ArgumentNullException(nameof(thread)); using BinaryContent content = BinaryContent.Create(thread); - ClientResult result = await ModifyThreadAsync(threadId, content).ConfigureAwait(false); + ClientResult result = await ModifyThreadAsync(threadId, content, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -265,7 +265,7 @@ public virtual ClientResult ModifyThread(string threadId, ModifyTh if (thread is null) throw new ArgumentNullException(nameof(thread)); using BinaryContent content = BinaryContent.Create(thread); - ClientResult result = ModifyThread(threadId, content); + ClientResult result = ModifyThread(threadId, content, DefaultRequestContext); return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -358,7 +358,7 @@ public virtual async Task> DeleteThreadAsync( if (threadId is null) throw new ArgumentNullException(nameof(threadId)); if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - ClientResult result = await DeleteThreadAsync(threadId).ConfigureAwait(false); + ClientResult result = await DeleteThreadAsync(threadId, DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(DeleteThreadResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -371,7 +371,7 @@ public virtual ClientResult DeleteThread(string threadId) if (threadId is null) throw new ArgumentNullException(nameof(threadId)); if (string.IsNullOrEmpty(threadId)) throw new ArgumentException(nameof(threadId)); - ClientResult result = DeleteThread(threadId); + ClientResult result = 
DeleteThread(threadId, DefaultRequestContext); return ClientResult.FromValue(DeleteThreadResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } @@ -478,7 +478,6 @@ internal PipelineMessage CreateGetThreadRequest(string threadId, RequestOptions UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -495,7 +494,6 @@ internal PipelineMessage CreateModifyThreadRequest(string threadId, BinaryConten UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -515,7 +513,6 @@ internal PipelineMessage CreateDeleteThreadRequest(string threadId, RequestOptio UriBuilder uriBuilder = new(_endpoint.ToString()); StringBuilder path = new(); path.Append("/threads/"); - uriBuilder.Path += path.ToString(); path.Append(threadId); uriBuilder.Path += path.ToString(); request.Uri = uriBuilder.Uri; @@ -523,6 +520,8 @@ internal PipelineMessage CreateDeleteThreadRequest(string threadId, RequestOptio return message; } + private static RequestOptions DefaultRequestContext = new RequestOptions(); + private static PipelineMessageClassifier _responseErrorClassifier200; private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } From c398c45c7cec8658038ed1f01f9073f3c06d7ca8 Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Tue, 27 Feb 2024 21:35:06 -0800 Subject: [PATCH 15/18] Fix SubmitToolOutputsRunRequest, remove Embeddings customizations, edit Update-ClietModel.ps1 script to System.ClientModel 1.1.0-alpha.20240227.1 --- .dotnet/scripts/Update-ClientModel.ps1 | 12 ++- 
.dotnet/src/Custom/Embeddings/Embeddings.cs | 93 ---------------- .../Models/Embedding.Serialization.cs | 67 ------------ .../src/Custom/Embeddings/Models/Embedding.cs | 68 ------------ .../Embeddings/Models/EmbeddingObject.cs | 10 -- .../Models/GenerateEmbeddingsOptions.cs | 13 --- .dotnet/src/Custom/OpenAIClient.cs | 22 ---- .dotnet/src/Custom/OpenAIModelFactory.cs | 36 ------- .dotnet/src/Generated/Embeddings.cs | 20 ++-- ...> CreateEmbeddingRequest.Serialization.cs} | 48 ++++----- ...gsOptions.cs => CreateEmbeddingRequest.cs} | 78 ++++++++++++-- .../CreateEmbeddingRequestEncodingFormat.cs | 47 ++++++++ .../Models/CreateEmbeddingRequestModel.cs | 50 +++++++++ ... CreateEmbeddingResponse.Serialization.cs} | 48 ++++----- ...llection.cs => CreateEmbeddingResponse.cs} | 20 ++-- .../Models/CreateEmbeddingResponseObject.cs | 44 ++++++++ .../Models/Embedding.Serialization.cs | 40 +++++++ .dotnet/src/Generated/Models/Embedding.cs | 45 ++++++++ .../Models/EmbeddingCollectionObject.cs | 44 -------- .../src/Generated/Models/EmbeddingObject.cs | 2 +- ...ion.cs => EmbeddingUsage.Serialization.cs} | 40 +++---- ...beddingTokenUsage.cs => EmbeddingUsage.cs} | 16 +-- ...GenerateEmbeddingsOptionsEncodingFormat.cs | 47 -------- .../Models/GenerateEmbeddingsOptionsModel.cs | 50 --------- ...bmitToolOutputsRunRequest.Serialization.cs | 16 ++- .../Models/SubmitToolOutputsRunRequest.cs | 9 +- ...putsRunRequestToolOutput.Serialization.cs} | 40 +++---- ... 
SubmitToolOutputsRunRequestToolOutput.cs} | 12 +-- .dotnet/src/Generated/OpenAIClient.cs | 6 ++ .dotnet/src/Generated/OpenAIModelFactory.cs | 61 +++++++++-- .dotnet/src/OpenAI.csproj | 2 +- .dotnet/tests/EmbeddingsTests.cs | 101 ------------------ .../tests/Generated/Tests/EmbeddingsTests.cs | 23 ++++ .dotnet/tests/OpenAI.Tests.csproj | 1 + main.tsp | 13 +-- runs/models.tsp | 2 +- tsp-output/@typespec/openapi3/openapi.yaml | 22 ++-- 37 files changed, 543 insertions(+), 725 deletions(-) delete mode 100644 .dotnet/src/Custom/Embeddings/Embeddings.cs delete mode 100644 .dotnet/src/Custom/Embeddings/Models/Embedding.Serialization.cs delete mode 100644 .dotnet/src/Custom/Embeddings/Models/Embedding.cs delete mode 100644 .dotnet/src/Custom/Embeddings/Models/EmbeddingObject.cs delete mode 100644 .dotnet/src/Custom/Embeddings/Models/GenerateEmbeddingsOptions.cs delete mode 100644 .dotnet/src/Custom/OpenAIClient.cs delete mode 100644 .dotnet/src/Custom/OpenAIModelFactory.cs rename .dotnet/src/Generated/Models/{GenerateEmbeddingsOptions.Serialization.cs => CreateEmbeddingRequest.Serialization.cs} (67%) rename .dotnet/src/Generated/Models/{GenerateEmbeddingsOptions.cs => CreateEmbeddingRequest.cs} (58%) create mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs create mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs rename .dotnet/src/Generated/Models/{EmbeddingCollection.Serialization.cs => CreateEmbeddingResponse.Serialization.cs} (65%) rename .dotnet/src/Generated/Models/{EmbeddingCollection.cs => CreateEmbeddingResponse.cs} (78%) create mode 100644 .dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs delete mode 100644 .dotnet/src/Generated/Models/EmbeddingCollectionObject.cs rename .dotnet/src/Generated/Models/{EmbeddingTokenUsage.Serialization.cs => EmbeddingUsage.Serialization.cs} (67%) rename .dotnet/src/Generated/Models/{EmbeddingTokenUsage.cs => EmbeddingUsage.cs} (85%) delete mode 100644 
.dotnet/src/Generated/Models/GenerateEmbeddingsOptionsEncodingFormat.cs delete mode 100644 .dotnet/src/Generated/Models/GenerateEmbeddingsOptionsModel.cs rename .dotnet/src/Generated/Models/{SubmitToolOutputsRunRequestToolOutputs.Serialization.cs => SubmitToolOutputsRunRequestToolOutput.Serialization.cs} (68%) rename .dotnet/src/Generated/Models/{SubmitToolOutputsRunRequestToolOutputs.cs => SubmitToolOutputsRunRequestToolOutput.cs} (85%) delete mode 100644 .dotnet/tests/EmbeddingsTests.cs create mode 100644 .dotnet/tests/Generated/Tests/EmbeddingsTests.cs diff --git a/.dotnet/scripts/Update-ClientModel.ps1 b/.dotnet/scripts/Update-ClientModel.ps1 index 203cdd80c..83013e82e 100644 --- a/.dotnet/scripts/Update-ClientModel.ps1 +++ b/.dotnet/scripts/Update-ClientModel.ps1 @@ -1,12 +1,18 @@ function Update-ClientModelPackage { + $current = Get-Location $root = Split-Path $PSScriptRoot -Parent - $directory = Join-Path -Path $root -ChildPath "src" - $current = Get-Location + $directory = Join-Path -Path $root -ChildPath "src" Set-Location -Path $directory dotnet remove "OpenAI.csproj" package "System.ClientModel" - dotnet add "OpenAI.csproj" package "System.ClientModel" --version "1.1.0-alpha.20240215.1" --source "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-net/nuget/v3/index.json" + dotnet add "OpenAI.csproj" package "System.ClientModel" --version "1.1.0-alpha.20240227.1" --source "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-net/nuget/v3/index.json" + + $directory = Join-Path -Path $root -ChildPath "tests" + Set-Location -Path $directory + + dotnet remove "OpenAI.Tests.csproj" package "System.ClientModel" + dotnet add "OpenAI.Tests.csproj" package "System.ClientModel" --version "1.1.0-alpha.20240227.1" --source "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-net/nuget/v3/index.json" Set-Location -Path $current } diff --git a/.dotnet/src/Custom/Embeddings/Embeddings.cs 
b/.dotnet/src/Custom/Embeddings/Embeddings.cs deleted file mode 100644 index 1dad5bf62..000000000 --- a/.dotnet/src/Custom/Embeddings/Embeddings.cs +++ /dev/null @@ -1,93 +0,0 @@ -using OpenAI.Models; -using System; -using System.ClientModel; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Linq; -using System.Threading.Tasks; - -namespace OpenAI -{ - public partial class Embeddings - { - private readonly string _model; - - /// Initializes a new instance of Embeddings. - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The key credential to copy. - /// OpenAI Endpoint. - internal Embeddings(string model, ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) - : this(pipeline, credential, endpoint) - { - _model = model; - } - - public virtual async Task> GenerateEmbeddingAsync(string input, GenerateEmbeddingsOptions options = null) - { - options ??= new GenerateEmbeddingsOptions(); - options.Model = new GenerateEmbeddingsOptionsModel(_model); - options.Input = BinaryData.FromString(input); - ClientResult result = await CreateEmbeddingAsync(options).ConfigureAwait(false); - return ClientResult.FromValue(result.Value.Data[0], result.GetRawResponse()); - } - - public virtual ClientResult GenerateEmbedding(string input, GenerateEmbeddingsOptions options = null) - { - options ??= new GenerateEmbeddingsOptions(); - options.Model = new GenerateEmbeddingsOptionsModel(_model); - options.Input = BinaryData.FromObjectAsJson(input); - ClientResult result = CreateEmbedding(options); - return ClientResult.FromValue(result.Value.Data[0], result.GetRawResponse()); - } - - public virtual async Task> GenerateEmbeddingAsync(IEnumerable input, GenerateEmbeddingsOptions options = null) - { - options ??= new GenerateEmbeddingsOptions(); - options.Model = new GenerateEmbeddingsOptionsModel(_model); - options.Input = BinaryData.FromObjectAsJson(input.ToArray()); - ClientResult result = await 
CreateEmbeddingAsync(options).ConfigureAwait(false); - return ClientResult.FromValue(result.Value.Data[0], result.GetRawResponse()); - } - - public virtual ClientResult GenerateEmbedding(IEnumerable input, GenerateEmbeddingsOptions options = null) - { - options ??= new GenerateEmbeddingsOptions(); - options.Model = new GenerateEmbeddingsOptionsModel(_model); - options.Input = BinaryData.FromObjectAsJson(input.ToArray()); - ClientResult result = CreateEmbedding(options); - return ClientResult.FromValue(result.Value.Data[0], result.GetRawResponse()); - } - - public virtual async Task> GenerateEmbeddingsAsync(IEnumerable inputs, GenerateEmbeddingsOptions options = null) - { - options ??= new GenerateEmbeddingsOptions(); - options.Model = new GenerateEmbeddingsOptionsModel(_model); - options.Input = BinaryData.FromObjectAsJson(inputs.ToArray()); - return await CreateEmbeddingAsync(options).ConfigureAwait(false); - } - - public virtual ClientResult GenerateEmbeddings(IEnumerable inputs, GenerateEmbeddingsOptions options = null) - { - options ??= new GenerateEmbeddingsOptions(); - options.Model = new GenerateEmbeddingsOptionsModel(_model); - options.Input = BinaryData.FromObjectAsJson(inputs.ToArray()); - return CreateEmbedding(options); - } - - public virtual async Task> GenerateEmbeddingsAsync(IEnumerable> inputs, GenerateEmbeddingsOptions options = null) - { - options ??= new GenerateEmbeddingsOptions(); - options.Model = new GenerateEmbeddingsOptionsModel(_model); - options.Input = BinaryData.FromObjectAsJson(inputs.ToArray()); - return await CreateEmbeddingAsync(options).ConfigureAwait(false); - } - - public virtual ClientResult GenerateEmbeddings(IEnumerable> inputs, GenerateEmbeddingsOptions options = null) - { - options ??= new GenerateEmbeddingsOptions(); - options.Model = new GenerateEmbeddingsOptionsModel(_model); - options.Input = BinaryData.FromObjectAsJson(inputs.ToArray()); - return CreateEmbedding(options); - } - } -} diff --git 
a/.dotnet/src/Custom/Embeddings/Models/Embedding.Serialization.cs b/.dotnet/src/Custom/Embeddings/Models/Embedding.Serialization.cs deleted file mode 100644 index 602547f22..000000000 --- a/.dotnet/src/Custom/Embeddings/Models/Embedding.Serialization.cs +++ /dev/null @@ -1,67 +0,0 @@ -using System; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; - -namespace OpenAI.Models -{ - public partial class Embedding - { - internal static Embedding DeserializeEmbedding(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= new ModelReaderWriterOptions("W"); - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - long index = default; - BinaryData embedding = default; - EmbeddingObject @object = default; - IDictionary serializedAdditionalRawData = default; - Dictionary additionalPropertiesDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("index"u8)) - { - index = property.Value.GetInt64(); - continue; - } - if (property.NameEquals("embedding"u8)) - { - embedding = BinaryData.FromString(property.Value.GetRawText()); - continue; - } - if (property.NameEquals("object"u8)) - { - @object = new EmbeddingObject(property.Value.GetString()); - continue; - } - if (options.Format != "W") - { - additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - - ReadOnlyMemory? 
embeddingAsFloats = default; - BinaryData embeddingAsBase64Data = default; - JsonDocument doc = JsonDocument.Parse(embedding); - - if (doc.RootElement.ValueKind == JsonValueKind.Array) - { - List floats = new(); - foreach (var item in doc.RootElement.EnumerateArray()) - { - floats.Add(item.GetSingle()); - } - embeddingAsFloats = new ReadOnlyMemory(floats.ToArray()); - } - else if (doc.RootElement.ValueKind == JsonValueKind.String) - { - embeddingAsBase64Data = embedding; - } - - return new(index, embedding, @object, serializedAdditionalRawData, embeddingAsFloats, embeddingAsBase64Data); - } - } -} diff --git a/.dotnet/src/Custom/Embeddings/Models/Embedding.cs b/.dotnet/src/Custom/Embeddings/Models/Embedding.cs deleted file mode 100644 index 0cf994146..000000000 --- a/.dotnet/src/Custom/Embeddings/Models/Embedding.cs +++ /dev/null @@ -1,68 +0,0 @@ -using System; -using System.Collections.Generic; - -#nullable disable - -namespace OpenAI.Models -{ - public partial class Embedding - { - /// - /// The embedding vector, which is a list of floats. The length of vector depends on the model as - /// listed in the [embedding guide](/docs/guides/embeddings). - /// - /// To assign an object to this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// - /// Supported types: - /// - /// - /// where T is of type - /// - /// - /// - /// - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - internal BinaryData EmbeddingProperty { get; } - /// The object type, which is always "embedding". 
- internal EmbeddingObject Object { get; } = EmbeddingObject.Embedding; - - internal Embedding(long index, BinaryData embeddingProperty, EmbeddingObject @object, IDictionary serializedAdditionalRawData, ReadOnlyMemory? embeddingAsFloats, BinaryData embeddingAsBase64Data) - : this(index, embeddingProperty, @object, serializedAdditionalRawData) - { - EmbeddingAsFloats = embeddingAsFloats; - EmbeddingAsBase64Data = embeddingAsBase64Data; - } - - /// The embedding represented as a vector of floats. - public ReadOnlyMemory? EmbeddingAsFloats { get; } - /// The embedding represented as a Base64-encoded string. - public BinaryData EmbeddingAsBase64Data { get; } - } -} diff --git a/.dotnet/src/Custom/Embeddings/Models/EmbeddingObject.cs b/.dotnet/src/Custom/Embeddings/Models/EmbeddingObject.cs deleted file mode 100644 index a2bd2f70e..000000000 --- a/.dotnet/src/Custom/Embeddings/Models/EmbeddingObject.cs +++ /dev/null @@ -1,10 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; - -namespace OpenAI.Models -{ - internal readonly partial struct EmbeddingObject - { - } -} diff --git a/.dotnet/src/Custom/Embeddings/Models/GenerateEmbeddingsOptions.cs b/.dotnet/src/Custom/Embeddings/Models/GenerateEmbeddingsOptions.cs deleted file mode 100644 index 64500d31a..000000000 --- a/.dotnet/src/Custom/Embeddings/Models/GenerateEmbeddingsOptions.cs +++ /dev/null @@ -1,13 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; - -namespace OpenAI.Models -{ - public partial class GenerateEmbeddingsOptions - { - internal BinaryData Input { get; set; } - - internal GenerateEmbeddingsOptionsModel Model { get; set; } - } -} diff --git a/.dotnet/src/Custom/OpenAIClient.cs b/.dotnet/src/Custom/OpenAIClient.cs deleted file mode 100644 index 2c7ed2700..000000000 --- a/.dotnet/src/Custom/OpenAIClient.cs +++ /dev/null @@ -1,22 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; -using System.Threading; - -namespace OpenAI -{ 
- public partial class OpenAIClient - { - // TODO: This needs to be suppressed. - internal virtual Embeddings GetEmbeddingsClient() - { - return Volatile.Read(ref _cachedEmbeddings) ?? Interlocked.CompareExchange(ref _cachedEmbeddings, new Embeddings(_pipeline, _credential, _endpoint), null) ?? _cachedEmbeddings; - } - - /// Initializes a new instance of Embeddings. - public virtual Embeddings GetEmbeddingsClient(string model) - { - return Volatile.Read(ref _cachedEmbeddings) ?? Interlocked.CompareExchange(ref _cachedEmbeddings, new Embeddings(model, _pipeline, _credential, _endpoint), null) ?? _cachedEmbeddings; - } - } -} diff --git a/.dotnet/src/Custom/OpenAIModelFactory.cs b/.dotnet/src/Custom/OpenAIModelFactory.cs deleted file mode 100644 index 895344070..000000000 --- a/.dotnet/src/Custom/OpenAIModelFactory.cs +++ /dev/null @@ -1,36 +0,0 @@ -using OpenAI.Models; -using System; - -namespace OpenAI -{ - /// Model factory for models. - public static partial class OpenAIModelFactory - { - /// Initializes a new instance of . - /// The index of the embedding in the list of embeddings. - /// - /// The embedding vector, which is a list of floats. The length of vector depends on the model as - /// listed in the [embedding guide](/docs/guides/embeddings). - /// - /// The object type, which is always "embedding". - /// A new instance for mocking. - public static Embedding Embedding(long index = default, ReadOnlyMemory? embeddingAsFloats = null) - { - // TODO: We need to populate the embedding property from the embeddingAsFloats parameter. - return new Embedding(index, embeddingProperty: null, EmbeddingObject.Embedding, serializedAdditionalRawData: null, embeddingAsFloats, embeddingAsBase64Data: null); - } - - /// Initializes a new instance of . - /// The index of the embedding in the list of embeddings. - /// - /// The embedding vector, which is a list of floats. The length of vector depends on the model as - /// listed in the [embedding guide](/docs/guides/embeddings). 
- /// - /// The object type, which is always "embedding". - /// A new instance for mocking. - public static Embedding Embedding(long index = default, BinaryData embeddingAsBase64Data = default) - { - return new Embedding(index, embeddingAsBase64Data, EmbeddingObject.Embedding, serializedAdditionalRawData: null, embeddingAsFloats: null, embeddingAsBase64Data); - } - } -} diff --git a/.dotnet/src/Generated/Embeddings.cs b/.dotnet/src/Generated/Embeddings.cs index 0788abad9..8056ebd08 100644 --- a/.dotnet/src/Generated/Embeddings.cs +++ b/.dotnet/src/Generated/Embeddings.cs @@ -40,27 +40,27 @@ internal Embeddings(ClientPipeline pipeline, ApiKeyCredential credential, Uri en } /// Creates an embedding vector representing the input text. - /// The to use. + /// The to use. /// is null. - internal virtual async Task> CreateEmbeddingAsync(GenerateEmbeddingsOptions embedding) + public virtual async Task> CreateEmbeddingAsync(CreateEmbeddingRequest embedding) { if (embedding is null) throw new ArgumentNullException(nameof(embedding)); using BinaryContent content = BinaryContent.Create(embedding); ClientResult result = await CreateEmbeddingAsync(content, DefaultRequestContext).ConfigureAwait(false); - return ClientResult.FromValue(EmbeddingCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + return ClientResult.FromValue(CreateEmbeddingResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// Creates an embedding vector representing the input text. - /// The to use. + /// The to use. /// is null. 
- internal virtual ClientResult CreateEmbedding(GenerateEmbeddingsOptions embedding) + public virtual ClientResult CreateEmbedding(CreateEmbeddingRequest embedding) { if (embedding is null) throw new ArgumentNullException(nameof(embedding)); using BinaryContent content = BinaryContent.Create(embedding); ClientResult result = CreateEmbedding(content, DefaultRequestContext); - return ClientResult.FromValue(EmbeddingCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + return ClientResult.FromValue(CreateEmbeddingResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } /// @@ -73,7 +73,7 @@ internal virtual ClientResult CreateEmbedding(GenerateEmbed /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -83,7 +83,7 @@ internal virtual ClientResult CreateEmbedding(GenerateEmbed /// is null. /// Service returned a non-success status code. /// The response returned from the service. - internal virtual async Task CreateEmbeddingAsync(BinaryContent content, RequestOptions options = null) + public virtual async Task CreateEmbeddingAsync(BinaryContent content, RequestOptions options = null) { if (content is null) throw new ArgumentNullException(nameof(content)); options ??= new RequestOptions(); @@ -109,7 +109,7 @@ internal virtual async Task CreateEmbeddingAsync(BinaryContent con /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -119,7 +119,7 @@ internal virtual async Task CreateEmbeddingAsync(BinaryContent con /// is null. /// Service returned a non-success status code. /// The response returned from the service. 
- internal virtual ClientResult CreateEmbedding(BinaryContent content, RequestOptions options = null) + public virtual ClientResult CreateEmbedding(BinaryContent content, RequestOptions options = null) { if (content is null) throw new ArgumentNullException(nameof(content)); options ??= new RequestOptions(); diff --git a/.dotnet/src/Generated/Models/GenerateEmbeddingsOptions.Serialization.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs similarity index 67% rename from .dotnet/src/Generated/Models/GenerateEmbeddingsOptions.Serialization.cs rename to .dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs index 5d6ec0838..7a8ff235b 100644 --- a/.dotnet/src/Generated/Models/GenerateEmbeddingsOptions.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs @@ -8,14 +8,14 @@ namespace OpenAI.Models { - public partial class GenerateEmbeddingsOptions : IJsonModel + public partial class CreateEmbeddingRequest : IJsonModel { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(GenerateEmbeddingsOptions)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{format}' format."); } writer.WriteStartObject(); @@ -63,19 +63,19 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelRea writer.WriteEndObject(); } - GenerateEmbeddingsOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + CreateEmbeddingRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(GenerateEmbeddingsOptions)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeGenerateEmbeddingsOptions(document.RootElement, options); + return DeserializeCreateEmbeddingRequest(document.RootElement, options); } - internal static GenerateEmbeddingsOptions DeserializeGenerateEmbeddingsOptions(JsonElement element, ModelReaderWriterOptions options = null) + internal static CreateEmbeddingRequest DeserializeCreateEmbeddingRequest(JsonElement element, ModelReaderWriterOptions options = null) { options ??= new ModelReaderWriterOptions("W"); @@ -84,8 +84,8 @@ internal static GenerateEmbeddingsOptions DeserializeGenerateEmbeddingsOptions(J return null; } BinaryData input = default; - GenerateEmbeddingsOptionsModel model = default; - OptionalProperty encodingFormat = default; + CreateEmbeddingRequestModel model = default; + OptionalProperty 
encodingFormat = default; OptionalProperty dimensions = default; OptionalProperty user = default; IDictionary serializedAdditionalRawData = default; @@ -99,7 +99,7 @@ internal static GenerateEmbeddingsOptions DeserializeGenerateEmbeddingsOptions(J } if (property.NameEquals("model"u8)) { - model = new GenerateEmbeddingsOptionsModel(property.Value.GetString()); + model = new CreateEmbeddingRequestModel(property.Value.GetString()); continue; } if (property.NameEquals("encoding_format"u8)) @@ -108,7 +108,7 @@ internal static GenerateEmbeddingsOptions DeserializeGenerateEmbeddingsOptions(J { continue; } - encodingFormat = new GenerateEmbeddingsOptionsEncodingFormat(property.Value.GetString()); + encodingFormat = new CreateEmbeddingRequestEncodingFormat(property.Value.GetString()); continue; } if (property.NameEquals("dimensions"u8)) @@ -131,46 +131,46 @@ internal static GenerateEmbeddingsOptions DeserializeGenerateEmbeddingsOptions(J } } serializedAdditionalRawData = additionalPropertiesDictionary; - return new GenerateEmbeddingsOptions(input, model, OptionalProperty.ToNullable(encodingFormat), OptionalProperty.ToNullable(dimensions), user.Value, serializedAdditionalRawData); + return new CreateEmbeddingRequest(input, model, OptionalProperty.ToNullable(encodingFormat), OptionalProperty.ToNullable(dimensions), user.Value, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options); default: - throw new FormatException($"The model {nameof(GenerateEmbeddingsOptions)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{options.Format}' format."); } } - GenerateEmbeddingsOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + CreateEmbeddingRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data); - return DeserializeGenerateEmbeddingsOptions(document.RootElement, options); + return DeserializeCreateEmbeddingRequest(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(GenerateEmbeddingsOptions)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The result to deserialize the model from. 
- internal static GenerateEmbeddingsOptions FromResponse(PipelineResponse response) + internal static CreateEmbeddingRequest FromResponse(PipelineResponse response) { using var document = JsonDocument.Parse(response.Content); - return DeserializeGenerateEmbeddingsOptions(document.RootElement); + return DeserializeCreateEmbeddingRequest(document.RootElement); } } } diff --git a/.dotnet/src/Generated/Models/GenerateEmbeddingsOptions.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs similarity index 58% rename from .dotnet/src/Generated/Models/GenerateEmbeddingsOptions.cs rename to .dotnet/src/Generated/Models/CreateEmbeddingRequest.cs index d8a90823c..c2a286d4c 100644 --- a/.dotnet/src/Generated/Models/GenerateEmbeddingsOptions.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs @@ -6,8 +6,8 @@ namespace OpenAI.Models { - /// The GenerateEmbeddingsOptions. - public partial class GenerateEmbeddingsOptions + /// The CreateEmbeddingRequest. + public partial class CreateEmbeddingRequest { /// /// Keeps track of any properties unknown to the library. @@ -41,7 +41,7 @@ public partial class GenerateEmbeddingsOptions /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a /// single request, pass an array of strings or array of token arrays. Each input must not exceed @@ -56,7 +56,7 @@ public partial class GenerateEmbeddingsOptions /// descriptions of them. /// /// is null. - public GenerateEmbeddingsOptions(BinaryData input, GenerateEmbeddingsOptionsModel model) + public CreateEmbeddingRequest(BinaryData input, CreateEmbeddingRequestModel model) { if (input is null) throw new ArgumentNullException(nameof(input)); @@ -64,7 +64,7 @@ public GenerateEmbeddingsOptions(BinaryData input, GenerateEmbeddingsOptionsMode Model = model; } - /// Initializes a new instance of . 
+ /// Initializes a new instance of . /// /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a /// single request, pass an array of strings or array of token arrays. Each input must not exceed @@ -91,7 +91,7 @@ public GenerateEmbeddingsOptions(BinaryData input, GenerateEmbeddingsOptionsMode /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). /// /// Keeps track of any properties unknown to the library. - internal GenerateEmbeddingsOptions(BinaryData input, GenerateEmbeddingsOptionsModel model, GenerateEmbeddingsOptionsEncodingFormat? encodingFormat, long? dimensions, string user, IDictionary serializedAdditionalRawData) + internal CreateEmbeddingRequest(BinaryData input, CreateEmbeddingRequestModel model, CreateEmbeddingRequestEncodingFormat? encodingFormat, long? dimensions, string user, IDictionary serializedAdditionalRawData) { Input = input; Model = model; @@ -101,15 +101,75 @@ internal GenerateEmbeddingsOptions(BinaryData input, GenerateEmbeddingsOptionsMo _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal GenerateEmbeddingsOptions() + /// Initializes a new instance of for deserialization. + internal CreateEmbeddingRequest() { } + + /// + /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + /// single request, pass an array of strings or array of token arrays. Each input must not exceed + /// the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + /// empty string. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . 
+ /// + /// + /// + /// Supported types: + /// + /// + /// + /// + /// + /// where T is of type + /// + /// + /// where T is of type + /// + /// + /// where T is of type IList{long} + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData Input { get; } + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + public CreateEmbeddingRequestModel Model { get; } /// /// The format to return the embeddings in. Can be either `float` or /// [`base64`](https://pypi.org/project/pybase64/). /// - public GenerateEmbeddingsOptionsEncodingFormat? EncodingFormat { get; set; } + public CreateEmbeddingRequestEncodingFormat? EncodingFormat { get; set; } /// /// The number of dimensions the resulting output embeddings should have. Only supported in /// `text-embedding-3` and later models. diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs new file mode 100644 index 000000000..e252744e4 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs @@ -0,0 +1,47 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for encoding_format in CreateEmbeddingRequest. + public readonly partial struct CreateEmbeddingRequestEncodingFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . 
+ /// is null. + public CreateEmbeddingRequestEncodingFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FloatValue = "float"; + private const string Base64Value = "base64"; + + /// float. + public static CreateEmbeddingRequestEncodingFormat Float { get; } = new CreateEmbeddingRequestEncodingFormat(FloatValue); + /// base64. + public static CreateEmbeddingRequestEncodingFormat Base64 { get; } = new CreateEmbeddingRequestEncodingFormat(Base64Value); + /// Determines if two values are the same. + public static bool operator ==(CreateEmbeddingRequestEncodingFormat left, CreateEmbeddingRequestEncodingFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateEmbeddingRequestEncodingFormat left, CreateEmbeddingRequestEncodingFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateEmbeddingRequestEncodingFormat(string value) => new CreateEmbeddingRequestEncodingFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateEmbeddingRequestEncodingFormat other && Equals(other); + /// + public bool Equals(CreateEmbeddingRequestEncodingFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} + diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs new file mode 100644 index 000000000..628de9b7b --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs @@ -0,0 +1,50 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// Enum for model in CreateEmbeddingRequest. 
+ public readonly partial struct CreateEmbeddingRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateEmbeddingRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TextEmbeddingAda002Value = "text-embedding-ada-002"; + private const string TextEmbedding3SmallValue = "text-embedding-3-small"; + private const string TextEmbedding3LargeValue = "text-embedding-3-large"; + + /// text-embedding-ada-002. + public static CreateEmbeddingRequestModel TextEmbeddingAda002 { get; } = new CreateEmbeddingRequestModel(TextEmbeddingAda002Value); + /// text-embedding-3-small. + public static CreateEmbeddingRequestModel TextEmbedding3Small { get; } = new CreateEmbeddingRequestModel(TextEmbedding3SmallValue); + /// text-embedding-3-large. + public static CreateEmbeddingRequestModel TextEmbedding3Large { get; } = new CreateEmbeddingRequestModel(TextEmbedding3LargeValue); + /// Determines if two values are the same. + public static bool operator ==(CreateEmbeddingRequestModel left, CreateEmbeddingRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateEmbeddingRequestModel left, CreateEmbeddingRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateEmbeddingRequestModel(string value) => new CreateEmbeddingRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateEmbeddingRequestModel other && Equals(other); + /// + public bool Equals(CreateEmbeddingRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} + diff --git a/.dotnet/src/Generated/Models/EmbeddingCollection.Serialization.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs similarity index 65% rename from .dotnet/src/Generated/Models/EmbeddingCollection.Serialization.cs rename to .dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs index c4a8aa240..deaf48c02 100644 --- a/.dotnet/src/Generated/Models/EmbeddingCollection.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs @@ -8,14 +8,14 @@ namespace OpenAI.Models { - public partial class EmbeddingCollection : IJsonModel + public partial class CreateEmbeddingResponse : IJsonModel { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(EmbeddingCollection)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{format}' format."); } writer.WriteStartObject(); @@ -50,19 +50,19 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWri writer.WriteEndObject(); } - EmbeddingCollection IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + CreateEmbeddingResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(EmbeddingCollection)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeEmbeddingCollection(document.RootElement, options); + return DeserializeCreateEmbeddingResponse(document.RootElement, options); } - internal static EmbeddingCollection DeserializeEmbeddingCollection(JsonElement element, ModelReaderWriterOptions options = null) + internal static CreateEmbeddingResponse DeserializeCreateEmbeddingResponse(JsonElement element, ModelReaderWriterOptions options = null) { options ??= new ModelReaderWriterOptions("W"); @@ -72,8 +72,8 @@ internal static EmbeddingCollection DeserializeEmbeddingCollection(JsonElement e } IReadOnlyList data = default; string model = default; - EmbeddingCollectionObject @object = default; - EmbeddingTokenUsage usage = default; + CreateEmbeddingResponseObject @object = default; + EmbeddingUsage usage = default; IDictionary serializedAdditionalRawData = default; Dictionary additionalPropertiesDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -95,12 +95,12 @@ internal static EmbeddingCollection DeserializeEmbeddingCollection(JsonElement e } if (property.NameEquals("object"u8)) { - @object = new EmbeddingCollectionObject(property.Value.GetString()); + @object = new CreateEmbeddingResponseObject(property.Value.GetString()); continue; } if (property.NameEquals("usage"u8)) { - usage = EmbeddingTokenUsage.DeserializeEmbeddingTokenUsage(property.Value); + usage = EmbeddingUsage.DeserializeEmbeddingUsage(property.Value); continue; } if (options.Format != "W") @@ -109,46 +109,46 @@ internal static EmbeddingCollection DeserializeEmbeddingCollection(JsonElement e } } 
serializedAdditionalRawData = additionalPropertiesDictionary; - return new EmbeddingCollection(data, model, @object, usage, serializedAdditionalRawData); + return new CreateEmbeddingResponse(data, model, @object, usage, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options); default: - throw new FormatException($"The model {nameof(EmbeddingCollection)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{options.Format}' format."); } } - EmbeddingCollection IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + CreateEmbeddingResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data); - return DeserializeEmbeddingCollection(document.RootElement, options); + return DeserializeCreateEmbeddingResponse(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(EmbeddingCollection)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The result to deserialize the model from. - internal static EmbeddingCollection FromResponse(PipelineResponse response) + internal static CreateEmbeddingResponse FromResponse(PipelineResponse response) { using var document = JsonDocument.Parse(response.Content); - return DeserializeEmbeddingCollection(document.RootElement); + return DeserializeCreateEmbeddingResponse(document.RootElement); } } } diff --git a/.dotnet/src/Generated/Models/EmbeddingCollection.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs similarity index 78% rename from .dotnet/src/Generated/Models/EmbeddingCollection.cs rename to .dotnet/src/Generated/Models/CreateEmbeddingResponse.cs index d7dd3ec73..f0695c29a 100644 --- a/.dotnet/src/Generated/Models/EmbeddingCollection.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs @@ -7,8 +7,8 @@ namespace OpenAI.Models { - /// The EmbeddingCollection. - public partial class EmbeddingCollection + /// The CreateEmbeddingResponse. + public partial class CreateEmbeddingResponse { /// /// Keeps track of any properties unknown to the library. 
@@ -42,12 +42,12 @@ public partial class EmbeddingCollection /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// The list of embeddings generated by the model. /// The name of the model used to generate the embedding. /// The usage information for the request. /// , or is null. - internal EmbeddingCollection(IEnumerable data, string model, EmbeddingTokenUsage usage) + internal CreateEmbeddingResponse(IEnumerable data, string model, EmbeddingUsage usage) { if (data is null) throw new ArgumentNullException(nameof(data)); if (model is null) throw new ArgumentNullException(nameof(model)); @@ -58,13 +58,13 @@ internal EmbeddingCollection(IEnumerable data, string model, Embeddin Usage = usage; } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The list of embeddings generated by the model. /// The name of the model used to generate the embedding. /// The object type, which is always "list". /// The usage information for the request. /// Keeps track of any properties unknown to the library. - internal EmbeddingCollection(IReadOnlyList data, string model, EmbeddingCollectionObject @object, EmbeddingTokenUsage usage, IDictionary serializedAdditionalRawData) + internal CreateEmbeddingResponse(IReadOnlyList data, string model, CreateEmbeddingResponseObject @object, EmbeddingUsage usage, IDictionary serializedAdditionalRawData) { Data = data; Model = model; @@ -73,8 +73,8 @@ internal EmbeddingCollection(IReadOnlyList data, string model, Embedd _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal EmbeddingCollection() + /// Initializes a new instance of for deserialization. + internal CreateEmbeddingResponse() { } @@ -83,10 +83,10 @@ internal EmbeddingCollection() /// The name of the model used to generate the embedding. 
public string Model { get; } /// The object type, which is always "list". - public EmbeddingCollectionObject Object { get; } = EmbeddingCollectionObject.List; + public CreateEmbeddingResponseObject Object { get; } = CreateEmbeddingResponseObject.List; /// The usage information for the request. - public EmbeddingTokenUsage Usage { get; } + public EmbeddingUsage Usage { get; } } } diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs new file mode 100644 index 000000000..b98b10317 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs @@ -0,0 +1,44 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Models +{ + /// The CreateEmbeddingResponse_object. + public readonly partial struct CreateEmbeddingResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateEmbeddingResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static CreateEmbeddingResponseObject List { get; } = new CreateEmbeddingResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(CreateEmbeddingResponseObject left, CreateEmbeddingResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateEmbeddingResponseObject left, CreateEmbeddingResponseObject right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator CreateEmbeddingResponseObject(string value) => new CreateEmbeddingResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateEmbeddingResponseObject other && Equals(other); + /// + public bool Equals(CreateEmbeddingResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} + diff --git a/.dotnet/src/Generated/Models/Embedding.Serialization.cs b/.dotnet/src/Generated/Models/Embedding.Serialization.cs index 0f9790a19..b3506e51c 100644 --- a/.dotnet/src/Generated/Models/Embedding.Serialization.cs +++ b/.dotnet/src/Generated/Models/Embedding.Serialization.cs @@ -3,6 +3,7 @@ using System; using OpenAI.ClientShared.Internal; using System.ClientModel.Primitives; +using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Models @@ -61,6 +62,45 @@ Embedding IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWri return DeserializeEmbedding(document.RootElement, options); } + internal static Embedding DeserializeEmbedding(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long index = default; + BinaryData embedding = default; + EmbeddingObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("index"u8)) + { + index = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("embedding"u8)) + { + embedding = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if 
(property.NameEquals("object"u8)) + { + @object = new EmbeddingObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new Embedding(index, embedding, @object, serializedAdditionalRawData); + } + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; diff --git a/.dotnet/src/Generated/Models/Embedding.cs b/.dotnet/src/Generated/Models/Embedding.cs index 9a3e895f8..ecabc7df9 100644 --- a/.dotnet/src/Generated/Models/Embedding.cs +++ b/.dotnet/src/Generated/Models/Embedding.cs @@ -79,6 +79,51 @@ internal Embedding() /// The index of the embedding in the list of embeddings. public long Index { get; } + /// + /// The embedding vector, which is a list of floats. The length of vector depends on the model as + /// listed in the [embedding guide](/docs/guides/embeddings). + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// where T is of type + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData EmbeddingProperty { get; } + /// The object type, which is always "embedding". 
+ public EmbeddingObject Object { get; } = EmbeddingObject.Embedding; } } diff --git a/.dotnet/src/Generated/Models/EmbeddingCollectionObject.cs b/.dotnet/src/Generated/Models/EmbeddingCollectionObject.cs deleted file mode 100644 index b045cf349..000000000 --- a/.dotnet/src/Generated/Models/EmbeddingCollectionObject.cs +++ /dev/null @@ -1,44 +0,0 @@ -// - -using System; -using System.ComponentModel; - -namespace OpenAI.Models -{ - /// The EmbeddingCollection_object. - public readonly partial struct EmbeddingCollectionObject : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public EmbeddingCollectionObject(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string ListValue = "list"; - - /// list. - public static EmbeddingCollectionObject List { get; } = new EmbeddingCollectionObject(ListValue); - /// Determines if two values are the same. - public static bool operator ==(EmbeddingCollectionObject left, EmbeddingCollectionObject right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(EmbeddingCollectionObject left, EmbeddingCollectionObject right) => !left.Equals(right); - /// Converts a string to a . - public static implicit operator EmbeddingCollectionObject(string value) => new EmbeddingCollectionObject(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is EmbeddingCollectionObject other && Equals(other); - /// - public bool Equals(EmbeddingCollectionObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value?.GetHashCode() ?? 
0; - /// - public override string ToString() => _value; - } -} - diff --git a/.dotnet/src/Generated/Models/EmbeddingObject.cs b/.dotnet/src/Generated/Models/EmbeddingObject.cs index c6088b162..2cc2f012c 100644 --- a/.dotnet/src/Generated/Models/EmbeddingObject.cs +++ b/.dotnet/src/Generated/Models/EmbeddingObject.cs @@ -6,7 +6,7 @@ namespace OpenAI.Models { /// The Embedding_object. - internal readonly partial struct EmbeddingObject : IEquatable + public readonly partial struct EmbeddingObject : IEquatable { private readonly string _value; diff --git a/.dotnet/src/Generated/Models/EmbeddingTokenUsage.Serialization.cs b/.dotnet/src/Generated/Models/EmbeddingUsage.Serialization.cs similarity index 67% rename from .dotnet/src/Generated/Models/EmbeddingTokenUsage.Serialization.cs rename to .dotnet/src/Generated/Models/EmbeddingUsage.Serialization.cs index 8026af740..23beb37ad 100644 --- a/.dotnet/src/Generated/Models/EmbeddingTokenUsage.Serialization.cs +++ b/.dotnet/src/Generated/Models/EmbeddingUsage.Serialization.cs @@ -8,14 +8,14 @@ namespace OpenAI.Models { - public partial class EmbeddingTokenUsage : IJsonModel + public partial class EmbeddingUsage : IJsonModel { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(EmbeddingTokenUsage)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(EmbeddingUsage)} does not support '{format}' format."); } writer.WriteStartObject(); @@ -41,19 +41,19 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWri writer.WriteEndObject(); } - EmbeddingTokenUsage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + EmbeddingUsage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(EmbeddingTokenUsage)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(EmbeddingUsage)} does not support '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeEmbeddingTokenUsage(document.RootElement, options); + return DeserializeEmbeddingUsage(document.RootElement, options); } - internal static EmbeddingTokenUsage DeserializeEmbeddingTokenUsage(JsonElement element, ModelReaderWriterOptions options = null) + internal static EmbeddingUsage DeserializeEmbeddingUsage(JsonElement element, ModelReaderWriterOptions options = null) { options ??= new ModelReaderWriterOptions("W"); @@ -83,46 +83,46 @@ internal static EmbeddingTokenUsage DeserializeEmbeddingTokenUsage(JsonElement e } } serializedAdditionalRawData = additionalPropertiesDictionary; - return new EmbeddingTokenUsage(promptTokens, totalTokens, serializedAdditionalRawData); + return new EmbeddingUsage(promptTokens, totalTokens, serializedAdditionalRawData); } - BinaryData 
IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options); default: - throw new FormatException($"The model {nameof(EmbeddingTokenUsage)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(EmbeddingUsage)} does not support '{options.Format}' format."); } } - EmbeddingTokenUsage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + EmbeddingUsage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data); - return DeserializeEmbeddingTokenUsage(document.RootElement, options); + return DeserializeEmbeddingUsage(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(EmbeddingTokenUsage)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(EmbeddingUsage)} does not support '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The result to deserialize the model from. 
- internal static EmbeddingTokenUsage FromResponse(PipelineResponse response) + internal static EmbeddingUsage FromResponse(PipelineResponse response) { using var document = JsonDocument.Parse(response.Content); - return DeserializeEmbeddingTokenUsage(document.RootElement); + return DeserializeEmbeddingUsage(document.RootElement); } } } diff --git a/.dotnet/src/Generated/Models/EmbeddingTokenUsage.cs b/.dotnet/src/Generated/Models/EmbeddingUsage.cs similarity index 85% rename from .dotnet/src/Generated/Models/EmbeddingTokenUsage.cs rename to .dotnet/src/Generated/Models/EmbeddingUsage.cs index e235e4d4b..6c03d3210 100644 --- a/.dotnet/src/Generated/Models/EmbeddingTokenUsage.cs +++ b/.dotnet/src/Generated/Models/EmbeddingUsage.cs @@ -5,8 +5,8 @@ namespace OpenAI.Models { - /// The EmbeddingTokenUsage. - public partial class EmbeddingTokenUsage + /// The EmbeddingUsage. + public partial class EmbeddingUsage { /// /// Keeps track of any properties unknown to the library. @@ -40,28 +40,28 @@ public partial class EmbeddingTokenUsage /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// The number of tokens used by the prompt. /// The total number of tokens used by the request. - internal EmbeddingTokenUsage(long promptTokens, long totalTokens) + internal EmbeddingUsage(long promptTokens, long totalTokens) { PromptTokens = promptTokens; TotalTokens = totalTokens; } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The number of tokens used by the prompt. /// The total number of tokens used by the request. /// Keeps track of any properties unknown to the library. 
- internal EmbeddingTokenUsage(long promptTokens, long totalTokens, IDictionary serializedAdditionalRawData) + internal EmbeddingUsage(long promptTokens, long totalTokens, IDictionary serializedAdditionalRawData) { PromptTokens = promptTokens; TotalTokens = totalTokens; _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal EmbeddingTokenUsage() + /// Initializes a new instance of for deserialization. + internal EmbeddingUsage() { } diff --git a/.dotnet/src/Generated/Models/GenerateEmbeddingsOptionsEncodingFormat.cs b/.dotnet/src/Generated/Models/GenerateEmbeddingsOptionsEncodingFormat.cs deleted file mode 100644 index 805a38b93..000000000 --- a/.dotnet/src/Generated/Models/GenerateEmbeddingsOptionsEncodingFormat.cs +++ /dev/null @@ -1,47 +0,0 @@ -// - -using System; -using System.ComponentModel; - -namespace OpenAI.Models -{ - /// Enum for encoding_format in GenerateEmbeddingsOptions. - public readonly partial struct GenerateEmbeddingsOptionsEncodingFormat : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public GenerateEmbeddingsOptionsEncodingFormat(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string FloatValue = "float"; - private const string Base64Value = "base64"; - - /// float. - public static GenerateEmbeddingsOptionsEncodingFormat Float { get; } = new GenerateEmbeddingsOptionsEncodingFormat(FloatValue); - /// base64. - public static GenerateEmbeddingsOptionsEncodingFormat Base64 { get; } = new GenerateEmbeddingsOptionsEncodingFormat(Base64Value); - /// Determines if two values are the same. - public static bool operator ==(GenerateEmbeddingsOptionsEncodingFormat left, GenerateEmbeddingsOptionsEncodingFormat right) => left.Equals(right); - /// Determines if two values are not the same. 
- public static bool operator !=(GenerateEmbeddingsOptionsEncodingFormat left, GenerateEmbeddingsOptionsEncodingFormat right) => !left.Equals(right); - /// Converts a string to a . - public static implicit operator GenerateEmbeddingsOptionsEncodingFormat(string value) => new GenerateEmbeddingsOptionsEncodingFormat(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is GenerateEmbeddingsOptionsEncodingFormat other && Equals(other); - /// - public bool Equals(GenerateEmbeddingsOptionsEncodingFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value?.GetHashCode() ?? 0; - /// - public override string ToString() => _value; - } -} - diff --git a/.dotnet/src/Generated/Models/GenerateEmbeddingsOptionsModel.cs b/.dotnet/src/Generated/Models/GenerateEmbeddingsOptionsModel.cs deleted file mode 100644 index e1a9c9135..000000000 --- a/.dotnet/src/Generated/Models/GenerateEmbeddingsOptionsModel.cs +++ /dev/null @@ -1,50 +0,0 @@ -// - -using System; -using System.ComponentModel; - -namespace OpenAI.Models -{ - /// Enum for model in GenerateEmbeddingsOptions. - public readonly partial struct GenerateEmbeddingsOptionsModel : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public GenerateEmbeddingsOptionsModel(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string TextEmbeddingAda002Value = "text-embedding-ada-002"; - private const string TextEmbedding3SmallValue = "text-embedding-3-small"; - private const string TextEmbedding3LargeValue = "text-embedding-3-large"; - - /// text-embedding-ada-002. - public static GenerateEmbeddingsOptionsModel TextEmbeddingAda002 { get; } = new GenerateEmbeddingsOptionsModel(TextEmbeddingAda002Value); - /// text-embedding-3-small. 
- public static GenerateEmbeddingsOptionsModel TextEmbedding3Small { get; } = new GenerateEmbeddingsOptionsModel(TextEmbedding3SmallValue); - /// text-embedding-3-large. - public static GenerateEmbeddingsOptionsModel TextEmbedding3Large { get; } = new GenerateEmbeddingsOptionsModel(TextEmbedding3LargeValue); - /// Determines if two values are the same. - public static bool operator ==(GenerateEmbeddingsOptionsModel left, GenerateEmbeddingsOptionsModel right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(GenerateEmbeddingsOptionsModel left, GenerateEmbeddingsOptionsModel right) => !left.Equals(right); - /// Converts a string to a . - public static implicit operator GenerateEmbeddingsOptionsModel(string value) => new GenerateEmbeddingsOptionsModel(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is GenerateEmbeddingsOptionsModel other && Equals(other); - /// - public bool Equals(GenerateEmbeddingsOptionsModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value?.GetHashCode() ?? 
0; - /// - public override string ToString() => _value; - } -} - diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs index a5bf6fda2..68b1ad66f 100644 --- a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs @@ -20,7 +20,12 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelR writer.WriteStartObject(); writer.WritePropertyName("tool_outputs"u8); - writer.WriteObjectValue(ToolOutputs); + writer.WriteStartArray(); + foreach (var item in ToolOutputs) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); if (options.Format != "W" && _serializedAdditionalRawData != null) { foreach (var item in _serializedAdditionalRawData) @@ -59,14 +64,19 @@ internal static SubmitToolOutputsRunRequest DeserializeSubmitToolOutputsRunReque { return null; } - SubmitToolOutputsRunRequestToolOutputs toolOutputs = default; + IList toolOutputs = default; IDictionary serializedAdditionalRawData = default; Dictionary additionalPropertiesDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { if (property.NameEquals("tool_outputs"u8)) { - toolOutputs = SubmitToolOutputsRunRequestToolOutputs.DeserializeSubmitToolOutputsRunRequestToolOutputs(property.Value); + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(SubmitToolOutputsRunRequestToolOutput.DeserializeSubmitToolOutputsRunRequestToolOutput(item)); + } + toolOutputs = array; continue; } if (options.Format != "W") diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs index abdcfb9e3..f489e6bb1 100644 --- a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs @@ -3,6 +3,7 @@ using System; using 
OpenAI.ClientShared.Internal; using System.Collections.Generic; +using System.Linq; namespace OpenAI.Models { @@ -44,17 +45,17 @@ public partial class SubmitToolOutputsRunRequest /// Initializes a new instance of . /// A list of tools for which the outputs are being submitted. /// is null. - public SubmitToolOutputsRunRequest(SubmitToolOutputsRunRequestToolOutputs toolOutputs) + public SubmitToolOutputsRunRequest(IEnumerable toolOutputs) { if (toolOutputs is null) throw new ArgumentNullException(nameof(toolOutputs)); - ToolOutputs = toolOutputs; + ToolOutputs = toolOutputs.ToList(); } /// Initializes a new instance of . /// A list of tools for which the outputs are being submitted. /// Keeps track of any properties unknown to the library. - internal SubmitToolOutputsRunRequest(SubmitToolOutputsRunRequestToolOutputs toolOutputs, IDictionary serializedAdditionalRawData) + internal SubmitToolOutputsRunRequest(IList toolOutputs, IDictionary serializedAdditionalRawData) { ToolOutputs = toolOutputs; _serializedAdditionalRawData = serializedAdditionalRawData; @@ -66,7 +67,7 @@ internal SubmitToolOutputsRunRequest() } /// A list of tools for which the outputs are being submitted. 
- public SubmitToolOutputsRunRequestToolOutputs ToolOutputs { get; } + public IList ToolOutputs { get; } } } diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.Serialization.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.Serialization.cs similarity index 68% rename from .dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.Serialization.cs rename to .dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.Serialization.cs index f7c09dcf1..709807021 100644 --- a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.Serialization.cs +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.Serialization.cs @@ -8,14 +8,14 @@ namespace OpenAI.Models { - public partial class SubmitToolOutputsRunRequestToolOutputs : IJsonModel + public partial class SubmitToolOutputsRunRequestToolOutput : IJsonModel { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutputs)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutput)} does not support '{format}' format."); } writer.WriteStartObject(); @@ -47,19 +47,19 @@ void IJsonModel.Write(Utf8JsonWriter wri writer.WriteEndObject(); } - SubmitToolOutputsRunRequestToolOutputs IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + SubmitToolOutputsRunRequestToolOutput IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutputs)} does not support '{format}' format."); + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutput)} does not support '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeSubmitToolOutputsRunRequestToolOutputs(document.RootElement, options); + return DeserializeSubmitToolOutputsRunRequestToolOutput(document.RootElement, options); } - internal static SubmitToolOutputsRunRequestToolOutputs DeserializeSubmitToolOutputsRunRequestToolOutputs(JsonElement element, ModelReaderWriterOptions options = null) + internal static SubmitToolOutputsRunRequestToolOutput DeserializeSubmitToolOutputsRunRequestToolOutput(JsonElement element, ModelReaderWriterOptions options = null) { options ??= new ModelReaderWriterOptions("W"); @@ -89,46 +89,46 @@ internal static SubmitToolOutputsRunRequestToolOutputs DeserializeSubmitToolOutp } } serializedAdditionalRawData = additionalPropertiesDictionary; - return new SubmitToolOutputsRunRequestToolOutputs(toolCallId.Value, output.Value, serializedAdditionalRawData); + return new SubmitToolOutputsRunRequestToolOutput(toolCallId.Value, output.Value, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options); default: - throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutputs)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutput)} does not support '{options.Format}' format."); } } - SubmitToolOutputsRunRequestToolOutputs IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + SubmitToolOutputsRunRequestToolOutput IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data); - return DeserializeSubmitToolOutputsRunRequestToolOutputs(document.RootElement, options); + return DeserializeSubmitToolOutputsRunRequestToolOutput(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutputs)} does not support '{options.Format}' format."); + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutput)} does not support '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The result to deserialize the model from. 
- internal static SubmitToolOutputsRunRequestToolOutputs FromResponse(PipelineResponse response) + internal static SubmitToolOutputsRunRequestToolOutput FromResponse(PipelineResponse response) { using var document = JsonDocument.Parse(response.Content); - return DeserializeSubmitToolOutputsRunRequestToolOutputs(document.RootElement); + return DeserializeSubmitToolOutputsRunRequestToolOutput(document.RootElement); } } } diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.cs similarity index 85% rename from .dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.cs rename to .dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.cs index 8d8a8109c..096a5584b 100644 --- a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutputs.cs +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.cs @@ -5,8 +5,8 @@ namespace OpenAI.Models { - /// The SubmitToolOutputsRunRequestToolOutputs. - public partial class SubmitToolOutputsRunRequestToolOutputs + /// The SubmitToolOutputsRunRequestToolOutput. + public partial class SubmitToolOutputsRunRequestToolOutput { /// /// Keeps track of any properties unknown to the library. @@ -40,19 +40,19 @@ public partial class SubmitToolOutputsRunRequestToolOutputs /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public SubmitToolOutputsRunRequestToolOutputs() + /// Initializes a new instance of . + public SubmitToolOutputsRunRequestToolOutput() { } - /// Initializes a new instance of . + /// Initializes a new instance of . /// /// The ID of the tool call in the `required_action` object within the run object the output is /// being submitted for. /// /// The output of the tool call to be submitted to continue the run. /// Keeps track of any properties unknown to the library. 
- internal SubmitToolOutputsRunRequestToolOutputs(string toolCallId, string output, IDictionary serializedAdditionalRawData) + internal SubmitToolOutputsRunRequestToolOutput(string toolCallId, string output, IDictionary serializedAdditionalRawData) { ToolCallId = toolCallId; Output = output; diff --git a/.dotnet/src/Generated/OpenAIClient.cs b/.dotnet/src/Generated/OpenAIClient.cs index 7565c4dd1..29785453c 100644 --- a/.dotnet/src/Generated/OpenAIClient.cs +++ b/.dotnet/src/Generated/OpenAIClient.cs @@ -89,6 +89,12 @@ public virtual Completions GetCompletionsClient() return Volatile.Read(ref _cachedCompletions) ?? Interlocked.CompareExchange(ref _cachedCompletions, new Completions(_pipeline, _credential, _endpoint), null) ?? _cachedCompletions; } + /// Initializes a new instance of Embeddings. + public virtual Embeddings GetEmbeddingsClient() + { + return Volatile.Read(ref _cachedEmbeddings) ?? Interlocked.CompareExchange(ref _cachedEmbeddings, new Embeddings(_pipeline, _credential, _endpoint), null) ?? _cachedEmbeddings; + } + /// Initializes a new instance of Files. public virtual Files GetFilesClient() { diff --git a/.dotnet/src/Generated/OpenAIModelFactory.cs b/.dotnet/src/Generated/OpenAIModelFactory.cs index dd7118fc0..8e81ba327 100644 --- a/.dotnet/src/Generated/OpenAIModelFactory.cs +++ b/.dotnet/src/Generated/OpenAIModelFactory.cs @@ -724,26 +724,71 @@ public static CreateCompletionResponseChoiceLogprobs CreateCompletionResponseCho return new CreateCompletionResponseChoiceLogprobs(tokens?.ToList(), tokenLogprobs?.ToList(), topLogprobs?.ToList(), textOffset?.ToList(), serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . + /// + /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + /// single request, pass an array of strings or array of token arrays. 
Each input must not exceed + /// the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + /// empty string. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// + /// The format to return the embeddings in. Can be either `float` or + /// [`base64`](https://pypi.org/project/pybase64/). + /// + /// + /// The number of dimensions the resulting output embeddings should have. Only supported in + /// `text-embedding-3` and later models. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// A new instance for mocking. + public static CreateEmbeddingRequest CreateEmbeddingRequest(BinaryData input = null, CreateEmbeddingRequestModel model = default, CreateEmbeddingRequestEncodingFormat? encodingFormat = null, long? dimensions = null, string user = null) + { + return new CreateEmbeddingRequest(input, model, encodingFormat, dimensions, user, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . /// The list of embeddings generated by the model. /// The name of the model used to generate the embedding. /// The object type, which is always "list". /// The usage information for the request. - /// A new instance for mocking. - public static EmbeddingCollection EmbeddingCollection(IEnumerable data = null, string model = null, EmbeddingCollectionObject @object = default, EmbeddingTokenUsage usage = null) + /// A new instance for mocking. 
+ public static CreateEmbeddingResponse CreateEmbeddingResponse(IEnumerable data = null, string model = null, CreateEmbeddingResponseObject @object = default, EmbeddingUsage usage = null) { data ??= new List(); - return new EmbeddingCollection(data?.ToList(), model, @object, usage, serializedAdditionalRawData: null); + return new CreateEmbeddingResponse(data?.ToList(), model, @object, usage, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The index of the embedding in the list of embeddings. + /// + /// The embedding vector, which is a list of floats. The length of vector depends on the model as + /// listed in the [embedding guide](/docs/guides/embeddings). + /// + /// The object type, which is always "embedding". + /// A new instance for mocking. + public static Embedding Embedding(long index = default, BinaryData embeddingProperty = null, EmbeddingObject @object = default) + { + return new Embedding(index, embeddingProperty, @object, serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The number of tokens used by the prompt. /// The total number of tokens used by the request. - /// A new instance for mocking. - public static EmbeddingTokenUsage EmbeddingTokenUsage(long promptTokens = default, long totalTokens = default) + /// A new instance for mocking. + public static EmbeddingUsage EmbeddingUsage(long promptTokens = default, long totalTokens = default) { - return new EmbeddingTokenUsage(promptTokens, totalTokens, serializedAdditionalRawData: null); + return new EmbeddingUsage(promptTokens, totalTokens, serializedAdditionalRawData: null); } /// Initializes a new instance of . 
diff --git a/.dotnet/src/OpenAI.csproj b/.dotnet/src/OpenAI.csproj index c53137241..e03a4dc1e 100644 --- a/.dotnet/src/OpenAI.csproj +++ b/.dotnet/src/OpenAI.csproj @@ -10,7 +10,7 @@ - + diff --git a/.dotnet/tests/EmbeddingsTests.cs b/.dotnet/tests/EmbeddingsTests.cs deleted file mode 100644 index ecf35be7d..000000000 --- a/.dotnet/tests/EmbeddingsTests.cs +++ /dev/null @@ -1,101 +0,0 @@ -using NUnit.Framework; -using OpenAI.Models; -using System; -using System.ClientModel; -using System.Collections.Generic; -using System.Linq; -using System.Numerics; -using System.Text; -using System.Threading.Tasks; - -namespace OpenAI.Tests -{ - public partial class EmbeddingsTests - { - [Test] - public static void GetEmbeddingFromString() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - OpenAIClient client = new(credential); - Embeddings embeddingsClient = client.GetEmbeddingsClient("text-embedding-3-small"); - - string input = "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa," - + " and a really helpful concierge. The location is perfect -- right downtown, close to all " - + " the tourist attractions. 
We highly recommend this hotel."; - - ClientResult result = embeddingsClient.GenerateEmbedding(input); - Embedding embedding = result.Value; - - ReadOnlyMemory vector = embedding.EmbeddingAsFloats.Value; - Assert.IsTrue(vector.Length == 1536); - } - - [Test] - public static void GetEmbeddingFromArrayOfTokens() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - OpenAIClient client = new(credential); - Embeddings embeddingsClient = client.GetEmbeddingsClient("text-embedding-3-small"); - - List input = new() { 14809, 9689, 304, 6424, 422, 499, 1093, 19913, 25325, 13, 2435, 617, 459, 8056, - 56010, 7463, 11, 264, 31493, 11, 323, 264, 2216, 11190, 3613, 87103, 13, 578, 3813, 374, 4832, 1198, - 1314, 19441, 11, 3345, 311, 682, 279, 31070, 39591, 13, 1226, 7701, 7079, 420, 9689, 13 }; - - ClientResult result = embeddingsClient.GenerateEmbedding(input); - Embedding embedding = result.Value; - - ReadOnlyMemory vector = embedding.EmbeddingAsFloats.Value; - Assert.IsTrue(vector.Length == 1536); - } - - [Test] - public static void GetEmbeddingsFromArrayOfStrings() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - OpenAIClient client = new(credential); - Embeddings embeddingsClient = client.GetEmbeddingsClient("text-embedding-3-small"); - - List inputs = new() { - "Luxury", - "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa," - + " and a really helpful concierge. The location is perfect -- right downtown, close to all " - + " the tourist attractions. We highly recommend this hotel." - }; - - ClientResult result = embeddingsClient.GenerateEmbeddings(inputs); - EmbeddingCollection collection = result.Value; // TODO: Make EmbeddingCollection inherit from ReadOnlyCollection. 
- IReadOnlyList data = collection.Data; - - ReadOnlyMemory vector0 = data[0].EmbeddingAsFloats.Value; - Assert.IsTrue(vector0.Length == 1536); - - ReadOnlyMemory vector1 = data[1].EmbeddingAsFloats.Value; - Assert.IsTrue(vector1.Length == 1536); - } - - [Test] - public static void GetEmbeddingsFromArrayOfArraysOfTokens() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - OpenAIClient client = new(credential); - Embeddings embeddingsClient = client.GetEmbeddingsClient("text-embedding-3-small"); - - List> inputs = new() { - new List { 78379, 3431 }, - new List { 14809, 9689, 304, 6424, 422, 499, 1093, 19913, 25325, 13, 2435, 617, 459, 8056, - 56010, 7463, 11, 264, 31493, 11, 323, 264, 2216, 11190, 3613, 87103, 13, 578, 3813, 374, 4832, 1198, - 1314, 19441, 11, 3345, 311, 682, 279, 31070, 39591, 13, 1226, 7701, 7079, 420, 9689, 13 } - }; - - ClientResult result = embeddingsClient.GenerateEmbeddings(inputs); - EmbeddingCollection collection = result.Value; // TODO: Make EmbeddingCollection inherit from ReadOnlyCollection. 
- IReadOnlyList data = collection.Data; - - ReadOnlyMemory vector0 = data[0].EmbeddingAsFloats.Value; - Assert.IsTrue(vector0.Length == 1536); - - ReadOnlyMemory vector1 = data[1].EmbeddingAsFloats.Value; - Assert.IsTrue(vector1.Length == 1536); - } - } -} diff --git a/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs b/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs new file mode 100644 index 000000000..b4deac0fc --- /dev/null +++ b/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs @@ -0,0 +1,23 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using NUnit.Framework; +using OpenAI; + +namespace OpenAI.Tests +{ + public partial class EmbeddingsTests + { + [Test] + public void SmokeTest() + { + ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + Embeddings client = new OpenAIClient(credential).GetEmbeddingsClient(); + Assert.IsNotNull(client); + } + } +} + diff --git a/.dotnet/tests/OpenAI.Tests.csproj b/.dotnet/tests/OpenAI.Tests.csproj index a279ac212..e590c0e52 100644 --- a/.dotnet/tests/OpenAI.Tests.csproj +++ b/.dotnet/tests/OpenAI.Tests.csproj @@ -14,5 +14,6 @@ + diff --git a/main.tsp b/main.tsp index e46a4467c..d7bbdec33 100644 --- a/main.tsp +++ b/main.tsp @@ -1,7 +1,6 @@ import "@typespec/http"; import "@typespec/openapi3"; import "@typespec/openapi"; -import "@azure-tools/typespec-client-generator-core"; import "./audio"; import "./assistants"; @@ -18,7 +17,6 @@ import "./runs"; import "./threads"; using TypeSpec.Http; -using Azure.ClientGenerator.Core; /** The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. 
*/ @service({ @@ -36,13 +34,4 @@ using Azure.ClientGenerator.Core; }) @server("https://api.openai.com/v1", "OpenAI Endpoint") @useAuth(BearerAuth) -namespace OpenAI; - -@@projectedName(OpenAI.CreateEmbeddingRequest, "csharp", "GenerateEmbeddingsOptions"); -@@projectedName(OpenAI.CreateEmbeddingResponse, "csharp", "EmbeddingCollection"); -@@projectedName(OpenAI.EmbeddingUsage, "csharp", "EmbeddingTokenUsage"); - -@@access(OpenAI.Embeddings.createEmbedding, Access.internal, "csharp"); -@@access(OpenAI.CreateEmbeddingRequest, Access.public, "csharp"); -@@access(OpenAI.CreateEmbeddingResponse, Access.public, "csharp"); -@@access(OpenAI.EmbeddingUsage, Access.public, "csharp"); \ No newline at end of file +namespace OpenAI; \ No newline at end of file diff --git a/runs/models.tsp b/runs/models.tsp index d5c33bb22..42d7935ef 100644 --- a/runs/models.tsp +++ b/runs/models.tsp @@ -99,7 +99,7 @@ model SubmitToolOutputsRunRequest { /** The output of the tool call to be submitted to continue the run. */ output?: string; - } + }[]; } model ListRunsResponse { diff --git a/tsp-output/@typespec/openapi3/openapi.yaml b/tsp-output/@typespec/openapi3/openapi.yaml index e27e57ba4..10cc8f54e 100644 --- a/tsp-output/@typespec/openapi3/openapi.yaml +++ b/tsp-output/@typespec/openapi3/openapi.yaml @@ -5803,16 +5803,18 @@ components: - tool_outputs properties: tool_outputs: - type: object - properties: - tool_call_id: - type: string - description: |- - The ID of the tool call in the `required_action` object within the run object the output is - being submitted for. - output: - type: string - description: The output of the tool call to be submitted to continue the run. + type: array + items: + type: object + properties: + tool_call_id: + type: string + description: |- + The ID of the tool call in the `required_action` object within the run object the output is + being submitted for. + output: + type: string + description: The output of the tool call to be submitted to continue the run. 
description: A list of tools for which the outputs are being submitted. SuffixString: type: string From 4decb12d1e9bd556e7cd8b3bf874e096f94c8974 Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Tue, 27 Feb 2024 23:02:08 -0800 Subject: [PATCH 16/18] Fix Update-ClientModel.ps1 script: Convenience methods with a single CancellationToken parameter --- .dotnet/scripts/Update-ClientModel.ps1 | 8 +++++--- .dotnet/src/Generated/ModelsOps.cs | 8 ++++---- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.dotnet/scripts/Update-ClientModel.ps1 b/.dotnet/scripts/Update-ClientModel.ps1 index 83013e82e..13b8beb80 100644 --- a/.dotnet/scripts/Update-ClientModel.ps1 +++ b/.dotnet/scripts/Update-ClientModel.ps1 @@ -83,6 +83,9 @@ function Update-Subclients { $content = $content -creplace "\s+\/\/\/ The ClientDiagnostics is used to provide tracing support for the client library. ", "" $content = $content -creplace "\s+internal TelemetrySource ClientDiagnostics { get; }", "" + # Delete FromCancellationToken + $content = $content -creplace "(?s)\s+internal static RequestOptions FromCancellationToken\(CancellationToken cancellationToken = default\).*?return new RequestOptions\(\) \{ CancellationToken = cancellationToken \};.*?\}", "" + # Modify constructor $content = $content -creplace "\s+\/\/\/ The handler for diagnostic messaging in the client. ", "" $content = $content -creplace "", "" @@ -91,6 +94,7 @@ function Update-Subclients { # # Modify convenience methods $content = $content -creplace "\s+\/\/\/ The cancellation token to use. 
", "" + $content = $content -creplace "\(CancellationToken cancellationToken = default\)", "()" $content = $content -creplace ", CancellationToken cancellationToken = default\)", ")" $content = $content -creplace "RequestOptions context = FromCancellationToken\(cancellationToken\);\s+", "" $content = $content -creplace "using RequestBody content = (?\w+)\.ToRequestBody\(\);", "using BinaryContent content = BinaryContent.Create(`${var});" @@ -101,6 +105,7 @@ function Update-Subclients { $content = $content -creplace "Result result = (?\w+)\((?[(\w+)(\?.ToString\(\)*)(,\s\w+)]*), context\);", "ClientResult result = `${method}(`${params}, DefaultRequestContext);" # Modify protocol methods + $content = $content -creplace "\/\/\/ Please try the simpler \w+)\(CancellationToken\)`"/> convenience overload with strongly typed models first.", "/// Please try the simpler convenience overload with strongly typed models first." $content = $content -creplace "\/\/\/ Please try the simpler \w+)\((?[(\w+)(\?*)(,\s\w+)]*),CancellationToken\)`"/> convenience overload with strongly typed models first.", "/// Please try the simpler convenience overload with strongly typed models first." $content = $content -creplace "\/\/\/ The request context, which can override default behaviors of the client pipeline on a per-call basis. ", "/// The request options, which can override default behaviors of the client pipeline on a per-call basis. 
" $content = $content -creplace "\/\/\/ ", "/// " @@ -134,9 +139,6 @@ function Update-Subclients { # Delete DefaultRequestContext # $content = $content -creplace "\s+private static RequestOptions DefaultRequestContext = new RequestOptions\(\);", "" - # Delete FromCancellationToken - $content = $content -creplace "(?s)\s+internal static RequestOptions FromCancellationToken\(CancellationToken cancellationToken = default\).*?return new RequestOptions\(\) \{ CancellationToken = cancellationToken \};.*?\}", "" - # Clean up ApiKeyCredential $content = $content -creplace " KeyCredential", " ApiKeyCredential" $content = $content -creplace "_keyCredential", "_credential" diff --git a/.dotnet/src/Generated/ModelsOps.cs b/.dotnet/src/Generated/ModelsOps.cs index 528bd4d2f..c4b6cbfbb 100644 --- a/.dotnet/src/Generated/ModelsOps.cs +++ b/.dotnet/src/Generated/ModelsOps.cs @@ -43,7 +43,7 @@ internal ModelsOps(ClientPipeline pipeline, ApiKeyCredential credential, Uri end /// Lists the currently available models, and provides basic information about each one such as the /// owner and availability. /// - public virtual async Task> GetModelsAsync(CancellationToken cancellationToken = default) + public virtual async Task> GetModelsAsync() { ClientResult result = await GetModelsAsync(DefaultRequestContext).ConfigureAwait(false); return ClientResult.FromValue(ListModelsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); @@ -53,7 +53,7 @@ public virtual async Task> GetModelsAsync(Cance /// Lists the currently available models, and provides basic information about each one such as the /// owner and availability. 
/// - public virtual ClientResult GetModels(CancellationToken cancellationToken = default) + public virtual ClientResult GetModels() { ClientResult result = GetModels(DefaultRequestContext); return ClientResult.FromValue(ListModelsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); @@ -70,7 +70,7 @@ public virtual ClientResult GetModels(CancellationToken canc /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -104,7 +104,7 @@ public virtual async Task GetModelsAsync(RequestOptions options) /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// From 8c0f76d98d9071f2ddc1030bf652793a91604658 Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Wed, 28 Feb 2024 11:25:40 -0800 Subject: [PATCH 17/18] Add ConvertTo-Internal.ps1 script --- .dotnet/scripts/ConvertTo-Internal.ps1 | 17 +++++++++++++++++ .dotnet/scripts/Update-ClientModel.ps1 | 10 +++++----- .dotnet/src/Generated/Assistants.cs | 7 +++---- .dotnet/src/Generated/Audio.cs | 7 +++---- .dotnet/src/Generated/Chat.cs | 7 +++---- .dotnet/src/Generated/Completions.cs | 7 +++---- .dotnet/src/Generated/Embeddings.cs | 7 +++---- .dotnet/src/Generated/Files.cs | 7 +++---- .dotnet/src/Generated/FineTuning.cs | 7 +++---- .dotnet/src/Generated/Images.cs | 7 +++---- .dotnet/src/Generated/Messages.cs | 7 +++---- .../Models/AssistantFileObject.Serialization.cs | 5 ++--- .../src/Generated/Models/AssistantFileObject.cs | 5 ++--- .../Models/AssistantFileObjectObject.cs | 5 ++--- .../Models/AssistantObject.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/AssistantObject.cs | 5 ++--- .../Generated/Models/AssistantObjectObject.cs | 5 ++--- .../Models/AudioSegment.Serialization.cs | 5 ++--- 
.dotnet/src/Generated/Models/AudioSegment.cs | 5 ++--- ...ompletionFunctionCallOption.Serialization.cs | 3 +-- .../Models/ChatCompletionFunctionCallOption.cs | 3 +-- .../ChatCompletionFunctions.Serialization.cs | 5 ++--- .../Generated/Models/ChatCompletionFunctions.cs | 5 ++--- ...atCompletionMessageToolCall.Serialization.cs | 5 ++--- .../Models/ChatCompletionMessageToolCall.cs | 5 ++--- ...tionMessageToolCallFunction.Serialization.cs | 5 ++--- .../ChatCompletionMessageToolCallFunction.cs | 5 ++--- .../Models/ChatCompletionMessageToolCallType.cs | 5 ++--- ...atCompletionNamedToolChoice.Serialization.cs | 3 +-- .../Models/ChatCompletionNamedToolChoice.cs | 3 +-- ...tionNamedToolChoiceFunction.Serialization.cs | 3 +-- .../ChatCompletionNamedToolChoiceFunction.cs | 3 +-- .../Models/ChatCompletionNamedToolChoiceType.cs | 3 +-- ...atCompletionResponseMessage.Serialization.cs | 5 ++--- .../Models/ChatCompletionResponseMessage.cs | 5 ++--- ...ResponseMessageFunctionCall.Serialization.cs | 5 ++--- ...ChatCompletionResponseMessageFunctionCall.cs | 5 ++--- .../Models/ChatCompletionResponseMessageRole.cs | 5 ++--- .../ChatCompletionTokenLogprob.Serialization.cs | 5 ++--- .../Models/ChatCompletionTokenLogprob.cs | 5 ++--- ...etionTokenLogprobTopLogprob.Serialization.cs | 5 ++--- .../ChatCompletionTokenLogprobTopLogprob.cs | 5 ++--- .../Models/ChatCompletionTool.Serialization.cs | 5 ++--- .../src/Generated/Models/ChatCompletionTool.cs | 5 ++--- .../Generated/Models/ChatCompletionToolType.cs | 5 ++--- .../Models/CompletionUsage.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/CompletionUsage.cs | 5 ++--- .../CreateAssistantFileRequest.Serialization.cs | 5 ++--- .../Models/CreateAssistantFileRequest.cs | 5 ++--- .../CreateAssistantRequest.Serialization.cs | 5 ++--- .../Generated/Models/CreateAssistantRequest.cs | 5 ++--- ...CreateChatCompletionRequest.Serialization.cs | 5 ++--- .../Models/CreateChatCompletionRequest.cs | 5 ++--- 
.../Models/CreateChatCompletionRequestModel.cs | 5 ++--- ...letionRequestResponseFormat.Serialization.cs | 5 ++--- ...CreateChatCompletionRequestResponseFormat.cs | 5 ++--- ...teChatCompletionRequestResponseFormatType.cs | 5 ++--- ...reateChatCompletionResponse.Serialization.cs | 5 ++--- .../Models/CreateChatCompletionResponse.cs | 5 ++--- ...hatCompletionResponseChoice.Serialization.cs | 5 ++--- .../CreateChatCompletionResponseChoice.cs | 5 ++--- ...eChatCompletionResponseChoiceFinishReason.cs | 5 ++--- ...etionResponseChoiceLogprobs.Serialization.cs | 5 ++--- ...reateChatCompletionResponseChoiceLogprobs.cs | 5 ++--- .../CreateChatCompletionResponseObject.cs | 5 ++--- .../CreateCompletionRequest.Serialization.cs | 5 ++--- .../Generated/Models/CreateCompletionRequest.cs | 5 ++--- .../Models/CreateCompletionRequestModel.cs | 5 ++--- .../CreateCompletionResponse.Serialization.cs | 5 ++--- .../Models/CreateCompletionResponse.cs | 5 ++--- ...ateCompletionResponseChoice.Serialization.cs | 5 ++--- .../Models/CreateCompletionResponseChoice.cs | 5 ++--- ...reateCompletionResponseChoiceFinishReason.cs | 5 ++--- ...etionResponseChoiceLogprobs.Serialization.cs | 5 ++--- .../CreateCompletionResponseChoiceLogprobs.cs | 5 ++--- .../Models/CreateCompletionResponseObject.cs | 5 ++--- .../CreateEmbeddingRequest.Serialization.cs | 5 ++--- .../Generated/Models/CreateEmbeddingRequest.cs | 5 ++--- .../CreateEmbeddingRequestEncodingFormat.cs | 5 ++--- .../Models/CreateEmbeddingRequestModel.cs | 5 ++--- .../CreateEmbeddingResponse.Serialization.cs | 5 ++--- .../Generated/Models/CreateEmbeddingResponse.cs | 5 ++--- .../Models/CreateEmbeddingResponseObject.cs | 5 ++--- .../Models/CreateFileRequest.Serialization.cs | 5 ++--- .../src/Generated/Models/CreateFileRequest.cs | 5 ++--- .../Models/CreateFileRequestPurpose.cs | 5 ++--- .../CreateFineTuningJobRequest.Serialization.cs | 5 ++--- .../Models/CreateFineTuningJobRequest.cs | 5 ++--- ...ngJobRequestHyperparameters.Serialization.cs | 5 
++--- ...CreateFineTuningJobRequestHyperparameters.cs | 5 ++--- .../Models/CreateFineTuningJobRequestModel.cs | 5 ++--- .../CreateImageEditRequest.Serialization.cs | 5 ++--- .../Generated/Models/CreateImageEditRequest.cs | 5 ++--- .../Models/CreateImageEditRequestModel.cs | 5 ++--- .../CreateImageEditRequestResponseFormat.cs | 5 ++--- .../Models/CreateImageEditRequestSize.cs | 5 ++--- .../Models/CreateImageRequest.Serialization.cs | 5 ++--- .../src/Generated/Models/CreateImageRequest.cs | 5 ++--- .../Generated/Models/CreateImageRequestModel.cs | 5 ++--- .../Models/CreateImageRequestQuality.cs | 5 ++--- .../Models/CreateImageRequestResponseFormat.cs | 5 ++--- .../Generated/Models/CreateImageRequestSize.cs | 5 ++--- .../Generated/Models/CreateImageRequestStyle.cs | 5 ++--- ...CreateImageVariationRequest.Serialization.cs | 5 ++--- .../Models/CreateImageVariationRequest.cs | 5 ++--- .../Models/CreateImageVariationRequestModel.cs | 5 ++--- ...CreateImageVariationRequestResponseFormat.cs | 5 ++--- .../Models/CreateImageVariationRequestSize.cs | 5 ++--- .../CreateMessageRequest.Serialization.cs | 5 ++--- .../Generated/Models/CreateMessageRequest.cs | 5 ++--- .../Models/CreateMessageRequestRole.cs | 5 ++--- .../CreateModerationRequest.Serialization.cs | 5 ++--- .../Generated/Models/CreateModerationRequest.cs | 5 ++--- .../Models/CreateModerationRequestModel.cs | 5 ++--- .../CreateModerationResponse.Serialization.cs | 5 ++--- .../Models/CreateModerationResponse.cs | 5 ++--- ...ateModerationResponseResult.Serialization.cs | 5 ++--- .../Models/CreateModerationResponseResult.cs | 5 ++--- ...ionResponseResultCategories.Serialization.cs | 5 ++--- .../CreateModerationResponseResultCategories.cs | 5 ++--- ...esponseResultCategoryScores.Serialization.cs | 5 ++--- ...ateModerationResponseResultCategoryScores.cs | 5 ++--- .../Models/CreateRunRequest.Serialization.cs | 5 ++--- .../src/Generated/Models/CreateRunRequest.cs | 5 ++--- .../Models/CreateSpeechRequest.Serialization.cs | 5 
++--- .../src/Generated/Models/CreateSpeechRequest.cs | 5 ++--- .../Models/CreateSpeechRequestModel.cs | 5 ++--- .../Models/CreateSpeechRequestResponseFormat.cs | 5 ++--- .../Models/CreateSpeechRequestVoice.cs | 5 ++--- .../CreateThreadAndRunRequest.Serialization.cs | 5 ++--- .../Models/CreateThreadAndRunRequest.cs | 5 ++--- .../Models/CreateThreadRequest.Serialization.cs | 5 ++--- .../src/Generated/Models/CreateThreadRequest.cs | 5 ++--- .../CreateTranscriptionRequest.Serialization.cs | 5 ++--- .../Models/CreateTranscriptionRequest.cs | 5 ++--- .../Models/CreateTranscriptionRequestModel.cs | 5 ++--- .../CreateTranscriptionRequestResponseFormat.cs | 5 ++--- ...CreateTranscriptionResponse.Serialization.cs | 5 ++--- .../Models/CreateTranscriptionResponse.cs | 5 ++--- .../Models/CreateTranscriptionResponseTask.cs | 5 ++--- .../CreateTranslationRequest.Serialization.cs | 5 ++--- .../Models/CreateTranslationRequest.cs | 5 ++--- .../Models/CreateTranslationRequestModel.cs | 5 ++--- .../CreateTranslationRequestResponseFormat.cs | 5 ++--- .../CreateTranslationResponse.Serialization.cs | 5 ++--- .../Models/CreateTranslationResponse.cs | 5 ++--- .../Models/CreateTranslationResponseTask.cs | 5 ++--- ...DeleteAssistantFileResponse.Serialization.cs | 5 ++--- .../Models/DeleteAssistantFileResponse.cs | 5 ++--- .../Models/DeleteAssistantFileResponseObject.cs | 5 ++--- .../DeleteAssistantResponse.Serialization.cs | 5 ++--- .../Generated/Models/DeleteAssistantResponse.cs | 5 ++--- .../Models/DeleteAssistantResponseObject.cs | 5 ++--- .../Models/DeleteFileResponse.Serialization.cs | 5 ++--- .../src/Generated/Models/DeleteFileResponse.cs | 5 ++--- .../Models/DeleteFileResponseObject.cs | 5 ++--- .../Models/DeleteModelResponse.Serialization.cs | 5 ++--- .../src/Generated/Models/DeleteModelResponse.cs | 5 ++--- .../Models/DeleteModelResponseObject.cs | 5 ++--- .../DeleteThreadResponse.Serialization.cs | 5 ++--- .../Generated/Models/DeleteThreadResponse.cs | 5 ++--- 
.../Models/DeleteThreadResponseObject.cs | 5 ++--- .../Generated/Models/Embedding.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/Embedding.cs | 5 ++--- .dotnet/src/Generated/Models/EmbeddingObject.cs | 5 ++--- .../Models/EmbeddingUsage.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/EmbeddingUsage.cs | 5 ++--- .../Models/FineTuningJob.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/FineTuningJob.cs | 5 ++--- .../Models/FineTuningJobError.Serialization.cs | 5 ++--- .../src/Generated/Models/FineTuningJobError.cs | 5 ++--- .../Models/FineTuningJobEvent.Serialization.cs | 5 ++--- .../src/Generated/Models/FineTuningJobEvent.cs | 5 ++--- .../Generated/Models/FineTuningJobEventLevel.cs | 5 ++--- ...ineTuningJobHyperparameters.Serialization.cs | 5 ++--- .../Models/FineTuningJobHyperparameters.cs | 5 ++--- .../src/Generated/Models/FineTuningJobObject.cs | 5 ++--- .../src/Generated/Models/FineTuningJobStatus.cs | 5 ++--- .../Models/FunctionObject.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/FunctionObject.cs | 5 ++--- .../Models/FunctionParameters.Serialization.cs | 5 ++--- .../src/Generated/Models/FunctionParameters.cs | 5 ++--- .../src/Generated/Models/Image.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/Image.cs | 5 ++--- .../Models/ImagesResponse.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/ImagesResponse.cs | 5 ++--- .../ListAssistantFilesResponse.Serialization.cs | 5 ++--- .../Models/ListAssistantFilesResponse.cs | 5 ++--- .../Models/ListAssistantFilesResponseObject.cs | 5 ++--- .../ListAssistantsResponse.Serialization.cs | 5 ++--- .../Generated/Models/ListAssistantsResponse.cs | 5 ++--- .../Models/ListAssistantsResponseObject.cs | 5 ++--- .../Models/ListFilesResponse.Serialization.cs | 5 ++--- .../src/Generated/Models/ListFilesResponse.cs | 5 ++--- .../Generated/Models/ListFilesResponseObject.cs | 5 ++--- ...FineTuningJobEventsResponse.Serialization.cs | 5 ++--- .../Models/ListFineTuningJobEventsResponse.cs | 5 
++--- .../ListMessageFilesResponse.Serialization.cs | 5 ++--- .../Models/ListMessageFilesResponse.cs | 5 ++--- .../Models/ListMessageFilesResponseObject.cs | 5 ++--- .../ListMessagesResponse.Serialization.cs | 5 ++--- .../Generated/Models/ListMessagesResponse.cs | 5 ++--- .../Models/ListMessagesResponseObject.cs | 5 ++--- .../Models/ListModelsResponse.Serialization.cs | 5 ++--- .../src/Generated/Models/ListModelsResponse.cs | 5 ++--- .../Models/ListModelsResponseObject.cs | 5 ++--- .dotnet/src/Generated/Models/ListOrder.cs | 5 ++--- ...natedFineTuningJobsResponse.Serialization.cs | 5 ++--- .../ListPaginatedFineTuningJobsResponse.cs | 5 ++--- .../ListRunStepsResponse.Serialization.cs | 5 ++--- .../Generated/Models/ListRunStepsResponse.cs | 5 ++--- .../Models/ListRunStepsResponseObject.cs | 5 ++--- .../Models/ListRunsResponse.Serialization.cs | 5 ++--- .../src/Generated/Models/ListRunsResponse.cs | 5 ++--- .../Generated/Models/ListRunsResponseObject.cs | 5 ++--- .../Models/MessageFileObject.Serialization.cs | 5 ++--- .../src/Generated/Models/MessageFileObject.cs | 5 ++--- .../Generated/Models/MessageFileObjectObject.cs | 5 ++--- .../Models/MessageObject.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/MessageObject.cs | 5 ++--- .../src/Generated/Models/MessageObjectObject.cs | 5 ++--- .../src/Generated/Models/MessageObjectRole.cs | 5 ++--- .../src/Generated/Models/Model.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/Model.cs | 5 ++--- .dotnet/src/Generated/Models/ModelObject.cs | 5 ++--- .../ModifyAssistantRequest.Serialization.cs | 5 ++--- .../Generated/Models/ModifyAssistantRequest.cs | 5 ++--- .../ModifyMessageRequest.Serialization.cs | 5 ++--- .../Generated/Models/ModifyMessageRequest.cs | 5 ++--- .../Models/ModifyRunRequest.Serialization.cs | 5 ++--- .../src/Generated/Models/ModifyRunRequest.cs | 5 ++--- .../Models/ModifyThreadRequest.Serialization.cs | 5 ++--- .../src/Generated/Models/ModifyThreadRequest.cs | 5 ++--- 
.../Models/OpenAIFile.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/OpenAIFile.cs | 5 ++--- .../src/Generated/Models/OpenAIFileObject.cs | 5 ++--- .../src/Generated/Models/OpenAIFilePurpose.cs | 5 ++--- .../src/Generated/Models/OpenAIFileStatus.cs | 5 ++--- .../Models/RunCompletionUsage.Serialization.cs | 5 ++--- .../src/Generated/Models/RunCompletionUsage.cs | 5 ++--- .../Generated/Models/RunObject.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/RunObject.cs | 5 ++--- .../Models/RunObjectLastError.Serialization.cs | 5 ++--- .../src/Generated/Models/RunObjectLastError.cs | 5 ++--- .../Generated/Models/RunObjectLastErrorCode.cs | 5 ++--- .dotnet/src/Generated/Models/RunObjectObject.cs | 5 ++--- .../RunObjectRequiredAction.Serialization.cs | 5 ++--- .../Generated/Models/RunObjectRequiredAction.cs | 5 ++--- ...iredActionSubmitToolOutputs.Serialization.cs | 5 ++--- .../RunObjectRequiredActionSubmitToolOutputs.cs | 5 ++--- .../Models/RunObjectRequiredActionType.cs | 5 ++--- .dotnet/src/Generated/Models/RunObjectStatus.cs | 5 ++--- ...etailsMessageCreationObject.Serialization.cs | 3 +-- .../RunStepDetailsMessageCreationObject.cs | 3 +-- ...eationObjectMessageCreation.Serialization.cs | 3 +-- ...tailsMessageCreationObjectMessageCreation.cs | 3 +-- .../RunStepDetailsMessageCreationObjectType.cs | 3 +-- ...nStepDetailsToolCallsObject.Serialization.cs | 3 +-- .../Models/RunStepDetailsToolCallsObject.cs | 3 +-- .../Models/RunStepDetailsToolCallsObjectType.cs | 3 +-- .../Models/RunStepObject.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/RunStepObject.cs | 5 ++--- .../RunStepObjectLastError.Serialization.cs | 5 ++--- .../Generated/Models/RunStepObjectLastError.cs | 5 ++--- .../Models/RunStepObjectLastErrorCode.cs | 5 ++--- .../src/Generated/Models/RunStepObjectObject.cs | 5 ++--- .../src/Generated/Models/RunStepObjectStatus.cs | 5 ++--- .../src/Generated/Models/RunStepObjectType.cs | 5 ++--- .../Models/RunToolCallObject.Serialization.cs | 5 ++--- 
.../src/Generated/Models/RunToolCallObject.cs | 5 ++--- .../RunToolCallObjectFunction.Serialization.cs | 5 ++--- .../Models/RunToolCallObjectFunction.cs | 5 ++--- .../Generated/Models/RunToolCallObjectType.cs | 5 ++--- ...SubmitToolOutputsRunRequest.Serialization.cs | 5 ++--- .../Models/SubmitToolOutputsRunRequest.cs | 5 ++--- ...OutputsRunRequestToolOutput.Serialization.cs | 5 ++--- .../SubmitToolOutputsRunRequestToolOutput.cs | 5 ++--- .../Models/ThreadObject.Serialization.cs | 5 ++--- .dotnet/src/Generated/Models/ThreadObject.cs | 5 ++--- .../src/Generated/Models/ThreadObjectObject.cs | 5 ++--- .dotnet/src/Generated/ModelsOps.cs | 7 +++---- .dotnet/src/Generated/Moderations.cs | 7 +++---- .dotnet/src/Generated/OpenAIClient.cs | 5 ++--- .dotnet/src/Generated/OpenAIClientOptions.cs | 5 ++--- .dotnet/src/Generated/OpenAIModelFactory.cs | 5 ++--- .dotnet/src/Generated/Runs.cs | 7 +++---- .dotnet/src/Generated/Threads.cs | 7 +++---- .../tests/Generated/Tests/AssistantsTests.cs | 1 - .dotnet/tests/Generated/Tests/AudioTests.cs | 1 - .dotnet/tests/Generated/Tests/ChatTests.cs | 1 - .../tests/Generated/Tests/CompletionsTests.cs | 1 - .../tests/Generated/Tests/EmbeddingsTests.cs | 1 - .dotnet/tests/Generated/Tests/FilesTests.cs | 1 - .../tests/Generated/Tests/FineTuningTests.cs | 1 - .dotnet/tests/Generated/Tests/ImagesTests.cs | 1 - .dotnet/tests/Generated/Tests/MessagesTests.cs | 1 - .dotnet/tests/Generated/Tests/ModelsOpsTests.cs | 1 - .../tests/Generated/Tests/ModerationsTests.cs | 1 - .dotnet/tests/Generated/Tests/RunsTests.cs | 1 - .dotnet/tests/Generated/Tests/ThreadsTests.cs | 1 - 300 files changed, 590 insertions(+), 871 deletions(-) create mode 100644 .dotnet/scripts/ConvertTo-Internal.ps1 diff --git a/.dotnet/scripts/ConvertTo-Internal.ps1 b/.dotnet/scripts/ConvertTo-Internal.ps1 new file mode 100644 index 000000000..ec809e075 --- /dev/null +++ b/.dotnet/scripts/ConvertTo-Internal.ps1 @@ -0,0 +1,17 @@ +$root = Split-Path $PSScriptRoot -Parent +$directory = 
Join-Path -Path $root -ChildPath "src\Generated" +$files = Get-ChildItem -Path $($directory + "\*") -Include "*.cs" -Recurse + +foreach ($file in $files) { + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "public partial class", "internal partial class" + $content = $content -creplace "public readonly partial struct", "internal readonly partial struct" + $content = $content -creplace "public static partial class", "internal static partial class" + $content = $content -creplace "namespace OpenAI", "namespace OpenAI.Internal" + $content = $content -creplace "using OpenAI.Models;", "using OpenAI.Internal.Models;" + + $content | Set-Content -Path $file.FullName -NoNewline +} \ No newline at end of file diff --git a/.dotnet/scripts/Update-ClientModel.ps1 b/.dotnet/scripts/Update-ClientModel.ps1 index 13b8beb80..9cab3caf9 100644 --- a/.dotnet/scripts/Update-ClientModel.ps1 +++ b/.dotnet/scripts/Update-ClientModel.ps1 @@ -39,7 +39,7 @@ function Update-OpenAIClient { $content = $content -creplace "_pipeline = MessagePipeline\.Create\(options, new IPipelinePolicy\[\] \{ new KeyCredentialPolicy\(_keyCredential, AuthorizationHeader, AuthorizationApiKeyPrefix\) \}, Array\.Empty>\(\)\);", "var authenticationPolicy = ApiKeyAuthenticationPolicy.CreateBearerAuthorizationPolicy(_credential);`r`n _pipeline = ClientPipeline.Create(options,`r`n perCallPolicies: ReadOnlySpan.Empty,`r`n perTryPolicies: new PipelinePolicy[] { authenticationPolicy },`r`n beforeTransportPolicies: ReadOnlySpan.Empty);" $content = $content -creplace "\(ClientDiagnostics, ", "(" - $content | Set-Content -Path $file.FullName + $content | Set-Content -Path $file.FullName -NoNewline } function Update-OpenAIClientOptions { @@ -54,7 +54,7 @@ function Update-OpenAIClientOptions { $content = $content -creplace "using System\.ClientModel;", "using System.ClientModel.Primitives;" $content = $content -creplace ": RequestOptions", ": 
ClientPipelineOptions" - $content | Set-Content -Path $file.FullName + $content | Set-Content -Path $file.FullName -NoNewline } function Update-Subclients { @@ -155,7 +155,7 @@ function Update-Subclients { $content = $content -creplace "private static ResponseErrorClassifier _responseErrorClassifier200;", "private static PipelineMessageClassifier _responseErrorClassifier200;" $content = $content -creplace "private static ResponseErrorClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 \?\?= new StatusResponseClassifier\(stackalloc ushort\[\] \{ 200 \}\);", "private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 });" - $content | Set-Content -Path $file.FullName + $content | Set-Content -Path $file.FullName -NoNewline } } @@ -176,7 +176,7 @@ function Update-Models { $content = $content -creplace "\s+void IUtf8JsonWriteable\.Write\(Utf8JsonWriter writer\) => \(\(IJsonModel<(\w+)>\)this\)\.Write\(writer, new ModelReaderWriterOptions\(`"W`"\)\);`r`n", "" $content = $content -creplace "(?s)\s+\/\/\/ Convert into a Utf8JsonRequestBody\. .*?return content;.*?\}", "" - $content | Set-Content -Path $file.FullName + $content | Set-Content -Path $file.FullName -NoNewline } } @@ -192,7 +192,7 @@ function Update-Tests { $content = $content -creplace " KeyCredential", " ApiKeyCredential" - $content | Set-Content -Path $file.FullName + $content | Set-Content -Path $file.FullName -NoNewline } } diff --git a/.dotnet/src/Generated/Assistants.cs b/.dotnet/src/Generated/Assistants.cs index e725cfe7e..8ccb9e55f 100644 --- a/.dotnet/src/Generated/Assistants.cs +++ b/.dotnet/src/Generated/Assistants.cs @@ -6,13 +6,13 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using OpenAI.Models; +using OpenAI.Internal.Models; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated sub-client. /// The Assistants sub-client. 
- public partial class Assistants + internal partial class Assistants { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -1376,4 +1376,3 @@ internal PipelineMessage CreateDeleteAssistantFileRequest(string assistantId, st private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } - diff --git a/.dotnet/src/Generated/Audio.cs b/.dotnet/src/Generated/Audio.cs index 963cfc0f2..9065e02c7 100644 --- a/.dotnet/src/Generated/Audio.cs +++ b/.dotnet/src/Generated/Audio.cs @@ -6,13 +6,13 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using OpenAI.Models; +using OpenAI.Internal.Models; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated sub-client. /// The Audio sub-client. - public partial class Audio + internal partial class Audio { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -387,4 +387,3 @@ internal PipelineMessage CreateCreateTranslationRequest(BinaryContent content, R private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } - diff --git a/.dotnet/src/Generated/Chat.cs b/.dotnet/src/Generated/Chat.cs index bb3bb2cc3..69528f3d3 100644 --- a/.dotnet/src/Generated/Chat.cs +++ b/.dotnet/src/Generated/Chat.cs @@ -6,13 +6,13 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using OpenAI.Models; +using OpenAI.Internal.Models; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated sub-client. /// The Chat sub-client. 
- public partial class Chat + internal partial class Chat { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -159,4 +159,3 @@ internal PipelineMessage CreateCreateChatCompletionRequest(BinaryContent content private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } - diff --git a/.dotnet/src/Generated/Completions.cs b/.dotnet/src/Generated/Completions.cs index f55f7c729..d43e6be89 100644 --- a/.dotnet/src/Generated/Completions.cs +++ b/.dotnet/src/Generated/Completions.cs @@ -6,13 +6,13 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using OpenAI.Models; +using OpenAI.Internal.Models; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated sub-client. /// The Completions sub-client. - public partial class Completions + internal partial class Completions { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -159,4 +159,3 @@ internal PipelineMessage CreateCreateCompletionRequest(BinaryContent content, Re private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } - diff --git a/.dotnet/src/Generated/Embeddings.cs b/.dotnet/src/Generated/Embeddings.cs index 8056ebd08..eb53ff05a 100644 --- a/.dotnet/src/Generated/Embeddings.cs +++ b/.dotnet/src/Generated/Embeddings.cs @@ -6,13 +6,13 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using OpenAI.Models; +using OpenAI.Internal.Models; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated sub-client. /// The Embeddings sub-client. 
- public partial class Embeddings + internal partial class Embeddings { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -159,4 +159,3 @@ internal PipelineMessage CreateCreateEmbeddingRequest(BinaryContent content, Req private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } - diff --git a/.dotnet/src/Generated/Files.cs b/.dotnet/src/Generated/Files.cs index 815ae85ce..acbd43209 100644 --- a/.dotnet/src/Generated/Files.cs +++ b/.dotnet/src/Generated/Files.cs @@ -6,13 +6,13 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using OpenAI.Models; +using OpenAI.Internal.Models; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated sub-client. /// The Files sub-client. - public partial class Files + internal partial class Files { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -656,4 +656,3 @@ internal PipelineMessage CreateDownloadFileRequest(string fileId, RequestOptions private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } - diff --git a/.dotnet/src/Generated/FineTuning.cs b/.dotnet/src/Generated/FineTuning.cs index 827f7f18f..f8c408c7d 100644 --- a/.dotnet/src/Generated/FineTuning.cs +++ b/.dotnet/src/Generated/FineTuning.cs @@ -6,13 +6,13 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using OpenAI.Models; +using OpenAI.Internal.Models; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated sub-client. /// The FineTuning sub-client. 
- public partial class FineTuning + internal partial class FineTuning { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -702,4 +702,3 @@ internal PipelineMessage CreateGetFineTuningEventsRequest(string fineTuningJobId private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } - diff --git a/.dotnet/src/Generated/Images.cs b/.dotnet/src/Generated/Images.cs index fb99a3319..e5d910962 100644 --- a/.dotnet/src/Generated/Images.cs +++ b/.dotnet/src/Generated/Images.cs @@ -6,13 +6,13 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using OpenAI.Models; +using OpenAI.Internal.Models; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated sub-client. /// The Images sub-client. - public partial class Images + internal partial class Images { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -387,4 +387,3 @@ internal PipelineMessage CreateCreateImageVariationRequest(BinaryContent content private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } - diff --git a/.dotnet/src/Generated/Messages.cs b/.dotnet/src/Generated/Messages.cs index f3abd4bad..32579b8bd 100644 --- a/.dotnet/src/Generated/Messages.cs +++ b/.dotnet/src/Generated/Messages.cs @@ -6,13 +6,13 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using OpenAI.Models; +using OpenAI.Internal.Models; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated sub-client. /// The Messages sub-client. 
- public partial class Messages + internal partial class Messages { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -1084,4 +1084,3 @@ internal PipelineMessage CreateGetMessageFileRequest(string threadId, string mes private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } - diff --git a/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs b/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs index c2b95887f..eebbf2b6f 100644 --- a/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class AssistantFileObject : IJsonModel + internal partial class AssistantFileObject : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -142,4 +142,3 @@ internal static AssistantFileObject FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/AssistantFileObject.cs b/.dotnet/src/Generated/Models/AssistantFileObject.cs index ee5c47c27..bb30fc168 100644 --- a/.dotnet/src/Generated/Models/AssistantFileObject.cs +++ b/.dotnet/src/Generated/Models/AssistantFileObject.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// A list of [Files](/docs/api-reference/files) attached to an `assistant`. - public partial class AssistantFileObject + internal partial class AssistantFileObject { /// /// Keeps track of any properties unknown to the library. 
@@ -87,4 +87,3 @@ internal AssistantFileObject() public string AssistantId { get; } } } - diff --git a/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs b/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs index 7883e84f3..08816d6a5 100644 --- a/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs +++ b/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The AssistantFileObject_object. - public readonly partial struct AssistantFileObjectObject : IEquatable + internal readonly partial struct AssistantFileObjectObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public AssistantFileObjectObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs b/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs index c4e51bcf9..962670141 100644 --- a/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class AssistantObject : IJsonModel + internal partial class AssistantObject : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -288,4 +288,3 @@ internal static AssistantObject FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/AssistantObject.cs b/.dotnet/src/Generated/Models/AssistantObject.cs index 3b6d4b131..d3b5e5e20 100644 --- a/.dotnet/src/Generated/Models/AssistantObject.cs +++ b/.dotnet/src/Generated/Models/AssistantObject.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Represents an `assistant` that can call 
the model and use tools. - public partial class AssistantObject + internal partial class AssistantObject { /// /// Keeps track of any properties unknown to the library. @@ -198,4 +198,3 @@ internal AssistantObject() public IReadOnlyDictionary Metadata { get; } } } - diff --git a/.dotnet/src/Generated/Models/AssistantObjectObject.cs b/.dotnet/src/Generated/Models/AssistantObjectObject.cs index f7a7cb21d..64e27426c 100644 --- a/.dotnet/src/Generated/Models/AssistantObjectObject.cs +++ b/.dotnet/src/Generated/Models/AssistantObjectObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The AssistantObject_object. - public readonly partial struct AssistantObjectObject : IEquatable + internal readonly partial struct AssistantObjectObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public AssistantObjectObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs b/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs index 5b4ef9b7a..e1641e3c0 100644 --- a/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs +++ b/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class AudioSegment : IJsonModel + internal partial class AudioSegment : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -200,4 +200,3 @@ internal static AudioSegment FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/AudioSegment.cs b/.dotnet/src/Generated/Models/AudioSegment.cs index f25b206ff..0ee02b78b 100644 --- a/.dotnet/src/Generated/Models/AudioSegment.cs +++ b/.dotnet/src/Generated/Models/AudioSegment.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using 
System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The AudioSegment. - public partial class AudioSegment + internal partial class AudioSegment { /// /// Keeps track of any properties unknown to the library. @@ -143,4 +143,3 @@ internal AudioSegment() public double NoSpeechProb { get; } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs index d82b8d8e5..639a83a94 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs @@ -6,7 +6,7 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { internal partial class ChatCompletionFunctionCallOption : IJsonModel { @@ -118,4 +118,3 @@ internal static ChatCompletionFunctionCallOption FromResponse(PipelineResponse r } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs index 40c76aacd..148eb7588 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs @@ -4,7 +4,7 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// /// Specifying a particular function via `{"name": "my_function"}` forces the model to call that @@ -72,4 +72,3 @@ internal ChatCompletionFunctionCallOption() public string Name { get; } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs index 5558f49c3..af8f82426 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs +++ 
b/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ChatCompletionFunctions : IJsonModel + internal partial class ChatCompletionFunctions : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -144,4 +144,3 @@ internal static ChatCompletionFunctions FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs index 7055cc173..a8f409a52 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs @@ -4,11 +4,11 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ChatCompletionFunctions. [Obsolete("deprecated")] - public partial class ChatCompletionFunctions + internal partial class ChatCompletionFunctions { /// /// Keeps track of any properties unknown to the library. 
@@ -93,4 +93,3 @@ internal ChatCompletionFunctions() public FunctionParameters Parameters { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs index 4806e429e..71b3260cc 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ChatCompletionMessageToolCall : IJsonModel + internal partial class ChatCompletionMessageToolCall : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -134,4 +134,3 @@ internal static ChatCompletionMessageToolCall FromResponse(PipelineResponse resp } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs index 1d5bbc882..dc5ffd9d0 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ChatCompletionMessageToolCall. - public partial class ChatCompletionMessageToolCall + internal partial class ChatCompletionMessageToolCall { /// /// Keeps track of any properties unknown to the library. 
@@ -81,4 +81,3 @@ internal ChatCompletionMessageToolCall() public ChatCompletionMessageToolCallFunction Function { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs index 57c8334c1..e9a8bf6c1 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ChatCompletionMessageToolCallFunction : IJsonModel + internal partial class ChatCompletionMessageToolCallFunction : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -126,4 +126,3 @@ internal static ChatCompletionMessageToolCallFunction FromResponse(PipelineRespo } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs index 892e74010..fa5776ea0 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ChatCompletionMessageToolCallFunction. - public partial class ChatCompletionMessageToolCallFunction + internal partial class ChatCompletionMessageToolCallFunction { /// /// Keeps track of any properties unknown to the library. 
@@ -88,4 +88,3 @@ internal ChatCompletionMessageToolCallFunction() public string Arguments { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs index bdf80b599..3da609cee 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ChatCompletionMessageToolCall_type. - public readonly partial struct ChatCompletionMessageToolCallType : IEquatable + internal readonly partial struct ChatCompletionMessageToolCallType : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public ChatCompletionMessageToolCallType(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs index a5cb5af93..d4a77ea78 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs @@ -6,7 +6,7 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { internal partial class ChatCompletionNamedToolChoice : IJsonModel { @@ -126,4 +126,3 @@ internal static ChatCompletionNamedToolChoice FromResponse(PipelineResponse resp } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs index 63c912acc..149097e23 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs @@ -4,7 +4,7 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; 
-namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Specifies a tool the model should use. Use to force the model to call a specific function. internal partial class ChatCompletionNamedToolChoice @@ -74,4 +74,3 @@ internal ChatCompletionNamedToolChoice() public ChatCompletionNamedToolChoiceFunction Function { get; } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs index cb372631e..b85f015d9 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs @@ -6,7 +6,7 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { internal partial class ChatCompletionNamedToolChoiceFunction : IJsonModel { @@ -118,4 +118,3 @@ internal static ChatCompletionNamedToolChoiceFunction FromResponse(PipelineRespo } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs index 44be066fd..520def419 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs @@ -4,7 +4,7 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ChatCompletionNamedToolChoiceFunction. 
internal partial class ChatCompletionNamedToolChoiceFunction @@ -69,4 +69,3 @@ internal ChatCompletionNamedToolChoiceFunction() public string Name { get; } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs index 166411eac..0765fe138 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs @@ -3,7 +3,7 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ChatCompletionNamedToolChoice_type. internal readonly partial struct ChatCompletionNamedToolChoiceType : IEquatable @@ -41,4 +41,3 @@ public ChatCompletionNamedToolChoiceType(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs index b819876dc..b2b4ee706 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ChatCompletionResponseMessage : IJsonModel + internal partial class ChatCompletionResponseMessage : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -178,4 +178,3 @@ internal static ChatCompletionResponseMessage FromResponse(PipelineResponse resp } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs index a27e26e91..27bcffd13 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs @@ -4,10 
+4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ChatCompletionResponseMessage. - public partial class ChatCompletionResponseMessage + internal partial class ChatCompletionResponseMessage { /// /// Keeps track of any properties unknown to the library. @@ -80,4 +80,3 @@ internal ChatCompletionResponseMessage() public ChatCompletionResponseMessageFunctionCall FunctionCall { get; } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs index 6fd3d045a..5b55a6990 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ChatCompletionResponseMessageFunctionCall : IJsonModel + internal partial class ChatCompletionResponseMessageFunctionCall : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -126,4 +126,3 @@ internal static ChatCompletionResponseMessageFunctionCall FromResponse(PipelineR } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs index 8016f2af6..e788deaeb 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ChatCompletionResponseMessageFunctionCall. 
- public partial class ChatCompletionResponseMessageFunctionCall + internal partial class ChatCompletionResponseMessageFunctionCall { /// /// Keeps track of any properties unknown to the library. @@ -88,4 +88,3 @@ internal ChatCompletionResponseMessageFunctionCall() public string Name { get; } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs index 08bcd42a2..5ff79558a 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ChatCompletionResponseMessage_role. - public readonly partial struct ChatCompletionResponseMessageRole : IEquatable + internal readonly partial struct ChatCompletionResponseMessageRole : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public ChatCompletionResponseMessageRole(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs index b60cb9ec5..cbb9356bb 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ChatCompletionTokenLogprob : IJsonModel + internal partial class ChatCompletionTokenLogprob : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -174,4 +174,3 @@ internal static ChatCompletionTokenLogprob FromResponse(PipelineResponse respons } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs 
b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs index bd303c7a2..0a7ae0ba5 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ChatCompletionTokenLogprob. - public partial class ChatCompletionTokenLogprob + internal partial class ChatCompletionTokenLogprob { /// /// Keeps track of any properties unknown to the library. @@ -113,4 +113,3 @@ internal ChatCompletionTokenLogprob() public IReadOnlyList TopLogprobs { get; } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs index b71b5ace6..7f84c1bca 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ChatCompletionTokenLogprobTopLogprob : IJsonModel + internal partial class ChatCompletionTokenLogprobTopLogprob : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -156,4 +156,3 @@ internal static ChatCompletionTokenLogprobTopLogprob FromResponse(PipelineRespon } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs index 10e36b3a6..74a42012a 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models 
{ /// The ChatCompletionTokenLogprobTopLogprob. - public partial class ChatCompletionTokenLogprobTopLogprob + internal partial class ChatCompletionTokenLogprobTopLogprob { /// /// Keeps track of any properties unknown to the library. @@ -97,4 +97,3 @@ internal ChatCompletionTokenLogprobTopLogprob() public IReadOnlyList Bytes { get; } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs index b5913cfcd..cb99d68c8 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ChatCompletionTool : IJsonModel + internal partial class ChatCompletionTool : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -126,4 +126,3 @@ internal static ChatCompletionTool FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionTool.cs b/.dotnet/src/Generated/Models/ChatCompletionTool.cs index 28feedefd..8edef4914 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionTool.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionTool.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ChatCompletionTool. - public partial class ChatCompletionTool + internal partial class ChatCompletionTool { /// /// Keeps track of any properties unknown to the library. 
@@ -74,4 +74,3 @@ internal ChatCompletionTool() public FunctionObject Function { get; } } } - diff --git a/.dotnet/src/Generated/Models/ChatCompletionToolType.cs b/.dotnet/src/Generated/Models/ChatCompletionToolType.cs index b1dae6880..af31e3c02 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionToolType.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionToolType.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ChatCompletionTool_type. - public readonly partial struct ChatCompletionToolType : IEquatable + internal readonly partial struct ChatCompletionToolType : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public ChatCompletionToolType(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs b/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs index 5f4745e70..9d53d9f78 100644 --- a/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs +++ b/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CompletionUsage : IJsonModel + internal partial class CompletionUsage : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -134,4 +134,3 @@ internal static CompletionUsage FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CompletionUsage.cs b/.dotnet/src/Generated/Models/CompletionUsage.cs index 440f8ef74..64c5aa6ec 100644 --- a/.dotnet/src/Generated/Models/CompletionUsage.cs +++ b/.dotnet/src/Generated/Models/CompletionUsage.cs @@ -3,10 +3,10 @@ using System; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Usage statistics for the completion request. 
- public partial class CompletionUsage + internal partial class CompletionUsage { /// /// Keeps track of any properties unknown to the library. @@ -77,4 +77,3 @@ internal CompletionUsage() public long TotalTokens { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs index e4d8ef38b..e3a752244 100644 --- a/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateAssistantFileRequest : IJsonModel + internal partial class CreateAssistantFileRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -118,4 +118,3 @@ internal static CreateAssistantFileRequest FromResponse(PipelineResponse respons } } } - diff --git a/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs index 7a7c067c9..be3485d43 100644 --- a/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs +++ b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateAssistantFileRequest. - public partial class CreateAssistantFileRequest + internal partial class CreateAssistantFileRequest { /// /// Keeps track of any properties unknown to the library. 
@@ -78,4 +78,3 @@ internal CreateAssistantFileRequest() public string FileId { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs index 6f034bc1e..6c7a3d0b8 100644 --- a/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateAssistantRequest : IJsonModel + internal partial class CreateAssistantRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -289,4 +289,3 @@ internal static CreateAssistantRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateAssistantRequest.cs b/.dotnet/src/Generated/Models/CreateAssistantRequest.cs index f99c14f37..6e08eb6ac 100644 --- a/.dotnet/src/Generated/Models/CreateAssistantRequest.cs +++ b/.dotnet/src/Generated/Models/CreateAssistantRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateAssistantRequest. - public partial class CreateAssistantRequest + internal partial class CreateAssistantRequest { /// /// Keeps track of any properties unknown to the library. 
@@ -157,4 +157,3 @@ internal CreateAssistantRequest() public IDictionary Metadata { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs index 4310faf5b..dbd787e7b 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateChatCompletionRequest : IJsonModel + internal partial class CreateChatCompletionRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -568,4 +568,3 @@ internal static CreateChatCompletionRequest FromResponse(PipelineResponse respon } } } - diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs index 80c7d9be5..547e318d9 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateChatCompletionRequest. - public partial class CreateChatCompletionRequest + internal partial class CreateChatCompletionRequest { /// /// Keeps track of any properties unknown to the library. 
@@ -508,4 +508,3 @@ internal CreateChatCompletionRequest() public IList Functions { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs index 5110350bd..c15dcb757 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for model in CreateChatCompletionRequest. - public readonly partial struct CreateChatCompletionRequestModel : IEquatable + internal readonly partial struct CreateChatCompletionRequestModel : IEquatable { private readonly string _value; @@ -86,4 +86,3 @@ public CreateChatCompletionRequestModel(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs index 8a13529c4..fd284f215 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateChatCompletionRequestResponseFormat : IJsonModel + internal partial class CreateChatCompletionRequestResponseFormat : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -125,4 +125,3 @@ internal static CreateChatCompletionRequestResponseFormat FromResponse(PipelineR } } } - diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs index a1dd64235..82322b0d6 
100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs @@ -3,10 +3,10 @@ using System; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateChatCompletionRequestResponseFormat. - public partial class CreateChatCompletionRequestResponseFormat + internal partial class CreateChatCompletionRequestResponseFormat { /// /// Keeps track of any properties unknown to the library. @@ -58,4 +58,3 @@ internal CreateChatCompletionRequestResponseFormat(CreateChatCompletionRequestRe public CreateChatCompletionRequestResponseFormatType? Type { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs index 894f322de..8d305ab28 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for type in CreateChatCompletionRequestResponseFormat. 
- public readonly partial struct CreateChatCompletionRequestResponseFormatType : IEquatable + internal readonly partial struct CreateChatCompletionRequestResponseFormatType : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public CreateChatCompletionRequestResponseFormatType(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs index f9c94d1dc..387a43069 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateChatCompletionResponse : IJsonModel + internal partial class CreateChatCompletionResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -186,4 +186,3 @@ internal static CreateChatCompletionResponse FromResponse(PipelineResponse respo } } } - diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs index 6476530ae..1f44df4c1 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Represents a chat completion response returned by model, based on the provided input. - public partial class CreateChatCompletionResponse + internal partial class CreateChatCompletionResponse { /// /// Keeps track of any properties unknown to the library. 
@@ -113,4 +113,3 @@ internal CreateChatCompletionResponse() public CompletionUsage Usage { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs index 5cd85183e..850a633f8 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateChatCompletionResponseChoice : IJsonModel + internal partial class CreateChatCompletionResponseChoice : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -154,4 +154,3 @@ internal static CreateChatCompletionResponseChoice FromResponse(PipelineResponse } } } - diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs index 17342fc9d..5d127075b 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateChatCompletionResponseChoice. - public partial class CreateChatCompletionResponseChoice + internal partial class CreateChatCompletionResponseChoice { /// /// Keeps track of any properties unknown to the library. 
@@ -105,4 +105,3 @@ internal CreateChatCompletionResponseChoice() public CreateChatCompletionResponseChoiceLogprobs Logprobs { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs index e1e180f28..817df9578 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for finish_reason in CreateChatCompletionResponseChoice. - public readonly partial struct CreateChatCompletionResponseChoiceFinishReason : IEquatable + internal readonly partial struct CreateChatCompletionResponseChoiceFinishReason : IEquatable { private readonly string _value; @@ -53,4 +53,3 @@ public CreateChatCompletionResponseChoiceFinishReason(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs index 6e7b9371a..26c056edb 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateChatCompletionResponseChoiceLogprobs : IJsonModel + internal partial class CreateChatCompletionResponseChoiceLogprobs : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -140,4 +140,3 @@ internal static CreateChatCompletionResponseChoiceLogprobs FromResponse(Pipeline } } } - diff --git 
a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.cs index db7e2ef1d..bd570efb9 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.cs @@ -4,10 +4,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateChatCompletionResponseChoiceLogprobs. - public partial class CreateChatCompletionResponseChoiceLogprobs + internal partial class CreateChatCompletionResponseChoiceLogprobs { /// /// Keeps track of any properties unknown to the library. @@ -66,4 +66,3 @@ internal CreateChatCompletionResponseChoiceLogprobs() public IReadOnlyList Content { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs index e95f7fff6..622b7b56a 100644 --- a/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateChatCompletionResponse_object. 
- public readonly partial struct CreateChatCompletionResponseObject : IEquatable + internal readonly partial struct CreateChatCompletionResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public CreateChatCompletionResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs index e729592b7..f5c3b1bb7 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateCompletionRequest : IJsonModel + internal partial class CreateCompletionRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -495,4 +495,3 @@ internal static CreateCompletionRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateCompletionRequest.cs b/.dotnet/src/Generated/Models/CreateCompletionRequest.cs index 65ce00804..4c439168a 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionRequest.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateCompletionRequest. - public partial class CreateCompletionRequest + internal partial class CreateCompletionRequest { /// /// Keeps track of any properties unknown to the library. 
@@ -397,4 +397,3 @@ internal CreateCompletionRequest() public string User { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs b/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs index f0afae0c4..784f43da8 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for model in CreateCompletionRequest. - public readonly partial struct CreateCompletionRequestModel : IEquatable + internal readonly partial struct CreateCompletionRequestModel : IEquatable { private readonly string _value; @@ -47,4 +47,3 @@ public CreateCompletionRequestModel(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs index 17ea98286..2a8633f00 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateCompletionResponse : IJsonModel + internal partial class CreateCompletionResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -186,4 +186,3 @@ internal static CreateCompletionResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponse.cs b/.dotnet/src/Generated/Models/CreateCompletionResponse.cs index 944f32b56..a0bb30624 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponse.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponse.cs @@ -5,13 +5,13 @@ using 
System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// /// Represents a completion response from the API. Note: both the streamed and non-streamed response /// objects share the same shape (unlike the chat endpoint). /// - public partial class CreateCompletionResponse + internal partial class CreateCompletionResponse { /// /// Keeps track of any properties unknown to the library. @@ -116,4 +116,3 @@ internal CreateCompletionResponse() public CompletionUsage Usage { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs index 0f9206ba2..b68a04694 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateCompletionResponseChoice : IJsonModel + internal partial class CreateCompletionResponseChoice : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -154,4 +154,3 @@ internal static CreateCompletionResponseChoice FromResponse(PipelineResponse res } } } - diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs index a0f883635..1cf6b8df1 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateCompletionResponseChoice. 
- public partial class CreateCompletionResponseChoice + internal partial class CreateCompletionResponseChoice { /// /// Keeps track of any properties unknown to the library. @@ -105,4 +105,3 @@ internal CreateCompletionResponseChoice() public CreateCompletionResponseChoiceFinishReason FinishReason { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs index d4ee35578..fc4a928ce 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for finish_reason in CreateCompletionResponseChoice. - public readonly partial struct CreateCompletionResponseChoiceFinishReason : IEquatable + internal readonly partial struct CreateCompletionResponseChoiceFinishReason : IEquatable { private readonly string _value; @@ -47,4 +47,3 @@ public CreateCompletionResponseChoiceFinishReason(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs index b3ba1559f..3658a0aab 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateCompletionResponseChoiceLogprobs : IJsonModel + internal partial class CreateCompletionResponseChoiceLogprobs : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -205,4 +205,3 @@ 
internal static CreateCompletionResponseChoiceLogprobs FromResponse(PipelineResp } } } - diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs index 598ec5916..be7a3a78c 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateCompletionResponseChoiceLogprobs. - public partial class CreateCompletionResponseChoiceLogprobs + internal partial class CreateCompletionResponseChoiceLogprobs { /// /// Keeps track of any properties unknown to the library. @@ -91,4 +91,3 @@ internal CreateCompletionResponseChoiceLogprobs() public IReadOnlyList TextOffset { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs index 24c670201..00ce256fc 100644 --- a/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateCompletionResponse_object. 
- public readonly partial struct CreateCompletionResponseObject : IEquatable + internal readonly partial struct CreateCompletionResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public CreateCompletionResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs index 7a8ff235b..1160d6206 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateEmbeddingRequest : IJsonModel + internal partial class CreateEmbeddingRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -174,4 +174,3 @@ internal static CreateEmbeddingRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs index c2a286d4c..7a649c374 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateEmbeddingRequest. - public partial class CreateEmbeddingRequest + internal partial class CreateEmbeddingRequest { /// /// Keeps track of any properties unknown to the library. 
@@ -182,4 +182,3 @@ internal CreateEmbeddingRequest() public string User { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs index e252744e4..18264f565 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for encoding_format in CreateEmbeddingRequest. - public readonly partial struct CreateEmbeddingRequestEncodingFormat : IEquatable + internal readonly partial struct CreateEmbeddingRequestEncodingFormat : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public CreateEmbeddingRequestEncodingFormat(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs index 628de9b7b..8570f4947 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for model in CreateEmbeddingRequest. 
- public readonly partial struct CreateEmbeddingRequestModel : IEquatable + internal readonly partial struct CreateEmbeddingRequestModel : IEquatable { private readonly string _value; @@ -47,4 +47,3 @@ public CreateEmbeddingRequestModel(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs index deaf48c02..656eafe81 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateEmbeddingResponse : IJsonModel + internal partial class CreateEmbeddingResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -152,4 +152,3 @@ internal static CreateEmbeddingResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs index f0695c29a..7d4a885fc 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateEmbeddingResponse. - public partial class CreateEmbeddingResponse + internal partial class CreateEmbeddingResponse { /// /// Keeps track of any properties unknown to the library. 
@@ -89,4 +89,3 @@ internal CreateEmbeddingResponse() public EmbeddingUsage Usage { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs index b98b10317..1d7bc7a1a 100644 --- a/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateEmbeddingResponse_object. - public readonly partial struct CreateEmbeddingResponseObject : IEquatable + internal readonly partial struct CreateEmbeddingResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public CreateEmbeddingResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs index f7d3ba503..852eb6127 100644 --- a/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateFileRequest : IJsonModel + internal partial class CreateFileRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -126,4 +126,3 @@ internal static CreateFileRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateFileRequest.cs b/.dotnet/src/Generated/Models/CreateFileRequest.cs index 9475c30af..6f39101d1 100644 --- a/.dotnet/src/Generated/Models/CreateFileRequest.cs +++ b/.dotnet/src/Generated/Models/CreateFileRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models 
+namespace OpenAI.Internal.Models { /// The CreateFileRequest. - public partial class CreateFileRequest + internal partial class CreateFileRequest { /// /// Keeps track of any properties unknown to the library. @@ -105,4 +105,3 @@ internal CreateFileRequest() public CreateFileRequestPurpose Purpose { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs b/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs index 51a939757..8dc7f4cd7 100644 --- a/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs +++ b/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for purpose in CreateFileRequest. - public readonly partial struct CreateFileRequestPurpose : IEquatable + internal readonly partial struct CreateFileRequestPurpose : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public CreateFileRequestPurpose(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs index 08d63808e..c03035768 100644 --- a/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateFineTuningJobRequest : IJsonModel + internal partial class CreateFineTuningJobRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -187,4 +187,3 @@ internal static CreateFineTuningJobRequest FromResponse(PipelineResponse respons } } } - diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs 
b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs index 618e6069d..e4a89072b 100644 --- a/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateFineTuningJobRequest. - public partial class CreateFineTuningJobRequest + internal partial class CreateFineTuningJobRequest { /// /// Keeps track of any properties unknown to the library. @@ -155,4 +155,3 @@ internal CreateFineTuningJobRequest() public string Suffix { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs index 98b42074d..6cd4a835a 100644 --- a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateFineTuningJobRequestHyperparameters : IJsonModel + internal partial class CreateFineTuningJobRequestHyperparameters : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -132,4 +132,3 @@ internal static CreateFineTuningJobRequestHyperparameters FromResponse(PipelineR } } } - diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs index 8094fc378..9c2712095 100644 --- a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs @@ -3,10 +3,10 @@ using System; using System.Collections.Generic; 
-namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateFineTuningJobRequestHyperparameters. - public partial class CreateFineTuningJobRequestHyperparameters + internal partial class CreateFineTuningJobRequestHyperparameters { /// /// Keeps track of any properties unknown to the library. @@ -102,4 +102,3 @@ internal CreateFineTuningJobRequestHyperparameters(BinaryData nEpochs, IDictiona public BinaryData NEpochs { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs index e36868cc4..670bdc0f5 100644 --- a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for model in CreateFineTuningJobRequest. - public readonly partial struct CreateFineTuningJobRequestModel : IEquatable + internal readonly partial struct CreateFineTuningJobRequestModel : IEquatable { private readonly string _value; @@ -47,4 +47,3 @@ public CreateFineTuningJobRequestModel(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs index 9de7cccac..5da48765a 100644 --- a/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateImageEditRequest : IJsonModel + internal partial class CreateImageEditRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -220,4 +220,3 @@ internal static CreateImageEditRequest 
FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequest.cs b/.dotnet/src/Generated/Models/CreateImageEditRequest.cs index 5b78b294b..136db1035 100644 --- a/.dotnet/src/Generated/Models/CreateImageEditRequest.cs +++ b/.dotnet/src/Generated/Models/CreateImageEditRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateImageEditRequest. - public partial class CreateImageEditRequest + internal partial class CreateImageEditRequest { /// /// Keeps track of any properties unknown to the library. @@ -149,4 +149,3 @@ internal CreateImageEditRequest() public string User { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs b/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs index 946936edb..60e9057d2 100644 --- a/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for model in CreateImageEditRequest. 
- public readonly partial struct CreateImageEditRequestModel : IEquatable + internal readonly partial struct CreateImageEditRequestModel : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public CreateImageEditRequestModel(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs index b398ca63d..374dccec4 100644 --- a/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for response_format in CreateImageEditRequest. - public readonly partial struct CreateImageEditRequestResponseFormat : IEquatable + internal readonly partial struct CreateImageEditRequestResponseFormat : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public CreateImageEditRequestResponseFormat(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs b/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs index 8e644e852..63269c1f2 100644 --- a/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs +++ b/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for size in CreateImageEditRequest. 
- public readonly partial struct CreateImageEditRequestSize : IEquatable + internal readonly partial struct CreateImageEditRequestSize : IEquatable { private readonly string _value; @@ -47,4 +47,3 @@ public CreateImageEditRequestSize(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs index 74b4555fc..b2f3758de 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateImageRequest : IJsonModel + internal partial class CreateImageRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -227,4 +227,3 @@ internal static CreateImageRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateImageRequest.cs b/.dotnet/src/Generated/Models/CreateImageRequest.cs index 0ddf94adb..552a5e8a7 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequest.cs +++ b/.dotnet/src/Generated/Models/CreateImageRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateImageRequest. - public partial class CreateImageRequest + internal partial class CreateImageRequest { /// /// Keeps track of any properties unknown to the library. 
@@ -138,4 +138,3 @@ internal CreateImageRequest() public string User { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateImageRequestModel.cs b/.dotnet/src/Generated/Models/CreateImageRequestModel.cs index e6401986c..6e8ad967e 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateImageRequestModel.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for model in CreateImageRequest. - public readonly partial struct CreateImageRequestModel : IEquatable + internal readonly partial struct CreateImageRequestModel : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public CreateImageRequestModel(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs b/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs index 236be9ebf..01060a04f 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs +++ b/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for quality in CreateImageRequest. 
- public readonly partial struct CreateImageRequestQuality : IEquatable + internal readonly partial struct CreateImageRequestQuality : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public CreateImageRequestQuality(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs index e7f95d5ee..22d92d612 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for response_format in CreateImageRequest. - public readonly partial struct CreateImageRequestResponseFormat : IEquatable + internal readonly partial struct CreateImageRequestResponseFormat : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public CreateImageRequestResponseFormat(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateImageRequestSize.cs b/.dotnet/src/Generated/Models/CreateImageRequestSize.cs index 091e4d3b0..9bff12b78 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequestSize.cs +++ b/.dotnet/src/Generated/Models/CreateImageRequestSize.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for size in CreateImageRequest. 
- public readonly partial struct CreateImageRequestSize : IEquatable + internal readonly partial struct CreateImageRequestSize : IEquatable { private readonly string _value; @@ -53,4 +53,3 @@ public CreateImageRequestSize(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs b/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs index 98f67425f..200565a5f 100644 --- a/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs +++ b/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for style in CreateImageRequest. - public readonly partial struct CreateImageRequestStyle : IEquatable + internal readonly partial struct CreateImageRequestStyle : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public CreateImageRequestStyle(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs index 642a6c439..4c9d18ed2 100644 --- a/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateImageVariationRequest : IJsonModel + internal partial class CreateImageVariationRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -197,4 +197,3 @@ internal static CreateImageVariationRequest FromResponse(PipelineResponse respon } } } - diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs index 
a034e2faa..1986159f7 100644 --- a/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateImageVariationRequest. - public partial class CreateImageVariationRequest + internal partial class CreateImageVariationRequest { /// /// Keeps track of any properties unknown to the library. @@ -117,4 +117,3 @@ internal CreateImageVariationRequest() public string User { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs index d40886648..13ab94757 100644 --- a/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for model in CreateImageVariationRequest. - public readonly partial struct CreateImageVariationRequestModel : IEquatable + internal readonly partial struct CreateImageVariationRequestModel : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public CreateImageVariationRequestModel(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs index 94f353a25..5f99e6bcf 100644 --- a/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for response_format in CreateImageVariationRequest. 
- public readonly partial struct CreateImageVariationRequestResponseFormat : IEquatable + internal readonly partial struct CreateImageVariationRequestResponseFormat : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public CreateImageVariationRequestResponseFormat(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs index 0374c3214..f10e28466 100644 --- a/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for size in CreateImageVariationRequest. - public readonly partial struct CreateImageVariationRequestSize : IEquatable + internal readonly partial struct CreateImageVariationRequestSize : IEquatable { private readonly string _value; @@ -47,4 +47,3 @@ public CreateImageVariationRequestSize(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs index 3386db660..bc6fa6aea 100644 --- a/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateMessageRequest : IJsonModel + internal partial class CreateMessageRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -184,4 +184,3 @@ internal static CreateMessageRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateMessageRequest.cs 
b/.dotnet/src/Generated/Models/CreateMessageRequest.cs index 7d2855a9f..9a609327e 100644 --- a/.dotnet/src/Generated/Models/CreateMessageRequest.cs +++ b/.dotnet/src/Generated/Models/CreateMessageRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateMessageRequest. - public partial class CreateMessageRequest + internal partial class CreateMessageRequest { /// /// Keeps track of any properties unknown to the library. @@ -100,4 +100,3 @@ internal CreateMessageRequest() public IDictionary Metadata { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs b/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs index f5be49786..d0118f1cb 100644 --- a/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs +++ b/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateMessageRequest_role. 
- public readonly partial struct CreateMessageRequestRole : IEquatable + internal readonly partial struct CreateMessageRequestRole : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public CreateMessageRequestRole(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs index 50c0cf4f7..b131e485d 100644 --- a/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateModerationRequest : IJsonModel + internal partial class CreateModerationRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -140,4 +140,3 @@ internal static CreateModerationRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateModerationRequest.cs b/.dotnet/src/Generated/Models/CreateModerationRequest.cs index b92114f96..d015993c7 100644 --- a/.dotnet/src/Generated/Models/CreateModerationRequest.cs +++ b/.dotnet/src/Generated/Models/CreateModerationRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateModerationRequest. - public partial class CreateModerationRequest + internal partial class CreateModerationRequest { /// /// Keeps track of any properties unknown to the library. @@ -125,4 +125,3 @@ internal CreateModerationRequest() public CreateModerationRequestModel? 
Model { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs b/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs index af410f3a0..2accc5ab0 100644 --- a/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for model in CreateModerationRequest. - public readonly partial struct CreateModerationRequestModel : IEquatable + internal readonly partial struct CreateModerationRequestModel : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public CreateModerationRequestModel(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs index 1d3fd1188..791fd71f1 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateModerationResponse : IJsonModel + internal partial class CreateModerationResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -144,4 +144,3 @@ internal static CreateModerationResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateModerationResponse.cs b/.dotnet/src/Generated/Models/CreateModerationResponse.cs index caf0c4858..8f42f77c0 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponse.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace 
OpenAI.Internal.Models { /// Represents policy compliance report by OpenAI's content moderation model against a given input. - public partial class CreateModerationResponse + internal partial class CreateModerationResponse { /// /// Keeps track of any properties unknown to the library. @@ -84,4 +84,3 @@ internal CreateModerationResponse() public IReadOnlyList Results { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs index edabb3d30..6a69b7a0d 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateModerationResponseResult : IJsonModel + internal partial class CreateModerationResponseResult : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -134,4 +134,3 @@ internal static CreateModerationResponseResult FromResponse(PipelineResponse res } } } - diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs index d3abebbc3..baea8e148 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateModerationResponseResult. - public partial class CreateModerationResponseResult + internal partial class CreateModerationResponseResult { /// /// Keeps track of any properties unknown to the library. 
@@ -82,4 +82,3 @@ internal CreateModerationResponseResult() public CreateModerationResponseResultCategoryScores CategoryScores { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs index 5dce74b53..4f1c9d5eb 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateModerationResponseResultCategories : IJsonModel + internal partial class CreateModerationResponseResultCategories : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -198,4 +198,3 @@ internal static CreateModerationResponseResultCategories FromResponse(PipelineRe } } } - diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs index 8af6aa519..2bdc02449 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs @@ -3,10 +3,10 @@ using System; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateModerationResponseResultCategories. - public partial class CreateModerationResponseResultCategories + internal partial class CreateModerationResponseResultCategories { /// /// Keeps track of any properties unknown to the library. 
@@ -185,4 +185,3 @@ internal CreateModerationResponseResultCategories() public bool ViolenceGraphic { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs index 50b22a9d6..6a8189dc3 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateModerationResponseResultCategoryScores : IJsonModel + internal partial class CreateModerationResponseResultCategoryScores : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -198,4 +198,3 @@ internal static CreateModerationResponseResultCategoryScores FromResponse(Pipeli } } } - diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs index 0ee6e8d5a..17d49937e 100644 --- a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs @@ -3,10 +3,10 @@ using System; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateModerationResponseResultCategoryScores. - public partial class CreateModerationResponseResultCategoryScores + internal partial class CreateModerationResponseResultCategoryScores { /// /// Keeps track of any properties unknown to the library. 
@@ -125,4 +125,3 @@ internal CreateModerationResponseResultCategoryScores() public double ViolenceGraphic { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs index 4eda947a7..3bdb98f68 100644 --- a/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateRunRequest : IJsonModel + internal partial class CreateRunRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -271,4 +271,3 @@ internal static CreateRunRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateRunRequest.cs b/.dotnet/src/Generated/Models/CreateRunRequest.cs index ea2ac5123..e7bf9f448 100644 --- a/.dotnet/src/Generated/Models/CreateRunRequest.cs +++ b/.dotnet/src/Generated/Models/CreateRunRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateRunRequest. - public partial class CreateRunRequest + internal partial class CreateRunRequest { /// /// Keeps track of any properties unknown to the library. 
@@ -152,4 +152,3 @@ internal CreateRunRequest() public IDictionary Metadata { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs index f2216a6e1..46fd557d6 100644 --- a/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateSpeechRequest : IJsonModel + internal partial class CreateSpeechRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -164,4 +164,3 @@ internal static CreateSpeechRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequest.cs b/.dotnet/src/Generated/Models/CreateSpeechRequest.cs index eb2c71ae0..470717796 100644 --- a/.dotnet/src/Generated/Models/CreateSpeechRequest.cs +++ b/.dotnet/src/Generated/Models/CreateSpeechRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateSpeechRequest. - public partial class CreateSpeechRequest + internal partial class CreateSpeechRequest { /// /// Keeps track of any properties unknown to the library. @@ -101,4 +101,3 @@ internal CreateSpeechRequest() public double? 
Speed { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs b/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs index 590078a92..806f493d3 100644 --- a/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for model in CreateSpeechRequest. - public readonly partial struct CreateSpeechRequestModel : IEquatable + internal readonly partial struct CreateSpeechRequestModel : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public CreateSpeechRequestModel(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs index 4694fe9d7..186f59cc2 100644 --- a/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for response_format in CreateSpeechRequest. 
- public readonly partial struct CreateSpeechRequestResponseFormat : IEquatable + internal readonly partial struct CreateSpeechRequestResponseFormat : IEquatable { private readonly string _value; @@ -50,4 +50,3 @@ public CreateSpeechRequestResponseFormat(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs b/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs index e764eed85..be68af49d 100644 --- a/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs +++ b/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for voice in CreateSpeechRequest. - public readonly partial struct CreateSpeechRequestVoice : IEquatable + internal readonly partial struct CreateSpeechRequestVoice : IEquatable { private readonly string _value; @@ -56,4 +56,3 @@ public CreateSpeechRequestVoice(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs index 384147a67..b3281386f 100644 --- a/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateThreadAndRunRequest : IJsonModel + internal partial class CreateThreadAndRunRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -263,4 +263,3 @@ internal static CreateThreadAndRunRequest FromResponse(PipelineResponse response } } } - diff --git a/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs 
index b212c90c5..942ac10f1 100644 --- a/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs +++ b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateThreadAndRunRequest. - public partial class CreateThreadAndRunRequest + internal partial class CreateThreadAndRunRequest { /// /// Keeps track of any properties unknown to the library. @@ -146,4 +146,3 @@ internal CreateThreadAndRunRequest() public IDictionary Metadata { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs index b899f1a9d..521c12b25 100644 --- a/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateThreadRequest : IJsonModel + internal partial class CreateThreadRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -168,4 +168,3 @@ internal static CreateThreadRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateThreadRequest.cs b/.dotnet/src/Generated/Models/CreateThreadRequest.cs index a31999cb7..0fd5b0167 100644 --- a/.dotnet/src/Generated/Models/CreateThreadRequest.cs +++ b/.dotnet/src/Generated/Models/CreateThreadRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateThreadRequest. - public partial class CreateThreadRequest + internal partial class CreateThreadRequest { /// /// Keeps track of any properties unknown to the library. 
@@ -73,4 +73,3 @@ internal CreateThreadRequest(IList messages, IDictionary Metadata { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs index 8aa3b8a97..ac4870484 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateTranscriptionRequest : IJsonModel + internal partial class CreateTranscriptionRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -178,4 +178,3 @@ internal static CreateTranscriptionRequest FromResponse(PipelineResponse respons } } } - diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs index 7cccd2c15..9bdc65381 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateTranscriptionRequest. - public partial class CreateTranscriptionRequest + internal partial class CreateTranscriptionRequest { /// /// Keeps track of any properties unknown to the library. @@ -143,4 +143,3 @@ internal CreateTranscriptionRequest() public double? 
Temperature { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs index c4639fe72..374f27dc3 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for model in CreateTranscriptionRequest. - public readonly partial struct CreateTranscriptionRequestModel : IEquatable + internal readonly partial struct CreateTranscriptionRequestModel : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public CreateTranscriptionRequestModel(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs index 1eb2b8b48..e8211eeef 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for response_format in CreateTranscriptionRequest. 
- public readonly partial struct CreateTranscriptionRequestResponseFormat : IEquatable + internal readonly partial struct CreateTranscriptionRequestResponseFormat : IEquatable { private readonly string _value; @@ -53,4 +53,3 @@ public CreateTranscriptionRequestResponseFormat(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs index 0052a0f0b..326257fc1 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateTranscriptionResponse : IJsonModel + internal partial class CreateTranscriptionResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -184,4 +184,3 @@ internal static CreateTranscriptionResponse FromResponse(PipelineResponse respon } } } - diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs index 47f9428ca..ce8ed266d 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateTranscriptionResponse. - public partial class CreateTranscriptionResponse + internal partial class CreateTranscriptionResponse { /// /// Keeps track of any properties unknown to the library. 
@@ -92,4 +92,3 @@ internal CreateTranscriptionResponse() public IReadOnlyList Segments { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs b/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs index 45bb38166..167892816 100644 --- a/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs +++ b/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateTranscriptionResponse_task. - public readonly partial struct CreateTranscriptionResponseTask : IEquatable + internal readonly partial struct CreateTranscriptionResponseTask : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public CreateTranscriptionResponseTask(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs index 2b415a177..521582c54 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateTranslationRequest : IJsonModel + internal partial class CreateTranslationRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -167,4 +167,3 @@ internal static CreateTranslationRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequest.cs b/.dotnet/src/Generated/Models/CreateTranslationRequest.cs index fa5224a3e..46911bdf5 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationRequest.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationRequest.cs @@ -4,10 +4,10 @@ 
using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateTranslationRequest. - public partial class CreateTranslationRequest + internal partial class CreateTranslationRequest { /// /// Keeps track of any properties unknown to the library. @@ -131,4 +131,3 @@ internal CreateTranslationRequest() public double? Temperature { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs b/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs index 10536d253..633bfd4c6 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for model in CreateTranslationRequest. - public readonly partial struct CreateTranslationRequestModel : IEquatable + internal readonly partial struct CreateTranslationRequestModel : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public CreateTranslationRequestModel(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs index 7c9a6ff50..39ef19095 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for response_format in CreateTranslationRequest. 
- public readonly partial struct CreateTranslationRequestResponseFormat : IEquatable + internal readonly partial struct CreateTranslationRequestResponseFormat : IEquatable { private readonly string _value; @@ -53,4 +53,3 @@ public CreateTranslationRequestResponseFormat(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs index 10254dee9..46025de53 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class CreateTranslationResponse : IJsonModel + internal partial class CreateTranslationResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -184,4 +184,3 @@ internal static CreateTranslationResponse FromResponse(PipelineResponse response } } } - diff --git a/.dotnet/src/Generated/Models/CreateTranslationResponse.cs b/.dotnet/src/Generated/Models/CreateTranslationResponse.cs index bc4f03097..0b8ebf971 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationResponse.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationResponse.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateTranslationResponse. - public partial class CreateTranslationResponse + internal partial class CreateTranslationResponse { /// /// Keeps track of any properties unknown to the library. 
@@ -92,4 +92,3 @@ internal CreateTranslationResponse() public IReadOnlyList Segments { get; } } } - diff --git a/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs b/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs index fe24f6343..db1eaf0f3 100644 --- a/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs +++ b/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The CreateTranslationResponse_task. - public readonly partial struct CreateTranslationResponseTask : IEquatable + internal readonly partial struct CreateTranslationResponseTask : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public CreateTranslationResponseTask(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs index 997b39d80..c3e231655 100644 --- a/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class DeleteAssistantFileResponse : IJsonModel + internal partial class DeleteAssistantFileResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -134,4 +134,3 @@ internal static DeleteAssistantFileResponse FromResponse(PipelineResponse respon } } } - diff --git a/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs index 2172613e2..d4fe927c2 100644 --- a/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs +++ b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs @@ 
-4,13 +4,13 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// /// Deletes the association between the assistant and the file, but does not delete the /// [File](/docs/api-reference/files) object itself. /// - public partial class DeleteAssistantFileResponse + internal partial class DeleteAssistantFileResponse { /// /// Keeps track of any properties unknown to the library. @@ -82,4 +82,3 @@ internal DeleteAssistantFileResponse() public DeleteAssistantFileResponseObject Object { get; } = DeleteAssistantFileResponseObject.AssistantFileDeleted; } } - diff --git a/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs b/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs index df0a1e5a6..d40f946af 100644 --- a/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs +++ b/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The DeleteAssistantFileResponse_object. 
- public readonly partial struct DeleteAssistantFileResponseObject : IEquatable + internal readonly partial struct DeleteAssistantFileResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public DeleteAssistantFileResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs index 7b575f777..b6c0c9c62 100644 --- a/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class DeleteAssistantResponse : IJsonModel + internal partial class DeleteAssistantResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -134,4 +134,3 @@ internal static DeleteAssistantResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs b/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs index 4ce652ffd..ab93ae628 100644 --- a/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs +++ b/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The DeleteAssistantResponse. - public partial class DeleteAssistantResponse + internal partial class DeleteAssistantResponse { /// /// Keeps track of any properties unknown to the library. 
@@ -79,4 +79,3 @@ internal DeleteAssistantResponse() public DeleteAssistantResponseObject Object { get; } = DeleteAssistantResponseObject.AssistantDeleted; } } - diff --git a/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs b/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs index 4ce063190..acc386d38 100644 --- a/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs +++ b/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The DeleteAssistantResponse_object. - public readonly partial struct DeleteAssistantResponseObject : IEquatable + internal readonly partial struct DeleteAssistantResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public DeleteAssistantResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs index 21416fc19..2ff94aa3f 100644 --- a/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class DeleteFileResponse : IJsonModel + internal partial class DeleteFileResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -134,4 +134,3 @@ internal static DeleteFileResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/DeleteFileResponse.cs b/.dotnet/src/Generated/Models/DeleteFileResponse.cs index 06777ae53..5e91f557a 100644 --- a/.dotnet/src/Generated/Models/DeleteFileResponse.cs +++ b/.dotnet/src/Generated/Models/DeleteFileResponse.cs @@ -4,10 +4,10 @@ using 
OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The DeleteFileResponse. - public partial class DeleteFileResponse + internal partial class DeleteFileResponse { /// /// Keeps track of any properties unknown to the library. @@ -80,4 +80,3 @@ internal DeleteFileResponse() public bool Deleted { get; } } } - diff --git a/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs b/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs index 3f25d59f3..777211698 100644 --- a/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs +++ b/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The DeleteFileResponse_object. - public readonly partial struct DeleteFileResponseObject : IEquatable + internal readonly partial struct DeleteFileResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public DeleteFileResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs index e1865ef13..afd368541 100644 --- a/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class DeleteModelResponse : IJsonModel + internal partial class DeleteModelResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -134,4 +134,3 @@ internal static DeleteModelResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/DeleteModelResponse.cs 
b/.dotnet/src/Generated/Models/DeleteModelResponse.cs index 96f122d77..1e5f9e687 100644 --- a/.dotnet/src/Generated/Models/DeleteModelResponse.cs +++ b/.dotnet/src/Generated/Models/DeleteModelResponse.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The DeleteModelResponse. - public partial class DeleteModelResponse + internal partial class DeleteModelResponse { /// /// Keeps track of any properties unknown to the library. @@ -79,4 +79,3 @@ internal DeleteModelResponse() public DeleteModelResponseObject Object { get; } = DeleteModelResponseObject.Model; } } - diff --git a/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs b/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs index 25f957c69..9a0e654a6 100644 --- a/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs +++ b/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The DeleteModelResponse_object. 
- public readonly partial struct DeleteModelResponseObject : IEquatable + internal readonly partial struct DeleteModelResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public DeleteModelResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs index b6c04e508..ec4ac2da6 100644 --- a/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class DeleteThreadResponse : IJsonModel + internal partial class DeleteThreadResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -134,4 +134,3 @@ internal static DeleteThreadResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/DeleteThreadResponse.cs b/.dotnet/src/Generated/Models/DeleteThreadResponse.cs index 368b9ff80..425135cb5 100644 --- a/.dotnet/src/Generated/Models/DeleteThreadResponse.cs +++ b/.dotnet/src/Generated/Models/DeleteThreadResponse.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The DeleteThreadResponse. - public partial class DeleteThreadResponse + internal partial class DeleteThreadResponse { /// /// Keeps track of any properties unknown to the library. 
@@ -79,4 +79,3 @@ internal DeleteThreadResponse() public DeleteThreadResponseObject Object { get; } = DeleteThreadResponseObject.ThreadDeleted; } } - diff --git a/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs b/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs index 7e4b34167..5dcbdb507 100644 --- a/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs +++ b/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The DeleteThreadResponse_object. - public readonly partial struct DeleteThreadResponseObject : IEquatable + internal readonly partial struct DeleteThreadResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public DeleteThreadResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/Embedding.Serialization.cs b/.dotnet/src/Generated/Models/Embedding.Serialization.cs index b3506e51c..08a9811ea 100644 --- a/.dotnet/src/Generated/Models/Embedding.Serialization.cs +++ b/.dotnet/src/Generated/Models/Embedding.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class Embedding : IJsonModel + internal partial class Embedding : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -141,4 +141,3 @@ internal static Embedding FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/Embedding.cs b/.dotnet/src/Generated/Models/Embedding.cs index ecabc7df9..8e4f0cced 100644 --- a/.dotnet/src/Generated/Models/Embedding.cs +++ b/.dotnet/src/Generated/Models/Embedding.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Represents an 
embedding vector returned by embedding endpoint. - public partial class Embedding + internal partial class Embedding { /// /// Keeps track of any properties unknown to the library. @@ -126,4 +126,3 @@ internal Embedding() public EmbeddingObject Object { get; } = EmbeddingObject.Embedding; } } - diff --git a/.dotnet/src/Generated/Models/EmbeddingObject.cs b/.dotnet/src/Generated/Models/EmbeddingObject.cs index 2cc2f012c..6f4bac9bd 100644 --- a/.dotnet/src/Generated/Models/EmbeddingObject.cs +++ b/.dotnet/src/Generated/Models/EmbeddingObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The Embedding_object. - public readonly partial struct EmbeddingObject : IEquatable + internal readonly partial struct EmbeddingObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public EmbeddingObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/EmbeddingUsage.Serialization.cs b/.dotnet/src/Generated/Models/EmbeddingUsage.Serialization.cs index 23beb37ad..ca241d62e 100644 --- a/.dotnet/src/Generated/Models/EmbeddingUsage.Serialization.cs +++ b/.dotnet/src/Generated/Models/EmbeddingUsage.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class EmbeddingUsage : IJsonModel + internal partial class EmbeddingUsage : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -126,4 +126,3 @@ internal static EmbeddingUsage FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/EmbeddingUsage.cs b/.dotnet/src/Generated/Models/EmbeddingUsage.cs index 6c03d3210..a3c1bf356 100644 --- a/.dotnet/src/Generated/Models/EmbeddingUsage.cs +++ b/.dotnet/src/Generated/Models/EmbeddingUsage.cs @@ -3,10 +3,10 @@ using System; using 
System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The EmbeddingUsage. - public partial class EmbeddingUsage + internal partial class EmbeddingUsage { /// /// Keeps track of any properties unknown to the library. @@ -71,4 +71,3 @@ internal EmbeddingUsage() public long TotalTokens { get; } } } - diff --git a/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs index 930aa5fc5..c8457dfdf 100644 --- a/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs +++ b/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class FineTuningJob : IJsonModel + internal partial class FineTuningJob : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -292,4 +292,3 @@ internal static FineTuningJob FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/FineTuningJob.cs b/.dotnet/src/Generated/Models/FineTuningJob.cs index 773d90651..52609ca25 100644 --- a/.dotnet/src/Generated/Models/FineTuningJob.cs +++ b/.dotnet/src/Generated/Models/FineTuningJob.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The FineTuningJob. - public partial class FineTuningJob + internal partial class FineTuningJob { /// /// Keeps track of any properties unknown to the library. 
@@ -233,4 +233,3 @@ internal FineTuningJob() public FineTuningJobError Error { get; } } } - diff --git a/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs index 6bfe0c908..fab47e3bd 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class FineTuningJobError : IJsonModel + internal partial class FineTuningJobError : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -155,4 +155,3 @@ internal static FineTuningJobError FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/FineTuningJobError.cs b/.dotnet/src/Generated/Models/FineTuningJobError.cs index 98268009c..c3798392a 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobError.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobError.cs @@ -3,10 +3,10 @@ using System; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The FineTuningJobError. - public partial class FineTuningJobError + internal partial class FineTuningJobError { /// /// Keeps track of any properties unknown to the library. 
@@ -72,4 +72,3 @@ internal FineTuningJobError(string message, string code, string param, IDictiona public string Param { get; } } } - diff --git a/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs index f61e577d3..0176faabe 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class FineTuningJobEvent : IJsonModel + internal partial class FineTuningJobEvent : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -150,4 +150,3 @@ internal static FineTuningJobEvent FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/FineTuningJobEvent.cs b/.dotnet/src/Generated/Models/FineTuningJobEvent.cs index 1ed3dacf3..30f900667 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobEvent.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobEvent.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The FineTuningJobEvent. - public partial class FineTuningJobEvent + internal partial class FineTuningJobEvent { /// /// Keeps track of any properties unknown to the library. 
@@ -95,4 +95,3 @@ internal FineTuningJobEvent() public string Message { get; } } } - diff --git a/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs b/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs index 1e0ae3a85..cc9aa24a5 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for level in FineTuningJobEvent. - public readonly partial struct FineTuningJobEventLevel : IEquatable + internal readonly partial struct FineTuningJobEventLevel : IEquatable { private readonly string _value; @@ -47,4 +47,3 @@ public FineTuningJobEventLevel(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs index 3b4a5c3aa..c08e0565b 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class FineTuningJobHyperparameters : IJsonModel + internal partial class FineTuningJobHyperparameters : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -132,4 +132,3 @@ internal static FineTuningJobHyperparameters FromResponse(PipelineResponse respo } } } - diff --git a/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs index 830aef443..40a9d6547 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs @@ -3,10 +3,10 @@ using System; using 
System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The FineTuningJobHyperparameters. - public partial class FineTuningJobHyperparameters + internal partial class FineTuningJobHyperparameters { /// /// Keeps track of any properties unknown to the library. @@ -108,4 +108,3 @@ internal FineTuningJobHyperparameters(BinaryData nEpochs, IDictionary The FineTuningJob_object. - public readonly partial struct FineTuningJobObject : IEquatable + internal readonly partial struct FineTuningJobObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public FineTuningJobObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/FineTuningJobStatus.cs b/.dotnet/src/Generated/Models/FineTuningJobStatus.cs index f9715b5b6..73c350bca 100644 --- a/.dotnet/src/Generated/Models/FineTuningJobStatus.cs +++ b/.dotnet/src/Generated/Models/FineTuningJobStatus.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for status in FineTuningJob. 
- public readonly partial struct FineTuningJobStatus : IEquatable + internal readonly partial struct FineTuningJobStatus : IEquatable { private readonly string _value; @@ -56,4 +56,3 @@ public FineTuningJobStatus(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs b/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs index fec24f853..1b0e30bd1 100644 --- a/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class FunctionObject : IJsonModel + internal partial class FunctionObject : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -144,4 +144,3 @@ internal static FunctionObject FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/FunctionObject.cs b/.dotnet/src/Generated/Models/FunctionObject.cs index ddd65ebfa..456b879f2 100644 --- a/.dotnet/src/Generated/Models/FunctionObject.cs +++ b/.dotnet/src/Generated/Models/FunctionObject.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The FunctionObject. - public partial class FunctionObject + internal partial class FunctionObject { /// /// Keeps track of any properties unknown to the library. 
@@ -92,4 +92,3 @@ internal FunctionObject() public FunctionParameters Parameters { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs b/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs index 3d473765a..5af866e0b 100644 --- a/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs +++ b/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class FunctionParameters : IJsonModel + internal partial class FunctionParameters : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -104,4 +104,3 @@ internal static FunctionParameters FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/FunctionParameters.cs b/.dotnet/src/Generated/Models/FunctionParameters.cs index 8490bdc5f..bb8e5abff 100644 --- a/.dotnet/src/Generated/Models/FunctionParameters.cs +++ b/.dotnet/src/Generated/Models/FunctionParameters.cs @@ -4,7 +4,7 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// /// The parameters the functions accepts, described as a JSON Schema object. See the @@ -13,7 +13,7 @@ namespace OpenAI.Models /// about the format.\n\nTo describe a function that accepts no parameters, provide the value /// `{\"type\": \"object\", \"properties\": {}}`. /// - public partial class FunctionParameters + internal partial class FunctionParameters { /// Initializes a new instance of . 
public FunctionParameters() @@ -61,4 +61,3 @@ internal FunctionParameters(IDictionary additionalProperties public IDictionary AdditionalProperties { get; } } } - diff --git a/.dotnet/src/Generated/Models/Image.Serialization.cs b/.dotnet/src/Generated/Models/Image.Serialization.cs index dc41f82a8..e568bb123 100644 --- a/.dotnet/src/Generated/Models/Image.Serialization.cs +++ b/.dotnet/src/Generated/Models/Image.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class Image : IJsonModel + internal partial class Image : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -151,4 +151,3 @@ internal static Image FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/Image.cs b/.dotnet/src/Generated/Models/Image.cs index 7d6c304f2..ca021414d 100644 --- a/.dotnet/src/Generated/Models/Image.cs +++ b/.dotnet/src/Generated/Models/Image.cs @@ -3,10 +3,10 @@ using System; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Represents the url or the content of an image generated by the OpenAI API. - public partial class Image + internal partial class Image { /// /// Keeps track of any properties unknown to the library. 
@@ -81,4 +81,3 @@ internal Image(BinaryData b64Json, Uri url, string revisedPrompt, IDictionary + internal partial class ImagesResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -136,4 +136,3 @@ internal static ImagesResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ImagesResponse.cs b/.dotnet/src/Generated/Models/ImagesResponse.cs index 44b649f3f..c73c3741c 100644 --- a/.dotnet/src/Generated/Models/ImagesResponse.cs +++ b/.dotnet/src/Generated/Models/ImagesResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ImagesResponse. - public partial class ImagesResponse + internal partial class ImagesResponse { /// /// Keeps track of any properties unknown to the library. @@ -76,4 +76,3 @@ internal ImagesResponse() public IReadOnlyList Data { get; } } } - diff --git a/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs index 6e6f9758b..d0c12f66b 100644 --- a/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ListAssistantFilesResponse : IJsonModel + internal partial class ListAssistantFilesResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -160,4 +160,3 @@ internal static ListAssistantFilesResponse FromResponse(PipelineResponse respons } } } - diff --git a/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs index 4537942a4..96063de70 100644 --- 
a/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs +++ b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListAssistantFilesResponse. - public partial class ListAssistantFilesResponse + internal partial class ListAssistantFilesResponse { /// /// Keeps track of any properties unknown to the library. @@ -95,4 +95,3 @@ internal ListAssistantFilesResponse() public bool HasMore { get; } } } - diff --git a/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs b/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs index 4b8e72c7e..bfb6cd5f6 100644 --- a/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListAssistantFilesResponse_object. 
- public readonly partial struct ListAssistantFilesResponseObject : IEquatable + internal readonly partial struct ListAssistantFilesResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public ListAssistantFilesResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs index 573a35240..a94da2024 100644 --- a/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ListAssistantsResponse : IJsonModel + internal partial class ListAssistantsResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -160,4 +160,3 @@ internal static ListAssistantsResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ListAssistantsResponse.cs b/.dotnet/src/Generated/Models/ListAssistantsResponse.cs index bf553f319..032d0dd9a 100644 --- a/.dotnet/src/Generated/Models/ListAssistantsResponse.cs +++ b/.dotnet/src/Generated/Models/ListAssistantsResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListAssistantsResponse. - public partial class ListAssistantsResponse + internal partial class ListAssistantsResponse { /// /// Keeps track of any properties unknown to the library. 
@@ -95,4 +95,3 @@ internal ListAssistantsResponse() public bool HasMore { get; } } } - diff --git a/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs b/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs index 207fe09fa..8c9ff2360 100644 --- a/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListAssistantsResponse_object. - public readonly partial struct ListAssistantsResponseObject : IEquatable + internal readonly partial struct ListAssistantsResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public ListAssistantsResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs index 24134e771..088451116 100644 --- a/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ListFilesResponse : IJsonModel + internal partial class ListFilesResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -136,4 +136,3 @@ internal static ListFilesResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ListFilesResponse.cs b/.dotnet/src/Generated/Models/ListFilesResponse.cs index 7447294ac..a79ec60fc 100644 --- a/.dotnet/src/Generated/Models/ListFilesResponse.cs +++ b/.dotnet/src/Generated/Models/ListFilesResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { 
/// The ListFilesResponse. - public partial class ListFilesResponse + internal partial class ListFilesResponse { /// /// Keeps track of any properties unknown to the library. @@ -74,4 +74,3 @@ internal ListFilesResponse() public ListFilesResponseObject Object { get; } = ListFilesResponseObject.List; } } - diff --git a/.dotnet/src/Generated/Models/ListFilesResponseObject.cs b/.dotnet/src/Generated/Models/ListFilesResponseObject.cs index 1c8be15c3..c2a5e6e52 100644 --- a/.dotnet/src/Generated/Models/ListFilesResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListFilesResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListFilesResponse_object. - public readonly partial struct ListFilesResponseObject : IEquatable + internal readonly partial struct ListFilesResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public ListFilesResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs index 588d06c98..b2595af03 100644 --- a/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ListFineTuningJobEventsResponse : IJsonModel + internal partial class ListFineTuningJobEventsResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -136,4 +136,3 @@ internal static ListFineTuningJobEventsResponse FromResponse(PipelineResponse re } } } - diff --git a/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs 
b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs index b79f9a929..6ff86e39b 100644 --- a/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs +++ b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListFineTuningJobEventsResponse. - public partial class ListFineTuningJobEventsResponse + internal partial class ListFineTuningJobEventsResponse { /// /// Keeps track of any properties unknown to the library. @@ -77,4 +77,3 @@ internal ListFineTuningJobEventsResponse() public IReadOnlyList Data { get; } } } - diff --git a/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs index c8585eaf5..6f16b2c6c 100644 --- a/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ListMessageFilesResponse : IJsonModel + internal partial class ListMessageFilesResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -160,4 +160,3 @@ internal static ListMessageFilesResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs b/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs index be35d9455..80fdc2499 100644 --- a/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs +++ b/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListMessageFilesResponse. 
- public partial class ListMessageFilesResponse + internal partial class ListMessageFilesResponse { /// /// Keeps track of any properties unknown to the library. @@ -95,4 +95,3 @@ internal ListMessageFilesResponse() public bool HasMore { get; } } } - diff --git a/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs b/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs index 01cd0514a..b1eae36d8 100644 --- a/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListMessageFilesResponse_object. - public readonly partial struct ListMessageFilesResponseObject : IEquatable + internal readonly partial struct ListMessageFilesResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public ListMessageFilesResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs index fd1dad634..dbf0afd83 100644 --- a/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ListMessagesResponse : IJsonModel + internal partial class ListMessagesResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -160,4 +160,3 @@ internal static ListMessagesResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ListMessagesResponse.cs b/.dotnet/src/Generated/Models/ListMessagesResponse.cs index 56ef1f2ec..fc2d652e0 100644 --- 
a/.dotnet/src/Generated/Models/ListMessagesResponse.cs +++ b/.dotnet/src/Generated/Models/ListMessagesResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListMessagesResponse. - public partial class ListMessagesResponse + internal partial class ListMessagesResponse { /// /// Keeps track of any properties unknown to the library. @@ -95,4 +95,3 @@ internal ListMessagesResponse() public bool HasMore { get; } } } - diff --git a/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs b/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs index 01ad83164..a3d4be1eb 100644 --- a/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListMessagesResponse_object. - public readonly partial struct ListMessagesResponseObject : IEquatable + internal readonly partial struct ListMessagesResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public ListMessagesResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs index 36da18e24..45281c137 100644 --- a/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ListModelsResponse : IJsonModel + internal partial class ListModelsResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -136,4 +136,3 @@ internal static ListModelsResponse 
FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ListModelsResponse.cs b/.dotnet/src/Generated/Models/ListModelsResponse.cs index ee8f8e350..b0751334c 100644 --- a/.dotnet/src/Generated/Models/ListModelsResponse.cs +++ b/.dotnet/src/Generated/Models/ListModelsResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListModelsResponse. - public partial class ListModelsResponse + internal partial class ListModelsResponse { /// /// Keeps track of any properties unknown to the library. @@ -75,4 +75,3 @@ internal ListModelsResponse() public IReadOnlyList Data { get; } } } - diff --git a/.dotnet/src/Generated/Models/ListModelsResponseObject.cs b/.dotnet/src/Generated/Models/ListModelsResponseObject.cs index 081da58be..b9e471c75 100644 --- a/.dotnet/src/Generated/Models/ListModelsResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListModelsResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListModelsResponse_object. 
- public readonly partial struct ListModelsResponseObject : IEquatable + internal readonly partial struct ListModelsResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public ListModelsResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/ListOrder.cs b/.dotnet/src/Generated/Models/ListOrder.cs index 95b097763..0a2a6e6cf 100644 --- a/.dotnet/src/Generated/Models/ListOrder.cs +++ b/.dotnet/src/Generated/Models/ListOrder.cs @@ -3,9 +3,9 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public readonly partial struct ListOrder : IEquatable + internal readonly partial struct ListOrder : IEquatable { private readonly string _value; @@ -43,4 +43,3 @@ public ListOrder(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs index 16a324034..c1344e078 100644 --- a/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ListPaginatedFineTuningJobsResponse : IJsonModel + internal partial class ListPaginatedFineTuningJobsResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -144,4 +144,3 @@ internal static ListPaginatedFineTuningJobsResponse FromResponse(PipelineRespons } } } - diff --git a/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs index 3bb6a7d15..e99c79598 100644 --- 
a/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs +++ b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListPaginatedFineTuningJobsResponse. - public partial class ListPaginatedFineTuningJobsResponse + internal partial class ListPaginatedFineTuningJobsResponse { /// /// Keeps track of any properties unknown to the library. @@ -83,4 +83,3 @@ internal ListPaginatedFineTuningJobsResponse() public bool HasMore { get; } } } - diff --git a/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs index a91044a98..4a9b71ed8 100644 --- a/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ListRunStepsResponse : IJsonModel + internal partial class ListRunStepsResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -160,4 +160,3 @@ internal static ListRunStepsResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ListRunStepsResponse.cs b/.dotnet/src/Generated/Models/ListRunStepsResponse.cs index 641a91e04..6582d2fc6 100644 --- a/.dotnet/src/Generated/Models/ListRunStepsResponse.cs +++ b/.dotnet/src/Generated/Models/ListRunStepsResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListRunStepsResponse. - public partial class ListRunStepsResponse + internal partial class ListRunStepsResponse { /// /// Keeps track of any properties unknown to the library. 
@@ -95,4 +95,3 @@ internal ListRunStepsResponse() public bool HasMore { get; } } } - diff --git a/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs b/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs index 2b613b3e4..98dd2d37c 100644 --- a/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListRunStepsResponse_object. - public readonly partial struct ListRunStepsResponseObject : IEquatable + internal readonly partial struct ListRunStepsResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public ListRunStepsResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs index a934e890e..c6b501c8a 100644 --- a/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ListRunsResponse : IJsonModel + internal partial class ListRunsResponse : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -160,4 +160,3 @@ internal static ListRunsResponse FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ListRunsResponse.cs b/.dotnet/src/Generated/Models/ListRunsResponse.cs index f3fb538c7..233e26060 100644 --- a/.dotnet/src/Generated/Models/ListRunsResponse.cs +++ b/.dotnet/src/Generated/Models/ListRunsResponse.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListRunsResponse. 
- public partial class ListRunsResponse + internal partial class ListRunsResponse { /// /// Keeps track of any properties unknown to the library. @@ -95,4 +95,3 @@ internal ListRunsResponse() public bool HasMore { get; } } } - diff --git a/.dotnet/src/Generated/Models/ListRunsResponseObject.cs b/.dotnet/src/Generated/Models/ListRunsResponseObject.cs index f8a80ed2d..f3f0e1a1e 100644 --- a/.dotnet/src/Generated/Models/ListRunsResponseObject.cs +++ b/.dotnet/src/Generated/Models/ListRunsResponseObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ListRunsResponse_object. - public readonly partial struct ListRunsResponseObject : IEquatable + internal readonly partial struct ListRunsResponseObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public ListRunsResponseObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs b/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs index 62d4e0d8e..61eb5abae 100644 --- a/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class MessageFileObject : IJsonModel + internal partial class MessageFileObject : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -142,4 +142,3 @@ internal static MessageFileObject FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/MessageFileObject.cs b/.dotnet/src/Generated/Models/MessageFileObject.cs index 1f4f67e75..508d68981 100644 --- a/.dotnet/src/Generated/Models/MessageFileObject.cs +++ b/.dotnet/src/Generated/Models/MessageFileObject.cs @@ -4,10 +4,10 @@ using 
OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// A list of files attached to a `message`. - public partial class MessageFileObject + internal partial class MessageFileObject { /// /// Keeps track of any properties unknown to the library. @@ -87,4 +87,3 @@ internal MessageFileObject() public string MessageId { get; } } } - diff --git a/.dotnet/src/Generated/Models/MessageFileObjectObject.cs b/.dotnet/src/Generated/Models/MessageFileObjectObject.cs index f1185adf5..19c46e334 100644 --- a/.dotnet/src/Generated/Models/MessageFileObjectObject.cs +++ b/.dotnet/src/Generated/Models/MessageFileObjectObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The MessageFileObject_object. - public readonly partial struct MessageFileObjectObject : IEquatable + internal readonly partial struct MessageFileObjectObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public MessageFileObjectObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/MessageObject.Serialization.cs b/.dotnet/src/Generated/Models/MessageObject.Serialization.cs index bc6f566b7..27a044c39 100644 --- a/.dotnet/src/Generated/Models/MessageObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/MessageObject.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class MessageObject : IJsonModel + internal partial class MessageObject : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -276,4 +276,3 @@ internal static MessageObject FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/MessageObject.cs b/.dotnet/src/Generated/Models/MessageObject.cs index 0ed781f8d..f796b78be 100644 --- 
a/.dotnet/src/Generated/Models/MessageObject.cs +++ b/.dotnet/src/Generated/Models/MessageObject.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The MessageObject. - public partial class MessageObject + internal partial class MessageObject { /// /// Keeps track of any properties unknown to the library. @@ -197,4 +197,3 @@ internal MessageObject() public IReadOnlyDictionary Metadata { get; } } } - diff --git a/.dotnet/src/Generated/Models/MessageObjectObject.cs b/.dotnet/src/Generated/Models/MessageObjectObject.cs index 978ab0a6a..120249ab6 100644 --- a/.dotnet/src/Generated/Models/MessageObjectObject.cs +++ b/.dotnet/src/Generated/Models/MessageObjectObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The MessageObject_object. - public readonly partial struct MessageObjectObject : IEquatable + internal readonly partial struct MessageObjectObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public MessageObjectObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/MessageObjectRole.cs b/.dotnet/src/Generated/Models/MessageObjectRole.cs index 53ee3cafe..1b637beb6 100644 --- a/.dotnet/src/Generated/Models/MessageObjectRole.cs +++ b/.dotnet/src/Generated/Models/MessageObjectRole.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for role in MessageObject. 
- public readonly partial struct MessageObjectRole : IEquatable + internal readonly partial struct MessageObjectRole : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public MessageObjectRole(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/Model.Serialization.cs b/.dotnet/src/Generated/Models/Model.Serialization.cs index 4b0cb8d8a..13d7f1f3a 100644 --- a/.dotnet/src/Generated/Models/Model.Serialization.cs +++ b/.dotnet/src/Generated/Models/Model.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class Model : IJsonModel + internal partial class Model : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -142,4 +142,3 @@ internal static Model FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/Model.cs b/.dotnet/src/Generated/Models/Model.cs index 66fe38632..d5b78f33f 100644 --- a/.dotnet/src/Generated/Models/Model.cs +++ b/.dotnet/src/Generated/Models/Model.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Describes an OpenAI model offering that can be used with the API. - public partial class Model + internal partial class Model { /// /// Keeps track of any properties unknown to the library. @@ -87,4 +87,3 @@ internal Model() public string OwnedBy { get; } } } - diff --git a/.dotnet/src/Generated/Models/ModelObject.cs b/.dotnet/src/Generated/Models/ModelObject.cs index 70fd47f04..5971c3917 100644 --- a/.dotnet/src/Generated/Models/ModelObject.cs +++ b/.dotnet/src/Generated/Models/ModelObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The Model_object. 
- public readonly partial struct ModelObject : IEquatable + internal readonly partial struct ModelObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public ModelObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs index 7635f00f3..41c39aac7 100644 --- a/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ModifyAssistantRequest : IJsonModel + internal partial class ModifyAssistantRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -292,4 +292,3 @@ internal static ModifyAssistantRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs b/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs index 9882b6462..ae45a5a00 100644 --- a/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs +++ b/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ModifyAssistantRequest. - public partial class ModifyAssistantRequest + internal partial class ModifyAssistantRequest { /// /// Keeps track of any properties unknown to the library. 
@@ -143,4 +143,3 @@ internal ModifyAssistantRequest(string model, string name, string description, s public IDictionary Metadata { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs index 2c5c37dec..bf8d29430 100644 --- a/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ModifyMessageRequest : IJsonModel + internal partial class ModifyMessageRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -143,4 +143,3 @@ internal static ModifyMessageRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ModifyMessageRequest.cs b/.dotnet/src/Generated/Models/ModifyMessageRequest.cs index a087c656d..72cc4c5bc 100644 --- a/.dotnet/src/Generated/Models/ModifyMessageRequest.cs +++ b/.dotnet/src/Generated/Models/ModifyMessageRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ModifyMessageRequest. - public partial class ModifyMessageRequest + internal partial class ModifyMessageRequest { /// /// Keeps track of any properties unknown to the library. 
@@ -68,4 +68,3 @@ internal ModifyMessageRequest(IDictionary metadata, IDictionary< public IDictionary Metadata { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs index 1976fd93e..40da11ec2 100644 --- a/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ModifyRunRequest : IJsonModel + internal partial class ModifyRunRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -143,4 +143,3 @@ internal static ModifyRunRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ModifyRunRequest.cs b/.dotnet/src/Generated/Models/ModifyRunRequest.cs index 1be502ea5..6def465a0 100644 --- a/.dotnet/src/Generated/Models/ModifyRunRequest.cs +++ b/.dotnet/src/Generated/Models/ModifyRunRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ModifyRunRequest. - public partial class ModifyRunRequest + internal partial class ModifyRunRequest { /// /// Keeps track of any properties unknown to the library. 
@@ -68,4 +68,3 @@ internal ModifyRunRequest(IDictionary metadata, IDictionary Metadata { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs index e5bc72917..5c6618494 100644 --- a/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ModifyThreadRequest : IJsonModel + internal partial class ModifyThreadRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -143,4 +143,3 @@ internal static ModifyThreadRequest FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ModifyThreadRequest.cs b/.dotnet/src/Generated/Models/ModifyThreadRequest.cs index 084d68686..df90174fa 100644 --- a/.dotnet/src/Generated/Models/ModifyThreadRequest.cs +++ b/.dotnet/src/Generated/Models/ModifyThreadRequest.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ModifyThreadRequest. - public partial class ModifyThreadRequest + internal partial class ModifyThreadRequest { /// /// Keeps track of any properties unknown to the library. 
@@ -68,4 +68,3 @@ internal ModifyThreadRequest(IDictionary metadata, IDictionary Metadata { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs b/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs index fe73a6958..98890a027 100644 --- a/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs +++ b/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class OpenAIFile : IJsonModel + internal partial class OpenAIFile : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -177,4 +177,3 @@ internal static OpenAIFile FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/OpenAIFile.cs b/.dotnet/src/Generated/Models/OpenAIFile.cs index 05b42f6de..99f8e94ad 100644 --- a/.dotnet/src/Generated/Models/OpenAIFile.cs +++ b/.dotnet/src/Generated/Models/OpenAIFile.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The `File` object represents a document that has been uploaded to OpenAI. - public partial class OpenAIFile + internal partial class OpenAIFile { /// /// Keeps track of any properties unknown to the library. @@ -133,4 +133,3 @@ internal OpenAIFile() public string StatusDetails { get; } } } - diff --git a/.dotnet/src/Generated/Models/OpenAIFileObject.cs b/.dotnet/src/Generated/Models/OpenAIFileObject.cs index 268a0888e..db29f0450 100644 --- a/.dotnet/src/Generated/Models/OpenAIFileObject.cs +++ b/.dotnet/src/Generated/Models/OpenAIFileObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The OpenAIFile_object. 
- public readonly partial struct OpenAIFileObject : IEquatable + internal readonly partial struct OpenAIFileObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public OpenAIFileObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs b/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs index 98a369e91..97d4f29b4 100644 --- a/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs +++ b/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for purpose in OpenAIFile. - public readonly partial struct OpenAIFilePurpose : IEquatable + internal readonly partial struct OpenAIFilePurpose : IEquatable { private readonly string _value; @@ -50,4 +50,3 @@ public OpenAIFilePurpose(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/OpenAIFileStatus.cs b/.dotnet/src/Generated/Models/OpenAIFileStatus.cs index cddc7cf60..bd8c906d5 100644 --- a/.dotnet/src/Generated/Models/OpenAIFileStatus.cs +++ b/.dotnet/src/Generated/Models/OpenAIFileStatus.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for status in OpenAIFile. 
- public readonly partial struct OpenAIFileStatus : IEquatable + internal readonly partial struct OpenAIFileStatus : IEquatable { private readonly string _value; @@ -47,4 +47,3 @@ public OpenAIFileStatus(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs b/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs index b730fa9c8..b1cdb73da 100644 --- a/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class RunCompletionUsage : IJsonModel + internal partial class RunCompletionUsage : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -134,4 +134,3 @@ internal static RunCompletionUsage FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/RunCompletionUsage.cs b/.dotnet/src/Generated/Models/RunCompletionUsage.cs index a1bd07938..8e804dce9 100644 --- a/.dotnet/src/Generated/Models/RunCompletionUsage.cs +++ b/.dotnet/src/Generated/Models/RunCompletionUsage.cs @@ -3,13 +3,13 @@ using System; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal /// state (i.e. `in_progress`, `queued`, etc.). /// - public partial class RunCompletionUsage + internal partial class RunCompletionUsage { /// /// Keeps track of any properties unknown to the library. 
@@ -80,4 +80,3 @@ internal RunCompletionUsage() public long TotalTokens { get; } } } - diff --git a/.dotnet/src/Generated/Models/RunObject.Serialization.cs b/.dotnet/src/Generated/Models/RunObject.Serialization.cs index 1c7779a88..07fa205c1 100644 --- a/.dotnet/src/Generated/Models/RunObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunObject.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class RunObject : IJsonModel + internal partial class RunObject : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -408,4 +408,3 @@ internal static RunObject FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/RunObject.cs b/.dotnet/src/Generated/Models/RunObject.cs index a1baf8de2..282cac58a 100644 --- a/.dotnet/src/Generated/Models/RunObject.cs +++ b/.dotnet/src/Generated/Models/RunObject.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Represents an execution run on a [thread](/docs/api-reference/threads). - public partial class RunObject + internal partial class RunObject { /// /// Keeps track of any properties unknown to the library. 
@@ -260,4 +260,3 @@ internal RunObject() public RunCompletionUsage Usage { get; } } } - diff --git a/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs b/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs index f3486aafe..6466b1546 100644 --- a/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class RunObjectLastError : IJsonModel + internal partial class RunObjectLastError : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -126,4 +126,3 @@ internal static RunObjectLastError FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/RunObjectLastError.cs b/.dotnet/src/Generated/Models/RunObjectLastError.cs index b46274c65..b824da830 100644 --- a/.dotnet/src/Generated/Models/RunObjectLastError.cs +++ b/.dotnet/src/Generated/Models/RunObjectLastError.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The RunObjectLastError. - public partial class RunObjectLastError + internal partial class RunObjectLastError { /// /// Keeps track of any properties unknown to the library. @@ -75,4 +75,3 @@ internal RunObjectLastError() public string Message { get; } } } - diff --git a/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs b/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs index d851f2397..500d3a25a 100644 --- a/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs +++ b/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for code in RunObjectLastError. 
- public readonly partial struct RunObjectLastErrorCode : IEquatable + internal readonly partial struct RunObjectLastErrorCode : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public RunObjectLastErrorCode(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/RunObjectObject.cs b/.dotnet/src/Generated/Models/RunObjectObject.cs index 19925055a..f03e8122b 100644 --- a/.dotnet/src/Generated/Models/RunObjectObject.cs +++ b/.dotnet/src/Generated/Models/RunObjectObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The RunObject_object. - public readonly partial struct RunObjectObject : IEquatable + internal readonly partial struct RunObjectObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public RunObjectObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs b/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs index 98fb7bbc1..b20e400db 100644 --- a/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class RunObjectRequiredAction : IJsonModel + internal partial class RunObjectRequiredAction : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -126,4 +126,3 @@ internal static RunObjectRequiredAction FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs b/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs index 034fedcca..c3515dfbd 100644 --- a/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs +++ 
b/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The RunObjectRequiredAction. - public partial class RunObjectRequiredAction + internal partial class RunObjectRequiredAction { /// /// Keeps track of any properties unknown to the library. @@ -74,4 +74,3 @@ internal RunObjectRequiredAction() public RunObjectRequiredActionSubmitToolOutputs SubmitToolOutputs { get; } } } - diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs index 0f374b56a..79032ea03 100644 --- a/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class RunObjectRequiredActionSubmitToolOutputs : IJsonModel + internal partial class RunObjectRequiredActionSubmitToolOutputs : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -128,4 +128,3 @@ internal static RunObjectRequiredActionSubmitToolOutputs FromResponse(PipelineRe } } } - diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs index ffdc5ff0f..044c28ed8 100644 --- a/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs +++ b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The RunObjectRequiredActionSubmitToolOutputs. 
- public partial class RunObjectRequiredActionSubmitToolOutputs + internal partial class RunObjectRequiredActionSubmitToolOutputs { /// /// Keeps track of any properties unknown to the library. @@ -70,4 +70,3 @@ internal RunObjectRequiredActionSubmitToolOutputs() public IReadOnlyList ToolCalls { get; } } } - diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs b/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs index d8d1c8f07..c1ce2fd12 100644 --- a/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs +++ b/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The RunObjectRequiredAction_type. - public readonly partial struct RunObjectRequiredActionType : IEquatable + internal readonly partial struct RunObjectRequiredActionType : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public RunObjectRequiredActionType(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/RunObjectStatus.cs b/.dotnet/src/Generated/Models/RunObjectStatus.cs index cd13d9b49..c369bec80 100644 --- a/.dotnet/src/Generated/Models/RunObjectStatus.cs +++ b/.dotnet/src/Generated/Models/RunObjectStatus.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for status in RunObject. 
- public readonly partial struct RunObjectStatus : IEquatable + internal readonly partial struct RunObjectStatus : IEquatable { private readonly string _value; @@ -62,4 +62,3 @@ public RunObjectStatus(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs index 38408c8d1..60be54dde 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs @@ -6,7 +6,7 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { internal partial class RunStepDetailsMessageCreationObject : IJsonModel { @@ -126,4 +126,3 @@ internal static RunStepDetailsMessageCreationObject FromResponse(PipelineRespons } } } - diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs index ffac3bdb1..1de631b8d 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs @@ -4,7 +4,7 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Details of the message creation by the run step. 
internal partial class RunStepDetailsMessageCreationObject @@ -74,4 +74,3 @@ internal RunStepDetailsMessageCreationObject() public RunStepDetailsMessageCreationObjectMessageCreation MessageCreation { get; } } } - diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs index 1d02bc66a..b1f56e27c 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs @@ -6,7 +6,7 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { internal partial class RunStepDetailsMessageCreationObjectMessageCreation : IJsonModel { @@ -118,4 +118,3 @@ internal static RunStepDetailsMessageCreationObjectMessageCreation FromResponse( } } } - diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs index 447962ae3..a7e725f0a 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs @@ -4,7 +4,7 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The RunStepDetailsMessageCreationObjectMessageCreation. 
internal partial class RunStepDetailsMessageCreationObjectMessageCreation @@ -69,4 +69,3 @@ internal RunStepDetailsMessageCreationObjectMessageCreation() public string MessageId { get; } } } - diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs index bf0a1f637..e5d49a8d3 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs @@ -3,7 +3,7 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The RunStepDetailsMessageCreationObject_type. internal readonly partial struct RunStepDetailsMessageCreationObjectType : IEquatable @@ -41,4 +41,3 @@ public RunStepDetailsMessageCreationObjectType(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs index ebf127b7d..c38c64703 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs @@ -6,7 +6,7 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { internal partial class RunStepDetailsToolCallsObject : IJsonModel { @@ -155,4 +155,3 @@ internal static RunStepDetailsToolCallsObject FromResponse(PipelineResponse resp } } } - diff --git a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs index f290db55e..37fb29ed7 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs @@ -5,7 +5,7 @@ using System.Collections.Generic; using System.Linq; -namespace 
OpenAI.Models +namespace OpenAI.Internal.Models { /// Details of the tool call. internal partial class RunStepDetailsToolCallsObject @@ -111,4 +111,3 @@ internal RunStepDetailsToolCallsObject() public IReadOnlyList ToolCalls { get; } } } - diff --git a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs index a307fc4c4..5593d5a5b 100644 --- a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs +++ b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs @@ -3,7 +3,7 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The RunStepDetailsToolCallsObject_type. internal readonly partial struct RunStepDetailsToolCallsObjectType : IEquatable @@ -41,4 +41,3 @@ public RunStepDetailsToolCallsObjectType(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs b/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs index a73966402..b824f8e76 100644 --- a/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class RunStepObject : IJsonModel + internal partial class RunStepObject : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -340,4 +340,3 @@ internal static RunStepObject FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/RunStepObject.cs b/.dotnet/src/Generated/Models/RunStepObject.cs index 24cbfec6e..ebd98f23f 100644 --- a/.dotnet/src/Generated/Models/RunStepObject.cs +++ b/.dotnet/src/Generated/Models/RunStepObject.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace 
OpenAI.Models +namespace OpenAI.Internal.Models { /// Represents a step in execution of a run. - public partial class RunStepObject + internal partial class RunStepObject { /// /// Keeps track of any properties unknown to the library. @@ -232,4 +232,3 @@ internal RunStepObject() public RunCompletionUsage Usage { get; } } } - diff --git a/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs b/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs index 71a0e8a2f..bced34068 100644 --- a/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class RunStepObjectLastError : IJsonModel + internal partial class RunStepObjectLastError : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -126,4 +126,3 @@ internal static RunStepObjectLastError FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/RunStepObjectLastError.cs b/.dotnet/src/Generated/Models/RunStepObjectLastError.cs index 2bf3b1db8..32d41e0fd 100644 --- a/.dotnet/src/Generated/Models/RunStepObjectLastError.cs +++ b/.dotnet/src/Generated/Models/RunStepObjectLastError.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The RunStepObjectLastError. - public partial class RunStepObjectLastError + internal partial class RunStepObjectLastError { /// /// Keeps track of any properties unknown to the library. 
@@ -75,4 +75,3 @@ internal RunStepObjectLastError() public string Message { get; } } } - diff --git a/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs b/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs index 9bf035ea1..2d8832e99 100644 --- a/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs +++ b/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for code in RunStepObjectLastError. - public readonly partial struct RunStepObjectLastErrorCode : IEquatable + internal readonly partial struct RunStepObjectLastErrorCode : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public RunStepObjectLastErrorCode(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/RunStepObjectObject.cs b/.dotnet/src/Generated/Models/RunStepObjectObject.cs index 21477c60d..376e9e3cc 100644 --- a/.dotnet/src/Generated/Models/RunStepObjectObject.cs +++ b/.dotnet/src/Generated/Models/RunStepObjectObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The RunStepObject_object. - public readonly partial struct RunStepObjectObject : IEquatable + internal readonly partial struct RunStepObjectObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public RunStepObjectObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/RunStepObjectStatus.cs b/.dotnet/src/Generated/Models/RunStepObjectStatus.cs index 21abb0c10..f46f639d7 100644 --- a/.dotnet/src/Generated/Models/RunStepObjectStatus.cs +++ b/.dotnet/src/Generated/Models/RunStepObjectStatus.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for status in RunStepObject. 
- public readonly partial struct RunStepObjectStatus : IEquatable + internal readonly partial struct RunStepObjectStatus : IEquatable { private readonly string _value; @@ -53,4 +53,3 @@ public RunStepObjectStatus(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/RunStepObjectType.cs b/.dotnet/src/Generated/Models/RunStepObjectType.cs index 432a90099..30257f499 100644 --- a/.dotnet/src/Generated/Models/RunStepObjectType.cs +++ b/.dotnet/src/Generated/Models/RunStepObjectType.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Enum for type in RunStepObject. - public readonly partial struct RunStepObjectType : IEquatable + internal readonly partial struct RunStepObjectType : IEquatable { private readonly string _value; @@ -44,4 +44,3 @@ public RunStepObjectType(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs b/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs index 47b58bfd9..8c1145c63 100644 --- a/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class RunToolCallObject : IJsonModel + internal partial class RunToolCallObject : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -134,4 +134,3 @@ internal static RunToolCallObject FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/RunToolCallObject.cs b/.dotnet/src/Generated/Models/RunToolCallObject.cs index d70655724..83c3a699c 100644 --- a/.dotnet/src/Generated/Models/RunToolCallObject.cs +++ b/.dotnet/src/Generated/Models/RunToolCallObject.cs @@ -4,10 +4,10 @@ 
using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Tool call objects. - public partial class RunToolCallObject + internal partial class RunToolCallObject { /// /// Keeps track of any properties unknown to the library. @@ -90,4 +90,3 @@ internal RunToolCallObject() public RunToolCallObjectFunction Function { get; } } } - diff --git a/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs index a62970151..d7af27898 100644 --- a/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs +++ b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class RunToolCallObjectFunction : IJsonModel + internal partial class RunToolCallObjectFunction : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -126,4 +126,3 @@ internal static RunToolCallObjectFunction FromResponse(PipelineResponse response } } } - diff --git a/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs index 1fab15eac..15f52388a 100644 --- a/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs +++ b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The RunToolCallObjectFunction. - public partial class RunToolCallObjectFunction + internal partial class RunToolCallObjectFunction { /// /// Keeps track of any properties unknown to the library. 
@@ -76,4 +76,3 @@ internal RunToolCallObjectFunction() public string Arguments { get; } } } - diff --git a/.dotnet/src/Generated/Models/RunToolCallObjectType.cs b/.dotnet/src/Generated/Models/RunToolCallObjectType.cs index d882bc7f3..b036b215c 100644 --- a/.dotnet/src/Generated/Models/RunToolCallObjectType.cs +++ b/.dotnet/src/Generated/Models/RunToolCallObjectType.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The RunToolCallObject_type. - public readonly partial struct RunToolCallObjectType : IEquatable + internal readonly partial struct RunToolCallObjectType : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public RunToolCallObjectType(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs index 68b1ad66f..a4b178b12 100644 --- a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class SubmitToolOutputsRunRequest : IJsonModel + internal partial class SubmitToolOutputsRunRequest : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -128,4 +128,3 @@ internal static SubmitToolOutputsRunRequest FromResponse(PipelineResponse respon } } } - diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs index f489e6bb1..76e994a43 100644 --- a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs @@ -5,10 +5,10 @@ using System.Collections.Generic; using System.Linq; 
-namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The SubmitToolOutputsRunRequest. - public partial class SubmitToolOutputsRunRequest + internal partial class SubmitToolOutputsRunRequest { /// /// Keeps track of any properties unknown to the library. @@ -70,4 +70,3 @@ internal SubmitToolOutputsRunRequest() public IList ToolOutputs { get; } } } - diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.Serialization.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.Serialization.cs index 709807021..ab9abfeb1 100644 --- a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.Serialization.cs +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class SubmitToolOutputsRunRequestToolOutput : IJsonModel + internal partial class SubmitToolOutputsRunRequestToolOutput : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -132,4 +132,3 @@ internal static SubmitToolOutputsRunRequestToolOutput FromResponse(PipelineRespo } } } - diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.cs index 096a5584b..78c4c374f 100644 --- a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.cs +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.cs @@ -3,10 +3,10 @@ using System; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The SubmitToolOutputsRunRequestToolOutput. - public partial class SubmitToolOutputsRunRequestToolOutput + internal partial class SubmitToolOutputsRunRequestToolOutput { /// /// Keeps track of any properties unknown to the library. 
@@ -68,4 +68,3 @@ internal SubmitToolOutputsRunRequestToolOutput(string toolCallId, string output, public string Output { get; set; } } } - diff --git a/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs b/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs index 4644d1aa9..c87050e03 100644 --- a/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs +++ b/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs @@ -6,9 +6,9 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { - public partial class ThreadObject : IJsonModel + internal partial class ThreadObject : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { @@ -165,4 +165,3 @@ internal static ThreadObject FromResponse(PipelineResponse response) } } } - diff --git a/.dotnet/src/Generated/Models/ThreadObject.cs b/.dotnet/src/Generated/Models/ThreadObject.cs index 22d754976..b4cf5143d 100644 --- a/.dotnet/src/Generated/Models/ThreadObject.cs +++ b/.dotnet/src/Generated/Models/ThreadObject.cs @@ -4,10 +4,10 @@ using OpenAI.ClientShared.Internal; using System.Collections.Generic; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Represents a thread that contains [messages](/docs/api-reference/messages). - public partial class ThreadObject + internal partial class ThreadObject { /// /// Keeps track of any properties unknown to the library. @@ -98,4 +98,3 @@ internal ThreadObject() public IReadOnlyDictionary Metadata { get; } } } - diff --git a/.dotnet/src/Generated/Models/ThreadObjectObject.cs b/.dotnet/src/Generated/Models/ThreadObjectObject.cs index 8eb382a0b..0b10ab32c 100644 --- a/.dotnet/src/Generated/Models/ThreadObjectObject.cs +++ b/.dotnet/src/Generated/Models/ThreadObjectObject.cs @@ -3,10 +3,10 @@ using System; using System.ComponentModel; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// The ThreadObject_object. 
- public readonly partial struct ThreadObjectObject : IEquatable + internal readonly partial struct ThreadObjectObject : IEquatable { private readonly string _value; @@ -41,4 +41,3 @@ public ThreadObjectObject(string value) public override string ToString() => _value; } } - diff --git a/.dotnet/src/Generated/ModelsOps.cs b/.dotnet/src/Generated/ModelsOps.cs index c4b6cbfbb..3110695ec 100644 --- a/.dotnet/src/Generated/ModelsOps.cs +++ b/.dotnet/src/Generated/ModelsOps.cs @@ -6,13 +6,13 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using OpenAI.Models; +using OpenAI.Internal.Models; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated sub-client. /// The ModelsOps sub-client. - public partial class ModelsOps + internal partial class ModelsOps { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -392,4 +392,3 @@ internal PipelineMessage CreateDeleteRequest(string model, RequestOptions option private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } - diff --git a/.dotnet/src/Generated/Moderations.cs b/.dotnet/src/Generated/Moderations.cs index 3c728d50a..ef4c93f63 100644 --- a/.dotnet/src/Generated/Moderations.cs +++ b/.dotnet/src/Generated/Moderations.cs @@ -6,13 +6,13 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using OpenAI.Models; +using OpenAI.Internal.Models; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated sub-client. /// The Moderations sub-client. 
- public partial class Moderations + internal partial class Moderations { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -159,4 +159,3 @@ internal PipelineMessage CreateCreateModerationRequest(BinaryContent content, Re private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } - diff --git a/.dotnet/src/Generated/OpenAIClient.cs b/.dotnet/src/Generated/OpenAIClient.cs index 29785453c..abe16ec58 100644 --- a/.dotnet/src/Generated/OpenAIClient.cs +++ b/.dotnet/src/Generated/OpenAIClient.cs @@ -5,11 +5,11 @@ using System.ClientModel.Primitives; using System.Threading; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated client. /// The OpenAI service client. - public partial class OpenAIClient + internal partial class OpenAIClient { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -144,4 +144,3 @@ public virtual Threads GetThreadsClient() } } } - diff --git a/.dotnet/src/Generated/OpenAIClientOptions.cs b/.dotnet/src/Generated/OpenAIClientOptions.cs index 31df0818d..10b7372d4 100644 --- a/.dotnet/src/Generated/OpenAIClientOptions.cs +++ b/.dotnet/src/Generated/OpenAIClientOptions.cs @@ -2,11 +2,10 @@ using System.ClientModel.Primitives; -namespace OpenAI +namespace OpenAI.Internal { /// Client options for OpenAIClient. 
- public partial class OpenAIClientOptions : ClientPipelineOptions + internal partial class OpenAIClientOptions : ClientPipelineOptions { } } - diff --git a/.dotnet/src/Generated/OpenAIModelFactory.cs b/.dotnet/src/Generated/OpenAIModelFactory.cs index 8e81ba327..ad51183cf 100644 --- a/.dotnet/src/Generated/OpenAIModelFactory.cs +++ b/.dotnet/src/Generated/OpenAIModelFactory.cs @@ -4,10 +4,10 @@ using System.Collections.Generic; using System.Linq; -namespace OpenAI.Models +namespace OpenAI.Internal.Models { /// Model factory for models. - public static partial class OpenAIModelFactory + internal static partial class OpenAIModelFactory { /// Initializes a new instance of . /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`. @@ -1577,4 +1577,3 @@ public static DeleteThreadResponse DeleteThreadResponse(string id = null, bool d } } } - diff --git a/.dotnet/src/Generated/Runs.cs b/.dotnet/src/Generated/Runs.cs index 5d79922ec..66d2cfc1c 100644 --- a/.dotnet/src/Generated/Runs.cs +++ b/.dotnet/src/Generated/Runs.cs @@ -6,13 +6,13 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using OpenAI.Models; +using OpenAI.Internal.Models; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated sub-client. /// The Runs sub-client. 
- public partial class Runs + internal partial class Runs { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -1489,4 +1489,3 @@ internal PipelineMessage CreateGetRunStepRequest(string threadId, string runId, private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } - diff --git a/.dotnet/src/Generated/Threads.cs b/.dotnet/src/Generated/Threads.cs index 6332bef17..8e2ac77e4 100644 --- a/.dotnet/src/Generated/Threads.cs +++ b/.dotnet/src/Generated/Threads.cs @@ -6,13 +6,13 @@ using System.Text; using System.Threading; using System.Threading.Tasks; -using OpenAI.Models; +using OpenAI.Internal.Models; -namespace OpenAI +namespace OpenAI.Internal { // Data plane generated sub-client. /// The Threads sub-client. - public partial class Threads + internal partial class Threads { private const string AuthorizationHeader = "Authorization"; private readonly ApiKeyCredential _credential; @@ -526,4 +526,3 @@ internal PipelineMessage CreateDeleteThreadRequest(string threadId, RequestOptio private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); } } - diff --git a/.dotnet/tests/Generated/Tests/AssistantsTests.cs b/.dotnet/tests/Generated/Tests/AssistantsTests.cs index 6c1ba99d2..dde077712 100644 --- a/.dotnet/tests/Generated/Tests/AssistantsTests.cs +++ b/.dotnet/tests/Generated/Tests/AssistantsTests.cs @@ -20,4 +20,3 @@ public void SmokeTest() } } } - diff --git a/.dotnet/tests/Generated/Tests/AudioTests.cs b/.dotnet/tests/Generated/Tests/AudioTests.cs index 6ff9cd6f5..f14a6f69d 100644 --- a/.dotnet/tests/Generated/Tests/AudioTests.cs +++ b/.dotnet/tests/Generated/Tests/AudioTests.cs @@ -20,4 +20,3 @@ public void SmokeTest() } } } - diff --git a/.dotnet/tests/Generated/Tests/ChatTests.cs 
b/.dotnet/tests/Generated/Tests/ChatTests.cs index 17a9746da..b1ac76e65 100644 --- a/.dotnet/tests/Generated/Tests/ChatTests.cs +++ b/.dotnet/tests/Generated/Tests/ChatTests.cs @@ -20,4 +20,3 @@ public void SmokeTest() } } } - diff --git a/.dotnet/tests/Generated/Tests/CompletionsTests.cs b/.dotnet/tests/Generated/Tests/CompletionsTests.cs index 159a4444d..59958da6b 100644 --- a/.dotnet/tests/Generated/Tests/CompletionsTests.cs +++ b/.dotnet/tests/Generated/Tests/CompletionsTests.cs @@ -20,4 +20,3 @@ public void SmokeTest() } } } - diff --git a/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs b/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs index b4deac0fc..b4e115c86 100644 --- a/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs +++ b/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs @@ -20,4 +20,3 @@ public void SmokeTest() } } } - diff --git a/.dotnet/tests/Generated/Tests/FilesTests.cs b/.dotnet/tests/Generated/Tests/FilesTests.cs index c08f7aee8..64ebbed83 100644 --- a/.dotnet/tests/Generated/Tests/FilesTests.cs +++ b/.dotnet/tests/Generated/Tests/FilesTests.cs @@ -20,4 +20,3 @@ public void SmokeTest() } } } - diff --git a/.dotnet/tests/Generated/Tests/FineTuningTests.cs b/.dotnet/tests/Generated/Tests/FineTuningTests.cs index 7504035a2..324b4d458 100644 --- a/.dotnet/tests/Generated/Tests/FineTuningTests.cs +++ b/.dotnet/tests/Generated/Tests/FineTuningTests.cs @@ -20,4 +20,3 @@ public void SmokeTest() } } } - diff --git a/.dotnet/tests/Generated/Tests/ImagesTests.cs b/.dotnet/tests/Generated/Tests/ImagesTests.cs index 097054471..96b7146de 100644 --- a/.dotnet/tests/Generated/Tests/ImagesTests.cs +++ b/.dotnet/tests/Generated/Tests/ImagesTests.cs @@ -20,4 +20,3 @@ public void SmokeTest() } } } - diff --git a/.dotnet/tests/Generated/Tests/MessagesTests.cs b/.dotnet/tests/Generated/Tests/MessagesTests.cs index ab0223685..89933cb52 100644 --- a/.dotnet/tests/Generated/Tests/MessagesTests.cs +++ b/.dotnet/tests/Generated/Tests/MessagesTests.cs @@ -20,4 +20,3 @@ 
public void SmokeTest() } } } - diff --git a/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs b/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs index 0560956dc..3ac25fa3d 100644 --- a/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs +++ b/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs @@ -20,4 +20,3 @@ public void SmokeTest() } } } - diff --git a/.dotnet/tests/Generated/Tests/ModerationsTests.cs b/.dotnet/tests/Generated/Tests/ModerationsTests.cs index 7fc5a1d0b..0413ef684 100644 --- a/.dotnet/tests/Generated/Tests/ModerationsTests.cs +++ b/.dotnet/tests/Generated/Tests/ModerationsTests.cs @@ -20,4 +20,3 @@ public void SmokeTest() } } } - diff --git a/.dotnet/tests/Generated/Tests/RunsTests.cs b/.dotnet/tests/Generated/Tests/RunsTests.cs index bd4de7947..8bc6927a9 100644 --- a/.dotnet/tests/Generated/Tests/RunsTests.cs +++ b/.dotnet/tests/Generated/Tests/RunsTests.cs @@ -20,4 +20,3 @@ public void SmokeTest() } } } - diff --git a/.dotnet/tests/Generated/Tests/ThreadsTests.cs b/.dotnet/tests/Generated/Tests/ThreadsTests.cs index b5ea58e10..e7f4583af 100644 --- a/.dotnet/tests/Generated/Tests/ThreadsTests.cs +++ b/.dotnet/tests/Generated/Tests/ThreadsTests.cs @@ -20,4 +20,3 @@ public void SmokeTest() } } } - From f1198ade3b7b5f4aa91459ff63846f8e9488db69 Mon Sep 17 00:00:00 2001 From: Jose Arriaga Maldonado Date: Wed, 28 Feb 2024 16:18:58 -0800 Subject: [PATCH 18/18] Port over custom code --- .dotnet/Directory.Build.props | 5 + .dotnet/scripts/Add-Customizations.ps1 | 28 + .dotnet/scripts/ConvertTo-Internal.ps1 | 44 +- .dotnet/scripts/Update-ClientModel.ps1 | 8 +- .dotnet/src/Custom/Assistants/Assistant.cs | 48 + .../Assistants/AssistantClient.Protocol.cs | 429 +++++++++ .../src/Custom/Assistants/AssistantClient.cs | 771 ++++++++++++++++ .../Assistants/AssistantCreationOptions.cs | 65 ++ .../Assistants/AssistantFileAssociation.cs | 17 + .../AssistantModificationOptions.cs | 71 ++ .../src/Custom/Assistants/AssistantThread.cs | 31 + .../CodeInterpreterToolDefinition.cs | 
38 + .../Assistants/CodeInterpreterToolInfo.cs | 33 + .../Custom/Assistants/CreatedAtSortOrder.cs | 7 + .../Assistants/FunctionToolDefinition.cs | 89 ++ .../src/Custom/Assistants/FunctionToolInfo.cs | 82 ++ .../src/Custom/Assistants/ListQueryPage.cs | 114 +++ .../MessageContent.Serialization.cs | 94 ++ .../src/Custom/Assistants/MessageContent.cs | 6 + .../Assistants/MessageCreationOptions.cs | 30 + .../Assistants/MessageFileAssociation.cs | 17 + .../Assistants/MessageImageFileContent.cs | 55 ++ .dotnet/src/Custom/Assistants/MessageRole.cs | 16 + .../Custom/Assistants/MessageTextContent.cs | 72 ++ .../Assistants/RequiredFunctionToolCall.cs | 66 ++ .../src/Custom/Assistants/RequiredToolCall.cs | 37 + .../Assistants/RetrievalToolDefinition.cs | 38 + .../Custom/Assistants/RetrievalToolInfo.cs | 33 + .../Custom/Assistants/RunCreationOptions.cs | 67 ++ .dotnet/src/Custom/Assistants/RunError.cs | 24 + .dotnet/src/Custom/Assistants/RunErrorCode.cs | 37 + .../Assistants/RunModificationOptions.cs | 24 + .../RunRequiredAction.Serialization.cs | 107 +++ .../Custom/Assistants/RunRequiredAction.cs | 8 + .dotnet/src/Custom/Assistants/RunStatus.cs | 13 + .../src/Custom/Assistants/RunTokenUsage.cs | 20 + .../TextContentAnnotation.Serialization.cs | 93 ++ .../Assistants/TextContentAnnotation.cs | 6 + .../TextContentFileCitationAnnotation.cs | 95 ++ .../TextContentFilePathAnnotation.cs | 85 ++ .../Assistants/ThreadCreationOptions.cs | 26 + .../Assistants/ThreadInitializationMessage.cs | 45 + .../src/Custom/Assistants/ThreadMessage.cs | 62 ++ .../Assistants/ThreadModificationOptions.cs | 24 + .dotnet/src/Custom/Assistants/ThreadRun.cs | 105 +++ .../ToolDefinition.Serialization.cs | 98 ++ .../src/Custom/Assistants/ToolDefinition.cs | 5 + .../Assistants/ToolInfo.Serialization.cs | 97 ++ .dotnet/src/Custom/Assistants/ToolInfo.cs | 5 + .../Assistants/ToolOutput.Serialization.cs | 97 ++ .dotnet/src/Custom/Assistants/ToolOutput.cs | 27 + .dotnet/src/Custom/Audio/AudioClient.cs | 385 
++++++++ .dotnet/src/Custom/Audio/AudioDataFormat.cs | 92 ++ .../src/Custom/Audio/AudioTranscription.cs | 72 ++ .../Custom/Audio/AudioTranscriptionFormat.cs | 12 + .../Custom/Audio/AudioTranscriptionOptions.cs | 14 + .dotnet/src/Custom/Audio/AudioTranslation.cs | 32 + .../Custom/Audio/AudioTranslationOptions.cs | 11 + .../src/Custom/Audio/TextToSpeechOptions.cs | 29 + .dotnet/src/Custom/Audio/TextToSpeechVoice.cs | 64 ++ .dotnet/src/Custom/Audio/TranscribedWord.cs | 45 + .../src/Custom/Audio/TranscriptionSegment.cs | 109 +++ .dotnet/src/Custom/Chat/ChatClient.cs | 400 +++++++++ .dotnet/src/Custom/Chat/ChatCompletion.cs | 81 ++ .../Custom/Chat/ChatCompletionCollection.cs | 13 + .../src/Custom/Chat/ChatCompletionOptions.cs | 115 +++ .dotnet/src/Custom/Chat/ChatFinishReason.cs | 77 ++ .../Chat/ChatFunctionCall.Serialization.cs | 37 + .dotnet/src/Custom/Chat/ChatFunctionCall.cs | 47 + .../src/Custom/Chat/ChatFunctionConstraint.cs | 73 ++ .../src/Custom/Chat/ChatFunctionDefinition.cs | 60 ++ .../src/Custom/Chat/ChatFunctionToolCall.cs | 69 ++ .../Custom/Chat/ChatFunctionToolDefinition.cs | 60 ++ .../Chat/ChatLogProbabilityCollection.cs | 43 + .../Custom/Chat/ChatLogProbabilityResult.cs | 43 + .../Chat/ChatLogProbabilityResultItem.cs | 42 + .dotnet/src/Custom/Chat/ChatMessageContent.cs | 112 +++ .../src/Custom/Chat/ChatMessageContentKind.cs | 19 + .../Chat/ChatRequestAssistantMessage.cs | 129 +++ .../Custom/Chat/ChatRequestFunctionMessage.cs | 40 + .../Chat/ChatRequestMessage.Serialization.cs | 94 ++ .dotnet/src/Custom/Chat/ChatRequestMessage.cs | 108 +++ .../Custom/Chat/ChatRequestSystemMessage.cs | 39 + .../src/Custom/Chat/ChatRequestToolMessage.cs | 53 ++ .../src/Custom/Chat/ChatRequestUserMessage.cs | 65 ++ .dotnet/src/Custom/Chat/ChatResponseFormat.cs | 44 + .dotnet/src/Custom/Chat/ChatRole.cs | 85 ++ .dotnet/src/Custom/Chat/ChatTokenUsage.cs | 21 + .../Custom/Chat/ChatToolCall.Serialization.cs | 38 + .dotnet/src/Custom/Chat/ChatToolCall.cs | 15 + 
.dotnet/src/Custom/Chat/ChatToolConstraint.cs | 87 ++ .dotnet/src/Custom/Chat/ChatToolDefinition.cs | 11 + .../src/Custom/Chat/StreamingChatUpdate.cs | 336 +++++++ .../Chat/StreamingFunctionToolCallUpdate.cs | 90 ++ .../Custom/Chat/StreamingToolCallUpdate.cs | 97 ++ .dotnet/src/Custom/Embeddings/Embedding.cs | 44 + .../src/Custom/Embeddings/EmbeddingClient.cs | 121 +++ .../Custom/Embeddings/EmbeddingCollection.cs | 19 + .../src/Custom/Embeddings/EmbeddingOptions.cs | 8 + .../Custom/Embeddings/EmbeddingTokenUsage.cs | 16 + .dotnet/src/Custom/Files/FileClient.cs | 352 ++++++++ .dotnet/src/Custom/Files/OpenAIFileInfo.cs | 36 + .../Custom/Files/OpenAIFileInfoCollection.cs | 11 + .dotnet/src/Custom/Images/GeneratedImage.cs | 48 + .dotnet/src/Custom/Images/ImageClient.cs | 245 +++++ .../Images/ImageGenerationCollection.cs | 12 + .../Custom/Images/ImageGenerationOptions.cs | 75 ++ .dotnet/src/Custom/Images/ImageQuality.cs | 24 + .../src/Custom/Images/ImageResponseFormat.cs | 32 + .dotnet/src/Custom/Images/ImageSize.cs | 43 + .dotnet/src/Custom/Images/ImageStyle.cs | 18 + .../LegacyCompletionClient.cs | 106 +++ .../Custom/Models/ModelDetailCollection.cs | 13 + .dotnet/src/Custom/Models/ModelDetails.cs | 29 + .../Custom/Models/ModelManagementClient.cs | 197 ++++ .../Custom/Moderations/ModerationClient.cs | 106 +++ .dotnet/src/Custom/OpenAIClient.cs | 191 ++++ .dotnet/src/Custom/OpenAIClientConnector.cs | 33 + .dotnet/src/Custom/OpenAIClientOptions.cs | 32 + .dotnet/src/Generated/OpenAIClient.cs | 78 +- .dotnet/src/Generated/OpenAIModelFactory.cs | 844 ------------------ .dotnet/src/OpenAI.csproj | 2 +- ...deAnalysis.SetsRequiredMembersAttribute.cs | 8 + ...rvices.CompilerFeatureRequiredAttribute.cs | 15 + ...Runtime.CompilerServices.IsExternalInit.cs | 9 + ...ompilerServices.RequiredMemberAttribute.cs | 8 + .../Utility/GenericActionPipelinePolicy.cs | 35 + .dotnet/src/Utility/SseAsyncEnumerator.cs | 59 ++ .dotnet/src/Utility/SseLine.cs | 29 + 
.dotnet/src/Utility/SseReader.cs | 118 +++ .dotnet/src/Utility/StreamingResult.cs | 95 ++ .../System.ClientModel.MultipartContent.cs | 367 ++++++++ ...em.ClientModel.MultipartFormDataContent.cs | 117 +++ .../tests/Generated/Tests/AssistantsTests.cs | 22 - .dotnet/tests/Generated/Tests/AudioTests.cs | 22 - .dotnet/tests/Generated/Tests/ChatTests.cs | 22 - .../tests/Generated/Tests/CompletionsTests.cs | 22 - .../tests/Generated/Tests/EmbeddingsTests.cs | 22 - .dotnet/tests/Generated/Tests/FilesTests.cs | 22 - .../tests/Generated/Tests/FineTuningTests.cs | 22 - .dotnet/tests/Generated/Tests/ImagesTests.cs | 22 - .../tests/Generated/Tests/MessagesTests.cs | 22 - .../tests/Generated/Tests/ModelsOpsTests.cs | 22 - .../tests/Generated/Tests/ModerationsTests.cs | 22 - .dotnet/tests/Generated/Tests/RunsTests.cs | 22 - .dotnet/tests/Generated/Tests/ThreadsTests.cs | 22 - .dotnet/tests/OpenAI.Tests.csproj | 1 + .dotnet/tests/Samples/AssistantsSamples.cs | 167 ++++ .dotnet/tests/Samples/Chat/ChatSamples.cs | 128 +++ .../Samples/Chat/Sample_FunctionCalling.cs | 148 +++ .dotnet/tests/Samples/CombinationSamples.cs | 150 ++++ .dotnet/tests/Samples/EmbeddingSamples.cs | 95 ++ .dotnet/tests/Samples/ImageSamples.cs | 46 + .dotnet/tests/TestScenarios/AssistantTests.cs | 161 ++++ .../tests/TestScenarios/ChatClientTests.cs | 89 ++ .../TestScenarios/ChatToolConstraints.cs | 70 ++ .dotnet/tests/TestScenarios/ChatToolTests.cs | 90 ++ .dotnet/tests/TestScenarios/ChatWithVision.cs | 33 + .../TestScenarios/EmbeddingClientTests.cs | 52 ++ .../tests/TestScenarios/FileClientTests.cs | 43 + .../TestScenarios/ImageGenerationTests.cs | 36 + .../tests/TestScenarios/LegacyCompletions.cs | 32 + .../tests/TestScenarios/ModelClientTests.cs | 28 + .../tests/TestScenarios/TextToSpeechTests.cs | 35 + .../tests/TestScenarios/TranscriptionTests.cs | 43 + .../tests/TestScenarios/TranslationTests.cs | 24 + .dotnet/tests/Utility/TestHelpers.cs | 77 ++ .dotnet/tests/Utility/TestPipelinePolicy.cs | 35 + 
.dotnet/tests/data/hello_world.m4a | Bin 0 -> 79968 bytes .dotnet/tests/data/hola_mundo.m4a | Bin 0 -> 202099 bytes .dotnet/tests/data/stop_sign.png | Bin 0 -> 2125 bytes 171 files changed, 11302 insertions(+), 1184 deletions(-) create mode 100644 .dotnet/Directory.Build.props create mode 100644 .dotnet/scripts/Add-Customizations.ps1 create mode 100644 .dotnet/src/Custom/Assistants/Assistant.cs create mode 100644 .dotnet/src/Custom/Assistants/AssistantClient.Protocol.cs create mode 100644 .dotnet/src/Custom/Assistants/AssistantClient.cs create mode 100644 .dotnet/src/Custom/Assistants/AssistantCreationOptions.cs create mode 100644 .dotnet/src/Custom/Assistants/AssistantFileAssociation.cs create mode 100644 .dotnet/src/Custom/Assistants/AssistantModificationOptions.cs create mode 100644 .dotnet/src/Custom/Assistants/AssistantThread.cs create mode 100644 .dotnet/src/Custom/Assistants/CodeInterpreterToolDefinition.cs create mode 100644 .dotnet/src/Custom/Assistants/CodeInterpreterToolInfo.cs create mode 100644 .dotnet/src/Custom/Assistants/CreatedAtSortOrder.cs create mode 100644 .dotnet/src/Custom/Assistants/FunctionToolDefinition.cs create mode 100644 .dotnet/src/Custom/Assistants/FunctionToolInfo.cs create mode 100644 .dotnet/src/Custom/Assistants/ListQueryPage.cs create mode 100644 .dotnet/src/Custom/Assistants/MessageContent.Serialization.cs create mode 100644 .dotnet/src/Custom/Assistants/MessageContent.cs create mode 100644 .dotnet/src/Custom/Assistants/MessageCreationOptions.cs create mode 100644 .dotnet/src/Custom/Assistants/MessageFileAssociation.cs create mode 100644 .dotnet/src/Custom/Assistants/MessageImageFileContent.cs create mode 100644 .dotnet/src/Custom/Assistants/MessageRole.cs create mode 100644 .dotnet/src/Custom/Assistants/MessageTextContent.cs create mode 100644 .dotnet/src/Custom/Assistants/RequiredFunctionToolCall.cs create mode 100644 .dotnet/src/Custom/Assistants/RequiredToolCall.cs create mode 100644 
.dotnet/src/Custom/Assistants/RetrievalToolDefinition.cs create mode 100644 .dotnet/src/Custom/Assistants/RetrievalToolInfo.cs create mode 100644 .dotnet/src/Custom/Assistants/RunCreationOptions.cs create mode 100644 .dotnet/src/Custom/Assistants/RunError.cs create mode 100644 .dotnet/src/Custom/Assistants/RunErrorCode.cs create mode 100644 .dotnet/src/Custom/Assistants/RunModificationOptions.cs create mode 100644 .dotnet/src/Custom/Assistants/RunRequiredAction.Serialization.cs create mode 100644 .dotnet/src/Custom/Assistants/RunRequiredAction.cs create mode 100644 .dotnet/src/Custom/Assistants/RunStatus.cs create mode 100644 .dotnet/src/Custom/Assistants/RunTokenUsage.cs create mode 100644 .dotnet/src/Custom/Assistants/TextContentAnnotation.Serialization.cs create mode 100644 .dotnet/src/Custom/Assistants/TextContentAnnotation.cs create mode 100644 .dotnet/src/Custom/Assistants/TextContentFileCitationAnnotation.cs create mode 100644 .dotnet/src/Custom/Assistants/TextContentFilePathAnnotation.cs create mode 100644 .dotnet/src/Custom/Assistants/ThreadCreationOptions.cs create mode 100644 .dotnet/src/Custom/Assistants/ThreadInitializationMessage.cs create mode 100644 .dotnet/src/Custom/Assistants/ThreadMessage.cs create mode 100644 .dotnet/src/Custom/Assistants/ThreadModificationOptions.cs create mode 100644 .dotnet/src/Custom/Assistants/ThreadRun.cs create mode 100644 .dotnet/src/Custom/Assistants/ToolDefinition.Serialization.cs create mode 100644 .dotnet/src/Custom/Assistants/ToolDefinition.cs create mode 100644 .dotnet/src/Custom/Assistants/ToolInfo.Serialization.cs create mode 100644 .dotnet/src/Custom/Assistants/ToolInfo.cs create mode 100644 .dotnet/src/Custom/Assistants/ToolOutput.Serialization.cs create mode 100644 .dotnet/src/Custom/Assistants/ToolOutput.cs create mode 100644 .dotnet/src/Custom/Audio/AudioClient.cs create mode 100644 .dotnet/src/Custom/Audio/AudioDataFormat.cs create mode 100644 .dotnet/src/Custom/Audio/AudioTranscription.cs create mode 
100644 .dotnet/src/Custom/Audio/AudioTranscriptionFormat.cs create mode 100644 .dotnet/src/Custom/Audio/AudioTranscriptionOptions.cs create mode 100644 .dotnet/src/Custom/Audio/AudioTranslation.cs create mode 100644 .dotnet/src/Custom/Audio/AudioTranslationOptions.cs create mode 100644 .dotnet/src/Custom/Audio/TextToSpeechOptions.cs create mode 100644 .dotnet/src/Custom/Audio/TextToSpeechVoice.cs create mode 100644 .dotnet/src/Custom/Audio/TranscribedWord.cs create mode 100644 .dotnet/src/Custom/Audio/TranscriptionSegment.cs create mode 100644 .dotnet/src/Custom/Chat/ChatClient.cs create mode 100644 .dotnet/src/Custom/Chat/ChatCompletion.cs create mode 100644 .dotnet/src/Custom/Chat/ChatCompletionCollection.cs create mode 100644 .dotnet/src/Custom/Chat/ChatCompletionOptions.cs create mode 100644 .dotnet/src/Custom/Chat/ChatFinishReason.cs create mode 100644 .dotnet/src/Custom/Chat/ChatFunctionCall.Serialization.cs create mode 100644 .dotnet/src/Custom/Chat/ChatFunctionCall.cs create mode 100644 .dotnet/src/Custom/Chat/ChatFunctionConstraint.cs create mode 100644 .dotnet/src/Custom/Chat/ChatFunctionDefinition.cs create mode 100644 .dotnet/src/Custom/Chat/ChatFunctionToolCall.cs create mode 100644 .dotnet/src/Custom/Chat/ChatFunctionToolDefinition.cs create mode 100644 .dotnet/src/Custom/Chat/ChatLogProbabilityCollection.cs create mode 100644 .dotnet/src/Custom/Chat/ChatLogProbabilityResult.cs create mode 100644 .dotnet/src/Custom/Chat/ChatLogProbabilityResultItem.cs create mode 100644 .dotnet/src/Custom/Chat/ChatMessageContent.cs create mode 100644 .dotnet/src/Custom/Chat/ChatMessageContentKind.cs create mode 100644 .dotnet/src/Custom/Chat/ChatRequestAssistantMessage.cs create mode 100644 .dotnet/src/Custom/Chat/ChatRequestFunctionMessage.cs create mode 100644 .dotnet/src/Custom/Chat/ChatRequestMessage.Serialization.cs create mode 100644 .dotnet/src/Custom/Chat/ChatRequestMessage.cs create mode 100644 .dotnet/src/Custom/Chat/ChatRequestSystemMessage.cs create mode 
100644 .dotnet/src/Custom/Chat/ChatRequestToolMessage.cs create mode 100644 .dotnet/src/Custom/Chat/ChatRequestUserMessage.cs create mode 100644 .dotnet/src/Custom/Chat/ChatResponseFormat.cs create mode 100644 .dotnet/src/Custom/Chat/ChatRole.cs create mode 100644 .dotnet/src/Custom/Chat/ChatTokenUsage.cs create mode 100644 .dotnet/src/Custom/Chat/ChatToolCall.Serialization.cs create mode 100644 .dotnet/src/Custom/Chat/ChatToolCall.cs create mode 100644 .dotnet/src/Custom/Chat/ChatToolConstraint.cs create mode 100644 .dotnet/src/Custom/Chat/ChatToolDefinition.cs create mode 100644 .dotnet/src/Custom/Chat/StreamingChatUpdate.cs create mode 100644 .dotnet/src/Custom/Chat/StreamingFunctionToolCallUpdate.cs create mode 100644 .dotnet/src/Custom/Chat/StreamingToolCallUpdate.cs create mode 100644 .dotnet/src/Custom/Embeddings/Embedding.cs create mode 100644 .dotnet/src/Custom/Embeddings/EmbeddingClient.cs create mode 100644 .dotnet/src/Custom/Embeddings/EmbeddingCollection.cs create mode 100644 .dotnet/src/Custom/Embeddings/EmbeddingOptions.cs create mode 100644 .dotnet/src/Custom/Embeddings/EmbeddingTokenUsage.cs create mode 100644 .dotnet/src/Custom/Files/FileClient.cs create mode 100644 .dotnet/src/Custom/Files/OpenAIFileInfo.cs create mode 100644 .dotnet/src/Custom/Files/OpenAIFileInfoCollection.cs create mode 100644 .dotnet/src/Custom/Images/GeneratedImage.cs create mode 100644 .dotnet/src/Custom/Images/ImageClient.cs create mode 100644 .dotnet/src/Custom/Images/ImageGenerationCollection.cs create mode 100644 .dotnet/src/Custom/Images/ImageGenerationOptions.cs create mode 100644 .dotnet/src/Custom/Images/ImageQuality.cs create mode 100644 .dotnet/src/Custom/Images/ImageResponseFormat.cs create mode 100644 .dotnet/src/Custom/Images/ImageSize.cs create mode 100644 .dotnet/src/Custom/Images/ImageStyle.cs create mode 100644 .dotnet/src/Custom/LegacyCompletions/LegacyCompletionClient.cs create mode 100644 .dotnet/src/Custom/Models/ModelDetailCollection.cs create mode 
100644 .dotnet/src/Custom/Models/ModelDetails.cs create mode 100644 .dotnet/src/Custom/Models/ModelManagementClient.cs create mode 100644 .dotnet/src/Custom/Moderations/ModerationClient.cs create mode 100644 .dotnet/src/Custom/OpenAIClient.cs create mode 100644 .dotnet/src/Custom/OpenAIClientConnector.cs create mode 100644 .dotnet/src/Custom/OpenAIClientOptions.cs create mode 100644 .dotnet/src/Polyfill/System.Diagnostics.CodeAnalysis.SetsRequiredMembersAttribute.cs create mode 100644 .dotnet/src/Polyfill/System.Runtime.CompilerServices.CompilerFeatureRequiredAttribute.cs create mode 100644 .dotnet/src/Polyfill/System.Runtime.CompilerServices.IsExternalInit.cs create mode 100644 .dotnet/src/Polyfill/System.Runtime.CompilerServices.RequiredMemberAttribute.cs create mode 100644 .dotnet/src/Utility/GenericActionPipelinePolicy.cs create mode 100644 .dotnet/src/Utility/SseAsyncEnumerator.cs create mode 100644 .dotnet/src/Utility/SseLine.cs create mode 100644 .dotnet/src/Utility/SseReader.cs create mode 100644 .dotnet/src/Utility/StreamingResult.cs create mode 100644 .dotnet/src/Utility/System.ClientModel.MultipartContent.cs create mode 100644 .dotnet/src/Utility/System.ClientModel.MultipartFormDataContent.cs delete mode 100644 .dotnet/tests/Generated/Tests/AssistantsTests.cs delete mode 100644 .dotnet/tests/Generated/Tests/AudioTests.cs delete mode 100644 .dotnet/tests/Generated/Tests/ChatTests.cs delete mode 100644 .dotnet/tests/Generated/Tests/CompletionsTests.cs delete mode 100644 .dotnet/tests/Generated/Tests/EmbeddingsTests.cs delete mode 100644 .dotnet/tests/Generated/Tests/FilesTests.cs delete mode 100644 .dotnet/tests/Generated/Tests/FineTuningTests.cs delete mode 100644 .dotnet/tests/Generated/Tests/ImagesTests.cs delete mode 100644 .dotnet/tests/Generated/Tests/MessagesTests.cs delete mode 100644 .dotnet/tests/Generated/Tests/ModelsOpsTests.cs delete mode 100644 .dotnet/tests/Generated/Tests/ModerationsTests.cs delete mode 100644 
.dotnet/tests/Generated/Tests/RunsTests.cs delete mode 100644 .dotnet/tests/Generated/Tests/ThreadsTests.cs create mode 100644 .dotnet/tests/Samples/AssistantsSamples.cs create mode 100644 .dotnet/tests/Samples/Chat/ChatSamples.cs create mode 100644 .dotnet/tests/Samples/Chat/Sample_FunctionCalling.cs create mode 100644 .dotnet/tests/Samples/CombinationSamples.cs create mode 100644 .dotnet/tests/Samples/EmbeddingSamples.cs create mode 100644 .dotnet/tests/Samples/ImageSamples.cs create mode 100644 .dotnet/tests/TestScenarios/AssistantTests.cs create mode 100644 .dotnet/tests/TestScenarios/ChatClientTests.cs create mode 100644 .dotnet/tests/TestScenarios/ChatToolConstraints.cs create mode 100644 .dotnet/tests/TestScenarios/ChatToolTests.cs create mode 100644 .dotnet/tests/TestScenarios/ChatWithVision.cs create mode 100644 .dotnet/tests/TestScenarios/EmbeddingClientTests.cs create mode 100644 .dotnet/tests/TestScenarios/FileClientTests.cs create mode 100644 .dotnet/tests/TestScenarios/ImageGenerationTests.cs create mode 100644 .dotnet/tests/TestScenarios/LegacyCompletions.cs create mode 100644 .dotnet/tests/TestScenarios/ModelClientTests.cs create mode 100644 .dotnet/tests/TestScenarios/TextToSpeechTests.cs create mode 100644 .dotnet/tests/TestScenarios/TranscriptionTests.cs create mode 100644 .dotnet/tests/TestScenarios/TranslationTests.cs create mode 100644 .dotnet/tests/Utility/TestHelpers.cs create mode 100644 .dotnet/tests/Utility/TestPipelinePolicy.cs create mode 100644 .dotnet/tests/data/hello_world.m4a create mode 100644 .dotnet/tests/data/hola_mundo.m4a create mode 100644 .dotnet/tests/data/stop_sign.png diff --git a/.dotnet/Directory.Build.props b/.dotnet/Directory.Build.props new file mode 100644 index 000000000..a95ac958c --- /dev/null +++ b/.dotnet/Directory.Build.props @@ -0,0 +1,5 @@ + + + latest + + \ No newline at end of file diff --git a/.dotnet/scripts/Add-Customizations.ps1 b/.dotnet/scripts/Add-Customizations.ps1 new file mode 100644 index 
000000000..c7eb4412b --- /dev/null +++ b/.dotnet/scripts/Add-Customizations.ps1 @@ -0,0 +1,28 @@ +function Update-SystemTextJsonPackage { + $current = Get-Location + $root = Split-Path $PSScriptRoot -Parent + + # Update System.Text.Json package to 8.0.2 in OpenAI.csproj + $directory = Join-Path -Path $root -ChildPath "src" + Set-Location -Path $directory + dotnet remove "OpenAI.csproj" package "System.Text.Json" + dotnet add "OpenAI.csproj" package "System.Text.Json" --version "8.0.2" + + Set-Location -Path $current +} + +function Update-MicrosoftBclAsyncInterfacesPackage { + $current = Get-Location + $root = Split-Path $PSScriptRoot -Parent + + # Update Microsoft.Bcl.AsyncInterfaces package to 8.0.0 in OpenAI.Tests.csproj + $directory = Join-Path -Path $root -ChildPath "tests" + Set-Location -Path $directory + dotnet remove "OpenAI.Tests.csproj" package "Microsoft.Bcl.AsyncInterfaces" + dotnet add "OpenAI.Tests.csproj" package "Microsoft.Bcl.AsyncInterfaces" --version "8.0.0" + + Set-Location -Path $current +} + +Update-SystemTextJsonPackage +Update-MicrosoftBclAsyncInterfacesPackage \ No newline at end of file diff --git a/.dotnet/scripts/ConvertTo-Internal.ps1 b/.dotnet/scripts/ConvertTo-Internal.ps1 index ec809e075..60f66e75e 100644 --- a/.dotnet/scripts/ConvertTo-Internal.ps1 +++ b/.dotnet/scripts/ConvertTo-Internal.ps1 @@ -1,17 +1,41 @@ -$root = Split-Path $PSScriptRoot -Parent -$directory = Join-Path -Path $root -ChildPath "src\Generated" -$files = Get-ChildItem -Path $($directory + "\*") -Include "*.cs" -Recurse +function Edit-GeneratedSources { + $root = Split-Path $PSScriptRoot -Parent -foreach ($file in $files) { + $directory = Join-Path -Path $root -ChildPath "src\Generated" + $files = Get-ChildItem -Path $($directory + "\*") -Include "*.cs" -Recurse + + foreach ($file in $files) { + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "public partial class", "internal partial class" + 
$content = $content -creplace "public readonly partial struct", "internal readonly partial struct" + $content = $content -creplace "public static partial class", "internal static partial class" + $content = $content -creplace "namespace OpenAI", "namespace OpenAI.Internal" + $content = $content -creplace "using OpenAI.Models;", "using OpenAI.Internal.Models;" + + $content | Set-Content -Path $file.FullName -NoNewline + } + + $file = Get-ChildItem -Path $directory -Filter "OpenAIClient.cs" $content = Get-Content -Path $file -Raw Write-Output "Editing $($file.FullName)" - $content = $content -creplace "public partial class", "internal partial class" - $content = $content -creplace "public readonly partial struct", "internal readonly partial struct" - $content = $content -creplace "public static partial class", "internal static partial class" - $content = $content -creplace "namespace OpenAI", "namespace OpenAI.Internal" - $content = $content -creplace "using OpenAI.Models;", "using OpenAI.Internal.Models;" + $content = $content -creplace "private (OpenAI.)?(?\w+) _cached(\w+);", "private OpenAI.Internal.`${var} _cached`${var};" + $content = $content -creplace "public virtual (OpenAI.)?(?\w+) Get(\w+)Client", "public virtual OpenAI.Internal.`${var} Get`${var}Client" + $content = $content -creplace "ref _cached(\w+), new (OpenAI.)?(?\w+)", "ref _cached`${var}, new OpenAI.Internal.`${var}" $content | Set-Content -Path $file.FullName -NoNewline -} \ No newline at end of file +} + +function Remove-GeneratedTests { + $root = Split-Path $PSScriptRoot -Parent + + $directory = Join-Path -Path $root -ChildPath "tests\Generated" + Remove-Item -LiteralPath $directory -Recurse -Force +} + +Edit-GeneratedSources +Remove-GeneratedTests diff --git a/.dotnet/scripts/Update-ClientModel.ps1 b/.dotnet/scripts/Update-ClientModel.ps1 index 9cab3caf9..a833fc8b7 100644 --- a/.dotnet/scripts/Update-ClientModel.ps1 +++ b/.dotnet/scripts/Update-ClientModel.ps1 @@ -1,16 +1,16 @@ -function 
Update-ClientModelPackage { +function Update-SystemClientModelPackage { $current = Get-Location $root = Split-Path $PSScriptRoot -Parent + # Update System.ClientModel package in OpenAI.csproj $directory = Join-Path -Path $root -ChildPath "src" Set-Location -Path $directory - dotnet remove "OpenAI.csproj" package "System.ClientModel" dotnet add "OpenAI.csproj" package "System.ClientModel" --version "1.1.0-alpha.20240227.1" --source "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-net/nuget/v3/index.json" + # Update System.ClientModel package in OpenAI.Tests.csproj $directory = Join-Path -Path $root -ChildPath "tests" Set-Location -Path $directory - dotnet remove "OpenAI.Tests.csproj" package "System.ClientModel" dotnet add "OpenAI.Tests.csproj" package "System.ClientModel" --version "1.1.0-alpha.20240227.1" --source "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-net/nuget/v3/index.json" @@ -196,7 +196,7 @@ function Update-Tests { } } -Update-ClientModelPackage +Update-SystemClientModelPackage Update-OpenAIClient Update-OpenAIClientOptions Update-Subclients diff --git a/.dotnet/src/Custom/Assistants/Assistant.cs b/.dotnet/src/Custom/Assistants/Assistant.cs new file mode 100644 index 000000000..02dcede78 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/Assistant.cs @@ -0,0 +1,48 @@ +using System; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class Assistant +{ + public string Id { get; } + public DateTimeOffset CreatedAt { get; } + public string Name { get; } + public string Description { get; } + public string DefaultModel { get; } + public string DefaultInstructions { get; } + public IReadOnlyList DefaultTools { get; } + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. 
+ /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. + /// + /// + public IReadOnlyDictionary Metadata { get; } + + internal Assistant(Internal.Models.AssistantObject internalAssistant) + { + Id = internalAssistant.Id; + CreatedAt = internalAssistant.CreatedAt; + Name = internalAssistant.Name; + Description = internalAssistant.Description; + DefaultModel = internalAssistant.Model; + DefaultInstructions = internalAssistant.Instructions; + Metadata = internalAssistant.Metadata; + + if (internalAssistant.Tools != null) + { + List tools = []; + foreach (BinaryData unionToolDefinitionData in internalAssistant.Tools) + { + tools.Add(ToolDefinition.DeserializeToolDefinition(JsonDocument.Parse(unionToolDefinitionData).RootElement)); + } + DefaultTools = tools; + } + } +} diff --git a/.dotnet/src/Custom/Assistants/AssistantClient.Protocol.cs b/.dotnet/src/Custom/Assistants/AssistantClient.Protocol.cs new file mode 100644 index 000000000..a0f55c3e0 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/AssistantClient.Protocol.cs @@ -0,0 +1,429 @@ +using System.ClientModel; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.ComponentModel; +using System.Threading.Tasks; + +namespace OpenAI.Assistants; + +public partial class AssistantClient +{ + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CreateAssistant(BinaryContent content, RequestOptions context = null) + { + return Shim.CreateAssistant(content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task CreateAssistantAsync(BinaryContent content, RequestOptions context = null) + { + return Shim.CreateAssistantAsync(content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetAssistant(string assistantId, RequestOptions context) + { + return Shim.GetAssistant(assistantId, context); + } + + 
[EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetAssistantAsync(string assistantId, RequestOptions context) + { + return Shim.GetAssistantAsync(assistantId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetAssistants( + int? maxResults, + string createdSortOrder, + string previousAssistantId, + string subsequentAssistantId, + RequestOptions context) + { + return Shim.GetAssistants(maxResults, createdSortOrder, previousAssistantId, subsequentAssistantId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetAssistantsAsync( + int? maxResults, + string createdSortOrder, + string previousAssistantId, + string subsequentAssistantId, + RequestOptions context) + { + return Shim.GetAssistantsAsync(maxResults, createdSortOrder, previousAssistantId, subsequentAssistantId, context); + } + + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult ModifyAssistant(string assistantId, BinaryContent content, RequestOptions context = null) + { + return Shim.ModifyAssistant(assistantId, content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task ModifyAssistantAsync(string assistantId, BinaryContent content, RequestOptions context = null) + { + return Shim.ModifyAssistantAsync(assistantId, content, context); + } + + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult DeleteAssistant(string assistantId, RequestOptions context) + { + return Shim.DeleteAssistant(assistantId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task DeleteAssistantAsync(string assistantId, RequestOptions context) + { + return Shim.DeleteAssistantAsync(assistantId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CreateAssistantFileAssociation( + string assistantId, + BinaryContent content, + RequestOptions context = null) + { + return 
Shim.CreateAssistantFile(assistantId, content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task CreateAssistantFileAssociationAsync( + string assistantId, + BinaryContent content, + RequestOptions context = null) + { + return Shim.CreateAssistantFileAsync(assistantId, content, context); + } + + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetAssistantFileAssociation(string assistantId, string fileId, RequestOptions context) + { + return Shim.GetAssistantFile(assistantId, fileId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetAssistantFileAssociationAsync(string assistantId, string fileId, RequestOptions context) + { + return Shim.GetAssistantFileAsync(assistantId, fileId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetAssistantFileAssociations( + string assistantId, + int? maxResults, + string createdSortOrder, + string previousId, + string subsequentId, + RequestOptions context) + { + return Shim.GetAssistantFiles(assistantId, maxResults, createdSortOrder, previousId, subsequentId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetAssistantFileAssociationsAsync( + string assistantId, + int? 
maxResults, + string createdSortOrder, + string previousId, + string subsequentId, + RequestOptions context) + { + return Shim + .GetAssistantFilesAsync(assistantId, maxResults, createdSortOrder, previousId, subsequentId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult RemoveAssistantFileAssociation(string assistantId, string fileId, RequestOptions context) + { + return Shim.DeleteAssistantFile(assistantId, fileId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task RemoveAssistantFileAssociationAsync(string assistantId, string fileId, RequestOptions context) + { + return Shim.DeleteAssistantFileAsync(assistantId, fileId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CreateThread(BinaryContent content, RequestOptions context = null) + { + return ThreadShim.CreateThread(content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task CreateThreadAsync(BinaryContent content, RequestOptions context = null) + { + return ThreadShim.CreateThreadAsync(content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetThread(string threadId, RequestOptions context) + { + return ThreadShim.GetThread(threadId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetThreadAsync(string threadId, RequestOptions context) + { + return ThreadShim.GetThreadAsync(threadId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult ModifyThread(string threadId, BinaryContent content, RequestOptions context = null) + { + return ThreadShim.ModifyThread(threadId, content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task ModifyThreadAsync(string threadId, BinaryContent content, RequestOptions context = null) + { + return ThreadShim.ModifyThreadAsync(threadId, content, context); + } + + 
[EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult DeleteThread(string threadId, RequestOptions context) + { + return ThreadShim.DeleteThread(threadId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task DeleteThreadAsync(string threadId, RequestOptions context) + { + return ThreadShim.DeleteThreadAsync(threadId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CreateMessage(string threadId, BinaryContent content, RequestOptions context = null) + { + return MessageShim.CreateMessage(threadId, content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task CreateMessageAsync(string threadId, BinaryContent content, RequestOptions context = null) + { + return MessageShim.CreateMessageAsync(threadId, content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetMessage(string threadId, string messageId, RequestOptions context) + { + return MessageShim.GetMessage(threadId, messageId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetMessageAsync(string threadId, string messageId, RequestOptions context) + { + return MessageShim.GetMessageAsync(threadId, messageId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetMessages( + string threadId, + int? maxResults, + string createdSortOrder, + string previousMessageId, + string subsequentMessageId, + RequestOptions context) + { + return MessageShim + .GetMessages(threadId, maxResults, createdSortOrder, previousMessageId, subsequentMessageId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetMessagesAsync( + string threadId, + int? 
maxResults, + string createdSortOrder, + string previousMessageId, + string subsequentMessageId, + RequestOptions context) + { + return MessageShim + .GetMessagesAsync(threadId, maxResults, createdSortOrder, previousMessageId, subsequentMessageId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetMessageFileAssociation(string threadId, string messageId, string fileId, RequestOptions context) + { + return MessageShim.GetMessageFile(threadId, messageId, fileId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetMessageFileAssociationAsync( + string threadId, + string messageId, + string fileId, + RequestOptions context) + { + return MessageShim.GetMessageFileAsync(threadId, messageId, fileId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetMessageFileAssociations( + string threadId, + string messageId, + int? maxResults, + string createdSortOrder, + string previousId , + string subsequentId, + RequestOptions context) + { + return MessageShim + .GetMessageFiles(threadId, messageId, maxResults, createdSortOrder, previousId, subsequentId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetMessageFileAssociationsAsync( + string threadId, + string messageId, + int? 
maxResults, + string createdSortOrder, + string previousId, + string subsequentId, + RequestOptions context) + { + return MessageShim + .GetMessageFilesAsync(threadId, messageId, maxResults, createdSortOrder, previousId, subsequentId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CreateRun(string threadId, BinaryContent content, RequestOptions context = null) + { + return RunShim.CreateRun(threadId, content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task CreateRunAsync(string threadId, BinaryContent content, RequestOptions context = null) + { + return RunShim.CreateRunAsync(threadId, content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CreateThreadAndRun(BinaryContent content, RequestOptions context = null) + { + return RunShim.CreateThreadAndRun(content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task CreateThreadAndRunAsync(BinaryContent content, RequestOptions context = null) + { + return RunShim.CreateThreadAndRunAsync(content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetRun(string threadId, string runId, RequestOptions context) + { + return RunShim.GetRun(threadId, runId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetRunAsync(string threadId, string runId, RequestOptions context) + { + return RunShim.GetRunAsync(threadId, runId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetRuns( + string threadId, + int? maxResults, + string createdSortOrder, + string previousRunId, + string subsequentRunId, + RequestOptions context) + { + return RunShim.GetRuns(threadId, maxResults, createdSortOrder, previousRunId, subsequentRunId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetRunsAsync( + string threadId, + int? 
maxResults, + string createdSortOrder, + string previousRunId, + string subsequentRunId, + RequestOptions context) + { + return RunShim.GetRunsAsync(threadId, maxResults, createdSortOrder, previousRunId, subsequentRunId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult ModifyRun(string threadId, string runId, BinaryContent content, RequestOptions context = null) + { + return RunShim.ModifyRun(threadId, runId, content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task ModifyRunAsync( + string threadId, + string runId, + BinaryContent content, + RequestOptions context = null) + { + return RunShim.ModifyRunAsync(threadId, runId, content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CancelRun(string threadId, string runId, RequestOptions context) + { + return RunShim.CancelRun(threadId, runId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task CancelRunAsync(string threadId, string runId, RequestOptions context) + { + return RunShim.CancelRunAsync(threadId, runId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult SubmitToolOutputs(string threadId, string runId, BinaryContent content, RequestOptions context = null) + { + return RunShim.SubmitToolOuputsToRun(threadId, runId, content, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task SubmitToolOutputsAsync(string threadId, string runId, BinaryContent content, RequestOptions context = null) + { + return RunShim.SubmitToolOuputsToRunAsync(threadId, runId, content, context); + } + + public virtual ClientResult GetRunStep(string threadId, string runId, string stepId, RequestOptions context) + { + return RunShim.GetRunStep(threadId, runId, stepId, context); + } + + public virtual Task GetRunStepAsync(string threadId, string runId, string stepId, RequestOptions context) + { + return 
RunShim.GetRunStepAsync(threadId, runId, stepId, context); + } + + public virtual ClientResult GetRunSteps( + string threadId, + string runId, + int? maxResults, + string createdSortOrder, + string previousStepId, + string subsequentStepId, + RequestOptions context) + { + return RunShim + .GetRunSteps(threadId, runId, maxResults, createdSortOrder, previousStepId, subsequentStepId, context); + } + + public virtual Task GetRunStepsAsync( + string threadId, + string runId, + int? maxResults, + string createdSortOrder, + string previousStepId, + string subsequentStepId, + RequestOptions context) + { + return RunShim + .GetRunStepsAsync(threadId, runId, maxResults, createdSortOrder, previousStepId, subsequentStepId, context); + } +} diff --git a/.dotnet/src/Custom/Assistants/AssistantClient.cs b/.dotnet/src/Custom/Assistants/AssistantClient.cs new file mode 100644 index 000000000..72f33f119 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/AssistantClient.cs @@ -0,0 +1,771 @@ +using System; +using System.ClientModel; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Internal; +using System.Text.Json; +using OpenAI.ClientShared.Internal; + +namespace OpenAI.Assistants; + +/// +/// The service client for OpenAI assistants. +/// +public partial class AssistantClient +{ + private OpenAIClientConnector _clientConnector; + private Internal.Assistants Shim => _clientConnector.InternalClient.GetAssistantsClient(); + private Internal.Threads ThreadShim => _clientConnector.InternalClient.GetThreadsClient(); + private Internal.Messages MessageShim => _clientConnector.InternalClient.GetMessagesClient(); + private Internal.Runs RunShim => _clientConnector.InternalClient.GetRunsClient(); + + /// + /// Initializes a new instance of , used for assistant requests. 
+ /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public AssistantClient(Uri endpoint, ApiKeyCredential credential, OpenAIClientOptions options = null) + { + options ??= new(); + options.AddPolicy( + new GenericActionPipelinePolicy((m) => m.Request?.Headers.Set("OpenAI-Beta", "assistants=v1")), + PipelinePosition.PerCall); + _clientConnector = new("none", endpoint, credential, options); + } + + /// + /// Initializes a new instance of , used for assistant requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// Additional options to customize the client. + public AssistantClient(Uri endpoint, OpenAIClientOptions options = null) + : this(endpoint, credential: null, options) + { } + + /// + /// Initializes a new instance of , used for assistant requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The API key used to authenticate with the service endpoint. 
+ /// Additional options to customize the client. + public AssistantClient(ApiKeyCredential credential, OpenAIClientOptions options = null) + : this(endpoint: null, credential, options) + { } + + /// + /// Initializes a new instance of , used for assistant requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// Additional options to customize the client. + public AssistantClient(OpenAIClientOptions options = null) + : this(endpoint: null, credential: null, options) + { } + + public virtual ClientResult CreateAssistant(string modelName, AssistantCreationOptions options = null) + { + Internal.Models.CreateAssistantRequest request = CreateInternalCreateAssistantRequest(modelName, options); + ClientResult internalResult = Shim.CreateAssistant(request); + return ClientResult.FromValue(new Assistant(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> CreateAssistantAsync(string modelName, AssistantCreationOptions options = null) + { + Internal.Models.CreateAssistantRequest request = CreateInternalCreateAssistantRequest(modelName, options); + ClientResult internalResult + = await Shim.CreateAssistantAsync(request).ConfigureAwait(false); + return ClientResult.FromValue(new Assistant(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult GetAssistant(string assistantId) + { + ClientResult internalResult = Shim.GetAssistant(assistantId); + return ClientResult.FromValue(new Assistant(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetAssistantAsync( + string assistantId) + { + ClientResult internalResult + = await 
Shim.GetAssistantAsync(assistantId).ConfigureAwait(false); + return ClientResult.FromValue(new Assistant(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult> GetAssistants( + int? maxResults = null, + CreatedAtSortOrder? createdSortOrder = null, + string previousAssistantId = null, + string subsequentAssistantId = null) + { + ClientResult internalFunc() => Shim.GetAssistants( + maxResults, + ToInternalListOrder(createdSortOrder), + previousAssistantId, + subsequentAssistantId); + return GetListQueryPage(internalFunc); + } + + public virtual Task>> GetAssistantsAsync( + int? maxResults = null, + CreatedAtSortOrder? createdSortOrder = null, + string previousAssistantId = null, + string subsequentAssistantId = null) + { + Task> internalAsyncFunc() => Shim.GetAssistantsAsync( + maxResults, + ToInternalListOrder(createdSortOrder), + previousAssistantId, + subsequentAssistantId); + return GetListQueryPageAsync(internalAsyncFunc); + } + + public virtual ClientResult ModifyAssistant( + string assistantId, + AssistantModificationOptions options) + { + ClientResult internalResult + = Shim.ModifyAssistant(assistantId, CreateInternalModifyAssistantRequest(options)); + return ClientResult.FromValue(new Assistant(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> ModifyAssistantAsync( + string assistantId, + AssistantModificationOptions options) + { + Internal.Models.ModifyAssistantRequest request = CreateInternalModifyAssistantRequest(options); + ClientResult internalResult + = await Shim.ModifyAssistantAsync(assistantId, request).ConfigureAwait(false); + return ClientResult.FromValue(new Assistant(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult DeleteAssistant(string assistantId) + { + ClientResult internalResponse = Shim.DeleteAssistant(assistantId); + return ClientResult.FromValue(internalResponse.Value.Deleted, internalResponse.GetRawResponse()); + 
} + + public virtual async Task> DeleteAssistantAsync( + string assistantId) + { + ClientResult internalResponse + = await Shim.DeleteAssistantAsync(assistantId).ConfigureAwait(false); + return ClientResult.FromValue(internalResponse.Value.Deleted, internalResponse.GetRawResponse()); + } + + public virtual ClientResult CreateAssistantFileAssociation( + string assistantId, + string fileId) + { + ClientResult internalResult + = Shim.CreateAssistantFile(assistantId, new(fileId)); + return ClientResult.FromValue(new AssistantFileAssociation(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> CreateAssistantFileAssociationAsync( + string assistantId, + string fileId) + { + ClientResult internalResult + = await Shim.CreateAssistantFileAsync(assistantId, new(fileId)).ConfigureAwait(false); + return ClientResult.FromValue(new AssistantFileAssociation(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult GetAssistantFileAssociation( + string assistantId, + string fileId) + { + ClientResult internalResult = Shim.GetAssistantFile(assistantId, fileId); + return ClientResult.FromValue(new AssistantFileAssociation(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetAssistantFileAssociationAsync( + string assistantId, + string fileId) + { + ClientResult internalResult + = await Shim.GetAssistantFileAsync(assistantId, fileId).ConfigureAwait(false); + return ClientResult.FromValue(new AssistantFileAssociation(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult> GetAssistantFileAssociations( + string assistantId, + int? maxResults = null, + CreatedAtSortOrder? 
createdSortOrder = null, + string previousId = null, + string subsequentId = null) + { + ClientResult internalFunc() => Shim.GetAssistantFiles( + assistantId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousId, + subsequentId); + return GetListQueryPage(internalFunc); + } + + public virtual Task>> GetAssistantFileAssociationsAsync( + string assistantId, + int? maxResults = null, + CreatedAtSortOrder? createdSortOrder = null, + string previousId = null, + string subsequentId = null) + { + Func>> internalFunc + = () => Shim.GetAssistantFilesAsync( + assistantId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousId, + subsequentId); + return GetListQueryPageAsync(internalFunc); + } + + public virtual ClientResult RemoveAssistantFileAssociation( + string assistantId, + string fileId) + { + ClientResult internalResult + = Shim.DeleteAssistantFile(assistantId, fileId); + return ClientResult.FromValue(internalResult.Value.Deleted, internalResult.GetRawResponse()); + } + + public virtual async Task> RemoveAssistantFileAssociationAsync( + string assistantId, + string fileId) + { + ClientResult internalResult + = await Shim.DeleteAssistantFileAsync(assistantId, fileId).ConfigureAwait(false); + return ClientResult.FromValue(internalResult.Value.Deleted, internalResult.GetRawResponse()); + } + + public virtual ClientResult CreateThread( + ThreadCreationOptions options = null) + { + Internal.Models.CreateThreadRequest request = CreateInternalCreateThreadRequest(options); + ClientResult internalResult = ThreadShim.CreateThread(request); + return ClientResult.FromValue(new AssistantThread(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> CreateThreadAsync( + ThreadCreationOptions options = null) + { + Internal.Models.CreateThreadRequest request = CreateInternalCreateThreadRequest(options); + ClientResult internalResult + = await ThreadShim.CreateThreadAsync(request).ConfigureAwait(false); + return 
ClientResult.FromValue(new AssistantThread(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult GetThread(string threadId) + { + ClientResult internalResult = ThreadShim.GetThread(threadId); + return ClientResult.FromValue(new AssistantThread(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetThreadAsync( + string threadId) + { + ClientResult internalResult + = await ThreadShim.GetThreadAsync(threadId).ConfigureAwait(false); + return ClientResult.FromValue(new AssistantThread(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult ModifyThread( + string threadId, + ThreadModificationOptions options) + { + Internal.Models.ModifyThreadRequest request = new( + options.Metadata, + serializedAdditionalRawData: null); + ClientResult internalResult = ThreadShim.ModifyThread(threadId, request); + return ClientResult.FromValue(new AssistantThread(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> ModifyThreadAsync( + string threadId, + ThreadModificationOptions options) + { + Internal.Models.ModifyThreadRequest request = new( + options.Metadata, + serializedAdditionalRawData: null); + ClientResult internalResult + = await ThreadShim.ModifyThreadAsync(threadId, request); + return ClientResult.FromValue(new AssistantThread(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult DeleteThread(string threadId) + { + ClientResult internalResult = ThreadShim.DeleteThread(threadId); + return ClientResult.FromValue(internalResult.Value.Deleted, internalResult.GetRawResponse()); + } + + public virtual async Task> DeleteThreadAsync(string threadId) + { + ClientResult internalResult + = await ThreadShim.DeleteThreadAsync(threadId).ConfigureAwait(false); + return ClientResult.FromValue(internalResult.Value.Deleted, internalResult.GetRawResponse()); + } + + public virtual ClientResult 
CreateMessage( + string threadId, + MessageRole role, + string content, + MessageCreationOptions options = null) + { + Internal.Models.CreateMessageRequest request = new( + ToInternalRequestRole(role), + content, + options.FileIds, + options.Metadata, + serializedAdditionalRawData: null); + ClientResult internalResult = MessageShim.CreateMessage(threadId, request); + return ClientResult.FromValue(new ThreadMessage(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> CreateMessageAsync( + string threadId, + MessageRole role, + string content, + MessageCreationOptions options = null) + { + Internal.Models.CreateMessageRequest request = new( + ToInternalRequestRole(role), + content, + options.FileIds, + options.Metadata, + serializedAdditionalRawData: null); + ClientResult internalResult + = await MessageShim.CreateMessageAsync(threadId, request).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadMessage(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult GetMessage( + string threadId, + string messageId) + { + ClientResult internalResult = MessageShim.GetMessage(threadId, messageId); + return ClientResult.FromValue(new ThreadMessage(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetMessageAsync( + string threadId, + string messageId) + { + ClientResult internalResult + = await MessageShim.GetMessageAsync(threadId, messageId).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadMessage(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult> GetMessages( + string threadId, + int? maxResults = null, + CreatedAtSortOrder? 
createdSortOrder = null, + string previousMessageId = null, + string subsequentMessageId = null) + { + ClientResult internalFunc() => MessageShim.GetMessages( + threadId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousMessageId, + subsequentMessageId); + return GetListQueryPage(internalFunc); + } + + public virtual Task>> GetMessagesAsync( + string threadId, + int? maxResults = null, + CreatedAtSortOrder? createdSortOrder = null, + string previousMessageId = null, + string subsequentMessageId = null) + { + Func>> internalFunc = () => MessageShim.GetMessagesAsync( + threadId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousMessageId, + subsequentMessageId); + return GetListQueryPageAsync(internalFunc); + } + + public virtual ClientResult GetMessageFileAssociation( + string threadId, + string messageId, + string fileId) + { + ClientResult internalResult + = MessageShim.GetMessageFile(threadId, messageId, fileId); + return ClientResult.FromValue(new MessageFileAssociation(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetMessageFileAssociationAsync( + string threadId, + string messageId, + string fileId) + { + ClientResult internalResult + = await MessageShim.GetMessageFileAsync(threadId, messageId, fileId).ConfigureAwait(false); + return ClientResult.FromValue(new MessageFileAssociation(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult> GetMessageFileAssociations( + string threadId, + string messageId, + int? maxResults = null, + CreatedAtSortOrder? 
createdSortOrder = null, + string previousId = null, + string subsequentId = null) + { + ClientResult internalFunc() => MessageShim.GetMessageFiles( + threadId, + messageId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousId, + subsequentId); + return GetListQueryPage(internalFunc); + } + + public virtual Task>> GetMessageFileAssociationsAsync( + string threadId, + string messageId, + int? maxResults = null, + CreatedAtSortOrder? createdSortOrder = null, + string previousId = null, + string subsequentId = null) + { + Task> internalFunc() => MessageShim.GetMessageFilesAsync( + threadId, + messageId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousId, + subsequentId); + return GetListQueryPageAsync(internalFunc); + } + + public virtual ClientResult CreateRun( + string threadId, + string assistantId, + RunCreationOptions options = null) + { + Internal.Models.CreateRunRequest request = CreateInternalCreateRunRequest(assistantId, options); + ClientResult internalResult = RunShim.CreateRun(threadId, request); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> CreateRunAsync( + string threadId, + string assistantId, + RunCreationOptions options = null) + { + Internal.Models.CreateRunRequest request = CreateInternalCreateRunRequest(assistantId, options); + ClientResult internalResult + = await RunShim.CreateRunAsync(threadId, request).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult CreateThreadAndRun( + string assistantId, + ThreadCreationOptions threadOptions = null, + RunCreationOptions runOptions = null) + { + Internal.Models.CreateThreadAndRunRequest request + = CreateInternalCreateThreadAndRunRequest(assistantId, threadOptions, runOptions); + ClientResult internalResult = RunShim.CreateThreadAndRun(request); + return 
ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> CreateThreadAndRunAsync( + string assistantId, + ThreadCreationOptions threadOptions = null, + RunCreationOptions runOptions = null) + { + Internal.Models.CreateThreadAndRunRequest request + = CreateInternalCreateThreadAndRunRequest(assistantId, threadOptions, runOptions); + ClientResult internalResult + = await RunShim.CreateThreadAndRunAsync(request).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult GetRun(string threadId, string runId) + { + ClientResult internalResult = RunShim.GetRun(threadId, runId); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetRunAsync(string threadId, string runId) + { + ClientResult internalResult + = await RunShim.GetRunAsync(threadId, runId).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult> GetRuns( + string threadId, + int? maxResults = null, + CreatedAtSortOrder? createdSortOrder = null, + string previousRunId = null, + string subsequentRunId = null) + { + ClientResult internalFunc() => RunShim.GetRuns( + threadId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousRunId, + subsequentRunId); + return GetListQueryPage(internalFunc); + } + + public virtual Task>> GetRunsAsync( + string threadId, + int? maxResults = null, + CreatedAtSortOrder? 
createdSortOrder = null, + string previousRunId = null, + string subsequentRunId = null) + { + Func>> internalFunc = () => RunShim.GetRunsAsync( + threadId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousRunId, + subsequentRunId); + return GetListQueryPageAsync(internalFunc); + } + + public virtual ClientResult ModifyRun(string threadId, string runId, RunModificationOptions options) + { + Internal.Models.ModifyRunRequest request = new(options.Metadata, serializedAdditionalRawData: null); + ClientResult internalResult = RunShim.ModifyRun(threadId, runId, request); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> ModifyRunAsync(string threadId, string runId, RunModificationOptions options) + { + Internal.Models.ModifyRunRequest request = new(options.Metadata, serializedAdditionalRawData: null); + ClientResult internalResult + = await RunShim.ModifyRunAsync(threadId, runId, request).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult CancelRun(string threadId, string runId) + { + ClientResult internalResult = RunShim.CancelRun(threadId, runId); + return ClientResult.FromValue(true, internalResult.GetRawResponse()); + } + + public virtual async Task> CancelRunAsync(string threadId, string runId) + { + ClientResult internalResult + = await RunShim.CancelRunAsync(threadId, runId); + return ClientResult.FromValue(true, internalResult.GetRawResponse()); + } + + public virtual ClientResult SubmitToolOutputs(string threadId, string runId, IEnumerable toolOutputs) + { + BinaryContent content = BinaryContent.Create(BinaryData.FromObjectAsJson(new + { + tool_outputs = toolOutputs + }, + new JsonSerializerOptions() + { + PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower + })); + ClientResult internalResult = RunShim.SubmitToolOuputsToRun(threadId, runId, 
content, default); + return ClientResult.FromValue(true, internalResult.GetRawResponse()); + } + + public virtual async Task> SubmitToolOutputsAsync(string threadId, string runId, IEnumerable toolOutputs) + { + BinaryContent content = BinaryContent.Create(BinaryData.FromObjectAsJson(new + { + tool_outputs = toolOutputs + }, + new JsonSerializerOptions() + { + PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower + })); + ClientResult internalResult + = await RunShim.SubmitToolOuputsToRunAsync(threadId, runId, content, default).ConfigureAwait(false); + return ClientResult.FromValue(true, internalResult.GetRawResponse()); + } + + internal static Internal.Models.CreateAssistantRequest CreateInternalCreateAssistantRequest( + string modelName, + AssistantCreationOptions options) + { + options ??= new(); + return new Internal.Models.CreateAssistantRequest( + modelName, + options.Name, + options.Description, + options.Instructions, + ToInternalBinaryDataList(options.Tools), + options.FileIds, + options.Metadata, + serializedAdditionalRawData: null); + } + + internal static Internal.Models.ModifyAssistantRequest CreateInternalModifyAssistantRequest(AssistantModificationOptions options) + { + return new Internal.Models.ModifyAssistantRequest( + options.Model, + options.Name, + options.Description, + options.Instructions, + ToInternalBinaryDataList(options.Tools), + options.FileIds, + options.Metadata, + serializedAdditionalRawData: null); + } + + internal static Internal.Models.CreateThreadRequest CreateInternalCreateThreadRequest(ThreadCreationOptions options) + { + options ??= new(); + return new Internal.Models.CreateThreadRequest( + ToInternalCreateMessageRequestList(options.Messages), + options.Metadata, + serializedAdditionalRawData: null); + } + + internal static Internal.Models.CreateRunRequest CreateInternalCreateRunRequest( + string assistantId, + RunCreationOptions options = null) + { + options ??= new(); + return new( + assistantId, + options.OverrideModel, + 
options.OverrideInstructions, + options.AdditionalInstructions, + ToInternalBinaryDataList(options.OverrideTools), + options.Metadata, + serializedAdditionalRawData: null); + } + + internal static Internal.Models.CreateThreadAndRunRequest CreateInternalCreateThreadAndRunRequest( + string assistantId, + ThreadCreationOptions threadOptions, + RunCreationOptions runOptions) + { + threadOptions ??= new(); + runOptions ??= new(); + Internal.Models.CreateThreadRequest internalThreadOptions = CreateInternalCreateThreadRequest(threadOptions); + return new Internal.Models.CreateThreadAndRunRequest( + assistantId, + internalThreadOptions, + runOptions?.OverrideModel, + runOptions.OverrideInstructions, + ToInternalBinaryDataList(runOptions?.OverrideTools), + runOptions?.Metadata, + serializedAdditionalRawData: null); + } + + internal static OptionalList ToInternalBinaryDataList(IEnumerable values) + where T : IPersistableModel + { + OptionalList internalList = []; + foreach (T value in values) + { + internalList.Add(ModelReaderWriter.Write(value)); + } + return internalList; + } + + internal static Internal.Models.ListOrder? ToInternalListOrder(CreatedAtSortOrder? 
order) + { + if (order == null) + { + return null; + } + return order switch + { + CreatedAtSortOrder.OldestFirst => Internal.Models.ListOrder.Asc, + CreatedAtSortOrder.NewestFirst => Internal.Models.ListOrder.Desc, + _ => throw new ArgumentException(nameof(order)), + }; + } + + internal static Internal.Models.CreateMessageRequestRole ToInternalRequestRole(MessageRole role) + => role switch + { + MessageRole.User => Internal.Models.CreateMessageRequestRole.User, + _ => throw new ArgumentException(nameof(role)), + }; + + internal static OptionalList ToInternalCreateMessageRequestList( + IEnumerable messages) + { + OptionalList internalList = []; + foreach (ThreadInitializationMessage message in messages) + { + internalList.Add(new Internal.Models.CreateMessageRequest( + ToInternalRequestRole(message.Role), + message.Content, + message.FileIds, + message.Metadata, + serializedAdditionalRawData: null)); + } + return internalList; + } + + internal virtual ClientResult> GetListQueryPage(Func> internalFunc) + where T : class + where U : class + { + ClientResult internalResult = internalFunc.Invoke(); + ListQueryPage convertedValue = ListQueryPage.Create(internalResult.Value) as ListQueryPage; + return ClientResult.FromValue(convertedValue, internalResult.GetRawResponse()); + } + + internal virtual async Task>> GetListQueryPageAsync(Func>> internalAsyncFunc) + where T : class + where U : class + { + ClientResult internalResult = await internalAsyncFunc.Invoke(); + ListQueryPage convertedValue = ListQueryPage.Create(internalResult.Value) as ListQueryPage; + return ClientResult.FromValue(convertedValue, internalResult.GetRawResponse()); + } +} diff --git a/.dotnet/src/Custom/Assistants/AssistantCreationOptions.cs b/.dotnet/src/Custom/Assistants/AssistantCreationOptions.cs new file mode 100644 index 000000000..9b9f23321 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/AssistantCreationOptions.cs @@ -0,0 +1,65 @@ +using OpenAI.ClientShared.Internal; +using 
System.ClientModel.Internal; + +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when creating a new . +/// +public partial class AssistantCreationOptions +{ + /// + /// An optional display name for the assistant. + /// + public string Name { get; set; } + /// + /// A description to associate with the assistant. + /// + public string Description { get; set; } + + /// + /// Default instructions for the assistant to use when creating messages. + /// + public string Instructions { get; set; } + + /// + /// A collection of default tool definitions to enable for the assistant. Available tools include: + /// + /// + /// + /// code_interpreter - + /// - works with data, math, and computer code + /// + /// + /// retrieval - + /// - dynamically enriches an assistant's context with content from uploaded, indexed files + /// + /// + /// function - + /// - enables caller-provided custom functions for actions and enrichment + /// + /// + /// + /// + public IList Tools { get; } = new OptionalList(); + + /// + /// A collection of IDs for previously uploaded files that are made accessible to the assistant. These IDs are the + /// basis for the functionality of file-based tools like retrieval. + /// + public IList FileIds { get; } = new OptionalList(); + + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
+ /// + /// + public IDictionary Metadata { get; } = new OptionalDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/AssistantFileAssociation.cs b/.dotnet/src/Custom/Assistants/AssistantFileAssociation.cs new file mode 100644 index 000000000..7a563c06e --- /dev/null +++ b/.dotnet/src/Custom/Assistants/AssistantFileAssociation.cs @@ -0,0 +1,17 @@ +using System; + +namespace OpenAI.Assistants; + +public partial class AssistantFileAssociation +{ + public string AssistantId { get; } + public string FileId { get; } + public DateTimeOffset CreatedAt { get; } + + internal AssistantFileAssociation(Internal.Models.AssistantFileObject internalFile) + { + AssistantId = internalFile.AssistantId; + FileId = internalFile.Id; + CreatedAt = internalFile.CreatedAt; + } +} diff --git a/.dotnet/src/Custom/Assistants/AssistantModificationOptions.cs b/.dotnet/src/Custom/Assistants/AssistantModificationOptions.cs new file mode 100644 index 000000000..1c72d4490 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/AssistantModificationOptions.cs @@ -0,0 +1,71 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when modifying an existing . +/// +public partial class AssistantModificationOptions +{ + /// + /// The new model that the assistant should use when creating messages. + /// + public string Model { get; } + + /// + /// A new, friendly name for the assistant. Its will remain unchanged. + /// + public string Name { get; } + + /// + /// A new description to associate with the assistant. + /// + public string Description { get; } + + /// + /// New, default instructions for the assistant to use when creating messages. + /// + public string Instructions { get; } + + /// + /// A new collection of default tool definitions to enable for the assistant. 
Available tools include: + /// + /// + /// + /// code_interpreter - + /// - works with data, math, and computer code + /// + /// + /// retrieval - + /// - dynamically enriches an assistant's context with content from uploaded, indexed files + /// + /// + /// function - + /// - enables caller-provided custom functions for actions and enrichment + /// + /// + /// + /// + public IList Tools { get; } = new OptionalList(); + + /// + /// A new collection of IDs for previously uploaded files that are made accessible to the assistant. These IDs are + /// the basis for the functionality of file-based tools like retrieval. + /// + public IList FileIds { get; } = new OptionalList(); + + /// + /// A replacement for the optional key/value mapping of additional, supplemental data items to attach to the + /// . This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. + /// + /// + public IDictionary Metadata { get; } = new OptionalDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/AssistantThread.cs b/.dotnet/src/Custom/Assistants/AssistantThread.cs new file mode 100644 index 000000000..f6c85c9ad --- /dev/null +++ b/.dotnet/src/Custom/Assistants/AssistantThread.cs @@ -0,0 +1,31 @@ +using System; +using System.Collections.Generic; +namespace OpenAI.Assistants; + +public partial class AssistantThread +{ + public string Id { get; } + + public DateTimeOffset CreatedAt { get; } + + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
+ /// + /// + public IReadOnlyDictionary Metadata { get; } + + + internal AssistantThread(Internal.Models.ThreadObject internalThread) + { + Id = internalThread.Id; + Metadata = internalThread.Metadata; + CreatedAt = internalThread.CreatedAt; + } + +} diff --git a/.dotnet/src/Custom/Assistants/CodeInterpreterToolDefinition.cs b/.dotnet/src/Custom/Assistants/CodeInterpreterToolDefinition.cs new file mode 100644 index 000000000..995e9ffb3 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/CodeInterpreterToolDefinition.cs @@ -0,0 +1,38 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class CodeInterpreterToolDefinition : ToolDefinition +{ + public CodeInterpreterToolDefinition() + { } + + internal static CodeInterpreterToolDefinition DeserializeCodeInterpreterToolDefinition( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code_interpreter"u8)) + { + continue; + } + } + + return new CodeInterpreterToolDefinition(); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "code_interpreter"u8); + } +} diff --git a/.dotnet/src/Custom/Assistants/CodeInterpreterToolInfo.cs b/.dotnet/src/Custom/Assistants/CodeInterpreterToolInfo.cs new file mode 100644 index 000000000..b464fd2f3 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/CodeInterpreterToolInfo.cs @@ -0,0 +1,33 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class CodeInterpreterToolInfo : ToolInfo +{ + internal CodeInterpreterToolInfo() + { } + + internal static CodeInterpreterToolInfo DeserializeCodeInterpreterToolInfo( + 
JsonElement element, + ModelReaderWriterOptions options) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + foreach (var property in element.EnumerateObject()) + { + } + return new CodeInterpreterToolInfo(); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "code_interpreter"u8); + } +} diff --git a/.dotnet/src/Custom/Assistants/CreatedAtSortOrder.cs b/.dotnet/src/Custom/Assistants/CreatedAtSortOrder.cs new file mode 100644 index 000000000..d10e11781 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/CreatedAtSortOrder.cs @@ -0,0 +1,7 @@ +namespace OpenAI.Assistants; + +public enum CreatedAtSortOrder +{ + NewestFirst, + OldestFirst, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/FunctionToolDefinition.cs b/.dotnet/src/Custom/Assistants/FunctionToolDefinition.cs new file mode 100644 index 000000000..57592ac9e --- /dev/null +++ b/.dotnet/src/Custom/Assistants/FunctionToolDefinition.cs @@ -0,0 +1,89 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Diagnostics.CodeAnalysis; +using System.Text.Json; +using OpenAI.ClientShared.Internal; + +namespace OpenAI.Assistants; + +public partial class FunctionToolDefinition : ToolDefinition +{ + public required string Name { get; set; } + public string Description { get; set; } + public BinaryData Parameters { get; set; } + + [SetsRequiredMembers] + public FunctionToolDefinition(string name, string description = null, BinaryData parameters = null) + { + Name = name; + Description = description; + Parameters = parameters; + } + + public FunctionToolDefinition() + { } + + internal static FunctionToolDefinition DeserializeFunctionToolDefinition( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); 
+ + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string name = null; + string description = null; + BinaryData parameters = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("function"u8)) + { + foreach (var functionProperty in property.Value.EnumerateObject()) + { + if (functionProperty.NameEquals("name"u8)) + { + name = functionProperty.Value.GetString(); + continue; + } + if (functionProperty.NameEquals("description"u8)) + { + description = functionProperty.Value.GetString(); + continue; + } + if (functionProperty.NameEquals("parameters")) + { + parameters = BinaryData.FromObjectAsJson(functionProperty.Value.GetRawText()); + continue; + } + } + } + } + + return new FunctionToolDefinition(name, description, parameters); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "function"u8); + writer.WritePropertyName("function"u8); + writer.WriteStartObject(); + writer.WriteString("name"u8, Name); + if (OptionalProperty.IsDefined(Description)) + { + writer.WriteString("description"u8, Description); + } + if (OptionalProperty.IsDefined(Parameters)) + { + writer.WritePropertyName("parameters"u8); + writer.WriteRawValue(Parameters.ToString()); + } + writer.WriteEndObject(); + } +} diff --git a/.dotnet/src/Custom/Assistants/FunctionToolInfo.cs b/.dotnet/src/Custom/Assistants/FunctionToolInfo.cs new file mode 100644 index 000000000..a85cd9158 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/FunctionToolInfo.cs @@ -0,0 +1,82 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; +using OpenAI.ClientShared.Internal; + +namespace OpenAI.Assistants; + +public partial class FunctionToolInfo : ToolInfo +{ + public string Name { get; } + public string Description { get; } + public BinaryData Parameters { get; } + + internal 
FunctionToolInfo(string name, string description, BinaryData parameters) + { + Name = name; + Description = description; + Parameters = parameters; + } + + internal static FunctionToolInfo DeserializeFunctionToolInfo( + JsonElement element, + ModelReaderWriterOptions options) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string name = null; + string description = null; + BinaryData parameters = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("function"u8)) + { + foreach (var functionObjectProperty in property.Value.EnumerateObject()) + { + if (functionObjectProperty.NameEquals("name"u8)) + { + name = functionObjectProperty.Value.GetString(); + continue; + } + if (functionObjectProperty.NameEquals("description"u8)) + { + description = functionObjectProperty.Value.GetString(); + continue; + } + if (functionObjectProperty.NameEquals("parameters"u8)) + { + parameters = BinaryData.FromObjectAsJson(functionObjectProperty.Value.GetRawText()); + continue; + } + } + } + } + return new FunctionToolInfo(name, description, parameters); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "function"u8); + writer.WritePropertyName("function"u8); + writer.WriteStartObject(); + writer.WriteString("name"u8, Name); + if (OptionalProperty.IsDefined(Description)) + { + writer.WriteString("description"u8, Description); + } + if (OptionalProperty.IsDefined(Parameters)) + { + writer.WriteRawValue(Parameters.ToString()); + } + writer.WriteEndObject(); + } +} diff --git a/.dotnet/src/Custom/Assistants/ListQueryPage.cs b/.dotnet/src/Custom/Assistants/ListQueryPage.cs new file mode 100644 index 000000000..3d11c9b48 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ListQueryPage.cs @@ -0,0 +1,114 @@ +using OpenAI.ClientShared.Internal; +using System; +using System.ClientModel.Internal; 
+ +using System.Collections; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Assistants; + +public abstract partial class ListQueryPage +{ + public string FirstId { get; } + public string LastId { get; } + public bool HasMore { get; } + + internal ListQueryPage(string firstId, string lastId, bool hasMore) + { + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + internal static ListQueryPage Create(Internal.Models.ListAssistantsResponse internalResponse) + { + OptionalList assistants = new(); + foreach (Internal.Models.AssistantObject internalAssistant in internalResponse.Data) + { + assistants.Add(new(internalAssistant)); + } + return new(assistants, internalResponse.FirstId, internalResponse.LastId, internalResponse.HasMore); + } + + internal static ListQueryPage Create(Internal.Models.ListAssistantFilesResponse internalResponse) + { + OptionalList assistantFileAssociations = new(); + foreach (Internal.Models.AssistantFileObject internalFile in internalResponse.Data) + { + assistantFileAssociations.Add(new(internalFile)); + } + return new(assistantFileAssociations, internalResponse.FirstId, internalResponse.LastId, internalResponse.HasMore); + } + + internal static ListQueryPage Create(Internal.Models.ListMessagesResponse internalResponse) + { + OptionalList messages = new(); + foreach (Internal.Models.MessageObject internalMessage in internalResponse.Data) + { + messages.Add(new(internalMessage)); + } + return new(messages, internalResponse.FirstId, internalResponse.LastId, internalResponse.HasMore); + } + + internal static ListQueryPage Create(Internal.Models.ListMessageFilesResponse internalResponse) + { + OptionalList messageFileAssociations = new(); + foreach (Internal.Models.MessageFileObject internalFile in internalResponse.Data) + { + messageFileAssociations.Add(new(internalFile)); + } + return new(messageFileAssociations, internalResponse.FirstId, internalResponse.LastId, internalResponse.HasMore); + } + + 
internal static ListQueryPage Create(Internal.Models.ListRunsResponse internalResponse) + { + OptionalList runs = new(); + foreach (Internal.Models.RunObject internalRun in internalResponse.Data) + { + runs.Add(new(internalRun)); + } + return new(runs, internalResponse.FirstId, internalResponse.LastId, internalResponse.HasMore); + } + + internal static ListQueryPage Create(T internalResponse) + where T : class + { + return internalResponse switch + { + Internal.Models.ListAssistantsResponse internalAssistantsResponse => Create(internalAssistantsResponse), + Internal.Models.ListAssistantFilesResponse internalFilesResponse => Create(internalFilesResponse), + Internal.Models.ListMessagesResponse internalMessagesResponse => Create(internalMessagesResponse), + Internal.Models.ListMessageFilesResponse internalMessageFilesResponse => Create(internalMessageFilesResponse), + Internal.Models.ListRunsResponse internalRunsResponse => Create(internalRunsResponse), + _ => throw new ArgumentException( + $"Unknown type for generic {nameof(ListQueryPage)} conversion: {internalResponse.GetType()}"), + }; + } +} + +public partial class ListQueryPage : ListQueryPage, IReadOnlyList + where T : class +{ + public IReadOnlyList Items { get; } + + /// + public int Count => Items.Count; + + /// + public T this[int index] + { + get => Items[index]; + } + + internal ListQueryPage(IEnumerable items, string firstId, string lastId, bool hasMore) + : base(firstId, lastId, hasMore) + { + Items = items.ToList(); + } + + /// + public IEnumerator GetEnumerator() => Items.GetEnumerator(); + + IEnumerator IEnumerable.GetEnumerator() => Items.GetEnumerator(); +} diff --git a/.dotnet/src/Custom/Assistants/MessageContent.Serialization.cs b/.dotnet/src/Custom/Assistants/MessageContent.Serialization.cs new file mode 100644 index 000000000..4d88129b3 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageContent.Serialization.cs @@ -0,0 +1,94 @@ +using System; +using System.ClientModel.Internal; + +using 
System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public abstract partial class MessageContent : IJsonModel +{ + MessageContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MessageContent)} does not support '{format}' format."); + } + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeMessageContent(document.RootElement, options); + } + + MessageContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeMessageContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(MessageContent)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + WriteDerived(writer, options); + writer.WriteEndObject(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(MessageContent)} does not support '{options.Format}' format."); + } + } + + internal abstract void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options); + + internal static MessageContent DeserializeMessageContent( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + if (property.Value.ValueEquals("text"u8)) + { + return MessageTextContent.DeserializeMessageTextContent(element, options); + } + else if (property.Value.ValueEquals("image_file"u8)) + { + return MessageImageFileContent.DeserializeMessageImageFileContent(element, options); + } + else + { + throw new ArgumentException(property.Value.GetString()); + } + } + } + throw new ArgumentException(nameof(element)); + } + +} diff --git a/.dotnet/src/Custom/Assistants/MessageContent.cs b/.dotnet/src/Custom/Assistants/MessageContent.cs new file mode 100644 index 000000000..928137621 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageContent.cs @@ -0,0 +1,6 @@ +namespace OpenAI.Assistants; + + +public abstract partial class MessageContent +{ +} diff --git a/.dotnet/src/Custom/Assistants/MessageCreationOptions.cs b/.dotnet/src/Custom/Assistants/MessageCreationOptions.cs new file mode 100644 index 000000000..a0916f7dd --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageCreationOptions.cs @@ -0,0 +1,30 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when creating a new . 
+/// +public partial class MessageCreationOptions +{ + /// + /// A collection of IDs for previously uploaded files that are made accessible to the message. These IDs are the + /// basis for the functionality of file-based tools like retrieval. + /// + public IList FileIds { get; } = new OptionalList(); + + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. + /// + /// + public IDictionary Metadata { get; } = new OptionalDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/MessageFileAssociation.cs b/.dotnet/src/Custom/Assistants/MessageFileAssociation.cs new file mode 100644 index 000000000..93e7e3d68 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageFileAssociation.cs @@ -0,0 +1,17 @@ +using System; + +namespace OpenAI.Assistants; + +public partial class MessageFileAssociation +{ + public string MessageId { get; } + public string FileId { get; } + public DateTimeOffset CreatedAt { get; } + + internal MessageFileAssociation(Internal.Models.MessageFileObject internalFile) + { + MessageId = internalFile.MessageId; + FileId = internalFile.Id; + CreatedAt = internalFile.CreatedAt; + } +} diff --git a/.dotnet/src/Custom/Assistants/MessageImageFileContent.cs b/.dotnet/src/Custom/Assistants/MessageImageFileContent.cs new file mode 100644 index 000000000..68997b045 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageImageFileContent.cs @@ -0,0 +1,55 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public class MessageImageFileContent : MessageContent +{ + public string FileId { get; } + + internal MessageImageFileContent(string fileId) + { + FileId = fileId; + } + + internal override 
void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "image_file"u8); + writer.WritePropertyName("image_file"u8); + writer.WriteStartObject(); + writer.WriteString("file_id"u8, FileId); + writer.WriteEndObject(); + } + + + internal static MessageContent DeserializeMessageImageFileContent( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string fileId = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("image_file"u8)) + { + foreach (var textObjectProperty in property.Value.EnumerateObject()) + { + if (textObjectProperty.NameEquals("file_id"u8)) + { + fileId = textObjectProperty.Value.GetString(); + continue; + } + } + } + } + return new MessageImageFileContent(fileId); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/MessageRole.cs b/.dotnet/src/Custom/Assistants/MessageRole.cs new file mode 100644 index 000000000..c59e5abd2 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageRole.cs @@ -0,0 +1,16 @@ +namespace OpenAI.Assistants; + +/// +/// Represents the role associated with the message which indicates its source and purpose. +/// +public enum MessageRole +{ + /// + /// The user role, associated with caller input into the model. + /// + User, + /// + /// The assistant role, associated with model output in response to inputs from the user and tools. 
+ /// + Assistant, +} diff --git a/.dotnet/src/Custom/Assistants/MessageTextContent.cs b/.dotnet/src/Custom/Assistants/MessageTextContent.cs new file mode 100644 index 000000000..43c0433ad --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageTextContent.cs @@ -0,0 +1,72 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public class MessageTextContent : MessageContent +{ + /// + /// The content text. The interpretation of this value will depend on which kind of chat message the content is + /// associated with. + /// + public string Text { get; } + + public IReadOnlyList Annotations { get; } + + internal MessageTextContent(string text, IReadOnlyList annotations) + { + Text = text; + Annotations = annotations; + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "text"u8); + writer.WritePropertyName("text"u8); + writer.WriteStartObject(); + writer.WriteString("value"u8, Text); + writer.WriteEndObject(); + } + + internal static MessageContent DeserializeMessageTextContent( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string text = null; + List annotations = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("text"u8)) + { + foreach (var textObjectProperty in property.Value.EnumerateObject()) + { + if (textObjectProperty.NameEquals("value"u8)) + { + text = textObjectProperty.Value.GetString(); + continue; + } + if (textObjectProperty.NameEquals("annotations"u8)) + { + annotations ??= []; + foreach (var annotationObject in textObjectProperty.Value.EnumerateArray()) + { + annotations.Add(TextContentAnnotation.DeserializeTextContentAnnotation(annotationObject, options)); + } + continue; + 
} + } + } + } + return new MessageTextContent(text, annotations); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/RequiredFunctionToolCall.cs b/.dotnet/src/Custom/Assistants/RequiredFunctionToolCall.cs new file mode 100644 index 000000000..f0807495c --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RequiredFunctionToolCall.cs @@ -0,0 +1,66 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class RequiredFunctionToolCall : RequiredToolCall +{ + public string Name { get; } + public string Arguments { get; } + + internal RequiredFunctionToolCall(string id, string name, string arguments) + : base(id) + { + Name = name; + Arguments = arguments; + } + + internal static RequiredFunctionToolCall DeserializeRequiredFunctionToolCall( + JsonElement element, + ModelReaderWriterOptions options) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string id = null; + string name = null; + string arguments = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("function"u8)) + { + foreach (var functionProperty in property.Value.EnumerateObject()) + { + if (functionProperty.NameEquals("name"u8)) + { + name = functionProperty.Value.GetString(); + continue; + } + if (functionProperty.NameEquals("arguments"u8)) + { + arguments = functionProperty.Value.GetString(); + continue; + } + } + continue; + } + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + } + return new RequiredFunctionToolCall(id, name, arguments); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "retrieval"u8); + } +} diff --git a/.dotnet/src/Custom/Assistants/RequiredToolCall.cs b/.dotnet/src/Custom/Assistants/RequiredToolCall.cs new file mode 100644 index 
000000000..19e951d2c --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RequiredToolCall.cs @@ -0,0 +1,37 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public abstract partial class RequiredToolCall : RunRequiredAction +{ + public string Id { get; } + + internal RequiredToolCall(string id) + { + Id = id; + } + + internal static RequiredToolCall DeserializeRequiredToolCall( + JsonElement element, + ModelReaderWriterOptions options) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("function"u8)) + { + return RequiredFunctionToolCall.DeserializeRequiredFunctionToolCall(element, options); + } + } + throw new ArgumentException(nameof(element)); + } +} diff --git a/.dotnet/src/Custom/Assistants/RetrievalToolDefinition.cs b/.dotnet/src/Custom/Assistants/RetrievalToolDefinition.cs new file mode 100644 index 000000000..f18579c87 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RetrievalToolDefinition.cs @@ -0,0 +1,38 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class RetrievalToolDefinition : ToolDefinition +{ + public RetrievalToolDefinition() + { } + + internal static RetrievalToolDefinition DeserializeRetrievalToolDefinition( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("retrieval"u8)) + { + continue; + } + } + + return new RetrievalToolDefinition(); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, 
"retrieval"u8); + } +} diff --git a/.dotnet/src/Custom/Assistants/RetrievalToolInfo.cs b/.dotnet/src/Custom/Assistants/RetrievalToolInfo.cs new file mode 100644 index 000000000..1b58f35a4 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RetrievalToolInfo.cs @@ -0,0 +1,33 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class RetrievalToolInfo : ToolInfo +{ + internal RetrievalToolInfo() + { } + + internal static RetrievalToolInfo DeserializeRetrievalToolInfo( + JsonElement element, + ModelReaderWriterOptions options) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + foreach (var property in element.EnumerateObject()) + { + } + return new RetrievalToolInfo(); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "retrieval"u8); + } +} diff --git a/.dotnet/src/Custom/Assistants/RunCreationOptions.cs b/.dotnet/src/Custom/Assistants/RunCreationOptions.cs new file mode 100644 index 000000000..f7106eaac --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunCreationOptions.cs @@ -0,0 +1,67 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when creating a new . +/// +public partial class RunCreationOptions +{ + + + /// + /// A run-specific model name that will override the assistant's defined model. If not provided, the assistant's + /// selection will be used. + /// + public string OverrideModel { get; set; } + + /// + /// A run specific replacement for the assistant's default instructions that will override the assistant-level + /// instructions. If not specified, the assistant's instructions will be used. 
+ /// + public string OverrideInstructions { get; set; } + + /// + /// Run-specific additional instructions that will be appended to the assistant-level instructions solely for this + /// run. Unlike , the assistant's instructions are preserved and these additional + /// instructions are concatenated. + /// + public string AdditionalInstructions { get; set; } + + /// + /// A run-specific collection of tool definitions that will override the assistant-level defaults. If not provided, + /// the assistant's defined tools will be used. Available tools include: + /// + /// + /// + /// code_interpreter - + /// - works with data, math, and computer code + /// + /// + /// retrieval - + /// - dynamically enriches an Run's context with content from uploaded, indexed files + /// + /// + /// function - + /// - enables caller-provided custom functions for actions and enrichment + /// + /// + /// + /// + public IList OverrideTools { get; } = new OptionalList(); + + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
+ /// + /// + public IDictionary Metadata { get; } = new OptionalDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/RunError.cs b/.dotnet/src/Custom/Assistants/RunError.cs new file mode 100644 index 000000000..b53777c3d --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunError.cs @@ -0,0 +1,24 @@ +using OpenAI.Chat; + +namespace OpenAI.Assistants; + +public partial class RunError +{ + public RunErrorCode ErrorCode { get; } + public string ErrorMessage { get; } + + internal RunError(RunErrorCode errorCode, string errorMessage) + { + ErrorCode = errorCode; + ErrorMessage = errorMessage; + } + + internal RunError(Internal.Models.RunObjectLastError internalError) + { + if (internalError.Code != null) + { + ErrorCode = new(internalError.Code.ToString()); + } + ErrorMessage = internalError.Message; + } +} diff --git a/.dotnet/src/Custom/Assistants/RunErrorCode.cs b/.dotnet/src/Custom/Assistants/RunErrorCode.cs new file mode 100644 index 000000000..d2eb1b65e --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunErrorCode.cs @@ -0,0 +1,37 @@ +using System; +using System.ComponentModel; + +namespace OpenAI.Assistants; + +public readonly struct RunErrorCode : IEquatable +{ + private readonly string _value; + + public static RunErrorCode ServerError { get; } = new(Internal.Models.RunObjectLastErrorCode.ServerError.ToString()); + public static RunErrorCode RateLimitExceeded { get; } = new(Internal.Models.RunObjectLastErrorCode.RateLimitExceeded.ToString()); + public static RunErrorCode InvalidPrompt { get; } = new("invalid_prompt"); + + public RunErrorCode(string status) + { + _value = status; + } + + /// Determines if two values are the same. + public static bool operator ==(RunErrorCode left, RunErrorCode right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunErrorCode left, RunErrorCode right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator RunErrorCode(string value) => new RunErrorCode(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunErrorCode other && Equals(other); + /// + public bool Equals(RunErrorCode other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/RunModificationOptions.cs b/.dotnet/src/Custom/Assistants/RunModificationOptions.cs new file mode 100644 index 000000000..88739b12e --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunModificationOptions.cs @@ -0,0 +1,24 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when modifying an existing . +/// +public partial class RunModificationOptions +{ + /// + /// A replacement for the optional key/value mapping of additional, supplemental data items to attach to the + /// . This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
+ /// + /// + public IDictionary Metadata { get; } = new OptionalDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/RunRequiredAction.Serialization.cs b/.dotnet/src/Custom/Assistants/RunRequiredAction.Serialization.cs new file mode 100644 index 000000000..e9ba0d3ac --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunRequiredAction.Serialization.cs @@ -0,0 +1,107 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public abstract partial class RunRequiredAction : IJsonModel> +{ + IList IJsonModel>.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel>)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunRequiredAction)} does not support '{format}' format."); + } + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunRequiredActions(document.RootElement, options); + } + + IList IPersistableModel>.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel>)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunRequiredActions(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunRequiredAction)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel>.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + WriteDerived(writer, options); + writer.WriteEndObject(); + } + + BinaryData IPersistableModel>.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel>)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunRequiredAction)} does not support '{options.Format}' format."); + } + } + + internal abstract void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options); + + internal static IList DeserializeRunRequiredActions( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + List actions = null; + + foreach (var topProperty in element.EnumerateObject()) + { + if (topProperty.NameEquals("submit_tool_outputs"u8)) + { + foreach (var submitObjectProperty in topProperty.Value.EnumerateObject()) + { + if (submitObjectProperty.NameEquals("tool_calls"u8)) + { + foreach (var toolCallObject in submitObjectProperty.Value.EnumerateArray()) + { + foreach (var toolCallProperty in toolCallObject.EnumerateObject()) + { + if ((toolCallProperty.NameEquals("type"u8) && toolCallProperty.Value.ValueEquals("function"u8)) + || 
(toolCallProperty.NameEquals("function"u8))) + { + actions ??= []; + actions.Add(RequiredFunctionToolCall.DeserializeRequiredFunctionToolCall( + toolCallObject, + options)); + continue; + } + } + } + } + } + } + } + + return actions; + } +} diff --git a/.dotnet/src/Custom/Assistants/RunRequiredAction.cs b/.dotnet/src/Custom/Assistants/RunRequiredAction.cs new file mode 100644 index 000000000..be9a12215 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunRequiredAction.cs @@ -0,0 +1,8 @@ +using System; + +namespace OpenAI.Assistants; + +public partial class RunRequiredAction +{ + +} diff --git a/.dotnet/src/Custom/Assistants/RunStatus.cs b/.dotnet/src/Custom/Assistants/RunStatus.cs new file mode 100644 index 000000000..25888b8d2 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunStatus.cs @@ -0,0 +1,13 @@ +namespace OpenAI.Assistants; + +public enum RunStatus +{ + Queued, + InProgress, + RequiresAction, + Cancelling, + CompletedSuccessfully, + Cancelled, + Failed, + Expired, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/RunTokenUsage.cs b/.dotnet/src/Custom/Assistants/RunTokenUsage.cs new file mode 100644 index 000000000..6ec585b3d --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunTokenUsage.cs @@ -0,0 +1,20 @@ +namespace OpenAI.Assistants; + +public partial class RunTokenUsage +{ + public long InputTokens { get; } + public long OutputTokens { get; } + public long TotalTokens { get; } + + internal RunTokenUsage(long inputTokens, long outputTokens, long totalTokens) + { + InputTokens = inputTokens; + OutputTokens = outputTokens; + TotalTokens = totalTokens; + } + + internal RunTokenUsage(Internal.Models.RunCompletionUsage internalUsage) + : this(internalUsage.PromptTokens, internalUsage.CompletionTokens, internalUsage.TotalTokens) + { + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/TextContentAnnotation.Serialization.cs b/.dotnet/src/Custom/Assistants/TextContentAnnotation.Serialization.cs new file mode 
100644 index 000000000..b7ba4eedb --- /dev/null +++ b/.dotnet/src/Custom/Assistants/TextContentAnnotation.Serialization.cs @@ -0,0 +1,93 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public abstract partial class TextContentAnnotation : IJsonModel +{ + TextContentAnnotation IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(TextContentAnnotation)} does not support '{format}' format."); + } + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeTextContentAnnotation(document.RootElement, options); + } + + TextContentAnnotation IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeTextContentAnnotation(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(TextContentAnnotation)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + WriteDerived(writer, options); + writer.WriteEndObject(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(TextContentAnnotation)} does not support '{options.Format}' format."); + } + } + + internal static TextContentAnnotation DeserializeTextContentAnnotation( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + if (property.Value.ValueEquals("file_citation"u8)) + { + return TextContentFileCitationAnnotation.DeserializeTextContentFileCitationAnnotation(element, options); + } + else if (property.Value.ValueEquals("file_path"u8)) + { + return TextContentFilePathAnnotation.DeserializeTextContentFilePathAnnotation(element, options); + } + else + { + throw new ArgumentException(property.Value.GetString()); + } + } + } + throw new ArgumentException(nameof(element)); + } + + internal abstract void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options); +} diff --git a/.dotnet/src/Custom/Assistants/TextContentAnnotation.cs b/.dotnet/src/Custom/Assistants/TextContentAnnotation.cs new file mode 100644 index 000000000..bd3f390c0 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/TextContentAnnotation.cs @@ -0,0 +1,6 @@ +namespace OpenAI.Assistants; + + +public abstract partial class TextContentAnnotation +{ +} diff --git a/.dotnet/src/Custom/Assistants/TextContentFileCitationAnnotation.cs b/.dotnet/src/Custom/Assistants/TextContentFileCitationAnnotation.cs new file mode 100644 index 000000000..459b91a94 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/TextContentFileCitationAnnotation.cs @@ -0,0 +1,95 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace 
OpenAI.Assistants; + +public class TextContentFileCitationAnnotation : TextContentAnnotation +{ + public string TextToReplace { get; } + + public string FileId { get; } + + public string Quote { get; } + + public int StartIndex { get; } + + public int EndIndex { get; } + + internal TextContentFileCitationAnnotation(string textToReplace, string citationFileId, string citationQuote, int startIndex, int endIndex) + { + TextToReplace = textToReplace; + FileId = citationFileId; + Quote = citationQuote; + StartIndex = startIndex; + EndIndex = endIndex; + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "file_citation"u8); + writer.WriteString("text"u8, TextToReplace); + writer.WritePropertyName("file_citation"u8); + writer.WriteStartObject(); + writer.WriteString("file_id"u8, FileId); + writer.WriteString("quote"u8, Quote); + writer.WriteEndObject(); + writer.WriteNumber("start_index"u8, StartIndex); + writer.WriteNumber("end_index"u8, EndIndex); + } + + + internal static TextContentFileCitationAnnotation DeserializeTextContentFileCitationAnnotation( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string textToReplace = null; + int startIndex = 0; + int endIndex = 0; + string citationFileId = null; + string citationQuote = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("text"u8)) + { + textToReplace = property.Value.GetString(); + continue; + } + if (property.NameEquals("start_index"u8)) + { + startIndex = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("end_index"u8)) + { + endIndex = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("file_citation"u8)) + { + foreach (var filePathObjectProperty in property.Value.EnumerateObject()) + { + if 
(filePathObjectProperty.NameEquals("file_id"u8)) + { + citationFileId = filePathObjectProperty.Value.GetString(); + continue; + } + if (filePathObjectProperty.NameEquals("quote"u8)) + { + citationQuote = filePathObjectProperty.Value.GetString(); + continue; + } + } + } + } + return new TextContentFileCitationAnnotation(textToReplace, citationFileId, citationQuote, startIndex, endIndex); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/TextContentFilePathAnnotation.cs b/.dotnet/src/Custom/Assistants/TextContentFilePathAnnotation.cs new file mode 100644 index 000000000..7e716bc6c --- /dev/null +++ b/.dotnet/src/Custom/Assistants/TextContentFilePathAnnotation.cs @@ -0,0 +1,85 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public class TextContentFilePathAnnotation : TextContentAnnotation +{ + public string TextToReplace { get; } + + public string FileId { get; } + + public int StartIndex { get; } + + public int EndIndex { get; } + + internal TextContentFilePathAnnotation(string textToReplace, string createdFileId, int startIndex, int endIndex) + { + TextToReplace = textToReplace; + FileId = createdFileId; + StartIndex = startIndex; + EndIndex = endIndex; + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "file_path"u8); + writer.WriteString("text"u8, TextToReplace); + writer.WritePropertyName("file_path"u8); + writer.WriteStartObject(); + writer.WriteString("file_id"u8, FileId); + writer.WriteEndObject(); + writer.WriteNumber("start_index"u8, StartIndex); + writer.WriteNumber("end_index"u8, EndIndex); + } + + internal static TextContentFilePathAnnotation DeserializeTextContentFilePathAnnotation( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return 
null; + } + + string textToReplace = null; + int startIndex = 0; + int endIndex = 0; + string createdFileId = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("text"u8)) + { + textToReplace = property.Value.GetString(); + continue; + } + if (property.NameEquals("start_index"u8)) + { + startIndex = property.Value.GetInt32(); + continue; + } + if (property.NameEquals ("end_index"u8)) + { + endIndex = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("file_path"u8)) + { + foreach (var filePathObjectProperty in property.Value.EnumerateObject()) + { + if (filePathObjectProperty.NameEquals("file_id"u8)) + { + createdFileId = filePathObjectProperty.Value.GetString(); + continue; + } + } + } + } + return new TextContentFilePathAnnotation(textToReplace, createdFileId, startIndex, endIndex); + } + +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/ThreadCreationOptions.cs b/.dotnet/src/Custom/Assistants/ThreadCreationOptions.cs new file mode 100644 index 000000000..404b697e8 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ThreadCreationOptions.cs @@ -0,0 +1,26 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when creating a new . +/// +public partial class ThreadCreationOptions +{ + public IList Messages { get; } = new OptionalList(); + + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
+ /// + /// + public IDictionary Metadata { get; } = new OptionalDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/ThreadInitializationMessage.cs b/.dotnet/src/Custom/Assistants/ThreadInitializationMessage.cs new file mode 100644 index 000000000..069bd34b4 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ThreadInitializationMessage.cs @@ -0,0 +1,45 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; + +namespace OpenAI.Assistants; + +public partial class ThreadInitializationMessage +{ + public required MessageRole Role { get; set; } + + public required string Content { get; set; } + + /// + /// A list of File IDs that the message should use.There can be a maximum of 10 files attached to a message. Useful + /// for tools like retrieval and code_interpreter that can access and use files. + /// + public IList FileIds { get; } = new OptionalList(); + + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
+ /// + /// + public IDictionary Metadata { get; } = new OptionalDictionary(); + + [SetsRequiredMembers] + public ThreadInitializationMessage(MessageRole role, string content) + { + Role = role; + Content = content; + } + + public ThreadInitializationMessage() + { } + + public static implicit operator ThreadInitializationMessage(string content) + => new(MessageRole.User, content); +} diff --git a/.dotnet/src/Custom/Assistants/ThreadMessage.cs b/.dotnet/src/Custom/Assistants/ThreadMessage.cs new file mode 100644 index 000000000..01b481b36 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ThreadMessage.cs @@ -0,0 +1,62 @@ +using System; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class ThreadMessage +{ + public string Id { get; } + public DateTimeOffset CreatedAt { get; } + public string ThreadId { get; } + public MessageRole Role { get; } + public IReadOnlyList ContentItems { get; } + public string AssistantId { get; } + + public string RunId { get; } + public IReadOnlyList FileIds { get; } + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
+ /// + /// + public IReadOnlyDictionary Metadata { get; } + + internal ThreadMessage(Internal.Models.MessageObject internalMessage) + { + MessageRole convertedRole = MessageRole.User; + if (internalMessage.Role.ToString() == "user") + { + convertedRole = MessageRole.User; + } + else if (internalMessage.Role.ToString() == "assistant") + { + convertedRole = MessageRole.Assistant; + } + else + { + throw new ArgumentException(internalMessage.Role.ToString()); + } + + List content = []; + foreach (BinaryData unionContentData in internalMessage.Content) + { + content.Add(MessageContent.DeserializeMessageContent(JsonDocument.Parse(unionContentData).RootElement)); + } + + Id = internalMessage.Id; + AssistantId = internalMessage.AssistantId; + ThreadId = internalMessage.ThreadId; + RunId = internalMessage.RunId; + Metadata = internalMessage.Metadata; + FileIds = internalMessage.FileIds; + CreatedAt = internalMessage.CreatedAt; + Role = convertedRole; + ContentItems = content; + } +} diff --git a/.dotnet/src/Custom/Assistants/ThreadModificationOptions.cs b/.dotnet/src/Custom/Assistants/ThreadModificationOptions.cs new file mode 100644 index 000000000..07e6380ea --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ThreadModificationOptions.cs @@ -0,0 +1,24 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when modifying an existing . +/// +public partial class ThreadModificationOptions +{ + /// + /// A replacement for the optional key/value mapping of additional, supplemental data items to attach to the + /// . This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
+ /// + /// + public IDictionary Metadata { get; } = new OptionalDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/ThreadRun.cs b/.dotnet/src/Custom/Assistants/ThreadRun.cs new file mode 100644 index 000000000..fa65d9c08 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ThreadRun.cs @@ -0,0 +1,105 @@ +using System; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class ThreadRun +{ + public string Id { get; } + public string ThreadId { get; } + public string AssistantId { get; } + public DateTimeOffset CreatedAt { get; } + + public RunStatus Status { get; } + + public IReadOnlyList RequiredActions { get; } + + public RunError LastError { get; } + public DateTimeOffset? ExpiresAt { get; } + public DateTimeOffset? StartedAt { get; } + public DateTimeOffset? CancelledAt { get; } + public DateTimeOffset? FailedAt { get; } + public DateTimeOffset? CompletedAt { get; } + public string Model { get; } + public string Instructions { get; } + public IReadOnlyList Tools { get; } + public IReadOnlyList FileIds { get; } + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
+ /// + /// + public IReadOnlyDictionary Metadata { get; } + public RunTokenUsage Usage { get; } + + internal ThreadRun(Internal.Models.RunObject internalRun) + { + Id = internalRun.Id; + ThreadId = internalRun.ThreadId; + AssistantId = internalRun.AssistantId; + CreatedAt = internalRun.CreatedAt; + FailedAt = internalRun.FailedAt; + ExpiresAt = internalRun.ExpiresAt; + StartedAt = internalRun.StartedAt; + CancelledAt = internalRun.CancelledAt; + CompletedAt = internalRun.CompletedAt; + Status = internalRun.Status.ToString() switch + { + "queued" => RunStatus.Queued, + "in_progress" => RunStatus.InProgress, + "requires_action" => RunStatus.RequiresAction, + "cancelling" => RunStatus.Cancelling, + "cancelled" => RunStatus.Cancelled, + "failed" => RunStatus.Failed, + "completed" => RunStatus.CompletedSuccessfully, + "expired" => RunStatus.Expired, + _ => throw new ArgumentException(nameof(Status)), + }; + Metadata = internalRun.Metadata; + FileIds = internalRun.FileIds; + Metadata = internalRun.Metadata; + Model = internalRun.Model; + Instructions = internalRun.Instructions; + + if (internalRun.LastError != null) + { + LastError = new(internalRun.LastError); + } + + if (internalRun.Usage != null) + { + Usage = new(internalRun.Usage); + } + + if (internalRun.Tools != null) + { + List tools = []; + foreach (BinaryData unionToolInfo in internalRun.Tools) + { + tools.Add(ToolInfo.DeserializeToolInfo(JsonDocument.Parse(unionToolInfo).RootElement)); + } + Tools = tools; + } + + IReadOnlyList internalFunctionCalls + = internalRun.RequiredAction?.SubmitToolOutputs?.ToolCalls; + if (internalFunctionCalls != null) + { + List actions = []; + foreach (Internal.Models.RunToolCallObject internalToolCall in internalFunctionCalls) + { + actions.Add(new RequiredFunctionToolCall( + internalToolCall.Id, + internalToolCall.Function.Name, + internalToolCall.Function.Arguments)); + } + RequiredActions = actions; + } + } +} diff --git 
a/.dotnet/src/Custom/Assistants/ToolDefinition.Serialization.cs b/.dotnet/src/Custom/Assistants/ToolDefinition.Serialization.cs new file mode 100644 index 000000000..a4975c9bb --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ToolDefinition.Serialization.cs @@ -0,0 +1,98 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public abstract partial class ToolDefinition : IJsonModel +{ + ToolDefinition IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ToolDefinition)} does not support '{format}' format."); + } + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeToolDefinition(document.RootElement, options); + } + + ToolDefinition IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeToolDefinition(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ToolDefinition)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + WriteDerived(writer, options); + writer.WriteEndObject(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ToolDefinition)} does not support '{options.Format}' format."); + } + } + + internal abstract void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options); + + internal static ToolDefinition DeserializeToolDefinition( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + if (property.Value.ValueEquals("code_interpreter"u8)) + { + return CodeInterpreterToolDefinition.DeserializeCodeInterpreterToolDefinition(element, options); + } + else if (property.Value.ValueEquals("retrieval"u8)) + { + return RetrievalToolDefinition.DeserializeRetrievalToolDefinition(element, options); + } + else if (property.Value.ValueEquals("function"u8)) + { + return FunctionToolDefinition.DeserializeFunctionToolDefinition(element, options); + } + else + { + throw new ArgumentException(property.Value.GetString()); + } + } + } + throw new ArgumentException(nameof(element)); + } + +} diff --git a/.dotnet/src/Custom/Assistants/ToolDefinition.cs b/.dotnet/src/Custom/Assistants/ToolDefinition.cs new file mode 100644 index 000000000..b39de3b21 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ToolDefinition.cs @@ -0,0 +1,5 @@ +namespace OpenAI.Assistants; + +public abstract partial class ToolDefinition +{ +} diff --git a/.dotnet/src/Custom/Assistants/ToolInfo.Serialization.cs b/.dotnet/src/Custom/Assistants/ToolInfo.Serialization.cs new file mode 100644 index 000000000..ae8d31b33 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ToolInfo.Serialization.cs @@ -0,0 +1,97 @@ +using System; +using System.ClientModel.Internal; + +using 
System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public abstract partial class ToolInfo : IJsonModel +{ + ToolInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ToolInfo)} does not support '{format}' format."); + } + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeToolInfo(document.RootElement, options); + } + + ToolInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeToolInfo(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ToolInfo)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + WriteDerived(writer, options); + writer.WriteEndObject(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ToolInfo)} does not support '{options.Format}' format."); + } + } + + internal abstract void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options); + + internal static ToolInfo DeserializeToolInfo( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + if (property.Value.ValueEquals("code_interpreter"u8)) + { + return CodeInterpreterToolInfo.DeserializeCodeInterpreterToolInfo(element, options); + } + else if (property.Value.ValueEquals("retrieval"u8)) + { + return RetrievalToolInfo.DeserializeRetrievalToolInfo(element, options); + } + else if (property.Value.ValueEquals("function"u8)) + { + return FunctionToolInfo.DeserializeFunctionToolInfo(element, options); + } + else + { + throw new ArgumentException(property.Value.GetString()); + } + } + } + throw new ArgumentException(nameof(element)); + } +} diff --git a/.dotnet/src/Custom/Assistants/ToolInfo.cs b/.dotnet/src/Custom/Assistants/ToolInfo.cs new file mode 100644 index 000000000..1708f94d2 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ToolInfo.cs @@ -0,0 +1,5 @@ +namespace OpenAI.Assistants; + +public abstract partial class ToolInfo +{ +} diff --git a/.dotnet/src/Custom/Assistants/ToolOutput.Serialization.cs b/.dotnet/src/Custom/Assistants/ToolOutput.Serialization.cs new file mode 100644 index 000000000..8d5783eaa --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ToolOutput.Serialization.cs @@ -0,0 +1,97 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using 
System.Text.Json; +using OpenAI.ClientShared.Internal; + +namespace OpenAI.Assistants; + +public partial class ToolOutput : IJsonModel +{ + ToolOutput IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ToolOutput)} does not support '{format}' format."); + } + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeToolOutput(document.RootElement, options); + } + + ToolOutput IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeToolOutput(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ToolOutput)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + if (OptionalProperty.IsDefined(Id)) + { + writer.WriteString("tool_call_id"u8, Id); + } + if (OptionalProperty.IsDefined(Output)) + { + writer.WriteString("output"u8, Output); + } + writer.WriteEndObject(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ToolOutput)} does not support '{options.Format}' format."); + } + } + + internal static ToolOutput DeserializeToolOutput( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + string id = null; + string output = null; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("tool_call_id"u8)) + { + id = property.Value.ToString(); + continue; + } + if (property.NameEquals("output"u8)) + { + output = property.Value.ToString(); + continue; + } + } + return new ToolOutput(id, output); + } +} diff --git a/.dotnet/src/Custom/Assistants/ToolOutput.cs b/.dotnet/src/Custom/Assistants/ToolOutput.cs new file mode 100644 index 000000000..762466f89 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ToolOutput.cs @@ -0,0 +1,27 @@ +using System.Diagnostics.CodeAnalysis; +using System.Text.Json.Serialization; + +namespace OpenAI.Assistants; + +public partial class ToolOutput +{ + [JsonPropertyName("tool_call_id")] + public required string Id { get; set; } + [JsonPropertyName("output")] + public string Output { get; set; } + + public ToolOutput() + { } + + [SetsRequiredMembers] + public ToolOutput(string toolCallId, string output = null) + { + Id = toolCallId; + Output = output; + } + + [SetsRequiredMembers] + public ToolOutput(RequiredToolCall toolCall, string output = null) + : this(toolCall.Id, output) + { } +} diff --git a/.dotnet/src/Custom/Audio/AudioClient.cs b/.dotnet/src/Custom/Audio/AudioClient.cs new file mode 100644 index 000000000..c5835c6d9 --- /dev/null +++ b/.dotnet/src/Custom/Audio/AudioClient.cs @@ -0,0 +1,385 @@ +using OpenAI.ClientShared.Internal; +using System; +using 
System.ClientModel; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.ComponentModel; +using System.Text; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; + +namespace OpenAI.Audio; + +/// The service client for OpenAI audio operations. +public partial class AudioClient +{ + private OpenAIClientConnector _clientConnector; + private Internal.Audio Shim => _clientConnector.InternalClient.GetAudioClient(); + + /// + /// Initializes a new instance of , used for audio operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// The model name for audio operations that the client should use. + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public AudioClient(Uri endpoint, string model, ApiKeyCredential credential, OpenAIClientOptions options = null) + { + _clientConnector = new(model, endpoint, credential, options); + } + + /// + /// Initializes a new instance of , used for audio operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// The model name for audio operations that the client should use. + /// Additional options to customize the client. 
+ public AudioClient(Uri endpoint, string model, OpenAIClientOptions options = null) + : this(endpoint, model, credential: null, options) + { } + + /// + /// Initializes a new instance of , used for audio operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The model name for audio operations that the client should use. + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public AudioClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = null) + : this(endpoint: null, model, credential, options) + { } + + /// + /// Initializes a new instance of , used for audio operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The model name for audio operations that the client should use. + /// Additional options to customize the client. + public AudioClient(string model, OpenAIClientOptions options = null) + : this(endpoint: null, model, credential: null, options) + { } + + /// + /// Creates text-to-speech audio that reflects the specified voice speaking the provided input text. + /// + /// + /// Unless otherwise specified via , the mp3 format of + /// will be used for the generated audio. + /// + /// The text for the voice to speak. + /// The voice to use. + /// Additional options to control the text-to-speech operation. 
+ /// + /// A result containing generated, spoken audio in the specified output format. + /// Unless otherwise specified via , the mp3 format of + /// will be used for the generated audio. + /// + public virtual ClientResult GenerateSpeechFromText( + string text, + TextToSpeechVoice voice, + TextToSpeechOptions options = null) + { + Internal.Models.CreateSpeechRequest request = CreateInternalTtsRequest(text, voice, options); + return Shim.CreateSpeech(request); + } + + /// + /// Creates text-to-speech audio that reflects the specified voice speaking the provided input text. + /// + /// + /// Unless otherwise specified via , the mp3 format of + /// will be used for the generated audio. + /// + /// The text for the voice to speak. + /// The voice to use. + /// Additional options to control the text-to-speech operation. + /// + /// A result containing generated, spoken audio in the specified output format. + /// Unless otherwise specified via , the mp3 format of + /// will be used for the generated audio. 
+ /// + public virtual Task> GenerateSpeechFromTextAsync( + string text, + TextToSpeechVoice voice, + TextToSpeechOptions options = null) + { + Internal.Models.CreateSpeechRequest request = CreateInternalTtsRequest(text, voice, options); + return Shim.CreateSpeechAsync(request); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GenerateSpeechFromText(BinaryContent content, RequestOptions context = null) + => Shim.CreateSpeech(content, context); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GenerateSpeechFromTextAsync(BinaryContent content, RequestOptions context = null) + => Shim.CreateSpeechAsync(content, context); + + public virtual ClientResult TranscribeAudio(BinaryData audioBytes, string filename, AudioTranscriptionOptions options = null) + { + PipelineMessage message = CreateInternalTranscriptionRequestMessage(audioBytes, filename, options); + Shim.Pipeline.Send(message); + return GetTranscriptionResultFromResponse(message.Response); + } + + public virtual async Task> TranscribeAudioAsync(BinaryData audioBytes, string filename, AudioTranscriptionOptions options = null) + { + PipelineMessage message = CreateInternalTranscriptionRequestMessage(audioBytes, filename, options); + await Shim.Pipeline.SendAsync(message); + return GetTranscriptionResultFromResponse(message.Response); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult TranscribeAudio(BinaryContent content, RequestOptions context = null) + => Shim.CreateTranscription(content, context); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task TranscribeAudioAsync(BinaryContent content, RequestOptions context = null) + => Shim.CreateTranscriptionAsync(content, context); + + public virtual ClientResult TranslateAudio(BinaryData audioBytes, string filename, AudioTranslationOptions options = null) + { + PipelineMessage message = 
CreateInternalTranslationRequestMessage(audioBytes, filename, options); + Shim.Pipeline.Send(message); + return GetTranslationResultFromResponse(message.Response); + } + + public virtual async Task> TranslateAudioAsync(BinaryData audioBytes, string filename, AudioTranslationOptions options = null) + { + PipelineMessage message = CreateInternalTranslationRequestMessage(audioBytes, filename, options); + await Shim.Pipeline.SendAsync(message); + return GetTranslationResultFromResponse(message.Response); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult TranslateAudio(BinaryContent content, RequestOptions context = null) + => Shim.CreateTranslation(content, context); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task TranslateAudioAsync(BinaryContent content, RequestOptions context = null) + => Shim.CreateTranslationAsync(content, context); + + private PipelineMessage CreateInternalTranscriptionRequestMessage(BinaryData audioBytes, string filename, AudioTranscriptionOptions options) + { + PipelineMessage message = Shim.Pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_clientConnector.Endpoint.AbsoluteUri); + StringBuilder path = new(); + path.Append("/audio/transcriptions"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + + MultipartFormDataContent requestContent = CreateInternalTranscriptionRequestContent(audioBytes, filename, options); + requestContent.ApplyToRequest(request); + + return message; + } + + private PipelineMessage CreateInternalTranslationRequestMessage(BinaryData audioBytes, string filename, AudioTranslationOptions options) + { + PipelineMessage message = Shim.Pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder 
uriBuilder = new(_clientConnector.Endpoint.AbsoluteUri); + StringBuilder path = new(); + path.Append("/audio/translations"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + + MultipartFormDataContent requestContent = CreateInternalTranscriptionRequestContent(audioBytes, filename, options); + requestContent.ApplyToRequest(request); + + return message; + } + + private MultipartFormDataContent CreateInternalTranscriptionRequestContent(BinaryData audioBytes, string filename, AudioTranscriptionOptions options) + { + options ??= new(); + return CreateInternalTranscriptionRequestContent( + audioBytes, + filename, + options.Language, + options.Prompt, + options.ResponseFormat, + options.Temperature, + options.EnableWordTimestamps, + options.EnableSegmentTimestamps); + } + + private MultipartFormDataContent CreateInternalTranscriptionRequestContent(BinaryData audioBytes, string filename, AudioTranslationOptions options) + { + options ??= new(); + return CreateInternalTranscriptionRequestContent( + audioBytes, + filename, + language: null, + options.Prompt, + options.ResponseFormat, + options.Temperature, + enableWordTimestamps: null, + enableSegmentTimestamps: null); + } + + private MultipartFormDataContent CreateInternalTranscriptionRequestContent( + BinaryData audioBytes, + string filename, + string language = null, + string prompt = null, + AudioTranscriptionFormat? transcriptionFormat = null, + float? temperature = null, + bool? enableWordTimestamps = null, + bool? 
enableSegmentTimestamps = null) + { + MultipartFormDataContent content = new(); + content.Add(MultipartContent.Create(BinaryData.FromString(_clientConnector.Model)), name: "model", []); + if (OptionalProperty.IsDefined(language)) + { + content.Add(MultipartContent.Create(BinaryData.FromString(language)), name: "language", []); + } + if (OptionalProperty.IsDefined(prompt)) + { + content.Add(MultipartContent.Create(BinaryData.FromString(prompt)), name: "prompt", []); + } + if (OptionalProperty.IsDefined(transcriptionFormat)) + { + content.Add(MultipartContent.Create(BinaryData.FromString(transcriptionFormat switch + { + AudioTranscriptionFormat.Simple => "json", + AudioTranscriptionFormat.Detailed => "verbose_json", + AudioTranscriptionFormat.Srt => "srt", + AudioTranscriptionFormat.Vtt => "vtt", + _ => throw new ArgumentException(nameof(transcriptionFormat)), + })), + name: "response_format", + []); + } + if (OptionalProperty.IsDefined(temperature)) + { + content.Add(MultipartContent.Create(BinaryData.FromString($"{temperature}")), name: "temperature", []); + } + if (OptionalProperty.IsDefined(enableWordTimestamps) || OptionalProperty.IsDefined(enableSegmentTimestamps)) + { + List granularities = []; + if (enableWordTimestamps == true) + { + granularities.Add("word"); + } + if (enableSegmentTimestamps == true) + { + granularities.Add("segment"); + } + content.Add(MultipartContent.Create(BinaryData.FromObjectAsJson(granularities)), name: "timestamp_granularities", []); + } + content.Add(MultipartContent.Create(audioBytes), name: "file", fileName: filename, []); + + return content; + } + + private static ClientResult GetTranscriptionResultFromResponse(PipelineResponse response) + { + if (response.IsError) + { + throw new ClientResultException(response); + } + + using JsonDocument responseDocument = JsonDocument.Parse(response.Content); + return ClientResult.FromValue(AudioTranscription.DeserializeAudioTranscription(responseDocument.RootElement), response); + } + + 
private static ClientResult GetTranslationResultFromResponse(PipelineResponse response) + { + if (response.IsError) + { + throw new ClientResultException(response); + } + + using JsonDocument responseDocument = JsonDocument.Parse(response.Content); + return ClientResult.FromValue(AudioTranslation.DeserializeAudioTranscription(responseDocument.RootElement), response); + } + + private Internal.Models.CreateSpeechRequest CreateInternalTtsRequest( + string input, + TextToSpeechVoice voice, + TextToSpeechOptions options = null) + { + options ??= new(); + Internal.Models.CreateSpeechRequestResponseFormat? internalResponseFormat = null; + if (options.ResponseFormat != null) + { + internalResponseFormat = options.ResponseFormat switch + { + AudioDataFormat.Aac => "aac", + AudioDataFormat.Flac => "flac", + AudioDataFormat.M4a => "m4a", + AudioDataFormat.Mp3 => "mp3", + AudioDataFormat.Mp4 => "mp4", + AudioDataFormat.Mpeg => "mpeg", + AudioDataFormat.Mpga => "mpga", + AudioDataFormat.Ogg => "ogg", + AudioDataFormat.Opus => "opus", + AudioDataFormat.Wav => "wav", + AudioDataFormat.Webm => "webm", + _ => throw new ArgumentException(nameof(options.ResponseFormat)), + }; + } + return new Internal.Models.CreateSpeechRequest( + _clientConnector.Model, + input, + voice.ToString(), + internalResponseFormat, + options?.SpeedMultiplier, + serializedAdditionalRawData: null); + } + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + +} diff --git a/.dotnet/src/Custom/Audio/AudioDataFormat.cs b/.dotnet/src/Custom/Audio/AudioDataFormat.cs new file mode 100644 index 000000000..b8840ca44 --- /dev/null +++ b/.dotnet/src/Custom/Audio/AudioDataFormat.cs @@ -0,0 +1,92 @@ +namespace OpenAI.Audio; + +/// +/// Represents an audio data format available as either input or output into an audio operation. 
+/// +public enum AudioDataFormat +{ + /// + /// MP3, an all-purpose audio compression format with a moderate tradeoff of quality for data size. + /// + /// mp3 is supported for input into translation and transcription as well as for output from text-to-speech. + /// + /// + Mp3, + /// + /// AAC, an alternative all-purpose format to MP3 preferred by YouTube, Android, and iOS. + /// + /// aac is supported for input into translation and transcription as well as for output from text-to-speech. + /// + /// + Aac, + /// + /// OGG, a balanced, open-source, general use format favored by Spotify. + /// + /// ogg is supported as input into translation and transcription but is not available for + /// text-to-speech output. + /// + /// + Ogg, + /// + /// FLAC, a high-quality, lossless compression format preferred for audio archival and enthusiast use. + /// + /// flac is supported for input into translation and transcription as well as for output from text-to-speech. + /// + /// + Flac, + /// + /// MP4, a multimedia container format that generally features bigger sizes and higher quality relative to MP3. + /// + /// mp4 is supported as input into translation and transcription but is not available for + /// text-to-speech output. + /// + /// + Mp4, + /// + /// MPEG, a multimedia container format that can contain any of several different underlying audio formats. + /// + /// mpeg is supported as input into translation and transcription but is not available for + /// text-to-speech output. + /// + /// + Mpeg, + /// + /// MPGA, effectively an alias for MP3. + /// + /// mpga is supported as input into translation and transcription but is not available for + /// text-to-speech output. + /// + /// + Mpga, + /// + /// M4A, the audio-only counterpart to MP4 that generally features larger data sizes and higher quality than MP3. + /// + /// m4a is supported as input into translation and transcription but is not available for + /// text-to-speech output. 
+ /// + /// + M4a, + /// + /// Opus, a higher-quality compression format that features integrated optimizations for speech. + /// + /// opus is supported for input into translation and transcription as well as for output from text-to-speech. + /// + /// + Opus, + /// + /// WAV, an uncompressed, lossless format with maximum quality, highest file size, and minimal decoding. + /// + /// wav is supported as input into translation and transcription but is not available for + /// text-to-speech output. + /// + /// + Wav, + /// + /// WebM, a multimedia container that generally uses Opus or OGG audio. + /// + /// webm is supported as input into translation and transcription but is not available for + /// text-to-speech output. + /// + /// + Webm, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/AudioTranscription.cs b/.dotnet/src/Custom/Audio/AudioTranscription.cs new file mode 100644 index 000000000..5f55ef9cb --- /dev/null +++ b/.dotnet/src/Custom/Audio/AudioTranscription.cs @@ -0,0 +1,72 @@ +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Audio; + +public partial class AudioTranscription +{ + public string Language { get; } + public TimeSpan? Duration { get; } + public string Text { get; } + public IReadOnlyList Words { get; } + public IReadOnlyList Segments { get; } + + internal AudioTranscription(string language, TimeSpan? duration, string text, IReadOnlyList words, IReadOnlyList segments) + { + Language = language; + Duration = duration; + Text = text; + Words = words; + Segments = segments; + } + + internal static AudioTranscription DeserializeAudioTranscription(JsonElement element, ModelReaderWriterOptions options = default) + { + string language = null; + TimeSpan? 
duration = null; + string text = null; + List words = null; + List segments = null; + + foreach (JsonProperty topLevelProperty in element.EnumerateObject()) + { + if (topLevelProperty.NameEquals("language"u8)) + { + language = topLevelProperty.Value.GetString(); + continue; + } + if (topLevelProperty.NameEquals("duration"u8)) + { + duration = TimeSpan.FromSeconds(topLevelProperty.Value.GetSingle()); + continue; + } + if (topLevelProperty.NameEquals("text"u8)) + { + text = topLevelProperty.Value.GetString(); + continue; + } + if (topLevelProperty.NameEquals("words"u8)) + { + words = []; + foreach (JsonElement wordElement in topLevelProperty.Value.EnumerateArray()) + { + words.Add(TranscribedWord.DeserializeTranscribedWord(wordElement, options)); + } + continue; + } + if (topLevelProperty.NameEquals("segments"u8)) + { + segments = []; + foreach (JsonElement segmentElement in topLevelProperty.Value.EnumerateArray()) + { + segments.Add(TranscriptionSegment.DeserializeTranscriptionSegment(segmentElement, options)); + } + continue; + } + } + + return new AudioTranscription(language, duration, text, words, segments); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/AudioTranscriptionFormat.cs b/.dotnet/src/Custom/Audio/AudioTranscriptionFormat.cs new file mode 100644 index 000000000..585099b7a --- /dev/null +++ b/.dotnet/src/Custom/Audio/AudioTranscriptionFormat.cs @@ -0,0 +1,12 @@ +using System; +using System.Collections.Generic; + +namespace OpenAI.Audio; + +public enum AudioTranscriptionFormat +{ + Simple, + Detailed, + Srt, + Vtt, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/AudioTranscriptionOptions.cs b/.dotnet/src/Custom/Audio/AudioTranscriptionOptions.cs new file mode 100644 index 000000000..2b473f74d --- /dev/null +++ b/.dotnet/src/Custom/Audio/AudioTranscriptionOptions.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; + +namespace OpenAI.Audio; + +public partial class AudioTranscriptionOptions 
+{ + public string Language { get; set; } + public string Prompt { get; set; } + public AudioTranscriptionFormat? ResponseFormat { get; set; } + public float? Temperature { get; set; } + public bool? EnableWordTimestamps { get; set; } + public bool? EnableSegmentTimestamps { get; set; } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/AudioTranslation.cs b/.dotnet/src/Custom/Audio/AudioTranslation.cs new file mode 100644 index 000000000..49de258bf --- /dev/null +++ b/.dotnet/src/Custom/Audio/AudioTranslation.cs @@ -0,0 +1,32 @@ +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Audio; + +public partial class AudioTranslation +{ + public string Text { get; } + + internal AudioTranslation(string text) + { + Text = text; + } + + internal static AudioTranslation DeserializeAudioTranscription(JsonElement element, ModelReaderWriterOptions options = default) + { + string text = null; + + foreach (JsonProperty property in element.EnumerateObject()) + { + if (property.NameEquals("text"u8)) + { + text = property.Value.GetString(); + continue; + } + } + + return new AudioTranslation(text); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/AudioTranslationOptions.cs b/.dotnet/src/Custom/Audio/AudioTranslationOptions.cs new file mode 100644 index 000000000..38e05a047 --- /dev/null +++ b/.dotnet/src/Custom/Audio/AudioTranslationOptions.cs @@ -0,0 +1,11 @@ +using System; +using System.Collections.Generic; + +namespace OpenAI.Audio; + +public partial class AudioTranslationOptions +{ + public string Prompt { get; set; } + public AudioTranscriptionFormat? ResponseFormat { get; set; } + public float? 
Temperature { get; set; } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/TextToSpeechOptions.cs b/.dotnet/src/Custom/Audio/TextToSpeechOptions.cs new file mode 100644 index 000000000..476d564af --- /dev/null +++ b/.dotnet/src/Custom/Audio/TextToSpeechOptions.cs @@ -0,0 +1,29 @@ +namespace OpenAI.Audio; + +/// +/// A representation of additional options available to control the behavior of a text-to-speech audio generation +/// operation. +/// +public partial class TextToSpeechOptions +{ + /// + /// The desired format of the generated text-to-speech audio. If not specified, a default value of mp3 will + /// be used. + /// + /// Supported output formats include: + /// + /// mp3 - + /// opus - + /// aac - + /// flac - + /// + /// + /// + public AudioDataFormat? ResponseFormat { get; set; } + + /// + /// A multiplicative speed factor to apply to the generated audio, with 1.0 being the default and valid + /// values ranging from 0.25 to 4.0. + /// + public float? SpeedMultiplier { get; set; } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/TextToSpeechVoice.cs b/.dotnet/src/Custom/Audio/TextToSpeechVoice.cs new file mode 100644 index 000000000..55bc324c5 --- /dev/null +++ b/.dotnet/src/Custom/Audio/TextToSpeechVoice.cs @@ -0,0 +1,64 @@ +using System; + +namespace OpenAI.Audio; + +/// +/// Represents the available text-to-speech voices. +/// +public readonly struct TextToSpeechVoice : IEquatable +{ + private readonly Internal.Models.CreateSpeechRequestVoice _internalVoice; + + /// + /// Creates a new instance of . + /// + /// The textual representation of the value to use. + public TextToSpeechVoice(string value) + : this(new Internal.Models.CreateSpeechRequestVoice(value)) + { } + + internal TextToSpeechVoice(Internal.Models.CreateSpeechRequestVoice internalVoice) + { + _internalVoice = internalVoice; + } + + /// + /// The onyx voice. 
+ /// + public static TextToSpeechVoice Onyx { get; } = new TextToSpeechVoice(Internal.Models.CreateSpeechRequestVoice.Onyx); + /// + /// The shimmer voice. + /// + public static TextToSpeechVoice Shimmer { get; } = new TextToSpeechVoice(Internal.Models.CreateSpeechRequestVoice.Shimmer); + /// + /// The alloy voice. + /// + public static TextToSpeechVoice Alloy { get; } = new TextToSpeechVoice(Internal.Models.CreateSpeechRequestVoice.Alloy); + /// + /// The fable voice. + /// + public static TextToSpeechVoice Fable { get; } = new TextToSpeechVoice(Internal.Models.CreateSpeechRequestVoice.Fable); + /// + /// The echo voice. + /// + public static TextToSpeechVoice Echo { get; } = new TextToSpeechVoice(Internal.Models.CreateSpeechRequestVoice.Echo); + + /// + public static bool operator ==(TextToSpeechVoice left, TextToSpeechVoice right) + => left._internalVoice == right._internalVoice; + /// + public static implicit operator TextToSpeechVoice(string value) + => new TextToSpeechVoice(new Internal.Models.CreateSpeechRequestVoice(value)); + /// + public static bool operator !=(TextToSpeechVoice left, TextToSpeechVoice right) + => left._internalVoice != right._internalVoice; + /// + public bool Equals(TextToSpeechVoice other) => _internalVoice.Equals(other._internalVoice); + /// + public override string ToString() => _internalVoice.ToString(); + /// + public override bool Equals(object obj) => + (obj is TextToSpeechVoice voice && this.Equals(voice)) || _internalVoice.Equals(obj); + /// + public override int GetHashCode() => _internalVoice.GetHashCode(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/TranscribedWord.cs b/.dotnet/src/Custom/Audio/TranscribedWord.cs new file mode 100644 index 000000000..94e26fde3 --- /dev/null +++ b/.dotnet/src/Custom/Audio/TranscribedWord.cs @@ -0,0 +1,45 @@ +using System; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Audio; + +public partial class TranscribedWord +{ + public 
string Word { get; } + public TimeSpan Start { get; } + public TimeSpan End { get; } + + internal TranscribedWord(string word, TimeSpan start, TimeSpan end) + { + Word = word; + Start = start; + End = end; + } + + internal static TranscribedWord DeserializeTranscribedWord(JsonElement element, ModelReaderWriterOptions options = default) + { + string word = null; + TimeSpan? start = null; + TimeSpan? end = null; + foreach (JsonProperty wordProperty in element.EnumerateObject()) + { + if (wordProperty.NameEquals("word"u8)) + { + word = wordProperty.Value.GetString(); + continue; + } + if (wordProperty.NameEquals("start"u8)) + { + start = TimeSpan.FromSeconds(wordProperty.Value.GetSingle()); + continue; + } + if (wordProperty.NameEquals("end"u8)) + { + end = TimeSpan.FromSeconds(wordProperty.Value.GetSingle()); + continue; + } + } + return new TranscribedWord(word, start.Value, end.Value); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/TranscriptionSegment.cs b/.dotnet/src/Custom/Audio/TranscriptionSegment.cs new file mode 100644 index 000000000..c1ee0632e --- /dev/null +++ b/.dotnet/src/Custom/Audio/TranscriptionSegment.cs @@ -0,0 +1,109 @@ +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using System.Threading; + +namespace OpenAI.Audio; + +public partial class TranscriptionSegment +{ + public int Id { get; } + public int SeekOffset { get; } + public TimeSpan Start { get; } + public TimeSpan End { get; } + public string Text { get; } + public IReadOnlyList TokenIds { get; } + public float Temperature { get; } + public float AverageLogProbability { get; } + public float CompressionRatio { get; } + public float NoSpeechProbability { get; } + + internal TranscriptionSegment(int id, int seekOffset, TimeSpan start, TimeSpan end, string text, IReadOnlyList tokenIds, float temperature, float averageLogProbability, float compressionRatio, float noSpeechProbability) + { + Id = id; + 
SeekOffset = seekOffset; + Start = start; + End = end; + Text = text; + TokenIds = tokenIds; + Temperature = temperature; + AverageLogProbability = averageLogProbability; + CompressionRatio = compressionRatio; + NoSpeechProbability = noSpeechProbability; + } + + internal static TranscriptionSegment DeserializeTranscriptionSegment(JsonElement element, ModelReaderWriterOptions options = default) + { + int id = 0; + int seekOffset = 0; + TimeSpan start = default; + TimeSpan end = default; + string text = null; + List tokenIds = null; + float temperature = 0; + float averageLogProbability = 0; + float compressionRatio = 0; + float noSpeechProbability = 0; + + foreach (JsonProperty topLevelProperty in element.EnumerateObject()) + { + if (topLevelProperty.NameEquals("id"u8)) + { + id = topLevelProperty.Value.GetInt32(); + continue; + } + if (topLevelProperty.NameEquals("seek"u8)) + { + seekOffset = topLevelProperty.Value.GetInt32(); + continue; + } + if (topLevelProperty.NameEquals("start"u8)) + { + start = TimeSpan.FromSeconds(topLevelProperty.Value.GetSingle()); + continue; + } + if (topLevelProperty.NameEquals("end"u8)) + { + end = TimeSpan.FromSeconds(topLevelProperty.Value.GetSingle()); + continue; + } + if (topLevelProperty.NameEquals("text"u8)) + { + text = topLevelProperty.Value.GetString(); + continue; + } + if (topLevelProperty.NameEquals("tokens"u8)) + { + tokenIds = []; + foreach (JsonElement tokenIdElement in topLevelProperty.Value.EnumerateArray()) + { + tokenIds.Add(tokenIdElement.GetInt32()); + } + continue; + } + if (topLevelProperty.NameEquals("temperature"u8)) + { + temperature = topLevelProperty.Value.GetSingle(); + continue; + } + if (topLevelProperty.NameEquals("avg_logprob"u8)) + { + averageLogProbability = topLevelProperty.Value.GetSingle(); + continue; + } + if (topLevelProperty.NameEquals("compression_ratio"u8)) + { + compressionRatio = topLevelProperty.Value.GetSingle(); + continue; + } + if (topLevelProperty.NameEquals("no_speech_prob"u8)) + { 
+ noSpeechProbability = topLevelProperty.Value.GetSingle(); + continue; + } + } + + return new TranscriptionSegment(id, seekOffset, start, end, text, tokenIds, temperature, averageLogProbability, compressionRatio, noSpeechProbability); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatClient.cs b/.dotnet/src/Custom/Chat/ChatClient.cs new file mode 100644 index 000000000..e9990e3c9 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatClient.cs @@ -0,0 +1,400 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.ComponentModel; +using System.Text; +using System.Threading.Tasks; + +namespace OpenAI.Chat; + +/// The service client for the OpenAI Chat Completions endpoint. +public partial class ChatClient +{ + private OpenAIClientConnector _clientConnector; + private Internal.Chat Shim => _clientConnector.InternalClient.GetChatClient(); + + /// + /// Initializes a new instance of , used for Chat Completion requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// The model name for chat completions that the client should use. + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public ChatClient(Uri endpoint, string model, ApiKeyCredential credential, OpenAIClientOptions options = null) + { + _clientConnector = new(model, endpoint, credential, options); + } + + /// + /// Initializes a new instance of , used for Chat Completion requests. 
+ /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// The model name for chat completions that the client should use. + /// Additional options to customize the client. + public ChatClient(Uri endpoint, string model, OpenAIClientOptions options = null) + : this(endpoint, model, credential: null, options) + { } + + /// + /// Initializes a new instance of , used for Chat Completion requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The model name for chat completions that the client should use. + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public ChatClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = null) + : this(endpoint: null, model, credential, options) + { } + + /// + /// Initializes a new instance of , used for Chat Completion requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The model name for chat completions that the client should use. + /// Additional options to customize the client. 
+ public ChatClient(string model, OpenAIClientOptions options = null) + : this(endpoint: null, model, credential: null, options) + { } + + /// + /// Generates a single chat completion result for a single, simple user message. + /// + /// The user message to provide as a prompt for chat completion. + /// Additional options for the chat completion request. + /// A result for a single chat completion. + public virtual ClientResult CompleteChat(string message, ChatCompletionOptions options = null) + => CompleteChat(new List() { new ChatRequestUserMessage(message) }, options); + + /// + /// Generates a single chat completion result for a single, simple user message. + /// + /// The user message to provide as a prompt for chat completion. + /// Additional options for the chat completion request. + /// A result for a single chat completion. + public virtual Task> CompleteChatAsync(string message, ChatCompletionOptions options = null) + => CompleteChatAsync( + new List() { new ChatRequestUserMessage(message) }, options); + + /// + /// Generates a single chat completion result for a provided set of input chat messages. + /// + /// The messages to provide as input and history for chat completion. + /// Additional options for the chat completion request. + /// A result for a single chat completion. + public virtual ClientResult CompleteChat( + IEnumerable messages, + ChatCompletionOptions options = null) + { + Internal.Models.CreateChatCompletionRequest request = CreateInternalRequest(messages, options); + ClientResult response = Shim.CreateChatCompletion(request); + ChatCompletion chatCompletion = new(response.Value, internalChoiceIndex: 0); + return ClientResult.FromValue(chatCompletion, response.GetRawResponse()); + } + + /// + /// Generates a single chat completion result for a provided set of input chat messages. + /// + /// The messages to provide as input and history for chat completion. + /// Additional options for the chat completion request. 
+ /// A result for a single chat completion. + public virtual async Task> CompleteChatAsync( + IEnumerable messages, + ChatCompletionOptions options = null) + { + Internal.Models.CreateChatCompletionRequest request = CreateInternalRequest(messages, options); + ClientResult response = await Shim.CreateChatCompletionAsync(request).ConfigureAwait(false); + ChatCompletion chatCompletion = new(response.Value, internalChoiceIndex: 0); + return ClientResult.FromValue(chatCompletion, response.GetRawResponse()); + } + + /// + /// Generates a collection of chat completion results for a provided set of input chat messages. + /// + /// The messages to provide as input and history for chat completion. + /// + /// The number of independent, alternative response choices that should be generated. + /// + /// Additional options for the chat completion request. + /// The cancellation token for the operation. + /// A result for a single chat completion. + public virtual ClientResult CompleteChat( + IEnumerable messages, + int choiceCount, + ChatCompletionOptions options = null) + { + Internal.Models.CreateChatCompletionRequest request = CreateInternalRequest(messages, options, choiceCount); + ClientResult response = Shim.CreateChatCompletion(request); + List chatCompletions = []; + for (int i = 0; i < response.Value.Choices.Count; i++) + { + chatCompletions.Add(new(response.Value, response.Value.Choices[i].Index)); + } + return ClientResult.FromValue(new ChatCompletionCollection(chatCompletions), response.GetRawResponse()); + } + + /// + /// Generates a collection of chat completion results for a provided set of input chat messages. + /// + /// The messages to provide as input and history for chat completion. + /// + /// The number of independent, alternative response choices that should be generated. + /// + /// Additional options for the chat completion request. + /// A result for a single chat completion. 
+ public virtual async Task> CompleteChatAsync( + IEnumerable messages, + int choiceCount, + ChatCompletionOptions options = null) + { + Internal.Models.CreateChatCompletionRequest request = CreateInternalRequest(messages, options, choiceCount); + ClientResult response = await Shim.CreateChatCompletionAsync(request).ConfigureAwait(false); + List chatCompletions = []; + for (int i = 0; i < response.Value.Choices.Count; i++) + { + chatCompletions.Add(new(response.Value, response.Value.Choices[i].Index)); + } + return ClientResult.FromValue(new ChatCompletionCollection(chatCompletions), response.GetRawResponse()); + } + + /// + /// Begins a streaming response for a chat completion request using a single, simple user message as input. + /// + /// + /// can be enumerated over using the await foreach pattern using the + /// interface. + /// + /// The user message to provide as a prompt for chat completion. + /// + /// The number of independent, alternative choices that the chat completion request should generate. + /// + /// Additional options for the chat completion request. + /// A streaming result with incremental chat completion updates. + public virtual StreamingClientResult CompleteChatStreaming( + string message, + int? choiceCount = null, + ChatCompletionOptions options = null) + => CompleteChatStreaming( + new List { new ChatRequestUserMessage(message) }, + choiceCount, + options); + + /// + /// Begins a streaming response for a chat completion request using a single, simple user message as input. + /// + /// + /// can be enumerated over using the await foreach pattern using the + /// interface. + /// + /// The user message to provide as a prompt for chat completion. + /// + /// The number of independent, alternative choices that the chat completion request should generate. + /// + /// Additional options for the chat completion request. + /// A streaming result with incremental chat completion updates. 
+ public virtual Task> CompleteChatStreamingAsync( + string message, + int? choiceCount = null, + ChatCompletionOptions options = null) + => CompleteChatStreamingAsync( + new List { new ChatRequestUserMessage(message) }, + choiceCount, + options); + + /// + /// Begins a streaming response for a chat completion request using the provided chat messages as input and + /// history. + /// + /// + /// can be enumerated over using the await foreach pattern using the + /// interface. + /// + /// The messages to provide as input for chat completion. + /// + /// The number of independent, alternative choices that the chat completion request should generate. + /// + /// Additional options for the chat completion request. + /// The cancellation token for the operation. + /// A streaming result with incremental chat completion updates. + public virtual StreamingClientResult CompleteChatStreaming( + IEnumerable messages, + int? choiceCount = null, + ChatCompletionOptions options = null) + { + PipelineMessage requestMessage = CreateCustomRequestMessage(messages, choiceCount, options); + requestMessage.BufferResponse = false; + Shim.Pipeline.Send(requestMessage); + PipelineResponse response = requestMessage.ExtractResponse(); + + if (response.IsError) + { + throw new ClientResultException(response); + } + + ClientResult genericResult = ClientResult.FromResponse(response); + return StreamingClientResult.CreateFromResponse( + genericResult, + (responseForEnumeration) => SseAsyncEnumerator.EnumerateFromSseStream( + responseForEnumeration.GetRawResponse().ContentStream, + e => StreamingChatUpdate.DeserializeStreamingChatUpdates(e))); + } + + /// + /// Begins a streaming response for a chat completion request using the provided chat messages as input and + /// history. + /// + /// + /// can be enumerated over using the await foreach pattern using the + /// interface. + /// + /// The messages to provide as input for chat completion. 
+ /// + /// The number of independent, alternative choices that the chat completion request should generate. + /// + /// Additional options for the chat completion request. + /// The cancellation token for the operation. + /// A streaming result with incremental chat completion updates. + public virtual async Task> CompleteChatStreamingAsync( + IEnumerable messages, + int? choiceCount = null, + ChatCompletionOptions options = null) + { + PipelineMessage requestMessage = CreateCustomRequestMessage(messages, choiceCount, options); + requestMessage.BufferResponse = false; + await Shim.Pipeline.SendAsync(requestMessage); + PipelineResponse response = requestMessage.ExtractResponse(); + + if (response.IsError) + { + throw new ClientResultException(response); + } + + ClientResult genericResult = ClientResult.FromResponse(response); + return StreamingClientResult.CreateFromResponse( + genericResult, + (responseForEnumeration) => SseAsyncEnumerator.EnumerateFromSseStream( + responseForEnumeration.GetRawResponse().ContentStream, + e => StreamingChatUpdate.DeserializeStreamingChatUpdates(e))); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CompleteChat(BinaryContent content, RequestOptions context = null) + => Shim.CreateChatCompletion(content, context); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task CompleteChatAsync(BinaryContent content, RequestOptions context = null) + => Shim.CreateChatCompletionAsync(content, context); + + private Internal.Models.CreateChatCompletionRequest CreateInternalRequest( + IEnumerable messages, + ChatCompletionOptions options = null, + int? choiceCount = null, + bool? stream = null) + { + options ??= new(); + Internal.Models.CreateChatCompletionRequestResponseFormat? 
internalFormat = null; + if (options.ResponseFormat is not null) + { + internalFormat = new(options.ResponseFormat switch + { + ChatResponseFormat.Text => Internal.Models.CreateChatCompletionRequestResponseFormatType.Text, + ChatResponseFormat.JsonObject => Internal.Models.CreateChatCompletionRequestResponseFormatType.JsonObject, + _ => throw new ArgumentException(nameof(options.ResponseFormat)), + }, null); + } + List messageDataItems = []; + foreach (ChatRequestMessage message in messages) + { + messageDataItems.Add(ModelReaderWriter.Write(message)); + } + Dictionary additionalData = []; + return new Internal.Models.CreateChatCompletionRequest( + messageDataItems, + _clientConnector.Model, + options?.FrequencyPenalty, + options?.GetInternalLogitBias(), + options?.IncludeLogProbabilities, + options?.LogProbabilityCount, + options?.MaxTokens, + choiceCount, + options?.PresencePenalty, + internalFormat, + options?.Seed, + options?.GetInternalStopSequences(), + stream, + options?.Temperature, + options?.NucleusSamplingFactor, + options?.GetInternalTools(), + options?.ToolConstraint?.GetBinaryData(), + options?.User, + options?.FunctionConstraint?.ToBinaryData(), + options?.GetInternalFunctions(), + additionalData + ); + } + + private PipelineMessage CreateCustomRequestMessage(IEnumerable messages, int? 
}

/// <summary>
/// Represents a single chat completion choice within a chat completion response.
/// </summary>
public class ChatCompletion
{
    private readonly Internal.Models.CreateChatCompletionResponse _internalResponse;
    private readonly long _internalChoiceIndex;

    /// <summary> The unique identifier of the chat completion response. </summary>
    public string Id => _internalResponse.Id;
    /// <summary> The system fingerprint reported with the response. </summary>
    public string SystemFingerprint => _internalResponse.SystemFingerprint;
    /// <summary> The timestamp at which the response was created. </summary>
    public DateTimeOffset CreatedAt => _internalResponse.Created;
    /// <summary> Token usage information for the request and response. </summary>
    public ChatTokenUsage Usage { get; }
    /// <summary> The reason the model stopped generating tokens for this choice. </summary>
    public ChatFinishReason FinishReason { get; }
    /// <summary> The content of the message produced for this choice. </summary>
    public ChatMessageContent Content { get; }
    /// <summary> The tool calls made by the model for this choice, if any. </summary>
    public IReadOnlyList<ChatToolCall> ToolCalls { get; }
    /// <summary> The function call made by the model, if any (deprecated in favor of tool calls). </summary>
    public ChatFunctionCall FunctionCall { get; }
    /// <summary> The role associated with the response message. </summary>
    public ChatRole Role { get; }
    /// <summary> Log probability information for this choice, if requested. </summary>
    public ChatLogProbabilityCollection LogProbabilities { get; }
    /// <summary> The index of this choice within the response's list of choices. </summary>
    public long Index => _internalResponse.Choices[(int)_internalChoiceIndex].Index;

    internal ChatCompletion(Internal.Models.CreateChatCompletionResponse internalResponse, long internalChoiceIndex)
    {
        Internal.Models.CreateChatCompletionResponseChoice internalChoice
            = internalResponse.Choices[(int)internalChoiceIndex];
        _internalResponse = internalResponse;
        _internalChoiceIndex = internalChoiceIndex;
        // Translate the wire-level role string into the strongly-typed ChatRole.
        Role = internalChoice.Message.Role.ToString() switch
        {
            "system" => ChatRole.System,
            "user" => ChatRole.User,
            "assistant" => ChatRole.Assistant,
            "tool" => ChatRole.Tool,
            "function" => ChatRole.Function,
            // BUG FIX: ArgumentException(string) takes a message, not a parameter name.
            _ => throw new ArgumentException(
                $"Unsupported message role: '{internalChoice.Message.Role}'", nameof(internalResponse)),
        };
        Usage = new(_internalResponse.Usage);
        FinishReason = internalChoice.FinishReason.ToString() switch
        {
            "stop" => ChatFinishReason.Stopped,
            "length" => ChatFinishReason.Length,
            "tool_calls" => ChatFinishReason.ToolCalls,
            "function_call" => ChatFinishReason.FunctionCall,
            "content_filter" => ChatFinishReason.ContentFilter,
            _ => throw new ArgumentException(
                $"Unsupported finish reason: '{internalChoice.FinishReason}'", nameof(internalResponse)),
        };
        Content = internalChoice.Message.Content;
        if (internalChoice.Message.ToolCalls != null)
        {
            List<ChatToolCall> toolCalls = [];
            foreach (Internal.Models.ChatCompletionMessageToolCall internalToolCall in internalChoice.Message.ToolCalls)
            {
                // Only function tool calls are currently modeled.
                if (internalToolCall.Type == "function")
                {
                    toolCalls.Add(new ChatFunctionToolCall(
                        internalToolCall.Id,
                        internalToolCall.Function.Name,
                        internalToolCall.Function.Arguments));
                }
            }
            ToolCalls = toolCalls;
        }
        if (internalChoice.Message.FunctionCall != null)
        {
            FunctionCall = new(internalChoice.Message.FunctionCall.Name, internalChoice.Message.FunctionCall.Arguments);
        }
        if (internalChoice.Logprobs != null)
        {
            LogProbabilities = ChatLogProbabilityCollection.FromInternalData(internalChoice.Logprobs);
        }
    }
}
/// <summary>
/// Represents a chat completions response payload that contains information about multiple requested chat
/// completion choices.
/// </summary>
public class ChatCompletionCollection : ReadOnlyCollection<ChatCompletion>
{
    internal ChatCompletionCollection(IList<ChatCompletion> list) : base(list) { }
}

/// <summary>
/// Request-level options for chat completion.
/// </summary>
public partial class ChatCompletionOptions
{
    /// <summary> The frequency penalty to apply to generated tokens. </summary>
    public double? FrequencyPenalty { get; set; }
    /// <summary> Per-token sampling bias values, keyed on token IDs. </summary>
    public IDictionary<int, int> TokenSelectionBiases { get; set; } = new OptionalDictionary<int, int>();
    /// <summary> Whether log probabilities should be included in the response. </summary>
    public bool? IncludeLogProbabilities { get; set; }
    /// <summary> The number of most-likely alternative tokens to report log probabilities for. </summary>
    public long? LogProbabilityCount { get; set; }
    /// <summary> The maximum number of tokens to generate. </summary>
    public long? MaxTokens { get; set; }
    /// <summary> The presence penalty to apply to generated tokens. </summary>
    public double? PresencePenalty { get; set; }
    /// <summary> The response format, e.g. text or JSON object, that the model should produce. </summary>
    public ChatResponseFormat? ResponseFormat { get; set; }
    /// <summary> A seed used for deterministic sampling, when supported. </summary>
    public long? Seed { get; set; }
    /// <summary> Sequences that will end token generation when produced. </summary>
    public IList<string> StopSequences { get; } = new OptionalList<string>();
    /// <summary> The sampling temperature to use. </summary>
    public double? Temperature { get; set; }
    /// <summary> The nucleus (top-p) sampling factor to use. </summary>
    public double? NucleusSamplingFactor { get; set; }
    /// <summary> The tool definitions that the model may call. </summary>
    public IList<ChatToolDefinition> Tools { get; } = new OptionalList<ChatToolDefinition>();
    /// <summary> The constraint on how the model should use the provided tools. </summary>
    public ChatToolConstraint? ToolConstraint { get; set; }
    /// <summary> An end-user identifier to associate with the request. </summary>
    public string User { get; set; }
    /// <summary> The function definitions that the model may call (deprecated in favor of tools). </summary>
    public IList<ChatFunctionDefinition> Functions { get; } = new OptionalList<ChatFunctionDefinition>();
    /// <summary> The constraint on how the model should use the provided functions. </summary>
    public ChatFunctionConstraint? FunctionConstraint { get; set; }

    // Serializes the stop sequences only when the caller populated the collection.
    internal BinaryData GetInternalStopSequences()
    {
        if (!OptionalProperty.IsCollectionDefined(StopSequences))
        {
            return null;
        }
        return BinaryData.FromObjectAsJson(StopSequences);
    }

    // The wire format keys logit_bias entries on stringified token IDs.
    internal IDictionary<string, long> GetInternalLogitBias()
    {
        OptionalDictionary<string, long> packedLogitBias = [];
        foreach (KeyValuePair<int, int> pair in TokenSelectionBiases)
        {
            packedLogitBias[$"{pair.Key}"] = pair.Value;
        }
        return packedLogitBias;
    }

    // Converts public tool definitions into generated wire-format tool models.
    internal IList<Internal.Models.ChatCompletionTool> GetInternalTools()
    {
        OptionalList<Internal.Models.ChatCompletionTool> internalTools = [];
        foreach (ChatToolDefinition tool in Tools)
        {
            if (tool is ChatFunctionToolDefinition functionTool)
            {
                Internal.Models.FunctionObject functionObject = new(
                    functionTool.Description,
                    functionTool.Name,
                    CreateInternalFunctionParameters(functionTool.Parameters),
                    serializedAdditionalRawData: null);
                internalTools.Add(new(functionObject));
            }
        }
        return internalTools;
    }

    // Converts public function definitions into generated wire-format function models.
    internal IList<Internal.Models.ChatCompletionFunctions> GetInternalFunctions()
    {
        OptionalList<Internal.Models.ChatCompletionFunctions> internalFunctions = [];
        foreach (ChatFunctionDefinition function in Functions)
        {
            Internal.Models.ChatCompletionFunctions internalFunction = new(
                function.Description,
                function.Name,
                CreateInternalFunctionParameters(function.Parameters),
                serializedAdditionalRawData: null);
            internalFunctions.Add(internalFunction);
        }
        return internalFunctions;
    }

    // Re-shapes JSON Schema parameter data into the generated FunctionParameters model.
    internal static Internal.Models.FunctionParameters CreateInternalFunctionParameters(BinaryData parameters)
    {
        if (parameters == null)
        {
            return null;
        }
        Internal.Models.FunctionParameters internalParameters = new();
        // BUG FIX: JsonDocument is IDisposable and was previously leaked. GetRawText() copies the
        // data each property needs, so disposing here is safe.
        using (JsonDocument document = JsonDocument.Parse(parameters.ToString()))
        {
            foreach (JsonProperty property in document.RootElement.EnumerateObject())
            {
                BinaryData propertyData = BinaryData.FromString(property.Value.GetRawText());
                internalParameters.AdditionalProperties.Add(property.Name, propertyData);
            }
        }
        return internalParameters;
    }
}
/// <summary>
/// The reason the model stopped generating tokens: <c>stop</c> (natural stop point or provided stop
/// sequence), <c>length</c> (maximum requested token count reached), <c>content_filter</c> (content
/// omitted by a triggered content filter rule), <c>tool_calls</c> (the model called one or more tools
/// defined in the request), or <c>function_call</c> (deprecated; the model called a defined function).
/// </summary>
public enum ChatFinishReason
{
    /// <summary>
    /// Indicates that the model encountered a natural stop point or provided stop sequence.
    /// </summary>
    Stopped,
    /// <summary>
    /// Indicates that the model reached the maximum number of tokens allowed for the request.
    /// </summary>
    Length,
    /// <summary>
    /// Indicates that content was omitted due to a triggered content filter rule.
    /// </summary>
    ContentFilter,
    /// <summary>
    /// Indicates that the model called one or more tools that were defined in the request.
    /// </summary>
    /// <remarks>
    /// To resolve tool calls, append the message associated with the tool calls followed by matching
    /// tool messages for each tool call, then perform another chat completion with the combined set of
    /// messages.
    /// Note: <c>tool_calls</c> is not provided as the finish reason if the model calls a tool in
    /// response to an explicit tool choice; in that case, calling the specified tool is assumed and the
    /// expected reason is <see cref="Stopped"/>.
    /// </remarks>
    ToolCalls,
    /// <summary>
    /// Indicates that the model called a function that was defined in the request.
    /// </summary>
    /// <remarks>
    /// To resolve a function call, append the message associated with the function call followed by a
    /// function message with the appropriate name and arguments, then perform another chat completion
    /// with the combined set of messages.
    /// </remarks>
    FunctionCall,
}

public partial class ChatFunctionCall : IJsonModel<ChatFunctionCall>
{
    void IJsonModel<ChatFunctionCall>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
    {
        writer.WriteStartObject();
        writer.WriteString("name"u8, Name);
        writer.WriteString("arguments"u8, Arguments);
        writer.WriteEndObject();
    }

    // Read/persist paths are not yet needed; response-side function_call payloads are deserialized
    // via the generated internal models.
    ChatFunctionCall IJsonModel<ChatFunctionCall>.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
        => throw new NotImplementedException();

    BinaryData IPersistableModel<ChatFunctionCall>.Write(ModelReaderWriterOptions options)
        => throw new NotImplementedException();

    ChatFunctionCall IPersistableModel<ChatFunctionCall>.Create(BinaryData data, ModelReaderWriterOptions options)
        => throw new NotImplementedException();

    string IPersistableModel<ChatFunctionCall>.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
}
/// <summary>
/// Represents an assistant call against a supplied <see cref="ChatFunctionDefinition"/> that is needed by the
/// model to continue the logical conversation.
/// </summary>
/// <remarks>
/// <para>
/// Note that functions are deprecated in favor of tools; using <see cref="ChatFunctionToolDefinition"/>
/// instances will enable the use of tool calls via <see cref="ChatFunctionToolCall"/> instead of this type.
/// </para>
/// <para>
/// The model makes a function call in response to evaluation of the supplied name and description
/// information in function definitions; it is resolved by providing a new function message with matching
/// function output on a subsequent chat completion request.
/// </para>
/// </remarks>
public partial class ChatFunctionCall
{
    /// <summary>
    /// The name of the function being called by the model.
    /// </summary>
    public required string Name { get; set; }
    /// <summary>
    /// The arguments to the function being called by the model.
    /// </summary>
    public required string Arguments { get; set; }
    /// <summary>
    /// Creates a new instance of <see cref="ChatFunctionCall"/>.
    /// </summary>
    public ChatFunctionCall() { }
    /// <summary>
    /// Creates a new instance of <see cref="ChatFunctionCall"/>.
    /// </summary>
    /// <param name="functionName"> The name of the function that was called by the model. </param>
    /// <param name="arguments"> The arguments to the function that was called by the model. </param>
    [SetsRequiredMembers]
    public ChatFunctionCall(string functionName, string arguments)
    {
        Name = functionName;
        Arguments = arguments;
    }
}

/// <summary>
/// Represents a desired manner in which the model should use the functions defined in a chat completion request.
/// </summary>
public readonly partial struct ChatFunctionConstraint : IEquatable<ChatFunctionConstraint>
{
    private readonly string _value;
    private readonly bool _isPredefined;

    /// <summary>
    /// <c>auto</c> specifies that the model should freely call any or none of the provided functions.
    /// This is the implied default when not otherwise specified.
    /// </summary>
    public static ChatFunctionConstraint Auto { get; } = new("auto", isPredefined: true);
    /// <summary>
    /// <c>none</c> specifies that the model should not call any of the provided functions. Note that the
    /// definition of the functions may still influence the chat completion content even when not called.
    /// </summary>
    public static ChatFunctionConstraint None { get; } = new("none", isPredefined: true);

    /// <summary>
    /// Creates a new instance of <see cref="ChatFunctionConstraint"/> that specifies that the model should
    /// invoke a specific, named function.
    /// </summary>
    /// <param name="functionName"> The name of the function that the model should call. </param>
    public ChatFunctionConstraint(string functionName)
        : this(functionName, isPredefined: false)
    {
    }

    internal ChatFunctionConstraint(string functionNameOrPredefinedLabel, bool isPredefined)
    {
        _value = functionNameOrPredefinedLabel;
        _isPredefined = isPredefined;
    }

    /// <inheritdoc/>
    public static bool operator ==(ChatFunctionConstraint left, ChatFunctionConstraint right)
        => left.Equals(right);
    /// <inheritdoc/>
    public static implicit operator ChatFunctionConstraint(string value) => new(value);
    /// <inheritdoc/>
    public static bool operator !=(ChatFunctionConstraint left, ChatFunctionConstraint right)
        => !left.Equals(right);
    /// <inheritdoc/>
    public bool Equals(ChatFunctionConstraint other)
        // BUG FIX: string.Equals(a, b) is null-safe; the previous instance-call form threw a
        // NullReferenceException when comparing against default(ChatFunctionConstraint).
        => other._isPredefined == _isPredefined && string.Equals(other._value, _value);
    /// <inheritdoc/>
    public override string ToString() => ToBinaryData().ToString();
    /// <inheritdoc/>
    public override bool Equals(object obj)
        => obj is ChatFunctionConstraint constraint && constraint.Equals(this);
    /// <inheritdoc/>
    // Avoids the intermediate string allocation the former interpolated-string hash incurred.
    public override int GetHashCode() => HashCode.Combine(_value, _isPredefined);

    internal BinaryData ToBinaryData()
    {
        if (_isPredefined)
        {
            // Predefined labels ("auto"/"none") serialize as bare strings on the wire.
            return BinaryData.FromString(_value);
        }
        else
        {
            // Named constraints serialize as an object carrying the target function's name.
            return BinaryData.FromObjectAsJson(new
            {
                name = _value,
            });
        }
    }
}
/// <summary>
/// Describes a callable function supplied to the model in a chat completion request.
/// </summary>
public class ChatFunctionDefinition
{
    /// <summary>
    /// The name of the function.
    /// </summary>
    public required string Name { get; set; }
    /// <summary>
    /// A friendly description of the function, supplementing <see cref="Name"/> to inform the model about
    /// when the function should be called.
    /// </summary>
    public string Description { get; set; }
    /// <summary>
    /// The parameter information for the function, provided in JSON Schema format.
    /// </summary>
    /// <remarks>
    /// <see cref="BinaryData.FromObjectAsJson{T}(T, System.Text.Json.JsonSerializerOptions)"/> offers a
    /// convenient way to author the schema from an anonymous object, e.g. a <c>type: object</c> schema
    /// whose <c>properties</c> describe each argument and whose <c>required</c> array lists the
    /// mandatory ones.
    /// </remarks>
    public BinaryData Parameters { get; set; }
    /// <summary>
    /// Creates a new instance of <see cref="ChatFunctionDefinition"/>.
    /// </summary>
    public ChatFunctionDefinition() { }
    /// <summary>
    /// Creates a new instance of <see cref="ChatFunctionDefinition"/>.
    /// </summary>
    /// <param name="name"> The name of the function. </param>
    /// <param name="description"> A description of the function's behavior or purpose. </param>
    /// <param name="parameters"> The parameter information for the function, in JSON Schema format. </param>
    [SetsRequiredMembers]
    public ChatFunctionDefinition(string name, string description = null, BinaryData parameters = null)
    {
        Name = name;
        Description = description;
        Parameters = parameters;
    }
}

/// <summary>
/// Describes a call the model made against a function tool defined in a chat completion request.
/// </summary>
public class ChatFunctionToolCall : ChatToolCall
{
    internal Internal.Models.ChatCompletionMessageToolCallFunction InternalToolCall { get; }

    /// <summary>
    /// Gets the name of the function.
    /// </summary>
    public required string Name
    {
        get => InternalToolCall.Name;
        set => InternalToolCall.Name = value;
    }

    /// <summary>
    /// Gets the arguments to the function.
    /// </summary>
    public required string Arguments
    {
        get => InternalToolCall.Arguments;
        set => InternalToolCall.Arguments = value;
    }

    /// <summary>
    /// Creates a new instance of <see cref="ChatFunctionToolCall"/>.
    /// </summary>
    public ChatFunctionToolCall()
    {
        InternalToolCall = new();
    }

    /// <summary>
    /// Creates a new instance of <see cref="ChatFunctionToolCall"/>.
    /// </summary>
    /// <param name="toolCallId">
    /// The ID of the tool call, used when resolving the tool call with a later tool message.
    /// </param>
    /// <param name="functionName"> The name of the function. </param>
    /// <param name="arguments"> The arguments to the function. </param>
    [SetsRequiredMembers]
    public ChatFunctionToolCall(string toolCallId, string functionName, string arguments)
        : this()
    {
        Id = toolCallId;
        Name = functionName;
        Arguments = arguments;
    }

    internal override void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options)
    {
        // Emits: "type": "function", "function": { "name": ..., "arguments": ... }
        writer.WriteString("type"u8, "function"u8);
        writer.WritePropertyName("function"u8);
        writer.WriteStartObject();
        writer.WriteString("name"u8, Name);
        writer.WriteString("arguments"u8, Arguments);
        writer.WriteEndObject();
    }
}
/// <summary>
/// Describes a function tool that the model may call during a chat completion request.
/// </summary>
public class ChatFunctionToolDefinition : ChatToolDefinition
{
    /// <summary>
    /// The name of the function that the tool represents.
    /// </summary>
    public required string Name { get; set; }
    /// <summary>
    /// A friendly description of the function, supplementing <see cref="Name"/> to inform the model
    /// about when it should call the function.
    /// </summary>
    public string Description { get; set; }
    /// <summary>
    /// The parameter information for the function, provided in JSON Schema format.
    /// </summary>
    /// <remarks>
    /// <see cref="BinaryData.FromObjectAsJson{T}(T, System.Text.Json.JsonSerializerOptions)"/> offers a
    /// convenient way to author the schema from an anonymous object, e.g. a <c>type: object</c> schema
    /// whose <c>properties</c> describe each argument and whose <c>required</c> array lists the
    /// mandatory ones.
    /// </remarks>
    public BinaryData Parameters { get; set; }
    /// <summary>
    /// Creates a new instance of <see cref="ChatFunctionToolDefinition"/>.
    /// </summary>
    public ChatFunctionToolDefinition() { }
    /// <summary>
    /// Creates a new instance of <see cref="ChatFunctionToolDefinition"/>.
    /// </summary>
    /// <param name="name"> The name of the function. </param>
    /// <param name="description"> The description of the function. </param>
    /// <param name="parameters"> The parameters into the function, in JSON Schema format. </param>
    [SetsRequiredMembers]
    public ChatFunctionToolDefinition(string name, string description = null, BinaryData parameters = null)
    {
        Name = name;
        Description = description;
        Parameters = parameters;
    }
}
/// <summary>
/// Represents a collection of log probability result information as requested via
/// <see cref="ChatCompletionOptions.IncludeLogProbabilities"/>.
/// </summary>
public class ChatLogProbabilityCollection : ReadOnlyCollection<ChatLogProbabilityResult>
{
    internal ChatLogProbabilityCollection(IList<ChatLogProbabilityResult> list) : base(list) { }

    internal static ChatLogProbabilityCollection FromInternalData(
        Internal.Models.CreateChatCompletionResponseChoiceLogprobs internalLogprobs)
    {
        if (internalLogprobs == null)
        {
            return null;
        }
        List<ChatLogProbabilityResult> logProbabilities = [];
        foreach (Internal.Models.ChatCompletionTokenLogprob internalLogprob in internalLogprobs.Content)
        {
            List<ChatLogProbabilityResultItem> alternateLogProbabilities = null;
            if (internalLogprob.TopLogprobs != null)
            {
                alternateLogProbabilities = [];
                foreach (Internal.Models.ChatCompletionTokenLogprobTopLogprob internalTopLogprob
                    in internalLogprob.TopLogprobs)
                {
                    // BUG FIX: each alternate entry must use its own token data; the previous code
                    // repeated the outer token's Token/Logprob/Bytes for every alternate.
                    alternateLogProbabilities.Add(new(
                        internalTopLogprob.Token,
                        internalTopLogprob.Logprob,
                        internalTopLogprob.Bytes));
                }
            }
            logProbabilities.Add(new(
                internalLogprob.Token,
                internalLogprob.Logprob,
                internalLogprob.Bytes,
                alternateLogProbabilities));
        }
        return new ChatLogProbabilityCollection(logProbabilities);
    }
}

/// <summary>
/// Represents a single token's log probability information, as requested via
/// <see cref="ChatCompletionOptions.IncludeLogProbabilities"/>.
/// </summary>
public class ChatLogProbabilityResult
{
    /// <summary>
    /// The token for which this log probability information applies.
    /// </summary>
    public string Token { get; }
    /// <summary>
    /// The log probability for the token.
    /// </summary>
    public double LogProbability { get; }
    /// <summary>
    /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances
    /// where characters are represented by multiple tokens and their byte representations must be
    /// combined to generate the correct text representation. Can be null if there is no bytes
    /// representation for the token.
    /// </summary>
    public IReadOnlyList<int> Utf8ByteValues { get; }
    /// <summary>
    /// List of the most likely tokens and their log probability at this token position. In rare cases,
    /// there may be fewer than the number of requested top log probabilities returned, as supplied via
    /// <see cref="ChatCompletionOptions.LogProbabilityCount"/>. Can be null when alternates were not
    /// requested.
    /// </summary>
    public IReadOnlyList<ChatLogProbabilityResultItem> AlternateLogProbabilities { get; }
    internal ChatLogProbabilityResult(
        string token,
        double logProbability,
        IEnumerable<int> byteValues,
        IEnumerable<ChatLogProbabilityResultItem> alternateLogProbabilities)
    {
        Token = token;
        LogProbability = logProbability;
        // BUG FIX: both inputs are optional (bytes may be absent on the wire; alternates are null when
        // top_logprobs was not requested) — preserve null instead of throwing NullReferenceException.
        Utf8ByteValues = byteValues?.ToList();
        AlternateLogProbabilities = alternateLogProbabilities?.ToList();
    }
}
/// <summary>
/// Represents a single item of log probability information as requested via
/// <see cref="ChatCompletionOptions.IncludeLogProbabilities"/> and
/// <see cref="ChatCompletionOptions.LogProbabilityCount"/>.
/// </summary>
public class ChatLogProbabilityResultItem
{
    /// <summary>
    /// The token for which this log probability information applies.
    /// </summary>
    public string Token { get; }
    /// <summary>
    /// The log probability for the token.
    /// </summary>
    public double LogProbability { get; }
    /// <summary>
    /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances
    /// where characters are represented by multiple tokens and their byte representations must be
    /// combined to generate the correct text representation. Can be null if there is no bytes
    /// representation for the token.
    /// </summary>
    public IReadOnlyList<int> Utf8ByteValues { get; }
    /// <summary>
    /// Creates a new instance of <see cref="ChatLogProbabilityResultItem"/>.
    /// </summary>
    protected ChatLogProbabilityResultItem() { }
    /// <summary>
    /// Creates a new instance of <see cref="ChatLogProbabilityResultItem"/>.
    /// </summary>
    /// <param name="token"> The token represented by this item. </param>
    /// <param name="logProbability"> The log probability for the token. </param>
    /// <param name="byteValues"> The UTF-8 byte value sequence representation for the token. </param>
    internal ChatLogProbabilityResultItem(string token, double logProbability, IEnumerable<int> byteValues)
    {
        Token = token;
        LogProbability = logProbability;
        // BUG FIX: bytes are documented as possibly null; preserve null rather than throwing.
        Utf8ByteValues = byteValues is null ? null : new List<int>(byteValues);
    }
}

/// <summary>
/// Represents the common base type for a piece of message content used for chat completions.
/// </summary>
public partial class ChatMessageContent
{
    /// <summary>
    /// The type of message content data, e.g. text or image, that this <see cref="ChatMessageContent"/>
    /// instance represents.
    /// </summary>
    public ChatMessageContentKind ContentKind { get; }

    private readonly object _contentValue;
    private readonly string _contentMediaTypeName;

    internal ChatMessageContent(object value, ChatMessageContentKind kind, string contentMediaTypeName = null)
    {
        _contentValue = value;
        ContentKind = kind;
        _contentMediaTypeName = contentMediaTypeName;
    }

    /// <summary>
    /// Creates a new instance of <see cref="ChatMessageContent"/> that encapsulates text content.
    /// </summary>
    /// <param name="text"> The content for the new instance. </param>
    /// <returns> A new instance of <see cref="ChatMessageContent"/>. </returns>
    public static ChatMessageContent CreateText(string text) => new(text, ChatMessageContentKind.Text);

    /// <summary>
    /// Creates a new instance of <see cref="ChatMessageContent"/> that encapsulates image content obtained
    /// from an internet location that will be accessible to the model when evaluating a message with this
    /// content.
    /// </summary>
    /// <param name="imageUri">
    /// An internet location pointing to an image. This must be accessible to the model.
    /// </param>
    /// <returns> A new instance of <see cref="ChatMessageContent"/>. </returns>
    public static ChatMessageContent CreateImage(Uri imageUri) => new(imageUri, ChatMessageContentKind.Image);

    /// <summary>
    /// Creates a new instance of <see cref="ChatMessageContent"/> that encapsulates binary image content.
    /// </summary>
    /// <param name="imageBytes"> The binary representation of the image content. </param>
    /// <param name="mediaType"> The media type name, e.g. image/png, for the image. </param>
    /// <returns> A new instance of <see cref="ChatMessageContent"/>. </returns>
    public static ChatMessageContent CreateImage(BinaryData imageBytes, string mediaType)
        => new(imageBytes, ChatMessageContentKind.Image, mediaType);

    /// <summary>
    /// Provides the <see cref="string"/> associated with a content item of
    /// <see cref="ChatMessageContentKind.Text"/>.
    /// </summary>
    /// <remarks>
    /// An implicit conversion to <see cref="string"/> also exists, so text content can typically be treated
    /// like a string without calling this explicitly.
    /// </remarks>
    /// <returns> The content string for the text content item. </returns>
    /// <exception cref="InvalidOperationException"> The content does not support a text representation. </exception>
    public string ToText()
        => ContentKind switch
        {
            ChatMessageContentKind.Text => _contentValue?.ToString(),
            _ => throw new InvalidOperationException(
                $"{nameof(ToText)} conversion not supported for content kind: {ContentKind}"),
        };

    /// <summary>
    /// Provides a <see cref="Uri"/> associated with a content item. These URIs can refer to an internet
    /// location accessible to the target model or can be base64-encoded data URIs.
    /// </summary>
    /// <returns> A URI representation of the content item. </returns>
    /// <exception cref="InvalidOperationException"> The content does not support a URI representation. </exception>
    public Uri ToUri()
        => ContentKind switch
        {
            ChatMessageContentKind.Image => _contentValue switch
            {
                Uri imageUri => imageUri,
                // Binary image data is rendered as a base64 data: URI carrying its media type.
                BinaryData imageData => new Uri(
                    $"data:{_contentMediaTypeName};base64,{Convert.ToBase64String(imageData.ToArray())}"),
                _ => throw new InvalidOperationException(
                    $"Cannot convert underlying image data type '{_contentValue?.GetType()}' to a {nameof(Uri)}"),
            },
            // BUG FIX: this failure message previously referenced ToText.
            _ => throw new InvalidOperationException(
                $"{nameof(ToUri)} conversion not supported for content kind: {ContentKind}"),
        };

    /// <summary>
    /// The implicit conversion operator that infers an equivalent <see cref="ChatMessageContent"/> instance
    /// from a plain <see cref="string"/>.
    /// </summary>
    /// <param name="value"> The text for the message content. </param>
    public static implicit operator ChatMessageContent(string value) => CreateText(value);

    /// <summary>
    /// An implicit operator allowing a content item to be treated as a string.
    /// </summary>
    /// <param name="content"> The content item to convert. </param>
    public static implicit operator string(ChatMessageContent content) => content.ToText();

    /// <inheritdoc/>
    public override string ToString()
    {
        if (ContentKind == ChatMessageContentKind.Text)
        {
            return ToText();
        }
        return base.ToString();
    }
}

/// <summary>
/// Represents the possible kinds of underlying data for a chat message's content property.
/// </summary>
public enum ChatMessageContentKind
{
    /// <summary>
    /// Plain text content, represented as a <see cref="string"/>.
    /// </summary>
    Text,
    /// <summary>
    /// Image content, as used exclusively by gpt-4-vision-preview when providing an array of content
    /// items into a chat completion request.
    /// </summary>
    Image,
    // Audio,
    // Video,
}
/// <summary>
/// Represents a chat message of the <c>assistant</c> role as supplied to a chat completion request. As
/// assistant messages are originated by the model on responses, <see cref="ChatRequestAssistantMessage"/>
/// instances typically represent chat history or example interactions to guide model behavior.
/// </summary>
public class ChatRequestAssistantMessage : ChatRequestMessage
{
    /// <summary>
    /// An optional name associated with the assistant message. This is typically defined with a system
    /// message and is used to differentiate between multiple participants of the same role.
    /// </summary>
    public string Name { get; set; }
    /// <summary>
    /// The tool calls furnished by the model that are needed to continue the logical conversation across
    /// chat completion requests. Each tool call corresponds to a supplied tool definition and is resolved
    /// by a later tool message that correlates via ID.
    /// </summary>
    public IReadOnlyList<ChatToolCall> ToolCalls { get; }
    /// <summary>
    /// The function call furnished by the model that is needed to continue the logical conversation
    /// across chat completion requests (deprecated in favor of tool calls). It corresponds to a supplied
    /// <see cref="ChatFunctionDefinition"/> and is resolved by a later function message that correlates
    /// via name.
    /// </summary>
    public ChatFunctionCall FunctionCall { get; }

    // Assistant messages may present ONE OF:
    //  - Ordinary text content without tools or a function, in which case the content is required;
    //  - A list of tool calls, together with optional text content;
    //  - A function call, together with optional text content.

    /// <summary>
    /// Creates a new instance of <see cref="ChatRequestAssistantMessage"/> that represents ordinary text
    /// content and does not feature tool or function calls.
    /// </summary>
    /// <param name="content"> The text content of the message. </param>
    public ChatRequestAssistantMessage(string content)
        : base(ChatRole.Assistant, content)
    { }

    /// <summary>
    /// Creates a new instance of <see cref="ChatRequestAssistantMessage"/> that represents tool calls
    /// that were provided by the model.
    /// </summary>
    /// <param name="toolCalls"> The tool calls made by the model. </param>
    /// <param name="content"> Optional text content associated with the message. </param>
    public ChatRequestAssistantMessage(IEnumerable<ChatToolCall> toolCalls, string content = null)
        : base(ChatRole.Assistant, content)
    {
        ToolCalls = new List<ChatToolCall>(toolCalls);
    }

    /// <summary>
    /// Creates a new instance of <see cref="ChatRequestAssistantMessage"/> that represents a function
    /// call (deprecated in favor of tool calls) that was made by the model.
    /// </summary>
    /// <param name="functionCall"> The function call made by the model. </param>
    /// <param name="content"> Optional text content associated with the message. </param>
    public ChatRequestAssistantMessage(ChatFunctionCall functionCall, string content = null)
        : base(ChatRole.Assistant, content)
    {
        FunctionCall = functionCall;
    }

    /// <summary>
    /// Creates a new instance of <see cref="ChatRequestAssistantMessage"/> from a
    /// <see cref="ChatCompletion"/> with an assistant role response.
    /// </summary>
    /// <remarks>
    /// This constructor will copy the content, tool calls, and function call from a chat completion
    /// response into a new assistant role request message.
    /// </remarks>
    /// <param name="chatCompletion">
    /// The <see cref="ChatCompletion"/> from which the conversation history request message should be created.
    /// </param>
    /// <exception cref="ArgumentNullException"> <paramref name="chatCompletion"/> is null. </exception>
    /// <exception cref="ArgumentException">
    /// The role of the provided chat completion response was not <see cref="ChatRole.Assistant"/>.
    /// </exception>
    public ChatRequestAssistantMessage(ChatCompletion chatCompletion)
        : base(ChatRole.Assistant, chatCompletion?.Content)
    {
        if (chatCompletion is null)
        {
            throw new ArgumentNullException(nameof(chatCompletion));
        }
        if (chatCompletion.Role != ChatRole.Assistant)
        {
            // BUG FIX: report the offending role (the condition being checked), not the finish reason.
            throw new ArgumentException(
                $"Can't instantiate a {nameof(ChatRequestAssistantMessage)} from a chat completion"
                    + $" with role: {chatCompletion.Role}",
                nameof(chatCompletion));
        }
        ToolCalls = chatCompletion.ToolCalls;
        FunctionCall = chatCompletion.FunctionCall;
    }

    internal override void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options)
    {
        if (OptionalProperty.IsDefined(Name))
        {
            writer.WriteString("name"u8, Name);
        }
        if (OptionalProperty.IsCollectionDefined(ToolCalls))
        {
            writer.WritePropertyName("tool_calls"u8);
            writer.WriteStartArray();
            foreach (ChatToolCall toolCall in ToolCalls)
            {
                (toolCall as IJsonModel<ChatToolCall>).Write(writer, options);
            }
            writer.WriteEndArray();
        }
        if (OptionalProperty.IsDefined(FunctionCall))
        {
            writer.WritePropertyName("function_call"u8);
            (FunctionCall as IJsonModel<ChatFunctionCall>).Write(writer, options);
        }
    }
}

/// <summary>
/// Represents a chat message of the <c>function</c> role as provided to a chat completion request. A function
/// message resolves a prior function call received from the model and correlates to both a supplied
/// <see cref="ChatFunctionDefinition"/> instance as well as a <see cref="ChatFunctionCall"/> made by the model
/// on an assistant response message.
/// </summary>
public class ChatRequestFunctionMessage : ChatRequestMessage
{
    /// <summary>
    /// The name of the called function that this message provides information from.
+ /// + public string FunctionName { get; set; } // JSON "name" + + /// + /// Creates a new instance of . + /// + /// + /// The name of the called function that this message provides information from. + /// + /// + /// The textual content that represents the output or result from the called function. There is no format + /// restriction (e.g. JSON) imposed on this content. + /// + public ChatRequestFunctionMessage(string functionName, string content) + : base(ChatRole.Function, content) + { + FunctionName = functionName; + } + + internal override void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("name"u8, FunctionName); + } +} diff --git a/.dotnet/src/Custom/Chat/ChatRequestMessage.Serialization.cs b/.dotnet/src/Custom/Chat/ChatRequestMessage.Serialization.cs new file mode 100644 index 000000000..0cf026fd9 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRequestMessage.Serialization.cs @@ -0,0 +1,94 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.ComponentModel.Design; +using System.Text.Json; +using OpenAI.ClientShared.Internal; + +namespace OpenAI.Chat; + +public abstract partial class ChatRequestMessage : IJsonModel +{ + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + writer.WriteString("role"u8, Role switch + { + ChatRole.System => "system", + ChatRole.User => "user", + ChatRole.Assistant => "assistant", + ChatRole.Tool => "tool", + ChatRole.Function => "function", + _ => throw new ArgumentException(nameof(Role)) + }); + if (OptionalProperty.IsDefined(Content)) + { + writer.WritePropertyName("content"u8); + if (Content.Span.Length == 0) + { + writer.WriteNullValue(); + } + else if (Content.Span.Length == 1) + { + if (Content.Span[0].ContentKind == ChatMessageContentKind.Text) + { + writer.WriteStringValue(Content.Span[0].ToText()); + } + else + { + throw 
new InvalidOperationException(); + } + } + else if (Content.Span.Length > 1) + { + writer.WriteStartArray(); + foreach (ChatMessageContent contentItem in Content.Span) + { + writer.WriteStartObject(); + if (contentItem.ContentKind == ChatMessageContentKind.Text) + { + writer.WriteString("type"u8, "text"u8); + writer.WriteString("text"u8, contentItem.ToText()); + } + else if (contentItem.ContentKind == ChatMessageContentKind.Image) + { + writer.WriteString("type"u8, "image_url"u8); + writer.WritePropertyName("image_url"u8); + writer.WriteStartObject(); + writer.WriteString("url"u8, contentItem.ToUri().AbsoluteUri); + writer.WriteEndObject(); + } + else + { + throw new InvalidOperationException(); + } + writer.WriteEndObject(); + } + writer.WriteEndArray(); + } + } + WriteDerivedAdditions(writer, options); + writer.WriteEndObject(); + } + + ChatRequestMessage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + ChatRequestMessage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal abstract void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options); +} diff --git a/.dotnet/src/Custom/Chat/ChatRequestMessage.cs b/.dotnet/src/Custom/Chat/ChatRequestMessage.cs new file mode 100644 index 000000000..38444eb4e --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRequestMessage.cs @@ -0,0 +1,108 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections; +using System.Collections.Generic; + +namespace OpenAI.Chat; + +/// +/// A common, base representation of a message provided as input into a chat completion request. 
+/// +/// +/// +/// +/// Type - +/// Role - +/// Description +/// +/// +/// - +/// system - +/// Instructions to the model that guide the behavior of future assistant messages. +/// +/// +/// - +/// user - +/// Input messages from the caller, typically paired with assistant messages in a conversation. +/// +/// +/// - +/// assistant - +/// +/// Output messages from the model with responses to the user or calls to tools or functions that are +/// needed to continue the logical conversation. +/// +/// +/// +/// - +/// tool - +/// +/// Resolution information for a in an earlier +/// that was made against a supplied +/// . +/// +/// +/// +/// - +/// function - +/// +/// Resolution information for a in an earlier +/// that was made against a supplied +/// . Note that functions are deprecated in favor of +/// tool_calls. +/// +/// +/// +/// +public abstract partial class ChatRequestMessage +{ + /// + /// The role associated with the message. + /// + public ChatRole Role { get; } + + /// + /// The content associated with the message. The interpretation of this content will vary depending on the message type. 
+ /// + public ReadOnlyMemory Content => _contentItems.AsMemory(); + private readonly ChatMessageContent[] _contentItems; + + internal ChatRequestMessage(ChatRole role, ChatMessageContent content) + : this(role, [content]) + { } + + internal ChatRequestMessage(ChatRole role, ChatMessageContent[] content) + { + Role = role; + _contentItems = content; + } + + /// + public static ChatRequestSystemMessage CreateSystemMessage(string content) + => new ChatRequestSystemMessage(content); + + /// + public static ChatRequestUserMessage CreateUserMessage(string content) + => new ChatRequestUserMessage(content); + + /// + public static ChatRequestUserMessage CreateUserMessage(IEnumerable contentItems) + => new ChatRequestUserMessage(contentItems); + + /// + public static ChatRequestUserMessage CreateUserMessage(params ChatMessageContent[] contentItems) + => new ChatRequestUserMessage(contentItems); + + /// + public static ChatRequestAssistantMessage CreateAssistantMessage(string content) + => new ChatRequestAssistantMessage(content); + + /// + public static ChatRequestToolMessage CreateToolMessage(string toolCallId, string content) + => new ChatRequestToolMessage(toolCallId, content); + + /// + public static ChatRequestFunctionMessage CreateFunctionMessage(string toolCallId, string content) + => new ChatRequestFunctionMessage(toolCallId, content); +} diff --git a/.dotnet/src/Custom/Chat/ChatRequestSystemMessage.cs b/.dotnet/src/Custom/Chat/ChatRequestSystemMessage.cs new file mode 100644 index 000000000..d391aad3b --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRequestSystemMessage.cs @@ -0,0 +1,39 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Dynamic; +using System.Runtime.InteropServices; +using System.Text.Json; +using OpenAI.ClientShared.Internal; + +namespace OpenAI.Chat; + +/// +/// Represents a chat message of the system role as supplied to a chat completion request. 
A system message is +/// generally supplied as the first message to a chat completion request and guides the model's behavior across future +/// assistant role response messages. These messages may help control behavior, style, tone, and +/// restrictions for a model-based assistant. +/// +public class ChatRequestSystemMessage : ChatRequestMessage +{ + /// + /// An optional name for the participant. + /// + public string Name { get; set; } // JSON "name" + + /// + /// Creates a new instance of . + /// + /// The system message text that guides the model's behavior. + public ChatRequestSystemMessage(string content) : base(ChatRole.System, content) { } + + internal override void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + if (OptionalProperty.IsDefined(Name)) + { + writer.WriteString("name"u8, Name); + } + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatRequestToolMessage.cs b/.dotnet/src/Custom/Chat/ChatRequestToolMessage.cs new file mode 100644 index 000000000..7ec17eea5 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRequestToolMessage.cs @@ -0,0 +1,53 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Chat; + +/// +/// Represents a chat message of the tool role as supplied to a chat completion request. A tool message +/// encapsulates a resolution of a made by the model. The typical interaction flow featuring +/// tool messages is: +/// +/// A provides a on a request; +/// +/// Based on the name and description information of provided tools, the model responds with one or +/// more instances that need to be resolved to continue the logical conversation; +/// +/// +/// For each , the matching tool is invoked and its output is supplied back to the model +/// via a to resolve the tool call and allow the logical conversation to +/// continue. 
+/// +/// +/// +public class ChatRequestToolMessage : ChatRequestMessage +{ + /// + /// The id correlating to the prior made by the model. + /// + public string ToolCallId { get; set; } + + /// + /// Creates a new instance of . + /// + /// The id correlating to a made by the model. + /// + /// The textual content, produced by the defined tool in response to the correlated , + /// that resolves the tool call and allows the logical conversation to continue. No format restrictions (e.g. + /// JSON) are imposed on the content emitted by tools. + /// + public ChatRequestToolMessage(string toolCallId, string content) + : base(ChatRole.Tool, content) + { + ToolCallId = toolCallId; + } + + internal override void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("tool_call_id"u8, ToolCallId); + } +} diff --git a/.dotnet/src/Custom/Chat/ChatRequestUserMessage.cs b/.dotnet/src/Custom/Chat/ChatRequestUserMessage.cs new file mode 100644 index 000000000..f1e7a3588 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRequestUserMessage.cs @@ -0,0 +1,65 @@ +using System.ClientModel.Internal; + +using System; +using System.Collections.Generic; +using System.Dynamic; +using System.Text.Json; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Linq; +using OpenAI.ClientShared.Internal; + +namespace OpenAI.Chat; + +/// +/// Represents a chat message of the user role as supplied to a chat completion request. A user message contains +/// information originating from the caller and serves as a prompt for the model to complete. User messages may result +/// in either direct assistant message responses or in calls to supplied tools or functions. +/// +public class ChatRequestUserMessage : ChatRequestMessage +{ + /// + /// An optional name for the participant. + /// + public string Name { get; set; } + + /// + /// Creates a new instance of with ordinary text content. 
+ /// + /// The textual content associated with the message. + public ChatRequestUserMessage(string content) + : base(ChatRole.User, ChatMessageContent.CreateText(content)) + { } + + /// + /// Creates a new instance of using a collection of content items that can + /// include text and image information. This content format is currently only applicable to the + /// gpt-4-vision-preview model and will not be accepted by other models. + /// + /// + /// The collection of text and image content items associated with the message. + /// + public ChatRequestUserMessage(IEnumerable contentItems) + : base(ChatRole.User, contentItems.ToArray()) + { } + + /// + /// Creates a new instance of using a collection of content items that can + /// include text and image information. This content format is currently only applicable to the + /// gpt-4-vision-preview model and will not be accepted by other models. + /// + /// + /// The collection of text and image content items associated with the message. + /// + public ChatRequestUserMessage(params ChatMessageContent[] contentItems) + : this(contentItems as IEnumerable) + { } + + internal override void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + if (OptionalProperty.IsDefined(Name)) + { + writer.WriteString("name"u8, Name); + } + } +} diff --git a/.dotnet/src/Custom/Chat/ChatResponseFormat.cs b/.dotnet/src/Custom/Chat/ChatResponseFormat.cs new file mode 100644 index 000000000..b7d539655 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatResponseFormat.cs @@ -0,0 +1,44 @@ +using System; + +namespace OpenAI.Chat; + +/// +/// Represents a requested response_format for the model to use, enabling "JSON mode" for guaranteed valid output. +/// +/// +/// Important: when using JSON mode, the model must also be instructed to produce JSON via a +/// system or user message. 
+/// +/// Without this paired, message-based accompaniment, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. +/// +/// +/// Also note that the message content may be partially cut off if finish_reason is length, which +/// indicates that the generation exceeded max_tokens or the conversation exceeded the max context length for +/// the model. +/// +/// +public enum ChatResponseFormat +{ + /// + /// Specifies that the model should provide plain, textual output. + /// + Text, + /// + /// Specifies that the model should enable "JSON mode" and better guarantee the emission of valid JSON. + /// + /// + /// Important: when using JSON mode, the model must also be instructed to produce JSON via a + /// system or user message. + /// + /// Without this paired, message-based accompaniment, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. + /// + /// + /// Also note that the message content may be partially cut off if finish_reason is length, which + /// indicates that the generation exceeded max_tokens or the conversation exceeded the max context length for + /// the model. + /// + /// + JsonObject, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatRole.cs b/.dotnet/src/Custom/Chat/ChatRole.cs new file mode 100644 index 000000000..724baa74f --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRole.cs @@ -0,0 +1,85 @@ +namespace OpenAI.Chat; + +/// +/// Represents the role of a chat completion message. +/// +/// +/// +/// +/// Type - +/// Role - +/// Description +/// +/// +/// - +/// system - +/// Instructions to the model that guide the behavior of future assistant messages. +/// +/// +/// - +/// user - +/// Input messages from the caller, typically paired with assistant messages in a conversation. 
+/// +/// +/// - +/// assistant - +/// +/// Output messages from the model with responses to the user or calls to tools or functions that are +/// needed to continue the logical conversation. +/// +/// +/// +/// - +/// tool - +/// +/// Resolution information for a in an earlier +/// that was made against a supplied +/// . +/// +/// +/// +/// - +/// function - +/// +/// Resolution information for a in an earlier +/// that was made against a supplied +/// . Note that functions are deprecated in favor of +/// tool_calls. +/// +/// +/// +/// +public enum ChatRole +{ + /// + /// The system role, which provides instructions to the model that guide the behavior of future + /// assistant messages + /// + System, + /// + /// The assistant role that provides output from the model that either issues completions in response to + /// user messages or calls provided tools or functions. + /// + Assistant, + /// + /// The user role that provides input from the caller as a prompt for model responses. + /// + User, + /// + /// The tool role that provides resolving information to prior tool_calls made by the model against + /// supplied tools. + /// + Tool, + /// + /// + /// The function role that provides resolving information to a prior function_call made by the model + /// against a definition supplied in functions. + /// + /// + /// + /// functions are deprecated in favor of tools and supplying tools will result in + /// tool_calls that must be resolved via the tool role rather than a function_call resolved + /// by a function role message. + /// + Function, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatTokenUsage.cs b/.dotnet/src/Custom/Chat/ChatTokenUsage.cs new file mode 100644 index 000000000..699af8453 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatTokenUsage.cs @@ -0,0 +1,21 @@ +namespace OpenAI.Chat; + +/// +/// Represents computed token consumption statistics for a chat completion request. 
+/// +public class ChatTokenUsage +{ + /// + public long InputTokens { get; } + /// + public long OutputTokens { get; } + /// + public long TotalTokens { get; } + + internal ChatTokenUsage(Internal.Models.CompletionUsage internalUsage) + { + InputTokens = internalUsage.PromptTokens; + OutputTokens = internalUsage.CompletionTokens; + TotalTokens = internalUsage.TotalTokens; + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatToolCall.Serialization.cs b/.dotnet/src/Custom/Chat/ChatToolCall.Serialization.cs new file mode 100644 index 000000000..eff6eec3c --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatToolCall.Serialization.cs @@ -0,0 +1,38 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Chat; + +public abstract partial class ChatToolCall : IJsonModel +{ + ChatToolCall IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + ChatToolCall IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + writer.WriteString("id"u8, Id); + WriteDerivedAdditions(writer, options); + writer.WriteEndObject(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + internal abstract void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatToolCall.cs b/.dotnet/src/Custom/Chat/ChatToolCall.cs new file mode 100644 index 000000000..9203f4b01 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatToolCall.cs @@ -0,0 +1,15 @@ +namespace 
OpenAI.Chat; + +/// +/// A base representation of an item in an assistant role response's tool_calls that specifies +/// parameterized resolution against a previously defined tool that is needed for the model to continue the logical +/// conversation. +/// +public abstract partial class ChatToolCall +{ + /// + /// A unique identifier associated with the tool call, used in a subsequent to + /// resolve the tool call and continue the logical conversation. + /// + public required string Id { get; set; } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatToolConstraint.cs b/.dotnet/src/Custom/Chat/ChatToolConstraint.cs new file mode 100644 index 000000000..15622b5be --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatToolConstraint.cs @@ -0,0 +1,87 @@ +using System; + +namespace OpenAI.Chat; + +/// +/// Represents tool_choice, the desired manner in which the model should use the tools defined in a +/// chat completion request. +/// +public readonly struct ChatToolConstraint : IEquatable +{ + private enum ToolConstraintKind + { + Predefined, + Function, + } + + private readonly ToolConstraintKind _constraintKind; + private readonly BinaryData _serializableData; + + /// + /// Creates a new instance of which requests that the model restricts its behavior + /// to calling the specified tool. + /// + /// The definition of the tool that the model should call. + /// + /// tool_choice uses the name of a tool of the function type as the correlation field, so + /// instantiating a new instance of with the desired name is + /// sufficient if the matching instance is not available. 
+ /// + public ChatToolConstraint(ChatToolDefinition toolDefinition) + { + if (toolDefinition is ChatFunctionToolDefinition functionToolDefinition) + { + _constraintKind = ToolConstraintKind.Function; + _serializableData = BinaryData.FromObjectAsJson(new + { + type = "function", + function = new + { + name = functionToolDefinition.Name, + } + }); + } + else + { + throw new ArgumentException( + $"Unsupported {nameof(toolDefinition)} type for 'tool_choice' constraint: {toolDefinition.GetType()}"); + } + } + + internal ChatToolConstraint(string predefinedLabel) + { + _constraintKind = ToolConstraintKind.Predefined; + _serializableData = BinaryData.FromString($@"""{predefinedLabel}"""); + } + + /// + /// auto specifies that the model should freely call any combination of the provided tools, including + /// the option to not invoke any tools and issue an ordinary response. + /// + public static ChatToolConstraint Auto { get; } = new("auto"); + /// + /// none specifies that the model should not invoke any of the provided tools and instead force an + /// ordinary assistant response. Note that provided tool definitions may still influence the behavior of + /// chat completions even when tools are not called. 
+ /// + public static ChatToolConstraint None { get; } = new("none"); + /// + public static bool operator ==(ChatToolConstraint left, ChatToolConstraint right) + => left._serializableData?.ToString() == right._serializableData?.ToString(); + /// + public static bool operator !=(ChatToolConstraint left, ChatToolConstraint right) + => left._serializableData?.ToString() != right._serializableData?.ToString(); + /// + public bool Equals(ChatToolConstraint other) + => (_serializableData == null && other._serializableData == null) + || (_serializableData.ToString().Equals(other._serializableData.ToString())); + /// + public override string ToString() => _serializableData?.ToString(); + /// + public override bool Equals(object obj) + => obj is ChatToolConstraint constraint && constraint.Equals(this); + /// + public override int GetHashCode() => $"{_serializableData?.ToString()}".GetHashCode(); + + internal BinaryData GetBinaryData() => _serializableData; +} diff --git a/.dotnet/src/Custom/Chat/ChatToolDefinition.cs b/.dotnet/src/Custom/Chat/ChatToolDefinition.cs new file mode 100644 index 000000000..d116ac460 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatToolDefinition.cs @@ -0,0 +1,11 @@ +namespace OpenAI.Chat; + +/// +/// A base representation of a tool supplied to a chat completion request. Tools inform the model about additional, +/// caller-provided behaviors that can be invoked to provide prompt enrichment or custom actions. +/// +/// +/// Chat completion currently supports function tools via . 
+/// +public abstract class ChatToolDefinition +{ } diff --git a/.dotnet/src/Custom/Chat/StreamingChatUpdate.cs b/.dotnet/src/Custom/Chat/StreamingChatUpdate.cs new file mode 100644 index 000000000..c1540897b --- /dev/null +++ b/.dotnet/src/Custom/Chat/StreamingChatUpdate.cs @@ -0,0 +1,336 @@ +namespace OpenAI.Chat; + +using System; +using System.Collections.Generic; +using System.Text.Json; + +/// +/// Represents an incremental item of new data in a streaming response to a chat completion request. +/// +public partial class StreamingChatUpdate +{ + /// + /// Gets a unique identifier associated with this streamed Chat Completions response. + /// + /// + /// + /// Corresponds to $.id in the underlying REST schema. + /// + /// When using Azure OpenAI, note that the values of and may not be + /// populated until the first containing role, content, or + /// function information. + /// + public string Id { get; } + + /// + /// Gets the first timestamp associated with generation activity for this completions response, + /// represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. + /// + /// + /// + /// Corresponds to $.created in the underlying REST schema. + /// + /// When using Azure OpenAI, note that the values of and may not be + /// populated until the first containing role, content, or + /// function information. + /// + public DateTimeOffset? Created { get; } + + /// + /// Gets the associated with this update. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.role in the underlying REST schema. + /// + /// assignment typically occurs in a single update across a streamed Chat Completions + /// choice and the value should be considered to be persist for all subsequent updates without a + /// that bear the same . + /// + public ChatRole? Role { get; } + + /// + /// Gets the content fragment associated with this update. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.content in the underlying REST schema. 
+ /// + /// Each update contains only a small number of tokens. When presenting or reconstituting a full, streamed + /// response, all values for the same should be + /// combined. + /// + public string ContentUpdate { get; } + + /// + /// Gets the name of a function to be called. + /// + /// + /// Corresponds to e.g. $.choices[0].delta.function_call.name in the underlying REST schema. + /// + public string FunctionName { get; } + + /// + /// Gets a function arguments fragment associated with this update. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.function_call.arguments in the underlying REST schema. + /// + /// + /// + /// Each update contains only a small number of tokens. When presenting or reconstituting a full, streamed + /// arguments body, all values for the same + /// should be combined. + /// + /// + /// + /// As is the case for non-streaming , the content provided for function + /// arguments is not guaranteed to be well-formed JSON or to contain expected data. Callers should validate + /// function arguments before using them. + /// + /// + public string FunctionArgumentsUpdate { get; } + + /// + /// An incremental update payload for a tool call that is part of this response. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.tool_calls[0].index in the REST API schema. + /// + /// + /// To differentiate between parallel streaming tool calls within a single streaming choice, use the value of the + /// property. + /// + /// + /// Please note is the base class. According to the scenario, a derived class + /// of the base class might need to be assigned here, or this property needs to be casted to one of the possible + /// derived classes. + /// The available derived classes include: . + /// + /// + public StreamingToolCallUpdate ToolCallUpdate { get; } + + /// + /// Gets the associated with this update. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].finish_reason in the underlying REST schema. 
+ /// + /// + /// assignment typically appears in the final streamed update message associated + /// with a choice. + /// + /// + public ChatFinishReason? FinishReason { get; } + + /// + /// Gets the choice index associated with this streamed update. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].index in the underlying REST schema. + /// + /// + /// Unless a value greater than 1 was provided as the choiceCount to + /// , + /// only one choice will be generated. In that case, this value will always be 0 and may not need to be considered. + /// + /// + /// When a value greater than 1 to that choiceCount is provided, this index represents + /// which logical choice the information is associated with. In the event + /// that a single underlying server-sent event contains multiple choices, multiple instances of + /// will be created. + /// + /// + public int? ChoiceIndex { get; } + + /// + public string SystemFingerprint { get; } + + /// + /// The log probability information for choices in the chat completion response, as requested via + /// . + /// + public ChatLogProbabilityCollection LogProbabilities { get; } + + internal StreamingChatUpdate( + string id, + DateTimeOffset created, + string systemFingerprint = null, + int? choiceIndex = null, + ChatRole? role = null, + string contentUpdate = null, + ChatFinishReason? 
finishReason = null, + string functionName = null, + string functionArgumentsUpdate = null, + StreamingToolCallUpdate toolCallUpdate = null, + ChatLogProbabilityCollection logProbabilities = null) + { + Id = id; + Created = created; + SystemFingerprint = systemFingerprint; + ChoiceIndex = choiceIndex; + Role = role; + ContentUpdate = contentUpdate; + FinishReason = finishReason; + FunctionName = functionName; + FunctionArgumentsUpdate = functionArgumentsUpdate; + ToolCallUpdate = toolCallUpdate; + LogProbabilities = logProbabilities; + } + + internal static List DeserializeStreamingChatUpdates(JsonElement element) + { + List results = []; + if (element.ValueKind == JsonValueKind.Null) + { + return results; + } + string id = default; + DateTimeOffset created = default; + string systemFingerprint = null; + foreach (JsonProperty property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("created"u8)) + { + created = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("system_fingerprint")) + { + systemFingerprint = property.Value.GetString(); + continue; + } + if (property.NameEquals("choices"u8)) + { + foreach (JsonElement choiceElement in property.Value.EnumerateArray()) + { + ChatRole? role = null; + string contentUpdate = null; + string functionName = null; + string functionArgumentsUpdate = null; + int choiceIndex = 0; + ChatFinishReason? 
finishReason = null; + List toolCallUpdates = []; + ChatLogProbabilityCollection logProbabilities = null; + + foreach (JsonProperty choiceProperty in choiceElement.EnumerateObject()) + { + if (choiceProperty.NameEquals("index"u8)) + { + choiceIndex = choiceProperty.Value.GetInt32(); + continue; + } + if (choiceProperty.NameEquals("finish_reason"u8)) + { + if (choiceProperty.Value.ValueKind == JsonValueKind.Null) + { + finishReason = null; + continue; + } + finishReason = choiceProperty.Value.GetString() switch + { + "stop" => ChatFinishReason.Stopped, + "length" => ChatFinishReason.Length, + "tool_calls" => ChatFinishReason.ToolCalls, + "function_call" => ChatFinishReason.FunctionCall, + "content_filter" => ChatFinishReason.ContentFilter, + _ => throw new ArgumentException(nameof(finishReason)), + }; + continue; + } + if (choiceProperty.NameEquals("delta"u8)) + { + foreach (JsonProperty deltaProperty in choiceProperty.Value.EnumerateObject()) + { + if (deltaProperty.NameEquals("role"u8)) + { + role = deltaProperty.Value.GetString() switch + { + "system" => ChatRole.System, + "user" => ChatRole.User, + "assistant" => ChatRole.Assistant, + "tool" => ChatRole.Tool, + "function" => ChatRole.Function, + _ => throw new ArgumentException(nameof(role)), + }; + continue; + } + if (deltaProperty.NameEquals("content"u8)) + { + contentUpdate = deltaProperty.Value.GetString(); + continue; + } + if (deltaProperty.NameEquals("function_call"u8)) + { + foreach (JsonProperty functionProperty in deltaProperty.Value.EnumerateObject()) + { + if (functionProperty.NameEquals("name"u8)) + { + functionName = functionProperty.Value.GetString(); + continue; + } + if (functionProperty.NameEquals("arguments"u8)) + { + functionArgumentsUpdate = functionProperty.Value.GetString(); + } + } + } + if (deltaProperty.NameEquals("tool_calls")) + { + foreach (JsonElement toolCallElement in deltaProperty.Value.EnumerateArray()) + { + toolCallUpdates.Add( + 
StreamingToolCallUpdate.DeserializeStreamingToolCallUpdate(toolCallElement)); + } + } + } + } + if (choiceProperty.NameEquals("logprobs"u8)) + { + Internal.Models.CreateChatCompletionResponseChoiceLogprobs internalLogprobs + = Internal.Models.CreateChatCompletionResponseChoiceLogprobs.DeserializeCreateChatCompletionResponseChoiceLogprobs( + choiceProperty.Value); + logProbabilities = ChatLogProbabilityCollection.FromInternalData(internalLogprobs); + } + } + // In the unlikely event that more than one tool call arrives on a single chunk, we'll generate + // separate updates just like for choices. Adding a "null" if empty lets us avoid a separate loop. + if (toolCallUpdates.Count == 0) + { + toolCallUpdates.Add(null); + } + foreach (StreamingToolCallUpdate toolCallUpdate in toolCallUpdates) + { + results.Add(new StreamingChatUpdate( + id, + created, + systemFingerprint, + choiceIndex, + role, + contentUpdate, + finishReason, + functionName, + functionArgumentsUpdate, + toolCallUpdate, + logProbabilities)); + } + } + continue; + } + } + if (results.Count == 0) + { + results.Add(new StreamingChatUpdate(id, created, systemFingerprint)); + } + return results; + } +} diff --git a/.dotnet/src/Custom/Chat/StreamingFunctionToolCallUpdate.cs b/.dotnet/src/Custom/Chat/StreamingFunctionToolCallUpdate.cs new file mode 100644 index 000000000..bbae5e5c4 --- /dev/null +++ b/.dotnet/src/Custom/Chat/StreamingFunctionToolCallUpdate.cs @@ -0,0 +1,90 @@ +namespace OpenAI.Chat; +using System.Text.Json; + +/// +/// Represents an incremental update to a streaming function tool call that is part of a streaming chat completions +/// choice. +/// +public partial class StreamingFunctionToolCallUpdate : StreamingToolCallUpdate +{ + /// + /// The name of the function requested by the tool call. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.tool_calls[0].function.name in the REST API schema. 
+ /// + /// + /// For a streaming function tool call, this name will appear in a single streaming update payload, typically the + /// first. Use the property to differentiate between multiple, + /// parallel tool calls when streaming. + /// + /// + public string Name { get; } + + /// + /// The next new segment of the function arguments for the function tool called by a streaming tool call. + /// These must be accumulated for the complete contents of the function arguments. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.tool_calls[0].function.arguments in the REST API schema. + /// + /// Note that the model does not always generate valid JSON and may hallucinate parameters + /// not defined by your function schema. Validate the arguments in your code before calling + /// your function. + /// + public string ArgumentsUpdate { get; } + + internal StreamingFunctionToolCallUpdate( + string id, + int toolCallIndex, + string functionName, + string functionArgumentsUpdate) + : base("function", id, toolCallIndex) + { + Name = functionName; + ArgumentsUpdate = functionArgumentsUpdate; + } + + internal static StreamingFunctionToolCallUpdate DeserializeStreamingFunctionToolCallUpdate(JsonElement element) + { + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string id = null; + int toolCallIndex = 0; + string functionName = null; + string functionArgumentsUpdate = null; + + foreach (JsonProperty property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + } + if (property.NameEquals("index"u8)) + { + toolCallIndex = property.Value.GetInt32(); + } + if (property.NameEquals("function"u8)) + { + foreach (JsonProperty functionProperty in property.Value.EnumerateObject()) + { + if (functionProperty.NameEquals("name"u8)) + { + functionName = functionProperty.Value.GetString(); + } + if (functionProperty.NameEquals("arguments"u8)) + { + functionArgumentsUpdate = 
functionProperty.Value.GetString(); + } + } + } + } + + return new StreamingFunctionToolCallUpdate(id, toolCallIndex, functionName, functionArgumentsUpdate); + } +} diff --git a/.dotnet/src/Custom/Chat/StreamingToolCallUpdate.cs b/.dotnet/src/Custom/Chat/StreamingToolCallUpdate.cs new file mode 100644 index 000000000..ce9fc9b56 --- /dev/null +++ b/.dotnet/src/Custom/Chat/StreamingToolCallUpdate.cs @@ -0,0 +1,97 @@ +namespace OpenAI.Chat; +using System.Text.Json; + +/// +/// A base representation of an incremental update to a streaming tool call that is part of a streaming chat completion +/// request. +/// +/// +/// +/// This type encapsulates the payload located in e.g. $.choices[0].delta.tool_calls[] in the REST API schema. +/// +/// +/// To differentiate between parallel streaming tool calls within a single streaming choice, use the value of the +/// property. +/// +/// +/// is the streaming, base class counterpart to . +/// Currently, chat completion supports function tools and the derived +/// type will provide required information about the matching function +/// tool call. +/// +/// +public abstract partial class StreamingToolCallUpdate +{ + /// + /// Gets the ID associated with with the streaming tool call. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.tool_calls[0].id in the REST API schema. + /// + /// + /// This value appears once for each streaming tool call, typically on the first update message for each + /// . Callers should retain the value when it arrives to accumulate the complete tool + /// call information. + /// + /// + /// Tool call IDs must be provided in instances that respond to tool calls. + /// + /// + public string Id { get; } + + /// + /// Gets the tool call index associated with this . + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.tool_calls[0].index in the REST API schema. + /// + /// + /// This value appears on every streaming tool call update. 
When multiple tool calls occur within the same + /// streaming chat choice, this index specifies which tool call that this update contains new information for. + /// + /// + public int ToolCallIndex { get; } + + internal string Type { get; } + + internal StreamingToolCallUpdate(string type, string id, int toolCallIndex) + { + Type = type; + Id = id; + ToolCallIndex = toolCallIndex; + } + + internal static StreamingToolCallUpdate DeserializeStreamingToolCallUpdate(JsonElement element) + { + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + foreach (JsonProperty property in element.EnumerateObject()) + { + // CUSTOM CODE NOTE: + // "type" is superficially the JSON discriminator for possible tool call categories, but it does not + // appear on every streamed delta message. To account for this without maintaining state, we instead + // allow the deserialization to infer the type based on the presence of the named/typed key. This is + // consistent across all existing patterns of the form: + // { + // "type": "" + // "": { ... } + // } + if (property.NameEquals("type"u8)) + { + if (property.Value.GetString() == "function") + { + return StreamingFunctionToolCallUpdate.DeserializeStreamingFunctionToolCallUpdate(element); + } + } + else if (property.NameEquals("function"u8)) + { + return StreamingFunctionToolCallUpdate.DeserializeStreamingFunctionToolCallUpdate(element); + } + } + return null; + } +} diff --git a/.dotnet/src/Custom/Embeddings/Embedding.cs b/.dotnet/src/Custom/Embeddings/Embedding.cs new file mode 100644 index 000000000..9b206204d --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/Embedding.cs @@ -0,0 +1,44 @@ +using System; + +namespace OpenAI.Embeddings; + +/// +/// Represents an embedding vector returned by embedding endpoint. +/// +public partial class Embedding +{ + /// + /// The embedding vector, which is a list of floats. 
+ /// + public ReadOnlyMemory Vector { get; } + /// + public long Index { get; } + /// + public string Model { get; } + /// + public EmbeddingTokenUsage Usage { get; } + + internal Embedding(ReadOnlyMemory vector, long index, EmbeddingTokenUsage usage) + { + Vector = vector; + Index = index; + Usage = usage; + } + + internal Embedding( + Internal.Models.CreateEmbeddingResponse internalResponse, + long internalDataIndex, + EmbeddingTokenUsage usage = null) + { + Internal.Models.Embedding dataItem = internalResponse.Data[(int)internalDataIndex]; + string dataItemBase64 = dataItem.EmbeddingProperty.ToString(); + dataItemBase64 = dataItemBase64.Substring(1, dataItemBase64.Length - 2); + byte[] bytes = Convert.FromBase64String(dataItemBase64); + float[] vector = new float[bytes.Length / sizeof(float)]; + Buffer.BlockCopy(bytes, 0, vector, 0, bytes.Length); + Vector = new ReadOnlyMemory(vector); + Index = dataItem.Index; + Usage = usage ?? new(internalResponse.Usage); + Model = internalResponse.Model; + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Embeddings/EmbeddingClient.cs b/.dotnet/src/Custom/Embeddings/EmbeddingClient.cs new file mode 100644 index 000000000..8a94c33d9 --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/EmbeddingClient.cs @@ -0,0 +1,121 @@ +using OpenAI.Internal.Models; +using System; +using System.ClientModel; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.ComponentModel; +using System.Threading; +using System.Threading.Tasks; + +namespace OpenAI.Embeddings; + +/// The service client for the OpenAI Embeddings endpoint. 
+public partial class EmbeddingClient +{ + private OpenAIClientConnector _clientConnector; + private Internal.Embeddings Shim => _clientConnector.InternalClient.GetEmbeddingsClient(); + + public EmbeddingClient(Uri endpoint, string model, ApiKeyCredential credential, OpenAIClientOptions options = null) + { + _clientConnector = new(model, endpoint, credential, options); + } + + public EmbeddingClient(Uri endpoint, string model, OpenAIClientOptions options = null) + : this(endpoint, model, credential: null, options) + { } + + public EmbeddingClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = null) + : this(endpoint: null, model, credential, options) + { } + + public EmbeddingClient(string model, OpenAIClientOptions options = null) + : this(endpoint: null, model, credential: null, options) + { } + + public virtual ClientResult GenerateEmbedding(string input, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(input, options); + ClientResult response = Shim.CreateEmbedding(request); + Embedding embeddingResult = new(response.Value, internalDataIndex: 0); + return ClientResult.FromValue(embeddingResult, response.GetRawResponse()); + } + + public virtual async Task> GenerateEmbeddingAsync(string input, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(input, options); + ClientResult response = await Shim.CreateEmbeddingAsync(request); + Embedding embeddingResult = new(response.Value, internalDataIndex: 0); + return ClientResult.FromValue(embeddingResult, response.GetRawResponse()); + } + + public virtual ClientResult GenerateEmbedding(IEnumerable input, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(input, options); + ClientResult response = Shim.CreateEmbedding(request); + Embedding embeddingResult = new(response.Value, internalDataIndex: 0); + return 
ClientResult.FromValue(embeddingResult, response.GetRawResponse()); + } + + public virtual async Task> GenerateEmbeddingAsync(IEnumerable input, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(input, options); + ClientResult response = await Shim.CreateEmbeddingAsync(request); + Embedding embeddingResult = new(response.Value, internalDataIndex: 0); + return ClientResult.FromValue(embeddingResult, response.GetRawResponse()); + } + + public virtual ClientResult GenerateEmbeddings(IEnumerable inputs, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(inputs, options); + ClientResult response = Shim.CreateEmbedding(request); + EmbeddingCollection resultCollection = EmbeddingCollection.CreateFromInternalResponse(response.Value); + return ClientResult.FromValue(resultCollection, response.GetRawResponse()); + } + + public virtual async Task> GenerateEmbeddingsAsync(IEnumerable inputs, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(inputs, options); + ClientResult response = await Shim.CreateEmbeddingAsync(request); + EmbeddingCollection resultCollection = EmbeddingCollection.CreateFromInternalResponse(response.Value); + return ClientResult.FromValue(resultCollection, response.GetRawResponse()); + } + + public virtual ClientResult GenerateEmbeddings(IEnumerable> inputs, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(inputs, options); + ClientResult response = Shim.CreateEmbedding(request); + EmbeddingCollection resultCollection = EmbeddingCollection.CreateFromInternalResponse(response.Value); + return ClientResult.FromValue(resultCollection, response.GetRawResponse()); + } + + public virtual async Task> GenerateEmbeddingsAsync(IEnumerable> inputs, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest 
request = CreateInternalRequest(inputs, options); + ClientResult response = await Shim.CreateEmbeddingAsync(request); + EmbeddingCollection resultCollection = EmbeddingCollection.CreateFromInternalResponse(response.Value); + return ClientResult.FromValue(resultCollection, response.GetRawResponse()); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GenerateEmbeddings(BinaryContent content, RequestOptions context = null) + => Shim.CreateEmbedding(content, context); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GenerateEmbeddingsAsync(BinaryContent content, RequestOptions context = null) + => Shim.CreateEmbeddingAsync(content, context); + + private Internal.Models.CreateEmbeddingRequest CreateInternalRequest(object inputObject, EmbeddingOptions options) + { + options ??= new(); + return new Internal.Models.CreateEmbeddingRequest( + BinaryData.FromObjectAsJson(inputObject), + new(_clientConnector.Model), + Internal.Models.CreateEmbeddingRequestEncodingFormat.Base64, + options?.Dimensions, + options?.User, + serializedAdditionalRawData: null); + } +} diff --git a/.dotnet/src/Custom/Embeddings/EmbeddingCollection.cs b/.dotnet/src/Custom/Embeddings/EmbeddingCollection.cs new file mode 100644 index 000000000..b87076584 --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/EmbeddingCollection.cs @@ -0,0 +1,19 @@ +using System.Collections.Generic; +using System.Collections.ObjectModel; + +namespace OpenAI.Embeddings; + +public class EmbeddingCollection : ReadOnlyCollection +{ + internal EmbeddingCollection(IList list) : base(list) { } + internal static EmbeddingCollection CreateFromInternalResponse(Internal.Models.CreateEmbeddingResponse response) + { + EmbeddingTokenUsage usage = new(response.Usage); + List items = []; + for (int i = 0; i < response.Data.Count; i++) + { + items.Add(new(response, i, usage)); + } + return new EmbeddingCollection(items); + } +} \ No newline at end of file diff --git 
a/.dotnet/src/Custom/Embeddings/EmbeddingOptions.cs b/.dotnet/src/Custom/Embeddings/EmbeddingOptions.cs new file mode 100644 index 000000000..e16fcec55 --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/EmbeddingOptions.cs @@ -0,0 +1,8 @@ +namespace OpenAI.Embeddings; + +public class EmbeddingOptions +{ + public string User { get; set; } + + public long? Dimensions { get; set; } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Embeddings/EmbeddingTokenUsage.cs b/.dotnet/src/Custom/Embeddings/EmbeddingTokenUsage.cs new file mode 100644 index 000000000..4fad06102 --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/EmbeddingTokenUsage.cs @@ -0,0 +1,16 @@ +namespace OpenAI.Embeddings; + +public partial class EmbeddingTokenUsage +{ + private Internal.Models.EmbeddingUsage _internalUsage; + + /// + public long InputTokens => _internalUsage.PromptTokens; + /// + public long TotalTokens => _internalUsage.TotalTokens; + + internal EmbeddingTokenUsage(Internal.Models.EmbeddingUsage internalUsage) + { + _internalUsage = internalUsage; + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Files/FileClient.cs b/.dotnet/src/Custom/Files/FileClient.cs new file mode 100644 index 000000000..4f2c6e3b1 --- /dev/null +++ b/.dotnet/src/Custom/Files/FileClient.cs @@ -0,0 +1,352 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.ComponentModel; +using System.Runtime.InteropServices.ComTypes; +using System.Text; +using System.Threading.Tasks; + +namespace OpenAI.Files; + +/// +/// The service client for OpenAI file operations. +/// +public partial class FileClient +{ + private OpenAIClientConnector _clientConnector; + private Internal.Files Shim => _clientConnector.InternalClient.GetFilesClient(); + + /// + /// Initializes a new instance of , used for file operation requests. 
+ /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public FileClient(Uri endpoint, ApiKeyCredential credential, OpenAIClientOptions options = null) + { + _clientConnector = new("none", endpoint, credential, options); + } + + /// + /// Initializes a new instance of , used for file operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// Additional options to customize the client. + public FileClient(Uri endpoint, OpenAIClientOptions options = null) + : this(endpoint, credential: null, options) + { } + + /// + /// Initializes a new instance of , used for file operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. 
+ public FileClient(ApiKeyCredential credential, OpenAIClientOptions options = null) + : this(endpoint: null, credential, options) + { } + + /// + /// Initializes a new instance of , used for file operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// Additional options to customize the client. + public FileClient(OpenAIClientOptions options = null) + : this(endpoint: null, credential: null, options) + { } + + public virtual ClientResult UploadFile(BinaryData file, string filename, OpenAIFilePurpose purpose) + { + if (file is null) throw new ArgumentNullException(nameof(file)); + if (string.IsNullOrWhiteSpace(filename)) throw new ArgumentException(nameof(filename)); + + PipelineMessage uploadMessage = CreateInternalUploadMessage(file, filename, purpose); + Shim.Pipeline.Send(uploadMessage); + return GetUploadResultFromResponse(uploadMessage.Response); + } + + public virtual async Task> UploadFileAsync(BinaryData file, string filename, OpenAIFilePurpose purpose) + { + if (file is null) throw new ArgumentNullException(nameof(file)); + if (string.IsNullOrWhiteSpace(filename)) throw new ArgumentException(nameof(filename)); + + PipelineMessage uploadMessage = CreateInternalUploadMessage(file, filename, purpose); + await Shim.Pipeline.SendAsync(uploadMessage); + return GetUploadResultFromResponse(uploadMessage.Response); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult UploadFile(BinaryContent requestBody, RequestOptions context) + { + return Shim.CreateFile(requestBody, context); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task UploadFileAsync(BinaryContent requestBody, RequestOptions 
context) + { + return Shim.CreateFileAsync(requestBody, context); + } + + public virtual ClientResult GetFileInfo(string fileId) + { + ClientResult internalResult = Shim.RetrieveFile(fileId); + return ClientResult.FromValue(new OpenAIFileInfo(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetFileInfoAsync(string fileId) + { + ClientResult internalResult = await Shim.RetrieveFileAsync(fileId); + return ClientResult.FromValue(new OpenAIFileInfo(internalResult.Value), internalResult.GetRawResponse()); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetFileInfo(string fileId, RequestOptions context) + { + return Shim.RetrieveFile(fileId, context); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetFileInfoAsync(string fileId, RequestOptions context) + { + return Shim.RetrieveFileAsync(fileId, context); + } + + public virtual ClientResult GetFileInfoList(OpenAIFilePurpose? purpose = null) + { + Internal.Models.OpenAIFilePurpose? internalPurpose = ToInternalFilePurpose(purpose); + string internalPurposeText = null; + if (internalPurpose != null) + { + internalPurposeText = internalPurpose.ToString(); + } + ClientResult result = Shim.GetFiles(internalPurposeText); + List infoItems = []; + foreach (Internal.Models.OpenAIFile internalFile in result.Value.Data) + { + infoItems.Add(new(internalFile)); + } + return ClientResult.FromValue(new OpenAIFileInfoCollection(infoItems), result.GetRawResponse()); + } + + public virtual async Task> GetFileInfoListAsync(OpenAIFilePurpose? purpose = null) + { + Internal.Models.OpenAIFilePurpose? 
internalPurpose = ToInternalFilePurpose(purpose); + string internalPurposeText = null; + if (internalPurpose != null) + { + internalPurposeText = internalPurpose.ToString(); + } + ClientResult result = await Shim.GetFilesAsync(internalPurposeText).ConfigureAwait(false); + List infoItems = []; + foreach (Internal.Models.OpenAIFile internalFile in result.Value.Data) + { + infoItems.Add(new(internalFile)); + } + return ClientResult.FromValue(new OpenAIFileInfoCollection(infoItems), result.GetRawResponse()); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetFileInfoList(string purpose, RequestOptions context) + { + return Shim.GetFiles(purpose, context); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetFileInfoListAsync(string purpose, RequestOptions context) + { + return Shim.GetFilesAsync(purpose, context); + } + + public virtual ClientResult DownloadFile(string fileId) + { + PipelineMessage message = Shim.Pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_clientConnector.Endpoint.AbsoluteUri); + StringBuilder path = new(); + path.Append($"/files/{fileId}/content"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("content-type", "multipart/form-data"); + Shim.Pipeline.Send(message); + + if (message.Response.IsError) + { + throw new ClientResultException(message.Response); + } + + return ClientResult.FromValue(message.Response.Content, message.Response); + } + + public virtual async Task> DownloadFileAsync(string fileId) + { + PipelineMessage message = Shim.Pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_clientConnector.Endpoint.AbsoluteUri); + StringBuilder path = new(); + 
path.Append($"/files/{fileId}/content"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("content-type", "multipart/form-data"); + + await Shim.Pipeline.SendAsync(message); + + if (message.Response.IsError) + { + throw new ClientResultException(message.Response); + } + + return ClientResult.FromValue(message.Response.Content, message.Response); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult DownloadFile(string fileId, RequestOptions context) + { + return Shim.DownloadFile(fileId, context); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task DownloadFileAsync(string fileId, RequestOptions context) + { + return Shim.DownloadFileAsync(fileId, context); + } + + public virtual void DeleteFile(string fileId) + { + _ = Shim.DeleteFile(fileId); + } + + public virtual async Task DeleteFileAsync(string fileId) + { + _ = Shim.DeleteFileAsync(fileId); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult DeleteFile(string fileId, RequestOptions context) + { + return Shim.DeleteFile(fileId, context); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult DeleteFileAsync(string fileId, RequestOptions context) + { + return Shim.DeleteFile(fileId, context); + } + + internal PipelineMessage CreateInternalUploadMessage(BinaryData fileData, string filename, OpenAIFilePurpose purpose) + { + MultipartFormDataContent content = new(); + content.Add(BinaryContent.Create(fileData), + name: "file", + fileName: filename, + headers: []); + content.Add(MultipartContent.Create( + BinaryData.FromString(purpose switch + { + OpenAIFilePurpose.FineTuning => "fine-tune", + OpenAIFilePurpose.Assistants => "assistants", + _ => throw new ArgumentException($"Unsupported purpose for file upload: {purpose}"), + })), + name: "\"purpose\"", + headers: []); + + PipelineMessage message = Shim.Pipeline.CreateMessage(); + 
message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_clientConnector.Endpoint.AbsoluteUri); + StringBuilder path = new(); + path.Append("/files"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Content = content; + + content.ApplyToRequest(request); + + return message; + } + + internal ClientResult GetUploadResultFromResponse(PipelineResponse response) + { + if (response.IsError) + { + throw new ClientResultException(response); + } + + Internal.Models.OpenAIFile internalFile = Internal.Models.OpenAIFile.FromResponse(response); + OpenAIFileInfo fileInfo = new(internalFile); + return ClientResult.FromValue(fileInfo, response); + } + + internal static Internal.Models.OpenAIFilePurpose? ToInternalFilePurpose(OpenAIFilePurpose? purpose) + { + if (purpose == null) + { + return null; + } + return purpose switch + { + OpenAIFilePurpose.FineTuning => Internal.Models.OpenAIFilePurpose.FineTune, + OpenAIFilePurpose.FineTuningResults => Internal.Models.OpenAIFilePurpose.FineTuneResults, + OpenAIFilePurpose.Assistants => Internal.Models.OpenAIFilePurpose.Assistants, + OpenAIFilePurpose.AssistantOutputs => Internal.Models.OpenAIFilePurpose.AssistantsOutput, + _ => throw new ArgumentException($"Unsupported file purpose: {purpose}"), + }; + } + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + +} diff --git a/.dotnet/src/Custom/Files/OpenAIFileInfo.cs b/.dotnet/src/Custom/Files/OpenAIFileInfo.cs new file mode 100644 index 000000000..04d76a036 --- /dev/null +++ b/.dotnet/src/Custom/Files/OpenAIFileInfo.cs @@ -0,0 +1,36 @@ +using System; + +namespace OpenAI.Files; + +public partial class OpenAIFileInfo +{ + 
public string Id { get; } + public OpenAIFilePurpose Purpose { get; } + public string Filename { get; } + public long Size { get; } + public DateTimeOffset CreatedAt { get; } + + internal OpenAIFileInfo(Internal.Models.OpenAIFile internalFile) + { + Id = internalFile.Id; + Purpose = internalFile.Purpose.ToString() switch + { + "fine-tune" => OpenAIFilePurpose.FineTuning, + "fine-tune-result" => OpenAIFilePurpose.FineTuningResults, + "assistants" => OpenAIFilePurpose.Assistants, + "assistants_output" => OpenAIFilePurpose.AssistantOutputs, + _ => throw new ArgumentException(nameof(internalFile)), + }; + Filename = internalFile.Filename; + Size = internalFile.Bytes; + CreatedAt = internalFile.CreatedAt; + } +} + +public enum OpenAIFilePurpose +{ + FineTuning, + FineTuningResults, + Assistants, + AssistantOutputs, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Files/OpenAIFileInfoCollection.cs b/.dotnet/src/Custom/Files/OpenAIFileInfoCollection.cs new file mode 100644 index 000000000..3c40e56a5 --- /dev/null +++ b/.dotnet/src/Custom/Files/OpenAIFileInfoCollection.cs @@ -0,0 +1,11 @@ +using System.Collections.Generic; +using System.Collections.ObjectModel; + +namespace OpenAI.Files; + +public partial class OpenAIFileInfoCollection : ReadOnlyCollection +{ + internal OpenAIFileInfoCollection(IList list) : base(list) + { + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Images/GeneratedImage.cs b/.dotnet/src/Custom/Images/GeneratedImage.cs new file mode 100644 index 000000000..807268544 --- /dev/null +++ b/.dotnet/src/Custom/Images/GeneratedImage.cs @@ -0,0 +1,48 @@ +using System; + +namespace OpenAI.Images; + +/// +/// Represents the result data for an image generation request. +/// +public class GeneratedImage +{ + /// + /// The binary image data received from the response, provided when + /// is set to . + /// + /// + /// This property is mutually exclusive with and will be null when the other + /// is present. 
+ /// + public BinaryData ImageBytes { get; } + /// + /// A temporary internet location for an image, provided by default or when + /// is set to . + /// + /// + /// This property is mutually exclusive with and will be null when the other + /// is present. + /// + public Uri ImageUri { get; } + /// + /// The final, revised prompt that was used to generate the result image, populated if the model performed any + /// such revisions to the prompt. + /// + /// + /// Revisions are automatically performed to enrich image prompts and improve output quality and consistency. + /// + public string RevisedPrompt { get; } + /// + /// The timestamp at which the result image was generated. + /// + public DateTimeOffset CreatedAt { get; } + + internal GeneratedImage(Internal.Models.ImagesResponse internalResponse, long internalDataIndex) + { + CreatedAt = internalResponse.Created; + ImageBytes = internalResponse.Data[(int)internalDataIndex].B64Json; + RevisedPrompt = internalResponse.Data[(int)internalDataIndex].RevisedPrompt; + ImageUri = internalResponse.Data[(int)internalDataIndex].Url; + } +} diff --git a/.dotnet/src/Custom/Images/ImageClient.cs b/.dotnet/src/Custom/Images/ImageClient.cs new file mode 100644 index 000000000..24f8ae4fd --- /dev/null +++ b/.dotnet/src/Custom/Images/ImageClient.cs @@ -0,0 +1,245 @@ +using System; +using System.ClientModel; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.ComponentModel; +using System.Threading; +using System.Threading.Tasks; + +namespace OpenAI.Images; + +/// The service client for OpenAI image operations. +public partial class ImageClient +{ + private OpenAIClientConnector _clientConnector; + private Internal.Images Shim => _clientConnector.InternalClient.GetImagesClient(); + + /// + /// Initializes a new instance of , used for image operation requests. 
+ /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// The model name for image operations that the client should use. + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public ImageClient(Uri endpoint, string model, ApiKeyCredential credential, OpenAIClientOptions options = null) + { + _clientConnector = new(model, endpoint, credential, options); + } + + /// + /// Initializes a new instance of , used for image operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// The model name for image operations that the client should use. + /// Additional options to customize the client. + public ImageClient(Uri endpoint, string model, OpenAIClientOptions options = null) + : this(endpoint, model, credential: null, options) + { } + + /// + /// Initializes a new instance of , used for image operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The model name for image operations that the client should use. 
+ /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public ImageClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = null) + : this(endpoint: null, model, credential, options) + { } + + /// + /// Initializes a new instance of , used for image operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The model name for image operations that the client should use. + /// Additional options to customize the client. + public ImageClient(string model, OpenAIClientOptions options = null) + : this(endpoint: null, model, credential: null, options) + { } + + /// + /// Generates a single image for a provided prompt. + /// + /// The description and instructions for the image. + /// Additional options for the image generation request. + /// The cancellation token for the operation. + /// A result for a single image generation. + public virtual ClientResult GenerateImage(string prompt, ImageGenerationOptions options = null) + { + ClientResult multiResult = GenerateImages(prompt, imageCount: null, options); + return ClientResult.FromValue(multiResult.Value[0], multiResult.GetRawResponse()); + } + + /// + /// Generates a single image for a provided prompt. + /// + /// The description and instructions for the image. + /// Additional options for the image generation request. + /// The cancellation token for the operation. + /// A result for a single image generation. 
+ public virtual async Task> GenerateImageAsync(string prompt, ImageGenerationOptions options = null) + { + ClientResult multiResult = await GenerateImagesAsync(prompt, imageCount: null, options).ConfigureAwait(false); + return ClientResult.FromValue(multiResult.Value[0], multiResult.GetRawResponse()); + } + + /// + /// Generates a collection of image alternatives for a provided prompt. + /// + /// The description and instructions for the image. + /// + /// The number of alternative images to generate for the prompt. + /// + /// Additional options for the image generation request. + /// The cancellation token for the operation. + /// A result for a single image generation. + public virtual ClientResult GenerateImages( + string prompt, + int? imageCount = null, + ImageGenerationOptions options = null) + { + Internal.Models.CreateImageRequest request = CreateInternalRequest(prompt, imageCount, options); + ClientResult response = Shim.CreateImage(request); + List ImageGenerations = []; + for (int i = 0; i < response.Value.Data.Count; i++) + { + ImageGenerations.Add(new(response.Value, i)); + } + return ClientResult.FromValue(new ImageGenerationCollection(ImageGenerations), response.GetRawResponse()); + } + + /// + /// Generates a collection of image alternatives for a provided prompt. + /// + /// The description and instructions for the image. + /// + /// The number of alternative images to generate for the prompt. + /// + /// Additional options for the image generation request. + /// The cancellation token for the operation. + /// A result for a single image generation. + public virtual async Task> GenerateImagesAsync( + string prompt, + int? 
imageCount = null, + ImageGenerationOptions options = null) + { + Internal.Models.CreateImageRequest request = CreateInternalRequest(prompt, imageCount, options); + ClientResult response = await Shim.CreateImageAsync(request).ConfigureAwait(false); + List ImageGenerations = []; + for (int i = 0; i < response.Value.Data.Count; i++) + { + ImageGenerations.Add(new(response.Value, i)); + } + return ClientResult.FromValue(new ImageGenerationCollection(ImageGenerations), response.GetRawResponse()); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GenerateImage(BinaryContent content, RequestOptions context = null) + => Shim.CreateImage(content, context); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GenerateImageAsync(BinaryContent content, RequestOptions context = null) + => Shim.CreateImageAsync(content, context); + + private Internal.Models.CreateImageRequest CreateInternalRequest( + string prompt, + int? imageCount = null, + ImageGenerationOptions options = null) + { + options ??= new(); + Internal.Models.CreateImageRequestQuality? internalQuality = null; + if (options.Quality != null) + { + internalQuality = options.Quality switch + { + ImageQuality.Standard => Internal.Models.CreateImageRequestQuality.Standard, + ImageQuality.High => Internal.Models.CreateImageRequestQuality.Hd, + _ => throw new ArgumentException(nameof(options.Quality)), + }; + } + Internal.Models.CreateImageRequestResponseFormat? internalFormat = null; + if (options.ResponseFormat != null) + { + internalFormat = options.ResponseFormat switch + { + ImageResponseFormat.Bytes => Internal.Models.CreateImageRequestResponseFormat.B64Json, + ImageResponseFormat.Uri => Internal.Models.CreateImageRequestResponseFormat.Url, + _ => throw new ArgumentException(nameof(options.ResponseFormat)), + }; + } + Internal.Models.CreateImageRequestSize? 
internalSize = null; + if (options.Size != null) + { + internalSize = options.Size switch + { + ImageSize.Size1024x1024 => Internal.Models.CreateImageRequestSize._1024x1024, + ImageSize.Size1024x1792 => Internal.Models.CreateImageRequestSize._1024x1792, + ImageSize.Size1792x1024 => Internal.Models.CreateImageRequestSize._1792x1024, + ImageSize.Size256x256 => Internal.Models.CreateImageRequestSize._256x256, + ImageSize.Size512x512 => Internal.Models.CreateImageRequestSize._512x512, + _ => throw new ArgumentException(nameof(options.Size)), + }; + } + Internal.Models.CreateImageRequestStyle? internalStyle = null; + if (options.Style != null) + { + internalStyle = options.Style switch + { + ImageStyle.Vivid => Internal.Models.CreateImageRequestStyle.Vivid, + ImageStyle.Natural => Internal.Models.CreateImageRequestStyle.Natural, + _ => throw new ArgumentException(nameof(options.Style)), + }; + } + return new Internal.Models.CreateImageRequest( + prompt, + _clientConnector.Model, + imageCount, + quality: internalQuality, + responseFormat: internalFormat, + size: internalSize, + style: internalStyle, + options.User, + serializedAdditionalRawData: null); + } +} diff --git a/.dotnet/src/Custom/Images/ImageGenerationCollection.cs b/.dotnet/src/Custom/Images/ImageGenerationCollection.cs new file mode 100644 index 000000000..7774309b5 --- /dev/null +++ b/.dotnet/src/Custom/Images/ImageGenerationCollection.cs @@ -0,0 +1,12 @@ +using System.Collections.Generic; +using System.Collections.ObjectModel; + +namespace OpenAI.Images; + +/// +/// Represents an image generation response payload that contains information for multiple generated images. 
+/// +public class ImageGenerationCollection : ReadOnlyCollection +{ + internal ImageGenerationCollection(IList list) : base(list) { } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Images/ImageGenerationOptions.cs b/.dotnet/src/Custom/Images/ImageGenerationOptions.cs new file mode 100644 index 000000000..37cdb95af --- /dev/null +++ b/.dotnet/src/Custom/Images/ImageGenerationOptions.cs @@ -0,0 +1,75 @@ +namespace OpenAI.Images; + +/// +/// Represents additional options available to control the behavior of an image generation operation. +/// +public partial class ImageGenerationOptions +{ + /// + /// Specifies the quality level of the image that will be generated. This setting is only available when using + /// the dall-e-3 model. + /// + /// + /// hd - - Finer details, greater consistency, slower, more intensive. + /// + /// + /// standard - - The default quality level that's faster and less + /// intensive but may also be less detailed and consistent than hd. + /// + /// + /// + public ImageQuality? Quality { get; set; } + /// + /// Specifies the desired output representation of the generated image. + /// + /// + /// url - - Default, provides a temporary internet location that + /// the generated image can be retrieved from. + /// + /// + /// b64_json - - Provides the full image data on the response, + /// encoded in the result as a base64 string. This offers the fastest round trip time but can drastically + /// increase the size of response payloads. + /// + /// + /// + public ImageResponseFormat? ResponseFormat { get; set; } + /// + /// Specifies the dimensions of the generated image. Larger images take longer to create. + /// + /// Available for dall-e-2: + /// + /// 1024x1024 - - default + /// 256x256 - - small + /// 512x512 - - medium + /// + /// + /// + /// Available for dall-e-3: + /// + /// 1024x1024 - - default + /// 1024x1792 - - extra tall + /// 1792x1024 - - extra wide + /// + /// + /// + public ImageSize? 
Size { get; set; } + /// + /// The style kind to guide the generation of the image. + /// + /// + /// vivid - - default, a style that tends towards more realistic, + /// dramatic images. + /// + /// + /// natural - - a more subdued style with less tendency towards + /// realism and striking imagery. + /// + /// + /// + public ImageStyle? Style { get; set; } + /// + /// An optional identifier for the end user that can help OpenAI monitor for and detect abuse. + /// + public string User { get; set; } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Images/ImageQuality.cs b/.dotnet/src/Custom/Images/ImageQuality.cs new file mode 100644 index 000000000..41f13b0ff --- /dev/null +++ b/.dotnet/src/Custom/Images/ImageQuality.cs @@ -0,0 +1,24 @@ +namespace OpenAI.Images; + +/// +/// A representation of the quality setting for image operations that controls the level of work that the model will +/// perform. +/// +/// +/// Available qualities consist of: +/// +/// - standard - The default setting that balances speed, detail, and consistecy. +/// - hd - Better consistency and finer details, but may be slower. +/// +/// +public enum ImageQuality +{ + /// + /// The hd image quality that provides finer details and greater consistency but may be slower. + /// + High, + /// + /// The standard image quality that provides a balanced mix of detailing, consistency, and speed. + /// + Standard, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Images/ImageResponseFormat.cs b/.dotnet/src/Custom/Images/ImageResponseFormat.cs new file mode 100644 index 000000000..8403482c5 --- /dev/null +++ b/.dotnet/src/Custom/Images/ImageResponseFormat.cs @@ -0,0 +1,32 @@ +using System; + +namespace OpenAI.Images; + +/// +/// Represents the available output methods for generated images. +/// +/// +/// url - - Default, provides a temporary internet location that +/// the generated image can be retrieved from. 
+/// +/// +/// b64_json - - Provides the full image data on the response, +/// encoded in the result as a base64 string. This offers the fastest round trip time but can drastically +/// increase the size of response payloads. +/// +/// +/// +public enum ImageResponseFormat +{ + /// + /// Instructs the request to return image data directly on the response, encoded as a base64 string in the response + /// JSON. This minimizes availability time but drastically increases the size of responses, required bandwidth, and + /// immediate memory needs. This is equivalent to b64_json in the REST API. + /// + Bytes, + /// + /// The default setting that instructs the request to return a temporary internet location from which the image can + /// be retrieved. + /// + Uri, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Images/ImageSize.cs b/.dotnet/src/Custom/Images/ImageSize.cs new file mode 100644 index 000000000..e92dbf99d --- /dev/null +++ b/.dotnet/src/Custom/Images/ImageSize.cs @@ -0,0 +1,43 @@ +namespace OpenAI.Images; + +/// +/// Represents the available output dimensions for generated images. +/// +public enum ImageSize +{ + /// + /// A square image with 1024 pixels of both width and height. + /// + /// Supported and default for both dall-e-2 and dall-e-3 models. + /// + /// + Size1024x1024, + /// + /// An extra tall image, 1024 pixels wide by 1792 pixels high. + /// + /// Supported only for the dall-e-3 model. + /// + /// + Size1024x1792, + /// + /// An extra wide image, 1792 pixels wide by 1024 pixels high. + /// + /// Supported only for the dall-e-3 model. + /// + /// + Size1792x1024, + /// + /// A small, square image with 256 pixels of both width and height. + /// + /// Supported only for the older dall-e-2 model. + /// + /// + Size256x256, + /// + /// A medium-small, square image with 512 pixels of both width and height. + /// + /// Supported only for the older dall-e-2 model. 
+ /// + /// + Size512x512, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Images/ImageStyle.cs b/.dotnet/src/Custom/Images/ImageStyle.cs new file mode 100644 index 000000000..30616eccd --- /dev/null +++ b/.dotnet/src/Custom/Images/ImageStyle.cs @@ -0,0 +1,18 @@ +namespace OpenAI.Images; + +/// +/// The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards +/// generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real +/// looking images. This param is only supported for dall-e-3. +/// +public enum ImageStyle +{ + /// + /// The vivid style, with which the model will tend towards hyper-realistic, dramatic imagery. + /// + Vivid, + /// + /// The natural style, with which the model will not tend towards hyper-realistic, dramatic imagery. + /// + Natural, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/LegacyCompletions/LegacyCompletionClient.cs b/.dotnet/src/Custom/LegacyCompletions/LegacyCompletionClient.cs new file mode 100644 index 000000000..1b9393daa --- /dev/null +++ b/.dotnet/src/Custom/LegacyCompletions/LegacyCompletionClient.cs @@ -0,0 +1,106 @@ +using System; +using System.ClientModel; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.ComponentModel; +using System.Threading.Tasks; + +namespace OpenAI.LegacyCompletions; + +/// +/// The basic, protocol-level service client for OpenAI legacy completion operations. +/// +/// Note: pre-chat completions are a legacy feature. New solutions should consider the use of chat +/// completions or assistants, instead. +/// +/// +public partial class LegacyCompletionClient +{ + private OpenAIClientConnector _clientConnector; + private Internal.Completions Shim => _clientConnector.InternalClient.GetCompletionsClient(); + + /// + /// Initializes a new instance of , used for legacy completion requests. 
+ /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public LegacyCompletionClient(Uri endpoint, ApiKeyCredential credential, OpenAIClientOptions options = null) + { + _clientConnector = new("protocol", endpoint, credential, options); + } + + /// + /// Initializes a new instance of , used for legacy completion operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// Additional options to customize the client. + public LegacyCompletionClient(Uri endpoint, OpenAIClientOptions options = null) + : this(endpoint, credential: null, options) + { } + + /// + /// Initializes a new instance of , used for legacy completion operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. 
+ public LegacyCompletionClient(ApiKeyCredential credential, OpenAIClientOptions options = null) + : this(endpoint: null, credential, options) + { } + + /// + /// Initializes a new instance of , used for legacy completion operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// Additional options to customize the client. + public LegacyCompletionClient(OpenAIClientOptions options = null) + : this(endpoint: null, credential: null, options) + { } + + /// + public virtual ClientResult GenerateLegacyCompletions(BinaryContent content, RequestOptions context = null) + => Shim.CreateCompletion(content, context); + + /// + public virtual Task GenerateLegacyCompletionsAsync(BinaryContent content, RequestOptions context = null) + => Shim.CreateCompletionAsync(content, context); +} diff --git a/.dotnet/src/Custom/Models/ModelDetailCollection.cs b/.dotnet/src/Custom/Models/ModelDetailCollection.cs new file mode 100644 index 000000000..8e89d769e --- /dev/null +++ b/.dotnet/src/Custom/Models/ModelDetailCollection.cs @@ -0,0 +1,13 @@ +using System.Collections.Generic; +using System.Collections.ObjectModel; + +namespace OpenAI.ModelManagement; + +/// +/// Represents a collection of entries for available models. 
+/// +public partial class ModelDetailCollection : ReadOnlyCollection +{ + internal ModelDetailCollection(IList list) : base(list) + {} +} diff --git a/.dotnet/src/Custom/Models/ModelDetails.cs b/.dotnet/src/Custom/Models/ModelDetails.cs new file mode 100644 index 000000000..30d9ec8b5 --- /dev/null +++ b/.dotnet/src/Custom/Models/ModelDetails.cs @@ -0,0 +1,29 @@ +using System; + +namespace OpenAI.ModelManagement; + +/// +/// Represents information about a single available model entry. +/// +public partial class ModelDetails +{ + /// + /// The ID of the model as used when calling the service. An example is 'gpt-3.5-turbo'. + /// + public string Id { get; } + /// + /// The timestamp when the current model entry became available. + /// + public DateTimeOffset CreatedAt { get; } + /// + /// The name of the organization that owns the model. + /// + public string OwnerOrganization { get; } + + internal ModelDetails(Internal.Models.Model internalModel) + { + Id = internalModel.Id; + CreatedAt = internalModel.Created; + OwnerOrganization = internalModel.OwnedBy; + } +} diff --git a/.dotnet/src/Custom/Models/ModelManagementClient.cs b/.dotnet/src/Custom/Models/ModelManagementClient.cs new file mode 100644 index 000000000..88a3c805c --- /dev/null +++ b/.dotnet/src/Custom/Models/ModelManagementClient.cs @@ -0,0 +1,197 @@ +using OpenAI.ClientShared.Internal; +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.ComponentModel; +using System.Threading.Tasks; + +namespace OpenAI.ModelManagement; + +/// +/// The service client for OpenAI model operations. +/// +public partial class ModelManagementClient +{ + private OpenAIClientConnector _clientConnector; + private Internal.ModelsOps Shim => _clientConnector.InternalClient.GetModelsOpsClient(); + private Internal.FineTuning FineTuningShim + => _clientConnector.InternalClient.GetFineTuningClient(); + + /// + /// Initializes a new instance of , used for model operation requests. 
+ /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public ModelManagementClient(Uri endpoint, ApiKeyCredential credential, OpenAIClientOptions options = null) + { + _clientConnector = new("none", endpoint, credential, options); + } + + /// + /// Initializes a new instance of , used for model operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// Additional options to customize the client. + public ModelManagementClient(Uri endpoint, OpenAIClientOptions options = null) + : this(endpoint, credential: null, options) + { } + + /// + /// Initializes a new instance of , used for model operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. 
+ public ModelManagementClient(ApiKeyCredential credential, OpenAIClientOptions options = null) + : this(endpoint: null, credential, options) + { } + + /// + /// Initializes a new instance of , used for model operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// Additional options to customize the client. + public ModelManagementClient(OpenAIClientOptions options = null) + : this(endpoint: null, credential: null, options) + { } + + public virtual ClientResult GetModelInfo(string modelId) + { + ClientResult internalResult = Shim.Retrieve(modelId); + return ClientResult.FromValue(new ModelDetails(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetModelInfoAsync( + string modelId) + { + ClientResult internalResult = await Shim.RetrieveAsync(modelId).ConfigureAwait(false); + return ClientResult.FromValue(new ModelDetails(internalResult.Value), internalResult.GetRawResponse()); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetModelInfo(string modelId, RequestOptions context) + { + return Shim.Retrieve(modelId, context); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetModelInfoAsync(string modelId, RequestOptions context) + { + return Shim.RetrieveAsync(modelId, context); + } + + public virtual ClientResult GetModels() + { + ClientResult internalResult = Shim.GetModels(); + OptionalList modelEntries = []; + foreach (Internal.Models.Model internalModel in internalResult.Value.Data) + { + modelEntries.Add(new(internalModel)); + } + return ClientResult.FromValue(new ModelDetailCollection(modelEntries), internalResult.GetRawResponse()); + } + + public 
virtual async Task> GetModelsAsync() + { + ClientResult internalResult + = await Shim.GetModelsAsync().ConfigureAwait(false); + OptionalList modelEntries = []; + foreach (Internal.Models.Model internalModel in internalResult.Value.Data) + { + modelEntries.Add(new(internalModel)); + } + return ClientResult.FromValue(new ModelDetailCollection(modelEntries), internalResult.GetRawResponse()); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetModels(RequestOptions context) => Shim.GetModels(context); + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task GetModelsAsync(RequestOptions context) => Shim.GetModelsAsync(context); + + public virtual ClientResult DeleteModel(string modelId) + { + ClientResult internalResult = Shim.Delete(modelId); + return ClientResult.FromValue(internalResult.Value.Deleted, internalResult.GetRawResponse()); + } + + public virtual async Task> DeleteModelAsync(string modelId) + { + ClientResult internalResult + = await Shim.DeleteAsync(modelId).ConfigureAwait(false); + return ClientResult.FromValue(internalResult.Value.Deleted, internalResult.GetRawResponse()); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult DeleteModel(string modelId, RequestOptions context) => Shim.Delete(modelId, context); + + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual Task DeleteModelAsync(string modelId, RequestOptions context) => Shim.DeleteAsync(modelId, context); + + public virtual ClientResult CreateFineTuningJob(BinaryContent content, RequestOptions context = null) + => FineTuningShim.CreateFineTuningJob(content, context); + + public virtual Task CreateFineTuningJobAsync(BinaryContent content, RequestOptions context = null) + => FineTuningShim.CreateFineTuningJobAsync(content, context); + + public virtual ClientResult GetFineTuningJob(string jobId, RequestOptions context) => FineTuningShim.RetrieveFineTuningJob(jobId, context); + + public virtual Task 
GetFineTuningJobAsync(string jobId, RequestOptions context) + => FineTuningShim.RetrieveFineTuningJobAsync(jobId, context); + + public virtual ClientResult GetFineTuningJobs(string previousJobId, int? maxResults, RequestOptions context) + => FineTuningShim.GetPaginatedFineTuningJobs(previousJobId, maxResults, context); + + public virtual Task GetFineTuningJobsAsync(int? maxResults, string previousJobId, RequestOptions context) + => FineTuningShim.GetPaginatedFineTuningJobsAsync(previousJobId, maxResults, context); + + public virtual ClientResult GetFineTuningJobEvents(string jobId, int? maxResults, string previousJobId, RequestOptions context) + => FineTuningShim.GetFineTuningEvents(jobId, previousJobId, maxResults, context); + + public virtual Task GetFineTuningJobEventsAsync(string jobId, int? maxResults, string previousJobId, RequestOptions context) + => FineTuningShim.GetFineTuningEventsAsync(jobId, previousJobId, maxResults, context); + + public virtual ClientResult CancelFineTuningJob(string jobId, RequestOptions context) => FineTuningShim.CancelFineTuningJob(jobId, context); + + public virtual Task CancelFineTuningJobAsync(string jobId, RequestOptions context) + => FineTuningShim.CancelFineTuningJobAsync(jobId, context); +} diff --git a/.dotnet/src/Custom/Moderations/ModerationClient.cs b/.dotnet/src/Custom/Moderations/ModerationClient.cs new file mode 100644 index 000000000..28590087c --- /dev/null +++ b/.dotnet/src/Custom/Moderations/ModerationClient.cs @@ -0,0 +1,106 @@ +using System; +using System.ClientModel; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.ComponentModel; +using System.Threading; +using System.Threading.Tasks; + +namespace OpenAI.Moderations; + +/// +/// The service client for OpenAI moderation operations. 
+/// +public partial class ModerationClient +{ + private OpenAIClientConnector _clientConnector; + private Internal.Moderations Shim => _clientConnector.InternalClient.GetModerationsClient(); + + /// + /// Initializes a new instance of , used for moderation operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public ModerationClient(Uri endpoint, ApiKeyCredential credential, OpenAIClientOptions options = null) + { + _clientConnector = new("none", endpoint, credential, options); + } + + /// + /// Initializes a new instance of , used for moderation operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The connection endpoint to use. + /// Additional options to customize the client. + public ModerationClient(Uri endpoint, OpenAIClientOptions options = null) + : this(endpoint, credential: null, options) + { } + + /// + /// Initializes a new instance of , used for moderation operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. 
+ /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public ModerationClient(ApiKeyCredential credential, OpenAIClientOptions options = null) + : this(endpoint: null, credential, options) + { } + + /// + /// Initializes a new instance of , used for moderation operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// Additional options to customize the client. + public ModerationClient(OpenAIClientOptions options = null) + : this(endpoint: null, credential: null, options) + { } + + public virtual ClientResult ClassifyText(BinaryContent content, RequestOptions context = null) + { + return Shim.CreateModeration(content, context); + } + + public virtual Task ClassifyTextAsync(BinaryContent content, RequestOptions context = null) + { + return Shim.CreateModerationAsync(content, context); + } + +} diff --git a/.dotnet/src/Custom/OpenAIClient.cs b/.dotnet/src/Custom/OpenAIClient.cs new file mode 100644 index 000000000..b94c3bf93 --- /dev/null +++ b/.dotnet/src/Custom/OpenAIClient.cs @@ -0,0 +1,191 @@ +using OpenAI.Assistants; +using OpenAI.Audio; +using OpenAI.Chat; +using OpenAI.Embeddings; +using OpenAI.Files; +using OpenAI.Images; +using OpenAI.LegacyCompletions; +using OpenAI.ModelManagement; +using OpenAI.Moderations; +using System; +using System.ClientModel; + +namespace OpenAI; + +/// +/// A top-level client factory that enables convenient creation of scenario-specific sub-clients while reusing shared +/// configuration details like 
endpoint, authentication, and pipeline customization. +/// +public partial class OpenAIClient +{ + private readonly Uri _cachedEndpoint = null; + private readonly ApiKeyCredential _cachedCredential = null; + private readonly OpenAIClientOptions _cachedOptions = null; + + /// + /// Creates a new instance of will store common client configuration details to permit + /// easy reuse and propagation to multiple, scenario-specific subclients. + /// + /// + /// This client does not provide any model functionality directly and is purely a helper to facilitate the creation + /// of the scenario-specific subclients like . + /// + /// An explicitly defined endpoint that all clients created by this should use. + /// An explicitly defined credential that all clients created by this should use. + /// A common client options definition that all clients created by this should use. + public OpenAIClient(Uri endpoint, ApiKeyCredential credential, OpenAIClientOptions clientOptions = null) + { + _cachedEndpoint = endpoint; + _cachedCredential = credential; + _cachedOptions = clientOptions; + } + + /// + /// Creates a new instance of will store common client configuration details to permit + /// easy reuse and propagation to multiple, scenario-specific subclients. + /// + /// + /// This client does not provide any model functionality directly and is purely a helper to facilitate the creation + /// of the scenario-specific subclients like . + /// + /// An explicitly defined endpoint that all clients created by this should use. + /// A common client options definition that all clients created by this should use. + public OpenAIClient(Uri endpoint, OpenAIClientOptions clientOptions = null) + : this(endpoint, credential: null, clientOptions) + { } + + /// + /// Creates a new instance of will store common client configuration details to permit + /// easy reuse and propagation to multiple, scenario-specific subclients. 
+ /// + /// + /// This client does not provide any model functionality directly and is purely a helper to facilitate the creation + /// of the scenario-specific subclients like . + /// + /// An explicitly defined credential that all clients created by this should use. + /// A common client options definition that all clients created by this should use. + public OpenAIClient(ApiKeyCredential credential, OpenAIClientOptions clientOptions = null) + : this(endpoint: null, credential, clientOptions) + { } + + /// + /// Creates a new instance of will store common client configuration details to permit + /// easy reuse and propagation to multiple, scenario-specific subclients. + /// + /// + /// This client does not provide any model functionality directly and is purely a helper to facilitate the creation + /// of the scenario-specific subclients like . + /// + /// A common client options definition that all clients created by this should use. + public OpenAIClient(OpenAIClientOptions clientOptions) + : this(endpoint: null, credential: null, clientOptions) + { } + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public AssistantClient GetAssistantClient() + => new AssistantClient(_cachedEndpoint, _cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public AudioClient GetAudioClient(string model) + => new AudioClient(_cachedEndpoint, model, _cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. 
+ /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public ChatClient GetChatClient(string model) + => new ChatClient(_cachedEndpoint, model, _cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public EmbeddingClient GetEmbeddingClient(string model) + => new EmbeddingClient(_cachedEndpoint, model, _cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public FileClient GetFileClient() + => new FileClient(_cachedEndpoint, _cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public ImageClient GetImageClient(string model) + => new ImageClient(_cachedEndpoint, model, _cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public LegacyCompletionClient GetLegacyCompletionClient() + => new LegacyCompletionClient(_cachedEndpoint, _cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. 
+ /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public ModelManagementClient GetModelManagementClient() + => new ModelManagementClient(_cachedEndpoint, _cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public ModerationClient GetModerationClient() + => new ModerationClient(_cachedEndpoint, _cachedCredential, _cachedOptions); +} diff --git a/.dotnet/src/Custom/OpenAIClientConnector.cs b/.dotnet/src/Custom/OpenAIClientConnector.cs new file mode 100644 index 000000000..348d5f48b --- /dev/null +++ b/.dotnet/src/Custom/OpenAIClientConnector.cs @@ -0,0 +1,33 @@ +using System; +using System.ClientModel; +using System.ClientModel.Internal; + + +namespace OpenAI; + +// This internal type facilitates composition rather than inheritance for scenario clients. + +internal partial class OpenAIClientConnector +{ + private static readonly string s_OpenAIEndpointEnvironmentVariable = "OPENAI_ENDPOINT"; + private static readonly string s_OpenAIApiKeyEnvironmentVariable = "OPENAI_API_KEY"; + private static readonly string s_defaultOpenAIV1Endpoint = "https://api.openai.com/v1"; + + internal Internal.OpenAIClient InternalClient { get; } + internal string Model { get; } + internal Uri Endpoint { get; } + + internal OpenAIClientConnector( + string model, + Uri endpoint = null, + ApiKeyCredential credential = null, + OpenAIClientOptions options = null) + { + if (model is null) throw new ArgumentNullException(nameof(model)); + Model = model; + Endpoint ??= new(Environment.GetEnvironmentVariable(s_OpenAIEndpointEnvironmentVariable) ?? 
s_defaultOpenAIV1Endpoint); + credential ??= new(Environment.GetEnvironmentVariable(s_OpenAIApiKeyEnvironmentVariable) ?? string.Empty); + options ??= new(); + InternalClient = new(Endpoint, credential, options.InternalOptions); + } +} diff --git a/.dotnet/src/Custom/OpenAIClientOptions.cs b/.dotnet/src/Custom/OpenAIClientOptions.cs new file mode 100644 index 000000000..69eb0b2d6 --- /dev/null +++ b/.dotnet/src/Custom/OpenAIClientOptions.cs @@ -0,0 +1,32 @@ +using System.ClientModel; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Threading; + +namespace OpenAI; + +/// +/// Client-level options for the OpenAI service. +/// +public partial class OpenAIClientOptions : RequestOptions +{ + // Note: this type currently proxies RequestOptions properties manually via the matching internal type. This is a + // temporary extra step pending richer integration with code generation. + + internal Internal.OpenAIClientOptions InternalOptions { get; } + + public new void AddPolicy(PipelinePolicy policy, PipelinePosition position) + { + InternalOptions.AddPolicy(policy, position); + } + + public OpenAIClientOptions() + : this(internalOptions: null) + { } + + internal OpenAIClientOptions(Internal.OpenAIClientOptions internalOptions = null) + { + internalOptions ??= new(); + InternalOptions = internalOptions; + } +} diff --git a/.dotnet/src/Generated/OpenAIClient.cs b/.dotnet/src/Generated/OpenAIClient.cs index abe16ec58..014c7aada 100644 --- a/.dotnet/src/Generated/OpenAIClient.cs +++ b/.dotnet/src/Generated/OpenAIClient.cs @@ -51,96 +51,96 @@ public OpenAIClient(Uri endpoint, ApiKeyCredential credential, OpenAIClientOptio _endpoint = endpoint; } - private Audio _cachedAudio; - private Assistants _cachedAssistants; - private Chat _cachedChat; - private Completions _cachedCompletions; - private Embeddings _cachedEmbeddings; - private Files _cachedFiles; - private FineTuning _cachedFineTuning; - private Images _cachedImages; - private Messages 
_cachedMessages; - private ModelsOps _cachedModelsOps; - private Moderations _cachedModerations; - private Runs _cachedRuns; - private Threads _cachedThreads; + private OpenAI.Internal.Audio _cachedAudio; + private OpenAI.Internal.Assistants _cachedAssistants; + private OpenAI.Internal.Chat _cachedChat; + private OpenAI.Internal.Completions _cachedCompletions; + private OpenAI.Internal.Embeddings _cachedEmbeddings; + private OpenAI.Internal.Files _cachedFiles; + private OpenAI.Internal.FineTuning _cachedFineTuning; + private OpenAI.Internal.Images _cachedImages; + private OpenAI.Internal.Messages _cachedMessages; + private OpenAI.Internal.ModelsOps _cachedModelsOps; + private OpenAI.Internal.Moderations _cachedModerations; + private OpenAI.Internal.Runs _cachedRuns; + private OpenAI.Internal.Threads _cachedThreads; /// Initializes a new instance of Audio. - public virtual Audio GetAudioClient() + public virtual OpenAI.Internal.Audio GetAudioClient() { - return Volatile.Read(ref _cachedAudio) ?? Interlocked.CompareExchange(ref _cachedAudio, new Audio(_pipeline, _credential, _endpoint), null) ?? _cachedAudio; + return Volatile.Read(ref _cachedAudio) ?? Interlocked.CompareExchange(ref _cachedAudio, new OpenAI.Internal.Audio(_pipeline, _credential, _endpoint), null) ?? _cachedAudio; } /// Initializes a new instance of Assistants. - public virtual Assistants GetAssistantsClient() + public virtual OpenAI.Internal.Assistants GetAssistantsClient() { - return Volatile.Read(ref _cachedAssistants) ?? Interlocked.CompareExchange(ref _cachedAssistants, new Assistants(_pipeline, _credential, _endpoint), null) ?? _cachedAssistants; + return Volatile.Read(ref _cachedAssistants) ?? Interlocked.CompareExchange(ref _cachedAssistants, new OpenAI.Internal.Assistants(_pipeline, _credential, _endpoint), null) ?? _cachedAssistants; } /// Initializes a new instance of Chat. 
- public virtual Chat GetChatClient() + public virtual OpenAI.Internal.Chat GetChatClient() { - return Volatile.Read(ref _cachedChat) ?? Interlocked.CompareExchange(ref _cachedChat, new Chat(_pipeline, _credential, _endpoint), null) ?? _cachedChat; + return Volatile.Read(ref _cachedChat) ?? Interlocked.CompareExchange(ref _cachedChat, new OpenAI.Internal.Chat(_pipeline, _credential, _endpoint), null) ?? _cachedChat; } /// Initializes a new instance of Completions. - public virtual Completions GetCompletionsClient() + public virtual OpenAI.Internal.Completions GetCompletionsClient() { - return Volatile.Read(ref _cachedCompletions) ?? Interlocked.CompareExchange(ref _cachedCompletions, new Completions(_pipeline, _credential, _endpoint), null) ?? _cachedCompletions; + return Volatile.Read(ref _cachedCompletions) ?? Interlocked.CompareExchange(ref _cachedCompletions, new OpenAI.Internal.Completions(_pipeline, _credential, _endpoint), null) ?? _cachedCompletions; } /// Initializes a new instance of Embeddings. - public virtual Embeddings GetEmbeddingsClient() + public virtual OpenAI.Internal.Embeddings GetEmbeddingsClient() { - return Volatile.Read(ref _cachedEmbeddings) ?? Interlocked.CompareExchange(ref _cachedEmbeddings, new Embeddings(_pipeline, _credential, _endpoint), null) ?? _cachedEmbeddings; + return Volatile.Read(ref _cachedEmbeddings) ?? Interlocked.CompareExchange(ref _cachedEmbeddings, new OpenAI.Internal.Embeddings(_pipeline, _credential, _endpoint), null) ?? _cachedEmbeddings; } /// Initializes a new instance of Files. - public virtual Files GetFilesClient() + public virtual OpenAI.Internal.Files GetFilesClient() { - return Volatile.Read(ref _cachedFiles) ?? Interlocked.CompareExchange(ref _cachedFiles, new Files(_pipeline, _credential, _endpoint), null) ?? _cachedFiles; + return Volatile.Read(ref _cachedFiles) ?? Interlocked.CompareExchange(ref _cachedFiles, new OpenAI.Internal.Files(_pipeline, _credential, _endpoint), null) ?? 
_cachedFiles; } /// Initializes a new instance of FineTuning. - public virtual FineTuning GetFineTuningClient() + public virtual OpenAI.Internal.FineTuning GetFineTuningClient() { - return Volatile.Read(ref _cachedFineTuning) ?? Interlocked.CompareExchange(ref _cachedFineTuning, new FineTuning(_pipeline, _credential, _endpoint), null) ?? _cachedFineTuning; + return Volatile.Read(ref _cachedFineTuning) ?? Interlocked.CompareExchange(ref _cachedFineTuning, new OpenAI.Internal.FineTuning(_pipeline, _credential, _endpoint), null) ?? _cachedFineTuning; } /// Initializes a new instance of Images. - public virtual Images GetImagesClient() + public virtual OpenAI.Internal.Images GetImagesClient() { - return Volatile.Read(ref _cachedImages) ?? Interlocked.CompareExchange(ref _cachedImages, new Images(_pipeline, _credential, _endpoint), null) ?? _cachedImages; + return Volatile.Read(ref _cachedImages) ?? Interlocked.CompareExchange(ref _cachedImages, new OpenAI.Internal.Images(_pipeline, _credential, _endpoint), null) ?? _cachedImages; } /// Initializes a new instance of Messages. - public virtual Messages GetMessagesClient() + public virtual OpenAI.Internal.Messages GetMessagesClient() { - return Volatile.Read(ref _cachedMessages) ?? Interlocked.CompareExchange(ref _cachedMessages, new Messages(_pipeline, _credential, _endpoint), null) ?? _cachedMessages; + return Volatile.Read(ref _cachedMessages) ?? Interlocked.CompareExchange(ref _cachedMessages, new OpenAI.Internal.Messages(_pipeline, _credential, _endpoint), null) ?? _cachedMessages; } /// Initializes a new instance of ModelsOps. - public virtual ModelsOps GetModelsOpsClient() + public virtual OpenAI.Internal.ModelsOps GetModelsOpsClient() { - return Volatile.Read(ref _cachedModelsOps) ?? Interlocked.CompareExchange(ref _cachedModelsOps, new ModelsOps(_pipeline, _credential, _endpoint), null) ?? _cachedModelsOps; + return Volatile.Read(ref _cachedModelsOps) ?? 
Interlocked.CompareExchange(ref _cachedModelsOps, new OpenAI.Internal.ModelsOps(_pipeline, _credential, _endpoint), null) ?? _cachedModelsOps; } /// Initializes a new instance of Moderations. - public virtual Moderations GetModerationsClient() + public virtual OpenAI.Internal.Moderations GetModerationsClient() { - return Volatile.Read(ref _cachedModerations) ?? Interlocked.CompareExchange(ref _cachedModerations, new Moderations(_pipeline, _credential, _endpoint), null) ?? _cachedModerations; + return Volatile.Read(ref _cachedModerations) ?? Interlocked.CompareExchange(ref _cachedModerations, new OpenAI.Internal.Moderations(_pipeline, _credential, _endpoint), null) ?? _cachedModerations; } /// Initializes a new instance of Runs. - public virtual Runs GetRunsClient() + public virtual OpenAI.Internal.Runs GetRunsClient() { - return Volatile.Read(ref _cachedRuns) ?? Interlocked.CompareExchange(ref _cachedRuns, new Runs(_pipeline, _credential, _endpoint), null) ?? _cachedRuns; + return Volatile.Read(ref _cachedRuns) ?? Interlocked.CompareExchange(ref _cachedRuns, new OpenAI.Internal.Runs(_pipeline, _credential, _endpoint), null) ?? _cachedRuns; } /// Initializes a new instance of Threads. - public virtual Threads GetThreadsClient() + public virtual OpenAI.Internal.Threads GetThreadsClient() { - return Volatile.Read(ref _cachedThreads) ?? Interlocked.CompareExchange(ref _cachedThreads, new Threads(_pipeline, _credential, _endpoint), null) ?? _cachedThreads; + return Volatile.Read(ref _cachedThreads) ?? Interlocked.CompareExchange(ref _cachedThreads, new OpenAI.Internal.Threads(_pipeline, _credential, _endpoint), null) ?? _cachedThreads; } } } diff --git a/.dotnet/src/Generated/OpenAIModelFactory.cs b/.dotnet/src/Generated/OpenAIModelFactory.cs index ad51183cf..f01f31690 100644 --- a/.dotnet/src/Generated/OpenAIModelFactory.cs +++ b/.dotnet/src/Generated/OpenAIModelFactory.cs @@ -9,546 +9,6 @@ namespace OpenAI.Internal.Models /// Model factory for models. 
internal static partial class OpenAIModelFactory { - /// Initializes a new instance of . - /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`. - /// The text to generate audio for. The maximum length is 4096 characters. - /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, - /// `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the - /// [Text to speech guide](/docs/guides/text-to-speech/voice-options). - /// - /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. - /// The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. - /// A new instance for mocking. - public static CreateSpeechRequest CreateSpeechRequest(CreateSpeechRequestModel model = default, string input = null, CreateSpeechRequestVoice voice = default, CreateSpeechRequestResponseFormat? responseFormat = null, double? speed = null) - { - return new CreateSpeechRequest(model, input, voice, responseFormat, speed, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, - /// mpeg, mpga, m4a, ogg, wav, or webm. - /// - /// ID of the model to use. Only `whisper-1` is currently available. - /// - /// The language of the input audio. Supplying the input language in - /// [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy - /// and latency. - /// - /// - /// An optional text to guide the model's style or continue a previous audio segment. The - /// [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - /// - /// - /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or - /// vtt. - /// - /// - /// The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more - /// random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, - /// the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to - /// automatically increase the temperature until certain thresholds are hit. - /// - /// A new instance for mocking. - public static CreateTranscriptionRequest CreateTranscriptionRequest(BinaryData file = null, CreateTranscriptionRequestModel model = default, string language = null, string prompt = null, CreateTranscriptionRequestResponseFormat? responseFormat = null, double? temperature = null) - { - return new CreateTranscriptionRequest(file, model, language, prompt, responseFormat, temperature, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The transcribed text for the provided audio data. - /// The label that describes which operation type generated the accompanying response data. - /// The spoken language that was detected in the audio data. - /// The total duration of the audio processed to produce accompanying transcription information. - /// - /// A collection of information about the timing, probabilities, and other detail of each processed - /// audio segment. - /// - /// A new instance for mocking. - public static CreateTranscriptionResponse CreateTranscriptionResponse(string text = null, CreateTranscriptionResponseTask? task = null, string language = null, TimeSpan? duration = null, IEnumerable segments = null) - { - segments ??= new List(); - - return new CreateTranscriptionResponse(text, task, language, duration, segments?.ToList(), serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The zero-based index of this segment. - /// - /// The seek position associated with the processing of this audio segment. Seek positions are - /// expressed as hundredths of seconds. 
The model may process several segments from a single seek - /// position, so while the seek position will never represent a later time than the segment's - /// start, the segment's start may represent a significantly later time than the segment's - /// associated seek position. - /// - /// The time at which this segment started relative to the beginning of the audio. - /// The time at which this segment ended relative to the beginning of the audio. - /// The text that was part of this audio segment. - /// The token IDs matching the text in this audio segment. - /// The temperature score associated with this audio segment. - /// The average log probability associated with this audio segment. - /// The compression ratio of this audio segment. - /// The probability of no speech detection within this audio segment. - /// A new instance for mocking. - public static AudioSegment AudioSegment(long id = default, long seek = default, TimeSpan start = default, TimeSpan end = default, string text = null, IEnumerable tokens = null, double temperature = default, double avgLogprob = default, double compressionRatio = default, double noSpeechProb = default) - { - tokens ??= new List(); - - return new AudioSegment(id, seek, start, end, text, tokens?.ToList(), temperature, avgLogprob, compressionRatio, noSpeechProb, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, - /// mpeg, mpga, m4a, ogg, wav, or webm. - /// - /// ID of the model to use. Only `whisper-1` is currently available. - /// - /// An optional text to guide the model's style or continue a previous audio segment. The - /// [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - /// - /// - /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or - /// vtt. - /// - /// - /// The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more - /// random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, - /// the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to - /// automatically increase the temperature until certain thresholds are hit. - /// - /// A new instance for mocking. - public static CreateTranslationRequest CreateTranslationRequest(BinaryData file = null, CreateTranslationRequestModel model = default, string prompt = null, CreateTranslationRequestResponseFormat? responseFormat = null, double? temperature = null) - { - return new CreateTranslationRequest(file, model, prompt, responseFormat, temperature, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The translated text for the provided audio data. - /// The label that describes which operation type generated the accompanying response data. - /// The spoken language that was detected in the audio data. - /// The total duration of the audio processed to produce accompanying translation information. - /// - /// A collection of information about the timing, probabilities, and other detail of each processed - /// audio segment. - /// - /// A new instance for mocking. - public static CreateTranslationResponse CreateTranslationResponse(string text = null, CreateTranslationResponseTask? task = null, string language = null, TimeSpan? duration = null, IEnumerable segments = null) - { - segments ??= new List(); - - return new CreateTranslationResponse(text, task, language, duration, segments?.ToList(), serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to - /// see all of your available models, or see our [Model overview](/docs/models/overview) for - /// descriptions of them. - /// - /// The name of the assistant. The maximum length is 256 characters. 
- /// The description of the assistant. The maximum length is 512 characters. - /// The system instructions that the assistant uses. The maximum length is 32768 characters. - /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. - /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. - /// - /// - /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a - /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in - /// ascending order. - /// - /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - /// additional information about the object in a structured format. Keys can be a maximum of 64 - /// characters long and values can be a maxium of 512 characters long. - /// - /// A new instance for mocking. - public static CreateAssistantRequest CreateAssistantRequest(string model = null, string name = null, string description = null, string instructions = null, IEnumerable tools = null, IEnumerable fileIds = null, IDictionary metadata = null) - { - tools ??= new List(); - fileIds ??= new List(); - metadata ??= new Dictionary(); - - return new CreateAssistantRequest(model, name, description, instructions, tools?.ToList(), fileIds?.ToList(), metadata, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The identifier, which can be referenced in API endpoints. - /// The object type, which is always `assistant`. - /// The Unix timestamp (in seconds) for when the assistant was created. - /// The name of the assistant. The maximum length is 256 characters. - /// The description of the assistant. The maximum length is 512 characters. - /// - /// ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to - /// see all of your available models, or see our [Model overview](/docs/models/overview) for - /// descriptions of them. - /// - /// The system instructions that the assistant uses. The maximum length is 32768 characters. - /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. - /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. - /// - /// - /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a - /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in - /// ascending order. - /// - /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing - /// additional information about the object in a structured format. Keys can be a maximum of 64 - /// characters long and values can be a maxium of 512 characters long. - /// - /// A new instance for mocking. - public static AssistantObject AssistantObject(string id = null, AssistantObjectObject @object = default, DateTimeOffset createdAt = default, string name = null, string description = null, string model = null, string instructions = null, IEnumerable tools = null, IEnumerable fileIds = null, IReadOnlyDictionary metadata = null) - { - tools ??= new List(); - fileIds ??= new List(); - metadata ??= new Dictionary(); - - return new AssistantObject(id, @object, createdAt, name, description, model, instructions, tools?.ToList(), fileIds?.ToList(), metadata, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// - /// - /// - /// - /// A new instance for mocking. 
- public static ListAssistantsResponse ListAssistantsResponse(ListAssistantsResponseObject @object = default, IEnumerable data = null, string firstId = null, string lastId = null, bool hasMore = default) - { - data ??= new List(); - - return new ListAssistantsResponse(@object, data?.ToList(), firstId, lastId, hasMore, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// - /// - /// A new instance for mocking. - public static DeleteAssistantResponse DeleteAssistantResponse(string id = null, bool deleted = default, DeleteAssistantResponseObject @object = default) - { - return new DeleteAssistantResponse(id, deleted, @object, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The identifier, which can be referenced in API endpoints. - /// The object type, which is always `assistant.file`. - /// The Unix timestamp (in seconds) for when the assistant file was created. - /// The assistant ID that the file is attached to. - /// A new instance for mocking. - public static AssistantFileObject AssistantFileObject(string id = null, AssistantFileObjectObject @object = default, DateTimeOffset createdAt = default, string assistantId = null) - { - return new AssistantFileObject(id, @object, createdAt, assistantId, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// - /// - /// - /// - /// A new instance for mocking. - public static ListAssistantFilesResponse ListAssistantFilesResponse(ListAssistantFilesResponseObject @object = default, IEnumerable data = null, string firstId = null, string lastId = null, bool hasMore = default) - { - data ??= new List(); - - return new ListAssistantFilesResponse(@object, data?.ToList(), firstId, lastId, hasMore, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// - /// - /// A new instance for mocking. 
- public static DeleteAssistantFileResponse DeleteAssistantFileResponse(string id = null, bool deleted = default, DeleteAssistantFileResponseObject @object = default) - { - return new DeleteAssistantFileResponse(id, deleted, @object, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// A list of messages comprising the conversation so far. - /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). - /// - /// - /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) - /// table for details on which models work with the Chat API. - /// - /// - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing - /// frequency in the text so far, decreasing the model's likelihood to repeat the same line - /// verbatim. - /// - /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - /// - /// - /// Modify the likelihood of specified tokens appearing in the completion. - /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an - /// associated bias value from -100 to 100. Mathematically, the bias is added to the logits - /// generated by the model prior to sampling. The exact effect will vary per model, but values - /// between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 - /// should result in a ban or exclusive selection of the relevant token. - /// - /// - /// Whether to return log probabilities of the output tokens or not. If true, returns the log - /// probabilities of each output token returned in the `content` of `message`. This option is - /// currently not available on the `gpt-4-vision-preview` model. 
- /// - /// - /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token - /// position, each with an associated log probability. `logprobs` must be set to `true` if this - /// parameter is used. - /// - /// - /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. - /// - /// The total length of input tokens and generated tokens is limited by the model's context length. - /// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - /// for counting tokens. - /// - /// - /// How many chat completion choices to generate for each input message. Note that you will be - /// charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to - /// minimize costs. - /// - /// - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear - /// in the text so far, increasing the model's likelihood to talk about new topics. - /// - /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - /// - /// - /// An object specifying the format that the model must output. Compatible with - /// [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the - /// model generates is valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON - /// yourself via a system or user message. Without this, the model may generate an unending stream - /// of whitespace until the generation reaches the token limit, resulting in a long-running and - /// seemingly "stuck" request. Also note that the message content may be partially cut off if - /// `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the - /// conversation exceeded the max context length. 
- /// - /// - /// This feature is in Beta. - /// - /// If specified, our system will make a best effort to sample deterministically, such that - /// repeated requests with the same `seed` and parameters should return the same result. - /// - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response - /// parameter to monitor changes in the backend. - /// - /// Up to 4 sequences where the API will stop generating further tokens. - /// - /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only - /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - /// as they become available, with the stream terminated by a `data: [DONE]` message. - /// [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). - /// - /// - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - /// more random, while lower values like 0.2 will make it more focused and deterministic. - /// - /// We generally recommend altering this or `top_p` but not both. - /// - /// - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers - /// the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - /// the top 10% probability mass are considered. - /// - /// We generally recommend altering this or `temperature` but not both. - /// - /// - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this - /// to provide a list of functions the model may generate JSON inputs for. - /// - /// - /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect - /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - /// - /// - /// Deprecated in favor of `tool_choice`. 
- /// - /// Controls which (if any) function is called by the model. `none` means the model will not call a - /// function and instead generates a message. `auto` means the model can pick between generating a - /// message or calling a function. Specifying a particular function via `{"name": "my_function"}` - /// forces the model to call that function. - /// - /// `none` is the default when no functions are present. `auto` is the default if functions are - /// present. - /// - /// - /// Deprecated in favor of `tools`. - /// - /// A list of functions the model may generate JSON inputs for. - /// - /// A new instance for mocking. - public static CreateChatCompletionRequest CreateChatCompletionRequest(IEnumerable messages = null, CreateChatCompletionRequestModel model = default, double? frequencyPenalty = null, IDictionary logitBias = null, bool? logprobs = null, long? topLogprobs = null, long? maxTokens = null, long? n = null, double? presencePenalty = null, CreateChatCompletionRequestResponseFormat responseFormat = null, long? seed = null, BinaryData stop = null, bool? stream = null, double? temperature = null, double? topP = null, IEnumerable tools = null, BinaryData toolChoice = null, string user = null, BinaryData functionCall = null, IEnumerable functions = null) - { - messages ??= new List(); - logitBias ??= new Dictionary(); - tools ??= new List(); - functions ??= new List(); - - return new CreateChatCompletionRequest(messages?.ToList(), model, frequencyPenalty, logitBias, logprobs, topLogprobs, maxTokens, n, presencePenalty, responseFormat, seed, stop, stream, temperature, topP, tools?.ToList(), toolChoice, user, functionCall, functions?.ToList(), serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The ID of the tool call. - /// The type of the tool. Currently, only `function` is supported. - /// The function that the model called. - /// A new instance for mocking. 
- public static ChatCompletionMessageToolCall ChatCompletionMessageToolCall(string id = null, ChatCompletionMessageToolCallType type = default, ChatCompletionMessageToolCallFunction function = null) - { - return new ChatCompletionMessageToolCall(id, type, function, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The type of the tool. Currently, only `function` is supported. - /// - /// A new instance for mocking. - public static ChatCompletionTool ChatCompletionTool(ChatCompletionToolType type = default, FunctionObject function = null) - { - return new ChatCompletionTool(type, function, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// A description of what the function does, used by the model to choose when and how to call the - /// function. - /// - /// - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and - /// dashes, with a maximum length of 64. - /// - /// - /// A new instance for mocking. - public static ChatCompletionFunctions ChatCompletionFunctions(string description = null, string name = null, FunctionParameters parameters = null) - { - return new ChatCompletionFunctions(description, name, parameters, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// A unique identifier for the chat completion. - /// A list of chat completion choices. Can be more than one if `n` is greater than 1. - /// The Unix timestamp (in seconds) of when the chat completion was created. - /// The model used for the chat completion. - /// - /// This fingerprint represents the backend configuration that the model runs with. - /// - /// Can be used in conjunction with the `seed` request parameter to understand when backend changes - /// have been made that might impact determinism. - /// - /// The object type, which is always `chat.completion`. - /// - /// A new instance for mocking. 
- public static CreateChatCompletionResponse CreateChatCompletionResponse(string id = null, IEnumerable choices = null, DateTimeOffset created = default, string model = null, string systemFingerprint = null, CreateChatCompletionResponseObject @object = default, CompletionUsage usage = null) - { - choices ??= new List(); - - return new CreateChatCompletionResponse(id, choices?.ToList(), created, model, systemFingerprint, @object, usage, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// The reason the model stopped generating tokens. This will be `stop` if the model hit a - /// natural stop point or a provided stop sequence, `length` if the maximum number of tokens - /// specified in the request was reached, `content_filter` if content was omitted due to a flag - /// from our content filters, `tool_calls` if the model called a tool, or `function_call` - /// (deprecated) if the model called a function. - /// - /// The index of the choice in the list of choices. - /// - /// Log probability information for the choice. - /// A new instance for mocking. - public static CreateChatCompletionResponseChoice CreateChatCompletionResponseChoice(CreateChatCompletionResponseChoiceFinishReason finishReason = default, long index = default, ChatCompletionResponseMessage message = null, CreateChatCompletionResponseChoiceLogprobs logprobs = null) - { - return new CreateChatCompletionResponseChoice(finishReason, index, message, logprobs, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The contents of the message. - /// - /// The role of the author of this message. - /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. - /// A new instance for mocking. 
- public static ChatCompletionResponseMessage ChatCompletionResponseMessage(string content = null, IEnumerable toolCalls = null, ChatCompletionResponseMessageRole role = default, ChatCompletionResponseMessageFunctionCall functionCall = null) - { - toolCalls ??= new List(); - - return new ChatCompletionResponseMessage(content, toolCalls?.ToList(), role, functionCall, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// The arguments to call the function with, as generated by the model in JSON format. Note that - /// the model does not always generate valid JSON, and may hallucinate parameters not defined by - /// your function schema. Validate the arguments in your code before calling your function. - /// - /// The name of the function to call. - /// A new instance for mocking. - public static ChatCompletionResponseMessageFunctionCall ChatCompletionResponseMessageFunctionCall(string arguments = null, string name = null) - { - return new ChatCompletionResponseMessageFunctionCall(arguments, name, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// A new instance for mocking. - public static CreateChatCompletionResponseChoiceLogprobs CreateChatCompletionResponseChoiceLogprobs(IEnumerable content = null) - { - content ??= new List(); - - return new CreateChatCompletionResponseChoiceLogprobs(content?.ToList(), serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The token. - /// The log probability of this token. - /// - /// A list of integers representing the UTF-8 bytes representation of the token. Useful in - /// instances where characters are represented by multiple tokens and their byte representations - /// must be combined to generate the correct text representation. Can be `null` if there is no - /// bytes representation for the token. - /// - /// - /// List of the most likely tokens and their log probability, at this token position. 
In rare - /// cases, there may be fewer than the number of requested `top_logprobs` returned. - /// - /// A new instance for mocking. - public static ChatCompletionTokenLogprob ChatCompletionTokenLogprob(string token = null, double logprob = default, IEnumerable bytes = null, IEnumerable topLogprobs = null) - { - bytes ??= new List(); - topLogprobs ??= new List(); - - return new ChatCompletionTokenLogprob(token, logprob, bytes?.ToList(), topLogprobs?.ToList(), serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The token. - /// The log probability of this token. - /// - /// A list of integers representing the UTF-8 bytes representation of the token. Useful in - /// instances where characters are represented by multiple tokens and their byte representations - /// must be combined to generate the correct text representation. Can be `null` if there is no - /// bytes representation for the token. - /// - /// A new instance for mocking. - public static ChatCompletionTokenLogprobTopLogprob ChatCompletionTokenLogprobTopLogprob(string token = null, double logprob = default, IEnumerable bytes = null) - { - bytes ??= new List(); - - return new ChatCompletionTokenLogprobTopLogprob(token, logprob, bytes?.ToList(), serializedAdditionalRawData: null); - } - /// Initializes a new instance of . /// Number of tokens in the prompt. /// Number of tokens in the generated completion. @@ -724,118 +184,6 @@ public static CreateCompletionResponseChoiceLogprobs CreateCompletionResponseCho return new CreateCompletionResponseChoiceLogprobs(tokens?.ToList(), tokenLogprobs?.ToList(), topLogprobs?.ToList(), textOffset?.ToList(), serializedAdditionalRawData: null); } - /// Initializes a new instance of . - /// - /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a - /// single request, pass an array of strings or array of token arrays. 
Each input must not exceed - /// the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an - /// empty string. - /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - /// for counting tokens. - /// - /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to - /// see all of your available models, or see our [Model overview](/docs/models/overview) for - /// descriptions of them. - /// - /// - /// The format to return the embeddings in. Can be either `float` or - /// [`base64`](https://pypi.org/project/pybase64/). - /// - /// - /// The number of dimensions the resulting output embeddings should have. Only supported in - /// `text-embedding-3` and later models. - /// - /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect - /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - /// - /// A new instance for mocking. - public static CreateEmbeddingRequest CreateEmbeddingRequest(BinaryData input = null, CreateEmbeddingRequestModel model = default, CreateEmbeddingRequestEncodingFormat? encodingFormat = null, long? dimensions = null, string user = null) - { - return new CreateEmbeddingRequest(input, model, encodingFormat, dimensions, user, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The list of embeddings generated by the model. - /// The name of the model used to generate the embedding. - /// The object type, which is always "list". - /// The usage information for the request. - /// A new instance for mocking. 
- public static CreateEmbeddingResponse CreateEmbeddingResponse(IEnumerable data = null, string model = null, CreateEmbeddingResponseObject @object = default, EmbeddingUsage usage = null) - { - data ??= new List(); - - return new CreateEmbeddingResponse(data?.ToList(), model, @object, usage, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The index of the embedding in the list of embeddings. - /// - /// The embedding vector, which is a list of floats. The length of vector depends on the model as - /// listed in the [embedding guide](/docs/guides/embeddings). - /// - /// The object type, which is always "embedding". - /// A new instance for mocking. - public static Embedding Embedding(long index = default, BinaryData embeddingProperty = null, EmbeddingObject @object = default) - { - return new Embedding(index, embeddingProperty, @object, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The number of tokens used by the prompt. - /// The total number of tokens used by the request. - /// A new instance for mocking. - public static EmbeddingUsage EmbeddingUsage(long promptTokens = default, long totalTokens = default) - { - return new EmbeddingUsage(promptTokens, totalTokens, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The file identifier, which can be referenced in the API endpoints. - /// The size of the file, in bytes. - /// The Unix timestamp (in seconds) for when the file was created. - /// The name of the file. - /// The object type, which is always "file". - /// - /// The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, - /// `assistants`, and `assistants_output`. - /// - /// - /// Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or - /// `error`. - /// - /// - /// Deprecated. 
For details on why a fine-tuning training file failed validation, see the `error` - /// field on `fine_tuning.job`. - /// - /// A new instance for mocking. - public static OpenAIFile OpenAIFile(string id = null, long bytes = default, DateTimeOffset createdAt = default, string filename = null, OpenAIFileObject @object = default, OpenAIFilePurpose purpose = default, OpenAIFileStatus status = default, string statusDetails = null) - { - return new OpenAIFile(id, bytes, createdAt, filename, @object, purpose, status, statusDetails, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// - /// A new instance for mocking. - public static ListFilesResponse ListFilesResponse(IEnumerable data = null, ListFilesResponseObject @object = default) - { - data ??= new List(); - - return new ListFilesResponse(data?.ToList(), @object, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// - /// - /// A new instance for mocking. - public static DeleteFileResponse DeleteFileResponse(string id = null, DeleteFileResponseObject @object = default, bool deleted = default) - { - return new DeleteFileResponse(id, @object, deleted, serializedAdditionalRawData: null); - } - /// Initializes a new instance of . /// /// The ID of an uploaded file that contains training data. @@ -988,105 +336,6 @@ public static FineTuningJobEvent FineTuningJobEvent(string id = null, string @ob return new FineTuningJobEvent(id, @object, createdAt, level, message, serializedAdditionalRawData: null); } - /// Initializes a new instance of . - /// - /// A text description of the desired image(s). The maximum length is 1000 characters for - /// `dall-e-2` and 4000 characters for `dall-e-3`. - /// - /// The model to use for image generation. - /// - /// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is - /// supported. - /// - /// - /// The quality of the image that will be generated. 
`hd` creates images with finer details and - /// greater consistency across the image. This param is only supported for `dall-e-3`. - /// - /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. - /// - /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for - /// `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. - /// - /// - /// The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model - /// to lean towards generating hyper-real and dramatic images. Natural causes the model to produce - /// more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. - /// - /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect - /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - /// - /// A new instance for mocking. - public static CreateImageRequest CreateImageRequest(string prompt = null, CreateImageRequestModel? model = null, long? n = null, CreateImageRequestQuality? quality = null, CreateImageRequestResponseFormat? responseFormat = null, CreateImageRequestSize? size = null, CreateImageRequestStyle? style = null, string user = null) - { - return new CreateImageRequest(prompt, model, n, quality, responseFormat, size, style, user, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// - /// A new instance for mocking. - public static ImagesResponse ImagesResponse(DateTimeOffset created = default, IEnumerable data = null) - { - data ??= new List(); - - return new ImagesResponse(created, data?.ToList(), serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. - /// The URL of the generated image, if `response_format` is `url` (default). 
- /// The prompt that was used to generate the image, if there was any revision to the prompt. - /// A new instance for mocking. - public static Image Image(BinaryData b64Json = null, Uri url = null, string revisedPrompt = null) - { - return new Image(b64Json, url, revisedPrompt, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not - /// provided, image must have transparency, which will be used as the mask. - /// - /// A text description of the desired image(s). The maximum length is 1000 characters. - /// - /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where - /// `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions - /// as `image`. - /// - /// The model to use for image generation. Only `dall-e-2` is supported at this time. - /// The number of images to generate. Must be between 1 and 10. - /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. - /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect - /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - /// - /// A new instance for mocking. - public static CreateImageEditRequest CreateImageEditRequest(BinaryData image = null, string prompt = null, BinaryData mask = null, CreateImageEditRequestModel? model = null, long? n = null, CreateImageEditRequestSize? size = null, CreateImageEditRequestResponseFormat? responseFormat = null, string user = null) - { - return new CreateImageEditRequest(image, prompt, mask, model, n, size, responseFormat, user, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// The image to use as the basis for the variation(s). 
Must be a valid PNG file, less than 4MB, - /// and square. - /// - /// The model to use for image generation. Only `dall-e-2` is supported at this time. - /// The number of images to generate. Must be between 1 and 10. - /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. - /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect - /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - /// - /// A new instance for mocking. - public static CreateImageVariationRequest CreateImageVariationRequest(BinaryData image = null, CreateImageVariationRequestModel? model = null, long? n = null, CreateImageVariationRequestResponseFormat? responseFormat = null, CreateImageVariationRequestSize? size = null, string user = null) - { - return new CreateImageVariationRequest(image, model, n, responseFormat, size, user, serializedAdditionalRawData: null); - } - /// Initializes a new instance of . /// The role of the entity that is creating the message. Currently only `user` is supported. /// The content of the message. @@ -1215,99 +464,6 @@ public static DeleteModelResponse DeleteModelResponse(string id = null, bool del return new DeleteModelResponse(id, deleted, @object, serializedAdditionalRawData: null); } - /// Initializes a new instance of . - /// The input text to classify. - /// - /// Two content moderations models are available: `text-moderation-stable` and - /// `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically - /// upgraded over time. This ensures you are always using our most accurate model. If you use - /// `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy - /// of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. - /// - /// A new instance for mocking. 
- public static CreateModerationRequest CreateModerationRequest(BinaryData input = null, CreateModerationRequestModel? model = null) - { - return new CreateModerationRequest(input, model, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The unique identifier for the moderation request. - /// The model used to generate the moderation results. - /// A list of moderation objects. - /// A new instance for mocking. - public static CreateModerationResponse CreateModerationResponse(string id = null, string model = null, IEnumerable results = null) - { - results ??= new List(); - - return new CreateModerationResponse(id, model, results?.ToList(), serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// Whether the content violates [OpenAI's usage policies](/policies/usage-policies). - /// A list of the categories, and whether they are flagged or not. - /// A list of the categories along with their scores as predicted by model. - /// A new instance for mocking. - public static CreateModerationResponseResult CreateModerationResponseResult(bool flagged = default, CreateModerationResponseResultCategories categories = null, CreateModerationResponseResultCategoryScores categoryScores = null) - { - return new CreateModerationResponseResult(flagged, categories, categoryScores, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// - /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, - /// religion, nationality, sexual orientation, disability status, or caste. Hateful content - /// aimed at non-protected groups (e.g., chess players) is harrassment. - /// - /// - /// Hateful content that also includes violence or serious harm towards the targeted group - /// based on race, gender, ethnicity, religion, nationality, sexual orientation, disability - /// status, or caste. 
- /// - /// Content that expresses, incites, or promotes harassing language towards any target. - /// Harassment content that also includes violence or serious harm towards any target. - /// - /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, - /// and eating disorders. - /// - /// - /// Content where the speaker expresses that they are engaging or intend to engage in acts of - /// self-harm, such as suicide, cutting, and eating disorders. - /// - /// - /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating - /// disorders, or that gives instructions or advice on how to commit such acts. - /// - /// - /// Content meant to arouse sexual excitement, such as the description of sexual activity, or - /// that promotes sexual services (excluding sex education and wellness). - /// - /// Sexual content that includes an individual who is under 18 years old. - /// Content that depicts death, violence, or physical injury. - /// Content that depicts death, violence, or physical injury in graphic detail. - /// A new instance for mocking. - public static CreateModerationResponseResultCategories CreateModerationResponseResultCategories(bool hate = default, bool hateThreatening = default, bool harassment = default, bool harassmentThreatening = default, bool selfHarm = default, bool selfHarmIntent = default, bool selfHarmInstructions = default, bool sexual = default, bool sexualMinors = default, bool violence = default, bool violenceGraphic = default) - { - return new CreateModerationResponseResultCategories(hate, hateThreatening, harassment, harassmentThreatening, selfHarm, selfHarmIntent, selfHarmInstructions, sexual, sexualMinors, violence, violenceGraphic, serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// The score for the category 'hate'. - /// The score for the category 'hate/threatening'. - /// The score for the category 'harassment'. 
- /// The score for the category 'harassment/threatening'. - /// The score for the category 'self-harm'. - /// The score for the category 'self-harm/intent'. - /// The score for the category 'self-harm/instructive'. - /// The score for the category 'sexual'. - /// The score for the category 'sexual/minors'. - /// The score for the category 'violence'. - /// The score for the category 'violence/graphic'. - /// A new instance for mocking. - public static CreateModerationResponseResultCategoryScores CreateModerationResponseResultCategoryScores(double hate = default, double hateThreatening = default, double harassment = default, double harassmentThreatening = default, double selfHarm = default, double selfHarmIntent = default, double selfHarmInstructions = default, double sexual = default, double sexualMinors = default, double violence = default, double violenceGraphic = default) - { - return new CreateModerationResponseResultCategoryScores(hate, hateThreatening, harassment, harassmentThreatening, selfHarm, selfHarmIntent, selfHarmInstructions, sexual, sexualMinors, violence, violenceGraphic, serializedAdditionalRawData: null); - } - /// Initializes a new instance of . /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. /// If no thread is provided, an empty thread will be created. 
diff --git a/.dotnet/src/OpenAI.csproj b/.dotnet/src/OpenAI.csproj index e03a4dc1e..b473b4e63 100644 --- a/.dotnet/src/OpenAI.csproj +++ b/.dotnet/src/OpenAI.csproj @@ -11,6 +11,6 @@ - + diff --git a/.dotnet/src/Polyfill/System.Diagnostics.CodeAnalysis.SetsRequiredMembersAttribute.cs b/.dotnet/src/Polyfill/System.Diagnostics.CodeAnalysis.SetsRequiredMembersAttribute.cs new file mode 100644 index 000000000..26cd00bce --- /dev/null +++ b/.dotnet/src/Polyfill/System.Diagnostics.CodeAnalysis.SetsRequiredMembersAttribute.cs @@ -0,0 +1,8 @@ +#if !NET7_0_OR_GREATER + +namespace System.Diagnostics.CodeAnalysis; + +[AttributeUsage(AttributeTargets.Constructor, AllowMultiple = false, Inherited = false)] +internal sealed class SetsRequiredMembersAttribute : Attribute { } + +#endif // !NET7_0_OR_GREATER diff --git a/.dotnet/src/Polyfill/System.Runtime.CompilerServices.CompilerFeatureRequiredAttribute.cs b/.dotnet/src/Polyfill/System.Runtime.CompilerServices.CompilerFeatureRequiredAttribute.cs new file mode 100644 index 000000000..1b9abe47c --- /dev/null +++ b/.dotnet/src/Polyfill/System.Runtime.CompilerServices.CompilerFeatureRequiredAttribute.cs @@ -0,0 +1,15 @@ +#if !NET7_0_OR_GREATER + +namespace System.Runtime.CompilerServices; + +[AttributeUsage(AttributeTargets.All, AllowMultiple = true, Inherited = false)] +internal sealed class CompilerFeatureRequiredAttribute(string featureName) : Attribute +{ + public string FeatureName { get; } = featureName; + public bool IsOptional { get; init; } + + public const string RefStructs = nameof(RefStructs); + public const string RequiredMembers = nameof(RequiredMembers); +} + +#endif // !NET7_0_OR_GREATER diff --git a/.dotnet/src/Polyfill/System.Runtime.CompilerServices.IsExternalInit.cs b/.dotnet/src/Polyfill/System.Runtime.CompilerServices.IsExternalInit.cs new file mode 100644 index 000000000..f4b6d744f --- /dev/null +++ b/.dotnet/src/Polyfill/System.Runtime.CompilerServices.IsExternalInit.cs @@ -0,0 +1,9 @@ +#if !NET5_0_OR_GREATER 
+ +using System.ComponentModel; +namespace System.Runtime.CompilerServices; + +[EditorBrowsable(EditorBrowsableState.Never)] +internal static class IsExternalInit { } + +#endif // !NET5_0_OR_GREATER diff --git a/.dotnet/src/Polyfill/System.Runtime.CompilerServices.RequiredMemberAttribute.cs b/.dotnet/src/Polyfill/System.Runtime.CompilerServices.RequiredMemberAttribute.cs new file mode 100644 index 000000000..216d76910 --- /dev/null +++ b/.dotnet/src/Polyfill/System.Runtime.CompilerServices.RequiredMemberAttribute.cs @@ -0,0 +1,8 @@ +#if !NET7_0_OR_GREATER + +namespace System.Runtime.CompilerServices; + +[AttributeUsage(AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Field | AttributeTargets.Property, AllowMultiple = false, Inherited = false)] +internal sealed class RequiredMemberAttribute : Attribute { } + +#endif // !NET7_0_OR_GREATER diff --git a/.dotnet/src/Utility/GenericActionPipelinePolicy.cs b/.dotnet/src/Utility/GenericActionPipelinePolicy.cs new file mode 100644 index 000000000..41ccef5ec --- /dev/null +++ b/.dotnet/src/Utility/GenericActionPipelinePolicy.cs @@ -0,0 +1,35 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace OpenAI; + +internal partial class GenericActionPipelinePolicy : PipelinePolicy +{ + private Action _processMessageAction; + + public GenericActionPipelinePolicy(Action processMessageAction) + { + _processMessageAction = processMessageAction; + } + + public override void Process(PipelineMessage message, IReadOnlyList pipeline, int currentIndex) + { + _processMessageAction(message); + if (currentIndex < pipeline.Count - 1) + { + pipeline[currentIndex + 1].Process(message, pipeline, currentIndex + 1); + } + } + + public override async ValueTask ProcessAsync(PipelineMessage message, IReadOnlyList pipeline, int currentIndex) + { + _processMessageAction(message); + if (currentIndex < pipeline.Count - 1) + { + await 
pipeline[currentIndex + 1].ProcessAsync(message, pipeline, currentIndex + 1); + } + } +} \ No newline at end of file diff --git a/.dotnet/src/Utility/SseAsyncEnumerator.cs b/.dotnet/src/Utility/SseAsyncEnumerator.cs new file mode 100644 index 000000000..743a1bedd --- /dev/null +++ b/.dotnet/src/Utility/SseAsyncEnumerator.cs @@ -0,0 +1,59 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Runtime.CompilerServices; +using System.Text.Json; +using System.Threading; + +namespace OpenAI; + +internal static class SseAsyncEnumerator +{ + internal static async IAsyncEnumerable EnumerateFromSseStream( + Stream stream, + Func> multiElementDeserializer, + [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + try + { + using SseReader sseReader = new(stream); + while (!cancellationToken.IsCancellationRequested) + { + SseLine? sseEvent = await sseReader.TryReadSingleFieldEventAsync().ConfigureAwait(false); + if (sseEvent is not null) + { + ReadOnlyMemory name = sseEvent.Value.FieldName; + if (!name.Span.SequenceEqual("data".AsSpan())) + { + throw new InvalidDataException(); + } + ReadOnlyMemory value = sseEvent.Value.FieldValue; + if (value.Span.SequenceEqual("[DONE]".AsSpan())) + { + break; + } + using JsonDocument sseMessageJson = JsonDocument.Parse(value); + IEnumerable newItems = multiElementDeserializer.Invoke(sseMessageJson.RootElement); + foreach (T item in newItems) + { + yield return item; + } + } + } + } + finally + { + // Always dispose the stream immediately once enumeration is complete for any reason + stream.Dispose(); + } + } + + internal static IAsyncEnumerable EnumerateFromSseStream( + Stream stream, + Func elementDeserializer, + CancellationToken cancellationToken = default) + => EnumerateFromSseStream( + stream, + (element) => new T[] { elementDeserializer.Invoke(element) }, + cancellationToken); +} \ No newline at end of file diff --git a/.dotnet/src/Utility/SseLine.cs b/.dotnet/src/Utility/SseLine.cs 
new file mode 100644 index 000000000..4d82315f9 --- /dev/null +++ b/.dotnet/src/Utility/SseLine.cs @@ -0,0 +1,29 @@ +using System; + +namespace OpenAI; + +// SSE specification: https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream +internal readonly struct SseLine +{ + private readonly string _original; + private readonly int _colonIndex; + private readonly int _valueIndex; + + public static SseLine Empty { get; } = new SseLine(string.Empty, 0, false); + + internal SseLine(string original, int colonIndex, bool hasSpaceAfterColon) + { + _original = original; + _colonIndex = colonIndex; + _valueIndex = colonIndex + (hasSpaceAfterColon ? 2 : 1); + } + + public bool IsEmpty => _original.Length == 0; + public bool IsComment => !IsEmpty && _original[0] == ':'; + + // TODO: we should not expose UTF16 publicly + public ReadOnlyMemory FieldName => _original.AsMemory(0, _colonIndex); + public ReadOnlyMemory FieldValue => _original.AsMemory(_valueIndex); + + public override string ToString() => _original; +} \ No newline at end of file diff --git a/.dotnet/src/Utility/SseReader.cs b/.dotnet/src/Utility/SseReader.cs new file mode 100644 index 000000000..cf0301408 --- /dev/null +++ b/.dotnet/src/Utility/SseReader.cs @@ -0,0 +1,118 @@ +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.IO; +using System.Threading.Tasks; + +namespace OpenAI; + +internal sealed class SseReader : IDisposable + { + private readonly Stream _stream; + private readonly StreamReader _reader; + private bool _disposedValue; + + public SseReader(Stream stream) + { + _stream = stream; + _reader = new StreamReader(stream); + } + + public SseLine? TryReadSingleFieldEvent() + { + while (true) + { + SseLine? line = TryReadLine(); + if (line == null) + return null; + if (line.Value.IsEmpty) + throw new InvalidDataException("event expected."); + SseLine? 
empty = TryReadLine(); + if (empty != null && !empty.Value.IsEmpty) + throw new NotSupportedException("Multi-filed events not supported."); + if (!line.Value.IsComment) + return line; // skip comment lines + } + } + + // TODO: we should support cancellation tokens, but StreamReader does not in NS2 + public async Task TryReadSingleFieldEventAsync() + { + while (true) + { + SseLine? line = await TryReadLineAsync().ConfigureAwait(false); + if (line == null) + return null; + if (line.Value.IsEmpty) + throw new InvalidDataException("event expected."); + SseLine? empty = await TryReadLineAsync().ConfigureAwait(false); + if (empty != null && !empty.Value.IsEmpty) + throw new NotSupportedException("Multi-filed events not supported."); + if (!line.Value.IsComment) + return line; // skip comment lines + } + } + + public SseLine? TryReadLine() + { + string lineText = _reader.ReadLine(); + if (lineText == null) + return null; + if (lineText.Length == 0) + return SseLine.Empty; + if (TryParseLine(lineText, out SseLine line)) + return line; + return null; + } + + // TODO: we should support cancellation tokens, but StreamReader does not in NS2 + public async Task TryReadLineAsync() + { + string lineText = await _reader.ReadLineAsync().ConfigureAwait(false); + if (lineText == null) + return null; + if (lineText.Length == 0) + return SseLine.Empty; + if (TryParseLine(lineText, out SseLine line)) + return line; + return null; + } + + private static bool TryParseLine(string lineText, out SseLine line) + { + if (lineText.Length == 0) + { + line = default; + return false; + } + + ReadOnlySpan lineSpan = lineText.AsSpan(); + int colonIndex = lineSpan.IndexOf(':'); + ReadOnlySpan fieldValue = lineSpan.Slice(colonIndex + 1); + + bool hasSpace = false; + if (fieldValue.Length > 0 && fieldValue[0] == ' ') + hasSpace = true; + line = new SseLine(lineText, colonIndex, hasSpace); + return true; + } + + private void Dispose(bool disposing) + { + if (!_disposedValue) + { + if (disposing) + { + 
_reader.Dispose(); + _stream.Dispose(); + } + + _disposedValue = true; + } + } + public void Dispose() + { + Dispose(disposing: true); + GC.SuppressFinalize(this); + } + } \ No newline at end of file diff --git a/.dotnet/src/Utility/StreamingResult.cs b/.dotnet/src/Utility/StreamingResult.cs new file mode 100644 index 000000000..a1b6ff538 --- /dev/null +++ b/.dotnet/src/Utility/StreamingResult.cs @@ -0,0 +1,95 @@ +using System.ClientModel; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Threading; +using System.Collections.Generic; +using System; + +namespace OpenAI; + +/// +/// Represents an operation response with streaming content that can be deserialized and enumerated while the response +/// is still being received. +/// +/// The data type representative of distinct, streamable items. +public class StreamingClientResult + : IDisposable + , IAsyncEnumerable +{ + private ClientResult _rawResult { get; } + private IAsyncEnumerable _asyncEnumerableSource { get; } + private bool _disposedValue { get; set; } + + private StreamingClientResult() { } + + private StreamingClientResult( + ClientResult rawResult, + Func> asyncEnumerableProcessor) + { + _rawResult = rawResult; + _asyncEnumerableSource = asyncEnumerableProcessor.Invoke(rawResult); + } + + /// + /// Creates a new instance of using the provided underlying HTTP response. The + /// provided function will be used to resolve the response into an asynchronous enumeration of streamed response + /// items. + /// + /// The HTTP response. + /// + /// The function that will resolve the provided response into an IAsyncEnumerable. + /// + /// + /// A new instance of that will be capable of asynchronous enumeration of + /// items from the HTTP response. 
+ /// + internal static StreamingClientResult CreateFromResponse( + ClientResult result, + Func> asyncEnumerableProcessor) + { + return new(result, asyncEnumerableProcessor); + } + + /// + /// Gets the underlying instance that this may enumerate + /// over. + /// + /// The instance attached to this . + public PipelineResponse GetRawResponse() => _rawResult.GetRawResponse(); + + /// + /// Gets the asynchronously enumerable collection of distinct, streamable items in the response. + /// + /// + /// The return value of this method may be used with the "await foreach" statement. + /// + /// As explicitly implements , callers may + /// enumerate a instance directly instead of calling this method. + /// + /// + /// + public IAsyncEnumerable EnumerateValues() => this; + + /// + public void Dispose() + { + Dispose(disposing: true); + GC.SuppressFinalize(this); + } + + /// + protected virtual void Dispose(bool disposing) + { + if (!_disposedValue) + { + if (disposing) + { + _rawResult?.GetRawResponse()?.Dispose(); + } + _disposedValue = true; + } + } + + IAsyncEnumerator IAsyncEnumerable.GetAsyncEnumerator(CancellationToken cancellationToken) + => _asyncEnumerableSource.GetAsyncEnumerator(cancellationToken); +} \ No newline at end of file diff --git a/.dotnet/src/Utility/System.ClientModel.MultipartContent.cs b/.dotnet/src/Utility/System.ClientModel.MultipartContent.cs new file mode 100644 index 000000000..6342d7552 --- /dev/null +++ b/.dotnet/src/Utility/System.ClientModel.MultipartContent.cs @@ -0,0 +1,367 @@ + + +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.IO; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace System.ClientModel; + +// Placeholder implementation adapted from: +// https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/src/Shared/Multipart/MultipartContent.cs + +internal partial class MultipartContent : BinaryContent +{ + #region Fields + + private const 
string CrLf = "\r\n"; + private const string ColonSP = ": "; + + private static readonly int s_crlfLength = GetEncodedLength(CrLf); + private static readonly int s_dashDashLength = GetEncodedLength("--"); + private static readonly int s_colonSpaceLength = GetEncodedLength(ColonSP); + + private readonly List _nestedContent; + private readonly string _subtype; + private readonly string _boundary; + internal readonly Dictionary _headers; + + #endregion Fields + + #region Construction + + public MultipartContent() + : this("mixed", GetDefaultBoundary()) + { } + + public MultipartContent(string subtype) + : this(subtype, GetDefaultBoundary()) + { } + + /// + /// Initializes a new instance of the class. + /// + /// The multipart sub type. + /// The boundary string for the multipart form data content. + public MultipartContent(string subtype, string boundary) + { + ValidateBoundary(boundary); + _subtype = subtype; + + // see https://www.ietf.org/rfc/rfc1521.txt page 29. + _boundary = boundary.Contains(":") ? $"\"{boundary}\"" : boundary; + _headers = new Dictionary + { + ["content-type"] = $"multipart/{_subtype}; boundary={_boundary}" + }; + + _nestedContent = new List(); + } + + private static void ValidateBoundary(string boundary) + { + // NameValueHeaderValue is too restrictive for boundary. + // Instead validate it ourselves and then quote it. + if (string.IsNullOrWhiteSpace(boundary)) throw new ArgumentException(nameof(boundary)); + + // cspell:disable + // RFC 2046 Section 5.1.1 + // boundary := 0*69 bcharsnospace + // bchars := bcharsnospace / " " + // bcharsnospace := DIGIT / ALPHA / "'" / "(" / ")" / "+" / "_" / "," / "-" / "." / "/" / ":" / "=" / "?" + // cspell:enable + if (boundary.Length > 70) + { + throw new ArgumentOutOfRangeException(nameof(boundary), boundary, $"The field cannot be longer than {70} characters."); + } + // Cannot end with space. 
+ if (boundary.EndsWith(" ", StringComparison.InvariantCultureIgnoreCase)) + { + throw new ArgumentException($"The format of value '{boundary}' is invalid.", nameof(boundary)); + } + + const string AllowedMarks = @"'()+_,-./:=? "; + + foreach (char ch in boundary) + { + if (('0' <= ch && ch <= '9') || // Digit. + ('a' <= ch && ch <= 'z') || // alpha. + ('A' <= ch && ch <= 'Z') || // ALPHA. + AllowedMarks.Contains(char.ToString(ch))) // Marks. + { + // Valid. + } + else + { + throw new ArgumentException($"The format of value '{boundary}' is invalid.", nameof(boundary)); + } + } + } + + private static string GetDefaultBoundary() + { + return Guid.NewGuid().ToString(); + } + + /// + /// Add content type header to the request. + /// + /// The request. + public void ApplyToRequest(PipelineRequest request) + { + request.Headers.Set("content-type", $"multipart/{_subtype}; boundary={_boundary}"); + request.Content = this; + } + + /// + /// Add HTTP content to a collection of RequestContent objects that + /// get serialized to multipart/form-data MIME type. + /// + /// The Request content to add to the collection. + public virtual void Add(BinaryContent content) + { + if (content is null) throw new ArgumentNullException(nameof(content)); + AddInternal(content, null); + } + + /// + /// Add HTTP content to a collection of RequestContent objects that + /// get serialized to multipart/form-data MIME type. + /// + /// The Request content to add to the collection. + /// The headers to add to the collection. 
+ public virtual void Add(BinaryContent content, Dictionary headers) + { + if (content is null) throw new ArgumentNullException(nameof(content)); + if (headers is null) throw new ArgumentNullException(nameof(headers)); + + AddInternal(content, headers); + } + + private void AddInternal(BinaryContent content, Dictionary headers) + { + headers ??= []; + _nestedContent.Add(new MultipartRequestContent(content, headers)); + } + + #endregion Construction + + #region Dispose + + /// + /// Frees resources held by the object. + /// + public override void Dispose() + { + foreach (MultipartRequestContent content in _nestedContent) + { + content.RequestContent.Dispose(); + } + _nestedContent.Clear(); + } + + #endregion Dispose + + #region Serialization + + // for-each content + // write "--" + boundary + // for-each content header + // write header: header-value + // write content.WriteTo[Async] + // write "--" + boundary + "--" + // Can't be canceled directly by the user. If the overall request is canceled + // then the stream will be closed an exception thrown. + /// + /// + /// + /// + /// + /// + public override void WriteTo(Stream stream, CancellationToken cancellationToken) + { + if (stream is null) throw new ArgumentNullException(nameof(stream)); + + try + { + // Write start boundary. + EncodeStringToStream(stream, "--" + _boundary + CrLf); + + // Write each nested content. + var output = new StringBuilder(); + for (int contentIndex = 0; contentIndex < _nestedContent.Count; contentIndex++) + { + // Write divider, headers, and content. + BinaryContent content = _nestedContent[contentIndex].RequestContent; + Dictionary headers = _nestedContent[contentIndex].Headers; + EncodeStringToStream(stream, SerializeHeadersToString(output, contentIndex, headers)); + content.WriteTo(stream, cancellationToken); + } + + // Write footer boundary. 
+ EncodeStringToStream(stream, CrLf + "--" + _boundary + "--" + CrLf); + } + catch (Exception) + { + throw; + } + } + + // for-each content + // write "--" + boundary + // for-each content header + // write header: header-value + // write content.WriteTo[Async] + // write "--" + boundary + "--" + // Can't be canceled directly by the user. If the overall request is canceled + // then the stream will be closed an exception thrown. + /// + /// + /// + /// + /// + /// + public override Task WriteToAsync(Stream stream, CancellationToken cancellation) => + SerializeToStreamAsync(stream, cancellation); + + private async Task SerializeToStreamAsync(Stream stream, CancellationToken cancellationToken) + { + if (stream is null) throw new ArgumentNullException(nameof(stream)); + try + { + // Write start boundary. + await EncodeStringToStreamAsync(stream, "--" + _boundary + CrLf, cancellationToken).ConfigureAwait(false); + + // Write each nested content. + var output = new StringBuilder(); + for (int contentIndex = 0; contentIndex < _nestedContent.Count; contentIndex++) + { + // Write divider, headers, and content. + BinaryContent content = _nestedContent[contentIndex].RequestContent; + Dictionary headers = _nestedContent[contentIndex].Headers; + await EncodeStringToStreamAsync(stream, SerializeHeadersToString(output, contentIndex, headers), cancellationToken).ConfigureAwait(false); + await content.WriteToAsync(stream, cancellationToken).ConfigureAwait(false); + } + + // Write footer boundary. + await EncodeStringToStreamAsync(stream, CrLf + "--" + _boundary + "--" + CrLf, cancellationToken).ConfigureAwait(false); + } + catch (Exception) + { + throw; + } + } + + private string SerializeHeadersToString(StringBuilder scratch, int contentIndex, Dictionary headers) + { + scratch.Clear(); + + // Add divider. + if (contentIndex != 0) // Write divider for all but the first content. 
+ { + scratch.Append(CrLf + "--"); // const strings + scratch.Append(_boundary); + scratch.Append(CrLf); + } + + // Add headers. + foreach (KeyValuePair header in headers) + { + scratch.Append(header.Key); + scratch.Append(": "); + scratch.Append(header.Value); + scratch.Append(CrLf); + } + + // Extra CRLF to end headers (even if there are no headers). + scratch.Append(CrLf); + + return scratch.ToString(); + } + + private static void EncodeStringToStream(Stream stream, string input) + { + byte[] buffer = Encoding.Default.GetBytes(input); + stream.Write(buffer, 0, buffer.Length); + } + + private static Task EncodeStringToStreamAsync(Stream stream, string input, CancellationToken cancellationToken) + { + byte[] buffer = Encoding.Default.GetBytes(input); + return stream.WriteAsync(buffer, 0, buffer.Length, cancellationToken); + } + + /// + /// Attempts to compute the length of the underlying content, if available. + /// + /// The length of the underlying data. + public override bool TryComputeLength(out long length) + { + int boundaryLength = GetEncodedLength(_boundary); + + long currentLength = 0; + long internalBoundaryLength = s_crlfLength + s_dashDashLength + boundaryLength + s_crlfLength; + + // Start Boundary. + currentLength += s_dashDashLength + boundaryLength + s_crlfLength; + + bool first = true; + foreach (MultipartRequestContent content in _nestedContent) + { + if (first) + { + first = false; // First boundary already written. + } + else + { + // Internal Boundary. + currentLength += internalBoundaryLength; + } + + // Headers. + foreach (KeyValuePair headerPair in content.Headers) + { + currentLength += GetEncodedLength(headerPair.Key) + s_colonSpaceLength; + currentLength += GetEncodedLength(headerPair.Value); + currentLength += s_crlfLength; + } + + currentLength += s_crlfLength; + + // Content. 
+ if (!content.RequestContent.TryComputeLength(out long tempContentLength)) + { + length = 0; + return false; + } + currentLength += tempContentLength; + } + + // Terminating boundary. + currentLength += s_crlfLength + s_dashDashLength + boundaryLength + s_dashDashLength + s_crlfLength; + + length = currentLength; + return true; + } + + private static int GetEncodedLength(string input) + { + return Encoding.Default.GetByteCount(input); + } + + #endregion Serialization + + private class MultipartRequestContent + { + public readonly BinaryContent RequestContent; + public Dictionary Headers; + + public MultipartRequestContent(BinaryContent content, Dictionary headers) + { + RequestContent = content; + Headers = headers; + } + } +} \ No newline at end of file diff --git a/.dotnet/src/Utility/System.ClientModel.MultipartFormDataContent.cs b/.dotnet/src/Utility/System.ClientModel.MultipartFormDataContent.cs new file mode 100644 index 000000000..9faceac1c --- /dev/null +++ b/.dotnet/src/Utility/System.ClientModel.MultipartFormDataContent.cs @@ -0,0 +1,117 @@ +using System.Collections.Generic; + +namespace System.ClientModel; + +// Placeholder implementation adapted from: +// https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/src/Shared/Multipart/MultipartFormDataContent.cs + +internal partial class MultipartFormDataContent : MultipartContent +{ + // Copyright (c) Microsoft Corporation. All rights reserved. + // Licensed under the MIT License. + +#nullable disable + + #region Fields + + private const string FormData = "form-data"; + + #endregion Fields + + #region Construction + + /// + /// Initializes a new instance of the class. + /// + public MultipartFormDataContent() : base(FormData) + { } + + /// + /// Initializes a new instance of the class. + /// + /// The boundary string for the multipart form data content. 
+ public MultipartFormDataContent(string boundary) : base(FormData, boundary) + { } + + #endregion Construction + + /// + /// Add HTTP content to a collection of RequestContent objects that + /// get serialized to multipart/form-data MIME type. + /// + /// The Request content to add to the collection. + public override void Add(BinaryContent content) + { + if (content is null) throw new ArgumentNullException(nameof(content)); + AddInternal(content, null, null, null); + } + + /// + /// Add HTTP content to a collection of RequestContent objects that + /// get serialized to multipart/form-data MIME type. + /// + /// The Request content to add to the collection. + /// The headers to add to the collection. + public override void Add(BinaryContent content, Dictionary headers) + { + if (content is null) throw new ArgumentNullException(nameof(content)); + if (headers is null) throw new ArgumentNullException(nameof(headers)); + + AddInternal(content, headers, null, null); + } + + /// + /// Add HTTP content to a collection of RequestContent objects that + /// get serialized to multipart/form-data MIME type. + /// + /// The Request content to add to the collection. + /// The name for the request content to add. + /// The headers to add to the collection. + public void Add(BinaryContent content, string name, Dictionary headers) + { + if (content is null) throw new ArgumentNullException(nameof(content)); + if (string.IsNullOrWhiteSpace(name)) throw new ArgumentException(nameof(name)); + + AddInternal(content, headers, name, null); + } + + /// + /// Add HTTP content to a collection of RequestContent objects that + /// get serialized to multipart/form-data MIME type. + /// + /// The Request content to add to the collection. + /// The name for the request content to add. + /// The file name for the request content to add to the collection. + /// The headers to add to the collection. 
+ public void Add(BinaryContent content, string name, string fileName, Dictionary headers) + { + if (content is null) throw new ArgumentNullException(nameof(content)); + if (string.IsNullOrWhiteSpace(name)) throw new ArgumentException(nameof(name)); + if (string.IsNullOrWhiteSpace(fileName)) throw new ArgumentException(nameof(fileName)); + + AddInternal(content, headers, name, fileName); + } + + private void AddInternal(BinaryContent content, Dictionary headers, string name, string fileName) + { + headers ??= []; + + if (!headers.ContainsKey("Content-Disposition")) + { + var value = FormData; + + if (name != null) + { + value = value + "; name=" + name; + } + if (fileName != null) + { + value = value + "; filename=" + fileName; + } + + headers.Add("Content-Disposition", value); + } + + base.Add(content, headers); + } +} \ No newline at end of file diff --git a/.dotnet/tests/Generated/Tests/AssistantsTests.cs b/.dotnet/tests/Generated/Tests/AssistantsTests.cs deleted file mode 100644 index dde077712..000000000 --- a/.dotnet/tests/Generated/Tests/AssistantsTests.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - public partial class AssistantsTests - { - [Test] - public void SmokeTest() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - Assistants client = new OpenAIClient(credential).GetAssistantsClient(); - Assert.IsNotNull(client); - } - } -} diff --git a/.dotnet/tests/Generated/Tests/AudioTests.cs b/.dotnet/tests/Generated/Tests/AudioTests.cs deleted file mode 100644 index f14a6f69d..000000000 --- a/.dotnet/tests/Generated/Tests/AudioTests.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - public partial class AudioTests - { - [Test] - public void SmokeTest() 
- { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - Audio client = new OpenAIClient(credential).GetAudioClient(); - Assert.IsNotNull(client); - } - } -} diff --git a/.dotnet/tests/Generated/Tests/ChatTests.cs b/.dotnet/tests/Generated/Tests/ChatTests.cs deleted file mode 100644 index b1ac76e65..000000000 --- a/.dotnet/tests/Generated/Tests/ChatTests.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - public partial class ChatTests - { - [Test] - public void SmokeTest() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - Chat client = new OpenAIClient(credential).GetChatClient(); - Assert.IsNotNull(client); - } - } -} diff --git a/.dotnet/tests/Generated/Tests/CompletionsTests.cs b/.dotnet/tests/Generated/Tests/CompletionsTests.cs deleted file mode 100644 index 59958da6b..000000000 --- a/.dotnet/tests/Generated/Tests/CompletionsTests.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - public partial class CompletionsTests - { - [Test] - public void SmokeTest() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - Completions client = new OpenAIClient(credential).GetCompletionsClient(); - Assert.IsNotNull(client); - } - } -} diff --git a/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs b/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs deleted file mode 100644 index b4e115c86..000000000 --- a/.dotnet/tests/Generated/Tests/EmbeddingsTests.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - public partial class EmbeddingsTests 
- { - [Test] - public void SmokeTest() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - Embeddings client = new OpenAIClient(credential).GetEmbeddingsClient(); - Assert.IsNotNull(client); - } - } -} diff --git a/.dotnet/tests/Generated/Tests/FilesTests.cs b/.dotnet/tests/Generated/Tests/FilesTests.cs deleted file mode 100644 index 64ebbed83..000000000 --- a/.dotnet/tests/Generated/Tests/FilesTests.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - public partial class FilesTests - { - [Test] - public void SmokeTest() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - Files client = new OpenAIClient(credential).GetFilesClient(); - Assert.IsNotNull(client); - } - } -} diff --git a/.dotnet/tests/Generated/Tests/FineTuningTests.cs b/.dotnet/tests/Generated/Tests/FineTuningTests.cs deleted file mode 100644 index 324b4d458..000000000 --- a/.dotnet/tests/Generated/Tests/FineTuningTests.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - public partial class FineTuningTests - { - [Test] - public void SmokeTest() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - FineTuning client = new OpenAIClient(credential).GetFineTuningClient(); - Assert.IsNotNull(client); - } - } -} diff --git a/.dotnet/tests/Generated/Tests/ImagesTests.cs b/.dotnet/tests/Generated/Tests/ImagesTests.cs deleted file mode 100644 index 96b7146de..000000000 --- a/.dotnet/tests/Generated/Tests/ImagesTests.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - 
public partial class ImagesTests - { - [Test] - public void SmokeTest() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - Images client = new OpenAIClient(credential).GetImagesClient(); - Assert.IsNotNull(client); - } - } -} diff --git a/.dotnet/tests/Generated/Tests/MessagesTests.cs b/.dotnet/tests/Generated/Tests/MessagesTests.cs deleted file mode 100644 index 89933cb52..000000000 --- a/.dotnet/tests/Generated/Tests/MessagesTests.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - public partial class MessagesTests - { - [Test] - public void SmokeTest() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - Messages client = new OpenAIClient(credential).GetMessagesClient(); - Assert.IsNotNull(client); - } - } -} diff --git a/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs b/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs deleted file mode 100644 index 3ac25fa3d..000000000 --- a/.dotnet/tests/Generated/Tests/ModelsOpsTests.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - public partial class ModelsOpsTests - { - [Test] - public void SmokeTest() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - ModelsOps client = new OpenAIClient(credential).GetModelsOpsClient(); - Assert.IsNotNull(client); - } - } -} diff --git a/.dotnet/tests/Generated/Tests/ModerationsTests.cs b/.dotnet/tests/Generated/Tests/ModerationsTests.cs deleted file mode 100644 index 0413ef684..000000000 --- a/.dotnet/tests/Generated/Tests/ModerationsTests.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using 
NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - public partial class ModerationsTests - { - [Test] - public void SmokeTest() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - Moderations client = new OpenAIClient(credential).GetModerationsClient(); - Assert.IsNotNull(client); - } - } -} diff --git a/.dotnet/tests/Generated/Tests/RunsTests.cs b/.dotnet/tests/Generated/Tests/RunsTests.cs deleted file mode 100644 index 8bc6927a9..000000000 --- a/.dotnet/tests/Generated/Tests/RunsTests.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - public partial class RunsTests - { - [Test] - public void SmokeTest() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - Runs client = new OpenAIClient(credential).GetRunsClient(); - Assert.IsNotNull(client); - } - } -} diff --git a/.dotnet/tests/Generated/Tests/ThreadsTests.cs b/.dotnet/tests/Generated/Tests/ThreadsTests.cs deleted file mode 100644 index e7f4583af..000000000 --- a/.dotnet/tests/Generated/Tests/ThreadsTests.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using NUnit.Framework; -using OpenAI; - -namespace OpenAI.Tests -{ - public partial class ThreadsTests - { - [Test] - public void SmokeTest() - { - ApiKeyCredential credential = new ApiKeyCredential(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); - Threads client = new OpenAIClient(credential).GetThreadsClient(); - Assert.IsNotNull(client); - } - } -} diff --git a/.dotnet/tests/OpenAI.Tests.csproj b/.dotnet/tests/OpenAI.Tests.csproj index e590c0e52..694b63bba 100644 --- a/.dotnet/tests/OpenAI.Tests.csproj +++ b/.dotnet/tests/OpenAI.Tests.csproj @@ -10,6 +10,7 @@ + diff --git a/.dotnet/tests/Samples/AssistantsSamples.cs 
b/.dotnet/tests/Samples/AssistantsSamples.cs new file mode 100644 index 000000000..1b59fc41c --- /dev/null +++ b/.dotnet/tests/Samples/AssistantsSamples.cs @@ -0,0 +1,167 @@ +using NUnit.Framework; +using OpenAI.Assistants; +using OpenAI.Files; +using System; +using System.IO; +using System.Threading; + +namespace OpenAI.Tests.Examples; + +public partial class AssistantExamples +{ + [Test] + [Ignore("Compilation validation only")] + public void ListAllAssistants() + { + AssistantClient client = new(); + string latestId = null; + bool continueQuery = true; + int count = 0; + + while (continueQuery) + { + var assistantList = client.GetAssistants(previousAssistantId: latestId).Value; + foreach (Assistant assistant in assistantList) + { + Console.WriteLine($"[{count,3}] {assistant.Id} {assistant.CreatedAt:s} {assistant.Name}"); + latestId = assistant.Id; + count++; + } + continueQuery = assistantList.HasMore; + } + } + + [Test] + [Ignore("Compilation validation only")] + public void CreateClients() + { + OpenAIClient openAIClient = new(""); + FileClient fileClient = openAIClient.GetFileClient(); + AssistantClient assistantClient = openAIClient.GetAssistantClient(); + } + + [Test] + [Ignore("Compilation validation only")] + public void SimpleRetrievalAugmentedGeneration() + { + OpenAIClient openAIClient = new(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + FileClient fileClient = openAIClient.GetFileClient(); + AssistantClient assistantClient = openAIClient.GetAssistantClient(); + + // First, let's contrive a document we'll use retrieval with and upload it. 
+ BinaryData document = BinaryData.FromString(""" + { + "description": "This document contains the sale history data for Contoso products.", + "sales": [ + { + "month": "January", + "by_product": { + "113043": 15, + "113045": 12, + "113049": 2 + } + }, + { + "month": "February", + "by_product": { + "113045": 22 + } + }, + { + "month": "March", + "by_product": { + "113045": 16, + "113055": 5 + } + } + ] + } + """); + + OpenAIFileInfo openAIFileInfo = fileClient.UploadFile(document, "test-rag-file-delete-me.json", OpenAIFilePurpose.Assistants); + + // Now, we'll create a client intended to help with that data + AssistantCreationOptions assistantOptions = new() + { + Name = "Example: Contoso sales RAG", + Instructions = + "You are an assistant that looks up sales data and helps visualize the information based" + + " on user queries. When asked to generate a graph, chart, or other visualization, use" + + " the code interpreter tool to do so.", + FileIds = { openAIFileInfo.Id }, + Tools = + { + new RetrievalToolDefinition(), + new CodeInterpreterToolDefinition(), + }, + Metadata = { ["test_key_delete_me"] = "true" }, + }; + + Assistant assistant = assistantClient.CreateAssistant("gpt-4-1106-preview", assistantOptions); + + // Now we'll create a thread with a user query about the data already associated with the assistant, then run it + ThreadCreationOptions threadOptions = new() + { + Messages = + { + new ThreadInitializationMessage( + MessageRole.User, + "How well did product 113045 sell in February? 
Graph its trend over time."), + } + }; + + ThreadRun threadRun = assistantClient.CreateThreadAndRun(assistant.Id, threadOptions); + + // Check back to see when the run is done + do + { + Thread.Sleep(TimeSpan.FromSeconds(1)); + threadRun = assistantClient.GetRun(threadRun.ThreadId, threadRun.Id); + } while (threadRun.Status == RunStatus.Queued || threadRun.Status == RunStatus.InProgress); + + // Finally, we'll print out the full history for the thread that includes the augmented generation + ListQueryPage messages = assistantClient.GetMessages(threadRun.ThreadId); + + for (int i = messages.Count - 1; i >= 0; i--) + { + ThreadMessage message = messages[i]; + + Console.WriteLine($"[{message.Role.ToString().ToUpper()}]:"); + foreach (MessageContent contentItem in message.ContentItems) + { + if (contentItem is MessageTextContent textContent) + { + Console.WriteLine($"{textContent.Text}"); + + if (textContent.Annotations.Count > 0) + { + Console.WriteLine(); + } + + // Include annotations, if any. 
+ foreach (TextContentAnnotation annotation in textContent.Annotations) + { + if (annotation is TextContentFileCitationAnnotation citationAnnotation) + { + Console.WriteLine($"* File citation, file ID: {citationAnnotation.FileId}"); + } + else if (annotation is TextContentFilePathAnnotation pathAnnotation) + { + Console.WriteLine($"* File path, file ID: {pathAnnotation.FileId}"); + } + } + } + else if (contentItem is MessageImageFileContent imageFileContent) + { + OpenAIFileInfo imageInfo = fileClient.GetFileInfo(imageFileContent.FileId); + BinaryData imageBytes = fileClient.DownloadFile(imageFileContent.FileId); + using FileStream stream = File.OpenWrite($"{ imageInfo.Filename }.png"); + imageBytes.ToStream().CopyTo(stream); + + Console.WriteLine($""); + } + } + Console.WriteLine(); + } + } +} diff --git a/.dotnet/tests/Samples/Chat/ChatSamples.cs b/.dotnet/tests/Samples/Chat/ChatSamples.cs new file mode 100644 index 000000000..f08824304 --- /dev/null +++ b/.dotnet/tests/Samples/Chat/ChatSamples.cs @@ -0,0 +1,128 @@ +using NUnit.Framework; +using OpenAI.Audio; +using OpenAI.Chat; +using System; +using System.ClientModel; +using System.Text.Json; +using System.Threading.Tasks; + +namespace OpenAI.Tests.Examples; + +public partial class ChatSamples +{ + [Test] + [Ignore("Compilation validation only")] + public void CreateChatClient() + { + ChatClient client = new("gpt-3.5-turbo", ""); + } + + [Test] + [Ignore("Compilation validation only")] + public void CreateClients() + { + OpenAIClient client = new(""); + AudioClient ttsClient = client.GetAudioClient("tts-1"); + AudioClient whisperClient = client.GetAudioClient("whisper-1"); + } + + [Test] + [Ignore("Compilation validation only")] + public void HelloWorldChat() + { + ChatClient client = new("gpt-3.5-turbo", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + ChatCompletion chatCompletion = client.CompleteChat("How does AI work? 
Explain it in simple terms."); + + Console.WriteLine($"[ASSISTANT]:"); + Console.WriteLine($"{ chatCompletion.Content }"); + } + + [Test] + [Ignore("Compilation validation only")] + public void HelloWorldChatProtocol() + { + ChatClient client = new("gpt-3.5-turbo", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + BinaryData input = BinaryData.FromString(""" + { + "model": "gpt-3.5-turbo", + "messages": [ + { + "role": "user", + "content": "How does AI work? Explain it in simple terms." + } + ] + } + """); + + ClientResult result = client.CompleteChat(BinaryContent.Create(input)); + BinaryData output = result.GetRawResponse().Content; + + using JsonDocument outputAsJson = JsonDocument.Parse(output.ToString()); + string message = outputAsJson.RootElement + .GetProperty("choices")[0] + .GetProperty("message") + .GetProperty("content") + .GetString(); + + Console.WriteLine($"[ASSISTANT]:"); + Console.WriteLine($"{ message }"); + } + + + [Test] + [Ignore("Compilation validation only")] + public async void HelloWorldChatAsync() + { + ChatClient client = new("gpt-3.5-turbo", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + ChatCompletion chatCompletion = await client.CompleteChatAsync("How does AI work? Explain it in simple terms."); + + Console.WriteLine($"[ASSISTANT]: {chatCompletion.Content}"); + } + + [Test] + [Ignore("Compilation validation only")] + public async Task HelloWorldStreamingChat() + { + ChatClient client = new("gpt-3.5-turbo", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + StreamingClientResult result = + client.CompleteChatStreaming("How does AI work? 
Explain it in simple terms."); + + Console.WriteLine("[ASSISTANT]: "); + + await foreach (StreamingChatUpdate chatUpdate in result) + { + Console.Write(chatUpdate.ContentUpdate); + } + } + + [Test] + [Ignore("Compilation validation only")] + public void ChatWithImage(Uri imageUri = null) + { + ChatClient client = new("gpt-4-vision-preview", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + ChatCompletion chatCompletion = client.CompleteChat( + [ + new ChatRequestUserMessage( + "Describe this image for me", + ChatMessageContent.CreateImage(imageUri)), + ]); + } + + [Test] + [Ignore("Compilation validation only")] + public void ChatWithTools() + { + ChatClient client = new("gpt-3.5-turbo", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + ChatFunctionToolDefinition getSecretWordTool = new() + { + Name = "get_secret_word", + Description = "gets the arbitrary secret word from the caller" + }; + } +} diff --git a/.dotnet/tests/Samples/Chat/Sample_FunctionCalling.cs b/.dotnet/tests/Samples/Chat/Sample_FunctionCalling.cs new file mode 100644 index 000000000..be14c9d76 --- /dev/null +++ b/.dotnet/tests/Samples/Chat/Sample_FunctionCalling.cs @@ -0,0 +1,148 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Tests.Examples; + +public partial class Samples_FunctionCalling +{ + #region + private static string GetCurrentWeather(string location, string unit = "celsius") + { + // Call the weather API here. + return "31 celsius"; + } + + private const string GetCurrentWeatherFunctionName = "get_current_weather"; + + private static readonly ChatFunctionToolDefinition getCurrentWeatherFunction = new() + { + Name = GetCurrentWeatherFunctionName, + Description = "Get the current weather in a given location", + Parameters = BinaryData.FromString(""" + { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
Boston, MA" + }, + "unit": { + "type": "string", + "enum": [ "celsius", "fahrenheit" ], + "description": "The temperature unit to use. Infer this from the specified location." + } + }, + "required": [ "location" ] + } + """), + }; + #endregion + + [Test] + [Ignore("Compilation validation only")] + public void ChatWithFunctionCalling() + { + ChatClient client = new("gpt-3.5-turbo", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + #region + List messages = + [ + new ChatRequestSystemMessage( + "Don't make assumptions about what values to plug into functions." + + " Ask for clarification if a user request is ambiguous."), + new ChatRequestUserMessage("What's the weather like in San Francisco?"), + ]; + + ChatCompletionOptions options = new() + { + Tools = { getCurrentWeatherFunction }, + }; + + ChatCompletion chatCompletion = client.CompleteChat(messages, options); + #endregion + + #region + if (chatCompletion.FinishReason == ChatFinishReason.ToolCalls) + { + // First, add the assistant message with tool calls to the conversation history. + messages.Add(new ChatRequestAssistantMessage(chatCompletion)); + + // Then, add a new tool message for each tool call that is resolved. + foreach (ChatToolCall toolCall in chatCompletion.ToolCalls) + { + ChatFunctionToolCall functionToolCall = toolCall as ChatFunctionToolCall; + + switch (functionToolCall?.Name) + { + case GetCurrentWeatherFunctionName: + { + // The arguments that the model wants to use to call the function are specified as a + // stringified JSON object based on the schema defined in the tool definition. Note that + // the model may hallucinate arguments too. Consequently, it is important to do the + // appropriate parsing and validation before calling the function. 
+ using JsonDocument argumentsJson = JsonDocument.Parse(functionToolCall.Arguments); + bool hasLocation = argumentsJson.RootElement.TryGetProperty("location", out JsonElement location); + bool hasUnit = argumentsJson.RootElement.TryGetProperty("unit", out JsonElement unit); + + if (!hasLocation) + { + throw new ArgumentNullException(nameof(location), "The location argument is required."); + } + + string toolResult = GetCurrentWeather(location.GetString(), hasUnit ? unit.GetString() : null); + messages.Add(new ChatRequestToolMessage(toolCall.Id, toolResult)); + break; + } + + default: + { + // Handle other or unexpected calls. + throw new NotImplementedException(); + } + } + } + + // Finally, make a new request to chat completions to let the assistant summarize the tool results + // and add the resulting message to the conversation history to keep it organized all in one place. + ChatCompletion chatCompletionAfterToolMessages = client.CompleteChat(messages, options); + messages.Add(new ChatRequestAssistantMessage(chatCompletionAfterToolMessages)); + } + #endregion + + #region + foreach (ChatRequestMessage requestMessage in messages) + { + switch (requestMessage) + { + case ChatRequestSystemMessage systemMessage: + Console.WriteLine($"[SYSTEM]:"); + Console.WriteLine($"{systemMessage.Content.Span[0].ToText()}"); + Console.WriteLine(); + break; + + case ChatRequestUserMessage userMessage: + Console.WriteLine($"[USER]:"); + Console.WriteLine($"{userMessage.Content.Span[0].ToText()}"); + Console.WriteLine(); + break; + + case ChatRequestAssistantMessage assistantMessage when assistantMessage.Content.Span[0].ToText() is not null: + Console.WriteLine($"[ASSISTANT]:"); + Console.WriteLine($"{assistantMessage.Content.Span[0].ToText()}"); + Console.WriteLine(); + break; + + case ChatRequestToolMessage: + // Do not print any tool messages; let the assistant summarize the tool results instead. 
+ break; + + default: + break; + } + } + #endregion + } +} diff --git a/.dotnet/tests/Samples/CombinationSamples.cs b/.dotnet/tests/Samples/CombinationSamples.cs new file mode 100644 index 000000000..1fa848ed6 --- /dev/null +++ b/.dotnet/tests/Samples/CombinationSamples.cs @@ -0,0 +1,150 @@ +using NUnit.Framework; +using OpenAI.Audio; +using OpenAI.Chat; +using OpenAI.Images; +using System; +using System.ClientModel; +using System.IO; +using System.Threading.Tasks; + +namespace OpenAI.Tests.Examples; + +public partial class CombinationSamples +{ + [Test] + [Ignore("Compilation validation")] + public void AlpacaArtAssessor() + { + // First, we create an image using dall-e-3: + ImageClient imageClient = new("dall-e-3"); + ClientResult imageResult = imageClient.GenerateImage( + "a majestic alpaca on a mountain ridge, backed by an expansive blue sky accented with sparse clouds", + new() + { + Style = ImageStyle.Vivid, + Quality = ImageQuality.High, + Size = ImageSize.Size1792x1024, + }); + GeneratedImage imageGeneration = imageResult.Value; + Console.WriteLine($"Majestic alpaca available at:\n{imageGeneration.ImageUri.AbsoluteUri}"); + + // Now, we'll ask a cranky art critic to evaluate the image using gpt-4-vision-preview: + ChatClient chatClient = new("gpt-4-vision-preview"); + ClientResult chatResult = chatClient.CompleteChat( + [ + new ChatRequestSystemMessage("Assume the role of a cranky art critic. 
When asked to describe or " + + "evaluate imagery, focus on criticizing elements of subject, composition, and other details."), + new ChatRequestUserMessage( + "describe the following image in a few sentences", + ChatMessageContent.CreateImage(imageGeneration.ImageUri)), + ], + new ChatCompletionOptions() + { + MaxTokens = 2048, + }); + string chatResponseText = chatResult.Value.Content; + Console.WriteLine($"Art critique of majestic alpaca:\n{chatResponseText}"); + + // Finally, we'll get some text-to-speech for that critical evaluation using tts-1-hd: + AudioClient audioClient = new("tts-1-hd"); + ClientResult ttsResult = audioClient.GenerateSpeechFromText( + text: chatResponseText, + TextToSpeechVoice.Fable, + new TextToSpeechOptions() + { + SpeedMultiplier = 0.9f, + ResponseFormat = AudioDataFormat.Opus, + }); + FileInfo ttsFileInfo = new($"{chatResult.Value.Id}.opus"); + using (FileStream ttsFileStream = ttsFileInfo.Create()) + using (BinaryWriter ttsFileWriter = new(ttsFileStream)) + { + ttsFileWriter.Write(ttsResult.Value); + } + Console.WriteLine($"Alpaca evaluation audio available at:\n{new Uri(ttsFileInfo.FullName).AbsoluteUri}"); + } + + [Test] + [Ignore("Compilation validation")] + public async Task CuriousCreatureCreator() + { + // First, we'll use gpt-4 to have a creative helper imagine a twist on a household pet + ChatClient creativeWriterClient = new("gpt-4"); + ClientResult creativeWriterResult = creativeWriterClient.CompleteChat( + [ + new ChatRequestSystemMessage("You're a creative helper that specializes in brainstorming designs for concepts that fuse ordinary, mundane items with a fantastical touch. In particular, you can provide good one-paragraph descriptions of concept images."), + new ChatRequestUserMessage("Imagine a household pet. Now add in a subtle touch of magic or 'different'. What do you imagine? 
Provide a one-paragraph description of a picture of this new creature, focusing on the details of the imagery such that it'd be suitable for creating a picture."), + ], + new ChatCompletionOptions() + { + MaxTokens = 2048, + }); + string description = creativeWriterResult.Value.Content; + Console.WriteLine($"Creative helper's creature description:\n{description}"); + + // Asynchronously, in parallel to the next steps, we'll get the creative description in the voice of Onyx + AudioClient ttsClient = new("tts-1-hd"); + Task> imageDescriptionAudioTask = ttsClient.GenerateSpeechFromTextAsync( + description, + TextToSpeechVoice.Onyx, + new TextToSpeechOptions() + { + SpeedMultiplier = 1.1f, + ResponseFormat = AudioDataFormat.Opus, + }); + _ = Task.Run(async () => + { + ClientResult audioResult = await imageDescriptionAudioTask; + FileInfo audioFileInfo = new FileInfo($"{creativeWriterResult.Value.Id}-description.opus"); + using FileStream fileStream = audioFileInfo.Create(); + using BinaryWriter fileWriter = new(fileStream); + fileWriter.Write(audioResult.Value); + Console.WriteLine($"Spoken description available at:\n{new Uri(audioFileInfo.FullName).AbsoluteUri}"); + }); + + // Meanwhile, we'll use dall-e-3 to generate a rendition of our LLM artist's vision + ImageClient imageGenerationClient = new("dall-e-3"); + ClientResult imageGenerationResult = await imageGenerationClient.GenerateImageAsync( + description, + new ImageGenerationOptions() + { + Size = ImageSize.Size1792x1024, + Quality = ImageQuality.High, + }); + Uri imageLocation = imageGenerationResult.Value.ImageUri; + Console.WriteLine($"Creature image available at:\n{imageLocation.AbsoluteUri}"); + + // Now, we'll use gpt-4-vision-preview to get a hopelessly taken assessment from a usually exigent art connoisseur + ChatClient imageCriticClient = new("gpt-4-vision-preview"); + ClientResult criticalAppraisalResult = await imageCriticClient.CompleteChatAsync( + [ + new ChatRequestSystemMessage("Assume the role of 
an art critic. Although usually cranky and occasionally even referred to as a 'curmudgeon', you're somehow entirely smitten with the subject presented to you and, despite your best efforts, can't help but lavish praise when you're asked to appraise a provided image."), + new ChatRequestUserMessage( + "Evaluate this image for me. What is it, and what do you think of it?", + ChatMessageContent.CreateImage(imageLocation)), + ], + new ChatCompletionOptions() + { + MaxTokens = 2048, + }); + string appraisal = criticalAppraisalResult.Value.Content; + Console.WriteLine($"Critic's appraisal:\n{appraisal}"); + + // Finally, we'll get that art expert's laudations in the voice of Fable + ClientResult appraisalAudioResult = await ttsClient.GenerateSpeechFromTextAsync( + appraisal, + TextToSpeechVoice.Fable, + new TextToSpeechOptions() + { + ResponseFormat = AudioDataFormat.Opus, + SpeedMultiplier = 0.9f, + }); + FileInfo criticAudioFileInfo = new($"{criticalAppraisalResult.Value.Id}-appraisal.opus"); + using (FileStream criticStream = criticAudioFileInfo.Create()) + using (BinaryWriter criticFileWriter = new(criticStream)) + { + criticFileWriter.Write(appraisalAudioResult.Value); + } + Console.WriteLine($"Critical appraisal available at:\n{new Uri(criticAudioFileInfo.FullName).AbsoluteUri}"); + } +} diff --git a/.dotnet/tests/Samples/EmbeddingSamples.cs b/.dotnet/tests/Samples/EmbeddingSamples.cs new file mode 100644 index 000000000..7e2c8e61a --- /dev/null +++ b/.dotnet/tests/Samples/EmbeddingSamples.cs @@ -0,0 +1,95 @@ +using NUnit.Framework; +using OpenAI.Embeddings; +using System; +using System.ClientModel; +using System.Collections.Generic; + +namespace OpenAI.Tests.Examples; + +public partial class EmbeddingSamples +{ + [Test] + [Ignore("Compilation validation only")] + public void CreateEmbeddingClient() + { + EmbeddingClient client = new("text-embedding-3-small", new ApiKeyCredential("")); + } + + [Test] + [Ignore("Compilation validation only")] + public void 
SimpleEmbedding() + { + EmbeddingClient client = new("text-embedding-3-small", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string description = + "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa," + + " and a really helpful concierge. The location is perfect -- right downtown, close to all " + + " the tourist attractions. We highly recommend this hotel."; + + Embedding embedding = client.GenerateEmbedding(description); + ReadOnlyMemory vector = embedding.Vector; + + Console.WriteLine($"Dimension: { vector.Length }"); + Console.WriteLine($"Floats: "); + for (int i = 0; i < vector.Length; i++) + { + Console.WriteLine($" [{i}] = { vector.Span[i] }"); + } + } + + [Test] + [Ignore("Compilation validation only")] + public void SimpleEmbeddingWithOptions() + { + EmbeddingClient client = new("text-embedding-3-small", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string description = + "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa," + + " and a really helpful concierge. The location is perfect -- right downtown, close to all " + + " the tourist attractions. We highly recommend this hotel."; + + EmbeddingOptions options = new() { Dimensions = 512 }; + + Embedding embedding = client.GenerateEmbedding(description, options); + ReadOnlyMemory vector = embedding.Vector; + + Console.WriteLine($"Dimension: {vector.Length}"); + Console.WriteLine($"Floats: "); + for (int i = 0; i < vector.Length; i++) + { + Console.WriteLine($" [{i}] = {vector.Span[i]}"); + } + } + + [Test] + [Ignore("Compilation validation only")] + public void ComplexEmbedding() + { + EmbeddingClient client = new("text-embedding-3-small", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string category = "Luxury"; + string description = + "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa," + + " and a really helpful concierge. 
The location is perfect -- right downtown, close to all " + + " the tourist attractions. We highly recommend this hotel."; + List inputs = [category, description]; + + EmbeddingOptions options = new() { Dimensions = 512 }; + + EmbeddingCollection collection = client.GenerateEmbeddings(inputs, options); + + foreach (Embedding embedding in collection) + { + ReadOnlyMemory vector = embedding.Vector; + + Console.WriteLine($"Dimension: {vector.Length}"); + Console.WriteLine($"Floats: "); + for (int i = 0; i < vector.Length; i++) + { + Console.WriteLine($" [{i}] = { vector.Span[i] }"); + } + + Console.WriteLine(); + } + } +} diff --git a/.dotnet/tests/Samples/ImageSamples.cs b/.dotnet/tests/Samples/ImageSamples.cs new file mode 100644 index 000000000..bad3fe58b --- /dev/null +++ b/.dotnet/tests/Samples/ImageSamples.cs @@ -0,0 +1,46 @@ +using NUnit.Framework; +using OpenAI.Images; +using System; +using System.IO; + +namespace OpenAI.Tests.Examples +{ + public partial class ImageSamples + { + [Test] + [Ignore("Compilation validation only")] + public void CreateImageClient() + { + ImageClient client = new("dall-e-3", ""); + } + + [Test] + [Ignore("Compilation validation only")] + public void SimpleImage() + { + ImageClient client = new("dall-e-3", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string prompt = "The concept for a living room that blends Scandinavian simplicity with Japanese minimalism for" + + " a serene and cozy atmosphere. It's a space that invites relaxation and mindfulness, with natural light" + + " and fresh air. Using neutral tones, including colors like white, beige, gray, and black, that create a" + + " sense of harmony. Featuring sleek wood furniture with clean lines and subtle curves to add warmth and" + + " elegance. Plants and flowers in ceramic pots adding color and life to a space. They can serve as focal" + + " points, creating a connection with nature. 
Soft textiles and cushions in organic fabrics adding comfort" + + " and softness to a space. They can serve as accents, adding contrast and texture."; + + ImageGenerationOptions options = new() + { + Quality = ImageQuality.High, + Size = ImageSize.Size1792x1024, + Style = ImageStyle.Vivid, + ResponseFormat = ImageResponseFormat.Bytes + }; + + GeneratedImage image = client.GenerateImage(prompt, options); + BinaryData bytes = image.ImageBytes; + + using FileStream stream = File.OpenWrite($"{ Guid.NewGuid() }.png"); + bytes.ToStream().CopyTo(stream); + } + } +} diff --git a/.dotnet/tests/TestScenarios/AssistantTests.cs b/.dotnet/tests/TestScenarios/AssistantTests.cs new file mode 100644 index 000000000..aea1c3614 --- /dev/null +++ b/.dotnet/tests/TestScenarios/AssistantTests.cs @@ -0,0 +1,161 @@ +using NUnit.Framework; +using OpenAI.Assistants; +using System; +using System.ClientModel; +using System.Threading.Tasks; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Assistants; + +public partial class AssistantTests +{ + [Test] + public void ListingAssistantsWorks() + { + AssistantClient client = new(); + ClientResult> result = client.GetAssistants(); + Assert.That(result.Value, Is.Not.Null.Or.Empty); + } + + [Test] + public void CreatingAndDeletingAssistantsWorks() + { + AssistantClient client = GetTestClient(TestScenario.Assistants); + ClientResult result = client.CreateAssistant("gpt-3.5-turbo"); + Assert.That(result.Value, Is.Not.Null); + Assert.That(result.Value.Id, Is.Not.Null.Or.Empty); + ClientResult deletionResult = client.DeleteAssistant(result.Value.Id); + Assert.That(deletionResult.Value, Is.True); + } + + [Test] + public async Task AddingMessagesWorks() + { + AssistantClient client = new(); + ClientResult threadResult = await client.CreateThreadAsync(new ThreadCreationOptions() + { + Messages = + { + new(MessageRole.User, "this is an initial message on the thread"), + "this is another one done an easier way" + }, + Metadata = + { + 
["test_key"] = "test_value", + [s_cleanupMetadataKey] = "true", + } + }); + ClientResult> messagesResult = await client.GetMessagesAsync(threadResult.Value.Id); + Assert.That(messagesResult.Value?.Count, Is.EqualTo(2)); + ThreadMessage latestMessage = messagesResult.Value[0]; + ThreadMessage oldestMessage = messagesResult.Value[1]; + Assert.That(latestMessage.Role, Is.EqualTo(MessageRole.User)); + Assert.That(latestMessage.ContentItems, Is.Not.Null.Or.Empty); + MessageTextContent textContent = latestMessage.ContentItems[0] as MessageTextContent; + Assert.That(textContent, Is.Not.Null); + Assert.That(textContent.Text, Is.Not.Null.Or.Empty); + Assert.That(textContent.Text, Contains.Substring("easier way")); + } + + [Test] + public async Task BasicFunctionToolWorks() + { + AssistantClient client = GetTestClient(); + ClientResult assistantResult = await client.CreateAssistantAsync( + "gpt-3.5-turbo", + new AssistantCreationOptions() + { + Tools = + { + new FunctionToolDefinition() + { + Name = "get_favorite_food_for_day_of_week", + Description = "gets the user's favorite food for a given day of the week, like Tuesday", + Parameters = BinaryData.FromObjectAsJson(new + { + type = "object", + properties = new + { + day_of_week = new + { + type = "string", + description = "a day of the week, like Tuesday or Saturday", + } + } + }), + }, + }, + Metadata = + { + [s_cleanupMetadataKey] = "true", + } + }); + Assert.That(assistantResult.Value.DefaultTools, Is.Not.Null.Or.Empty); + FunctionToolDefinition functionTool = assistantResult.Value.DefaultTools[0] as FunctionToolDefinition; + Assert.That(functionTool, Is.Not.Null); + Assert.That(functionTool.Parameters, Is.Not.Null); + + ClientResult threadResult = await client.CreateThreadAsync( + new ThreadCreationOptions() + { + Messages = + { + "what should I eat on Thursday?", + }, + Metadata = + { + [s_cleanupMetadataKey ] = "true", + } + }); + ClientResult runResult = await client.CreateRunAsync(threadResult.Value.Id, 
assistantResult.Value.Id); + Assert.That(runResult.Value.Id, Is.Not.Null.Or.Empty); + do + { + await Task.Delay(500); + runResult = await client.GetRunAsync(threadResult.Value.Id, runResult.Value.Id); + } while (runResult.Value.Status == RunStatus.Queued || runResult.Value.Status == RunStatus.InProgress); + Assert.That(runResult.Value.Status, Is.EqualTo(RunStatus.RequiresAction)); + Assert.That(runResult.Value.RequiredActions?.Count, Is.EqualTo(1)); + RequiredFunctionToolCall requiredFunctionToolCall = runResult.Value.RequiredActions[0] as RequiredFunctionToolCall; + Assert.That(requiredFunctionToolCall, Is.Not.Null); + _ = await client.SubmitToolOutputsAsync(threadResult.Value.Id, runResult.Value.Id, + [ + new ToolOutput(requiredFunctionToolCall, "tacos"), + ]); + runResult = await client.GetRunAsync(threadResult.Value.Id, runResult.Value.Id); + Assert.That(runResult.Value.Status, Is.Not.EqualTo(RunStatus.RequiresAction)); + } + + private async Task CreateCommonTestAssistantAsync() + { + AssistantClient client = new(); + ClientResult newAssistantResult = await client.CreateAssistantAsync("gpt-3.5-turbo", new() + { + Name = s_testAssistantName, + Metadata = + { + ["test_id"] = "test_id_goes_here", + [s_cleanupMetadataKey] = "true", + }, + }); + return newAssistantResult.Value; + } + + private async Task DeleteRecentTestThings() + { + AssistantClient client = new(); + foreach(Assistant assistant in client.GetAssistants().Value) + { + if (assistant.Name == s_testAssistantName + || assistant.Metadata?.ContainsKey(s_cleanupMetadataKey) == true) + { + _ = await client.DeleteAssistantAsync(assistant.Id); + } + } + } + + private static AssistantClient GetTestClient() => GetTestClient(TestScenario.Assistants); + + private static readonly string s_testAssistantName = $".NET SDK Test Assistant - Please Delete Me"; + private static readonly string s_cleanupMetadataKey = $"test_metadata_cleanup_eligible"; +} diff --git a/.dotnet/tests/TestScenarios/ChatClientTests.cs 
b/.dotnet/tests/TestScenarios/ChatClientTests.cs new file mode 100644 index 000000000..81cf59c5c --- /dev/null +++ b/.dotnet/tests/TestScenarios/ChatClientTests.cs @@ -0,0 +1,89 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.ClientModel; +using System.Diagnostics; +using System.Linq; +using System.Net; +using System.Threading.Tasks; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Chat; + +public partial class ChatClientTests +{ + [Test] + public void HelloWorldChat() + { + ChatClient client = GetTestClient(TestScenario.Chat); // new("gpt-3.5-turbo"); + Assert.That(client, Is.InstanceOf()); + ClientResult result = client.CompleteChat("Hello, world!"); + Assert.That(result, Is.InstanceOf>()); + Assert.That(result.Value.Content?.ContentKind, Is.EqualTo(ChatMessageContentKind.Text)); + Assert.That(result.Value.Content.ToText().Length, Is.GreaterThan(0)); + } + + [Test] + public void HelloWorldWithTopLevelClient() + { + OpenAIClient client = new(credential: new(Environment.GetEnvironmentVariable("OPENAI_API_KEY"))); + ChatClient chatClient = client.GetChatClient("gpt-3.5-turbo"); + ClientResult result = chatClient.CompleteChat("Hello, world!"); + Assert.That(result, Is.InstanceOf>()); + Assert.That(result.Value.Content.ToString().Length, Is.GreaterThan(0)); + } + [Test] + public void MultiMessageChat() + { + ChatClient client = new("gpt-3.5-turbo"); + ClientResult result = client.CompleteChat( + [ + new ChatRequestSystemMessage("You are a helpful assistant. You always talk like a pirate."), + new ChatRequestUserMessage("Hello, assistant! Can you help me train my parrot?"), + ]); + Assert.That(new string[] { "aye", "arr", "hearty" }.Any(pirateWord => result.Value.Content.ToString().ToLowerInvariant().Contains(pirateWord))); + } + + [Test] + public async Task StreamingChat() + { + ChatClient client = new("gpt-3.5-turbo"); + + TimeSpan? firstTokenReceiptTime = null; + TimeSpan? 
latestTokenReceiptTime = null; + Stopwatch stopwatch = Stopwatch.StartNew(); + + StreamingClientResult streamingResult + = client.CompleteChatStreaming("What are the best pizza toppings? Give me a breakdown on the reasons."); + Assert.That(streamingResult, Is.InstanceOf>()); + int updateCount = 0; + + await foreach (StreamingChatUpdate chatUpdate in streamingResult) + { + firstTokenReceiptTime ??= stopwatch.Elapsed; + latestTokenReceiptTime = stopwatch.Elapsed; + Console.WriteLine(stopwatch.Elapsed.TotalMilliseconds); + updateCount++; + } + Assert.That(updateCount, Is.GreaterThan(1)); + Assert.That(latestTokenReceiptTime - firstTokenReceiptTime > TimeSpan.FromMilliseconds(500)); + } + + [Test] + public void AuthFailure() + { + ChatClient client = new("gpt-3.5-turbo", new ApiKeyCredential("not-a-real-key")); + Exception caughtException = null; + try + { + _ = client.CompleteChat("Uh oh, this isn't going to work with that key"); + } + catch (Exception ex) + { + caughtException = ex; + } + var clientResultException = caughtException as ClientResultException; + Assert.That(clientResultException, Is.Not.Null); + Assert.That(clientResultException.Status, Is.EqualTo((int)HttpStatusCode.Unauthorized)); + } +} diff --git a/.dotnet/tests/TestScenarios/ChatToolConstraints.cs b/.dotnet/tests/TestScenarios/ChatToolConstraints.cs new file mode 100644 index 000000000..c6b20d28f --- /dev/null +++ b/.dotnet/tests/TestScenarios/ChatToolConstraints.cs @@ -0,0 +1,70 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.ClientModel; + +namespace OpenAI.Tests.Chat; + +public partial class ChatToolConstraintTests +{ + [Test] + public void BasicTypeManipulationWorks() + { + Assert.That(ChatToolConstraint.Auto.ToString(), Is.EqualTo("\"auto\"")); + Assert.That(ChatToolConstraint.None.ToString(), Is.EqualTo("\"none\"")); + Assert.That(ChatToolConstraint.Auto, Is.Not.EqualTo(ChatToolConstraint.None)); + + ChatFunctionToolDefinition functionTool = new() + { + Name = 
"test_function_tool", + Description = "description isn't applicable", + }; + + ChatToolConstraint constraintFromDefinition = new(functionTool); + Assert.That(constraintFromDefinition.ToString(), Is.EqualTo(@$"{{""type"":""function"",""function"":{{""name"":""{functionTool.Name}""}}}}")); + + ChatToolConstraint otherConstraint = new(new ChatFunctionToolDefinition("test_function_tool")); + Assert.That(constraintFromDefinition, Is.EqualTo(otherConstraint)); + Assert.That(otherConstraint, Is.Not.EqualTo(ChatToolConstraint.Auto)); + } + + [Test] + public void ConstraintsWork() + { + ChatClient client = new("gpt-3.5-turbo"); + ChatCompletionOptions options = new() + { + Tools = { s_numberForWordTool }, + }; + + foreach (var (constraint, reason) in new (ChatToolConstraint?, ChatFinishReason)[] + { + (null, ChatFinishReason.ToolCalls), + (ChatToolConstraint.None, ChatFinishReason.Stopped), + (new ChatToolConstraint(s_numberForWordTool), ChatFinishReason.Stopped), + (ChatToolConstraint.Auto, ChatFinishReason.ToolCalls), + }) + { + options.ToolConstraint = constraint; + ClientResult result = client.CompleteChat("What's the number for the word 'banana'?", options); + Assert.That(result.Value.FinishReason, Is.EqualTo(reason)); + } + } + + private static ChatFunctionToolDefinition s_numberForWordTool = new() + { + Name = "get_number_for_word", + Description = "gets an arbitrary number assigned to a given word", + Parameters = BinaryData.FromObjectAsJson(new + { + type = "object", + properties = new + { + word = new + { + type = "string" + } + } + }), + }; +} diff --git a/.dotnet/tests/TestScenarios/ChatToolTests.cs b/.dotnet/tests/TestScenarios/ChatToolTests.cs new file mode 100644 index 000000000..a59d789b3 --- /dev/null +++ b/.dotnet/tests/TestScenarios/ChatToolTests.cs @@ -0,0 +1,90 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.ClientModel; +using System.Collections.Generic; +using System.Text.Json; +using System.Text.Json.Nodes; +using static 
OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Chat; + +public partial class ChatToolTests +{ + [Test] + public void NoParameterToolWorks() + { + ChatClient client = new("gpt-3.5-turbo"); + ChatFunctionToolDefinition getFavoriteColorTool = new() + { + Name = "get_favorite_color", + Description = "gets the favorite color of the caller", + }; + ChatCompletionOptions options = new() + { + Tools = { getFavoriteColorTool }, + }; + ClientResult result = client.CompleteChat("What's my favorite color?", options); + Assert.That(result.Value.FinishReason, Is.EqualTo(ChatFinishReason.ToolCalls)); + Assert.That(result.Value.ToolCalls.Count, Is.EqualTo(1)); + var functionToolCall = result.Value.ToolCalls[0] as ChatFunctionToolCall; + var toolCallArguments = BinaryData.FromString(functionToolCall.Arguments).ToObjectFromJson>(); + Assert.That(functionToolCall, Is.Not.Null); + Assert.That(functionToolCall.Name, Is.EqualTo(getFavoriteColorTool.Name)); + Assert.That(functionToolCall.Id, Is.Not.Null.Or.Empty); + Assert.That(toolCallArguments.Count, Is.EqualTo(0)); + + result = client.CompleteChat( + [ + new ChatRequestUserMessage("What's my favorite color?"), + new ChatRequestAssistantMessage(result.Value), + new ChatRequestToolMessage(functionToolCall.Id, "green"), + ]); + Assert.That(result.Value.FinishReason, Is.EqualTo(ChatFinishReason.Stopped)); + Assert.That(result.Value.Content.ToString().ToLowerInvariant(), Contains.Substring("green")); + } + + [Test] + public void ParametersWork() + { + ChatClient client = GetTestClient(TestScenario.Chat); + ChatFunctionToolDefinition favoriteColorForMonthTool = new() + { + Name = "get_favorite_color_for_month", + Description = "gets the caller's favorite color for a given month", + Parameters = BinaryData.FromString(""" + { + "type": "object", + "properties": { + "month_name": { + "type": "string", + "description": "the name of a calendar month, e.g. February or October." 
+ } + }, + "required": [ "month_name" ] + } + """), + }; + ChatCompletionOptions options = new() + { + Tools = { favoriteColorForMonthTool }, + }; + List messages = + [ + new ChatRequestUserMessage("What's my favorite color in February?"), + ]; + ClientResult result = client.CompleteChat(messages, options); + Assert.That(result.Value.FinishReason, Is.EqualTo(ChatFinishReason.ToolCalls)); + Assert.That(result.Value.ToolCalls?.Count, Is.EqualTo(1)); + var functionToolCall = result.Value.ToolCalls[0] as ChatFunctionToolCall; + Assert.That(functionToolCall.Name, Is.EqualTo(favoriteColorForMonthTool.Name)); + JsonObject argumentsJson = JsonSerializer.Deserialize(functionToolCall.Arguments); + Assert.That(argumentsJson.Count, Is.EqualTo(1)); + Assert.That(argumentsJson.ContainsKey("month_name")); + Assert.That(argumentsJson["month_name"].ToString().ToLowerInvariant(), Is.EqualTo("february")); + messages.Add(new ChatRequestAssistantMessage(result.Value)); + messages.Add(new ChatRequestToolMessage(functionToolCall.Id, "chartreuse")); + result = client.CompleteChat(messages, options); + Assert.That(result.Value.Content.ToString().ToLowerInvariant(), Contains.Substring("chartreuse")); + } +} diff --git a/.dotnet/tests/TestScenarios/ChatWithVision.cs b/.dotnet/tests/TestScenarios/ChatWithVision.cs new file mode 100644 index 000000000..4b37ea727 --- /dev/null +++ b/.dotnet/tests/TestScenarios/ChatWithVision.cs @@ -0,0 +1,33 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.ClientModel; +using System.IO; +using System.Net.Mime; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Chat; + +public partial class ChatWithVision +{ + [Test] + public void DescribeAnImage() + { + var stopSignPath = Path.Combine("data", "stop_sign.png"); + var stopSignData = BinaryData.FromBytes(File.ReadAllBytes(stopSignPath)); + + ChatClient client = GetTestClient(TestScenario.VisionChat); + + ClientResult result = client.CompleteChat( + [ + new 
ChatRequestUserMessage( + "Describe this image for me", + ChatMessageContent.CreateImage(stopSignData, "image/png")), + ], new ChatCompletionOptions() + { + MaxTokens = 2048, + }); + Console.WriteLine(result.Value.Content); + Assert.That(result.Value.Content.ToString().ToLowerInvariant(), Contains.Substring("stop")); + } +} diff --git a/.dotnet/tests/TestScenarios/EmbeddingClientTests.cs b/.dotnet/tests/TestScenarios/EmbeddingClientTests.cs new file mode 100644 index 000000000..0915e1711 --- /dev/null +++ b/.dotnet/tests/TestScenarios/EmbeddingClientTests.cs @@ -0,0 +1,52 @@ +using NUnit.Framework; +using OpenAI.Embeddings; +using System.ClientModel; +using System.Collections.Generic; + +namespace OpenAI.Tests.Embeddings; + +public partial class EmbeddingClientTests +{ + [Test] + public void OneEmbedding() + { + EmbeddingClient client = new("text-embedding-ada-002"); + ClientResult response = client.GenerateEmbedding("hello, world"); + Assert.That(response.Value, Is.Not.Null); + Assert.That(response.Value.Index, Is.EqualTo(0)); + Assert.That(response.Value.Usage, Is.Not.Null); + Assert.That(response.Value.Usage.TotalTokens, Is.GreaterThan(0)); + Assert.That(response.Value.Vector, Is.Not.Null.Or.Empty); + Assert.That(response.Value.Model, Contains.Substring("ada")); + float[] array = response.Value.Vector.ToArray(); + Assert.That(array.Length > 100); + } + + [Test] + public void SeveralEmbeddings() + { + EmbeddingClient client = new("text-embedding-3-small"); + List prompts = + [ + "Hello, world!", + "This is a test.", + "Goodbye!" 
+ ]; + EmbeddingOptions options = new() + { + Dimensions = 456, + }; + ClientResult response = client.GenerateEmbeddings(prompts, options); + Assert.That(response.Value, Is.Not.Null); + Assert.That(response.Value.Count, Is.EqualTo(3)); + for (int i = 0; i < response.Value.Count; i++) + { + Assert.That(response.Value[i].Index, Is.EqualTo(i)); + Assert.That(response.Value[i].Usage, Is.Not.Null); + Assert.That(response.Value[i].Usage.TotalTokens, Is.GreaterThan(0)); + Assert.That(response.Value[i].Vector, Is.Not.Null.Or.Empty); + float[] array = response.Value[i].Vector.ToArray(); + Assert.That(array.Length, Is.GreaterThan(100)); + } + } +} diff --git a/.dotnet/tests/TestScenarios/FileClientTests.cs b/.dotnet/tests/TestScenarios/FileClientTests.cs new file mode 100644 index 000000000..3010dc941 --- /dev/null +++ b/.dotnet/tests/TestScenarios/FileClientTests.cs @@ -0,0 +1,43 @@ +using NUnit.Framework; +using OpenAI.Files; +using System; +using System.ClientModel; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Files; + +public partial class FileClientTests +{ + [Test] + public void ListFilesWorks() + { + FileClient client = new(); + ClientResult result = client.GetFileInfoList(); + Assert.That(result.Value.Count, Is.GreaterThan(0)); + Console.WriteLine(result.Value.Count); + ClientResult assistantsResult = client.GetFileInfoList(OpenAIFilePurpose.Assistants); + Assert.That(assistantsResult.Value.Count, Is.GreaterThan(0)); + Assert.That(assistantsResult.Value.Count, Is.LessThan(result.Value.Count)); + Console.WriteLine(assistantsResult.Value.Count); + } + + [Test] + public void UploadFileWorks() + { + FileClient client = GetTestClient(); + BinaryData uploadData = BinaryData.FromString("hello, this is a text file, please delete me"); + ClientResult uploadResult = client.UploadFile(uploadData, "test-file-delete-me.txt", OpenAIFilePurpose.Assistants); + } + + [Test] + public void DownloadAndInfoWork() + { + FileClient client = GetTestClient(); + 
ClientResult fileInfoResult = client.GetFileInfo("file-S7roYWamZqfMK9D979HU4q6m"); + Assert.That(fileInfoResult.Value, Is.Not.Null); + ClientResult downloadResult = client.DownloadFile("file-S7roYWamZqfMK9D979HU4q6m"); + Assert.That(downloadResult.Value, Is.Not.Null); + } + + private static FileClient GetTestClient() => GetTestClient(TestScenario.Files); +} \ No newline at end of file diff --git a/.dotnet/tests/TestScenarios/ImageGenerationTests.cs b/.dotnet/tests/TestScenarios/ImageGenerationTests.cs new file mode 100644 index 000000000..1d8d3304c --- /dev/null +++ b/.dotnet/tests/TestScenarios/ImageGenerationTests.cs @@ -0,0 +1,36 @@ +using NUnit.Framework; +using OpenAI.Images; +using System; +using System.ClientModel; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Images; + +public partial class ImageGenerationTests +{ + [Test] + public void BasicGenerationWorks() + { + ImageClient client = new("dall-e-3"); + ClientResult result = client.GenerateImage("an isolated stop sign"); + Assert.That(result, Is.InstanceOf>()); + Assert.That(result.Value.ImageUri, Is.Not.Null); + Console.WriteLine(result.Value.ImageUri.AbsoluteUri); + Assert.That(result.Value.ImageBytes, Is.Null); + Assert.That(result.Value.CreatedAt, Is.GreaterThan(new DateTimeOffset(new DateTime(year: 2020, month: 1, day: 1)))); + } + + [Test] + public void GenerationWithOptionsWorks() + { + ImageClient client = GetTestClient(); + ClientResult result = client.GenerateImage("an isolated stop sign", new ImageGenerationOptions() + { + Quality = ImageQuality.Standard, + Style = ImageStyle.Natural, + }); + Assert.That(result.Value.ImageUri, Is.Not.Null); + } + + private static ImageClient GetTestClient() => GetTestClient(TestScenario.Images); +} diff --git a/.dotnet/tests/TestScenarios/LegacyCompletions.cs b/.dotnet/tests/TestScenarios/LegacyCompletions.cs new file mode 100644 index 000000000..6f5deddc1 --- /dev/null +++ b/.dotnet/tests/TestScenarios/LegacyCompletions.cs @@ -0,0 +1,32 @@ 
+using NUnit.Framework; +using OpenAI.LegacyCompletions; +using System; +using System.ClientModel; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; +using System.Text.Json.Nodes; + +namespace OpenAI.Tests.LegacyCompletions; + +public partial class LegacyCompletionTests +{ + [Test] + public void BasicValidationWorks() + { + LegacyCompletionClient client = new(); + BinaryData requestData = BinaryData.FromObjectAsJson(new + { + model = "gpt-3.5-turbo-instruct", + prompt = "hello world", + max_tokens = 256, + temperature = 0, + }); + BinaryContent content = BinaryContent.Create(requestData); + ClientResult result = client.GenerateLegacyCompletions(content); + Assert.That(result, Is.Not.Null); + JsonObject responseObject = JsonSerializer.Deserialize(result.GetRawResponse().Content.ToString()); + string text = responseObject["choices"].AsArray()[0].AsObject()["text"].ToString(); + Assert.That(text, Is.Not.Null.Or.Empty); + } +} diff --git a/.dotnet/tests/TestScenarios/ModelClientTests.cs b/.dotnet/tests/TestScenarios/ModelClientTests.cs new file mode 100644 index 000000000..2e668006a --- /dev/null +++ b/.dotnet/tests/TestScenarios/ModelClientTests.cs @@ -0,0 +1,28 @@ +using NUnit.Framework; +using OpenAI.ModelManagement; +using System.ClientModel; +using System.Linq; +using System.Threading.Tasks; + +namespace OpenAI.Tests.Models; + +public partial class ModelManagementClientTests +{ + [Test] + public async Task CanListModels() + { + ModelManagementClient client = new(); + ClientResult result = await client.GetModelsAsync(); + Assert.That(result.Value, Is.Not.Null.Or.Empty); + Assert.That(result.Value.Any(modelInfo => modelInfo.Id.ToLowerInvariant().Contains("whisper"))); + } + + [Test] + public async Task CanRetrieveModelInfo() + { + ModelManagementClient client = new(); + ClientResult result = await client.GetModelInfoAsync("gpt-3.5-turbo"); + Assert.That(result.Value, Is.Not.Null); + 
Assert.That(result.Value.OwnerOrganization.ToLowerInvariant(), Contains.Substring("openai")); + } +} diff --git a/.dotnet/tests/TestScenarios/TextToSpeechTests.cs b/.dotnet/tests/TestScenarios/TextToSpeechTests.cs new file mode 100644 index 000000000..c880ff864 --- /dev/null +++ b/.dotnet/tests/TestScenarios/TextToSpeechTests.cs @@ -0,0 +1,35 @@ +using NUnit.Framework; +using OpenAI.Audio; +using System; +using System.ClientModel; + +namespace OpenAI.Tests.Audio; + +public partial class TextToSpeechTests +{ + [Test] + public void BasicTTSWorks() + { + AudioClient client = new("tts-1"); + ClientResult result = client.GenerateSpeechFromText("hello, world, this is a test", TextToSpeechVoice.Shimmer); + Assert.That(result.Value, Is.Not.Null); + } + + [Test] + [TestCase(null)] + [TestCase(AudioDataFormat.Mp3)] + [TestCase(AudioDataFormat.Aac)] + [TestCase(AudioDataFormat.Opus)] + [TestCase(AudioDataFormat.Flac)] + public void OutputFormatWorks(AudioDataFormat? responseFormat) + { + AudioClient client = new("tts-1"); + TextToSpeechOptions options = new(); + if (responseFormat != null) + { + options.ResponseFormat = responseFormat; + } + ClientResult result = client.GenerateSpeechFromText("Hello, world!", TextToSpeechVoice.Alloy, options); + Assert.That(result.Value, Is.Not.Null); + } +} diff --git a/.dotnet/tests/TestScenarios/TranscriptionTests.cs b/.dotnet/tests/TestScenarios/TranscriptionTests.cs new file mode 100644 index 000000000..64813c2c3 --- /dev/null +++ b/.dotnet/tests/TestScenarios/TranscriptionTests.cs @@ -0,0 +1,43 @@ +using NUnit.Framework; +using OpenAI.Audio; +using System; +using System.ClientModel; +using System.IO; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Audio; + +public partial class TranscriptionTests +{ + [Test] + public void BasicTranscriptionWorks() + { + AudioClient client = GetTestClient(); + using FileStream inputStream = File.OpenRead(Path.Combine("data", "hello_world.m4a")); + BinaryData inputData = 
BinaryData.FromStream(inputStream); + ClientResult transcriptionResult = client.TranscribeAudio(inputData, "hello_world.m4a"); + Assert.That(transcriptionResult.Value, Is.Not.Null); + Assert.That(transcriptionResult.Value.Text.ToLowerInvariant(), Contains.Substring("hello")); + } + + [Test] + public void WordTimestampsWork() + { + AudioClient client = GetTestClient(); + using FileStream inputStream = File.OpenRead(Path.Combine("data", "hello_world.m4a")); + BinaryData inputData = BinaryData.FromStream(inputStream); + ClientResult transcriptionResult = client.TranscribeAudio(inputData, "hello_world.m4a", new AudioTranscriptionOptions() + { + EnableWordTimestamps = true, + EnableSegmentTimestamps = true, + ResponseFormat = AudioTranscriptionFormat.Detailed, + }); + Assert.That(transcriptionResult.Value, Is.Not.Null); + // Assert.That(transcriptionResult.Value.Segments, Is.Null); + // Assert.That(transcriptionResult.Value.Words, Is.Not.Null.Or.Empty); + // Assert.That(transcriptionResult.Value.Words[1].Word, Contains.Substring("world")); + // Assert.That(transcriptionResult.Value.Words[1].Start, Is.GreaterThan(TimeSpan.FromMilliseconds(0))); + // Assert.That(transcriptionResult.Value.Words[1].End, Is.GreaterThan(TimeSpan.FromMilliseconds(0))); + } + private static AudioClient GetTestClient() => GetTestClient(TestScenario.Transcription); +} diff --git a/.dotnet/tests/TestScenarios/TranslationTests.cs b/.dotnet/tests/TestScenarios/TranslationTests.cs new file mode 100644 index 000000000..e3310bd0d --- /dev/null +++ b/.dotnet/tests/TestScenarios/TranslationTests.cs @@ -0,0 +1,24 @@ +using NUnit.Framework; +using OpenAI.Audio; +using System; +using System.ClientModel; +using System.IO; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Audio; + +public partial class TranslationTests +{ + [Test] + public void BasicTranslationWorks() + { + AudioClient client = GetTestClient(); + using FileStream inputStream = File.OpenRead(Path.Combine("data", 
"hola_mundo.m4a")); + BinaryData inputData = BinaryData.FromStream(inputStream); + ClientResult translationResult = client.TranslateAudio(inputData, "hola_mundo.m4a"); + Assert.That(translationResult.Value, Is.Not.Null); + // Assert.That(translationResult.Value.Text.ToLowerInvariant(), Contains.Substring("hello")); + } + + private static AudioClient GetTestClient() => GetTestClient(TestScenario.Transcription); +} diff --git a/.dotnet/tests/Utility/TestHelpers.cs b/.dotnet/tests/Utility/TestHelpers.cs new file mode 100644 index 000000000..a9ec4198b --- /dev/null +++ b/.dotnet/tests/Utility/TestHelpers.cs @@ -0,0 +1,77 @@ +using OpenAI.Assistants; +using OpenAI.Audio; +using OpenAI.Chat; +using OpenAI.Files; +using OpenAI.Images; +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.IO; + +namespace OpenAI.Tests; + +internal static class TestHelpers +{ + public enum TestScenario + { + Assistants, + TextToSpeech, + Chat, + VisionChat, + Files, + Embeddings, + FineTuning, + Images, + Transcription, + Models, + LegacyCompletions, + Moderations, + } + + public static T GetTestClient(TestScenario scenario, string overrideModel = null, bool throwOnError = true) + { + OpenAIClientOptions options = new(); + options.AddPolicy(GetDumpPolicy(), PipelinePosition.PerTry); + options.ErrorOptions = throwOnError ? ClientErrorBehaviors.Default : ClientErrorBehaviors.NoThrow; + object clientObject = scenario switch + { + TestScenario.Chat => new ChatClient(overrideModel ?? "gpt-3.5-turbo", options), + TestScenario.VisionChat => new ChatClient(overrideModel ?? "gpt-4-vision-preview", options), + TestScenario.Assistants => new AssistantClient(options), + TestScenario.Images => new ImageClient(overrideModel ?? "dall-e-3", options), + TestScenario.Files => new FileClient(options), + TestScenario.Transcription => new AudioClient(overrideModel ?? 
"whisper-1", options), + _ => throw new NotImplementedException(), + }; + return (T)clientObject; + } + + private static PipelinePolicy GetDumpPolicy() + { + return new TestPipelinePolicy((message) => + { + if (message.Request?.Uri != null) + { + Console.WriteLine($"--- Request URI: ---"); + Console.WriteLine(message.Request.Uri.AbsoluteUri); + } + if (message.Request?.Content != null) + { + Console.WriteLine($"--- Begin request content ---"); + using MemoryStream stream = new(); + message.Request.Content.WriteTo(stream, default); + stream.Position = 0; + using StreamReader reader = new(stream); + Console.WriteLine(reader.ReadToEnd()); + Console.WriteLine("--- End of request content ---"); + } + if (message.Response != null) + { + Console.WriteLine("--- Begin response content ---"); + Console.WriteLine(message.Response.Content?.ToString()); + Console.WriteLine("--- End of response content ---"); + } + }); + } +} \ No newline at end of file diff --git a/.dotnet/tests/Utility/TestPipelinePolicy.cs b/.dotnet/tests/Utility/TestPipelinePolicy.cs new file mode 100644 index 000000000..688c407ed --- /dev/null +++ b/.dotnet/tests/Utility/TestPipelinePolicy.cs @@ -0,0 +1,35 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace OpenAI.Tests; + +internal partial class TestPipelinePolicy : PipelinePolicy +{ + private Action _processMessageAction; + + public TestPipelinePolicy(Action processMessageAction) + { + _processMessageAction = processMessageAction; + } + + public override void Process(PipelineMessage message, IReadOnlyList pipeline, int currentIndex) + { + _processMessageAction(message); + if (currentIndex < pipeline.Count - 1) + { + pipeline[currentIndex + 1].Process(message, pipeline, currentIndex + 1); + } + } + + public override async ValueTask ProcessAsync(PipelineMessage message, IReadOnlyList pipeline, int currentIndex) + { + _processMessageAction(message); 
+ if (currentIndex < pipeline.Count - 1) + { + await pipeline[currentIndex + 1].ProcessAsync(message, pipeline, currentIndex + 1); + } + } +} \ No newline at end of file diff --git a/.dotnet/tests/data/hello_world.m4a b/.dotnet/tests/data/hello_world.m4a new file mode 100644 index 0000000000000000000000000000000000000000..ed8e09c8f0c53d942f888a85d78800657b2f962a GIT binary patch literal 79968 zcmeEu1zTLnwluMlYi6=?thA*pq>o=viU9h7v@RwZ#@5hHT-W0*rx_n)*x-uC#|}* zsgd4O4gKu%PyaCgU8~{(-?iX4HsAsJG)Yk`~-fthjzt=y%n?lELPqapQW}v@( zT3OiqIaYs29eoDulY9x%@4G&_&r8IgZ~<%Ck-^kQ_z2p zk@X+)XX`(M4SW6E*^|u+YmkjOG!)vi4(+!H>cvy_&p83R@vr>9>t8$>|I_~e)5HHa zdgMQRJ%2viLqGm25A!Viv;6K4`!DXv{@LH3*ZM5|t;hOX4}15dNB^7ut$(_Pzshg^ zAN~oPo`0M~uc5bia35R$vieUA1;w`S{(of{9RCyl|3B3KhvIMZpZNcuj{o=n#Q#6> z|IhmWuXEu4)cqe99p%CoR-0e>8w(N*Wx0jC23?kX*0^nErXoukTk;q2J87OwQom9G zyIuqkFQ!1a6(c@|6oH`+nj-JN3A6#~zozD3vpgjYHR|_V+mu6IPujyRsctKxo z_*o0O*P|8wgOlMQVGWXR%*AT_YSxnTSYcl;4Z>wxbj5sM}(FJXe45;S}@{ zZ_)_@GZLfGQ6ARqv5o0Jtx5~4F4#{&A4Zc?&i3QJ`rWb(qDvCA5(jrhtJ-7sz?bFM zapOPYXFlV3YNI^sUY+s6E!lCm%;PG5oq zqQ;*iLKphnuU=3?1#tTLo4T|Hn&=5I?N)QVip7B={G^q6Uj)mXKm;fz#&M9fL0BPA z|MH$G@1(YQRtSlV)5}2mBi8GWM)4$QH2$JtUr|W#WN91_#E9%8<@^&fbqL8y7HE3k z!Ll0)&AI2bCB2e7+%04I;g{$I#U&`{F8h`|)Ega%l!XJ%gB|tbSKP&^^-z=if$_8h z`f};^bodpUPdtmOADcCG5nH3dt&gO1h&7lJeD5dVQhe_njGBu^Q%)+-j7S^iPtzUw z$f>E}Z$YOPSH~pjgmA(VJZ3+8FaQSJiKc=tyxvNAM|tPbrHh8PWSRRY36F243yADy z9jSgUuYXG$f+eYA1uD*%2dY?+vKuQ$wn^4bK;04uDu+l~kc(5PgA`*N7_T|aO3RGm zG4kyTrhz?LmN*tlA#UV#5*Xn*-#$_qW7%UHtLj?zmbQMSEA96np?Mpgs7z1JLZ4mPYtfqI`2B^L-11})-ly`Wcb;GR9Q9RD;C?(k?~mMy zJk5oLyLVhDL@<~q^7)mridmSJj-+oZ*rt0~Y10i+^vMFS&gzKzxAU0pH7BcVbfhKH zMVDG|RseSRw(vB|JVbp8BOk7Ex;E)1*3>)F^uC)@-9D1+J#>s)&le(UxL|hf3^!h) zYR&M-=v~xkT!aYj;Rys1)IMfd9e%p5VRUKc&VwGEUF=zesKCuJLUeB)9(+!m#|z3e z$GKLgn;RM$a_V^(dR%p^;R{`cTU^i?ucEoA6Y4UO0=Ic6m3AB&$UWnoR=Qv*vM3u; zXDxE}S~pyQ`Lnt13W-@K=-?AiAM6I<)2LOE-64&&u4eN>iOKC9o7y++dT#Begy-Az 
z{Ot~!^^B7&ML-|AK>umYT0+WI+#iR%MT)levT7sBB~u%TmXFe!+1@^T*h!lnt^^Y)g44;~?kQ=w}g8c!Hl{J&sM7??8=9YghJuG(yx* zcrmDmW7!E!z~!;uXwG>s%GYmqoC=F8cE$&CGj8PJRl4!+3SuE0rA@MoET*T{X1y-U zJ=UhSmYxF2OQbv@1z6~}5 zZ$8mVwl)GwrI`MwyW*)%b+%NrU-(jZB*zJvErFea;46nn&PS&n1AcQxE#UX&PggGF zGqs=CdB?zW**t5-6fHYwCCoD(+ZMso4X{xnt5PQQpHGrR2Akt}Ho9%&Mn=@{oigf z+fXiv(SD>ZwltQqOdn>7v<1<~|DLqJb#+);le{vhkr7EUDpM_lLkC;lj25njlj7$D zRQUG#E*T!)*QjCCRImH-tTD5pU(NJksIHk<*@5qV3)*lfG2Q~jER89o^Sl@t!Msm& zU7b+(tHL9;;T6!gTG~c_4882gMr5>zQ9O#vP_5i}_2?C4J(Fu#vjs}Tn%)^M_w7R* zNtRNFz-Q|I84-wzFqp)M$W6aY)S|bR+G@kV6K})})q-?Fd8T5mt`UoC$Yqx(*&=kD z%{QDc$vDd;==y-E%ozhBwOK;FN;eOL@R8&eVc{IT-7)7LLlNbFs z{bh!JJy;$BdoTx|vlsbd`3PkPCnpIb2IM=WpYF<_UUP16T54*+IX!m*fdK#uAV2xZ zh>=H=Odg7B4JX=Z+Zw**9gQViOG&Ug_$r^AVKL2jsAHgCazStqG$>=%)K=0lx@+0s zn&x;-obeXgPNGn0?0MmD1NaxPa`R)5hEvA~?B>sVSyJp(7?81<*ok-aDrN zs>8!BxQla21e{Z@N!aNS>YC=$US-pY&?8v`8jmF9iMup!(~0XKM}eCZIrlQ&StdeY z(G>q?v&~LRLuQ#Hq)P32nN)xT!wBNK_cW z1~>JsA@*xPIp_Dw#OOi2yrJCGnopIUF1Gzi=~t_BB?vPHr8@a`NOkXVCuKu%ku&j( zcXZ`xUKHH~Xs5Tf4(CvXsptx(pBKcLcw0E(pdkogNqFYTJE%AsfAlG138q5+Bw4Y? 
zYd6iB$(`&fx}&*w(^a!WcXT#fq^f;DFUxVW;H?GZ?o>nbUoh_FYLCKw1I~8qD=ul2 zLls$d+ufsj9rw=phX#39T*EH|v@$C+HVBuZGhFs!Jg>vH+43IV&}fD8=1oYV;l&Lu zjhK5JbXU?~;SJx(>?)r+LziWZz=jhYa3_VQzlFZ@T0Ev@Ki2*+`(Ye(L&KC`Oli;E zzgd1{fH89$rL};s^Pcn!w>^kzJ49oRf$#3~D842kXq(LjmD_TJPp042J?D5&(!0c2 zbK)xbIGa8sST(FUwrr*~2v2Q`L8?j=68lhx9& zW))iTcbKr18z+@?#v#>aham}!`Lc`P6NZcRsom*~A@d)xL=9Xz-q2QLG5lXtdKW8F zy%a)66E_KbXE~miiaYCQ1Qo5U#2tmn$+g`As?01Udju#iJ8a~997hLE2Dc!OhO3hb z_82*h2Jk4O5Uz+A|IR~y9<)yM03txV2Z9s|P2gfEbVg#itgmB}jm)>vQ$=BA-r_LN z$-47WkY-%mr6IqGY77Mi30|_r#@DN7u-^0p!o&%$FpN`_Cn0>h3E<_tmhd# zr_rr53k7XRS1{jg<|(G(xaQS;f4aS^%v2J3evM|j4bVHL$ICJV)laArY~F*5q_Lf( zJ&T#@_Hsz+u8_n?eNlGB|Gh;jbX7X@S2wj7*YwYIC&iYpzKXETDZ>r}vxX0OyLc&TGid=BgHVfP@ z?Z+DZ-y6>eN0wy9((p!)DPkUXBvq(~tV}e*SV=)#m$Nv?tG0(Ib57^c&Q8Z3Dt&-6 zJ6L%jLA#8^G}D2ao1cviA%X-}`ei4$8utb3-;;D{`b<&Rm{8Hy@fz$2xo?J3ngO@<7z7LXHDB*gJSfJTrIIR;nxOF(m)c6 zlZqRf<^6kF#HtxrB=Ie`CX-^K{c+WLDf*7pV{M%FO%{3|!FqUA1-|wE2F7{RZ}XaL z!1Jk)Z9<|j=Ae3f$y{@11iZEx!)x->8WiG=#<{pGSkp(0RY2!-I0;-e9ow??&BN@8 zq%kD1X!a(gg{z|bd;Lh{KIt5!A=}AAM9Vzs)?4;CwUkqs+qRk+kuhXnRa4cRN+3xK zM;DJm*M3-D5YprpHCMa|hlOnxSao54oU_T6$U3tdxO(z>Mgy}AP;J&!4P8U(2uuXf zACs9j$?sol`T|;1a5y9d36fW^535N!Bv4vye6^NS93F0Vs}QsIWtL<2 ziQIb}y^UexE8nyhON;Wz&#E}eJ}{@AJ#@3YfT*Q=99@gVt<{D^8ToD+uC}1=HP*@e zDA~r*Vh)@u&04ZsQyQ;R1*YSO8-Q z3Sujz{bZ6jDG%h*5!kT{%Ik&~X>&sQ3|1P5m&V;G_V}?e&vASk+`m!Ve;7`#(WS#0 zU>;S%eDH=t^}Wf%CK7ev z(7KXrxu{M)I`>x@XWmAG#^Lm>gHLhIq;uagS3#pFTLYrvW~|+tfh6=FbAB+5GJAu@qIQ-mfA>rw_8J&vyog=LO>?NIQdqpVLYzvi!ErMoHb&oyuUh zRB(-`$^pr$$U!}Cam;Ndpm#et^(*+E;6pp8LD-q}z`oCl+?$`UN(yX{FiCdHoOf zyU90>LY<2(43TBS1D9P>LunUTsz)vF-+(o@G^!s^P4g)}xE5EE?UhBj~Y9JhpG6;1A`=*7tJqd zH)6h62o&#J=21%lUiLNRqZdR@)eS4K6n(C#6V*Q93>HpZ93Uv;0CT5|if;EeNdvuy zxa7@S0?+RBJezzEX`)S&MNo_@wk0^^?=aEjVy{Vy`5Yw(Ta-@%U&nXFOftmC%k<1E z%UQuinK?~&<(HP(zhj-X8OP^N9^J4IeDs|)UTqxa9JH)Br;eluOCSi-t2^8-Qc|RT zdmdcPIZ24W(xQ;YGs9+P=a;&{`A}t)oD05mZbx@Cga*Ig(9_hg$A8<#D4VdXg~I;& z7b!OLmHCx^VSzz3FEyinde~t`R;Sfrb4bebGseRc?ET+C_fPQl{|4O?SN@T_`X8bD 
zSAQqBUOb)sfcbam8|feVClUtXZyxsPnqdBc0rSL?E+zFbX;efur$vYE%p|M!sH7>M3AdylU<(6}%cOwa8-!{U@;)x=;;pSm z(_vd_3zroDypodEExkp<8TnURkxPpT;gFL{*=zl;NP@1;ADx~*oP!x7#|+`DLBgVbM+v7cP-4N?v^Mra!**Pl0&S6QFyxCu1dI2%Uk zKUj|ba?TKUEWYl5OR2(PQ154mJjhl3J`pi<(||gniy~>&u#+r1JBCSqQ>{MkJLJfg zH*R}MRS%!B?MRR{Pho17xSU7BkxYqIL@&LOXUb~B0jbk$+=aLtsL{Aqr#tOv^XhWt zuiM-VlkQ{iORSVG_r#|g7KcaTM&VKjh=1y_eC;k19ee^szrn;2DE zKvUFO2S72!#GM$QIbtjKQi3)4dTXuKC!H+rv+(-d`%tK->@LCYhcjR8%vAoDl8p6% zMf#KLyQ)9m7C0=Sdw{xE1+&FT-|4OVA~{D7q?%xpG2+)^d0PtuU9})@v`e^3n2saj3>nXEc~bJpM*H@;s7r0Uo#c9Q+eJy;*Ud zv#|4V*hTIfpQb9DJmr0_(qc_FXF#`&h~U%ScZlofD4g?+$Q&zgwcj~d2i))gG>KOa zDZby@!Nzy}d~|QMg@+2O?Xf0MV4ZC_8_2K^ zhG*|{qQD<*g0IPg4sl&Qg+kuy8X3)+K31R5<51{=h zqoVsRQQjq1*>IYFd4LPs_H}&8KNFc;l^|S?Vc?)zJM? z!8>;~2&!*s zUZ;yitwXe)$Ci=rC|IwD-URZtM!ej=b_zyPx^IZBpm{k+EdM#$x2OW`=n+gj84S6< zc!Ufr4O@wErK}$O*wZB4q>V|`;rz0hPjt5^n>>A%(zutj?5%Ujtlt~cY&A@fB8gXh z$9Wc%X!*66Z;3%QrDXLTvy;;#PYK`FTqfaI8N2Q7DU_2ti)dGBmdx!i&3{X5BunM+M{6CboigFq~4# z9Wwdg1zky-Dk~*nRn!}$)iC4cz=Z~U!!o5Ej$z9pOUz*3t%c1&?u=KftDAedd(MM` zcD0UKR>O{87SuK$qs2$YZLx~0$-Wfu0Wx&S9@RRw9GVVsm68c^>(1*X$c!N*rn$&5d?<5F` zh$XzF6H!?4S)*N5loSTLq5NSz%XtxBdp;7_k%MWDTymziQb)9MyH^>c85iBuMo? 
zTC$0&lBLv80A*Gv7L^V=&qqu$jw2%aa0_zH`LWz$bU@!dI;SeMP(5V_-!3@{=33+f z&a1|)N3tpG@7ukp1Wus@=bpa- zG((<2@|vNY6b~t97;aglQ)yl=T4LB1QR>!XnwP>N5*^GB*`v&7ksGeSkO#138`R}B z;6DrushM^RZ;l2{jO3OYUQg`#F-6mPW{sh_CG)NifaaGO4w1<ZzJL_tKi)Q5;rPnsXZS}T>%s^kXeSP(fdVv@+Of;ju(s3tgyYsK5WYWjEG zxxM`@knEHC?WJ7z4O&ySK!r_lJEg zhS6vW3XLik{@l3YqRMmPMFH@epe*}7Ls(TN#}@m`{iKPV=D8A-lE8yLCO(cV6Ke4s5q^e)!JzU&NB4ZyX> z8cJ(JR_Cr(Y9r-$ZeHmq677`8Yq&?1z9G^se&;d3|ERkxOZo%S9b059UJZGo2uz>q)SO}6VTUK z0tLS?Zg(Xi_6yzkTa&$={kFo~uvMNj=Kg#qxcmL!72gJ9P(zgGub-aQrv6V8X{i?RT1@eX0${FI?jsWILQ3`l7KWY( zCy0S~2Ve{B67j25dvPwvmWf`7JCe?X5 zkA8DZND?fSDKT<>T8WB_VN*hJS?#x@ zZ{dvvJL4v{Dk|&TQG+|n;Sb0g@X#E^+8V^CJ#RjreN;jM(wkA z<#0cW9cWUhJDN>O{29HxTPh z@t?&}+s(Z;--+O_1fgpWC@W!|qc)WZB;&g_oiHvDoA)-^(hv9G=;ZhF(&lrpOLea& zwI_X!VJ9a6*(m^C3?{uwAtPju@5YH*SfmpX^qQ`gtlCIPtyVt&ylF8du!$DkB$>zI z)EB%GlMF~UaZ*)#H%FLJFyu72Ia+OgAE>RhZ>J``(LdqrNTG((ZG9XqI&%!xU))mH zc0D#2;}X9Fgtr>9yrr-i*3>H2F}9|_Nx&D76}e!nnmasLJxA3xIl4V8SYkE5j$FTt zoVXU2YS0oI4 zb{;^q!ejEH4gtw)oynvN9bd+mBR)s8q~K$j55)vXtQnMy9Uq>!kgn85${*K1KTGPc zwm`D|4eHNqebF6r_G?s>{>&`SdH&si^lt-@HrS1?BV(dZ+ko`1<{2%<1x5@K9+BdV zK6WsWY5tGB{?njiCq7Y2u3TP^&v)>Ilgc4da~!)uDk{l?VmO(FZeUE^;!wgLf+LK`a`zY)cl-%vPTrFxhrR6${ z!qr6bz2Tx9@yR!)y4F?;KNg_-<5Yy!llk)XEE_!{4zUzsok@q7!2K+`tMv*RNZr~- zsd-W=%D;eRbxm)t(cFWUV-(rI|fcV)COs{EwXHo_{YDW)!4_Sj8JCll(``Jz3U005?M<<$2WmLLmWVeOZL z?=cry6@0)cU^gby1!aXR0b%1|#M}%CqGe|D;trb={xL)DI{4Gb!ga4pMp=eb_^;D= zR7=@w-qcCHr2XRH?Sk;aC~vgGx!maQEVkG!5JJTqpBixNcJ1KxR_? 
zq6@+W>SUznV0y|f#d}sPq(_87;x-Bn) zw<^EW%S2Jrb@Swk0v(rBviXz(%Pg6!k~EbxCk=DhZ0%(}OarND05BV-R@1Vx`anBR zoL?>?l8UR=^2ht^mm509pkIn)O)6)wYPMjsJq~KFi`nF*nkKEn2~M&5iyGb$B1LPn zTryePP{Uj^>CzKy)Q~aLm}W615gCK&!QrL0+Rtbz{WFfZSon!UA5N9x+lAvQ`Q(cy zBi7@R6%EtYjuw(W92$8oj@o+bQ&w0_5(NPoPlFUD)X;Br+11M`e$#e zKs@JNP64&0(E5g9P{kuBi9@sjA>ELH&T!3URiP#L#uY=?vH7n=|Ph2{{Ejz{u7kRJ#l6XzNG5YoG|s%PSOxroF` zY*5o+LXA2cL6K2}vFb94WYM;2P8_qk#h1#!-%KP_fkDMy{yy3I+Ly&T5tU3lNc!_K zh01g`0*35?jD(WT`9cK4qAjqX#9+ex7$R4~ilN5PxJcvhD5>8>q$Om9#lySjSQV*b zPJ@|WETxpGJ=HiV@9i}}cZq-+y)u@gDYuV3TP>a1g*(h`V%F6b$TYT~R^=3}f*WFg z@nRB!%4S}2K9p8{qCya5U^(e%c4#t}vXxe}ijz7P(S!e4w{0aKa7ddsM{%3XDNP?8 z=`xUlx*$M|D0h0mAxAfNCL+x|Vd(-?>L$50mLJbr&lrl7RMKR;EOLL!6YTun62|^| znXoKYE_26gCcAd8Hq>oa%lld2^u$9xImxHr}d^RAhPgPc=#gfko=4wo!| z*@gVu>AQe9D+Qts*P{H}92L+bjtezp7m~DHcy%+`0u1FDcf6C{Q?pApUT!f2BjS{< zSy=&*=VFJK$L!ft-g;cI6eEDoPZ7-%0-a)xksPABH#r&4=-s-U?^k#tWYN+#zJo5b zmt5UkG&H4;)xWGCYHE&hnd`-7qvg8<+rkmf)MK)RI0P+xvbhczoV{(+1f5p5Zv^Y+ zH(zbenFRpnlDI8maFfCe-}K4^d5T?eWS@ZK<|E&&{}{P9*sS)y398)}Ke@ z6~P)jil&{l;&rM!qSy&JbcXTUaiZfWbi&K*@PxURmTvs6rWa7y{XWg;I0g&6*|3q- zIQxEduyjp8~MGi$j5J7`k;inn+HwJ zmVhxOnEc)??)9%OVk#^gbo=Ie_m19ukX7$$2XFy8G4j>l`}0*Dlq!!26W= z%Hky2ABU{cm^O0n`w$<&ttA74$nGLh&N$B$m0wy`N8B1j*k-E5PZ zy|N}Cta>Q%JIE!Fox$gub?2$HlZD>40 zlrJINw^V88%7KAhs zLcvI)*fne<@K&Nb5XTrgdfR8ba&(H5tf6kpCFA+n@8I3L>eqCVvQ%=!O8$zbs_u++ zZ5JSwi#2RMlgHsC5EL4qb&}!2adEzP$GY($q((1kGLpxw*HS$Ibf>so2~Qmv$S_xC zo;JJk$byVgME)bGv~(CfuDz}HZMo)SUJwI`9PdTbA)#qH?8rmZ`H=5Q-P?4yZ$FIH zGyJv_<~nO!N9s7!&$|gxcDc`Aj4LV`R86`#9VJ=}M8AU#HNzd_gbW{)4Nqb}G`tzp zNH~ZUP8oB|2{pXeUc76Mq4{og2PYPCiuGM$vybDZeXgPXn{)75#$$kI#BF>{O+CaD zR&lom`g+iX-nUUL?tyZiQ2tJWpl8>$b0$%@_dbXW9b;`D&4jK!*OE?i+=^dp<33Ww3{iv=pF5ZuYtLT=BOLBh$)@e9%% z3m`jHl!P?y)A)u1#O-zjXPezxAd=)Uf(8`H$hlF~EI3}vy{MjgJcp4#f*;pl^h&O>zb`an&I z?{SQp9i2umd-Ce~V`#Qp43%$&$&E%Qe1l$NA~Lq>Tw1}m87m6>%98r?783GPb#NB} z?KOS!fYU=t0hf|FPTQh$iQaOZ)jm(!?PI4s+Ke+VGj$u|nNu!Fq8Ac%5gVgp!8s=p 
z+vcpIORxl{?(}2wx>={C6V^wIkOLFmWk2I7t^BJUu9}x8Y>Ny|Kp2Egw@XyqFEHzw zjpz=utDFALN?p<3CQ1N4C@{A|ku&g7ejOi2r2D(~@h?MbZ7gstXGrFk%l!=;V`I7^ z=}$-;X}zVmw*=u+Uo{`~#F|M{4sZ?QGW;?e(Gko#3vtSsn(1X*%azy7JGA5pP;<)t}?#cDf^r*sZ^-SiU%h^f1zd` zP&Q?**^^*BM9ZDrW4^Pz&zQHoX_DkrNEqgDsR~K*dh3MmZAUc3kcZ7}h(K3&*@Mk0 z!qp@_Rl#wytO{M9ND0lLk-v}hrmUI?SJ)0$*FOGUDVxXnjVfcl%pN2QO~$_7_yk#d z^oN&D{BGZu_xjDh5qX=Z36OOY-3~Qc&N7(3Z5`9)o^4)@fCVGMQ9>?75VJ|0iv5Bv zt42D+^IX*}@D_Hdxj!^&dUkWj{*m)NbEVSYSP$1@E*U=ld7eGSr+23Occ{(Vl*140 zwp&dp0EF0=yl6Y864ZQ#2ZfuOr)>AW_8Y1VRVUJVb_#pJER%0j%NI^pc&xfO^8HYq zJq&*u#x0pByk^dl3Esx*z8;jxYZi_lZjQpJ^p3T|iy3W7i9QkQ)^`%Vwrwwe_-5@8 zgS<%}adV)#PkDq7Jww2BRnPcq_EBXxN~GxK(e0v;PYF%@)Bt@-?f75{ZH`0t;JqBL z%i``VCherEL743^J?gX*jOAdimI^^%wqrj@`;oVTL=Yf(e^SGYt?h(C53zu@TjE^x zJS1OumllUt$EHE;jP}>yph-|A1s5g~|I*75U?QK1!^2P(>-i?Vvlm+sdAiEl2|k$< zpyXtDseQeW`u?>^GU+Yre6GgZv0oY=()Oe77ZIc3Q7l&5w9S!N>Lqhb_4i{B)R1jx zJ>xB+B*CeNgyIQC{aK|&86zqzO}4*)pM)8^CxxaS- z{{#O1gkS$}_`3k}AK3K&gug>SVd#IA-}v=E@b+i?{NG=7{}+7y`PKD*$KU^nmt#EX zasKN4&sXP<{^ns1pZfpv)&6JcZ~wplt-&8Q&-}mWpRjr8`#*HgpTF(j{LRB&KKaA_ zoByp}`I{#s_!EC0ya~}xYmKBZhWRX5(5S}JRV-C`?6YZrTR&6W0NKk&=jtpYK4_>?|8j5 zt3$MftP$gIfh9fXDzQ@}bc%baFwB+F!duYt7$M`vN4I+*FwhX(+*z%O>~8}pay!1P z**}ciBD9;qB$yo?Tv$%O8<)Lop2*}BIl7CgSmt?@9@E6>hX7zE<32AGoQnv5mb%Gd z0MWVaB(s(}w%ECB##&)|#_PS{AozSY4*-MnlSQX5I_j}C4;iLar2Z#RhNlr)U?xHLJdO|vaIK%F;(yA%MPn6B~B*0c7YE-?r^8y-8N$DY}~cxqY2wD->+H zE}H|yY@8e2CEn0$C|TTlhwXb-EwWd^9kw)}+u!b3^{b*u`3eFK0*9}D;DD1IhyYe# zOQW)_1y>Mm@X+^R3b;Ic9V74|K)rkw9=>**cWYxKVAM8}leJcGh13bNhoxSnsNANQ zx(56Lo=n&*#Dc1;wcPz|MId>LKKSM8oTP|Z|Bb|6sp{B^Q7jF$oyKz!!ocSR^x%0f z{rs{FVyA#sDnCFR#Q={gEYY$Es%Yba(e-s$sn-Oflmmhm$r~-HUqw*pP&|uQ$BoH^ zWIA=%xo2axdGNKzs=)=k%)Iqf#PYIF;0^6Yy$;ZV;@$eML2 zhQP_DbsL3@f>BCQ3jU=(({U<&QtZ^8cAQDxXm2yLdsRFO9^2YH+L1b%K^v;{pRXQh zMy)zroZqP1WEzYt(L@hzp5A6@B3zr8nIDgSC+`)SGWybi@*4h_D5<-q8QyZKVO^ar z42_=a5D^sAz}bIS;jwBB`fR2cxB9|1cE7)`{GnUOc%-^O#|#VMcy{B?|7Z7LRY{1; zNqYwqfO9oosj#Zm2B_v>rNzN}lT0`1wLjqGF9< 
z=_Wu$5wXom!sZHtWFjlu)17k*9TK!4iKqOMZCt3xX|+$CYNmL;s~-t~e!lrvR^=u_ z+ar}DC>YTf*}iVo5DTrg(}k z{RsIvtq*jbFQQa_uH8oJ_uZ)Kq3ain*9*iYiArVAZ7i3NGd*X%I(Kh0l#QWaZ`$l(J>b+3miC%WxMzM&)2fOAv_r_=4Zy!V2%F)PT|ZFp8@wt~ z(4!q=KDG4BShGnI?=JjKFpRG;kbh?jRxTU2l8iCKrLF9WQIFpxY`yPRzm+udIu+$5 zr~>(>4-V?8rY2qNvzdh=n9&zm|SJ% zQrq;T#7jtgOmo63F$~I5fGs2yG=lQX8nrg z&)h#z^7bf08flmK3%icGIqJJtpwA8yIQaT)IVr?xfJG)F=#MrA?%MbP&^Yt^}2|b){4Y zD=6%q4-e0nYWD)7EI(({_nNAaXM>A6@`%s_p1}t%)})X zs(S;U^@yyXk-UAGMtX`)d3Kk3K6sIOgeZG>xICC(wL_ep9Dk6_Of&RLnX%;xuiFA7(> zE-efbY?_EWc=f#v4*P7)rf0hYP`^|jx;vHV4ggthuJf;CCG;@14Lr$cK8a_Z86#)M zd~n{&vsflCW%r4<-`NRAEdG7aV*P0qXnOkTr+xZ?r2#1)?Pi%oC!O56%FUHHsD9-m zO1lt<_?a<6af0jvqbZKyl%J>brPU~FGQu=8xl&;2q- z+X*7tbkPiqX-j^13(atRj!5`ppMAHSp@WR*ZeA@Wy{eQ+@~f0gw_^aaUGl<^!sVLO zarPTCNBH-`*pe|ZS`PG|tPYx2rL6DuPoX@{<*qwhuNsOkkWJ5N*hV;rvu*^=8;83l zfgEPq3GGJWX#uX${R8#}EnrC5xazpc{hZ7$CrkH^y@DL}4eqY)!K+m~Vf%PjNlE>f z7b~F=B2llX3e2!-MW<_Onu6J~`1P@;!Lj$ewkOSwtS{G@$gx*x)0F2-C#y#^@cZ8p z7;2XFWq8!+XLRpOio@I$n2zk6DK|)YB}zIh-!u-c%owsI`KC6SEUSU$S&#daoGnCr zp3b=B1Z_^J@7DH@zeg1)b4loiPM9(R(YRFI>J@zkx<-lQe4eu8n7+8xvy!OXx)*tI z#Z5Z|3d$-;M#6w#i=9pZlSY9XmerD{9B*=5WedrnD!9n>+~?!t=w6E%Rj4PmlV>y2 zL`eGyUIc9RDHU}*@22sIsGm+tmP^ZF(!x_{BCr-*oBKEYNsI{bOWZmE96qRMeA&> zN2eeU_`1)2_m;psCwus{eNpSq>czkVen@_$KLc3~Ir>b*48tpv3!h6(tlgnk$&B=m zxAdgW9E`|u0uztm0|R>9!okpX@Bj2LZ+)SMv32G1E&Xe3e3v_0s8^UWKju+}G^~z&rT{qvk zwC{9SWE`UB)*1ERO2|6@X<)e3TLno;ohZGkW5-Xz_knYruRfR0T*H)a|Kf=DrZ4=A zHo^MsX?IlD%)S*jQCRc^MKlhJ@_yk5i%?#28fTzd7QA_8K_@qBQN(^UB!e;I^ z`9j)+03wfX0QNk)XTVJ9fzU~3CSRh=4)aQyN{en3jEB_EmWS~QQ6?A|P~0S~-*|;h?Q%FdPnTZeoJz=~&229Rh~zRej{b^p7ZSehGOdo|-!GM0lph zQNH;6bPsvoM#>2nPZpP;SBJ7~1HA0yzM@!ZrIM$Rg_BqDp-tL3A@W2TQDyj=eYmKg z475+AgOu^6rG11Q}dXrQX ze6mYu8t2OK`;cLtUa53|>5J#M5^@uR;pP~{^3>axzO4z1X%P0|c;>%`~pW{|G@q7TL_O6A^`^n^iMR?oDjWFFfe1+Ez#b)J? 
z1>bC_#K=8XxH=&3QtOBm-?zuZpE$kiGgX84M0ANMli(~)iO|Ang<4K%7!OM5l~&Vi z1s^lO(2!zdzjQkO=)Oenit&0%rrL7Ji{2U;>lT$D3P-N3|B_bxi?Svi;ubhd%0Lpu zDtST;(?q>h;6LQ)khJ?lE?(f5GI;K_2M%~YIG)x#yAB;FKz0Kl+XMKW>g(=g^zeLq z^XBm@{t_pXgOg|E^Gl{_tN7}<1OB+gE-ne$kw#?pb2(;k)3mbZ^@XIF+C*&{A>ZLM zF4kzo0@m7fJeu+BlkpxYVlX2UA!07JvPHSCoKB>SZ{AJD#?W+Oga0T)S@weN8-W=968{ zTSITi{WiG1iduuUg;^ZUnqZ+UGnGdFlH7ryp(jvn`2wc|>JRN!Y zs#(rNXuFpH{?OnLj$W;JO?|)N2RVOZf5&cy#GNJXV#aVcx9a{=a)X5Kgs@&$H`4-z zAvpTS@S!6q6^jOza5A~4Qh)qAmlf?|DS~hXbqr;x`l*N(A#=`21$k#8)>^o5lX=KJ zH?05mvjd)#CCfkaE!hK*9RMh9v~V52-ccMKU7cC(T|H2I@}zb?$#ie29A8xcI$pnc zK`-m&FRzaOfQ0xA!2G}#O!Un=A!&_5(e*^?XzGLB37a2=1>rMfP;zxE$v8vX=s<{+E};u(k~adrlCYUdgmoe+UqQJfe$GSa zcGRiAz|lv=b$%+cWhQC9d=9s6A&R%s42Q|RRj)_%(C9EIfZp+l{z{yKgCd9NN`ts? zd1Ml`PTnTg6Mo9;Z{w6CHdO}wAIisewhD+jBS2k(WDdlj(K+?JsvScwCa!T-UuS3=mI^ zTM=PPAwkBxC$MeY9CR^*+zDu0!*{K^t<->`Kh zvbeG(TTA+LrS0H@eEZlzF;IW_Cogw_sB$v1kJ$E9{%}z{-x>4qxspS6 z-IszlylPX?)bZ6mp`2%PNynS;mCmE{u=lFJ>(A0bayapvwJ)Eg)~>6cyg#9_AL3Zv z_21qDu>IQlAFp;!WM2OO?Eu=}iNvA(&=EUQU$Ub!T+a&XQCNgG<=864UN_2kN-TBS zC3s(`FKhi=%nfM1~pdI454xm%hjMk4D3^D$1#=b~TNlOO(3Z0*J5 zx|DCOU;1y8;?&pMu8dBscuF#cf}f>jZ&Q{14bgBJ=24NO20+;-p2S!?oKSCE7c8AdMsv7PnJWc!&Wr zZ^iF!XM!5OTkbfd#vkU#FkW&)oY5_8r4I?|*ua6F5}2)8HP8HRaNc5-5_=j!6$cex zA1`e}b}l3zI4i*ywl3XL&l#cfnB{gyX9aG7^~HCd3~tO_isjY5ZzB05Sha5VWCN+% zn`Z*w!~?wDrk}H+Z8R)jmo(}4ax9}ksyRRKSRQE%+LZ0FXC>=TOr2Zcr|)dwzdG<` zac`BVl!>%piW;l%=qV^N;1l5~TX!8P>~iz=fz&Lpsj&AE_8DJL2(QUGHo5Zy?T3%B zyaE@!y+Y0tyFO1~k;b?1R=Y(c%on|$iajn35s>W*+oAfo!R_ytZ(g1TzF^9ZDC~|I zbyy}1Dxu)s@@e4C$?a{my!%_4s>CY=D0@kHKZDfo*5J6ncRE-kO{{B9%u!L2l}xIwr?orr?vy)=^r1H@ z)uSrSN8aDulsI3r*d75R*OCw@oORN5cirvXUYklh4gv3yjtMV~j*>q%Ibe^=x?2Hh zMlmVB5199T<6n)img>%8*2wy>`e1l?j4M^sx<`2Wr1vHFvn4idRY#k-7b&cljXWmL z9YrkpF-=yE%M+W3-;omY*~TEXG?SOnjP_>B7|2sz^PNw18ibQ!mfn<=#_gr1(ZKJK z-QSSk0`^v;wJTwr-OPF!$24LUk{;2y2lp@cymX}g?oH!dC+AJz(uwe?@mNZp9rI{7oji2Rr-TmqTc0ymjE(fQ~H2dEap!Cnlq(h+Bzd9N?rY(xxtP+`%DhAc%dK(PI@TT zC*9+2`A{)CI#a|R-tTPND)&H69Mjmv04$5Twu{WWH>a0|8ffYzbm 
zoQWy-HaR8RlpaLPlEB06G>mN|wr<9H`lc(G|T$_jjV4H663bTs(YLZ*ONA zXHi5pU!4&3{MB)BQ1#Pakyd!FK0V@qvEmIArRRv(A8a%@bm`_he47%djzU>DPYZvV zY(87B%VYS~p7HFYnO>w`eQqC}Yl$!xwyO9zR)2E3S3A=jQB1QhHwg!h-?)Y7lr{5Z zeQ5yhA!t;Gu+!Ggc;3tKKhA9I&sP(=_6AX+Z4#g>h2%wL;qt zFUE6paZk4BPBU5tPm+B<0oN0`HE#x6{=;h1bR$px<+LZwP#^grW1O%C1E*)~lt{_s zQ`$Q966s8;(n5d^qKxFyCv?Ec{k&df<-^NwnF%s@rzx6bu2oxYsD(PPU=VmqSM z>>8VO)kL|GrGu-b;uJ&tdc&eSO~xwi>ISBfY}e}+4MHSw;>AE17oh!v+86%qVCPsR ztFj~aymrWY=9(-xGv(XAB_!SYZ(VTUzMA^xfuVa^xoJx5X?g{RdL;~7ZUT@zkspp= zC1$_Ao@=X}7cWKiQn!ynR`43!bf*HM-^Ku%-ga8lN zwD4Q?#)r|HV5V@HKT?)ZmO*D_SIUA+oi}d7W&0Au_8Lm!p^}X45dnF&9=&^>a_mLh zL5@~{?gpfXYry%3ZnD*AeR9SesXYW03jE9+tPtt%>F4two z<^m<>sPPQ8CvCy~C?P&v6H)VWmMrV0;=(|;$kaWSpUqX(2admMT^_+$YeLo zVK)`lhB>igE%|lDxpZJLXBkKvj*UG!%d*lx9ZL=}QwN5T))D~dpt>17ZRg(v)}qW$ z)r43yQJGfA&rVfb=t1nQH7rch{L7vBY^l>sK27>)+%m#IYcnS5eVXkw9c4lq%#gv7q!|pFC;YA+4w# zWiK=fG+uuzFcoDTS9&tS4GS&NOljLCq51vosihVkV3 zNAubj>4!*+Y9Ze0=xH696Z{PS4DJ#RYpjE~MvjuQ%hNLiQ0YT1)JSke5rhrL1P73a zvtibclGj2@iAgQcT8t)e!UGWt@Ue^F+RQQ$H7kbWaiEI3jOaBQIIYmTyDz(k*2OZ9 zlh~t5SM0iM;Bj*OwZ-@p4=DJZ_bZ7%z}$`ulHU8r(F;)AIo20V@LC=4h-_rE%(EXn z6nR!5c+DEfvb+5SvRP+hYbZFf~zwiVT_g#OOx>1kfaE%yc=gO7`1u3;tp2Z~o1`|BKBZ`Vaig+yig?2mZbUp7}Taar%wd|Be5}<}3X- z%=I7mpZg8}2mZbUUi)u&`@i9{f8!tDodv+>|Hl7%-h=~i=GVWv z3)~TjUj1_UdQ}bn6d4_X8$eYr)q{f+*sxx+liV?!_KeLhlu?di2}nkh4T)^n;=5jReB+(K*<~B6%3-J@lq} z)UlIEa(jHYq8TJN5Nrv+C*?%=6PzqKk}72F7^gzRXB@U=SjC_#I*IRm=J3j#+=ylm zL7;UWSDHu)zJeZ}1fDWAbsq@d(3CwdIZ;3)Q;r>hf_xfji=8kat<==bu@rrYjI8cV z{f@lG20oHE3^z7S9caIZcdXG2X%m82l`shYr(!u!pn>=^K5?8N-MQ1BUlgLC;(mXA zoZoOrda;Jz+et1#!VA{f)J~GZ^_|0M!1DAvOhqS@x#2S^X?|JHs&&{z8VdQYY+$}W zYkYCjBRQf?kOe4O{iY%fg+T<}N_$=wEdW~e3sF!I1rv;e7M`HWl-nk2t8KOl`SG?r&1w)mUTF z*iamloR~RCBZnf>-X-rn_j1M$WPG1zpWx5~_5Ch93`D9&P<}nW88Jo|q$+KV^|M9k z>a?Aq`bzDJF>B=68G6D_+CWUT->_pLwlI(g`;bQz2DosP@NhYBHY514Q1lR31JGhL z1-9F_<6gvX4^hVpnLIo90Axd6Co4~4UC*TFYltTHyJ(EJY_0nt+2`O2p1qf8GiA`x zz`Lm0iIxx6te`12+di?~o)MvW2v{(MHGgC=?u9s@k>p;_y-OY+36&z|&qN3Bm#Rlm3ICf12~M 
z(L)Ee^;X9Zg9qZMLx5&-_}@tIv(Dg1P9BIgg^h3vGNMRMvAmu`uJPB)@jL$&N1l{z zCn?6^a~rDMazG?rY^yZ85&qVo3vkFgQ-7%tupbXDFQ)9ZU-Na`uzq2#xNDpfNLxv6 zo+-x^si=GhsOSJ;`ig<#wq%eT73l}#>cI+uK4Dwdn+*Y&P=e@FSKM_sdre4k?5;AZ z%>s|pQlIZicr6hiSLyogJ?qb9-_!O#rnS#$`)Y~ERo6U#G_Hyxa zeq9V``_xZ!tM(2*Fihd7hWlu)%c-&j#o@R`v>}|oAUrdVfTouuq;#k$WTpDEsTW1p z(k!*O_w~EaUdQk(Izra4=(?>9g|6qIX^Ba$hyKs<4aSSWeb3OW=b|5yCRRx_%n7>! zg7pzSczyjps|s_rBiW_*o6zZ+n@z!Tkt*73M9qJP|03a7U|VJWiz(tRG(X;jMh3@b zoDv|~4BWU-pgJumBG2JGUlxtgsDDr(z6Fc7YQ3B%B%RlZIj*UZ+AY;{(xnniG9#c8 zh<3smCjp;MlnVVhoW}I$rSr$;21Cf}=$X;-2{=seK(w!eFk&qpY*i}*D_N?KI8wM0oSzA+8i@CqpGz2pqltutI_WnXhJN`za%KaLLU-l>>)d6I&|@ zD_wzWd3gmk)hVhN4ipg1yGsSVChxt!<6l>@gV7mf*Ue;~zhuJps`@aj+H15Y$Q!s@g6{DGR?92`wQjo1cEc7S<9rg_#ynjOK7Vh z0N{2;Xr~n`dUYuvGoAa=Y1vtp(U$9S{j_-bfRci$nb3Cb?qxSOLxdR{iBEZ~(685A z-b{w0Ev!o0D=-MQkDfFhl4hO?r$TRlypa5hbckdYH0$SSZctm@g4G@QVPqeDl(OW^ zlj@?)DE}X6U?TBXNY~4KoOC8Kv8F#S2>P=fvC8U>LJ71ZR09uOzm=x{pl=E~1&k88 z3c)~yK?GXqD2%5aANF@cEyQyRc{+_lckYE&YK)0$a=(?@f2>Y&nsy$4M9abVgxLJl z#UlRIG7?V3z9Zc@;=roG^^slAGk#S${SPaTf#~^;m<|mI0&&-AjIHU|`O$W_mF3zU zo++x7vgyb2K7Z?NKUaRnG3bw2oq?biD{W)|## zlHpD9)$25CJaFN*p}ch(1kHvITgHZivmQem=r{<31UW@-g^H1;mzgTkCCny3pqK;Y*r zykc;m!ll?#oDDibd0^B(p5&FGO&8<##X~g7LVBb4W_h?Je5rmo6N}x zaHd)V5Z4ZQ=`wzsEv6PRji_YEVU>xLpS{9Co4j2$6pzYo5Zno`4m{-VH&72Gw;0R| z+cuB7H{5xxSACGaAF&1=uO&Rj&$<`wVaQK0;E$VOM|@yZ;%r!DxT82z{VjLyFys`u zQ?(G8-n875Grsk+q?Rh%k53@$xdSAs_TpyrL0v-H`2=1La%G+cdQg6RusM25 zTUP&*Af5PoH&UdBUm}z`mw+#42ApQOwKe89%oa+8NVa_3--Y1lhcp`FZAX%4ZYP-Z zoaqHX|!d&-7ewC`)>6Y=CT(kg6Kvzqp(PrWLA-i=m$VM*;=(LqyZqdpupq%PC zZXDPpv@xWlwrS_~>&Tt!C1|ncMJ=5q0k(-SV1fj7Hu%`xGR>&@8K?NTswE4b zpCivKvmJU8@(KM4lOX%#f5hn8@ip7j5yTJ(6Q z={>GO?8^`eP8N1ZM3(3qwnXeA<^#vs?^#+sh4qu14Yc z5)kA`bo8;@}U_zRVDA*qqWXc(?ny*ksmHyS;L$dSg&ABP1^(flUuIXNj zO+H^Au00o>KR>%hUFC+hIT z5aj`=yM5lRy2>J@RIxxZp*On6k9c~`u#kDC9js^`FwH$HN1k+QJbB%1H**`q<{W$x z`o7I?ZVg4BeI)F*4R<;t8A%aX;WQt~8w!o)yjdI4&V6=dt`5nD>6L~?u7hO*zjEed z=zY?Alf8Z>%zWaMzsqRZtZuONpHl$JXc?F;nvb&9t)ihr)^jf_9Al)lRHjOkGmK~t 
zN2QFOdB+9k_66CgMc+uK533YkIL35?x-+zC!0&AGB!~QdwJqf3HHcRw-QR(-07}w_ zV<~G$30vuZ5!%IbhJuM@VJ20DQsAT}SO!qgP8iXRv8oqIB(978Or+A03T#k0$FQ1D z=qg>bkgQUh$XwV&c6pHqDOYy5v-{*Ks#N9#4MJ!634I>U9%;=+p*XwqbP+yW`3j~I zMatn8!JKZKVM+4ifTQ3FDZWy&V5mp%0FF`VbUdKa96V(V_B0=|lnZI!IRq6M3H6Q!Qj60{B2av%IGX_FWiu+7+1`Jl$DR7kVFFz^X z!PlBp*qAwf%v)Gs#EMj~sQaPUqTO`1ro&y2|rUGCUpo3Be# zn&5(`ryyaQu-rhc8~v^1&8rhfSHUGAfPE_$$$WRT5zF4ijCYvFykP^6-n%1$%P#R) z7SJ{%cu9Fez+OjBfM``1MjIK?I z;WpHS;F9RNrM+-dYn|aPUdGnAa3!fKbK8`q5NEA3djm}fxE)0$B7S)_JIJOgE>)+` zK;%lX++ym&P+3LMV`y|YN=O#yD1v$A|2d_nrCmIzS-`pxC>DuVl40m5Mwus^e@goV z9w^Z!uz>O6nv6d`axhc@?~rIByxW8*I5jTGS)Ygj%n^&Z6%(#@RA}nUD{z?N^N$M( z`}fLCd<69?;7!2A7C||}zHq)+R=QhOV((%a$g5|FDV!6;@GC7a&665j@X5|a=(U9K zdHL;;DG-*Uy+57Po|xkkN`>qZB4tm3Tq8#y*Y!$nRo&K_|C!=8F~N2eM`2@yx##DP ze+OIB?ET?GznUQH2|+bna(%>H_i zOgG*;9dnkI5pu_A-8=%sGG+Bntba&NFz6ZEeV;UNXwS)}VPNnMB*)@Ug|7YlKkyoU-8cUsd5St5edWE6E9UXd5Y zb3*2@X8fxPdAn+?AXIY#coG4^pBcJVYTt&A9(}r(W_?Xq*y#=g;%{f{)FsuhNv+FY z>_3MIhP~%<&3vNFBWGKVp3a;MhcPhXf7lo7y&KJP_|%$h5IYY`8p}?i>ti=@ctJio z>qp{I(v*$U4U^Q)sGh$HhV#B_e?|Gbiu~3%r-Yt0*Uf#|+3px){CS3blDGoAWuPQ4o>ycgIYuj{0m=vcShj+5!C*9;lH_v1k zzh9+rPCq8G9J-_S=+gaLJ@sSl0^3A3)?fBsp-u|d(HkDX&E@K7I-G2sk?ol=qsAs$ zl+EVUn`;zIouMjB0{SIsUl3uvk2s_t-h-?UOA4RL95;@!A3ncA;e7b@yeBE%iZ5+w z#rjkEB@GT=Hqf)zc^u=xzpzq)OiV&LCv3yQV~$F=-3+P>8=w>N^4e8@mq(~8gy0vK z5rTn%00+q?H9=wGl5{B1Isu`Pb%6W~2~^dSuK^gO_pMr7H!kn%ZPP%X^7E~kqhqvl zt1a5?8*AH`3+3G%Nq3Cj0f2u`A!IiKgstD86NsTQsdgVbBDvesXX+<%7agxqx>1RF z8jT96zviLn(%sP{_99wqNQWYZ}CjLHKm~6F8bCiC`2B+__z_2NqtT5}BGX~~B zOjCM#tuAijJ1Lea^uavp(94|Kj@OsG)Z%fN6ZawZ(r43vUwvfPb$u7zOg_Jtb>>4T zf9)(PF%pRFlZVza=0%s=a{uC?4$4s!<$(r3S7*v#mEE6QzCK10-o9W?LJv9aQY2n~ z*7Li!48WVD1=hiR`+`L@2!Og}blh9$hPhV26nV%@K57tkk~*KoJAdxJH+kFpee=Ga zR`11!-B9n_b{s2;Xk2v<{i|=&E&H&>W;B0-ke{ngL|tv_|bBCDdQop5qtC! 
zJ7L1yAc)&(7hU0TBVdzH<=Ayk#oS70^?QQFD6=-ntRPOev~>l*}_YtC4#7SA&R!WD_>YIR~91 zCKl{0kLpl8@~K(gYtxO~x*re7upPLuz8Pr`W{Kcd?`+oz)YW>bjr8?Y_d6_hR8z0O zxFV8A(X%kaFJ!Mnit+Z^-)H^Kj7T(w)y>!1({bl&;2&1sjor1aByTuCalR~l7w&Fd znFo*`fijE-^I5{_`!!#gf(Xpfl*=tVcQOYL$!RrdM1}M^Xo8(o>g9VoD|DyL*QfUh zZ{59YFXaF0zdLg&(AZZV*8~CNE8#<$yfsnAoFaYKu>?t@az^TMzZl$F_QpF$Q2tyA zj-e#~4O!uW%e67*ft;c2MPbuPzniA`0<%9^*7;89X2a^Mf757cyL9+%oE8a4M{a+< zu2G`cN8lWWz%O{1k;YJ&KfV1I4PMLkhE{GjSJ{b%EE|MD1o!ELg~;`sVm#iautwv> z%h&9j!o#mRurG0qjOuD5?7sDjGw{e~z*VY*&9haw-Dy|E8*VX7gQeM5IiP=M5#6rlKiw|aJK=OT$Zx`V%5KHA)rk94{q0p4n z1Xd@tDIbrO>mH@k%GueXyhMeaE2;RWeRMztBq;pEY^#af+B4<#A|ZWSPVbboQks#A z+MDt!hMAb#2#@}@nQRM%(=twu?XU7Bazj@4MBBlzGynDv2@3IY^@!CZucv`dqvjVw z=a8}1#%`86coUH`Mi>?E&nVMtFZue7=26t%x%$$<_NE1vS3_@Cr4O&!>se@9kZN;YNg5j&QsqgYRLq9$HMY?fd0McI#GI zk}#vgb)dJ#hNRr{^WyX z8Urdi8w*23FSaW_r+;*{{6>R$zm?6N=WK5hj;qe90l41Aj)WDP3le$$-vLm^$^@8nm@IDa$k67{}?I=F(w|!~j-Nh(Hgz5 zF&G@rRnI>mY-|UVrQ(2`GVu}|iA$#kdSQJu&~Aqw`3?M7tVLR&tX;C7>JZdS#wCLp zJzGj9p%lSjEdn*tTW1>cyX)OCpWZ(+cuzS0!MO7mtfMQk@W5Tw5qoo*dgPHyU!4A( ztMCKlcTupMi#DpfdFeBJ(1R)8^0deh!`~Pq*#U-_2db(3J%{3=IXzM|)6={cy^|j4 zsrp;3nG=YWcr`~2;QK8UkQy;Z&)mw+6_YQdP6px? zT5+t^}od4|7pJd z`2E}W|Ng+g=;4_E6aQcI^8Wuh4wCdA_#X=Vzlq_22md|(&l&U}i2ug_y5E1A%YQ%b zf117jGyZ>x=WqYd?*oth8)N(YmpC0U@t=hh9P>Ok9YmuJZ5}@NtH|ygs}|CnUeb2l zBRL32-WrdSsm~A#$n^mib5&iBHIE$Q@MD?HpV;L6R8f zj#|6CH*)7BswJ1*%XF}3cD)tqZ>$22A-x0ajrT7{dmKd;cWh@^-7}Tpv2*FPcTkO$ zA|^GEUe?Bw|k8t7u3G1Tqok^lOBCLqAtzs<0R0|osG`$2qvScwl*%8A=d$L0{^M@Yp zJZ}!4z7=CY&Ry8=t$fO!nH&2L&2sh(s?;W-$JK<3pXVcJy*+Nq!F}KQ0@5=s*pRHz z!*=9=xp6#NGX^NC8ha*5ub|B0%-Hi!Ztw^9FeHUz8f03hC_g0e>FF5%!*r7s%D5)^ zEHwLW$B~z>N+ZpsB5q!~Jx{k2lY~2+M)%=76E$LB&PTMnBqU(=rP6#Uw5uuNbC%E? 
zV&HJ5y-4(bwnc~kWIL~EzCfOv*6So%YGul;m9`$DVM9h1_B%fo@_ZHW1CT+aMEJn- zMz~=Fw{A)Xk*^{4j_)ABboL?%kZsu-#1?`DP=I0tDT(XjAi83Yc={Iw{|JZZF|CuR z9~fA`Q;t=aP4vcm*%!DVNun}}(9Wg_guO(6N5B!c-E@Xt3Tf^M1}z$}CcrfEA-`*I zq3aWLe`^zJ3xil4c{mnF%DUR@eUmA_)wzJ>Y45drMth5)d9Cb@IWy?Bh<#H0-tAgS zWE%a;z~LbFw#CSD8hdZixSlbfcz}^HFjNumE^Gwx0&i)$>ZFNFZlo7 zR|Fwc;BM|T<0?0xtRtyHMO%@tPCJ2U-jFA@IlXA1M!s*%a(rQPl3rL*)F?VMrDogT z_9?-G8ZfVM)S9W@ks5Uun*Xu*L;Q-)=7CR+}JTz@e7dLi3s~wpkpJ zD)#>_Jh}@zO$}ctMchSh-ejKYdXV9mSDswBS1z3qaDakGwwj|7sO2th*h>;INpU-x zKTLKG!=(Lg@c6}VO=XwP41hAJ)4PLD>Vsv147ztYo6_?{EeH+9+!ehaEcSLcn*pt@ zeEd*kd7=NOVs(#4@%>8m*DItI#cR18-jL>jbS+?8Mn5iVdNH)4{woq`7A7UBoQF*I z@caEsCV;p{{cpkJ6XNY<`CI$0yj2_Sj{eS{wmREmgc3hn9CBFD_$Ua!-#_ad^7o&S zZHZ=9gKr>rqm`|6BFVAOrl*oh*2>SS@4X1Z$VS1B%Ayhb8Ct|b3YP4QjETH}{1AKy zYG%(`BB)awYx1984i66jv3;EBqEkbwvXqlkRoPH9*cYHVT-?LpysBv4mTQkgjRdBt(^FjZlo+H{sK zJ4?|AJ0wuX!5vfqtss-fOlEzY(W_h4GXrAVzB^3|->JG5&A$AARx?kbY97?zcZrHP zc7fD_lYSJLDvoQK`MLJDuiDnV$&M+u)Ha0z8m!fDNQj@vIu!AB%vG>BLo4-DgK{=E zG;ik&WD7BGzKMv9Rt`Tay*m#d%JEd8QjTY=lVPS|?isJhlzCh5Lhu@rReTuvav*~O za=Ptm0`62;HQ2-#Y{8R${eg%94O`*u+@ueJ=ePXZk~d;)wG58`uLRf~vGF|t_- zN(uk?+)I!VmNZ*f5sH@i^rBMJcGsc_dYwhVUl8#7ubrB=!!Veot&p+PUWMoA_7}xy z@??FtDa*k_*`17KIiLiRTQ-8eo*dPn_vY(ke(UFcA#sqY(v{$1@JpNUw%=x=Y0^+Q|6edw9W!0h=1q|@0V;1b0v3^6a~u? 
zXPvVGnRLf`n8OQh#m0sMh_B=2EaAlSpMRk)V&ap5zmEXz?;`rIi2lS6Se+L`_$eO6 z8HB9`oJ^9DCYG1DAb=q4+Z#Keipxod);zkmy_Q_jbG|{$x3OwZAY>MzpTyzn#7G(Q z=Q8M_pw7dCpBRr+lmL2vnYkIdg`PtDxKhM8PEIhqjTGoZJPg_1p{z;W?y`NHi=gFQ z2H}yauwoB$QACaQTvUYGaEtY-wFLKR`KAQd#i2vy@?B=D%*IJ-c%03+_x19o(6k9m z1Uc$+S;d~_+V#n08ol1Q8+GSNIF&qc6oUQU(nO$g4q1KOZM-wzx(f2P;jjUxlb}kg z+d69VEYM8v<%jaM6hf}W9P`5^LmRt9My=SDl#zRy22Ag2MtGBZU!EBem~zBM{HsU41aX=a`tRgOf-=lp}71onoE}wN>HD)8d$Zxl%>t* zALFR`aEALA{qY~+^=&qV82AV!#_MC7(z>kWAtS?S5VbsbQ=nvM)rvD6W4W=v{d+rg zXBjss0a25qtTDq*B=7=7)={R)Gk8mZmJFw@6#xSBn;b$}qF9@xZAw+I3??rO3FV5QmjpeB_Y2FJ14YcJOrVZ$(-27ut{w^e7LMmym;U zj3Yav)!%LOvb7k>@(N77creR5@17A4qw;Bcp={}TiKR4xW_z4f@(ENo3Fjv}9{l2V zm5SW8Ec49+ljg@ZZ8F~iJRVxZT;hn#E(~;4LxWjOkzV9mzY?fFGDW2F`1kLAX( z>2Rr-2a5I{yf*A_mUTTXx_luX{PC9`<9j(d5eO@23^`F?)12hE*-6M40d9~$G*>B@ z7MzGkDd5{|g;V$ymE8{>5n1)!9251gokrr1@3|EyJ({j(MNvvSy$?&2oLDiz*Ha&^ zP|7tIHfqF}m za+@8bumK*#L)BUO=;3Vmg8jamXre`QnQ3|*Tg8Lz%#?*bgG9QWL~7mAw7W;c5&qPE z`eL^)7K4_};AJG-{9}T#Thl__8bj=Yb9t(*EWs9#`z32}@EQAl*}Erprf>r_K0=8j zQg_Zx+YfgRBR~c$+%q(RhJBQt?sUt+!?u$a!*g`^(VmtRpNCm0d zzrvU#vkK4s#6i2#C^EE_Vy)avRZMv3{6K4atQ<<#wrPv-XQ-DBkP}8)2rn|`5TsIH z)#x-tg)lyH@ep%P7ee_HN#?pxIU<2dZ@wjc)1jDe{9ww!FW5WraH_kkw3vnU7OY(w ztTMa{6KW6VBD#+I$8ug=&W3P45>2Wj^mbXtF4qJ2pFPC4%>`oRhaxfL&&Ho!<>T8D z^6{0n<_Q&3_Dw~=OYT5GEOvDAxzRPL|0} zC$tzr&0M>@A|EOw>t#8>JiqA9zBn9yAfJ)7AG(xrz4NrKl*dbHw1g$`?ey*g56&{ad$RYhST(!#RyW(mfr&vX1A} zO0Pg%Cuch%8z2n|x#|FyLU&v^AkmkaBw9d#SD;&y9wh1%A|4|6AhsjcC)JR%86zqd zoVX@w@NmU8XXkPOW{c5)Mw=4Fhxn@L1ErckX}La!3Wn+P7gzb`92gsfMWw$HbgZ^i z;p5l&>VBrkc-<_?tn$wuIT8Ydv7=@s)qHj+5T4a*`7xrl{)R6|pQwwUKxEI2Ae22` z0M}Q*_(>%Jpyi10BRRn>ZxL$^`}EDT-`79)HlYmbVrjPYyNe8f5Vm9F(k6q_==ckX3?OH<7v`lZlW=#1KwZH-BcLvzHtOd(d@ zk4Kra5%Erz8~bfDAS2i|cSe6pWxcj=B}tdyDjp8meSh$alArdk#>;fWUg~^r6*}@) zsFsuBmN@Ir9T`|BK=x;@LEW+S(&|%;VcbjeB2K$lHIWoo_i5zsWY+j$VIz$~k9jqPHh~+#}IHIL}>@LVs zrPU+NvJhNZuA&JH<8!!MbGDa`*=6O0O0DM~N&Hb;6x62t=~4M}E=a0^u+gCRgWL;} zu^Kz;M340_6Bj|Qv)$n{Jlubg<&t)Vy`1zZLvQi}?>gIJCPQgr3wWPjHujSwCv 
z1TLGCUzBPlo;S3DtN&ZNp@6IR!Fq!ny{!dP-$lW2VcELglD&3&0E9-p(kU{5*|R*5 zuVK{$zBkvl%q%KTtToGM+4uZxMebhslzdj8xrw_fwM;*cQSouW zmYh!6MWk?Oi{{sQ(vmiRQoLw#o2igKD|#OPBi*W9v}`6C|t08C^W zv+-+2`XF}09J6g$9z)d6^B$IA6jR%0L>ewR|Ep5^%D7(Zum+rs_kSgA<)NZ3Un&0qT!dSAS4`RM(?yir+8 zHl0=eC7xU$?*Z&@T?_STI0|p0VSEncpq(JYU_8G%+>nffb7|JyP7fyP{ZVoytlS!fwRcmdnsMgiAyoZUg4NitfHPwPlpJMgiJm67!u94`bgx8qUxVtIM z!_{x}=cfi(H&&vsfoEo-a#PFZNCp5|y{chR^Y-3*n$3rhp}P)=Ujw_Yb5z+B3sh-r zODsYqLv|M-4wXFyb*<~ouu&|;rso5%Rb_t?w=9#|AK0xeVMz-dV$3a3C~U&`FLXey zh25(O6q3%am(r>OSP|QvlO;Yq0pVR)OBZPoJO)R%sB$oH-1+J=zUyqEBXoBCy-rkY z{lN5!vNL5cK-tb3<5injx0W?kf?zgN3}}=9z{cvr+3Qho(zm=sj=OsEF$?Y!4j{&1 z05bgV-5oGP+bQK&?n zFACxV{A9#d9Es(}9>20HeI5=Ry%!nz!wHLfoT4vqy5{STx=EVB6Dq*motV9{ zGPQkVv`)i2xbi1%>-cHRM3{o28GMYeamA-~_-6$FlL>TFIiK|Ies>+E#D1YB0DOT$ zkiJ7%+|aCqV}QU#PCz~_F)xF-jlxOhW-`)?X_X8;K)1tB+nPUUiP%av&&a zKjItwty0psQ@nVn2p${(u`nESuFzfE`tS%@Z}UC~W{xXZRwA+rGEz6|_0eB! zGS9bmoIr?U@)c;us^%w&8c!X2Ar5mt(6>NxWHGTSI_965_v=U1WWYNn#n4K!je_Hx z6G=aw0$rODT));3xDdTpo?Krz4USF4{`H~*BzP781IxG&1?cJ0^XKXH2iI-ln|Jfn z=fLYneTKy~%IUsnH}{OMNS2E{hc3cY%*;J3w7+2qKSP-LN!-eQm}!e0o90po$l>0j z!}mMOW9rVn;X7DKaZ)PMo~JNa*s{@#$UxOcXAmuVx^T%9WwmAo6IaQRL&l;CQR~qy zmF6W1R54~Z7B_0k-8->=jn$pU8zn|TyS5QZ?})`m%8el9 za9K6Xt;Lc_kXs1iWBHAX1&`fv_UoPq8&C3*B$2_6mRcS1R?fK?+My4FO!fb64!sU6NYayt)l z(Em}qW2a+0iWBpdPPX;L4Hts1#lo)h1yo^7gY42r?=tP9cM9|2_o_kC9`*R#eLH#8 zdcVl#IsVY%G{JS}0AouoNkHdIeqOfUVkf0_P7zbw%I&?UaMYRhv6F$$TKDg-pSya5 zhOjcjM*OBQ7hyxP@l}+f@!(*Je-#HWrWcD323#IVt=Wjt$oBn$SRdXuf?Wvj8CAd7 zVfyy8U>K+w0)_HTvUadWdMP6I3S zVU*V^yd7OG1?M*!VTD?2R^zYDXYXT2sAyTcGw)#6E>Oeok^u=KOYdw$SuNv_SUm&s0e^bFr39B^2EUk%!t_m%O=$3{&7k+d=E0_$ z#wJhG;eqiINet+uP{xzv*`PQfnFcf2mt!)fvsL)C25x0H%F20CBEphzGexgFhwIO{p(tCop4zw7V+}M7=FSr44+u4L<)Mc*A3dL86 zk?6lxjdESoH(NmV@cmLvQUP;PYDA;n*SVUH`NuVf+Ki$XO5xLQQ(69?p`dz{c zf5DA|KYE)ewc{2|E0cL^@9E)Xu_tQi+|NqA7My}@YzVnAk+5(kc5>7I=o)qt2OUir z3}S|?LmJYX5U3?23$cQbJXk@vyp&Nojh%!>b{npt-+uO!4w9ef!yg__2&x4_Pg85J 
z`>Sln@tx+b6U2bc&t%tU8~xN``YZN_rkS!$k+jBVf}WepkOvvSiMg1@(!IbF$450f z_fSQm9vh`6DcqmuXD7gk+Q^j$RzT6_o zh(1hVJZRg`&n{ZiXqk~yuxK{Y!rHO&42^+}gCJnPu2!Tz0}#mZqt^6}c$J0%Aw<`?xSMGBRNHx>Ef+-ft||FHf8NP4vjrXbXCo6>O1Vb|=;I#k%yb$Jxn)0xQeql}s5A$*mJJN}fPChT(T zdSGszRZn?p?R(+&iQBOWH*Q6nMaB0_VLH*0DDwfjJ$65>);OGJ20vRgoD!PA9k&}x zmnNF}0cdYCITXM-RS>T!n0L86ZxbmNVr`;cOK5CtxQgS=_We^9*rmK|&pV${ib`jH zXp6$DM6^nc;d=V5td5+?L_8V|aU-yQG|25#prZ?8LUYFS=^}qxM(JmOr*m`27CC15 zr*iEdxD<~}s?_!=umrjolg|B1dK%eRY3BO`zsOnq-G`RFq>uI`@!k0p>lSN`TD7mz zVUyiN@zE`#UAJ))YA{xAj$-hFz1%7|p$0tC7D8Rx4`pf9!Y?ek2eaGjG)6xkrEITy zjs&m1Ngxbk{AK7hmijt`^?qVBEl{>l2*thMt_JX1d~(ZpZf)QRD5J6qn|+_K~m777|x+2^dd3bHX&+b0Ev2E%-6 z3i1N3y3mYTdbjHa4iyQ0Y^JyAR-K#k@=6Hh%>Qcd+1pDUcGA$8aFEtY3 zq@%AL4hXd!z#wE7yrFkYPTpX9#u46HtdS5foS#+ZS5XjSItWCbhhWTPx~Z}nV<_Dq zhr5!Q8?pXc&~JTd)&G-z|5wxgU-bLmasQ!T|Bw3pU-&!hzxC~Z>EX-&hyD3)8}#Dq zvo`;2|4Uzo`G@^=0fhP=_E!^l>)$r$)0ZFa-}b*=Z~qVb>%u?#|Mdg^)Bo4j{YU@5 z>FJ&Su)muB?L5N2-9T7N|Eb>-zeMTf)dZ3ic4>MkY`9rWyk1vf>BF5S0t60;v2&E8G=*K0XCg)WqR4OBhj%u1M7!! zh8p#giGK`>kpOAzyaa_(=lEO}D_CP*V03>69J$)zMLf;<2z!mfEUe`$khya-zu`7^ zXP-cCK4u~AwYuwT!A)NWv+qB{-8?*DZum*Q_z*8nZFmv08nfg)@>D zr*)lmz^`ypTEfU!-+=p7-vaL&&k`jDv->`>P|yG#z0yNV?m0OU%51+pfKN^?>{WEy2&H1WFv| zmz={IycbH3=!NR>@8sF3d(JJWJ6+}Y+vkfa)=#*ahKY2}$_E8Z0qNKCIWteI z1?zQv_ca>4dKEOy?`;A=RPB~i~t9V4~( z(IIa-hhM~+Xbi$sE1E!FGtoa=fwvuIahcE;Zj`R=nA=ZFmv7ISVhEe=6(zIjbWRUK z=Lulyu5lvt?ZTDb^CQu-9M7It=XdRxEp)oSl`c*$E=p@h;@{gnIY7e>cxOj5uQI1Hu9yFhN>(DtlRXv~kOs8~Axe=62i&Xu@kv6ab7F;1g4<0{3X3*|bSr z6uSn>sUB8VojF^c|91Gr!}=0xqw%GNI5xk~k#typ(UFXLrAU+PFXi4nI{g#4>-ojK z#jE=5Ik@g@Ke4D^h=DlSgTRD(N$_1v1{+Hug^)z7SZQVFtTy1Oa*9owHiI6X!;ei! 
z_If`~N9bb=oGu74ykG4tMFa^r?ZtsFvdI&`1of^j$DiH5qrzm`uo*iW88>C-y-4-1 zdcC+GO`q22@RqZObERa9tIuMwBv5qo1_F^hjYa|(pPrL^9(YV_c)S;@Vz32YRNU?{ z0Zi-_2W*b@j7%%!8^0Xts>FViQ}L>T>uK;iMDasXWImEHBU%&jO6DE#JoL&vT)wq* zr=TXaBs2xv#=t1_2#m2VTIp-xOth+$e{X+UStcI z656oS??PU2L}e&*RoD^re$=7#y;#u3)wEhU)S%P&d;?>XB&?dc%GUx$WVNd37u zfrSc3bYRIF(@=B7Y&j?@vKJB=#;t?2Gwi!ZS^!>)@V&V}*|o4ar=KPa_D4B}gt~yK zd@%=~ZWy#PKxugfxPux1IY(Y0XYCt2x^a>`X4rK4?YB7dbQmg~PNeHEbUFe*nRhIp zZ9Ff|ueHw?IP`1D{udyb{)Z}0=?K;-pN>#d%U9V3+*}{R1VcJ%$K78F{2O&{VVYO_ z4MMfRt&PZav8C78*)sY}bc?(bZl>qu)+gbO*C}6QD)5PY=n?WMj`%$_es(3$b>xS7;Rv|!hCdl%9#$mR**H;GvY0{4DtS@xj82Eopta5siRQy+wI`& z=m&QI)%)Pg^6Txp#H-m0uD5^jC`(j+ymzO5u`$lQq*WkB-l?T2N#(wyK(lNb;f9ui z0u=he@y$xUQri8^N*86-r9UN3xGWW-D&X(J~V3L&qf4oDH_99Wv@52FgbS_i?9T9CVQm;v1; z(>;`t5hpUE;ptQo>Z)F8__Dj4>F$l6AbKXEEKJ^LZV{h*$lXs=M2`o_le4(SoAPKY zT5Yjr=S~j{!ER7N zXgCQ`1a-CWp6+CCh?ii)NRoARdysiY_r+g3N0Q?2DBZ!kr=D4Ol)QtaC38!|w$m{O zFLIx;S;Txm@_fcO)C{~_it(Sh(y0B_aVceaFP1OK<6oY+eR_wgXl)H?6X0u~yH5h1 z0Gf8~z&wQ`$ZFKCgz!Chr| zUS4LV1}tz)rM!vPK!?n75nbp7cwyWADE;}wW4ANQPwJ*nXX=4o{nGvE;2v1uN)#_x86)mKq*lRFtEI$s zqvUwT8q_KpH8-#;o=707Y=3jRe%yVv%n-uasMcWLY+IXXt_)=xR+eff3s;X`lykBv zkg@6gWEV3&>*^{>bi0RHyQMr<7_6y^5`e`c*gvgn@)?W{7EfZ<*kyZdD=sk?jhH13 z09YY_El{xTI+gr|19twuv8Mi_!tFl<_6F9(5|p0tc9sravo6Rtx~D*?P=!wm9&C-Q zEFg}71zB1N`x`5Rhy%@|Kz(N*FViH$Pz-D1`|$&xyBg|pXPXROe}+f@Is65Wo>GV2O;GZvS^lO@wNdPrazZ=)=(OfQkYDG&c!XZ8Wx%cTlzsyHLfBgk*VtMk?>MK0oygJ`ja#;$6QQdc%NDEi z$`+sD04zwSXo*TO$j~gpy8}Qdkq*1TH8QX z%k-5FOhq{@LjBS-xgd>Yv+}l~ax8`WEm8P>pmu)mK;47kj89 zSL;&xr_&fJoZ%@x{2s`6!iU$+89+{H-qg(myO!6fsqR13na(hce9Xg!(4OM{}FZa z))ojf7PBBj*bg9v@Q<4|7&)*NEX=?Ii%ex(s;`(O2L8(BCSC~`-8towb03GBI(%KL zlAvv}t%2AQzz+(Sy6!9*)cO9Cv`wi~!UFXScHVQj|CMI38G_WKc+DPoIJUqaXQJbz zWrqxbTRrpVd_C4urY4V*!z4eA`}^rj6T2ypqVE0ZqxNxlcZZgH(I@c~g4=<6xU~x@ z#u9oE0FXIG|Lrw@-B~#P(0qx3R(HNnT-5V=*jQGa_zcc?W=wThfln5AVw7RL;c7P> zI}-lxtYVrdIeHc8m1;v6UDeJ?pKiGTueikOHAy72G&O%{E5VK2|;z zGYLF;B1u|e^X&yZOJ}IGQ3?58lfqPDC8I=pRjG0?-li5MP;6|C0p9}Uu-}Ad;c8rH 
z>ex*FE&^48HC%LCoIH&(+WiGhRJ1EUe)yAW^%ZzjnAxLunG=-HsjEh9ach4s-7?&d z3wrdWhYVx_smc>aIQ>`A7!TyAuRSh5d07H%Pqf3eQ|!~JkY?@H-SUN&6mq(@J!5l! zHg?3}SVIG}tNuSf9Kn3Y7(4QxX|ykK>t0-%UC8hQjzZ)1R3T}>qF@W`(3<-7@K|Ub z(+H7By1d-TAH}-xF|I_xZ{~jG-djt_AtJt;F>Av@{Xj{Y9l@TLyO@td_*fFP^#LTA z$Uze2Z3*=73YxFu62EwTZg!FyAYQmo#IaG&TUQP^>|-(S-PzQu6B5Xegb?K^m%U!z zS2`pFU`0$Acf^5+jV%_4fLBF&cu%*yq z4+++uDGe{0P6bkZsXI2fLU8XdO2f3fq;B#J%jgf%0*i3l!6emTcgl)|&C-Nbr#9~e z9PMq_X=mQ$T39~*3b?9*lld57XJ~*$zH~*{iJ1LWm&+U#9qo$C2BB14hlGdrRU7oN zwTe5Sd5X3oLJ+;A&D{9F$#V6?QJ~g^3i=~q-GWl3b{Ek!vQaHGgvn+&1^QDeF=Dv9 zO%h*TD!Y{8gv!gld{plnS|@C2cE=mi!77IXXo}G)ZBDz}0MtJl_+UrS-l%MnWQGLee)sB4gWH ze^>`zi)cM`tVVFchtQ8dP&)oDU(?a~(Y88SGawV_zJOEuh=3g3K5&)3muAuRM(^ht6P@WXs_`ij@DXTPma4I1`?d+*{Q zI6z1{7famb)M2KV^LUtP#^1`Z`gl zMFlt7PGJpVF=7MTudw4f2o!5XhcH>#M(VC-&kA{Qqx1?$6#S8RBNsIMO?>TTBw1o| zm~WW!1Cj)Gc!-i>ys7wQp1?rJfkooCViU{>rA5abKw$9YqCd%Mc!fd{;n+Q}#|&?H z!p_p#`P5j`t;#f3s6Qptwv$3~d>A53Vh9bgIr$=pm|eN^b6sy7Ayek9NpKD&c@Rm# zs}c9sUJ#D9^8VDHrB7y3Owr(KUJ<8)DSbc4YqD-5#V+e zT%4yq;Qs8*7)-0tqhf8%(^Zunm&CZ))$e+0KUSTMnK7$S9b8tDS`N@1K`CZnEE>M=l4NaO5l&Vp7Y6 z;=sb>i;HN$3Z1!q?28Pd+KW{y+tjT z^MQ^0%uyJhrU?|OJkE4zcb3E9F@nx+o|y$(N-B@Z`lGtvuDFXtx9-*ZVyU=r{g8aP z=jT^7x=_azpOqXE2yYNv_YonVKWtHc@i_TMZ;hVLg3XHQHY^@L1y8x z57@$|KX|zMh_c=S`g_wp@dfzbDZ-ywc8mmc<-!KU*l((8cZbA<-=z0SApEHpslvr4 z@mb@jXS}PFLR+u+n!&dwVk+d-17@S8C5d@X5&V3Fwx# zOF*4K(eAy)iSQ99z97U`to}~%l8%Q*1>mU7kaW7wRj0x{d{9^WOFDcOAU3UVsHT5X z+5D*#8Xi~MitpnK2=t_kp;6TsU|xXng<&1W*ck%|mNC_Pce zK_}sZ1>YTDiACEI6CsZ|k;7P4o}ORF29+Wl9q6Tw_vH35HI|%fl&23U`}zc64~~=( zB@`QBGPsXh`DqPMHNvydLR4BNQ{#nqpq-?_Xf1hpvgY9Z>j*d|GRlJhpRDh+BqQW^ zkmbP2K{Y|;>+2zet@wT8ew_R9@t$Ch*7XI9_lh{dY1@7ze zF{4HdTGNQu4qS8MDhJZ%4fLKM$qf8w-$#7b9655YZgr%VziSPoML`tL`Elu^YsPR~6zUeL-qtKfw zickBKGz`v!(KrZjvyP@{MM^lqHjG~SAbQcwo+;wcaytgaPyeBd{o49|{hCoI_X2#h zzQdw?7@2t_Hurle?Jw#i+wB1lG=kX#wNlmh%Otw*UEUe(4)!}u~&xQo|Po4t=- zJUemj)|_k_KU&rv&j}xG-WnHAufd(UTHDzw%gZYG=XIO;t}%2jpE;X(J&hf1K4p9O 
zCfp#=(KpFKN@ksaQIFy!CXVa$kZmf->Fs3Hj6G~bhfm@X8RCvZMJD$T1~rqT;yry9 zkK_tZVl+;o-1on3I>vmZ_fQudHe1!nGW>}?C8t0jE4fK3LIk6&&l}p^@6m4Hgh0#Q zY(9-Mi3EL?M;0?Us%>`y@NdHzXe+}yxPha|(zss#Qu$*xF zxPX=*#+%VsK}NPXu@;FmMqE8(tHROp6QWV62Q;n+OE8@y`<(16&1@QW zq8`a@&nqth$AE3O_BhV=GffaXnPV8gO$M$vMSdmg%ilTd22z$jQA$I62g3Np@Ai^L z3naR<5uC?$g7`#Uy5`Q(p!RF|J&IA22ixJ$@k;ca5-oqHot{cX=)s0W;bEL-PPere z%dP!5t8x;DroZW(Oj6V%pu%Z@HM+BNJ8l1-v-{F?f@D?J_j%EKOg)>D6L?qgB?WvW zQ-O~xlaF&8u1~rcoQ^2X*zDVGF!`@w^DK$d@6#IDAuBAQ|N98A{~P}o5PjaBibN84 zd?)&1DAyAk_wgzwFX@(DO>-5t&ZzJFfF1f9um)8q$wc?S*kyfSw%B&_c`XBo)K|0% z>>wV9Oa`zr>aA<_$Sl3LSML!3PTXWBbnMJQ4oh!|uAR+;k^GG^FS6ftERMll#Yi6g z12h&qy(_b}N7g_Y9%Ckb93S_<3%#lHf$eIx zL8a?0f8Sc-PY3WM>`#@G#KGv#zRE-07ppGL=LhcKe7a`}Ag(SOocfJeR^yN6u; zxjJXahv|8fp?3UtxS5VgG-I~b4aRH1KqM*{r0@Wc*#~IH8rH-mJsl{JMj644{CbMVw7cSZ8JEbH7)^8G18xt zXf?FCe?j1wz5mmFf#*vD{hxHqPn`Y+w~AEOfNtMh;B z&-CvOV31$m<=`Lo-}nD#p7`yT4g6pI|D~w-mkap*AO5etAmsnBznUQbSw#KsRnWgh z&-wljzs7&Hfsg)e<9z(5F3j|`tfRv#IYaucK?F3K(q`%8#d)e_=fJ=aM@E@okg$k2 z6Bi3BN<(tBGD_6SdHE(fvUt=Ambtp{34G<@7%H^nBe%s;45n^hZT zWMlKU&wCZtl=&sybiEOX-Y8N=>wnV& zQ~POfmlHRh9z3eZR|$XnnS{@x>>F42+xv7G6G`bqp1Jvls6OoGXqV1ySGSP?kPt9B zZ&!ge$f4c0O0aR(}FqTSgr&D!xE|wxry$qDRXwpJ?%S9uxf9{ z7JvH9ky{ZM_}eU;t-N3!&Kgm#Jk}$zx5_D}$QAa;^LyGD z%JMPg8C<;$6~nzot6*>!sA=*#obUbA7Bj?W&Uzy_DQqFl{1LCgwbH9XR86a4wgy*I zoz6bzA=zsG!C9*Kuec6_$5{d{^`e(uGblaF!K`wn<2B|`m=0)95CPcB*QqtjX%|f5 zfd(d!Bfa47SFvG0YdiHqt9vU2u*`_aeXqXWTR?=OhASFAtKIK1Gk1dcB=uhh*rT<0 zvx$2*ixrY*$CAiob=!0HENi{Lj@+3ktId@5f1Xv#@M!3b4s59aYQx%<+o_?eCfvqC zJv{zMceZwiA;W>@8O@lcinsc~v&QSG?vq21-SK9uU<31Kcunq>O5kS6g^zyi`>Cfw zCUO9nU_9CV7+gjOw6&eX=kjBkUTkrH11&Y&Yfl6qr8Y5L}YbRCddRN z1VxzBy#lCy@vsDcmhsZ?`jc8wumq9;mi0V6vb$&DWw!e8?Dyab$YjoxPRZ|xCtMkI zy%nt&DjK-_2BH0&O4iq- zG8G$O&H?2d2Yb)UkKa1Up94dcM-KB8ICGaG!|Hrduq&3Zdm=%}C%`@r^>2>l0#hXVhK<9f7RX1t$gCiShyitr+rZd(Ju?#SONjTDZQV{07l@sfMQ}9aS6JaI zj~`pD;E!cg^ecs@g;puDz3qCcv6Z^pocmHuGjB03?k1P4)z@wTPxF}xD_n6JpaCya z5jyOs`}djtu)imzJo~25momtOZ>ulW`u5Zl=@~JW;}{e@pE~n1GjY!~*8<{O5)M#6 
zbz7^frkIbXQCB|~b99W>akEw8);EnGVs_i#jCTeqQh7!( zcsESou&zPBDL`Wk1|%mAQ%?U{)+-zyl=MJwv#Ky%wxptyZAgRJ>v*L!eg{=rIcPNE z<0D=5tmN8l41-X?ba#@;)Kq~Z-+mLOJ>cvu#fmCWteS7xo<7?>p5MeAyhZJvQ*XX| z6QDnvP;nUlmG0WDR@6u_dEUCBHAhWX>=IgX=k2+=hXux<8Vuv*rh9UYxIQiq`5VeV z{VG?*FSBMU{V2clw-DQxg6sEpnSLCWuC5p}MM(SoyI_ zeHOa~3~6*T3_gI?pNYuz!Z)*mjLHNAKQ%|+aKDO3+S6XO#Er8lh#rJ7z10+lQD{GL z;`6aiWd_@)uWMkN&AS+^j2;NP%N;O7bG8mWl2^~?-`sJ%)i|~FGL_OA%=G47-pCkI z^99_vN@qi_C_aFG=X+_mFaO1VOE+N8jN}QWOM2|JZP_A!M@cG zcEo8SqCeG#K;?w1cr>F2)!FDC>h0}%QwypcK*D>U^+wKx%5QPqG+Flgym|XY3_s2Y z5LXYydY04$c}&kc976TAyk?Tsf93?_HE7vO+A(&oweEWaYz{Gc!YWeo>~UI@)<^d& zp-4Ty3e(3~X2vh8Wvl(77rg25bJe<=Z@=0OHI8*Gyr(Hl*T=I<8nMcA5A5Sz zyO{P`Xu;)BS#Onnwie@{s)S}<6WxO0afKQ@%6(y>I#`g1+(g(Q$!>hwhiR$o4@hil zp5y&Xyj+Mq@4?Gfev^Q}mtx`8$+IcZwPX)2V@SI(BK9BPAF16 z!i1W<9yD`F@UDOCmIfmFnH?;{A5m-P0=iTXIuNP5u!#r(a1$`0P_iBS6)le1tFz5d zehh(`1C^2OdiE#<=AU=<6ru$jB*mWmmTkbO$(#TIeVAy~~d~uC0$oIiRrIS0Bx9 z^C8a{O_j(rRJQ@oZWI#7r;e>ppP2s}fdmzL7twyU>A_;%|=Rrt= zJ|#*^aVla_y%FXzsiDvocnkS0Pl+0l&RkclCXA|Xu8aurm-X~LX|WlZr@e5}Iv2$I zipx}cH?xf>6&dC`MPz*qwN}#0GOv~gTsBv?-gUKr7e{<2b1iJ_8=8n%?}%Lvsz=4p zgx!JNFz>4Ox`%1b79it6KhE37bgi8QWu`myhz9ZmOXCf>LnCiZee6jQpfvlLaRMl* zwFzpEIN-D?Behlh-rTE+81!x7;9lpA(0LKz%(5HRyu3tS0T5~gZ%ys{I8VEsrmujj zqW9PyPd@@%lF6`dQti9$SZpEBoXQPBP$I0|N%i9+0oHK0pr1*{PZ&e=B%G^OK>Wp! 
zMDQNN4Z_ZyD>Sp^uDkNz=tGbAY^Hb_3Djo6x^gzA3Q^h!f-#p9AUqA=3{+iG8kK8$%;r zX=JeR4>@_g2Rg8z9KpOuTfDfrgFQDFX$|?!(1gi^+I;$4KbQ0Nfp-a>z*NaIb}nxK zo>1(sGd`m+f{vHs^4GOsIPF4rq&Wr~ZH23tluNXGofLZ#)l1ELSpIqQ_ZNM4a@=f zoQ?V4hdT9CyahWwO+*sNb_hQkE*GwQ+?+MfEpn*U>>1fQ7s;_qmP=5C_ql8m#dJF- z$K!?ho#$uhe$RJi&L>U|mt!)|hdVJ;C}%`h@Km?(?K(b}mRU%mTvRuzRP<=S5O}7D z3><29{c!3A=Hgpp*fd+P@d%kmH=Fj7uqZ#%4k{*;yThRFuY=^-@dgw@V8_(C>16Gz z-zCLRD86>DSlmaOOkO3bFt}|Z8)&O^G55J!O$D4j3uW}obi5iF3zV~Mp0Qy)h-Cg8 z65Zu!x@7@+Zrp%A!g9c;fWfxF#C3{ly-jQTK^9E)+;ShD-tp>coN^ojZM5nN$uJ)dY)xQmKoa1 zW5eWQQwM{H>ojfQ6s&>7KWA2vVGseJ=nApA5fLd)u0VpVh@Qf6(2o#=E-EHx!{$z3 zd?|4!eohcKZb-)v;DbPibbU_(IvAXq=pPmFdl~ZB2FfyQ>3E*N$O(N@Cj1W-@}@mQ zq=ZaqK#I0YL*W-!hcP*+<;&nHB`nX3X6Xh78R@bENJVN^C?<#~iw*-}1awiWmRb z?QnDW2ozSe*|<+hR8`x0C`bwcIp&@?_5u{PS^9#*dRpxefOAc_zTNwp%=(zfDs-^W zIw@JoglIE^xvMp3S`Z=Q=+wjkt{g=aWP#aUedUNXv8yDmBQjF@UAjAL2r0f_lG)D+ z@8^-yDolT6Oa^TCfBgr1k&i)gmyRM)KyXae>n`fc046D#7L`ta%H6tz505Xar8b1J z;)r|%PSm~2cTbk&LW&%Z>61gCF}qt89MpSa_JQ65&YGS^eAOjzNGb241ynwU$?<=F zCsIB`9bw_Ud#1(BI;%(_i|NaW^dW{rQSqNg}A{37VSb2>^s z8s_SIZoHX{HuhK?|InNG{4vqCIA=Mt(@lZ0e_8cGDjE^OYoq1xk)szR`izv3_(Ec^$K}`Kj2|}0RTEEbd{7EuSZMny=bfXd@8hm6&?3ET z(NxU#;0FCHP5iC`$#+J|jDUDr;7u7y3Y%7-x6y1`QV06#bVJ2R zT&Olkg@M|Ld}uaZbT4Mk-{(!x_om+ujzR0P^J0oZ(*y-QS{7x$&LtJF&hc27&ts#l zVa-82p(k=^Ouixe=}k_MJVC9;_72y&!MkwJJ!V8{7Iq z76WeRlzbQ0mNr>`ht9e{|E7NQjfVE)VO*LYReo?T$8H7_dnCn7iP~vU=bnov_1;Ge z<6u9lDP^%k-$8^H8Z3v=p-~@fhb11d9@J4i-Z)sJqFr=fe=3lq;a^J6POHOO`2)tjpS_Sgq)Khv#gE1&hN49M!;c2tpzx zmxq!mT)4s*dxofuw$x4+D^?(Mv%|c=Ue|o1kTY&WP6HkrO~HEv*M|o)brxSAqSL4H zaW!N?HP!&Y9$RbC&7y)ly;56Px_!7tYXZa~vnDcyeRSi_9YrpWQMlpz*e-a( zL{<(tfA+%?21Hp!lYbJky04i(8$ssRRz&aDNa?%@GyP2)bS`6^U;=#Qwo8707~KMp zx32fx+Cxo*rg(WJO=t0`yu=Qh&MD8D`6`l={|mv!Ld%U57zbYnr}~4kMW)e%Cv&>E z3Y02ch7Bb&R)6g9JPa`$RVWF#!C3dCH4(kJyWPh1!6sRD=+qk?tj;R-+TXiwbo2` z)#>gvv#P4Q`kcM@AwK^?^_9EuY&$lSvZAvhyQ^)QTd9jq3ztBtm;CN6{X>_qwGmP; zw$o4*Exd(`zV3G*EFO&n*eTt{&*gM#Q!^UD>==EuI<+C+z_nDh>*)lH$1#Quf&}-+ 
zR@;;k8y6jPbs~lz@P9aXeGxr(wP~6zN-0KMroFiy{i0yN(WlJaUQYpaVpD3^a$49p zIj3DMyY=iI0n8%1ni3m@H85?pjdWYU0zuAYU7i;L$LUP+A*rc^ObeK4=NQ7oN8h(c zdCTm@Gu*w5#?io*_%II7R@%>9bT>Q8+A~{=dQWBhYG%(Rl>9m?S}0YOP!!>a3Phi!<)#T}Rr=qhC|YI(cvuo6J9;6V7bPa12_( zxaYVv#^8B=3P>R?OugM5wAJPPQef)Hk*1aZix)CK0gV}ue&o-bYC)xyLHiv&NzOBE z+ted%`^7M7j`DrZbC(%dLgg!h!b7SI-t==vj`rVa0bJE#F(R_S=mieaJWhgoJaykK zAZ$e!xwIvs^HVldw-A4{F+c?pcj%kc_hPZC&YPWN@)PUn3l4J`?&+lof)6PRl46}= z5q?T;NZVMQbsgV0CdSE}alVe7EH%ePkxVJv2fcgOE6O?Uj;4AHtZSZ@nJ%Yg+{6{l z{@?Iq0k@oscS$KU zHr4|@g%pg@>N{8NhJbl)GkuqW-+ZArlL7}@l#?I652u{SmG!~p^GbvHp=Xnc?^cO2 zWU^55*Ru(XN#D`3^g}ME-TFNXdCYaA%Ua1PlGCRO7RBJtpJMZlF}UWmKwQ-% z0fNT4w3&gKErun%wiDi!4o-qw#*+4{7}H5;zc5TgD=3RWH+lQ9$X8Y zM!jcnXYPAZ_ms1mg-byYpcfp_nabM&V!LR98|ud4 zW#Kb_kZoMjt)I|iE`6=J=Fq!w;F~etekSE8OayliEfqzO5yKkO0|`Q6K+&wdWyStke2+ZY)nB>y;+hW-1Z zx_muW!SwPoMzTY*NUI0h%v5v?GYe>1qthlw-pTyqhTsDYpn^B~AB&;q%e&ON+Lqjw zQ4N(WnhB$mcSbS$%TDS~w9@cD^J-PH@S@4Oi3C6-?H`56e!Rp4?+F274}84mug{vW z=Ub>?n~JHv=)ZF1us#pv?kWeR1`KGuaGm1vJ>$vCO4LKwH3ga_nSUCdqdd%FFwfn< z&oA%!0w{kQbT%!GGEs1J_C+gdj!T|!nwM*pfXg80BDm`$ zD!OzUirS1{W%aM0oDJwUxq`fKl4wr+hX!8riF-g+%El9`f~j-Y@ipErGwa-UAFa0w zP*UQK$4!s1eYni)2q1y-Md3R(6~tE4I};MNwI6w_58^E!OjUZ-5#KoVit@N?N-LVa z?{nC-oHPUp@^W-7l;#Suz8ls3B0T6cKS7ToUu%|2Yt-w2J&a!hbR-v^)8#C4z_AWh zfMhd!QXaEmU`>CK$+w>po?sc*^K%gkI}f>gU_=b@bnrsu&{ZpKUaq++2=_k1*{-!_ zT|#-dVp>Y}WD6M|X0%bJt2pPc1Uk*KbH#%6j+O zO1nySyU{CeT|ufCTdl9TIgjV1w2ejqnwCnQ74ssEX-ZYB8UD1JQq|t`kk5*a%6`^2 zulHxd;7lk0NzX<9YvogL32vCLuJ4cRia=s;q-=i($ilT>xT!cTwPj*5%ph!<7IKx^ z>vvze>r&b3_wSXazaK7ytNqzlpGRA({v9cVV;7@_Zu{V(e{Fwl)rBSnW_X2_;|HSf zv8Iw;(?baHRS)ov($1gENKc;$RU236Ni1$kTIYI&^Qf^ z1_>n70OYL3s~e!Pu(Q~#vo-kOc`f2$%Z!s}l%>(DHg>%BGafA?cS$c2zqn6C;qXpU zGj~vfk6!?IbeqDC1x>hhZ8N)EGTp2oPdn>Kal(m-k`Y^MQe>GLG~Et+Rv(ub5)v{j z{bYrq#2jTdUw%VI_1c%@t57y#q3{o`Z6!;=s^MwLS-_XCEFbj>FqO>V%$hINqX-^t z^~zA1CZwWj=NgC z)IklO|LgbtZT)NiJp14FzvzJfi^WdyA9hfZaj<`4UjJ(Q=luVJ_5Pn|2E5(>LJ#P? 
zF*AStQdztg!ufFLI#sgRx~F4vrjgijM9U8OBlqvsKI_PmWM>d zY@5B*@WBE;vx`=YNiT0NaJ%Ps4&lu}9V&iO07H%Qj7XKEAoQOUu=q~G9U~nZIv(;+ zYf1*q_kC~V3}AdQLF$8rtae1?OlwRVinpNX8D%P$Ys{37f)Oa#*c<}cgIep4R*4kp zmGLuhlL)hxWG0qS6T(s(f5uVoEJgsCovFMMt_c--`R zOn_l8IbO<(ZO_e-=_)FZCucfm&!itAMO)=)4VK+|9X^=JFKRGFRAi?U2Y#(J!2Kf`Gvo6mlPMm3KqR z({rwH#U|}YWhL~M%@wY0z~}j(8KZ45%t99&{!^CR=gH!jU2cNm$4~03?9wXPE3#&0 z%-h+Y$9^aj1%^5T1747re&NH6H4@+XifV*~zxdJ3v`9I>C1nyBpW-$=maUffE~F17 zo8oWY^W7`=^u6EJ)due!;@=c`Hpc`Iy9yWLOGOw&b3{~8twpB?ubr0c-B*p-=n;%u zTkCLZyO_5Ob)6BqXz9Y>?u}!S6(KggHA?MEe$=M$CR;$VURpWruj{fX1W(9`?c6-P z`OR_isfc`OSztvl^HFQ0Xqjtn2`wu2Pg2M0b$kPF>6|7$&(Qwee z#^xV|eD779#Ns}+?8I&xO*etx6ZDG_%W1I)Kpc@0Ax76Rx=%$#NPkW9MZ_hY$6uei zwd)f2xXK`4Q2}&>DgAGq)=E?SaEovPvvPM+3 z0cw5(v9eTdfvN7r#>dg2v>tRuuUj`YZk_KC_NI<+mj6nIr_zpIi(=k~AVco|vqJ zNroh5%qD?Q>q!x^KStmy97q5;-y*fOo67!|hQSJ4WS!}J>KCS=M6oR|wu5Ib+t(_U zW7_CbVAr2&&oJjmHK|b?jD4_h*R>JmOUV?|J zTC=w?G=T4k%Y#L6n6N^zgmH|;-5W<=IBoKQIQ$-Y*l2FRF>-zM>LMq@mV+an72VE+ z+*OqXp2`uyehGSVWh^qqj~^)C%yU!+$-W>#z`0?vUDSeoRj_A)%#qW6D44hYsB-)G zNE3oRz}LBCQw@CZO0l_q^PZ5p)X^J#jaIHP)o#e)bqDgTJ-eS;`!V(tV-<;;@ii^O z*Z4Vq<^TGz{HFQU58)fFvW1-=IZ{KUv?Lols)!@&FZI?u3$E9d!!L#eU$SV4N>Ms| zXc>vR_fC}>0S3n7TU9>8Dc>iaZPZk+6mF6mGAyIE0JN$V3$10FR4rnO0HDA1MjK`y ztOaByE5^mTgIgct2IdlMn~hGZ&4c}El4@(aF%?;eq`||7?b!qSf3XW_L2zDV!r~Za zBvVU=iOVHO+o+og%L-nW!TO3LH*VOtXH+iml^0#@FSF!o(YJ?Xjkm?ja#Zq$SDa-f z?AwQSj`yWF!hjzZiu!>?+^2qc&cfo7g{8#pXXXWVX^4VpCMw7YEP{2 zS#0?*ii_4uS%j1I5b~SgIbQ2|U+cA)cJAy;zT{&nOA;+jf0Qm0{7iy$Fjo90ck2xop23eA}lQ>pOM^qS?9u|4|XvKM+@6W8{I=TE)25S zF&1;5rp21@?@yn;-z=+MS0KCcz{qupqH3#xi?8g}%doBtH;LRa{xP0AeNU_|M)Hvq zKgxo3i3i}b9iyW_M<~+$Ip%{6J~(sY)TEujl;>Sgd&MMtoBMK9?F3(zZ;AFKbJhP? 
zXtA{E((nd*52AzYNY{VR#8w7t9Wl;D-&71bu?eCU_yZPjXW(0Sa)ZcxSvc&n=< z0o)$he^zch5sRyS1x;R-V-jO>M91T#4OzSj#T|`!gt5NpowRSREp}`Ja@gqq>lg;zYf(2IpoD6trwJi{ zwJ%Pd;)4>gIg{zIr5{wl8)Yd#2+g(EA}R9QLrcI{s-Feu5GM?aQr{Y!k(-o#L+0yW zMjw8sKKS|xYn_MVG50(9gyV;{GSrm=KPIoeE&{Ag#w8#6eO5;kRj(*yDR7@9pp-T{ z{X#leeM_n0!^H%56fU+K5X9mia?aP76L4sgYMxFX@L($hWN(NewR7eMk8qg3)Qw$d zOPyeFdpa~;iP!nH&)bpWfW(V<-8>F>zOn$y_Du3&KI8b$e#r>E1edaJ^Te~es-j6b z?syOho?f4JItsrFSFzdHk#y$9@pvoJ9O>?G^NqRX;fJ@Ug|c}~>V2c%=O7nf7VKVN z)pe~9i2&aEQ1}(ODkB-1SI$1z%l8>MWpba>Ff%LQG8EEW{So(HizU4Y4ZMD|>rXPS z*CXKIW$HLlMaSs$wTlB(?3Em9TAW9@w`i~@N5Gq<0;hdombHXP zJP856_mQ4J?+Vys#mZabAl!xyd{>)mT*D`t`N4495n4yp`Bcr&SNzU3>#>HoJcG|> zk#9Ou;hTOZbGV=4sLe4=k6&?SEs=k#^={O6_%@QpWV+{cq2n|MHSWC3np4$UO_N=F z>HWML7AhsuQb}TnM(?9!jeuNU9E?v2_MPEg0Ea#&;jR`)1#|qos$T3pYc!&O~|c>6ZN51u<^FVbG;wN2=ZRGZ_0ETiG$0 z-tc58oqQxGPbk^CQ=VxzhS8`eH}4h%c}Qg>|J(FZb{fKi~=$_Xu~$} zQaIH6Wfe(DGNiCrzVb}c&jK-fUX_}tk>Aw@PRrZukB-p|VVdwf#B7YgOnpk9WXLBl zesk^y%LJ|kq`2bqz+W# z`DCQ`U$Y#3Q4BuA;dW#~1Lb~ygc!#&faE+Ke;7QS7z*dxc(&tQ1q(hD zRVqIFo@VzCzp7pa>5pc!3aB@FC=7?o`U1xkUk7h6&9INz}cV_APO9 zG%2xG2a&_SJD7(3SRir>fhPa6;?XeS*fvE#ToB*&Hb~5J`C{b=)AAMun@ZZ_p`(D# z6Q)J2(RU0K?9LFy0Xn_;cGS{&hFZO=S{7BwROEZebm0oQ92PB2dKF#1Xo)E#LqHmg zq8Twil683%wLE(B#!jC`V`^Nk$<3TLUPV9fQ4~zDENC%>jimO-40d1|cfX7>#O2oK z3mDWJSW>N<^c-cAp=|H)jH#6lbt`C~Z2mSLN!1Dvv(ZZw2-3~>BDr%6q|pJJZ=s#> zX|`LDbs_u*DdguaMR@afbW8w3Bz_SJ1+?Ct4btwYteH5sT!d#bLNII9 z;@a8>$p~=?e3y5|9+~Y&9l9{@Fy+I7f0;QgaD)% zV!^1sI((aGl<7w88~Bb*Nm>8ZhJzA8AiH}P*-wj;11Za2%}0_6S|34_MbS;Wh1(0^ zhO7_O#X(Ug%d16@ZYq&)h@>X=)5su1K4%PsvOgI81$8ZQV4UNwgnv?`Qhr2oY74H~YxSnA?@SGf1WT;J4b(pUH+E74Ef!pLtk+H%@UAN)8*-xOk1GU12K}N0 zYB966^n@IJVzg{6YE>$KF47vPqY5KGGjV@jfa9l`+y0$^irrOU#6nB?JTV32lRy8< z0K*3@s5(d_(SGf>D$4$&CA*-vA)kI8kb$klhB7-+H19}6%-u_8ecet0zn%TVK@al2}_A3Z``Kpe?7*Gc==?pI#d zdKk~>^eA)J!XFPOo|FpIeEI|m8g78P8T};yj?mA^Hf?W><7_O+QZ++2tU)?`vOiPy zw*-vvE05qVX`t}_EsChhx^_AJYn@B;@l}DOUFE!!NrQ}uzJbQs`%{tH896z*Qu({T zG(nD73x2XcrAWQnglWY>UTx$$Oe-ikPx$uTb7@gSj-JXe_($d9gwe_v 
zYI%eq?&{cwrB`TiJ0@vGn-z66zJwqG(k(o*&^d=9HW;GQZnAmv#@F~p0i6jd2F^$Z|-(`Vxc#*XMg>42AsuM0rL;xf#JR#b<^_ zE#yA1=!0bO8yJl=sl18z7#PnPVdV4}tyXUAE%r9yP*QD}*Sbwla4Ce$I46*qL>KR< zVkmV3>+=-OrpEhPYik!h&;q8OZ7RJ&J2QW7KplNZ5ALobybIZb!DJe`^PF{@4i2{* zYitac4>U#LU&&O7b{eCUu!uw~WqLGVsok8qDXZs+VQ?^&S<_q`Ojtsv8CVTz?=KRv z!Oo<3qY^Pg8jBMf+iq?8|D!rb{g3R}r-rTL7aeL9QYRTpm2YunOi;LKvMeYNENNI3 zoAl+B_t$Be7h6OhMIFkE&AHLNr?Ju(3BT5!q zdtKiw2-iKJNLoCVcDrsaVr59T11X2nqY|#OFy}pLelZRw`k9@M;yWew8 zB4$*Jo$IYhMRzUf&s6^`MQXRPT3N7mZ}Og@%{!csS0qYbMwpt6&jWK;5BX^CE9n>7 zH*v_+1^ggw4!Gar+<;`3ng-7ZY#=L^j7xnw-Rgd%tlIXq3?rMmqwG-& zC`nVF;aNcz$HXVf{?ENSJd+5$RySl?xH-dswgbk}9`)ooaHX8g89nfw@R|}`^E4wy z*RQYIc<#?4je;sc4k`yo)8hs>GE_t(w3V9k>m&#BILn$2a`*iw@yLp;KRQN?3XSDO zF=i9JZ(WYymR52uxAM)>PKK#9j??~tgyrh({;JBoA|-uiC>d{TV;)I~WIy1&JXtBNc>uBb;;^2l!Q-U&j)w z-FwWqG4b&C&v;ndz$7QU;#_??GX5_~VhU)#Wt4*R$avw3+w79{kFO{QkERilAf(TD zW!b;tXIro2lhY*SLXQRQWK__JlJp|QSgb74C`uB%N-)Pw5Xq|ybd{3h1;#~%y)fF^ zgSDp?AaP50vwnAQLP3G1jwdY780ja%1kRR-qln?5!;O+)E%%F#NWXE|1cd$g@zIs# z-4}A1bQu}igQYaZDh5gp@X|C4bzKsBJqu^0|1VD}-6%yH4)%ntnt6EIfuN zRZkghcq5Rf^GFVx9#4-XY7oC6Z+re)>tdflEwo3|M^@zRbd3k{`M?U`>w0}2b3uiq z{1Un{ysHD_+OMJ<+@$0yS(%z)w$iy*qy%6CkUk75plfuc3UMh>1Pe$fap3%GwHL8B zDwY^n3lc0q?Et|fCF_3J#o!2pmtKGa$-Jjs6BwhbYX9)A*iy;;njuK+R@q!}={&*zO#kI> zbt|Z_W`J^8jgLp%o!#xZ9>opV`b5xba`ka9DGhu$`ncxJdwdqhQ#;qi2_E&gaakPQ z#?!i|B#+2>h84y=npzJXi<2v#=l>IcdYbHne8x_7;ghoFm^e!tAp0IdFXXmF_xO0x zLKVH8l^9q3-J#C1#naP%SrNe@rRj*xVpG1%CF|Jv7LGXYHj8$}>#oXClSb-P*I59P zWNA>d?Mt2Kps$l( z^#MkIPIW^wxV-&H@n+S^ikke2Kt;hm&Qip4l|9~Fgpn28viF{@UbmUAS;`Ra?p=c@ zGZhVj_aggL!9@9?d$6M6>PLbt$>js~BlzGZgVQxj{*z?p`MMdhj?X#j(CRCbLJC{kG|y{R zxp&dadw0zj-O2aW5S`skvW*hm5pzF7UEB&`sGeqL;o-(-zRNrMe&CRkf;5L3DzNmj z)_37 z0j}!%Y$EZ!bx}NxHI^|PtZSZqX&{_f_`v|P%_;h&P3e{S(Q9)fdNpHa0}YyTs~UGK z)+_Yr4g6JiViWL*8T)Hn>PY${OHW(_X7-`725_*E>7{o+*;N=Y)wsd$?c-Y6_~_%P zf@h_|MNbBv`Le}uJk+_p7@OTu;0x8$Nya+;z}^y4*dFYj@?K{kOtrDP>1cJOKg9FK z?RoQ{S0$d=S6w%lILbp_%PiECNJmFHtLny4=I=h_{9cHeal=b9L&YB)*c7V#vi)xVCD27nUHGbpc4^->7xq 
zVSu(<$D5tf9xggt_+Io9m?>HPW?#$|at{x}YC91g(n+vCmL&Q(p{oPAt>J%}RFLWD zm@u4<2w_Vz#g@52l|Kyda$1BtrE(#WFF5sWsBFc^T9qyiJD8b|R0VTBX_9B+2c|*9 z;6qxJmMFW`ZKXyy$*L%fM>>a{X@#SH=60~{y<4CP^6KjjlUb0?=9Qv4@O0U4Ttrt9 zMtlAW}-puu-rM~Uq@zR;laPcX%G0p0|j9Exy;&HSy!_&6MZxUd?Jpxy& z9%zZh9@#`JaoNo6Ud492QUls=*#r$+q8nLREyjIHbfAu&!#XeF*gxVN!6$`R*Y0XC zZudf>y+H1iNxW`OLH;}@W%T!_5ayaSGxycq_N{N@2|Xe3KAXG!CzPuTx#+!^PNwO$ z328{@2pvU*nT(}z6G0VQ3dbt+0(D9n@abd3I3y;&*BX=9!jWhp3sBQ#kK2uO<{!n8^ zfqp*Yve!fpbx@ipRI9`8Mf0ImBiEd0tc=IK@;*s&alO`5(N<$}5J~9Y4d;}_k{Qa7 za7NR|Q#PtoevS);eVS3hzS~H@iQ-a1?TZ0D^Swb)+y9AQ|CiJL|A~-)fb4(E|0_cN z`RZTxzu*7x<3Wkn{$2iwID?|p|1SU3|HBUc2#Qeu+X&kJV}JSY1_;dmA%YFkUjKIi zKLOeQ;{t!r|KIrcfBE(QJ{Yw9=la0^ZusZ;|KQJoe}Mic==1OLfBqi4t$#(xQ72wN z*(?LO6ICmu(dii_7EY4j53S4`##T^de}0X%GZK$0agJUFJW8;Es3+_-RQsD!?^$<` z76LuMjk8K5bJ@*W_9C}szZVl>{NSRevU#VVPn(_V0f=05)B@ZfmZLarQvMfiw4Nl;sF}&zm`|d2CM%R9+Q13aEGgmv#q$C9c z{D4hJd1p@8E$LNFA?ErP_aOJ(s80Ouvu7J$7J5M&N>(XgUoPT4WIKbgm@9Zx>sZCc zxRE83Qbth!EhN=ixon#d1M};szp2ynp}UgggcE0CBC6=e}V}uR~~ZGVP|e5m<7pF)9qeVk&C(p48VZItYfAJ zD~jk!nd&^{i1oDd(J4SxG#ObRs8bR(eO^g4Z#uMw|qeG&nl$XIr1p!#j@EN#*Yll@U87k*?NO};Y^W9*`G zmS;~BRpS~OE06JFm@oR{%1ly!JLB!cbW3@ubD-u*!U-ogAF^czd8*IZ3AMEtiQSf9 znBjrm*DE@IcEjSLkGPX5l#7EtL~xrfOXn~OZ+fLp!b+NnZ%!AyA<5w8)@mlbJ&C*L z*KXLl!XRTYlaJD1|LdXTnsubnvpd$^t43>#^^bnQobGN=$9rT(qf1m|kjprH03srJ zorYT_j?So&b+hrROcj6Zg8jL2lbkuRr)Ql?F_pSxM<%Ir8*KGv^%YA)!HB6*eZJT5 zrG`kgv;Sc&-T5*tbfvw9S4^qSPrIj`Dqaju&atB6Qa1VNDwtGx1|PZaF;piFtXNkz zTMp8-#u9pWZb;U^KH9n;#V)Q>ksd1q@Usu?R~#NIc|Gl$Pntu2uJ^Fd)k{?0ejOqo zNexT|Uv?ES?sFDl5=Ch#eo>Q@r#!km6dlDlFA*UO<*!3U!%5`H@gj%>^RW%RgZi&t zKX?f-$W=GDB8SG&Z6$Wc&f&mkiT%~`b~8qW5rkT`m=R9?1HqU5{#@S!O4 z(uM868SHI-6L?;6#HZYz0Gds}+KaJghhAFiEYKkWr|<_d9(}=lcH2$Fr{J%~??E%l zHV2p$6Rq0O#lY{H(6`HnuIhwI>g{`;VOI5%o~qfGIKS2CzMXHY3F8%}>&WwR2%~aQ zw<*MXVd>&SZ6#glO`2^2=p?6vsLA?5eaE@w^r#HZqNh1|^vkvJJ;ozaHt8e(QS}9o zg;kfN@3!|nfEw}cXDbb^XKRff_shq+&GdV2143jTz^C)*=s=D5j!yEXs=4Kj&&D|8 z6Jnc@&*6oF%IvBhhYM@!g=5}^it{w@-}MM}oj5q}z?Y;fnBvD@ 
z6WL9bLeOSYb7GEw2-ae?ZS_``=EawY#3FP6Uc#PaB5e|#9$S3i0l)$aZmx0jT$C4M z?)#sbM0xi%p)t>fv(a);0<5R&FO;YlEn9Z-Fva#yR3slVG`e)!p zvawwGk!cAdPliKMJ1@8RM%LN|Ro2gM50i2V9wEj5c(ZOn%b|u>v=nRzGUa1bOOXXL zRC;X*H5K^92hu&%h2aY>QysRoVR+x;OK+TN2fCIGW!G{a@a5JqsqNj>_h>rUv0ItG zTc&3wd`dvY$>O@s(0WVy)~0&_IagE7JrDNlc&iw$88y@%#D3Iq9slwL??+}&O6OF% zRi@|J{SQ`}yT)S)g76)zD%(SA`_}e{r&8Qg__rRZe&!@(?0{gz zP*2{_qLlWlQiByonf0oxPVc($S5Br23c5B*pjNch6uXV=m$sP}z1>WIWN3DKb zQ<~bd=rye#w&^B*US=k*K3uh)5>+}sqcUc+7Z{vwyqX1x|} z#TLHPbn%sw?@POF5;I?>*t!48S^he4e+Q-ls|nPqQn?3U@VV|>h_22*KjCP3IuWsrc~piRYfnlL$!ZtS#^qNY6Rt4lP5nXI>E zhoG7jGnV65k4qC$3mF`XUnb$&J|~GPjy5X68;sTW;aa-H1f{O)tSG)w+pL;jR}M1M zBL^K<2GzFT;&xyoiXvEiJlQl4zxD;Flwa2$ZwC0vs^+L#nYn_3NzCbu{JmN3&;pcG z2Yd=tV*8z5oA;E7^LA_q^_p$C*y?PloFO6AA1MZyZZ#ILXsvgJ*VQ)s1h>~mnxki9@X}?2OhhW57N}E@8#5QC$84_(OqpNZ)? zUIMfQ7Djb?*IuG0T*kx%tro>+V z=tTm>T_8U)tpec|hfx<9{>)fr*MQUu0;U!?|1t80KoHk{@P@`X z5Gs6N^X$S-gHOb|1XdmSD$43qN!6i1H|WlK&Zo-Qebq|dt4+CtzIn)Jksx-Vo)vH^ z>~T@lPHnOs;@G2c@*~k>i}7cUl81lB-S=fGdMq6%ziGg0lhABqT2@O#?*%6&M^fu>+Y3%fmg%Q9c>eRZ zMB0fhqwQpF5se?zhPv-%mhoGkrGpXtZ&T@BX5myQocP{=ZTW0$zq}i!VuWxmppRxv z5dwjC$r@)_si*5>?kBiy$hTKEI44Yyg)_;GTsK*9OB*eK38uEVhUq4+YCSEFzkUTHFqlh zY9wk_t|j;CUKP*dpxI@p)K*C}caHY<&^J0x+uAhCQn{7?$8$m8UZ!ITp=}+g)us7C zw8=K+HvVRwTa5vqV`?TIF9-J#SI|D%aS=vfW0~y}FO{OD3iJ-bGwm_1HR>L^r+h6W zn8aB6CJ8vnxwu34Ieu>SYQtxH_lcQ@JZh7YmnX7WT?1&s zs}SZr{4}wue_FLBJNM=Jz}yqI zgG3kUyoqtVlZgqu24hC2-hxwyy9zsdPC=8NH3P*6+)Zueg!3otkOnQ+bfy zX}JyFpY?%{a~pH(IK){rg_9euPcQ=^aSBOzr7k+vW@;)(2b!x+%mnp32Wbg8+)YMb z(t(ZX&Vw6KM?jT2)IuW!&+R)n-t;t(0^lzmv)IdXnnYf(rYTcBp6__WR<9psr}|Uf z2h%S2**ryy)(IYJur1(EEO_?$Hi;v;Q4(@p%PwUso^Fwcc$ynaZ)Wa~>^47*X4~Pz zRwmA#oLe>A#q=t|&>6>+QKeiOF?&zsR4-%oOU@lsPH}WUG%Y`ammj&OW!4FJ_X7H< zRUxk)aHNK)3a#OF*a*tC${wil?sV}B`a~DKZ7}X{e||I5;u^@cFqvGNUcjevhNPeq zM2F?PH}<#iqQ!0Pm|nOlemTlZQdGW$;|5k{)wfXmO{63Sx&b70-RdMUfRG=Gpqazw?yM`fy(j4_$hu;GLbkSp6OkIJ`nPtoCmE zxMF)o9Gy#b)qNzG{+Sts=Zda`_2l}^Ey{9<(eAN4jVJShUCg73?iOyu 
z&INOmB|l`M>)S%u!(qS%fkx>gntGAznjROSb%^o{{pn8`ZO4X}-Rv_&+9CpvUapHM z*8_?UFqoFr8WFND#A&19IV+6wjuil>HRO%+8jMOBY^%BH8_?~=2gboIX>5kt% zvtN>_F|#S9%W3;oIW41&wDY2DuqkCN0-2dkZA&>axfz8 z-1;LsfAuw1gY*lkZ#(?Amz8mLMou0bHliV}bYW)^%4%Bhk|9&jg9pKumNzp+U;GNsj_!Hp?c}Zw_*6@VS39eL^+IOH(Aks<5=O z@<7I1bC&QHDmqNFF9m0eg*I7a}>C3zsaSDlMzU+!yS(8u~k(=FUIo8s7%_$se z7ex&Wim#l=Eh^DCw=gIO@&MOmvdYe$L()!QKR><4|D;kokHx}2%Ohi2<`haBm!HaF zn45^hDfp$8@AJQB{=h5Rz*qJm2-otk`Nk*8#Rg$TQ`14Gco!2Y(|B=egZHA5JEZn<-4L^e0gBe@d& zH3*Hq>#ElriCuA?N6x@uOy=SGUV@e~ds!_Yr0srmf!@2Gt}t(i6RoYy&e~RYiZAMJ zSN!_BTq>@k?!L3YZHTYn+~F_*BgKxJ+%KQAR-q%L` zD-)meT(LzR30<}}dc0Sj*mv(#LH|O0;H947)P3ZDx9HPGqFYcopc;bPfMSBEc-L1KOW^euS^y-x?QH8i9T zBCtM!ZULY-zD6XN<^?n&%(Nn0b-_BeYKd!!lN#el7X^6n!;hD*cq$yuaugrnzdaBm zaogWKz%y8*eES(cbzq-j;%oS?c++n|_-V7&5+r!Hbux0}nWhpK;w_Aa2AM-I!22eXbnoZ<{7N&iCcPk?%s595ziY7 z$|S9MV(u@1uWFjD;|+c84qf-*I_2SIblH0st!g>p-yizuw0|LB9suBQ_y+%q5BVk`4rUuZ!bXx z`PCW3Ea#=FC8!?ZxeaC0XyuzQ=PbzN55~mH=UL2hP}|sd>6y55l3T)`BIgVo`E1Uw z8Von*rk0gC=}QdmV zc{;YBi;IqZZ-PqhC6-w3R^SXA?#{G&;qdwvbp8IcrRPlY*h>h`uuCTr)SEB3j# zKU{s6dX}%*R3{Z8vl<`CZzCw>fT^~@Ak3UQ085&ets^hJ?DWx;?njc+_}1-ed-U*h zvrDPVy0Y~NiLE9SGg-XKxrJV!lR4LmT^db;;1fNROh9aI%w`VF#r+p{n-^Q-#x$ig z?QrF~C{{m%nT4<$c>ne_ZZOz$?6;qX1(7=ic-b`}truQq<%|oPuD;>^F>7!g#*Fg~ zml&62Af1>t9L#pvtcOYfc1pHz~epV7bbR?*ef=)aPD@i znpznIveVSE8N?;*!kY?-VB{KLChn8ym{f}Ve)7efNG)3~>a}#&)^RT99xG_zV^Gz9 z_|VSl__Y)hz+V_bmUdIQn@$|zqnC1PXux+Kr#N-SDJJwPhbjYQhvburkx1#+!Y6BM= zG#LGl$8?_2lXhjPV;3EXT=89O^kRB#y4}5x>#nclJk9u%_v4AMVW8>;waoVo2c*iDm?gzFeMjPPJLGZ_XGnv{0bFZXL5uDZJp zSc+{Qe9F}P&f-(jI+fB)wJy|`O6T2p?`<8%sjwg69V;kn_AE0|sTRGOMwdo3>oNNm z-}Quqw8oy^=d`cVeJr=14zc^Dew`9zoa3@F8)!^8a|{27eh!*C zew=t1%e?z&+QY$*Chv5HOIstLn66lHUA=pQMgCl2c?E5~Tmg2}dO-h#^})@M4O|+X zvE#}#%(-d>g;sIVCaAg+pKhOhK8T0h)r?eh&Rl_V(e{#ZjKbNK#x9I? 
zrNPH0S1=s%h=mdEamG^qA*hG(iuZXUl$5=S(1HrIRA>OtiK3E>gqVB+T$ zJ=$#cI*JGShr=nzhY@U9no5pWmgV+zAm>Y;B4D_bQfBJ9gnQ~^c17Xl_-l6k zvJT}q^8_TG%pxkqJC#vNo7622`R}*V#M;3{-ni4*kD}m*a<=_-o(ky2)Y`R_D@cre zwKy*Ne7z=C!w(<22Yk~}+SgBbTJz7ADrd0NZF$yG?$6{-PlcT?MQIPj0fsM2)8<=E zHvg-y=YYrZ`<|Cbk+RAPDZ8ZXnPijfm6gql>`g`~*%Bg~kiEB%z4s69niJXkec`*rqwc3B5VNen{DCBWRe@sA@EkU5oV>t!P1&%+IK`b@ zVz8p#=}0ScUR}%ZW21opJ0I0K^=@_$mwDUj>Ggb-Z?3rRkB?tf$vx@F7pkeN*<9Hc zp8Hrn=yqY6Yv7cXr7NbRPa7uvDKz+iS>HUEH#f7aPQkLTcrKFaZjor8&C;ZRo_p_y zzL7;~+&PYg3W1D|1=-Dbm63zoj7`-lwHBpQv{yGjb$Q`C$6~prELW^1tEM21_ol|a zv@5=7TPY29zIeUs@f`*6#`eG_;S7bDxsd@FUiDk0=)r#YnB4Ql(VM~9bXQ-ZJ%C<} zg8mq21aCTEzE2H(pO)E{ZMKWX(}DTbM!k8+aI-6|&ad8&dqB8L9yF|Rf-V|wNS?V-O3@%ZUhHpQa>?_fGR^08FNoJN7OT|MZ@CJI|Hiks7W zMkWTooxsrX?k#pUdQdrq-LG=sR{rf*MyG7~&6i?lV{eI$h7Z;Kc9~;>;A-bA+vyhIwz?;hb@zJPf>cv7f8(3Mwr^0;2Y&hRIf zK!W?;gOr^1hr+9pJ}GW;Q7m7KSgw&a^|K!eHXoj~xhs0o?vx$*)5p@PWGGV=cT#qC zpA0TM9UdD?)K5Ik7^l4L{Z?~LK<@DqYO8%izqkV?oLX{zKc{tB$?f-LG;V4&_rnlr zlJnPtnl&nHX-5;9*%9k@76Om_9C>Txb5l9Q`}wED8qQK1hig-Zyq2R&oKx?u!?V(N z_RN-Q_BQD6=d|?nQMl3^aN&j(EM({`4P4%nx@KHE;`z2lkk7N1dO)6ajOVobFh^-C zpucB}bLO0PY_u-kF57>_qdjn--z3;hU++8Y+|C>E)Q#Gt z#x=qrZ`oZP9c|k%k0D*zI+~?8(lyN5s8DZs$}KJaU2682u_M8B@tD7ZgX-n*&a%}q ze>!HN-4*(d(fSKAEd$F}CWiM%3^6UoQ(nADi0S{x6Khpm3+@|(Uf1pi54mdO8w|>> zeBbR|aQiG5w2SM`aYg5c(}ebO$ChtRBuUVJyM?a7vR!F6wk_bEl-+GN{ji@ysf?Vd z5Mw(V-ryd_x9DQi#6j!2xKc{$z~@o3DL}Y8QJGWj-JT=MM=;1rxKa+MI)!^4x~;vL zGwGl1T4;ZVCaBQ5jQ^@v_B1exM~C1jSJb#hb*Gyo&Bo6%wZvUy#QkF|c5KD?rTwS9 z3|RBut#Y;k<=<#pz1BY{&3D!0HkpoJ!RP5N?V3#Soa|Lvyg)>uSbVy;=uWyeYC{-R za5*p9r(T14l&wQQJ7g<`N?kEpOiCoFz{%$H%Xf0a>Bpzd=*>i~s)pP2H%6~n-7LSi z*i%no+f1g1}qp1!)7I}y>Zp4$-%b?Gs3J{{|Pn$~4O zEK77Afe;E*po_{Xc)a+2UvV<|Th2zCcKWp4IHGTdY?>mW#3T6AlAvvGai_(5i{ zw;Zg-KV{)^Rg_xx>brEwiN5dFBevAW?><9V|I}7)D-+1!vR-`}S+lx%77^{fj&C#M zag3mNZ#wKUDPGpxFg^am-9?k%QL2>C%I}As#K!)0-3=qISfiYR%DYd4wPo!t1zMfC zU250&ObX1|mohDQ%e#z+Zt+lKD7Gl0=%l_)^bBjA`?z$9YaP0cMTmx(u=1jL#Q!YKkw0?U0hfm6n=*o} zd5mU9=Ju*bwgUNWrBvS55jFgZrH)sxZNX}q~5 
zM)iK`onW7sY{8cA|zg!=(T--X~6i{%p zJoK-lScSLGVw*{QSjOcf@Rsv2?c(+@#r|QJ;7(2*-%PYu+{x7s6*dLL%W`gxgwyfc zA=?N;iwK&%O4YW&Zr~^4w$f1=%*^(x(zX5-{<#HWiUQJimHh{X6w#gWm8^qS*W4_fuz8Q@+Su4M{4qKMck$+Oh1>GAu?TFd-w; z;-%1hsjH##nifIlPcwIvnZU{^MIG#3ponwVr|@P?@o8h#>OpYB44m|FA`jXP@hdqn zY}TEF$KH$_Uyo&uaVBhMHt}%oV??>ouEsL9R7JCuhshET;MaLd%j&R|-Xl-02KK-H`wh0Y3v>LVWy z*(v+({3a@TAO2NYj`ndmC&N{d0gd7Hlh#zd;`EA!hQh&X-=~yAYqi!(cHZg|pf6MJ zOwTytdwWQ~O&C_Y6S9l{{$wa(go02YvB7{j3+E)gqa5yn(CFt#;KksPonqKj9hLt| zVn}7ThMNqZ?_3`*n^~kh@ec2ad}4@Z9;5^poBV#=Dcq{9W@0Ik2)rMBCd%S}I~bkiA_Yoo`DnBx;@YxgG#ca_My6^2|n z{QIRRNSo)U#W!Me+iY{18oGIkXp#k~C_2WT^KsqeNi97J7-xqY-Ff`^EqwFml_43r z5Wx@J@D8y!)e4h-)y08j9)@q*H0q>cTB-!s2z-J*@JHxtrNH_s@@fT3eh}A?ipmCZ zK2T}Vp3d^ejVbNSt6K{AyeED3AlINP)xv3Dhck79p`chso^X!uGI2wKreM{NMUqSM zeEw*EuBHcjz>9`drJp^sID*7%UrK@5z9pOSN=7CY2 z4rvzc747n!>?KnpBWrha{F(%BM{{rxU3&C#M?A^8NRg|4L)TeJF^X!uIfC%;_HNR_ zCHhI5yl1@9rH&Y)jY;`k=E>$eh+zjW%{&{$uqFGEix-;m?pZLJzsziVww2XS&wAI{ zSRgF*1~J0Kxkb1wt6zijq;!Ip>T2FfMTz;@L+$dw-AsWbzHFwM68#m2iuU&EU>wDR z+wF-zRV2rQqNz3Ioz7`JB-F~&j-w6s@$#p2MHuv>lUE$SSrM z3DYBFylQCXjczwgLFD5cL!_Q+5s+iUgoFElNp<6=!O%Cdyyv}W8%Mz{X>qhJj#x0f z!kChiBz|VIt)Sg{z8mkpX&0sx2^AoSjj$HPGH|ZB#5e7@H85Yi8gl*$JF>L`0CuXsjDFNZPma8IU^JjIv!YgM@E&cXF@GbFJ+mr&q(ByoEMb-Jr6f$p~ zp7+hyWE?rI=Bht_W}j%tt9aA;p@>}{Z@_jMdvBF-=mS%5s8bT_5Cv~U6`u;u23J6w zfN+PWaKcdX`e6A&2L(RMX=OX7kcJbs_~EBIo8rBbA3xo$@*95L;(W9cjP|LM?RYxO zt6eyC;nomS@uRYjYkLShR};ETe><~bSY8zG`(TIc8CRv-zRUP*9@xaU+HWqEoZ^nU z#o;01uNsYW$lPhC%^h0p&UP`Nuu2%?%N7tf(zZEnwYSd3&64Et@S!+Ah`2kLOza+P zZraUZQ=F-)5i3Z0GhE2?r0A03Pclux)(8nrYXOS`eg|JNiju^**~Qx)Ay3x9HoN@Rx>(D%|0-H-sNPxV5 zS4%PUNCDo?UvOZJh-~)|vsmD(8yH7@qZ@ZDrFrdKSO5?@(KBv=+K{ z(P-hwkJ*;)9m1*k_ShG=d2A~G7)`LVy#L)r)*G|LU-VgFBRR@4j+^Ai>4YQ}yYW%` zcQQ*05ECy?o>M7?Q!(IpX244rwYoQa@#jWxYlCNN zbsHg?xt$A*8PSxJ6XYcDC#3#WloLIq(b1tBq0`u$Qr6GV+$c9f9+1UaFO4dssEyiw z{A4<2enY(^M_{m;{Ij77!|Y3aK0dO6H!RC$aY0JgvW?;8UH$Kx!rEN;$gHEiYu4$* zftd*k)*_Na+iqfW1=RUcV90U+h99_v!!JYD%t=rL1ZZfLFVJ8@i)g@11zD>8lm0<~ 
z(!b~j|6%-}8DOZw9t1`*b5Q%#&K%h%`hWZUZ7hTOMM=mm{_5xd?-$$J!oXAyh@iGG z_}}AF{$=jEZDIho`O}rHmA&PkJcJbiZ6Kj%X#h8bc(5rfOf8XCKU<6cSp6Wrn+Cti zDGY23p?WAm>f+klz^Q-bvA)^bJ%fY1fvugb0l-j#c$6%jvqOqP38H@`7#cFz1S|oR z?i$(}*g_eM7cX8=0gMSS(F0;MU=oC_A>kY-fn5o~LI+VS7+-Ad?2!C`6>Ul@t^sC<{yP0%n^^8Pix zzvLs=1yz4YUzG9wYaf7sGF16c02wNc91m*yzdr|muj8L}0%DMI{vHo%Jp*_~=tGPk zfj%I9H2mKf@-qCMYnT$?a{=_96!ME5YVt3@1!91Xke2}R9Ke7)7!))|2mt5`1NAT< z7n#2ZFgOku3Ydeu2*^Cd19XN#N818jq04Nc06GXD8XbJjVW1t<2eJ*+7UClXcmn{~ z$1w0rgD^UD+${K9!N>v30(uib6MzW-Jpjf*xiSC&kZ%Ss2H+cjWdJJx<^h29K=u!f z73hKvyc;kuex!^!kbe*IKqvG@kS78F#)H%o(r*hu8z{R3@F)PNY#Pvb0PcW1=nFYN z;GvJ~7s9##szEu_-~R#*P`?1`e$NYXoM4Yam-a#z^}>V!Xab%e04D+rjk^gzIRJ3< zFLFMv1AG_IivaonKsqr10CNxJA^A`cB|3O!KywUXNEhThLpe4O0OXN!pt*zcUx6@gTRVL#fT0A&0zH3di_p+Sz!>zy(9pbk(a;h@ z(a@mFl%XS$LD1(f&z7aVft?;`!(m~F#87)0)Gt@?g$(BLpXt0Q+|~{hv8jQd1OPJv kz*m)J6}8m20^g~|N~RWupa>nnB{bqK_6gD@!!h*#1IF$Yq5uE@ literal 0 HcmV?d00001 diff --git a/.dotnet/tests/data/hola_mundo.m4a b/.dotnet/tests/data/hola_mundo.m4a new file mode 100644 index 0000000000000000000000000000000000000000..7bc01b755379aa13d424ac61492bdeb9514fa584 GIT binary patch literal 202099 zcmeEu1zQ|Xwl@+2L6bmm3GVI=AxMG~TtjeocL?t85Zv88xHGuJ;Db91GQbPj-MjzY zd*Ay7?$$F*9jU6*UFS@9SN%={1qFp|XzyZeVa>(@^<187W)nLri)WI`!NEjdvjE|q zCE^oYp{@by5Mq47vmBY3iGhiMg@uEKnStq<$FSBnv3#zdi+1Io3Kk~JLSNVZnRd2t zG}edyBZPule@G5|A^%plc>i4t1@+wEA8r1~{)Ks#{2MRyUmgEP0`|E>oxQED*|S#D z-ppA4xrTnR`FHy;|5L9&CjQON|85sr-a_9*_j#<37W#i|{5h|`o|{t7A4~Mc`sTL( zuxV%I@b|g;o8;RwuP>o%sc&xZrwzoy#M1Ct_14bfZ-f55BiZvDzsT|RZ4F-Pm-2GB z$PTvVAOBAOwzL0X{!HuJ+1tIG^hsQY;hR_G7*HBO}$AA9do+U8vqFzJ4OyFBPd;34_pXrzK%-iYxjr{YC@yxsa zg?}NR+vr*SMfTF?uVYYII2t%V>p$At{;6M@zS#QbG5?{Ug)FSB{>+hu;eXB1vj&># z+3A1g$o_Bgm)3uGHtg-oV9#w{+1ol;LPMdw=+OR%pk6&!e~k&){eR{EoBr{C*Z+I_ zSI+_%QLCV6SxY7wYLO3f}~VU1WrvdpenO?Tuxwc@$}PW;bM}={6ozk~n(vbu&JYhS(~P zoV7ZXds*Ekfiwmpurz`G6Y0@8^ua!eiPQW(jK*r-o_h;fv~FR7-1De&5ZzJ;Q9c`B 
zbk-*uZt3zm>;|n%;_BBa?Bb7G_ov$jO(E!~Zb(MwQfBQip9%!E^0wK#_%H0{RGu`*jZ)7?ui#Pq!-Sz@wqV#iB0jYjWf<5 z#_pze*v59kKGE!8ic3r2=^FgY7F0mHz4Xuuc5&}$c+6Hvo3_lf5~ixw$P!}-ssg>b zH5p!b03|Ibs_7`SYQ>Q!xhN#*Z;l%Zi0&+X_{=yhRY=~5psn4$F~KnmV^z6Dh)teg zYQnepDgmka8inNhxN|Kb_O-_u zVLt|F`TU4+$Ww>}=Ze_zevL;K%RPr>EA(Z>`{5n-h9j76cG{Ri@rE-VJwvkyBUw{= zmf<2A`;9*YDQ4&Mt^%?RMtcx4q*{cSq;JUoB;@U+t;NLd5IzP|A=CkJN12f)1a?S-Wy7Mq)lFmB+q`I;m`Ef(L~x0d5_ zoma;W2WH&Z`GVy7vWw$XXA9h~gm6^Fy=0jddWt$@VSU~M>C35a5||!YIwD$h(yi`E zAHe*&0EkDmB`s^8S8bY7s$ZF&!rUeuP27SBs0lQT&!XagL^TslRH#j#ABm;BO!h?* zIUyODi&7uwYTUriV65@C^^ayv)_!?DdG8dSfGQOnVHDii-0ajn14UBy{P_T8X}B;> zs|J*~9kmvV`9Zm$*yt1mw(Lr&&+GOjdyN`#vnF)M#5pF+-zo>pA=-1W$Te~yJb2uA+9 zSFcrnzEjr1xA{)XJ@N_#Bci&Bx3;h}FPwC)4ohM;^N^c-+fMw^U1OZLK`YF; z`EuvA7mtsfcZV8NS?ZMP*AeX}kw`A19dE9w;6)qite9;omYv|b@?LF4o?Ix(H`i$& z0Iz=3%DJ5M&APmLL?S6aGvBO%h;*&GtB8xbT1IY5d82}I5B4QpHE_G_SA+rd?FvtH zpGX($cildP*-iUw9D*wNyKhSoZW9NO!0Zwzb0<%MAVJena~XH>Qul)7HSEL~8+}3v z(}1=eWSduuGfCVU$X?PbEm@3YcOe|rGs@*|ZY!Bss(lVEeUDakvv~q{MD3chM`BY$ zwZv3(?!)%XIBQ&A7X9}-CTSdQ1`Y*!jW+E1akcxoll+Bs=Wp}lDZkI#I59Zb4fAxc z?=Uwq69M->^5n#DAUj;b&in#;w6+wpK0Qh3eY{MwYB|-r=N^-SY*Cr8m^($ z7JD4irBG19cD+m#Y*P`==HSu1ZKY|Z zC@N~jjZMLpL1Y%&^YyZwC*?IY_250ZJtNvqwU~aP)j2wsaka+ou{QA>KiX76AwS6q zmahJWY~KL%ukQHn^1Si&RFXJ=;L^57fveaZ7}0UEEwkbpRW$crn9K=gOLM~7_UY|1 zyGI&<(nxxJ*Ji(M<*fqWV`c~#9Hs4MWdFkvjm3>(W!qC5*s*>MjIv&E))5-iir#k# z%&t-gOCLRYoXkDNk9CGk8K0OxXlZ!35e>A|THQXdlDRCG8r$o_=H3wxUpBQnrw_Vs zG#pQ#j?q8{cCMN(d*!n&sp!VEZ$=xXv@)>{j!h5CngHnreQ|ttujXbE}ORIc+>Brr*FwxowX|2%ik7Lfp#U02j+7;0Hqj}JPO>rB6Rv_ zx?(7XlLqm8tMXxiZb(pAGI?5p#>;?}vwh{spr7DtUi66dqUHyU1)4VWp92q13RQuuh`8CX!55h9FY4~KE$iJ)~C{xt@6->ph+SR6KnNJ z6zY13(yR-Hdt!74jIEJ#YbYofkMC05*gt)+-fTT-fN~`=z9!b{=hk6H2LA2C{;GAh zOnp|ofPrp}^IqV}f~&0mX}-;~UFB+8ki<}lTTd*jlhb&n!>3b73d*bn{@RmmISEQY zJBV|M4{zjt|D!O;Ekj|I@K-uXvef(b^*inxPnA*a;USKX&fxX?NDZl9InHpbqOr|j z_{?p2XZzAOH?l+dm1yMsRcY<}P#$>VF4)sWa#<79scmEjF8o>!vkIYczYkjVbuAt= zjfdzP!OYHEdGA!2IZcDPNF$|oYkJ7XlQ;^+4b=<|c1E)7NSq<+B0ErT*XLO<%wpf> 
z>AHvR2!LgSPXLG41rv3ek*U^hSRvrZc@IDi_ij%-Gg~Hc_K}?Y3d!RlzX!DAlQu#G zax;D*&u^RIoqjR9qW&H3f(BE$TkW$4^L$yJknhk=dfrsa}`LE#H47H(*wlxnVGJAm^cE&UF z^ntic(@TrLJmMMkg>t1}?y*0ZvCd5cPj5Y?@6~%3vXr+oJ>Z$WE#b3H-HG74;fdWa zMDN0Nvn#!KHydz{%DzshAhyimIMNt`KYcJkD<8-v%fs!*LEjkogtnM<9~b=dOF@nQ ztlGQ>{^8BhcDi46tbO8T$LQ$nv=_>%&N(E}%wSu8JgKk!=p+S_xLTWNV1?RrFJ85P z-!gKaX@5tgVUv-!o)-NHrWZN9nu>T|BQ@@kh3|d_6&sjRS5azpw@dVG&IK=J>BBFs zxzGzuUW7YvCCGYNpe<`t9fP~3wA>SECr#t`wNk(0qhyjRB|KWGYS#E-{tigce5axc z&~++EbDS1F{4O%mF$1{c5?K6TRlhC7mA+coz5KyQ%znAo;}EGk%9FCA+)81{#mrLK zdzNTnCbO^a!b@GuOZW(T|6$n?j<$o1&SR0|W?-39xwen)2Rl9FCoKKl)cCtnL!6c; z%EZ);m8}Ea(_OHKEl$|Sa?D#C7V;{;C6BldC5Hk zr~DOzUg!l!D_Yi#S+9m>!yH&u1K$%xO85*jNMuV!I@P2#QM{i6l~VTeK7}Xaz-0$g z-zk5NL+%k@n^79EvD=Ij&I00CMxiIG!qC=xj@9V_s|=zNenVx8Z5_sACC;?X#50t7 zx-6UUFpWsw0Hp)P7`g*Fa1lb!Nytp9p&P7KwbsvAwvyFzFe6e(Zuwa9${jCR`!A^H z`-ScEqrwUVgO?ojl@B!gScMWzG`)QZz`__YdDPrPgm9)-^zz(77)yfGXHD^yhq?sJ zCS4g++*<{;lSzkTg{YfZA2^@p9>{~6hQBmVH>sR`sK1kpP?G^qykHFK7ov?+gqXMobSI4Iy>2XuBq4xNt`5Z_5$*bh=p5Tt_xD;;s z^PDonR4dP_{kv;N!D^7rrNZSK2lxuBhcU-hFT;#Q`|(Gr+5Wl5eZk{-`<67lY%TPe z-z#nD=`Os|L`>nCdETOS)6n-44rw|m>mH%CNj#=WmTG(!g|eddV(uA^Y|34giwquJ z1wni6IUyY8fv(FeB3-B_B_T{B3|cCQvJR204rY0SEAJXDC>3zV+V2{s?|?Pi-1ZR< zYKMxd^rkRZBY_Ru4#BdiX7@dNahqk|w&IuM*v&I&U_DT@UenFzUrW{RXuc{imE_2{ zq0AYTvB}iP2wdcT4v4n9luj69?lu+tG8&e-at5dsqaU?u^t=)q318&gmF1~1wBvut z>0U{ew%p}oSkNF5qP@n45SEt-v-&l+zmB+ve;}3WuXNX78YE<-_NVK2@e~w&s8Qcq z>lCo<+76cD(8+?xik+$WZOH~Lae^_&KTdEVG_d0Z*r|XIzT2Z&?O5D|X7c8r?*iwC zGro~LIw2lKY$j2Cr#|yih(;hl^&}tTt+-`XG#hjDrtf&ajVd`lZ-r^2%|Wf}DDZXk zY_XMOV4^WnavR^&y=Ll0nFj*){aj44D?mF6qzuk7KWqy(L40rTGcvzq7Yf@(m#ZWkDSwE3vuVCmmT?DXNhp!!2DH*geI<&rN$hr32+AwA5*KQLu0 z=kuEYi*KOc6;=(Xbqr@ML7XJNt#SKl^nTkUjRefWkVA(%iIl0f^FvH|R37=Y)|?pT zj2}VL@4%927IG>vJ!Xy;h-mSyoTi)3vg}&1D`#!*orvQ$Mm1)RWCXnmqHDN{4PCYg zGY=e}2C9_aZ-<=EyW!ghOvNuOxQi$Hr>_b+xiycR+%pSMOoH;y)%eJfwkwLvxRwwG z*&e%9SokiY_PHsnOy1|y;jWAPu*Lnbj3Z}(%|9qngV;-IrA4@~T%{qL9Q!WR!MQIv 
zI9Q8b7z%d$Ui>)->L3_+D7f99bfMiH-hhfP^3%T_wKS7P!WA<{V#hg+>W8}PNYAa#tv}GpQADESBCDrPAfcHbioR*IPKzq05x&AK{pi*;4ZyA0y1;|E z6E7WN5UH)VqP@45SitG&b*8g$;Dx!dAVKivGwN0I;0KWTZpgJ5#UWLNC)W zRW4kb(w5SUtZ@QT@|wg|ZGySR`Y`R{sA?8X-5RDR7nsqbxW-T|<5g-~H}`Wu%l?XJ zF=pW-`)*@rbJ7>>uq1HcP?w)G%$hDIO~{ji(HzQY zw=)}hs0T!N1j)y2-2CBI+X%*~djs`b!dC4db2^voG*6B9^~V(J`=uP=cBQ*DX)TH$ zn1Le4yaC;*6a?sDjT_P_B4pbo{RHc`iQ(5YnNBg%4jwM+TQ-!jLh9UeyvUloxE@l9 z-vjEW3n^uEskowu?+S~<+J(QVlXqJ>JwG-)r-J;CsQokS{=cF2q!oYLw)+2~_HX{V zt@Q^LhyM%zg1Y}ZDvtP1J?!1H?PnzaALUQ~XC(a}1@`H=Khi(AzoGUpc>m;I^w8)3 z$!B&v=*r;Gm!gY5$=1FBjUl0E7?PL!IA| z^3KZPq(VmKJ1Jwcg2-OK{S5QwwLV$w)^zko>ix8z2t-ad55#3|y$zdoTueI!I(HOf z-}lC#M}K}IbO!-;_u;(Ci4IC=%C;GH6J-O&KW{ECxw{8Q9!GXRjZF!xWeacMFqqE} z!Ql=;9Rnm+##~Jns(E>qDdkK$5VtCHmIAhIvJ2s|T2cf~+MKGi6d%tlcrKeg<5s`!OS8@5?n z@J7inlkN^bZm8nmIR7Ag)ytPMfcwMr*Xz7yT?TeFhfUE>Q1DsTkuMk5>$Fa|MDLK# zm@8N1G&D+DMqDgyu8fXfc+BS~K z5IM+nFJcE`?h%MAV_7%(tE_#utcW`A05IA7Mrn3K+x3twUB+LTc>r3$^bpiwTnH5U zn^3C~n)%xkE!cgazBXw{ZE6VqeD722`|4t+S^N%P7dXvTOH}hCmG|)dI0>Hx+}(Bk zm;?5&iNrJqOr5pXE6~EwPqbD?*I`N^6S$w_(u%Vm&+g0f9>E^K72rz6%K$gm^53?!_O6j0Y*=)l$N}GS&n%heIPxWK|zK z@|JvVSPU`lc%mS4{)^cOLPA0Nd}mK56*uw2&FKkTEZ?(dpgQa%ek7c-Kp_OVcyWTS zgIn8bj#KWpc9}p!cG8ZUiIyUUPr)HB_`v?Bbj3piz{qz3pW+(jhX)w=`V1LkTz z_Jz@ObVZv-W&HD4p&SnIf`jxondx669`_(6as7wQgXS9dnQOG}sSLekk3!&nU(Km1eSdN8+m@Ls2b?JNqLzqJ|dlKEd;I9>a>=74&YUnd|>8}^W{chS^ zPQLF59NDBzdV~h_%w5TY+kf7RamtqEu1Gc+{kZH&1Iicls>i6)@*5U65NJkUasjRT zh@9@Ftj5|q0@n|m8K(RyPWu`hQ#}Rpip4jbf!`UF2#k_DH&{U7NP5m`hwm-2ZvACo z(+wH{mcu|-W!6J3-M!HBCMH5K zGxlvZ#d+0BBZqH4`*NATH}nU0Ej9;Tbl(jvR2VZw9ku+H7%W%!8>p5!OK4qW2P z-s8bj32`Sdf*%_XNlNmhXah)aTv;v>Kzy-Ri<^WGbuVt}qIj>2pf~un0)fRs&bHi8|uB zl`Y>KBUKpkl(@hdNfdaTzJAq+}Wwo4+~Y;5CSrQPZ1B)%*~)=`5KisV#a`Csw`c zU!rI?C_;TR&8ffXK$58Tf#vuJ0a<-tw88D|s2$Uj&GMzd#ME;nj6yK#-yv=wE|s>% zOgMb$YgEXRT7ARK&H+o1)bYWjE)8KM|wzqYrcg=-= z45-mdx60xZNn>wrr_hlQ&ty+zTnz5)rtfI+xc-t1z0?e*+Den{+(XW}Q>!_AiwOSi z zF$6MJCT%7)t(HlgrQlW>v)F|ha|ApRF+o)3R3pmv2m;)6W;l#*q!+VbiqgIPlZr-d 
zdtoxYCf1K@dhr_^atbNCm2w@iZ!R>$%L))aDo2A_r~TUtC%5w_{Dj7Ut%ut%=faUI*9Lpr%(S%*@WZ~10cZ1)K+WXk@YIS9M{$6#E?&PEy~ZALIP8ObU%Nvy$;E+n$$lXJf7eW1o;VlM?C zlH~pOuF516x6%)?oBHE>d3^nOMIIl+bSXa4s5iYWyyed-3D6rxJ@FDK5iT<%=B}ed`Cnz_%2ZMhb;e7 zpV~I~0AG3Q=kVG}OyiYV2cwR=hDzg5LSR_i6XT*~xICW$uAY>pUe_L2F%YT)Nm;mz zf8$M0?mUGw7R~w|+^W(Y~Z6Tbu9&I%?$&LoiZ>_OS=JK>=w2hz5e#p|Pho4`` zls7HG|FS-9OmgJD;1ev)@Ln;yK=5!$wU>Cp8MtZx5kF}up~YbAJA-?NQl)%DWDz%m zdAs#WLf>stxygI+&Bbr!vxA>>cR%bFN>ufQ(v_;@rL@l3vL}WmETPb}XS7Xev>jbD zFbnq83Aj0Wd|W9BUplU~#p_+CPEFCoz`QCmL94j&|3Z0!s3+C>YLR0dHi7kR8;#(b zR#Lu8wEAK`hB^A`2ut)>$3M$%9%AVQ&VQgd_&|32iVk{;JQYv$3PQ~JQ#1>IMpBJTUU&F$QP zZ9oB1eiO#Z#b0^l-Zun|)JZz|Pud?kx!uqXflby#qI*LFG3$*}%)I5lUEtmFD?MQ^ z$*k<2lPsVc_38`1R=01c1S9hh{EScx3>qZiKJ3>WrjkS zz%V$h3AVG-ZVk4vPIhdYtA&Ls1f%V`v>Ze^R9!!onfU40^iqj@^cu4EcC4SGaqEf? zA)`XLYnWJ>|Giqv2w10h_IhxmlFXm#XO#z)uSgXF9ADKFH)wxB1H~8Ph>Xv^kV^Rk z!=`VWf+Il{Y6@cfd^LCu<{KwsH}uiFq%P!Zj;vB?$rbZ-p4s3pQlnE-P=^>?FH1|= zj~N1EF}9^kjk%&Q>tqV2qu)wvb2HL#h)jz@GfpBeZvd;FIRe=l=PMW6d50o!C4yoLUO#=4iBj^gPmh zJ%?7Y&l7}w@Svks*k6)`qew`eVd>7OZ=$7Jtk-&bhF3vwL2ro2Q&>XE)vChlXK2~V zYVXczej)h&5uWJ;b**uC5U-JkL4hEIQ8J3U-72Oj)^YR9hK0LQvqkx zQ%>W;s<5F_X^jL+j`sUandu`8uKT-8rzy3LG>^=dY*~lJ3lthwKis7+5_9Zxc~xV- zgyz-1>pKjsf5GfjHHQ=8-#1ipxBvbKLS{TO8?r6u)$7{+a&T5??LIbPJjX^&m&f{G ztW}n%XgSmt^3%DTvK%yfCp|kpJwk7uf<)eQ-E6qYpC>}kj^vI5OE!T7wOV-wbIH5v zhF~y5jnZgY{^hF)2+*C*!~COE=s6(f@}cLctY4g-A2sXl=+sv6*BaJ+_3d! zL0@9oYk#?`Iip z9I3JNOLFP00WbSn)z6!D#DI4BY~pgug!;UK4xY5OmVM!g(w|i5z?h~b+zB>%4E@kj&ag`9S4P5@Texh5ct%(J*&&MltN7n$&rATV1s=Wm zEzF-Sd4QJAp0yo=VoClBt?gYDggMTaZpwkRx4D};!|+#$r)_#B!~6HIo4obR${dbF zfnuY<+1@_gKODQtJw;gi^%!x+zU}Lu4XT{8aUFzZ+!=k6DuM-G7tX&=)@3L~jgwpc zzMg20tUj0Eg`Hb2SiEOqh2k<}0Iv8w3)Ep>>h-VxDnYJPX>nR{Ojl?yWQ;+BXo^x? 
z#*T1b^1eT%Cow2lnxnGO;Z700I(+L?OUJ1?y~h6+=U4T&4W1s(F|I#FRr=ZUrj^mj zb9*q8&RH`b+GL*5{qvVq!t=A?+f7rKfo~dSv>J_p1aFa7%@vskRC?d1GO=A z)ND=cX;>kS>43A?ec}aZf|j0kTaCuA3B9&~^&o(#!+xrq0M>WSrqG33yNp)kq%Q~h z2&%wfM>6M&3oZvhdA-&TB8b{v%OtSAO-dd<`HaS=LNJ={K1n~ktW_=TK#9)uof0&VP++*A}GzR16~)gFmLN3iD~;l7J4m7tr(SQi~9 z2}Txrj)rU(;1oBX)rs7mh$I*k*}gS57Y@0Qtx9>9T#8TdwA3xU(XJ;m9V%r7PvkSY z=GxDt7Ep7w%0H^FZ9THgXQF6Nonk_+d|@~>zMLS};-5a7V#uRcX;Q3tm18wcSL;aA zoN1+$wb6*;<0e){2`sYNsf-qAwC?ef zfJVGr(v)Fm`#i^bqP_~Pa=e~TLQ#vHRBSvH0?SGX!xK>oHrTwwQ^{JlMwAh~hS~cc zoAGVWw*pumlb?mzS{h`tjY_7~d#%5P?1MlC(P=}k*nXCkzyc{<9q@5U=$}KYJvwR+-dZWZ=N}dY1la$NWg(WH4{O z8s@<;j#DTEz0~y)l)o0%p5DUm*s=V$dN7xy#7(-(zd4iHXl;d*0s^DrmF-33wtH3N z+|m9vX&T2T!W!Y|S9NGTMB0=G%WfI(YR>>XQ|3(rcP+~v)w363Dod=b$EubSkVO3} zYMm%+?Tj29zSXp^;G6F(Jy91suG<>(h$nMb7cF^_Ud3?`VrO+@DD7~hE-{tO_Z%E! zJ5`&Yae09T{Jnv;8t!NxD)4OJMPnKTi>3w%n`DtF6EFF5^oP}MY9kJn>XPx5L4I1N z0GB`=(D4UgLE`txyDNd8iUGxHi+Abt+5CmvNtXh=VYLlWcCKQb#g=|%@WEFa@J;9K zSb^K|lcnx@G>2{qVQp3DO<0WtiR@cp-8)U9=n(RqUR+pgB$Q*EHf38owhF%Uuf(H+ zt#*?RBmS8M{Ayq%Ek2t2Ap1Ss0dv))bQJQ?{`qUZU?_X1MgGihJ%fv^M&JBriTVm9 zvMq^?j*a7Uc-?Koa;3PIUE${k=O<=oNCIE_l(`4Yq7mwge$Hh;g|4D##E`gwHTA!bC<7zAN~U2-cmp359HJ zw9#`F^5KvDISYIWSHr;C3#|63{PM4yRdf`xZ5^kaaJ|XQs15_E5{Ha*Ez`d)J+n5H zjEW~458m7>H;Rtf3ewv~5Oh`$eA{u>;!Q%oBQg~ua=P209JGSOck{4**#G=QT=O_H z;JxMb8p z-Bn}p-3M~jRmg4_dJ z-B#E)G6wlBd9;ng`T0ZHen2vrZ!|R$4k^JvNI0g+rsoZ0C-)v_&|0v0U$tV7d&q$v zmFA;Dp+II#JFDjnpZr48lU3~_x;0gwBQ!Hjh7R<;rcydp-sd6JZW z_Yr3NRWhtYN)Dx(XU2TH;F}cQ&ChR^i4+e|eB!9*PnZFn1FzF&e~blw>@two2cyfi z9#A0r^fUThT0kihpA2kDDopAaJL$0WZmw>4d@haK-v`hN#?W z%;GQ!W!$imm6;rmExhHcz6b6}T5xc%{93}zce1wr8q_b#X!SYAfixat;q>au$klPz z4NcRBWx}Hsx7dkNBLk4*4KGTqWy|iz`u2r2n4&1%^C@csex-o|L2}*Jlp;k7^N*(d zL{UraFjczakYDO2UZxrJG%#17JH+TE4d8vZWvgdJAw5NF*&ygyon1%=D1ZK zM}_8BIV5tbD5%55&&N^TA@{pmAO*~YWzyPUGa?|^=BjSVUYDe1lYXnZ=Smrsm!1(L z(N0bo=3}_^Q#t2HLCC?=;_SL%3m>ErDo=K)%kTx?DH($Ot@6ux;{j5!S zspA!L;u*E09z|qv6RDmF1sH4|?M1K3i4}SRfoIwTL`#dy$^6wkd~+-HuZvw;n!`*x 
zUJLQRtp5ZEpBI4JcPT#VN_+BpZ#0?k!5mTM;#yA*L9=sTSb>fXEAFh_!$i%RqV7SZ>UpB zgCe;y9^+*}$HixW#w9_Zcz!cd=o6ZWPs`$M1n9}LIEcLkK z*QQR)9s>Ny7SZ<&9#_2@Ewx-CS4{`H2XqUEQ`j(ybhz2TiCXBxY#HyP%)eHJMg?)CVR z$GU^Lm)NC!V)Vs=FbuS$iF;1|QmjT>B~2=ChlYiRPtYo{8J6_sif?u$MLk98bqg64 z@#5=u_h-M2*ZdHC&lBscTYnyrnL9PZ@W77N$8t@8U%z94 z_BBA-{dT&+xffoxr(`9|I(qPg{y>n$+v}s)8Ah$x4SbX9iql2x&AfLiOQe^J&55<$ z)$V3i6H5w)Xd_oLodd!Eom*KRqJTxAOY-rBhB7;GQjeA>M|Pi8KE;{KRsW^RVg#JQ zcz(=L>?5|!(b*>gw7TFGK~6TF=~pkV(yo!16`uYG8&Dz*OtI;|=jDGUpjEW~`02LJ#aj}(O&kxO)&kxNr5DZBa0$*I~ zLY9qEMGda@vM--UXM3|DNktHpRE%nGhw{%m`v^KBEJWQ*tR6%P<08cjguTr^kv?|6 zYsFo`l(b)kPKf9xb^LsP=MUG?AAg&7>WCSAR2h`PImMahae5!~7+X#DTZ^Otb}J6* z%THxzelayH1f17VHyv{Wsz)>Z;_?CXbr;fx474Rp4PSoLJ&N8to=&$pKlCh?`{DJC zy2*U{4)W67ywhnB9qIUXTt;ypE50a?njdi~@_8k}MgSB|0ZdqCGx&szAkcIGel5TN zISWg|r+jGBf1+xsNKYJ1wmn-Qa5#AEZLm0XDp+qv54MTSxzO&t9A2O4WPf^aJ4(da zSjO_aq=e~O<2o{(d$r)tB(tfv%!OaUd58`~ z0;j_YVX3;YXjbrVI$K#)+7W_zQS!8Mex_5}?X95uB!F z`no&ux+!W=3yZD2w!JXdF!WB9DKtsUw@faTgmxVQJq5(@kr4J*(0 z&<{?00gSJ=>)ND&xGjo>hE`QS;Gv48LBy|IpEkJ6Dr$|IW$kYOT5?^9Zvecc9pKts zT&RS~I{5hS8}s#C2J&03bZM+X_nc;s{;+wiL0zxmXyY*^h`C0E9CV!DS~_tF115+? zpZK$hcK>njZGpb>kNv=mGDw{NRaa>9v1#zUTsA-EFV!3(6*zFsmHmTQuZYnm+L|0OP#3? 
zl2d(g{e7nfDJttR31*`v$oR9-}LT1+8%wKar&NFv{L=kg4MiIs)A3uVUaKM zjfQ%IfE}E_JQZR0OsN*UQ)|*(^>zKRIL^@em1m7CDrOyNc`BNVimjM|YpisYAtFsl zNdnzIX1YJsOgU*?vP}KeR<(&nHHfyvRRb=1KIp5zEjUS)%T`_1QtJS4v^8bUp;QeHVbNMjj(~+{m0lrGb&$4ZjYUe2b2x*#a7)HXtNRE$XHYGC{ zm6PmxMH0GI-9^HDWFq-2*7Qk;m2Idh?*~_2z|`Jn$EQDUnLveauUx1o&uqUp3l08c zcoFTV`)0%bIxm$cy)ec|=PU@jIa@xMqN6idD6rF%=}4yeA}xv6`l$zqqo@T+IAAh& zzukA`U9Tswq>S*LeUhQb_8RNBY@MCMy&Yk-Qdy{Y5`2_$*6O~YN4$de16tnoI7M6m zGh4TV0fLUO9d$Py@NnBZZ7U6YktmP~iL->?ra_fA?jwIlRI>=pOio?u;1*`M}Y{OC0To0AK~9R$t{s>>}px%wS7 zuGg;-J{TkH16~(TwgMk#Z`}JM9f8Bn?s(*4%ePI6Ucey&H-mP1HQjX?%d`xN?OU^V z+YdUoqldSdKR$8aZd4UaZBf-S3Q7APP_y^bICMFFP2i&(0UCqMSW{_D6QZ>n8g7=)7sgQU&}$FIiR*&mmCuvXJ)W%zE#mU-nIX8o`RQig%X0x>q%v z_{+6xzJes_Z=b5BP+eF35#+B(vtEI-_pt@l$O>!*S|MA_7g@k%UaEIcoUJ#~I>l}^ zNxY1$9c|sW6&jF{6vQMg#-&?Ta0anR;BIyX9Dsfl{<6ybCyOwC&qTB%nBU?EC)CsF zQ6Tn`uc?v$_*?W7X?~l^5Tqf$y};GQ3-ddPYR}4w+S^8#_AgNux`uAgX`%e>7_1{I z#!WXlVeV!K-e!Q1t%&pSbn2fTl6CiE!7yA)&7=YnPyLNHvY+hO58BPo-lQ?}6RoN~ zCmQhC03Eq_0;^oRtL~Q&`@JB$H?wG9OQ+TJ__YNywvD>$zB$3*5G5HZ`nIdUls!D_ z&y)1(i--;U9__nUf>u%^y;`2CmTjshr4c>Lx}@6mjmuisQ^)u5{UU{&dy(x1HJ1W{ zsn>3yJIf=@EuoH`Dz2%P3(`l|cbS|KBh@z={7M%AZHD06qfNuPD^ec1s>dUa`{uq5 zgIlkJnM=+wkxCT(opufAVU`n#nrWwqlAk%%`T{iQdkbw}ZJ(SBIja(_hwrpf(Wfpl zQ2|-1Ctm)Gj;l(r9oBq=5|&Vs!;=RL@;HLo6;&F{t@r{Ig6}txF#Bq#0AEN+j+nl@ z3scR-SMV!B!i`eNnYI2kZ1?x%_CM)0--F(^a8C$@($LM}ihinb@eb0D>WxZ!cgw$+ zYy-A-FWEYRiB2UP^HB{M6HtC#4D4?OsM@1+vm#DV+L;$IvA;FK@N%5g$ixB(R87VO zVi_7eAilQth`+G?b){_z(o%E3lv`fNsJ95?c$ zRDL8n01^-Je)7Y7cj`h?)MHWR_HmuB5mHHYtd^RKqmOd&Sf~Gu$nMb?rhI4@_YMFL zA5J++1WjhsfkD4Un!$^S#t@oen-*P?RKsa+2I_wVcf4B0XGQ9S zkA%)-$p~Iw(W3WdwSCwdE6FRz#`3%gvB;>1#%~P#n7c-5UUe(5ApC>9zO?Q80o$Fa zGPIP9VV&E^QAjf~P~9Zdp>%dh)x&MClWK@zvZvBi@Z(MMm``Nc-u>5FsZhGyZ?2->dGECpXI&s)Jh|zCX6XUAQk~)88dBd09 ztE`S^?wZ`LJVdALt|4Bu=aH=1zPx;Ym{()4bHG)&OwRPPcQxLV*CA%fI6jWbcIm4M zE)*dr6UwR;@AGELOEP=goxg6b(C=~mlC(zk_hqhYxr${w|BtV;42tX7yEPCb1P>NG zxVyV0xVyW%y9EvI?h@SH-QC@7aMwZRlK;8qo>OnVb@vzcs%C1Ys(bcy_w!q;dmw~~ 
z{hH`ynILvSrxBzE!UcpJ34tRrCmSv${byN zoJ7Jvjm2K}uaodgd2uB>+`WAs(%rx*MnLl%+SSKY32ya~UGC+wf8!J21k97b_Nb!> z6s!@qfgQZNkp|jSUCNqz!uDv!(9d!_+@4R3_PXx`u6B8>dbU&vBc$9%zc#YFDpu3l zk0!&s?##Re0n@%C=-^E?`}l6YVDfj|(W!4I3s72@6lvxIE|zYl&=z0i7h)&fFQV&J zz!D>w;JaIyp^>bZgt zq45oOh&s-Aip+-|!f3GMTOvyVdd0g}w{taztab++MFa~IhaJeE=u12eGFMG+%TU{p zqE5pVhK?x5`eW=4+Un8PFNvRuTGg>mS!a~;#@#%ct=}IxMIQA5Tcw+PR8?Or49yl* z$Qja&(>7p&*5a2cu=>w4Z#U{J4lH;d0IHRHv~x^$(&h|mxTbiS4IwLznMGUGKve?a zG62V~>*SRrlmn%^JpiG)eBCAKA&#;~qI1@3o2SR&DAD0&W__#H3)nQynV2X-!p$Yy zw4P%si{kfy{q~oPy}wTZ>W={suAe6`PJ(lk*4-y`LilG}D;o&G z@EO8dxRKsnvnV@OX$6L2>2_vRq;w8Xvs_j-sP8sa^R2(%f?jDOBKx0zS#VdcH9XEc zzZ;%TRkm(&!q*RRzo5>=)4t7wmwUj0)TJfPkPzBCrraEmnvWx2!rmRL>n^Vwx+FMR za6YB5h1uvOU0sL&lzK zZ&_ErT2;vw+*@2DouBV%t(zVe&qa|7(MdQ87uBw%vsN_Kj6au)8x6>O{}UEY2^QT5 zN_y(^%=jp_wI`IG;L=6Q4ecxm?X;lmEJ@{=E4V4@I=FLkwt57efbhCBA07y};nE&9 z+P<#qu$T8_=Aaa)g2Jq(PVc2zPYI+hr-}iu@ZPG*n)?-!F;l~La_&|1ao`BIl z-ejE(JGWPEuXyW#M$qtY(ubZQex{wn4NFZxPidc3xcrWEqb0M^M;|~K%%6ja)hidpe6}+hJjFIzwCCLwOVYX;IMpm zd7w@h<=X?vWueoVtg>Q+_aKH;@Ixa`(APw=fwsIn zTCrXda8CJB!&jaQ1X_^g7GUY@J| z{FP$MM0S<2U932+WQV2C(B^i7%fR{*Scsh@aJi0`fvFZqGJ!S~_m@mHHM_TyB+JX; zj?J3f!sbImhxNA}dWaQQlTbTa>qx7nG%uQFHa^NBbX|-{*W>m@+Dapjubaih{S9Ol z*2bC?nfz*X<<_1&W#4}OqT?LM8^LMn23AA+Emh>PM|_hAWYb#I`MP2gpEd$B{*tgl zS)EeQf)>P;dg@0Ug9?U<**vneUtoIy`wi7Pg1ZJ%uMOI$6sh|VV7tlmxf78i0H){4i}S-Q9kd!`Quy7^sc2~6 z$`PpJzPIXe8lvi9Jfk`3g5hBH8wrR1bJ$LNE$2owYIdc}BgI?v^xc81(FB((s@`j)E45zK zB~3V|5sACYKE0s6u*~o`M{#+u^AX+Cvf2=4~*=^JaY;z*dp*Y!RL3o#bfA;E)Lk;>Lc=hkevue= zkD-%$!SM@sh|0d;F9f53Olc!njWi4 zM=wH;DcyG6RtLzmmpMg4N9z{t63uBOzpb}2GZ>+IwPT(Wg&O-aP3!y`*P(Zee={W2 z9k69hZ>(au!8a;eW3|=Wu+?2(mzUJ6W*PM0y^PH60jQ`Fa34Wu2gGgh)bZ!PPmmFk{jEjLZ$Pl;y?zHuJI+L{FoVijtsA4y|HT zfBy&F_cy^#A<(AbFkJ{-?3AXhX1z%iQ7)MNL)LJdpn zQvWoG$x65YEY%$0ROv>$2uEkJpoUDy_@V7N>zA~UkA5DMCeqzHs)yC2cm|63v8N1P zCY;)|$yvF?1eBR&brYi62}c$p!q)c7dvXk?e?O9uYk<3Je$9r#f%p%et+4%4{@P0*1 zpR93~>?4OpM?X%1xw|NMAGo7eaEF1?PRDOmz9!9E&jh=*|wDtX?mfO=HEVaQ< 
zsR2$&OeDF_9d%7z3q?&Frz%U-3}!34sDz|#w+qr?J>*Gd4p?>RtJX#4t3aSbipf?w z!5^iEIa<5>aZKrOXiLtG0!x3i}b}g_a&F;iQQ5yV>a{f0DO^(ReU()V62~R zZl5)pRV%@u_zDTNR)jzMbbZ0`vQ){jlxbFnaM1>D)XSuLDAUtg zv(=wKppmvh2+o31r`4SC68(iXFOl!1MDKbA^)st%t|TNNQdRmn4mWz#EfG00b?P;} zt9em){{nU^V=I1%_IC`Th#TK_n3^qUWxbA*SCb%YI!ZcZR>kfffbX@zO$A*ym_&Ai%jSvS)3wp?D=R?yc{|%< z%7Gsb$^s`k!@Aoy_jZXtf0h{|I;INXS$WJ*t#O|F;zUBwydspO5OwKI)V-ESu{AM;jb0H_;X zJ^jLoujJYGrRgPp{53?bf@1kK>ZM;;qb(V~+;ct4ws5jT7A)Sq&f`7Opx$HbvDoxH z=gLn0-Xi(i5t*~?#MU9NTk}bB$DDw?Vk8o>ViS>liad$yw{%)Cb)#&v=thu7tR{i$kI|yVooiqZj$#-kjsSK2ZwQ)NBQ)3 zfxz=I1R@_IKPoN!F#B}emO^h!ZJ3?qbb1TxKl8d9!VqG`mzAFq3 z9j!>CQF1vb|B^Y-S4KS|Y4EgjFbk~$kKWQkF^GH{VzPzwu@|5&c-i{?)tj{wA(I*! zljNEfxC`1p!Bz5$Obj6>YGJ4L6(;){f++6_y6E0MJ^{Dj-5`U=K{(r4zo6Yo0er-6 z$f)sX4#8Yo_3*IdM=>p7I|m!*XR4%hxfVJE-0zBIjB8D#QLjAdf^84CPEA&TJzAf? zSa4@L&^h)X(Ij)n=kgs4F1yydW;x8)-|x69`j+q9a*!meI=JBT;B-Z>>&kjeO5n&^c9;Jo;vk+ zwumrRkke3#PvJPUb^pTq1u1xz9%t|EBew?A2_4v5HgQ`_aj4m;dI5XA=}tBLlM4g3 zvAxZ6wW0&$HatHNg6zxPdRxYGCVuLDHr(r3_DbM?g=<|ksMb0W7BFw;*$@wHTX>(< zB)g2}+Rn5>t6kaDRk_9{X7W<+WwUTx*Ev}9tKlh@#)TBy702<;H6{P5IqlEbl1CFk z|GWjP9fxMc{X|>m3fCRJRbg(Zf2JGObIf*B3#r|SSn>}lMR!@R+vQd&%?d@o{$0EO z>xD{u%r@@<= z)YN?BGPIe)i+psO^!`N9`@EP744D_pKxHsJLdX3wj1Bt3#t)> z-YEVA;it9(6X4P7NWv?(k45F^OH{{c`yas{J;re*UXh%E5emrpDE>m|*b=Y14x}*> z;t>fiZ4#5bm;;5SbQS}=>>j297Y$`7D|!*_}}+HfL| zJxN^mWnKH47$Iyg27e?vOog%Bk=+s~ynQhF1d3C?3x3Sg>t;UUjD_l(;LPiIxOMxE z7Gfolw!-DkPE=O@`UE)hCxrZZL)<-@^W$LnLK>9?Xs;}G+=t->k)%XvjeMkH5^jjA zC$n4kCo8A$ul)pVHVyeY4<&JilOjWWF%6`*n^sM)z9`K(o!O5zrru`Jfl+U}g%&9t zUBXvdpf09Y%PEi2o0<5hqAdB%h0C{_5NA{V;mzyHyVnbmWzaS#`_r}$jQ+zyCxP}@ zrt8L3L|3JglA>M113+2R6G5EkSH(IG#{Hf)!2DAKP1!dr-&B@$^kbD|k3$^PLGPGb z(Rc=Av}omL)D~t zpP8np#bhpD;rW(2J@>vA|1!j^dTSjGSYQIyp=sk*mUW><&`!jFBijXlct^OxVS z#ax~qt5(!TIr4fDL?o4YDqyRDyZvz=jU}JHr{%HvMMkf*(fkxfZcWC6$>aQ3EtyVo zp`6Q9V}(0_#3XBW(tA&({?^p#?IG@{Gd9AhDO0e9e|}==eP@DE*FT}}tU`{obI0p+ zZ87-_$4$E_gE@$CYlD6No7H(I~+;b#Pl`CvWz zo>bT!agDKdqWAmT!Q2v19R5+c`Qi53fI-PwZY@9bM~hIlOeh^}?~kDw&L0dT!lD>C 
zJuy5cn~2qJtyXbX)1~`=+IOrP%@{X(Yo<#llPy&#JP>7us`T>b-M#DZU~vdNl<6Lf zOQj1fRi7T*&~Kw;35JHY*b|4Y@75Y^$oL=>PDYQYy0BY9-90K5d77i0QziY{7~`t> zamp|_o7bb-v=uE%mlaeH^wczUw#~P=COo!~P-}-_9W)MPr5$0#-%}$B@N}@8N?hO` z8nea^hV>UlEg}p*{A_#o%po89${+mte`E9?p!)yE=nXpm#i;*RM*sQawU_^4*#9tZ z#sAVD0rKEG|D``}AwGOV|Gxf)-h*%bTm9dV`v1xNY5uGIFY`bDFa2=~zWpElm-&Mq z{zw1)Jm44q(f`y0@cl>s)BfM~_z(L$gxmjUpZJf)H~1H$M~j>ZFv(diVZ+9T#I?xe z5kGt}TssxzKCPq=MT>G(9=6#IAdVCsp_;RVR|yqLtZmaeERE;>Yf3K)ONx~2+VAqxvOQzZyO}hxKenYTO#lL zUURNdw)<`EU0P#nkGR8gju!}nSprSw+4X#%YK?c;?h(_S*uKS4S24Ef`R7HN2N%Rx zYpJH)ol$o&FRgbm$B5v@u5CD4Uwx((Lp1%cRr@0B2<%jwmk$t_K75!s0};k3e(~2-pOnx=<1bLHHZc_YOhSf_-aHKaf!FRKV%ap`=M2kttKr=kUoDWy&w1ffLNp z?^4d4C-bAU<-_QT_F;6j_!Y1_MJYq3e}2w#Udh$PWIU6;IU)7wPr!u)F>5RbQRn`1 zGQ6YUXvc~0{wHM44u8Tmr_3mtqJSpuF#hg2M;{;d%dN?+gJR~ZLjz5}2rB`zDYKcl zZANcL(7P-fKZYyjz^#tu8TS;P4?nu4WC3ap&7jf4ek~ZQfm8xxvSv=&>&oVF)<7s zYCUD{m!q|81C%$pvT0(03SJc7=Qz$$lzz@?xw9Rf} zV%j74KJhFJV)bp(ta_ii+C9sDdFJ}#Jus(^pPkNQs^YuT-kbw=a(_3iR7vb zE$7$Q&$b-XIY5<|IW7Il8v5|pK&BheK@M+B9jZqyEy}em_>^0PZ=fQuDV-08=eFFq z{?=A$A##gn7LRnq!R?lHe6QAVmVE;cV>J~gi)vl9&SlfkCM?4%`5n2 zN16Q)rbp0EmD^90(y1D_0)eiJBCXDGPJU&mI1~m8%)P?Y=tlb1#&!Nv?uqrQVN#UKLG zA=P*IW?%?``=(cs6sK>x(|(soN-G~`@;$klQKq3&fPoQ;hF{BZ@fJsLzDuA>H_JMaOoT69yuorG9xM@N2tG2;(X&*Fj+a8ug)m>#Az%_IHfVGEc0_BMmieB+cCdT{$!19rb(LrkwBkrwJ4P!DzBOV zU-L{d(EMjyG{~3}ehcsS97~W^aOOozunc9A*L}l@mGO1kn4{G2@5L~FCBMElt~>Lw z+518&%zI~6s`B15wRPL7SFcP2vu(UO;mm$8?%MvV35lF9?3}b{gLAU931SQ`Sg83ZnhTtDBy4%8gV9@ z_)H?mcWfJXHoB||2ToAV!Imqj@qfH~yw=v$9DrwU({!T)MkyG3~LuBi|Un zKYP}q7MH_o>l_oNsa2c0k%BC8JH-u{bJ$pNHJ4~#OH<1Y zndgz&Y5=|D&lc{yn__Jo@)|GOEVcuHsPo2l$a`E4#p_b(<70sUpX0%K`N)FXp25Nj zp!$tjeOa2f={Bp7Z$7>BcO7J3c@Zc2@*Xef=X&@i+_#aakV2w<6B!iqkYx`Gi*{p( z2br>AlzfLQi~)cBv0E#cxgfbc##YDD=SiG4GoJ9-7w@V_i;bO^o&^BM7RM>>WQWo2 zwYqX-p=QjIsYAx2fv)!0sX)_uoh*IoNrKl8`rbRVX2ez_WnB1k=q=M-Y8^$zpr0Pn zkvPF`yq$U-sKnvKJ#|9n9c60k9>CSeo4ry7r)Aqw5L~d7Y${$@PpCJB(RJ~sJX!?* zYE{LE^#yp^+y%BgZsSj+r7f}=(WG3lBoX5YgYspDKIdqP%3wF^RU9;-6ifCH(sF;T 
zFpWup{NL?#Kfd@uf6;z1;;*WJ!P3?Ti4J@E-?{3k{`MMrH|?X}6)H{0#GUsk-yxg1 z-4esNk5aQwS-v~!$go5{JYhnua1UybGMa_c?o(i?;n_YA9Nod4WH~n`Fy^s8{W^& z9UePLKRwfH?$=qFk=g3_R-bJfH$2E44@IafR%Itv&267bYgvDVj@W%^_g+}?ZrkJo z5OA}QJ3X{x3r$V)5~E5Y#nB;0TV2hosR@sysP{#A=xcFnX%OZ@d1;k|YO%Sq^Wa5d zAFs$L2RrR1A^Kr$gfNBpjy2XsW5|SE~%@uK=i}tx+wKp0HkTK(#_FhI4n0PJuT~B>w6b&vnR`9(2~G%uLPY zT(CbMk5HAXFMk1W#5Oos0+OvDx%DaO$Ib2mW?%a>0~sA(y4}d0B_WpUV4gEX|3A9DKpgV)s3? zZq6v;^*$6LpFu>yu>G&9-JRS(MS@4_uQOebF?4k&d@B$9U9{tQFH$+_PN_wzfY+^L zW`5qIlBNNUsw)RPlH3Y!oP7H1-!GkN=f2yh$f0O*M`MU`D{-H^#GIHYwpLY^=zVYk zBn7TzUe=Z}VixlFc*ocsFqen5MYY@{^>1NYg^O8A_@4+c=ma0}=gx4}IS;lsT5un#|&aYyB^ff}zL!*5vyEm|6T=ZuK!;L%@ zNHJ<_r<3h3Y$LJk&1Ju>&uub68D3zH&ZV}Sww~Q+?DK|A<+8VBjp`YzUWybjx-U+d z)QN5GXhbxC3^cou{xp;r>7#XAVP(yvX)q-Z@RUieRlt>GnYAw2FEXVZUE1{2uNs|5 zaWgarkK3i`Dw)^V%cphH>)Za$U10v?oX(+=B~doRaxP7KV6wm5vAB7^_fh9`Y@IlM z?5w-6%s-kp5?#unLWO)r&Am^GDRn%6z(rZHc1&mmB}ssmQYGS-52oqM_IKI5=;sbe zNdD3Dk$V*&8p$F9w$u*p_mhi2OX zP5Vt!PMHm-w_Jye-((*zK&zZiXQ6{v6E90DcC1N5OWrFFVwe{kNPjNg`o$EO~bc8zVZ&fi-=`VzB_QWVTevg=%el`CbNBG&8G`Zv_MFK7grzKISa+sakzQyptSA9Zc7`Zt2 zbdoTu&uh9Te8jodgfG?&n~~+}G@)%*`3Z|;0gWe%4LFABu7`?88$Ab(*5@&OS1c9$ zTi5t9D>ftks74_1XwuV-*IVvQCW$9W=S*eQY`$51#1ZV_WDdPC*l;OY-5|=tFHvG^ zZtaXvfE~N-gBC;{v&LrD;u+yn!tk%lHOviLG<8P9tBFpP#$tCb- z@SDhr!=|NHx@ML)evE-J?AX|jA2hd;t@_%BT3E;az(o9ruXCPZP0AChR@Ewx`ts!# zi##*Y`fsQ6{e3Vz1i|E12NLq}YmGm@y7O!+YTb*STu@OpX)aH3F6Mrz`Cb2m1A;KZ3bD_*Dd6 z<(uMEX>!IqHI8r#5he3G6vke;UuZ0%(n3ux8BGF4lA??97Wvsaw1HFi97mu zrabYQHf{K;9>;w^HV?8Eds*WJt*pQFABmT`4361LB4)yc$ zVU1_lw$Z6cQ$y`e;5orXGEGU9|bwhnzKJ`yMF#MPELTpJ5zD`b>^v4%(8drzS*c=m1(osg5d zo(NC06gBYt49HnCSy7V7B-9Kx_`;`| zPQfvy3TJtcVMnccnkjQc{wUP`3*mx*0T2b%a)dDJOM7%W28xn~WgXgRr4wiRfe^ji z=x-@m7qp>t3N<@--e_b@5_BV1BN>VE@OKhm7(}3Ciupn!28ETqR9COS>3|zA(Uv{+ zTBhUWlL%lG-e^{jin3fbLP|rIt1G;5e#@<;Y|cJoL=eEOUMOL<9k1%JzQkSFAi41* zYHwd_Af#~dJxt)|``}c4Pw9QCYEuWA^t#|K+%v-4ZQ#iT8UxYlI5aBz7*{r2ucI%;`@jmJtE!dS!1K(u 
z^~hIlEnQVdw6lj)P0R4XqP7}Cr2pnD^d1M~S}sGegQj`|3PxvJshZ9I|nJvcs!AtZ&ymc%wpn^yQRt2X$m} z)iQeF1+NKk3#AC)-%IjxI}lWxFFtzt`k!8o*Ux1T`(8Gr?fq8d z%^(9bm)2=6p;S)o2+ot#=cR-84i>q@CJz%N6edUDB8Vq^4WCBY+D&TCmXlxQH}>+@ z?J|=y;6sqP3z9!x)m3jKj@Dji^C<7{e0>Ufl!dc%Pp;W18eSM|+&;2|%sgE1c4R;k^< zRt`(4TC>qp8)VOjhDZtOPyOyl$^q%N3q=Ltq-c}XS8R;#NrOq&+F)197rT?N6n zrk;!cn{Vj)(c2$Yccy+h!bcoSFOcOF+eNBNDDu*mUJVS02HgTYkMjkv1V4~Hi_MoW zf{i+90&(~5oCI7TOTBKn(o5}-V^Cn*(J957SF{3|(r=Klg-yX?1Y}q)cVcioz%m1< z;p}DmDubqVh~r+7ZHJ`*Y+5mRkllbm2GJ-R~rl(bU1BCPnzW>P~= zK=D}~bAwF3z{8AK5Uu;ZKkWU+Dj?(u2oo5|6-iuDQ5=Vj;lo@}%`o`^ktaT(Df&I+ z_~G}dph&{EKe78v?SL)9`%3j?ewAbFZq^p^ZV8IXKX;T8uwZfHtSRO!1a0DEU+DPd z-}f4LOE*a;gn|d57GW7$$%Q2iYJS!`s2tb*BsRveNS&z%QM_%5d6P$SiUY}wIeZ2# zT}qHc==M@rCJl*rh`gB8i6wO|GI1;NX^BR_+Xee}Zd74++F%M~1SrQM8Nwl_(s9VxB35H|uc%fWFXQ9LLSO1dVF!zbklBA# zSVB&Ol2Bzs#cu$3UTEO3n8R_OX}BXg*9t#_@B1Sb69@Gg?JX*kE|ZKYWziU|K@*2x zJ(5~sy=r2{vMB}ihl9ipiUsk(w*QBaax5wc`r|2V(mjf6oDuCr8WR39Sw zVw(&(4B6QWBP|zSPG9pCCZop;*$Cwzv3ZivHIhpj^=-t6Lp3_mrTp;Y-;*0lQZ^$l zu6Yf0V3Xw}L?R6MHO6S-pmv;05)W~k*Cin(SRYVk!r0!Cxk?i?_9LVI`~`d z>(DpPu;J?I{RX2Hm7@7%xxxM+7B@3dy4W>BY&>auzqvY7J2?vuuj*;}IL1G!S@*8T zf>1xisr`G?KXxiqWP&tloTk_|Pwa)&OMvys^O62%@k~-s`%5BI$G{J`_7RPXmUtLs zn5$tXhV1EyL!hw}h5yd#}RT@*Ij$0Ncul1VIg@jk{qJXA991;d&AL@a@g!E@+jj)Hr4z&J;$lrs~D z_z&N;TPb#N5FRMqlA9bQqHn^QYU}u`o-Jx7x69J!oR{B2Mwz3!I9@o}Ldpd`9_kr&v zfEMquVx^E7)a~0BZqxm`^i2eqo$5;9*s# z(D0ch;!EHO!t*|JW_(KZ#+Jwf#ttC1QWvS$q(RnL#NG9|^LfNYuf#TTw+z};o4oSg z#%=1`rIO_=*=ly~Ze*C{tmSl%*1;=7SVxvtUjbDbS-`-7I=;D%6#`gdslwYQEZ@xLW zNl&&N17DWBcfwvL_^XTscj1|ley?ko*0^QT1Mo>VJd%-tNs9Aq1DvV+)|Vj9d~Po8 z*D0Y3g3>9uDd$@1BLFQQh>~upn;v;cWS6x2zMT8N+ z9|Z*8W>8*>xUbN#;^TI@*~GXkwhJJ{c{Fx5_-Ry)zEfSkQSLLWhdopzDd?iWSEO9ATqg6eVvb+n6;|M*@Q6=Z zCq3O-5LY@$7!yLnAQcjhMI#-aiqy54fxdr2(Gbmsi><g#Yi6 z_uwB#$N&5K=l#z9t-!$kdB5BL=zrQD|4V;3=E46*JH$s||G%$){{Mfv{{P+h@fT13 z--I3r(UZTNL-D-{h@RqnuyR(~mU$Z9nWJLz`0E2|Cx1#dOUQ^462<08V^aGC&=b!! 
z^dS9g{tSO((@^V4tqMBm*HcvG(;OzuGYtSuX@5nKg^Veo6zCs z*_R=m>I}cvg$sA`4sh^f>)5@Zdqc=>^G2#@dJ&zod)1Jxe3NfZ?8Lt?Q@0NcNAotKB1;fLBpXBf@~8%O$I_u+O@`n-ZlSF#EGubg z$_qPO9Nmk{WpbV3BL;rD3ENcH+&;7hSqzsxdy8;OTBZLqF28-Qz*|2;N%X%$OMT!b zEMJG$tX8Yf-For9Rw;Q?HTkuZwAplYS1pLP+M_;65|(2_W zY~P03h+bdzNtNC(c;8~#1tE1o-L?5R?uW;Y=|`9oE81n^)f8Idwh{&WKB6>EVW;CX zQRBXr4BBR^4d7otw?UIoX1NUJV&CV1z)Q_*Re@}ETcyFJ5E56;&J z!Hq`wG{7DDe#P2TqS#nct7U#sdI6qrynbxlNdh;iX*xI0{oTkl{%#_!J<5bmc9`7v zW@ngqOYtIvbccRAUV<*SLh~pVXVR;sRB)IT7$1%b-sLvw%69pFK0vNUWut3D34kMZ zaEDeP(=fCK*G(L|P&aT3T6@~QY&VwL zz2gSOr#J&cIq++V_ruxEH$qqhmPT8EUoBVsiP~o$805mktm@-@@AzBJhxz-B`KRqB9G+R)Hc+sWz5|h*fRZDG{Z1|Xj}(5jEGFZu;FE?U-f!C zl|A{F#paLa-i59A$JS=wDv#c+-|o|@^rf?a-u(>c0MLQ@v{RBo zl3>0kbta1qRyxmkDSORx>An8q>!0Bim5|+xgC<9%fGimYsM2!xeMnwM^|D;VlDw~5 z;6~w3VwDNbG8eVx94wrcD$_Hug0wq|O>4*oKUh7Y3{%=hdl_vA= z@#RKbm$^%gY?rE?$uq6cmK9H@u(g1v98FE8Fg{7?#C4&}xi<_+vbqna-kL*WZiAse z0n0*7vaGeU4}8l+RNobdijH+|p>^DIwGXo(1s56N7^iOL_eY34E+IGC#~HBc@bKQe z*)ov}OG^HU)|N$JiCTxN(@4Y&7l!qHwgSK!3j6mCiXLLMJFwbd9+-CmBjB&kU%uz7 z{YodWeH}Q)I%$OdxyYA!oE~>V<^l~*AhF9)nR@rKWa){}5PRQf4eUCpsOlU!0Jd$3 z19j)U_$H7iw=(t=}7?YPHCVWk46{F=RF!> z-YZ0R&8OxyPWx%G=l9*KN!6^kF7YkAaGcvNbvHhGjy;~Okm~(T+v*pjh9g2T*Cwe~ ze9H*WUO%XyS%QLcn6lQ{bxbqE?xsf4p#)Z`t=8?1csE(@n?dr4SEbv0`j!NO+%|hy z4!o+CvTBPG0d6?0!>u*$jyBh$<-XcBMdvds_T`#I{W6AhB8EuKu@VaR_w|MHkFHA;ajAV=z@PzBR8VcnBF_x3sHS|?y{&p=eW5yV|uk6M3!)xDA{3GsedbE`zC2!@Hjvz$w^1pSSV4;%(^uR z{Kit_xWbQ;fG0EWcoptjC+z*=oq_!MuZ4HEA78;$-q2d5uB<}Q2^-D zr-Qn({hgH{Mzr+mE0IwxGoY!FyxE-q_r_<0gu#d4k?(xDkp?-b|f7u_V@3;s3$QH>mOSU+^Upex)CZ>L)MW3imwpIWEA9Lg5$Gf}6uS8muR#SQ!2C*C;#YA|U7FRdM zn5j1-Hz`u2)h<9N=dBQ=vtAD<^;7QF(x}7Zw^FdE`dtb1Yn6tQ;00>#dk09aaqRN2 z=eqp~{4k49X%ww?^ZuSD=#4D~Zx#1z0iCa?`CM0qkO%nSBm(nNM)GU#lEXk4ZZ}^_ z)d^$=8D1>e4h(rs{$zPM57~|Ap_CIPOm{KUl7m*xrz$8hDuQ|lFm~Rzj=y!$8GoMl z@TKLcK>kMWMxs6nxVvw9jxyT#L(rNyKX-iRZV{5!sz42=ppI4SG-V$1u@qTiEyU9J zd0u|I;tU<1R@R{j*o+7?)h%6Z@H*L>&WO1n{wNgy;0A7j zpz;|K=uE%(fIYD-)|`7cFQJw#2$i{O!g>b!eGa!*l1dN|ex0mni*|(& 
z+g-l~Jq{@P_P(>WBSgM{EE?wIW|9bRm2q}zT+0{=2G^0f!*r@~+yr;TSN8T+R?4N`3sj%rqA3bvE6c3?0!nic%)QHI9GusG znljmjmI;)NCSYZu_!my<#chbGU!PK`#_44d*qYOFyLZ|Kc=qQU?AJc2ZDzBv_sUjJV(`cXeEmRFkCC3!#)If>xRR~Tn_{gKkEq6;JSU<=;QwD6! z)uoDRD;(tkAOU*M5wcBn4yn|lKnC@TbcSsm>Lfx3J0|5RJ=0iPV=&~}*R2Z9+wgjWY?8J-*Mt+9ALo#oWOoA_rbA^2 zLoyNcUk~wxVYfO&PHy-E4>KY?HNY++_Cn9JKa4j*g3+!%sd`7UEhqSUMV@akIA_PZ zSF<=b(4Ytxmln^=Sxk-$dt1iOi0KRylOl=zjf_>y^8K$YaS=Pqn$8u$GAF#W@!GLM z3P~;6S&1$v6A_ZlUzC`%Ua2S3`383ScbTaI`e!W)#ENYB7H+EKdMq|l1Ee|+4`B_l z8~wVVI0myUPd%DnnYrKNh<{@xe$ku-@@(=mOq1#|Qr;3k#DqV8`o+{Da2mm=mb>q| za~)4uR-&pfUNnmKV5)(T``EcSaDA8i9A}#v)BUYVqOQx0l*|*>P2@^k{5$P^o&b)} zi+8K{-xcrZ$0YC1DwIjVvKa%(mw=;>dB(i(R1>YG#-1{TicQP64NdZGSH8=fqd}(B zQVWfKJb%zDxH2 z^F)!Z@;vNX)^HdG;6kgvDcI_Tp_ABJlXtJ`uRO3<1Py}dC(;%l`uTjs^`#vT^zF4F zC|{#+(7(=@8W1Lq@hBrJ#UT+!)cN+=@L4#P9fp%Jr<<>|ZlAQ@0}(IuBTe22MF&id z`graRtrr+Of;zNQG@n~H2KzPLMq1H9c!)z1rT%wTL|e%ig<*pfMr0EDfpXLZQk``l zQLz=2l*c9IDry6E$=V*_BN5qXrrfdyUh}5FdouhE<>ri|A&DOKa>w3ntLlYxJ}+CX z$ucGqBqd4N#6OJSbM+bS5{H8;`c1C~J6NAFxY@$SDioZsLnB!2Ro&yWw^vWkS9Jr@ zX)eIrFBg+#6R0_|3EZII{ZO{^d{yhcVzo{T^MbmcFy$#;k zRr|`a6iF31D}`a?M&tjqIvv9WoVTO*e;;vhc8c2`Pe)bfoM z7KD-Ldz#vDh%g;*qbd#&T?70dzTP^jji_z+ZlOS1tc3!_p;&P%?!}6?xVyBtI{`|8 z;_mJqoDkeo+?`;--6cRc^pW*G-}Ala?7uRT*)z%7Yi8eje)oM{l-OL~4NBq8jJy2j z91aI)Su|{(cK2uBd25jfmV7Kk81)T;%eds64v6}uumY~yDyTnP0o73RPOc!pA7%R- zY|1hT(cz5pq6a*VP z+qHsW(T$eD+GKhol2SB3=4`uZnzd6P?Vn#b{Gx)A?#&Jtichb-bmijQR_4d^ccw{6 zTy$s@5t-)kes>{+2k5lJ5m5T$oM^?*T}+pEIKd7gu~iZH#*QK$vZKtE;=sKh6{4RAn)}mON5GRizxp|(98Ly-)rz`%Py339s^y1P zKFC%Ci4xtv#{c}Bk)vFCZz#pHlx4T^0( zR}D%pK2r^Z^VMBmvj%_!k@a2;S7{$U1Uk%F~Tci*la7R`Zi78_wDSJK8D8rk*9;~ z78CB9p?+gQLe(EVC#*-k-{iPH&p$Aod+FRZ5AndhH2Lfy@54J6#bx|E&*II5LXmD$X^}pR z({6`DvBgo6AfoW}ZZ8I@Ju1~fCbl0Ao>WymX#ujNKiSkB%j9JgN=9;WQ+@tin_q>q z_&q(zN1uk=1|DbZPa%4e2ph%KKK7=+)M=xAB2eZQydOMXYtcHTsn9#e?oXM_N-#lvh->45qejsRzeIeUEK{_d!Hk{4~Iwf-yQw=S^)X?q3@4sO`_cc!xuaOqZG zzSOl9l!Y^WD{9J3!~TE|Z@fn~wL1cD5zq=L#Nq>U-uXc)@2r6{SKI6OrEVC;+k}S` 
z)xM$Wg!t+{&R|!GxUwU{+8R5Vx6SI{e4gOU^jpQ#uA<7btL@N!eYATPj#wVOelfHN zo|%d)6o<1YN-do4z4QFqcRHGvECX{y5c>U*sRJ2rVdv@*{@U||>%Ft!MHd5w;pMeF z7o{u*EdAq3+~r03UNh-&H+cU(V;lLXiEXD}K#_4W!EV5ZR;uSiG|Wt*V;CfpiJj9C zP=U+S073fC#SNbV_)ct+`gP8xxk)gYyVBYf)yiAcl<>L_5)SmxD~!`(Dilh8-&VO< zFji+T)VP;P=jo;y=y+=7g9m54B7Xok8}0O9y8AqWMZuSf350$_u2>etRyZA8#ZQb|+tkT*tLLerjc` z7mJ&f-14N`$OgZ^S;En2GXFUlKlhd*A14z&B$#B@Q6&5~<@GNscY&kG8fHuEKcCcJ zSXpR+DDW za|fhy2AZXn^qec)2r+-oI)ITJs3-4MQ@%pImC~?BMpM604q`zjU0Vd6h6^yZoB3H5 z^$2@B`jEEX6#=aoc?HyOOC1PM73lDfhgv1DeWidH2^UA56J(8ZyxWI9zF=;%hco{u zLT^N-2C>1?b>hYEy`hP~$_CyY5A)P33-!R?KhTp%&PD+et#k@MUU_4+|B)8<$Ss+i zLNtJo6}B?F(}dCU;ZmXqyyIQR9XE_@BIAJ?ZF+dqVi`4A`g|lIww$o3ybINSqBHCp zSkeOWRCeO};QJCkyd-au7W0#|Y2?oj{3w!DVa`|aP|?ta(m2bQonucQ7JlsGPiY(= z{Lz8sGH$y+xn29DlbJC<+rZJq3+(6$x<6H14g1$A72u7yC za;?ZT-a#`~++!&TEIQz zbUMFAnk4hJpEma_nf94HmBth@EfUTq zOSQuQ?>yi#*lz@w*3~RkGYdCq>j3;L=xi_-|a}2))&at@7`-(x+-&Nc_RYX7p#6z5XDGt!-CK~byavfHZ zEcbedHByp!SOl?CMBAa@c)YT0rCx6hUJ$$dAn}$+R8Qmi8-g_0O*&p~RHTwK@2VOv zQoZ-RSWq|G2X(&5`QP7A67T8RSdXwyBL#KYHl?YH37L zU~r~Lr2VAOy5iwg(zgT@`qkAscT$dNF>4Y_q>>_+UKsRa=sv?Xuv6{WE zsUn+<{#D*01dlPw5sjt*HI7EOOe+gr9!5s-01I##pK^g{Y2NzOOXBXfV9xe>_+PI4Scv18?BbB>}|ZG8XZWz9Glz&h4MA4S|2I3)=D`e%^$5>iQ5}g zelFC7A(Aw>CQ9u_j7vR2#rE|cXts*RnwiXd=k12AS)(_Cx)Gm&(OqQo2;ZK*$uYpi=orA8f5QtB^JMmL8tzLZ#Esj+k~Y#fazdOuZp` zki0=C>24)Cb)Y>S`cC|C`3>iJ7nP&&Y%X&Vrny%BSXyJ&X!Dr6%YL`=qx`a*OM7z0 zGF3Qc7&bHQYh&X^-RA?2*&m@v=9%H!L@Wwvm1|~Z7cLdAJ?-l)t)Z6!n%Y^ROy>uzNI^-MB_65uOSN{ba$I>U?AJg^WX1NYZpR&xFN42|= z+Z$5Ew9)r@#;!AIV#g?=oeX00#DnLw1ea};(4iMntj&59hdR9K_Aa$$-zRtI3R3fu zo6GKp@7}qgO*OmK3V6*lrS(*$+~8EnJo7NoKV^hiS_zxS-b)GWr|$6d{U{I6K{|tZ zKI18Luuos&Tw|L(mOPW{UC>jTYZ?(A2lQHm-EciX-faSYXe=`x7jLIHYq*>ydKG}u z$Dy+U2!kC3n^s#*Rd#Y8&=`OV&7UJ zThiXh?z6V{mgj>0LWgPATA+7ZM^@gM{*HVZ-@Y6VX!Ct>r4`a5Q%Lm^rzVvA>42I> zLX3U!De_BR#1-Qq&!Y9q_?U8at1W>a-}+Kai;{Y^sn=O3QsR8_*U~6zs)D=Ngsr=#$$1Nc^w|784KuFpTg+5Zhtqdx8bf5^P&{7Yv2e*@J2 z0J@j|LvUX_0p|afZU0Y)bL1xp_W$6)|CD|IpYG3IJe8#X{~Xlu{^x*yT<`9`w*OPc 
z{Vy>kT|kP}9BPbquz22`0RYOnwsk$1%cA@*EK+nARKSJt2wGM!t`K zX8W=0(}KQ94u?IJOD>*$aMqCr{^hsxw5bSxaR4KZCeP<1DY_Y^7$x3a1)FLw)_PvTH^HlU zc$gKXp2XuDI(u9*GgGBP!y{S8Iox>F0}M4JaqrUB54{#^G?`1E-evplz2(=~)5dM& z#&%wB&YHbsV%^3%WVlWd33{y50W@EMgELgt0dQMK{&NlLgSlSr`Wp}XZyPGV$W-{y z=MQV%H@RpQa(NUQX_UBzYF;D~o#eLVkmSUL4qR6C{ty*r^pXJy7uP5nCy4%@d9pca zFdw<_;MQSOpW5S}3A6wg_}6ak?9Hp%iGyrXMs+sv+=sA9Ks&<9`m^G@tx*+XT}v5* zSnU-`Y7+P?M|rtNN7dveYtCGaAwp-{VV<*OVgYivUi;H~(nVuSk*h2MJwdet0w(g# zH)@||UUBl`IBBmnUD3$lt@i&ZG9tOd#C3elW~#>3C)!y`Mu{Z+u=sxON2v{S+STtd z{ehDwF#aK(>Pd0nDT#RXDO4tfM;YZ-iP>c4?Jt(B^IyYwHSiz=W4In^B;g7#?<(?Z zR4HS-Uiv9;a`fVT7KY6Q|RRhY`|Uv>60AIZF9U_V40mWUf% zD+VG&`e2!+u2X9V*seE3B<{y;d~tgzOI3!yYam)aAepe6As!x2$8;>0c_iF`ko^8V};w3EjaZ}qL~}9wAvh&HnXs^cd}pE z_uug2q$;m`l$>6`UMI001CrfJ657wR1cBVf+=$UsiJ8tgNGF1;l*EjtE%-r;<7PN& zSxFjGO8rm>N14?pF9Y#ePU1DwR5c2pn4kjaH6o&rQ`x`tr1tAD?+mM6iCq$E)BgGE zSWqGB(Qj8Ng*Mw^_nN`01pqMY12KxB@-*$BVwzgk4L>rk?Dxj4RxR-11H5yY;YOdf zs+k*3e0%XMXK83s-@cfnOzNCrYcG$^)UZ#A@K%6Ug(UuhSWBR!P_sI~?d5MfrjJ=f zH0UOtyU{!U7V5!+hZyCvi3e;D9A3!C+Xc>Q0894@q^<)H8^5%?ooZhCH8{449N8RE z^$ST4+T=EeW!tQJYJ8j$&+YUhpSJCTLu|y#i`SK<<9*G>eMC=z@)Mhz5?OwO6Yk)F zB(KEo-j=9*VXF@|*UpOz+`mtLo4>ub^Q-s;AvUFCupiP$uw=7Xxj58v$5m?|FsGN- zV74anxWm5F^wu&khaHcmpW6PO!MA5;l9Mq$?`kV;_75KaaA?Q3)}8_Q+S|~+PltQH zG(D;f+G|B?Y*5-SWDbtGG_~=V)OHg&Z#86*^!^M6=Hc;Vepmn|76@AP7>x6W3$Qj) zqggyul+mhj!IJslFK17#{NUZ*1{aZ5SJ!57S`8g7!AZK68{Z~V)8G6F*H`_;xf^U; zhvH$bAAAKhvNZ$EoM?un>2=;!pCl>sG|i|l^oHhYeSPoFi-lEhFn1;q5~}PG5LS=s zk*T|cZlwI)N}Z#vjBmAVG|e`odM1X{NMD}?ADK%|50IMz&}qILKo5`{i^axIO2F@3 zjL1Jkl1`LTFN>&Mm$M+8z*#CTN)qc(Q_!tYXGz*^>H|A?6+^E-#x`V<2VlVptL-(H zW~v6`mt1<*Rx-zTop+Uh*1cIl1*08?B;Bl^YvuPC#6@wcE03pq$*RjXs$0&O9 z5%9%tQ0(Ut{$;iF_p%hZ?^k5ZWj#siQuI5^J&)?7r_sX?sckz74W z3FrOL49Ejz4?`GoUP)+fYw(QWu+j*243J5k==SbI;ANjcWi`j;$oAIv?xW~P)*&B( zWZ{5nz9>%@!TAFg0rRvHvDS}lx>+*aTZh{^KcOgw^xN(AOyur}t-%{j(AJ)>i}JKD zyA|3?@P+r~{&^qnhKug z@F@lTO3`w=m^fl4IVvJaIgFpR`P-`flZ`tx>sF5gLmr>%->>mN?A($FRlEH>oy|s< zpIJ^Wo9@1Hm*_!fM`T^(8_ 
zgm}PL80sPRzeE0S~L|Z566w(3aBY)hFIYKi#$?xIMgw5{Yf6@rTk4Z_ZQOG#Vef;5%Ew{ddvVPOI1pZF% zdc#%n=0<1ZKnCMW{jm}Z$8{RtR{DSh0-%mJwvJ;w+csprn;}UMoMi! z_+p}GLb311cC>w<8FfolB^;=ewc~})%aF;+n$+!@NCHt)Xp#ce#grdi>HPviW@;r; z#CtmxSbd{U6CFmjb&pPnmDmROTaH8+Z-RecdsDtwdNZ;8^DFs}T17`4;A>ppE|Eza zXPi%NA=SpE;vyCrp_jD+K#d!}m8-_Ziv`#9HjTJvdP~Uwxbx33bjaJqMmvK8S7CFT8A|h~-&-qU$;_Ta;!q zW42*TiQvX9@C)sLOhLOn-7=Ju;n_M4k(4t%gZSn`;zRC%o-iQf{5IX{E)Ghr~?fA_;m|$o{ifoTc($ z+I_-~2r@ZmKQ>*Y^!{MKjgL^g%c2EEzwI~+Mn?(e0kbrAwJe+Nhz4eu6HQFyAj##b zqzw~O(J*kSej#MpO$THO0e36Q%lgVXG5#7OkHem{-G|}9Mmd__%PF|Ai0s75wKEJz z7#P&-oL`0h%3jwbG^N*QD#3wh(zK(wL14m?5i4C` zmtb)4^{}H7m+bluHq934{;x5^{PM56BgrG3;aAL*M^NI6c7teslSvbdJQzWzp4H;5 zE}O`Um0R0a!5?Cflm;}O@4EU`seYt`|L9O%hebv z*~Ju2BNg*+FtHnLI~)Sd$%^k#vpr#t<4~i_bibe~!}4gI z;~Kg1D8HGVYdR*%9oux2$qj~p*{1{$X1iwt69wSQXCg_9guJH1V0Od*{)rR73wzzxh zNkW-=&X3w6k<2vugj~h} zBP>ZWE(}jPCq09u%Z4-gbOe1Cy{A}%zcdW5!kIP5sbYob$YYzrWtm$1Kl&xdkzo~x z{ET^K#_qcvHzhBApGDo5aVk9<+ffq)I=(}Hz1O@b{}6mlQgDkN>W$hQ|GZiNDFpkA zZ0D(GB_(>012a_>2zy&3>-rl8XXXK^Z-v0?QsqIC1eqVC4p&+EkRzc}p5E)Et_xu$ zl*>V-74WWCH;eJ_R%sK3N4i4yN>g2SfK|uvwcg_UwYhDPo8XomLAnASXK64ilT)&% zu`v80ABDNP4vN^~{CK6r>Cjx1Sb}w0@a`DP6cL$)-iru@!Xv1LcG8E-jl1?q- z%VPttzFq@Si8e7)LD|@}B{tt8)Uq=-i32KoYN$YC-mYLe*NUmKNreR#ULsyAPN4tM z&zvvDc*PC6xF~3eY8&5N@Iq}(ad9O!Pt#p6oVPxr*&F{}J{rnCgYZe@Z||En@ga;& z3f*1+TGa>KZck6Uje{Ewm&((x6t-nO8#&rzf7uTNV-{^!Fsv1QI@juShCePyo7ck~ zppmbY32;(*$bIeN@&i0FQn=zc2Ppoo4j-PL2bWKw!RHBoRxv|5q8WeL;RD4!T9>be zu!Cpqn0BF`?$v|OLF%VA5^Sa?g;dlv*=HVK%3p}KywA{qd{=*GCiwhCKdmb+8;9XN zgf5KFgbA5%Hv87q6@d+vZaqidb`(=|y^jO997& zLTV#*8zHLODy||yKHISZ33qG~=TQ!hFKu*mpPTelYKLdQ*1thf0c)OTsa~%AIm_Om zM_(sIfYkx-*DqMV@T-}js*_z@>7tCOkZ*u=Gi4f;Y#BRFNStc${k0P=S)S4_3 z$ZS`|Qf*hCWKUJ!Ya6w9$~}IP#={!18}i9nZ=n-BYgKqoGVr=v)nkHOPEc!h|85c7$i>vDB+E&BdHPw7D4!2CVVXjm z$SP~yS9J^G7BTxbIpS~m7%4mfS+s7=!+U!34o??p*QKvMyLhuTQYR^!s>*i!D{YEg z@7-0>fz56U`NQn}JJ8=QLLMyZRv$eq^Az7 zji7bETF-dii>K~xkZL=<{;Gm!Vahjfz0L7JX+Gt7#pCf}Cb@4Ut@4Dh*r1Qiv3sdi 
z_f4MtZhP%$=ZUW?jD@uny`Ruz!V^;14Docl%ALk%Yj-F$a>A>=8sTutIkhr{MwIdq z^droMPCtRES5H{K4r(uY9udBbL7wJ7qd6R?+;Yy^XJD)E2dtbdXcP#xB`;h`w+}7X zKSqu3=cltXpQm5uTrGDz6bfS{gfkIwul<&AaqU`W4>YRI4BV2fE_5i$#+Ts7lPaB~ z|8(9r3^EBv3WdAxZ9=H3C9Xs04CQq8vGJ~%ir8iu z2`{#0Z^$N2tDlcb+gF}n3);n82*;_pPZy}yd?m6Hip|U~m2pdyud?F}!d8f${l33T zA~R>{CH0CD@76^qtWfMW^)O+3ncoG^?TcUZpl&_1)Yl*!nX1AJ^OI~t^DoSJb(*f3 z(f4oLbe`m{;-S{f%z*Ac3yV)ifS~a>I+~N+Ec0i-T?2a}C{&R=vD(KFvd|}SLGq`& ze)`Go>%}A^qu>%jVwtjow`Fc_nMbwrA4=m06Xb!1#S;R}VosS~haPjTxh|-w7Z`U7 zlT|!8nykdz6B3MRkw~QV_ZF%kAqFKhmXdE-JVO~R`^S-lXNcy9<4Lntaye<=n|rML z&cLOXdDgt&$JX10UQxL|l8|a4@|x>9ADQvyU|^d->Xz7E`ZS2A-J46%D-HHR{#zi` zrBdz5+yHZ!+x1Hl4D!)R`xci;KpH<96yB$cXHTH(q##8;wLQ#5BMN&Dj9&uhIiD$* zlI{YVi2;WK62v@pEPfD>aN2o$_|^mCEk4lra+tDHU{u_q{aIeF?kea75frlfe8(%-BSZDGsAEdD)R0(Cx+Z>x)X7QJQ2ad@Xck zeqNYK_h?Yayq{Gqw0jeeAaWcjkhSdxiQ2`L;kqMe?{I4Tdx1&Y)Zv_4B#(=9fp}rv zq!ILO+y3$rtxLQ~|G|f7XO256l=|44N4gg-=;;G;ktYC@VMnkCU4uvPUQKwS(Tirv zC6H^n`JzQ!PF7Q7xv_dltOYVhUs3K1b>&8Sg|W-iLWqVO#vx!z3t5xDkY#4 z9u=EZC2yeWR3$_zqsg7BJRNMKb?8{y&-6w6+28Q*X{P^&!5^A@w^WVVbS}G(enDtL zO2*IUzohX4LLo#6__3$hCXS>(`*sfy~Mb|H+$sGPJOc3;^a;R{S~8XqAIN=gv3SMj)bdsm^S4 zoy_ZjRqvYFCK8ab)Zku|^&V<6z8n|mKsUPAgxqK~G+nkqUg}oVtJ^MTXd#oWlB2E&d28n;zAKRM&yoKZpRUx zCFn2#R^Xg73z3)uKGTk(hw5)eow9(ThP|rNeyeW^;;i0X)1PWi`JS=i>c=dwtDb1_E`O%6kru}Dr#L^Z zTjGjP?^iWmNCg7ipTLGN&lNQAxP=rxyt~far!Yo{fo<1)?Hpa(CaItEdW6bkUMM{{ zZV#GcdiAzgtTF6WDvxE)WjxF*fNH)+L%v^1`8Xfw9}cDvOfD@yA8sET@UWUvKTIBd zM!Q=GfPC=Mm=ict#t6G5*ADaWzXtfAhz>a5WPzr*y?xZ3(j{>^C|>H=^bFW0T5CAN(OE4<+r_$ z)>n3yE;Ca}ozwUsl=qv0E~?#G+-*R)(u=4j3kD_qFAl0UeW=OI@pHMGT8AnItDyJ0 z>f${CsZW`o5zSoLj^0kX{&;H??=-8LJm;@p(P*c$noeI149^_g((^^CC8(>;z?hWq zPS2kgW!;X99uouV#+#S-P8Dxg8%CT`Otu@&Y&gzag8`G~4W-EqD*8gXiwQ>$beTA_ z7SX50d7x!36Yj@3cMR#ivVi{Mi$~_3FNqCYsw|RmxJN1OG9-{1p5VU1J;SXBbt$z7 zpBC@7!Ne2SInwK1P|x5m7%0X3KxKb!_B=tw2)F!?QuIx9=qPQQ0&&<7-}eWeQnplb ziF9((f7LeO_TInq!b5%GkKU(S%%q++RW=CIp8i~5eqF8ehX%kWi1f-Nh2wKZ@WUl` z>}~Qz^4qSVFsQb}XZ{ 
zdLj_vy!I=R5eP0tIy;ux9y9BhwqoOpMxxUwgfFbvIYNr8$^XHiknh_#1m!6-aP40` z3RnIdFu@nLwuSZx8#KJt$!hTyukd=*UH9l7aTHR-tlTDc{g4g}fiKSA9zTXW5`Y{d z1rEtp4wfKS`WYv}C8PRctmyGh+=H94B)BI{;)gt+2&k|5+UTz%Xe#wcD-M0Ny4#a( zk`7(QTTOPOT0tD=y|(@!;e>-y+vsz>kO7q*?u_Jg)@7ysw=D|uit^IadbVGN8)pi6 zBvw?NI9aB3VEK8+tc9NvJR@iKy+JL9adEf8Zqw&qAbVT4<|p!(K?{4*se;WM)PNk+ zG>?%Qpk2~hj~Ds3Ii5$qR@;XfqxsR!YeaS5t`D5JX1daj%*1T%z39m?+g%~O)q1)@ zgu+NFi9td-ZFT>*GfhE7UiB#iRW)MyTGFF!>{JjN^J{nOp$;;9h}VutI>^9 zY?~G+*{cc+gf^bl#)M4H@aa56OnzbSkD%S%&F6AQitnp6l=X9qCbAhx1ckivCTppZ z)*tvwR4@Mq0Qc|uUWWHq@;d3e;cH3mYm6~y*h(6HATr!I(Nno~4(*S^btP&L+2%XO z`*UNV+D!9Ii)7{UrlL5dfZjjoxAVQy=u=_Z{bOOH7urN>q8sUE4Z{b;-Yitax=s3U z***o-Xcs%8^VO`&92my3PIxA=1s#lsejSfz_OG)V!G{I=%U-Y5swcWuU^DjUivUhQ=Iv%LY58AB7(VhJV+7H z^@%>wMjfsXdn@X{Lp+$9nppzvlu0kG?V!hTM^QcEz-%g^HBcwCS)`4NO z7G)3h#|E2(jHvA0Z5@=CocZ)_P;Uo=l{vz|NgiCI{)+kkY)dCo&Nkk ze*Ztt_q6Zd?H}9!@jdDJKN&l2?_Vcf`$E+Sr8mKs1K9d>)%ztUZ`*B}KKNUpS46Eaj><#qW3Eull`_70?T=zQy!m0etDA7Aq!YF^k#e-Omli}C#WV^`DM6?KuWpXdnNw7AFeMOrM7mFgc0j091pf%sr@h{ z6r=gm6kAPw&1!x475K2VHW}&=r1#UE)zh`jqR6>ncr=>G9M|l6S!_qzr+8^Eig`vj zwD`h^(n#9aSiPLz$XuXHq#vj@3lwr<1l9CTaUHaGJ_ssaU)7Oub@ZFMFbvqJxybF@ zk&*5D)GQ_{>mW{TA`=-5*jhDe_OJUAq-F+QqSL&qVHFO}*K}!7jJ}jKP7AJN>YBg7 z=te6T@zBhI?zU7KpKCh7w?x9aeh_LUt=L~KBj8r6Cz3Ai^+o-u{mO1*3gN&qJSstK ziD1wQ1e3O+{)6jNf%^8^ULr15EEnv z9%WP9rJhp(!YZAN30SiGnn^>{tkYGGjt^jry?ezo+F_cQy@5?7Z zP`8b}LqVq1UWgD5{f#|#i_BK-zzFj5Uc_c}r?;l}<$Pp&s@tPox(heS#(Cv#Uj++r zcCW&8niC!L1ILhBx_8&7Z*TE_RvqxEg{&drrziikU`C$nBc3<+wEq?U@&3x>i7>1} zrsm4Ldl5mkm@Gl}dJpKFg-)dYtn1vBF}()S@rSCdSq|Q&bp&4wXQSwUy8%I~g0kG9 zlMw@pxAhZL|2&&*43c$Ts_hFJkL2M=x0TMsMH8BC07%8!(LxJvOf@|=BJNtxg`@*!@W;ekgBO09;?N;zYLJD!IOg+UbYW-a@8TV$EnReQRv6nWmL+DF&ct7j7}2e<+-IY^$? 
zhPk_*P;=_~TScxFN8xYe;+9DO<(YjA{Dt-5s)aB zGL)&ro0$5xIzlF<+jSB6pOH3}Mm|Usz5>Ld>Fg`MV0YeI5l*+?6#l!$I(V8}Tc6&h z;=CXR>lx~+6-dNq=mVom)3Lekf_mt+UuN&0CAkHA z#Pcer-8TgMLCdJR2U#U9SZlN7UH+=5Rb{)TZX<>dN_QKpK8)PDp2qGZLwW}X!z9OfwC94Z zXye(bcjvNdK5;b&$XQ-=@{pvv!q#8Y9?K(nQJHpOi+h>=i(EX=)uO;eq%sL$Y3OtV z1ja{=xJ&CDFi(27ba)y_weS$12MZ$*|s90W0VEb!gLv zKGxx*rb;$GhGLt5q&jI%g0sA(>600xCI1o;P2Uw;t_ZxhwQe%71{jD*pz+h~xtW=hLX6u<65LA_#f~x`WMh?cRIQ(?h~B^8yC8_OgCVf|GCib<|1rx;>?jjK|itcFx{Ka(s(SHiu+_ z(p;L&e>mB@QR{!W{fcrp*<|7*`4xRF8vAs`B3@Jp=5X6i<}<^G_s#-!(4g#)%$^q$wnnBl2X;9 zYCE?az92rta=a{apLmTfx9EMXPgHE<)@SndvtU7a>Bx;P|od_fg$ zit8u2ja~1>H{qhEY6SbIk>7k0;QktT@~%wIcvd!8d#Xv5(WBb#W%}TRqe!3xF?6G2 zj+e6S@K)=KdlV@t24T8a=Hq~H{irZzWizPTQK+_umS2F%h5VE9}X&}PnK@yA5;$c zYP-nN9uZ9eedE1DwW&pApK!_JPqDg}G=?4Z11_6n6-Z~8s`q`D9bIcJ0vthZM}pu3 z)i-ILA~MGFw9yBGgL2>DOb$TrB2U>OY-)3U7j6Svl|R<#cE=)&Y^@e9mY9m^rC~cR zPFy6<0-HHgi#+4}HPea*vZXwSMr1F|yojV$`pg>uPtqvsy@8{E-0k0o1VY@CJMOP- z9t%uc#HKI#*ntW~V|}k6X3}Qb!RZgB?4=9LO_OLv(I0B1>EkAKNY*sHT@rgSPCx;J zu+1AC%!F%xw`xL<8cXPaqkh*i3}?lCh}?~+ZBcM1jOCMXfsdiQ{LFc;x?^yd(B9I+ zE4`D`C1JxmDUd%A#uzE zhTGo&ILV+theuk1Ts4>Fr&_VE?{>;0Cc*U*CXZ439^|Z?h`VNOrM$dmB42!%SiA5% zJaeW2u(Iv`%?3P6#??7mN+V0h*emkQQx`!>H~(kXhhqw|g|=MFKmnX*}_YQUXr zw}46!m%(`lw0kl~_P8U@$=YBhR6K2ymc1?iXrpE4|NZY#h=0(@$d zVA-Dk)#8Waf#)&}>O>MLygW#3n>efa96^axSyT)w_vQdcP`Qk=k56cAPU!kD6Jk2o zONHw>8jc7>y#}N0O-lag_jeuN)Qi6dlB`$?nR64&5h|8THS#{_}VYq7mv&z zVXEOLqmB1Dl&Jd{2`oVZF&y7r>?~Xfcw9#K!)Xm&fjb9fU$koS(DsB?lFtncO*O9Y z0s;e^OMBdpYHjFK!Vc$at8Xv&QY-K^T~Ix%+y3Nor&QD=5z1GHj4iUOW%Tern=b00t^z>K|<#5^6qiY-_0LuI(d zrBys>4s)%+DYT0GM<4q))8tetz0*6kd1%X(&~rP>V zt|RFc2&_YL&_o)rK8Az2!=9dPPeQ4H}ANG6Dn%X$0DdLNr*y^$(|#$)H!rz$JX zZ4I~YMP`!`GUPf3ifiC)N7txptDl#gPGv$_7Z=a2>FnlU(XCWDK3Lgk&RcuUE_-eM z568E{20LB^@VkC+0qwHwQ5n5d#Z)GzhC~}@Cno{kwNiddBh?J%7ts$j4NAdqI%v(Y z+tneDkd*zXQK_%MPMX~o*HOM@To;h@W--FKMFEg5H{~e4Db`rrQfGlwD>l=2so?bh zAX@7ud64{LnY>*YwksU`X_>)*NJPIAWZ8FITt({pCW&X!3d_pM4X z(jIA-%A$YdO9K3Lc 
zuR@ITs{D0{3kytjXEsplX9`~FR2%?aCfXXkioBQM`vs{iiQy0GxIH$S&B9&1qGV)O z5~{#FC-dqjrjj3TvT)S}OSumNWGZ}N*{78+`B>OsLS}p;xE6&vcrWe3SLOwqlVD&7 zMdnREHX+~;)dgk9cRk^*;dDf!mAsIyyg&GQ@yLXKcS{Cz7s}15FA2PEg4C zx?cQ2cab(&+pSM$S&%qg06kB{V0^Hl#Nd99O#EE1K9aw^$m)?|=cNrq2GPWM>Kk^& zHz%0&`!+!+9`Kny(OC&7_)YW<+C=`~^T-%1uMWFZURv(HdAt(Rck*_|kCB9)lSX}R zJ6!{2sg4M$HcZ#&mG!B8i(??18v^d>&=`+YgKM+$Sx(RU>`yVB8cx)cjHk<-&xmKX zd-vPcE5UrW%y=rneZnqy!_dcgrVKo#HF^7bZ?Vc(J`ZMU>%gDVDgc`%ul(ix+d_d? zWAyl4)+6=Ht1sL{Li`%F?Q7G~&^n8ksLR0enTL^^0wG-K(%-AfbMWETBIu1YBahOD zULp4Bz8<*>mg|u#O!Kyz=nEg={b4*f4H{Ex%A)KwF{tx%N>_(0BZXdyrMH9p4vS)g zw69ZI!!`Cw_h%OEDQ$<>+EU0o&K&4ziQfGq$Lgsj`6=D<$&aPnqBoRum#K`fyW%@N zCua;@dGrocl6qG-tq=f%_Z?g5klGoePkFiR=lgx8KviddgLZRIU@`b%kb&zBFju{h zqU+(TBT<|+a{gjy^r}5_8k}gi?dcZZLJZUvA;{;GO95w1w6%UZ1piD!Bz%8GaMdzj z(Mo%W_3ettAWT37AME5-gFtRchu&5Y6Q>d;n%DJ_r?0h)f^vAJwojIM4#>#0V|(y~ zcA6#u=0^kJAKy$NkG3xE>yDPXMBWYw*#H^sCt5SU2S|AOifS9?(Ibc(Tejofqk?zm8$dW5((fQ}@9#4rD*krt3XU82p06ssKlkSUwGcEMFDiy}k_lV)49%aA2?S zfo%K!9f6@X=7nB{1nXfvi_&xRUkvLOXGDqX3y*#BE{9lQSzxor$X3Nop%5=<)Au1m1boz1{`i;mP?UtbfC*T%Kxmbv?5)bnSfe!oz>%y;%V?2j!MRoU6PmUA=` z{*W2HyHt6oA=<99ZoCI>`WL(#GKQF|TI~5?zfCHL!X|ws0Ly&<@^z+;FM1?U=WdXh zft{Skw4By*L~1rYW&W-Khn|KW`nTAE%`dY917Dd=qt{<6rTT!t{7ZPx8*eOP=O~j> zSVl-PDC(!~FIkVz)VV@YI@JM;gLk(dUi+i=3@FD@ey6~nr_ zC1gyb_Src~Fm2#)MQ{22Ebl$7lrQngU zS|94?MEaEh+j^mfb2RzY|KaPKqbqs#eJ9CeGBGCR#I|kQwrx9^Boo`VZQC|>Y#Wmu zz0B{tyY9K?VgIq$s#Vpid+qP8?&|soDo*nS&1Zo=by$h#)rQnC`xwi`?s}eZPNQ>s zpclx3iPx>BRu#LP`j8%lzwx4GqiK4&Sd{pB<~Y|Zkv%^}o?Z56QS4W$epz^N_wWo8 z6L$Vpoe@BIpvvtxm+nxO#+H8^p)7X$?$F8sr0N?OeG%`sGe@88n>v58&qNsqc%#A- z3Yz$NnefaV3^boQPvVs?UxlXbt8^}YjU;Goa%G^jgnyjR}L@ z9d|zCD(m10fPLygXM^xDnCfjcF%RH&-s={c07eyYygrCtcrSS}B}LUP z&8WN|J5i9^K)x4r0Bk)DkZ2S`j;reC*a<2Qv~4{%95}6gB0mrW9MfR33|m~`OURk@ zz~;s2%aFGZ-w%d0M4d%D;yG6t*K38}4Mz82Ip0X`=U5q%i+)Hv@zN%oJ)0gW?dV@} zATMvR_Bap`)lsWBXsuKJyhhX4K~rXG&I)j|MN1h&sgRAyW~DN zElYXkx2bW|ju+*R2PfEEjn$qv+JvB>DT@nz_W_Kqq-CR3xQdj$D~WQ+*6PXNs~R2I 
zuKTQ+^t2aWDB(i5swk9LYb2ItLB4A`&3WTIwM{Rj)t8~Gp{MStg1=EKTz{wh;xzGCDqKR2b~y!M{flqDtXr1wC(`R zvCHd{z=$|c;-?E;IpFHXZTt7&wE7RhaR)QP7*O$k+UW)V8(o5C=;@DV@!meh^;6QP zujSlW3B&TYp5@4JXoEK-lP*rehx<@fSkLwwsSNY zyVv$x584au(wP497Pd}?#L158v@0ybl@=hji-@1G-i~hObY)b?ff=6qtqu#z?3U?z zJMKfedh`r8RpQx!nUUmD?yD|K`n6=O6V#cjO<)>LK&Hb}`Jj&B-Zvv?F>_<$Y$sBN z<+R6k>?r*_%c`1f2BJB)thFc7R?F|JVSu{aK-IPM#f0)Rp4i{Yp+N_en_=Z$Y;*kj zW~2Es$&lgY5Cwf&=cqEaig~@m(jx}!xaDrO#(zA2NX~O$N07AlQUni%N3?B!L@q6` zMM9VrF5jT6gB_U6Z?Q~VvWE3>qYB|=sX5}l1((<`Lo-5=q{fCTZhT#2UK_O6@%;|` zGZF+eiZf`^ptzPJQT%1`D@;xGTG`YH4L%B%bhotpUtVFK1j>{?Xq{9q*7gMya#bx$ zjU}=E_RT?oIK(_7tqO^R2D%4qbayciB)wHUm(hZym*(Bhys?vyaMz+IiM+ehQ?kzd ztq;a@<$Ki=@X7V7T6fQ#7w805*&DlPp`+{viSv>mr!Rishq?hk?=>W1)mV+(lL&z` zlme~zaT*$<#|zncRTf^n6&}I974^pFMo&joa~tn*JiisE>9ng4)Yrn8&7}TDPYt1< z1%js6U4z&4W*l|+Oyl*mm$H^SmYXgSp!Uv>LEEhIAopoxx1+VVeS%%BKwDrDN?b;^ zaLJ#-Ti3Z~KonRmr)u2O6#F*YdYP_Jf1(;)vrogV6oXh#$bI<~ipWP-V??tGB{#ow z_Q1ZQ)tTRVnj(uad?K%o;pSbEMZSFW@O-c~*c`*YibiYmq&bD%>Z6NgiLF;XWS#A% zJ*Ll`x#Ov(5K@LxrosETo#xXEAtfyePRvy+4uQW{P~$G|MEf#U$0^rTmrxR6ocj2i zB-uu9jivE=7gk^1l)X?*L2;FHh=My0(-bNabgZVcqq<}(lEuVP*5Z&W*2XXsW9rb% z6MDb92|S{6(Wn`mQ`zL%QrV5Y-xeW5ugD#^}2#-#MdqeXsg?!KXQNyl@*`MYyp7 zKx3cMCqqN1c9idyqSlpLcz&?E<|Qxwfom#eMiWmy{I~}^h+rk{swX?2$d0dIVX~KZ zpow~EWDlX@0ay* zOUr+Yt>aYW{8Acyo!wjk?xv2Iyx55ccLF^<d8kO}+UEo-2QYPJEV0)${ zH82zb788yfip?A#Hfm3#M5JYo_ndn%twW+h>^nXIJU zbj_K@M`Rm2E|HeUp@S|HEk+99Gh>mu$TQ;JWpLa;(MLaymqH88WbF|q z5K(@D%%6+B{!e)L|0YZSn@;}!k)>~?{0l(-e?jJ;IR6DOer!Z^FI*u zzeC!8A@u){h5!5UV4t4n|0z=k`FBYBZxZ=`pZ*`2`~T?w>;y&r?*ab_f&Z1sgZ#U% z`sdL99RKgW>bSK38!|umOfh6e3DC(iE*mTDv@N9A)6!`ul}bzvXkv!-I1UgCS$5${ zTP*0g2;low87u_yDUnD{X1KpO>28c_rquv8A@YV?tZ_2z^+^o;$>=yfYxZv8ZC%Mp z_W!_cWv(35e-!4^;hx&tl>**93Eg&IJrpRq8QwJA_-xF*2t1{9Z#5L?e)`f}KeK6R z%GgxHt!734`G{YSHv~HB4|Z37P3JMabSgj!gROtr*h;?*^4V&7xviemJym(c64EA2 z0!|Cxi-rl7#ysU_l6YTD20ege@M=sM*3`L35@wt zom^R3*fASw8IOrph{$-bC(eau$0zlRxCHCWamJgpcsv47nl!RJ?tLMrGpq>}qt+BH 
zYvyijn1ck1#G9{HOL}v#A6Y@~kt-ZX1S>1i5)b%$Cj4T+0HZZJ4`>)0^Dl`X$VS>h7lq(|n|tYk*hfygvqheokVf z`!=$3v1&wdfk}b|1xJ$ERd;>l%&C*hp<|NcrMKHUj=@v)5y{mgOxZdeQrH#<8)%4toCkUg=z(@v*r!|NC^2{p>sW5n z%4fzLt=6xQER5(CIB}ERt>uX;@)$(V?YSlUBl;FrH+rX5ZR7SD`Y8dU+o% zP{K}GyN6!dHnpC!bb1i`*5XPWq_6a7QET5pV~7JLQ!hCvDkIZN(<)OLY+HB^ug)X7 zNbmn<>oj5Xsg@~haG2ZzEj$O*SlKBoW!xIVR}=?Z=2bQ@Jm_ZCwNj_R%(Wz?kB-XF zUAqN=3I#QcVF(t~vrm0i{xjD0N9U(hEpJ873_(hgx)%8)tf@LG0cPSHAsQ)kKiMcx zq(KmBZ4#O6D&wnmyX1qZ>2c+#k@x%zd$KShU7@_@Zha&GS!~0DVmn7T<*4+}kKNZp z4bnv5;K4$pj9pI8tEr4)m%Fl2`Skn;^$c%*ORMc{H)6VeCYCU0%{NA7y@INM*1?+= znIC``Qf0BJ-;qC0i3E=;>K(lAaNKk-<~`B!HPRhM7>#PM7cZaSx&Y=7+{EW>-_2iZ z*L_B17oBBHe6BxA9JpC?Tj!vY3N%!H!yP?LyB?MFPrS!61n#P>KVz1hvpOs{Jf$c5 zdk0cL^!RoHvfhDOB>PkHX zTS;3{m`WDM8S@-vH<^S^r=b>9jJQX71<>%%8IaU;4H8C806n(UdVp3I_(x{qF{3>y zP}3t>aAT8_)08(%Uw*!j{Sw%Qaw;i%ZmnrX@snt|yXOv0`ze)E3Tq?NCQK%oZ#Udc zSn+Ax|9059=yk3sffRAR6Rx!yNXCou!%uO6oq5Z@MX-{Y9av76S=+hM#GX}=<@)o(xvwU3cW@6bXZp|& za>;G(<3wEC_R{0w_Ir}tq)qfidVPegeQFW|oq{5KcOBAef#bDjgFM>+OTXRq)fKM~ zm5Z##PA6q*tQR~|@AZ_RIx~8aZvPdBlIOdcV}nw{kw`cCZo4TlP!vOr2Qrw*vh~RoKmi3SyAf<3i{qDRYAUTD5RVlZH1Y^-kYUb9ydAJ5$0M?IU#e*I_Dvnq~vkq#!cW2eu#UR8!OqR+^m!($Q;+ z%4+uA9o6BF>bN3d5iVWwuG43%D!4O( zl|QUj0akRObclCAwIJ^OR_Agy>SF)NZ)l%MCdQU30hPMt*B6?+MCu3{+UdROOz5a= zT7CYfk}gw@dc~m70RGgxy76o=cUXcsl)KoUMS@Y;2QAst@08b&nV<3$?7!>k&v&YC z%}gLkP4^MY*xCt+jEs6I8j@z5u5>~R#P-|Yjy*Bh$jA`+pXTd#O!TZWch{ynCk=v! 
z(X`)+h}F}zy#?c%t?$%PebLelZ)`FxKk$(Dbm;bMv~Fys|FpOaHVXs8E%QsRuwRxm z|M+Z2b*O9B6mGBPSKo4t;J9quLBSyL65ro>=Iqf31d1JulS-W1)R`1cSqt@`Ua(b0$~hp7iwS z+Kr|uE{w)IHY7e32UUJnG0H)@8Kp?oLsEtGGz!4 zW44|xZ8jU3bB#2Sa?bu`yIItWy434Tx&MMp20w-Bpymx_ZJ$A8Wo^5_8jRH17@L^> zf*>mIjRa!uCOuc1gdVWTr8q-vZIOw?Oh{{O|Ikxh1dG*H$z_gVGP`#yI$7rL*>CWb zzFBE65bZ8JTPLEpn^Srp&g^XlS?qtwaF;>6(x|)fkg5hG@|!IOQ#(tIqr^# zgVJM%t5-2NVmAuoi8`4ajOUTK-_g!8Kr}%RrBrNr!w$Ps;sdO;J?W*NH&j~G2fV}a zb&L|)=MA2Gr>zwNtB*Y!cugvhseAu66Efhn`DD1f1~jmpZwMRGd!K|_HYtlUrV9DK zx-*vqF0RNw%halby;!rfXPh=I~=1M$1xBg;R(uyw8W<&hEno(f>= zysDPe4p$KFGO$$++^NgV2e)5v-pS~{oo7Ionh!kN)W(1>lcQrzTzd?CmnPXc5fkXT zGC{S(;5O7BBL`GawchCj zEv!YgH`>|elCx4ge}{@m@l1QhZ%Dvs{H;);@GJ3Pr8o&g?*zgLUln?plwQ#_@VpUn z(6&EP>mSVI@^i{J{w1V;L7iA~f|O}lAc&|uM~ICj95F>~hmviPIc@)wG0r=jDJ7ND z3nM0c{c5ib(a=@c#%&^20-nOuX%NABvSpA59q>DaZ+XaZc*f`1Rm{1iLv=~948F&55keq4fpPBc8K12!IE!m9(q5U(5diuw`}71$(n>=N_KN6HHp! z+@Zq!(d=}$zElexKYRya_29z!03O+1XALUcvfOuv5Vjs$?)3z{s0rR)O*o}|WMldE z)bzM-FTL2dMYjmFy7IP!d5L?tu)*eRwsEgR3)Th+7HtF4|Gw>1Ox{+j`W=}2t0t}c z(_l1O!cr$30-m_nWcxCMu+o%#mTTR`Xyv?}VwXJfY$o-Dbv44bb)C zik;vZcQWP(+j51`-8)&d=}vdF)Uwa3slP^It6yqFuYc+7dW2+ih5lDyqI@j!N))Mhlu8=MHv8c(6eGwV z6*gQ=>T+K`fpm_~zrtT}BF?ufOY8kfxuDaB9u)NsG-V{W;nm)pp9u7=L=m8uc?9ZU zB01nMMBhSH2pyc@mH&8W&@9gSI-WjtgRRg=uqJbcr{qhgrxx+g{G34Uk6ny2yBJSt z>pR8^T`g=5B!f?z@mBD|84n^n#B+Hg7J{i_i4f#%^n$smrSU*e3K7<_PYz1(A6uyA z?P>9fKdG07zU#J`Yk}l#)W72WXuho3VE1L&>u9A7I3bmGdB>-h;$lRc_FXOPf&{mM zWu95dAT4i1bGpikt=`A0y+ac{)~Q=q3)j_IAJ4b9F>JD;wsep)Szt>2lI~z}KE0v< zqkHQnYOLDvJTHfL349p8=Mqh}ozA8&?no1K6`Pi+Ki zB$C}Qld43_I@+(ccFP{?EDtH_ZgqkQJ9*HMT%$}jd;wO@VP43D- zZD?sl1UiygL6xW1(UDGO_?z#w^{5tJ)q=0$!PTf?Dqj|4*@nu-*~6I^{V@8h@aAX! 
zRo#7+-l*RMTlBJ*B2+7I2|SC~&Kh3lpu|ioxhWsu*3BY|Q#<`SsAQIfC+SV=;R`5a zBstuP@oMa~YJLd(uJZDP3BZI;WRj9a;>OW4=5l<7wp|9?SGCvnj=C-l=YjPDr@80t zgEzw^4fA?8Fl2>RSW39N+t_o*1p=&=?wsTM?}qs;CsZ~`)g0R3o4tAUZ>JDneoRC^ zO+rQ$ov2loX{63$SJt{PzfD5DNcxETug_Xi2QwJPqHB|G6kcJuV6JHwnsD^$THG`( zxa4b|xCD^hBy!zcg?y_V3!Y+V2=nZG0c0dW7u}j#vO0^M3r?og;6C$!m6Kc$$@7UX z-gP|RH9U3oSF1K%DynXKIDI>kV0>%z!MVe-w2wXHVd=E$Un(_i;O4$URLt&NA{ ze2+Gia?dTB$^~=>XQtNNZR{^ywrn#}UVBwKPac8is3ZJc0N{Ss|KK0|G&tGjFi{`H zAbhE@=MziOs}s{J1VklH2@sacdtK$bR9`7~C@$D*OtyZ>WQ{Y@A)D=(rv&Tg2yr{G z)92CG2rRP;PbQo4UxuU2eJ{43*FuYE)^QbO;^SqUD_7uvj;o@a8iXIbV6PmW)I?=l z*VTJb-2wK)0fLHOBAa$6Qen8B7qxRY63LoHhwZDL4r#pBG#Ti2`29h~!KEwjokyn= zj-kC zx5JnX(PMV{6d2j1(5&)n$b4TP|6B};Tp*NSgFAi=SSgf9*Qt}e&9)3a4uG*V{Dftc zHdWG&%1(Hko-aprzvooc+YO^H|LKUAk?p(TipGgtC%kA*dko2Tg98mP!Ylbq7WreE zHbJsF;SJ*gOYj<a zr!SF|KL|%^jPwMWs3J0G&BMH0cWPg{L~?jdOgAwe)~LGTSF|j5u_&iYxqt6v`d4e1 z>4|BB+1Xv@GzOXk#P4Crc}MO!g)CcT!;JhKLy--$r9qar(yXfY9zIKl*L)MK1d{UH zWd<$0nKrt1>ZUT%v>&=XY}}7rwVjmPFeQQyDVm^#&`+k;zNO-GbmK?qP?u>K^i=f5 zh?1m#bUJ6SqJSD>FM%!y6eV0S1^RNEV3iQK`rS>qF3)n zmT@y}YnXFW5Z0^I`d{@sQ(p{NtJ;PW18Vms7Vn;_7>16-pE78jb|;#H&RTx0L?cGI z8BwauGj+<>t}4G&CsmfVZnV_dbuhuouDGeBi&!TA=rVi)PI)?~kY_5_j8K&`wyP@2 z+MgFFz)2{$yG|0~eJMW4`f^ZYFXrRVTKckB{6oDWJ|Qh)y#=l`v{l0+il;DquewuP zr+IPjDT3;bg-wf{;AnjQ2Us1<9YU1HQhE!3%-Yrl_q=9Yn|slrupPV$B)>VSqFmNQ zOOUdKwc7>DgG8>d8|E>~+^7n1^J@IaoAdI&3{2`L-je6xHAV<3p+C#HF3d<6 z-8H<#8#L_g%+6aai<7GSKCx}ggPfg<+y}n+K1`M}Z^O|0!AlBdjMCZ&3+k<*++#JJ ze^xtc5@q;QD4Xj492FL}2Uv}+lQ}A5n3i`5PTjnFS6yJ>EL$~Nr-JKb9k`0`R6goJ zFwte%%a@CkvQOcWvRu^eS=TNRV$&|txaK|=^8lG^2TL$XSG#S)?jESJz}<9NBx##8o|z} z)A)l(K*1Xbs+5|{3jWzo(E4}x|Q=l=a9us z#~bX-*LCZUr-IjF-_x~j&8O}o^R1+@qPba_v~0m}_eDLjL%!tk-*-CHFBa9)-1m|y zH^uR9x@PJs!*~k$oJmQ2xlQ1cPVv6e-}RndxYs=_xNG9a zbAIo3M)rX*Pi_)}7vp774r=sgaW%_VEc)o?S@5}Segrbf^DV}MHuTG-r@MGW(GLEQ zaux-;gjAEo>Ck3-irOm$&MG~OT5qmQJp5cwLwSfC*Ups{s1DU-A*cyu;t36kAOh=C zS1(X&Na3nu&w5*?a6`;(Wp0(q?zC*MWdNOIbngA^mXs+ 
zT)bfk-@<57Nw{m8t;En_S?wZZY7vHvSB2S9efHoaG~UEZxOOhnRsdJ7jOH0nKetmp zBjFf`i8`wd*A>ZDWOT4q4Qw$iA$6E$y$W|2w;rY0uC(npe3p1yO}69 zjiPvPBwj`9>th6gDBFvr4geFbYv?yORp2`9Yk9D$+YQ}2$cJ!!rJK1Ie*S#};QTE6 zBrr(7kdDuZLM{!xW3`z!l(q2-QL@5JQdm|>c6Nlst?=@GnVD-LI zBH4hSeZEIB6W69rV-qCw5_@}>=8a#L*V>+hG+pvIA?lj@)0%LLBHk%n&zcEY0S(pC z<)lFNSD*aXst6XC45p@wVi^ca)~Ib?X;;YK0p%xjAc@Z!7`tjmfaxKo@0RB?SZJhmsl=iR!7Sv`8UM=5$K@2-jH^&aa< z=HFkWbFqp@5yw$3-$Bsr)q;fLo1fm#eyZ=9M`^V>Te?R{(xc%Q1-+$q3(5!!k|MT?^yuJB9+y6&Qxz7LW2O0ji zHtgql|J`N&vkPS5zx%=NKilj7-Tvo%NB`0O&;ING*$-;--|fHf{{IoX{%_ZhEA$WC zPKj*%m$BsA>IB)ECl|$B1rnOR@{R*zz!xk~^P6lzqNM#Xg~8HzF54x(q0fSynRerW zmm46TSfJ&Hi2y85<&hA6WBwF{eIYT_#GbCd*p=jqCf7wcfF}Y*W4$+d9>UwWr!+W+ zQ9W$BEJxo)1+sYfI^Myg;Z1#9rwISyO1$qx>vwc<+LSb(; z476OvG>%9<5B{$<@3*1NXqHkPdxS*QR_m*_Zp}C~l!Gla7rzj?lxJ#Flq<~a59DPr zSf>@fJ!sgfPV6Nc6uxI?(y#Xax`3I~E@7(s@~@S&C$jV>$pRK`9F*_3H`hBa^$Wvw z-ui_)J{qiX-5fz+k`n*oVe1UlBcC0f=m%v zFg%P5HIno|%$QK#31XyFO!HUyoDt0cdwL=IRVXYCLB=pb=r6L|M!~k+yuSmaWWYve zg88Y&dI`vhas~VzyK59+AVSJGP1Q}7{|&bm6xr0IdO!%`3vAxdb?I z;3-;PG6ziKwvRALYTpQCpKs2WzKZsF(faF0;`pxkJXcxUDy;v~ls(P-u`yh)|7LiFf=fB;)nDzUyKTqsL`vxOXt1gZ!Fsf* z^ps7n@7e8PYqeOZ#eJ~hL*3fJq93ZIxcCjv_X7uOjDs@!#AnZWWDCe>ZkS^5d|S&9f^{}_XEgNFEgx+??0#q|;Rd1Key?@pOjN@Ph#wZkTra+Qb=Jja&-bw&vRnZw4>quA`xJ)mOY#rE4wx~|&;`z~F zV;h$sNCmB>q(8^O;S8>vYX!A{O6H<9#)a+)DBkZDc-W*rtP~7)a>$nR@bx{%eu00O zpe62d^jI9tpL=`WJUFC8SHw~dxEgs5=`7Pbi)d{yNIO=qo#TA8#n?OunOa`<;D8&q z2H6a@{DMLIb<*Fe5Xy$RgQmPF2QOYL37Zi8SXnXdi@GLhEn8WIDnn`~3Gr?_G$PFe zDvHk1;?9B37qdI1`yrKeLTd42BB9YP6F+4Xi6BR^7IDk1UZt!tmC)_)E{{iPkQ+o8 zFt#d6C*8T#BBQwqwLHDs8p`$9%=wXKXqP!^29(bQVBAF`Rq59G*76^KV)eAw9bo5G z;-{shbGw`88>*h&_Ksn;spzlOS3xX$kX$i!F1`WmcDYvQ&5re16} zEwen*=tJ0(s$U0&ch`?eM9s1c*Cw(1W;cZfH-oUC(7#ulr~wOP4ZskRD>iMo2R_-; z8Z+>OAhe&8KL{9P?jIu4G2g$pQdml}zO1pQ4Q{OR)YJerwh`*2?WmW8naUvcb3Ksg*N^Ko@5 z8)-T0e}KC+XWKM8mz)ZNpcezOzAqp&soGqwb2e6NpI|E&fo(UDDKl*K?EjS48%Xr% zA7Ec9L;=OXF;y1U0?a*~bMMi6s4^Pk!8f%P3w@e8=)~WIxfE2=U@=%)q$XPl%k_=Y 
zoe_n9l8m!YMjQ?1=xitz$5r53{QqRl-e_s2OYb^UdV!qHCFRPY^uagSc*c%bUVJ|Fe7WvBHu>I%`$RSEQH7fnxGQgDvqLh1by zV7cO|;yrMuxDDD#I3+DJ^eYrRZ-I`g6buQ0{D^%x_pvbtSLIyt@1v!1Dcs1fCYS6d zT;=sHcDa@;owQbil8T9*ezS%EIos~{II0l`GhF_lr?Q2WKTL=RPG{Z^^a~)B{+Zys zSe&4rO7O2KSt60U3{pZ0;*mm2Qf1thgwG~lC=ppodBEWbwhLb}yJ(G)!D+&^mWYQ# zDkaH_bfkrD<>WK{1wp5&s?a~Z2mzk~nEfr8M=^XtUkFI%dC#1@m+z}ZS5u&D@|i%l z`73)4bbJBcs_?Zpb=~6IoM`s3+iyI>*DC-L-c$Kwo}Fy%vt|lhNqCs!(3ZwFCJnu9DF{+lk{h2 zSO~@I%zH4~VwCF_@TR6ovqhR~#zdG0ucAFY#?b3{o!5rJJD#&+Tp&v(vQh@7T5_!6Z?F7TPl#9*1NiMD zBkTJo;)d>k`fMgOsyID%<^13?#>y%G7ai;~vimcWeNK%}C{Y(r*=vqeXKlRbGw5|} zZ1S_zs!9?d`$;qq+v0LogC+tOyO!=qUqfRA%*y!3yl_Gdo8F12h&SIfVl@}Pbp zn?j@t@CLWjQ6ABo#hp%WZ-X?ATX~xnaN}x&?Ja67w(^#YE=1x9B_NS@391B9gg)gG zt!{U;fPGk7w;j%oIn#c6fP^LuTtl3*D3|_4m1R_Ofk$bLw(Ty1J5ZWPD;m6r@Ol`0 zzsBjd?iMX>(g_MPKd$Br(!w2G*I^%mQrv{6HS?u2KO%B6t5S93`A*qHBLYu%Q$4R8 zQCnHXlgI3JBe|BfO6MkWW-e%he)l;#g@mo&&1!o3M2E7BRTt4$@$p*g(&UmoTjm=c6m?n5UhZ?0Ozsf*Bw{KUv+}50W*QX?x z5V=u>NvVcjFCXbw8A{>2am=ACpW=272nInb#bHddP~+l$n;~Y5YLJ?V6Gy<8zk7g7 ze(u`xG@o-f5f})T+bhS1+2>PMVb1ZGsIq(cAgna8`5jfDNQ&_R77+A?B%8^QrP#iN zF*{%bWk3qWb93k4X`}WvUXU5b5DVu=B&JdtKXelOhn50q^M@;k@T-G?%ucz5gMA}{ zEALHdMg7@(DnkHe6*b?LPI>RJ8uY$7`xiGJd^zb^+1l>5oxrGLe7Zro^UvLSeYQ0hE*ylGv1j&PiySwSa z<8=w@YTq%^)3u}!B$f`gN81ANtZ50PD;Vv_(^QgplunuM`fft2Ytu|;G^OJVzNaVD z(>E}uT9cIePuk0#SDZ#3Bun5uU>$)Yqa45)E@=(nCI&kzuEDj-0lwL=s(NQ)Th=y> zF_?XH$}p=2UU|tIg(wg6%30U55DcJ;Z`MN*iBbqTiQxC096HHFxH;sx(Qu>l4~ z`L}~%sZD(VV1JQ$#7ooNQ`0YWnN9nA!-qZc#oL%xRV{&ATS!57$ z({IM)j5=h|Gi3iAE9tpD_D`LynYB212USG2D3GWQ{fSD^IfWZSb61t0RVqkk{vN^j z+R}mwakD9+%DA-(mD5D196;SHOE(<#;4SHSFgjV!8)9?Ix#_z)bE$ba1c8@!#2cRf zqSEuUJpOdPRxENTy|~2Hcn0C#ZUoSv6*((2l`ZLws`gAya~_&``?PpU6QA^B7PYd4&y2@I0DaTiOV&(d^G*JAC z=M8d=tEe1uY91D0@bsr3lc;?T36$-IX3C17z}E-L`fKw$VEtT*~;L?B) zCwkK_b9Z%+0^@WSo1v)MAREjwVt$YfVbI&sfZ>-q8sRKBjQIWdiU7MGlohG4{(OMv z?{2>CV(+QVVq`1VMELv{eW3gh40G0Pm?J!-=V*hp1Q+B@RxQNWuD0!n64r?OvzpEU zCH5bYn*w~n!lVu0@sjz#Z+X<;#p$!;pEs*7xt^VVIA^+^JYcYJ%jl{V{ibrOQYL+0 
zSK0JR28rWk-l*?@kXwJX6bHVhz__Z;6}M*UKd=nxKsz~t>?~2LsHeIk3wu3w?4LVB=?L-rD6wcKXSlOFmC>6q` zslgexC8&(y=hh2zK~Ck-i{Fe}XG!NVU5=K}^cBqN%Xcd#NgMaT!gMm>WS(1{IVy(wTt)@Yp8*pjfseXZE}v4= z*ZQkor&vdxsh*hVO)yB#y7ucZq#vA9*$L1Mqvnt(IvJNxVmlyVpe>YcyG;UL1_MK5 z|66WN$^(~?W8|4#=V`-DaF`XR(7#w3$Q!_dY^*YTl7(E=M*=^BjXr;F6Sy82%DSa!9lKao|op3Gu_sYS}58 zF2@`-EpQH6nvO)uoO!8>fe79 zgHq8!ifAP071>kTe8oqKtNNB#_>P#cA7fuq*P^3mdVh-$Kflxsow{0=zQ?RF@Vm5P zIfp{-9;jGH%~BA-ZB_9RL9Qq!K#kGU^msYW;duHIRi)eJG$+T>Q#)e<^u=Sc=`uky;zz1YQdl~400aHcv{SAqE2Ftu}&~jl!>kNJ(EbXx@pi5~i-YH656vH>vZ^4xc17 zS@jaIU8jUZfin*unHqr3vTY72RQ8g6E2F1)8J>7G0wJbGK&Q|{Gb#Hb<(qM(tLr+|z=ba2t$VwSJ#F9`-0g>1@& zTN6-?-p-t+o9faWE3{>f!N5qq*t9i`6l|kup-Ge51FOd1AZYE2!%~mRX&={>AYJql zI>pw1Wy2wVGVUm;zx509`S}L294vPar!?!>sC3*7Ybld=9?`OamQLvjU^^qReC5MG z7Uus!nNO!(moN$*Gx|2_SXT~-LrxiCwMQ2hP({FJB zCgmId*Hmc1m;^*)u`N!%tN_>uv9)E|llF8FR2OCu%mJ@tljvyn=n{f`IJn;lAO|n) zr`*J;UTt~ccyAqVWh8%)4^;V*27(8_LCMLZqExJyt&X{&la@2FNLEt%y|7~Gua=X7 zBIdcM$_l->I`B3~iHgkPA9oerP6gM8FoxvvUK1p57k5<(n7hinw#?1wg`fY*jyt3+ zKBExCK&(2k~=A06;&jxc$Ta^0_ba4cvXu&1k| zFGOkLE}dgAt(PIane@t3J7Z-t^`X?G2R1CJ8fLGY@h|^Ouu5MDtnf6wKghjBmvzh0=ClG9f+)1=HNIOjp1m6L`}{7$@A`~ItnOSjn{`XV2AqAD@v zUYG=d8Q{Z0(nz2#lDSJVu$Zod_u(40-DU{bGu^jhm&fiQ!|t8$IPi6<+xm3`fQ^yZ z50&F5os-I(QL4tRr$gd1a`@x9j^W%eppJ2}=-AsJ2sq3S?fCQW@b(|FKi(f2Jr`e6 zJg;BDH=gtI4sq*KPxUz7IqlWEC;Dz>gAc4T@)}N@1pY78mnH#*x!=z9lc%r_>!QO6zUy9M9tD)Ls`NQIO=z5`5SsdL9ID^JtI; z>^)HmXL`Fz-&ThAyMJHgER{xxlhDfrd7g5h(SEAs3c4{Ob+U{tbWI>FOFbIE%Y)sj zvq6?#ok(w;N!8^%w;3Nq6qilwq0FlQ)KLUY=4!kU9otv#1^hPB>a%JB7Z730WLY!# zTtKrdO$&}AY-cbVD7FY7Em>e{UkOkLlV}(2Z!9a*%63CE608PHpzu!p zCAPk|iZg6ll74-*iVq=~030ZNr8G0+QkFSAH{{feKZl};bm~X+L z3K%}ya+L*y0s+1_%REjw8J|yL()sE?SFS^r)tkRrd&~(nf=&M5IP#k+07Um#SxiMV zmwgKUs)Zy1?n2hIuT3GDR9m7?cw6lr+j^J9NxqEIwi-}gn1nbSg^an$tY5*5es_Jf zdbmr!Ei>UwuxhumrFc$^o_ZnXKUk`B4BX|ANS;)VXVR8r}bgx5R=&2xo9_nqS-$1!HBBVlUBc z1%~erMmSRK)^~dU+X$b)VzV=1Ru~*l255IT4Lt>7^g(UX&CEr`{@{L=l#VW<6-nxR 
zy>tg{pGQ0fU@{KT^m{}RqgPSH7ArP`KmCn%KKb&0WALBK;Eia{~Y?iO31%{+W*PCh5lyV|H7kK-B+aSb$+kd*y|6kuv|F<8H`hQB^QQDs&CHQ{qTzYfL za<*&gcobTeEn-2am>NnhGF%Z&?357Pe=h$ID3i$#{^+kbdTeGvn&TR@@bBDTS;MFj z-%XioizWY=k2g+`eSZv^C$R@+dBbmQP8Q2uGTb?eO`#x_ByV%+lEO`Cx_QT zuG)|%Zr`@!oIM@-QHS4CsKz~OkKNNmpALN2wBFLQV`=hOI00!-!EAnl>)hCJ2w}U!;v&M^2TK$*fFY!P7(rkDe#MV2vZVxLG-zEK;Y}m?ko5PEZVTc)d zi|0#if6kPoTiZ73k99#*!aa~#^e1$~HS%G1CJ4vMz1K_0=qPSR z7=Xv4QzQ!fX8O&R1c9FIOPuWzPy0XWJ1F&k(l3xBQ2QirEGNZ|D@jQ))3=7|;kb_Q zl9PUoHVu$ zGgWQqP`!2HK@xeWgOu!nWm(?2gL9kCp|g&yGEw#C5!b;`tA2+!8R3vcfkPKwo~K`W zeha(8NLPz6qqAnYSGq64;|^!CqU5Dqca_FGreIIJ_H^TIAgVw&q{y^hB%k5YcB?_^ zWGnwt&&-$5!15wfcxazkS`gQjZ$#;7^XL1PwE#qU5WQ$Ckvt7jV}N=->MZY&C+~dr zIbnRZ3I6!&xeCh&6S}BtZq!3!jD`95SqXh?3I*g%r)d5a}uec?Rls4rtrwtu`<4z(x#I zf3xRVI{F~m0njv#tdlO?T~kY>xm$XNe>Y=AIy7%2M71WU7GNs5P+<`R$5A9FOPg4S38>L<&$oiBLJc9;6AIJ$8a1sA{N z+U|0uC{AxvC7x99Dz~WPGmdyZk9mt2`vdhlG2|(Th;vt*c`ktS48@PSzakjh(IS`Z zIu?6!PN%C(+l*GUD*M-#E>Vl4Fi2|8V=ZH8WG!lnH8Q(p`QDSik_HQjE?6G%P*~&l z`IKaIYWk|e%X;rdsK_pE1to@gN6)MfleG%}74N_9bT>dvvsd(_5J5Qhj;jz@cB5^X zRlV$I9!kr{FRjfW6QmtxYOH>e;ez1=M+8PyZect!!dJ$N{($$^hpcqgZrkX7&_?Ei zj@`tx(0hg(et;?d3@QrBFo$`I-xL!N(|jS=rda||NCg*it8bdW4=8-#yQhmv*J0n*8Tuht2V5TB^?O8?IK-|ia%L#_rRR0v2%NQI2=n7-|KrQO2^`n{81UpOAcm8@ zgF|N6NMS2Lp{Qj23lS=0>^j~HfSqsuA&AB9$3Qi_GRypXg@sP-K`VfUXo_?m zZX98ahh27?JHc3_4gZ8mj6esd|JB=>Yo+T&I~fmOUtc}55muJs$u`w-urH}ne-205HFxCK{$1Fd0l+?I=w$D#qbowmc<0Mc z=2yrnuR4mWmnH(Q)&1ww_-_PnX_IU<*za7M`)^RhcbA-c`Ul>K4{imj+jrkQ5Ym`c zZTC0ZRY=xxLvYvD>Ok>*0Kfezk7u~-kF98)3s+D9Q-#CfAgd?1# zdnuSfPHdVC8f$!qc=c))Pk{#Ha%3qg=zNkGTilGYE0m?? z=O~`~&Sl$O^+F>-GZh^y21qrrUb`Pl5IYgLF-7v&c(Dq5CTvNnCL}tv7#NN_GKvEXKmDtahQUTH@LmeaLW?Sbwhnkko-$0_Y5*~7532N-y&=*oh^&atfwqx9!f=#G4ZSajfCUt+W zhEY9>GB?#%u~~~`Z*2j}EQCKSPeaZ& zMcq%>Pga}RMx~w&4|@+QnvI?|%yPk#k708fJ2XiSTzNxoRE2!E=@r(-MegCYON$by z`|%s?IuJs5x|Q`zDxCUa(?^{_Am+!bT}RteZGKP6jny4MXADzsMO7n(L-#izitothQV)w8=4)qY63^l8Hd7ef(xY?? 
zrUXqVI&gpqP0^R7@UAMidQ$P=@C$Gyivn3{o&C5h&qD7+Pb}G%6e9=4X z?R>awO_e?+%X2~hBNOphJ3Pv?B8|uxOGZ2*+flo7V5zi&qfUI6{g&7qA2vwYLC*&! z!3CEy_9k)pb0V^|$FC|Hf5}qzNt{4X<}}vxy$D51IpYa?oXC5@Ynf`qJB_L@EJ?%>Btgs+?MB>)aAnT8-B}tmuqhD>ZcQT`RrzLZ{d>b zM49F|rgauXY(_1ht#$cT@pNFx2CYQXw|rAo(**G7CgI1{oj3Nqb~L73$Z~ef*r%VZo3WKtkv^UFCj=mZKB=oXnZ}x=ilt;ymi;g z2MH5njPd1mC61`STj^1>M%89mOHIg(8!KlJ8OKfg_}()f$72`kY>nSp#KC%#GFsyt zH38|`Ge7EfGne?QIvInk||z+ybNpaa+iZsg!7!0 zQ?SwfGBFia6U#+c6_#2w(XI)qmW&en!{(GrB>~A?Er(c8a=iyBeEWpYq?9;T1}y;|~pmz!M-JKRn+X zqLhB&xM1=f3{NnNP~wq!=OkocNlDdy5oYjpZyLswh`}9IJ|1@7!ms7!WBX z6Uif<;y+33B=fW!XRn9|LD{4I=K9|3kmczkdDgCI3cA`9z1J~r7+ft*0BH=ENW3qC z1;uqrT>P1{OGHS{arY}a_8xxk$+IqfJp3%}*hUo(gv#w#x-8q_sSw(NGuUU+2tc-K zZ)&T%x?3o|gi+f7XL7#H{8xpX{$}_&d`wN5qU%|2A(`U3;ax=YY)abi&N?xm<5!?I zT_L<#Ry@S7VKFX89H}NM=ZhKZgMxq>eN3>=#!tEX0TaC^nOR*@(tHF0^7>?c&u2qg zIiJ!Z`u9YFLU`4(RwG2ix$`)jI44?~Q!Q{sM5@nOW$vyNM-eXYj>XPC1*DY=XPBF* z>ZfU%vyRZ1tECN#h!N$ls?~xX{o(quV;rx%=GZW$bji)}TZPR%Bengw%k1Hymd62f zT^XL(H56;1`(@3{r?EKoujWrSA*+#$XvGsdhvVaS6)$p-20m4{KI>8!X@q5W@x`2- z6pn+7&60MLTl?yVADi_`T^mgyS6sa^FFYP|4%uAM8VflOGQoLm5CXftUyB}#2>q7F zQIp%T`eB=}9PQ6vUqs6ks$Tn9WpPW&xm|Y-QNP#`I1SFZ)evVIW009rL-o?VdXBVqXCFh|uW|8@o7&s2-Ni;S zULy{w#NJdlI-bUCFS-_9WWxJrz5UC(;xkbXQKOr>nG5LP=XiZhK}Q$qq9|`Sz9-y} z%^lIVbsK5Dj5ANUpA(B6@55U|n?Z-*h{6vH`Y)kq9ma?$GevQB?6-rmTzfTcBbpq% z#@51SNGXL%-UZN^WQM}o!WA~TbB{rAs5zu_hk9+6xTLbQfrWP5l+qPnnoAC_T{r0v zpeTYWpfyHQ%hinKzF4UZdTalbdbFQ@6Ay-+e-C~F;bx(GWOiCR&nyQOfX-^q104Hhgb+XAP@49(nX?Uyjnf1Vs z2LIkVs1AD*+8o#t54M7@<74{V+i}*TK6|aZ%fos;$Ain8M~t`IsR??%y;K3?&yH*A{qTd7sYc!d zSCi8M6TAJ$0viw`^5gM?cDb|7cH>*uMa^ep1z+2x?(^q&#K!DRrMC^51v{VgM*f{L zE_{tg__NA@8N|Yt$lO8ZepW))u~fX45w33~xP!wup8Z~`<6LK3MXhvjWzED*JTf&X zA_y3yUW2C27peE7&Y5n^{_rbOjS8_qNQ)8Xj7XI;KMV-RlP%iku%JT4=G@eoDpLd) z0#)IK{QKflHhhX<1yBb}GR0t(=Ubf|?8Zr@LGCgj@_4Eo()a7*M01)_lV*q1DyCOW zNnYZjz)g^qFle0{igOkmy4E3$eLS1X0ztz0$WAV?Qtd5qOUSed!~M+6ii)~td-e8Y z``A?5(n}-;PvM;*v2_LoJ%A{c+Tn5IdJAFY|B8_5Jng+C-J=)U_+eyng~f$0|=Kyi2f``Ffyz&$hp9w)+HH2A^cMwf03 
z!f%!+pP9x6E_8{b0^OQ@gV|e|Z#6M3>6F@*=#H%X(N4=>?@EVl9(Sw@CxE>8GrBD- z?;IzVm|Y9dA>dkCU?D)br3GdZr= zmyq1K?Wvt-aFG2G+e67~M&pxrT<(nS8oy9>!-8{q=kNBDvs`m2->B(*tw3l{hyiXL z82v%r9wCq6_IQ&FA!68{Y?cik{{tr({%ha?rFNrb^RuuBNmHwgAA*>{hjuS*w#Yh$ z$;MfKxB@IWu$v|_UjIH#IfnuRAncCil*XoJp>w#!E^G*(=xp`f`^qc4}VX5JjW>!APLi? zjFS2NVR8`PH?B3!{+Ld7D!ILwWbXdy0tVs`k2Hz zo7VSTH-Owg)KA7Spg1ssT_%s<3L$V{CQQ5tsjyQza*l%XX^nf8u9D2~I$zU+Dm0U+ ziCszDcvf4g&x97`k`TC=C1N`?1vCZ{pCP(o2ol?gsz$qkmlReZ`(h&9Goy)T%F zX3)9o1{rc;hWy?ZB>+!(jjjT$(&CF2Bn5H%${W>$g`GUH0>00+e@hwQP)|0{_0Q{f z!hN7$&a#;t(XiK7^3Fko^<&p8*Xf_CnEC)H9o7%{9ylG`fI&I`kBDpJ^ zBG)85wyiBAFc&fO@2#>DBFFYm)BSukl z6Gwx`WPHcIUdz83xvcxoGPI^=aO95G7;tnc?$%PNHW$4Ps5qnVQdijyjmCxu=HvYh zAaO3Z9EUT>vz{zd?o}IShOQw|HWxOFKc$3we{phOPrhxAn44ayY(G2^m$Qh9`>yci z6%Z~8>^jhF_mRJ4mp0mV(U~9JMij2(wL5oe?JlIxL zw}?I;18N8G>j!&~hm3PDtAh8|$ZI*o)6VOnA)^uKV|FH&cW-{fbtgevcEkzv>E-t! zNi=K%th-ca!10Q|4zgTlHW;rp=t!>g8=w~qb%%}m?J&KfhTKF!@+QqA>i)XG(jBpu zZ>q=642A@%%8{SHQ8O>e`D=1pYQ*_>)nC5 z|J8o|zq0l}?c16EX3_tZZO{J0|2Y77^&j@=Z^oCG4)2iT1{!oiIt#;qqwxmv z-I9ti#t&`pBKuKChQucY)G5hu=PLS3R}X8G!V5Sh3W5d_$M6Wm@!e7~Xn_3Ej3kDz zM^65k!BOZePY-ir6on*#274oizys5m596;!TwBk6Ax-!-YFZizCSNOMo!)%6zp3@x zdf%QTrZS?LzMV)!L>v^hkYtt^A1DFvSM?eLQV@^Ak(?nPL9b(*yK|YQy}sHp$Sa3r zhRw|@Iqw&#DT95fOteGs*|ao`A!@c%uYEPp%!h=1s({D24UUbD+CARPGnj|E_3S(g zyNS?`EPo%zJI~SM51ChH1FS-}eJl^XnK~b=Q~PEdytZ`w_`Ns3a@sD2QX-Np9;P?2 z?D?RSI#Dhs5z*ANJJ3)sA>9yqQ@eqfXVbM&TR3h47@4ZuMa*SvsQ3+5I}c%e-(z0Q z7oFT)S*^W54MriJ4DT@6nL%eQ_e(ZVYg~+h2o?rxr*sdi?8Hx{c z%`c^@Y(PSuMk0m^8?<7+cyxL~6W6C5QIKg%tvVC%wbWn_%CKAV&KUHJx&jHZ--}uxKC|jIb@44mybP5Hi z>gmC+m+1JE%}^lW=lReT{`~%&318+1Nxfh2fr4gY*?pn>(|-VG2xmauYaOMMu!GPf z3y3E3$DFdw0ofnZ`t8kn27^eeQ-)AG*?zYfD@fy|na)TUz*i^-SN#gam;z3m?}2?> z59CfXyc6Y?3tna)wUJ+VGk2ZKuo6P6h54TKa-eas8nVSYS-!kAccN&#Z|?WDLhcb< z0QQV|hikasI9c5L?80bwkTkJ;tuA>cch*mTQCfl%(U;oOUzB==Nk;7GEUjJUTsUoa zchC`Yoy5Y(DpqjW)Y{TiMyIH@P}_Mw;wih5_uiSk$^h>*qE%m5i!(?EDzyEL?8{H} zPKVy3=SkzBbZJ;)jvKx8pOOI)i25Xw4TSXlnlIC(FC3vBn%e0{47W!pk=D}f(qNA- 
z@#~gMh!r0+Vs{z&DfS6;zBF7IYI_AW;q8q(qm3n#HG3c>+&-6jneg9cSz2zqjF@+K z20f~n+A}7!R`%H6u-x+J_=%L2PK}i`zDMYgNxs4}H7%1RjGrkFnsTK;W{#S2B4{T( zxT#Zbmt7}XmF;YFn71@-oa&!*?a&rq26I_SlB zXo^uB9zMdy@lZ82L1>1Vi(!B{$=FuVIS!bG&V!GqD!C_L!&ksn8AiGdLsP>m*;2Hw zZAt?nLX7)izPAbQw6>nFH0xu{I^@O&OcmLBR@4O)GM)V)8Ofd(&-0@%?O7PAq3?Yn zTN{gJa;52mx2>W7ZF~JI=&=J`;I)l$mHlMC5c95(!3w`My@6V1@0OH&7yD~#mXr?x^$0MiqFrZ436fPs8dRFns$@8~Hd zP?}=-nmm}wrG*I>_IYAXPfo_4npMAJ78l!(O5zlIHC?Xv5}!P^)ukx0G3LvKv&UY{ zRZzaEuCI2dZR&FR=un1b;LylnJ*YU9pTKa*riqZGo18U%hV&0C{cIZUj~T}d*?)ER z$o4q1lG+SX%j!pGq!Dj~HCze%19pAO+1mRVy7sk;<{) z`dAKe{$ZNe|C25v>({u0t$KUhg>O}dV4girI%xK`3@^>JQrjC5@p=T)$zDr^d9b}v zEXGHlB*9-b_DXy2$~miEA0ZhLs|sdINCEA@!@DM<1S2u4N^z;MpeL^_g}}vp(0tZ7 z+*#uvXkbBB8EjN9cPfNjfQuE7hd7H~Kh=th#a7-a+HG?pWrId5Kk+65I(^t5Dn<@^ z7%8(_Zpn!bFMicL7p*@oX0Vez%3jvzJNLTlo~N?vO!{E5-w&Gj47xHCBb+XAJ_qG? zGM@kFB?lG<{n;*F1lA~_h#sR}NuWo{E(k~Vi&??1S{9qIEggubJ=y9S)wSU_WqhZM zXP;bM#U`4`Pm9RQlc^LzGZt2?*`F6vZxJ5){p{kk%XdjC14$aFXAo;yxQv;UX(V?T z!9S1CQEj^C;|zBLx@H%tj{`^a9F5iBL52AxC-eM!;fmzK?*^_>*3%yQ^u z#APGzEK8C<)28q&q;+we`BU1@OMW>2M*70#wBtmMP)yid2> zd%W9rI8Oh>lVXpiLNecje4^6f;b+=yy!Q_v;L&YAf7XG7+ub$y_{e?29`%{M=!EMA z_K&XMx$dBP|Ej_3P{ z3UE*6sw%Jp+J|*H>UqfvHPWa$O^mH$Jv#VI?Ym*wnAjTR?zO)VFABChM@}>^%TMo4 zfnWYk&I!~s!niD->}*SDM*SMsk(M9RFZX2Z8uO;;!kheuyc=C}+e_X1^(?n{TXDw+ zarcFt&;z{VUhbt>s58Z*FU5<4OO|>DcRe-2td%|eHuw|QHrkyAYO0MsFyVP8qK?)( ze1ycH0L4eoq%%%Y@hO7m3U+-4$9@4?v1CTp=!D!4nEsYR=E5`$FEsrH{3sXt(G7H_ zTbfjAzL=2cge@IhiimIoO5^~21v_zAE`apmYu*SERL-9J#kX@$0gjSBcxPG@@&J)2 z<|*vx`ZS>gW-d~P@APGJ5Fuq+s~+YL7AJL<3I6faX9{FTv2OMdYpMrd2{nzYhTB=Q zN_#nm*%?W25Xh=obVlIU*&HxLDAx*=PtWWh1Kj*Hm6Wgo-9{!I@O&pZxq7?K698B? 
zh$CBpk+B`+1G`(3N=sur(D*27Q23K_9h#?R5GQn@R(BA|T@jV{fWns(L0FsM#p~mH zD3-$(b;V-9*)M2uYIg(Xr2tV=5batnJO4iu$M9!(3;wi~!I1G(wg8lRvf(-pRK$9; zn+0H3gv#!H*~>St5d#u-?F-^SA>x1XB1)(4div+~`tV_M$zq`kCX>&U(>4-u{XWNlIPdc?VF#kJ_fUt|;~T~(u!U^d~^m87BbzAk-=E$`DI9o~9t4?^)MyMy})BuNsw4J z`5=WhdG8`4a%?>9z4W@Ww7DsG|A3CKu50_La%r;pf~j2-nE-Vy{bLM~I_Fi}9`X=D zbM%=Z2ave^F3J&-Nj658SxJ|DR(z1Tr#X0C{?iftown;K$}RInAw7zim17|TyM;^n z!z0?ht0e(5IF`Vc@;MMeB5FyZaNy2-`dZ4gw2efQt}=q{8# z^QEKzI)_303h1*eGW*{UXgelSPnRMr_HJ_|#>PWM4~=I854n#@IO0yhU7qCM9H+40 zD~i}4gWulk?V%(IKCS9I zvBjD898_8)Ea0FV+Vhze%r$%69#fMlq-C%LG#n4-Uo0PYW1V&kKaP_h0M6VCb_YMG z6(?v@{S{f?x>Go@5hoEGLcg;zzS{;5s~6fVqq9&k2Fy4Qu$^N0Ry2szJ6?avKo+I3 zp}rgJEZI~6-m)5bLHnE<$*&XT8nCvV?&DvkgNOYUW#K+t|5>Zy)uXA&WW)`)?o_Qys)7}Gqle- zkLi_DI=JP*C8;JmX89|oQmrv-!kZnM`!>t#hm0ogCW})x+;7lD>}$U#Z9AXtH0Y11 z(JDU(pD%Cjr7W9-uar7#@4yE|SVnNNvd{-~Wu5(uHyOCZQr79)s%0`kha(2EDSa$~ z?ipL9w2WItIum72t0$7H^=5FsRptAa_ybDDGe~#dl9}08_}Y+p3*PU$6&4dVIz<}% z32l4KV+W)W-d}9MDfM_vX4)0;nz^cVjg&;%=++2I!gE^D?;dXnGh)O;-+H1GMKnhX zHdQ8Ym(~u8s0$<7FQoDWTg+?kp6-<-~)eFOxO@5tJ3M&WZw#VKn|+^!Mk zzKT-;2`JMA$yhIEn1URZzaIN20qtyLEkC^!)v~TN_7tJK$uJ#D1R^ieA6qXrUT*+$ z2lXZQ&$L(%vpa?R*P4I7DX!S+F zbJ!F?M4fIb9ZEmlMqO-X1zf${uj<~nN2B_cj~F8FWAo(v(xP;9zMR2!=+T532ehQtI^8qppNBu%vhFa zfQTNzeVgj>?wvd9hW>!Zef==TZaYN(RLvNCK=%3d759v+^I53RvGHLF(yy`%nT6n0 zaLNTwsQnn9v~bM>WrW-&WnPI{msan+q-HOHjFDaL5s`Xtuk>F%Y4%Jraj_XmI26`? 
zzVqApOvVH2pZSpZe@^tzN>r&Y!29Qmc*{G#7aMAs+AW@sB*r#5E%b1|1;&YrvH^YB zE|N1M16#nb{YAvq*lxvH2aBntIHBtiK>PZq79 z^c01@^2f{MwwGy(+AdCSJ}JX}(RKRMDF~W%U65@w0VoLkO7+H?p*&67SKT=2fYON72kGf&h`qAM#C&|bNQ zuvncXRWQqL+s^qeJ&x|QH&TW_`jk|7UC^Aw6|94D;U#fOIOU~E&H=$!SdT0mziOFHTLaZ;n(8wCiK>$Bb-x-eZ+kBc^4n>CZg*)FSz30 zH}8%oOvYEfXL7qVp`OmB_gk51sC3?ErTk`-yZ)e?HoRh|N1m(1mpodQWss_eKUy)$Xhj~F+ybF(DH5cDJYQYQZ`oS7l6;8x%kH(Dl(;F8_|9@x_~PLv8_P>?SdK?I6p5ZoQUg|D%K-(2TV%YawZZVaA)lvohNugF# z6Z&fIW3gdflF@eRaUsspx(FM>JfSxf2&4W zYG`KpxXj-*LX0SP_?V|3`KXeA9^W#1P%}!OEa!9?Z@GpTcKrc{%NMrD`B#D~C{MYMdsUxM0-1@w){TV{ceg@vcbQd8zvM3;n z)zH&JU}?PyL-oV|z$^*x zBYaaY8=pzmxXr1oWLY+CME6DVP7c`Z#uc7k#0TBheHb^s<_RACd|HpdOoc}n4b}6A zseqcB)dl7fW-F`fJ?ZNB;RAyK8-}9l&jU{;$~IV3&@Nz%kz>!?;8pja*`S-4fj2}2 zJSL`Czru2};dcXx1+uF}9iM6ADTKyC&+vg&FVG9aU%#6d2$;*PN^2KM^P*Wo{h&*( zT7xWNpVZp)=Py2lT7q2IW=07Rm`<80xhg?77@BDM@IuJ+qC$LF2%qXlN<@dYPvhhl zs14VxD_Jl}sXJkhpM%gQ)~KLTz$J46>58MLf4gXRJuqj)2iuSMr_`nqmMC08 z*}7z>y0OwHZO{oo=G;epp3L6&f!zi4LIm47=Din%YzuqmG!>Rwpw8`Q<{WtJwAxXv zATqMx#1-scE=iSY>3Mb~4&}}_DGyyV7wBTo5uNps3JVeLt2q=UUn5GP8X))j0-=YP zhu$D-e0m)(ytb9!-RI*RWx)mSH`e8ztTZUj*2%A1DFODzX{TJ{>6Uky8q=FvG4DmG zYn)kj_P@fO#$T))+AP;NM3XzkD|SyjJ1pf~XvRn3CPog{i;o>zG+S6v#mNVEH`Nm<&mNL6Miv8Y!o^lsx0! 
zoBv?>RJ1EoUb<16h=<9e&-ctmX}}#iOJZteM$p&#&JReF3ZZS>_nB$#5P0_JRxjM8 zqxzN+xKrVwSCf(iyH=qwkj`o*-QL;l}X% zX-oYj!l)cGB3-RAOJ$1L9`GU(3UxA%Q6dOpm|&{h<7c4%-0)jJ2Y}>u%Mp368!10N z)6rB^+AQ|%oXd;Y8sq?DArGy9`wN-8$?lYFnw>{u5Ko%JVsQ?}^8jCXld%S_O0h-_*JT95c==~mk_Uk?Gog0?M?=Vx@ zYNkc^#9h|0**mcgVTrPw_2BfqnR{(TR_Mkj{Ml4sX?Ho7r)rOwjEnj z@~Un62IFv06A2=q-q-n_=l06u1>sZb=htIJE$XEF+48j)1uo9!a}$jPLcq#hg5P)& z`BJj!mWTC2UdR4TWl1aa=ycYHrQRJJt76FP1A4s3%0!v3G3Q~S$#_`YE#*e}@|)?6 z#>ba4xS#>Hj>?!l={VR~K7a=Mz|@yGJ)mK4vTj*Zr0N)pe(;$S6H4lxBCcX~L^^y& z-EE1of}DM>@vLY8gUZEB$Hm+l7%j<+Uj~&k!-buC$Fr9E)S;+C-;$F7!&RrM!Ydc+ z%G6=FWemb43Rbc{SmsdFgi@v9wn5It{POCV$o#`~^{sWpKU7TH6c{Xbm3RC_+gm@= z6Kxfmp;z>_p7B&)je}o1QCEwvS|V2&3nLQW8%5@soMB~6uQAO@kae)dxoW{eUhbXV ze}%XFNq~0z#`&H2AKus;lZ{NL8U81~BF_MhW}Q2cHG#iU#Qwn0DT)j@dww*T@k{cjrt`5!jg z=lA{N_^6rA9c5h*4H!ILsKBFWoaD zwDx5~rTk_e=*b^vkNthv;@45c zX*^Tls2jRi=f_X}TEhcTDy7G5g~KkKKSZjm^*inp#}D)b-6Pu(;dxI#{O}Sqn2{AS zR6!T=m`N2f&bu}9HUUiRgz_+Kr7(?@pBIc@>Q#9alZ5ZB5q#|oRnzS$x$rbNak#Jr zVbf&DYSF&Nx1;KsQ5up`V@iTs26NnePSf6x2!$e=xEmBC8a_`I#%xE}FbO_^oQSgj zIW>FRRDBp?;ZJzs`g%!GbfEJVeZW=s8m%0JB|}c3|F-Fy1<2NLEjRQYi|8VT*4GZJnyJ%qOXar(Kgf~uFy40cx1(){h*$$1f#+sNyW*H42$`b*6a^f?0&ku=u%;ek=xF-u&E$&rW`a6s(twE5C!pnk1r!koWB}XFW(Fpx zFO}{BHMLF`Fw_!-+QR^yNWyDed~h=7sNZ=U6>o~hH(6@$&4+L`Rhlunq&Vf@}6mX`0YlQY6y!I zQSTc*S8a>OB>BqeN`4btq_aNE?L%#bU5kAZD!UVG(G5nDOTCixaD~~?R zSamJmmtcL6jwbYOBYxsjWXhEPB_e^0{_T~Y3i0*YXyRDW`{?6N$H3@v)M1HV&=n7w z`@M4OVFe4@GJaTAE7pNk@lmTgjs}08M=*S%fZ~T_jq?@mir)(hHMf`rwo+2%gxb*8 zDX+w|opdTzr)AuqKcaW7S5UJ&TlV{Ehh({^0RD}}k%kH^w7EO1nW?^nc9#kXBAW9G zKf{u3sY7RisjVhPSY(e}_Nh9EPqiR75+p6#pv>83TSR^(u*3z0F%Wc>CruH+bsnZX z%^i<5)K#Y^%b8HoI0Eoe-fA?R{IeB)Yrp?|l@JX3In~juJ>@}u*ZwGS1$Ue*I@Z#Qk4Hp2x0uK8Wt=G}7BexhFo- z+Z}0g@uv^-N|%8cS%zw;(OzU&dG0XsziCcUOnGfXw!{?MMe>4bC3a{)U~67s41<@Dp?G%u2aoP=Gr0p 
zJwt@CE0zgJzG~UO)*lC={JF)F{9(>$dx&6YkHGisO2N};ixOGkGFly0FnH|0t*F|4F)>!pJpeQuVb6m_y!%DtX79zq#-Po*p(f*b<^ceOZX~#OwGs1_k@7h6E#a_SKTf708U)cMzcA~$~@!}zC(X^Mun@GKNtBsPE8V(|~_yc(kKPy=Zihb@$tZx&#`N%9(w+{AV*t@zmTeMfqfD%Q1AH zBe0v{U{m(4@~?F|NSEr9hW4}$4{iE0tRnBG5*7>c+Iso=w^A|N80;-BO1av{Dep(vrks!267vY%p2<1kalvrVore{Gd z&aX}*^X=XC%gRcOqy~;Sm8eFcorwdEOAN9pq_n1*DYc{hV@t}XBw104u9*wYi9wkg zMmOAk9!qne0OKn{f;S#*?6YpQN_>N6d1hK9&EA~`tCb3>iqofCCU!iZx%#R#EjWkc zrr)74cqPgm5>&I`jF97Uw zq8vb)2MGe7zQnhN?hvaBlaqf5j5r6U@xS=`hz-j}`n;BW#e|H0Hhie?t-9qpinlq! z9hvht5bgP$q4{~Q?M7K__e15uo9%W|eA%t6y&bRN^euhz?a3OR7+#hs zx&*YRg@QAQ;RCF0t#S%w8rP*>v0C66+^l&(eVe5EhRNREl=hM0>t`0?Ygcr9)<4k@ z2^AY`LFby?m+%LWEjmmz_M}=S|3)U!TSU~`7W8!Psi{GM6pUqkW&GYTHE#6+q22Xr zB5CJ>`IIF(muHo_?0%`cB-RAAVz1u2(ykE&N~%hfy#83Y&kPp{_~QYu$JOj3LudTJ z6E*03opMX?fCgE&&k9}g8h^Ge9UUO%8jwIT(_4scEBg%&4SnH^F(TvKsm`|TP;3Lc?#7eogR3c<@&k)<#HwoE(gZw=GFutjH^-Y5Fe`G)R?O?(=6Y(0iT?ba2nCFYe zSm0{`%oS4;Yc%Tw^jPQt9dk+dbd|43TX7tfQ8$u`4##0g=pOvh=M6H4 zfM+ff7sQbF_Pp0MuKYJrN`ok(KFch_u?)9%zAS3Vr|92~mJ?KnmB#rYN5D zK%w(dfxRr&3z3-d7W6TfNr%Vpt}U<3Fb#)enqOkJULM0MgzS5-&QStDoLl5dY>PH} zfS2#+H4^<5v(gANp_&CsVm+?2m>%T(~{N|HL}~4AW6eI+Xo<4 zAFRwYB%*p%Di=W&Flr2MoLMrlY-D@fx{wzkMq2i-7~|SghS!hcm#B^`#NtX~90XH- zPU^mz;Lnw(EFPsC6lX`ob5iGbU#0yEQJ*B?LMH%4+;V$jNHvWc_|;l8=?q5(wos`G zqz?zIN0QvA>gcLnc6)qCTOOm;_ho-9@R+X_3yq`w@SdRo35hRzJ zUAD0`{9;0ztq_o}#V#9n04qk|pb!Q7M6e<0h!nq+$(BP&-ZpiRtAy=3M9|01Y)1#7 zsHWg<;;jf3w2o7MAAJ$|%A2Y0%WFM4;z8ICdAGvh?XjxioxQ1YL3)<0u=>~zNc1bj z_tD;Tm#9JSIsG6?diDEYIqdw!0eTjF=4YOUH(o0n?^2JhLu~J`N!O91>^7kh^4G9^ z%?1IepS~`+nX#j%4%M}qk#_`JXy;pJfWcW@_&pk zBWeqa=oYv&5wy|hmfD`3(X7CANTlJckcI4-sPopTda;bUqr++H{emlP{Z!Cri^KVj z38kON{Vx66$||-Y+H3dp)Lx<@@xnUW&haWGT~JfOJ!h}UT@J@SJ688sMVAe$6Y8N} znZxXkg|<3-YwHN5X+5Vh?C?ot7kJDgk$0y~fG5@{ zZOJ~F*YmALBQs)ZLSB2uchqA-IF#IYL!#<}zmDQgBw8z6Tg-Z4ojIwU`mmijElw2; z6blkhZ?wJbTisXp$OfmU4sY{2L)wdflD^%W!3rcnIu5S8Jyf4xzYsyzzVQmh%UXB0 z4lH=ncefgMRC!{(cT_!Peaf)Ra*b*Y(R`*j*7Z$=oaFGRYbk@P>see(F`IlmmEO`1 
z8nJ(;24MJ74Iy?S2h0H`ua`ZQQg~RsF^`%6-U=e=jHqNn=PZZY z?OhbYe7dzl>DU-noO;mx)$p0yDJTh*$I|6`$_g{Ywq7BbofWL@uv$_esart!D00KQ zvs18|ysfup1RMgGp(jWC#*GpRN8FY4?dv@>x>v~KU3JwHkbz(8RVn*98*Sh=yV(_) z<$8;^zdgoJ=wbBYOMAU2{zH|->`GvY_?xwsV+^)f4Dw>1(1`i5)b18CZ$`b6bueuK zCnjufB7dk>3X5ulR;OnFif(vlV|fBk*-G^)oAGrOews`sr)E(fn5*%$pVZ=xHOo~h zHk2E=S!?4*Uz6ZpC)hAt%MKxnJ0&}U)*ij-bRN#`nHc8!vGhz;= zTw3xBpWr>FT?LRt)?w^7U~$pJFW6Uj^=8EH2>-~oIiq?(hoJ2kk@zj6>}Ax4(CzoL z8#f0F>am%GT_eu$uy~&6hJ0RJ;)?SW;+C-$Q(B6d-~Iv3Dwi3hsQJoBAy=@JPsW@b zye+nT_u&XJ4WcLo>otlwuyGXeacq{X39MHcO7)f?drCf@1Lne1AC~ChB)u}Q{$1&(~6xDNM z;eUUYXkdxer^YS|{(>L?^-+4*4F)TZ!3FuVh&ErHN|VTQ(QV8?r%jY>RCmrAyB3aNSTvT3ssz>7qk*bf=J+hR<@MXeR=y?H!XnXRsI9o$1NyYu#Xo=bH-9IB7y^y+Dp32txGt$Iv!(xtp?zxhp9c^Qa z#FOAx!&s!3T)X;XRRMd1iM7$ySpO?H8A-|-u+d8FuWelYHg-dX>^ABd=?ihnn> zp(S(fv_g84p>vYaF_Zn$#lfo*&YQQ3&qM)lI_|kw?xA#g2dJXDIywD~HM&65(SJo) z^^HkVJOPn4PO$S;f>azf@ehmB2RT#r+1Cdty$@G9j;8ElVZ3=yTkTgx2sb8^Nt@F3#ZbPw97GoxqVt zEZ*NAMfvu5-g9c-?J;sVD%nz8Kkgnw?tIW|V5U61DmA&3KY@T)s%ooLx7)`krP!hw z1=p21Bjp4r%?y?TDb{us+f7F;55T&65|}@ZubvpCV94h&0y(B?|ifnQi~St78|v?~oVHla0zju9d;lgXlBLjO5!A{E^qe33~lF)B|(buEskuY@|6n7SUhqkp!qlfx7jD6YZ zWxCF_;ZqyWr{?vAy8L42dRV3 ztJ{*2YZBK2-Ns%8Q>=3+jISrdP~MN|0vk`Zh|bI;wPrJB>}YO#Q8=CzV&dMTNO6M3 zXs+Tn$%Ax`KZ`nt#<(8ngp|pdt$*#(2(Ev;P$uZa>!E0>-7B7d z42XE9H^>%To}yx$rQ_KwPRR+XVJ2z(Y-DM*KKM;VF_5H9@<9@8gllby?5}vMh5Dy` zlwnQKR5$u7aW)oEFedU#6jDe%d7u>Tpav>;U6AwqXj-g9^ql$NH&Oi>DwOwbnWo)n zu1|4KbMkDy&S6s62*#n_H7^l~qwz&LJH4(N4@et5yZlu&yg_pm?x)aN`$LSMFvQ#i z*!vp2Il)6u8}fA*@;t$L8;18G*M)KN8W&*=W>O?&d49B&i)Ru70fo5U(Se+?)|;y` zSmSN`(6nP4e3{18+TKp3jDETY6v0cmWWWpJ$RPouUPYv0azv}n! 
zm+X|T9~H2LO(Y2hI{m3m z;gJ;s%!1o*qP95=obZo&Pfmi3Q06p*aJ^93X7IrML6Y6c*a(b-K;5cI|40Y5Bx7u< z%gn(~WtNH7A@+6r+dFrvh>#!rOwlz|(bQnfDXdNyp3*_587&lvKPvPsNQ|pF#e?(Z zC_dkFj+P`P9n}(jFOOjfQgN1G{i8@uYkJ8JNm6W6=*z^B=Oa7-NSapH&Kh!S2_u;YY}q zOg=7^$@Sv1Zn-2kv%c-9JqOT7=dU)LM&Z$?wd93E^mk>R+FgrHn-c|2I^XQe4Q3gR zfM&EUID@=aQBAKYFis33UQDkYCo6SZxvz9#VU9;F9IkXEb~og_7{9R?LkMque%UEk zIla3uc#O|=oe*y32E1pRs(Q=E(e+nz+k7}ivE;RO*{yTcI+4>KNz1t6vnVusQ2C(q ztR3;xlyY)qP}rTWe)Cj|;=@=@7hR-Ak?DSohqZI(?5qpP=x8hD_M+36ZYdg%cl zV`WX{MAZ50Qu{~Zr&!C#g#nK0Hmr%wGASx!5#!AF>Tkm3KN5Zx2nbBwiA8qRY;NVD zwFKp$ax5ll?&2)b+Lh3ua9QfB_njLWsrYOPG&J;%kCR2`pD1FlQExC?yzAh<_%>12 zB19DH^B| zYtLtglrd0#m``DOyrE#Ox%3xivP*)8Hu0Pc&hJ~xq42v5OC@|yeK%H4V^RQolr*PW zrVt@x9xW!_Y12_SWV!JV#f(!Yu5j^d0+kQ`c(lcd7v3Eb-J|xwWNGFoP%Xe&X76SG zuJ$E4>;AHyE6hcEXu>Wdtnl5W$kS$!IVkWcUVw<)@+LdRq;r0(jehxDh@E0abD|S4 zZw@3RQVZ!~51z@Zu_2SCRe253b&bk(y1$D{zn+;=Q~uMe6Eh*V z$QOD1V9IhOPUtkw>RZO|(7zO5l9J72bQ(&RL0ZPFK&y~m;@PKEcy-$-CvR-Q=t(GT z;2-kiqHa9~p4gfVeE^S68fFzAwxVYtmnFE*r10XIn?8~>BEZhcvji_IYgWX*#_>jf z9jK^p5oM@)BT*I+$|hG)!78~K!243ktw`+3kofj}(hAS}mD|Y+i24*4)H$HH7m|i= z?c$CJ1|AV1>68#H9{(oWU|E<9e@dU!K@&0xHU*CUW(%_)}|FOR@^8ey>|J@FHu7B)r z4Ew+9po1=a+WV&!3=HB0Z~kL{`#1l1px=iA-Tyz^L4EvVe`Egd|9Jm? 
zUJ&>FAN#vM5N`jE5#0Z)E;?w^1b`R(;Ah%vO%Jtjjw!;|Zjd)rveAvnpI(#4<_Ihq zA%UDDJ#<{19$|N{qTr$!`sU=pV@jKt`pw&Wd(rnj`wskP!1v%kP{CJpec05=MlH1wj zWyOew(ReT&0i}a_TbvT%0WBd!l{)i4yUE*weYxI20k?A_>%MxdF3Tp)1o8&e1{zmD zq|h99LgOx#D1FjsoQrLAq6^%aEQabt7eTV3;bg(B`T?Kg1T|c>I29&!!qw>o>vITi zwp8A z##)6gk#311-A#2}ciFN@elL%CH^!KjyX&o18HnO0B~K>=i9rVoIEG3wEK}1OOn|0H zvfbuPhEbfslg}0!*>mep=F?E=+pR8egmz=sj0R*98Ev~+eH$tr%n^5;h;m8M+OA@j z(te(|_Mxqj%pykBQtP^KW#o<2j4EC7*whp4l6DnV=nbP&trFlt7X0d&BrkU@?yxVF zA1=VYCkRuiR2o+t=in8u2bXWRouk0tF`cAmuVuuJ86nXEP)i-rXX>bpi1S(>kKk~~ zs=tIDr`E1y$bw0^5jpFhO15f7TgMm#IeFZQO?a%AUF>v zaF|%Hgj+q*t6v2JF+bCQJrSyYE>C^cWDkgw5k5y1u^{~+yL;wQEgTA~*r_8A@z`~A zK^3EUxv5|jKQC>tb`%%oSdCPymbDo3XOrCTOG~+brDi)J>n6)WhWeB?U#ZHP9xEM7 zy32(BK$I}9t1}`hL5viYrP_lBvxK?iL3Q6SDz<@@2PMnwD zorNE7j3~e2{w8NHw7z-lAwhdy5!gFv_ZV{*6})lktAQ!+|7Df#A@5!sE}JQEpBAc4 zUOIed8HV5cRbJ1EWU9A3V8qSX2svf;<=(x6vD(PDM`|{fK!$+gbu!OT)43iBFC`bY zpLqncLK_m2OpIB}t0}hpQ$U034lgpD0{0j@ULF6ZcoE}EqSdX|b;#x%fUd1sX zHTDl`^2>Yb(!4Fl+PGIo%14=lJR)DGaHyd$8iwP@#;SNPE_^&%Av~=%0rzxA2K`(f zTv+q3yajYKIlfo3uE7LzH$?h!f{vuaB1f*%2ma%y$kpd5jLd>tO!jTWIIj$VA*vj| zIcfLbEYGXMNpooAayq^~>y8UFN?B!zG6|XXDh8tysVdaH8d?kDgr*86UfsqoO4c`Y7A59Ma<*`Y0*804tTGiTgrJ#ku^Z zf@=}lMOPauuz#vsj)m%peA}G}y64)H4;1zU$@hS$qTT+Ho*Y$VybY&E_~v$lAa)Lek1vTo zf2Nq6nSS`L+GZ*pE;P68-EjuLdG|SfpzpnJpM77Q+J0L+aJ`y(^m}}xYtsl0jB8k; zvY|iEuct;_r&5&t9PgO0fe7GU0b+_c;gpJc=bn0~i=6Ch`b6 z09Sy>D8jIU-<`}ymTFGlIV{#HDjz%Cc4L*auB@aWDh$Cs5oO%5m!;$vEEJQd$HK6F zA&YAe*#OIE{-#};kyt`^>?8l8a?;uHG|Z>_XtD{@>_qZI7Zt{O`$X}axAi>XYqMzRji{2~neMR~rZR$pd+)6g3=Z=m zF)#erF)$(fy;LaT<*F;x70h_K)}3dFNabbw9=DJIj^yW-b-D}WXNJJFU+|BtzFw#& zm<}AJ1=aK2&OY4LFe+DvYxP9en=Ju0_957r&xvr<{PM*d?J)w~PFrZV{hmwG-uEXA zW6(V>zkENnvO)(v;i+4Gt?U2!OAg+r|J(TTthSLHLi`^8+~l`g~hw8&3*Z-k?o-O6-Jp``dX`r z*zJ=3FS0QmU(5Ux8yAh|DXMpR^&Lz|Y<|+q6HDIg^Nh8=^T>~7j`t=ytOxwLT^{j_ zZ!=wXd`E>(z+qNv3tMmF-Ln$HPX#B-n449&r{{GDlWewDo#wfH_npdCz-UM7itI3; zjo}5Yi*;Js^;NMFI?(*S#RX<3TxAAoa?JquydBM*X2khCm32UXtbMZ0y)EZAU}qOu 
z=mNx6vLFV7dHXA58VRaeU0BQy=j8;@xI4eAslx=PTQL7Kp1sM3%1+}rP#Rce zuujRmID25_@q00623H7v_U$E?YYy-nZ8wNaqhLfd^$JQWw3}hTVv~HLw!b8*JX21g zxemQA5>fXc>zlI??J>pk#MBC{KX&EasCs(dU1$$L187r@zkwLiwYU{2OU*R=4%AtH z;7b;!IMN+YcvL(F(}8KNzE=zv&_q5epIU>)J1V%VP0psK-7Qj|qqFaio=bm=KAn`> zKR5$8@J`*lGxJ)n-rRw_3%v4nNiBaRIq;^?1t*`_i|{mo51X> zl^E-y3E~+caSEJBrFUkkNfz6^O4w&`e<$$L5udE(xV>rg)PdkgFyqn%ZvrIR#;98E ztu-`dED}nxPvp50#@wwMGTLz95Q-N{S#>#E?>MbAWasLdRy zP5!OsS3^|x{i7p-IJ(zITv6xQjE6%eG;N9`8ZL+sm&56$mz(tG4VJT&*4VKvwOwA9 zW-$``LEql(STA!mknQ$0c6p%^20QIwl0? zNn-{EB;un5?H%>#T|?y;S_-KcIczt?T`vZdjxD|47^tXjnhnu1F5AFT67=Ehp3Ffkr5|{WO1_7uAK(DOm-3{?Haj)7 zxfuFE1>Zl=>Ti7^x$g`-=(H~3UzyXu(rZk^BOH0j^ttcAxOYAj9%o%cLc(7d{6ITb zd-!qlll0;R>ZRal+uC!h3E0dcV-|i7?J@|%DC6)_H&x_PbbEZMdy9=ZzPuJJrdaEF z)x+wdAVAPrw&4|x;%!QJ?=dzcwWl^?a>Jit?qm=M!|n{7@D*#SR14Gg3|7&Tp>B0q zOrwZ4UL-K4Gb4b1!nj~V6os*yM(*mRY@ZeGylv(3bdcY-sI!vzRyfE8w{gHexaM$u z)O7-~wnDphmC~g0^ldb*rbF>851^TGg+1@&ZIEuHR@i0qC$LI)hpf7>6m=kB^-Q_6 zoHqTDIIFHoZ_$ipctoZuzk}-+?Ah!%AE$=1NnSPAhdVhtLXj-Bq^Zz>;XWwa91Pc- zJU1K=lE^fa!-U;%Xh&g1Pz`#A|5XV$=oJB%CpfT&zuGqDUEAD9aX~$iLdDZGVSM6T z?!&wi^{3KQ`$TQNI&90ScgXEC=NY8q@ki`6@duiYHL4&HlRAoNfnhgV#MOsKk*J=W z$&tA>Ctu!=kaMSz22nA?A8cu#lOq zEQQkJCiisPO$6L#`&XVLXTlHLuG^w%ZM?G1Hav!|l7{eJ*`!`6daa}$63Xc<0rVOv z-aT;hzs{(ODmsij4ZHbmvN~(7Q;tsF=bAZy2ku1NYFJZ31{EgC#~EW%!mJtiltmX! 
zEb5B-VMQ|fPq-FGt(D2WUu!yljMOGC6szDU?#DrtbMKpHQqwG*;q8yNL}}P^&tC== zY--o;oPO!?7U6ye;29mLQpr{Y9W-j2npw{e=H{+(c1@WwcQm5#^0bEgji2)NK5;B< zYIBYiy*ZTcTLII-z3JR{<~D&9$TWjztq9SlrRxFyIbIu$9^@;?>6ivQ*Zu|pGWg4x znh)2NwPz1`NDtExOf&OOMJ|MWk&%S?)hq*oj6J0>K_Lv7$Q!F>A(iRJ5*vz-GH<)Z z@)@AL-#lpGL0ah`rddXR!_v$}0T3Uhe__IIKt#eO4OiuXAYUBC4NVCZwLp)(6DCfw z-7#!4XtA}(b{eWJLW+L}&$MovX6INlmPgGT9cXf?6$7w*SapgCeI*U48 zm2>$Eyso*q{OrG)5WQHHg@0`T>EDwX)o{EM@2?VAeClMcO!hOj?07;9D`3qy$Lrl+ z@EV$4&F1c8Ia;CNN|*4GJkRd0_7OcvD?4wiTW96Hk9&ds3drW%2eWX&xZZ6I2m%zD z9*+t0h&R~LcitmKcD5x;IwsEK=N3x{rCsl5cJYL|Vh4CU1 zX|;Q2yrY9g6~5~Bd(}o;dY27C4HLw&zTiy=o&H*q5j2zVO9x=1b*$e9aLKd3bD&|K z(y-~2nbJ1d&D1oSm;e@Si>kMy0_KLK5`k(Yi7)5dmpv!P-Chvkhr*Rn{0w54r*bK) zwtb0Ej$TyK|_nc=uuM0tqIRr&;3 zB^0Fw78TSuMq22x zcS{%1UFNe;^6oJ%lctGZ3Qxj=jf4bNmY4=7qUlC6ISQ|aUO`cRXR+L#XHt@{N-R;= zQ759$GC5tMi~+6Vr=Y2h@eHk>$Di=6F0%WW&C~THNrpbs>{sc*@S^ZZnP^jI#m(H4 zr;gNVWy>gzY(u#nN{pvBlFhA`!sI&BVHnaU((qH|b4DsZ|Mqmb%`M}e)Gp83d>^rK zzl}`k(+s7@s%jfv!vom9bghlq8g|K_vtc#h0 zSounr*`D@#`Rj;68uLAQlzU|fVp0X*$FeTM7U$G2W^GlnW;BMa7=W#9(jNRL`>SOL zuGDlsG1XNgbH(MgZ4(Zo4~&PlO5wcPTVH&1YFeI)s$r;>$<^=cuy24PX6dqW{3?kQ zjg#A7>5{@0Dhp?21XD^?O(R??V^|Pas?nP+S2S14N>hsH>VIHVJlwr`!QZaggO+!E z5JqFblzxTe>p%t$f9!D_1m<3}KjWb(d@W5->8%d+ znr$8-gGAD$o)ohR_Vo9=`{OtGmZST+G`e^NaaN?Zf6C1fm~o@j%u`U--netv^AMlO;N^2Z%kCLlE(Pb$APmO2IIENW#uszhiA-V;$*e%pn75h&XgR9ETm(J%NTcx@Iy zw}>UYr%-{OsFo4o+l!kTXa|`|x*bi-=zz7|;^X84ebW!F`IZEW{ucZb943NUUF{yd z6lqXlLHl_cE5O9iU~eS%D_g9qnD1v{olW{qo`8I|!;OJDQL-*hN$p{;7gr;W=8(y{>8X@WvP9hQ6sy*lbF=Jb<|ZQAlk&loNFLU7__h-;bbhGrYUwO&HxNbKVjXBvyX&c|Zd(~~Zig-X1DLG7=llHVHizxIz%*W~UGI}qx5A#d+WXy2&y@rN zqWc-V@B@~{WWL(AJes9(Ijt{7r=obA&G(Nplr(r)&9F++{4ih#|_!P-_Jq z8|KvmQ9iscy+>L(*M z3&q|dM7@7o{`J=nT$b1O(!*pQ^~t@9sGM>Lx%k@LXNpPtxORH`v*(C=k&r3vpXV*a zTjD9SS&LB)mxN0ZWoRkv2p4Xqb2i)?Ut zv$|9AlCT@3L3ijD)wH6;k;mKDiox+QdndYdrKJ~}fxlKjX1A;}1JFKigH}F@d?{^E zwpUlLHOrI}?`h=I3gTJ1q)j{as;REi{c$^Ovu&P;72fL?VF!r*!S;o7vtK+T9N8c( z9ySAC^ahkPzuExLjPP4vl8P5ZgB!n4;vV~ 
z$b!)iZARuJM(>=juU~h!8rgk6{Usg;LXiJA)EvYF|No)p=}P~?jQ^jg`TakbG6;2s z0iopo6LtPCl=^QJ8t|X{|3cY6ffUdGvA^2?FJ?ICnZZGyf9&seoPRtJiVi}w!T-AW zZ~MFd|3cj%K!`Xv`aiAzM#WwKhwt$p|8Eq1HIM!0StDiBNc$W2t1;0dZ-_6nQqro@8 zBw^LSlRcJ^bbZ>T(PERNyLoDb{UxNX{crhsCMyJdE6Vz~$zYiVPXtnblcY!a-U0=R zLY%^lWWOj&NJOoU+y;&l?^umR-FTbEa)=8oilF{;_q(*va)XU57G**%wy#LIadj+- zGIi9Xfj^mF#);Cf*M6h`u`2-1L;AVA2i~cNAyU>{u%xNwZRliN^FdASQYD5So7xu3 zPUq-@2Wfjj+(v~vlil+!>fDJoWIJ8mOox{93u+r%OTu%U*<=lCz}JY?PVEQmHTPuQ z7oWuR(h7Mw2@%HO%Q{xezvEvZQa0MgE4m~iP9WsGc%4RgetrL*rX3nx4XBQ}L+GJi zjJFQwXyQb4=6qZ&-M)lLaCn$8ysi+Sy>EH8|X6v*d$&o z;gb#nVSj^Bf-b;@*vz0t8vh9rQJQ@!D)KPHTB9Z_fsj^r$&0b({e@3;r6V(Afv=JgjJxXdCbi*Vbg`ak6cc%eh_ zm}juc5b9CcPIBQEQn2V3*I!>ga78&qk!nN~@L3lhdW>@Ba3@99#?#0E>F zP{i)>_;SuAXF*-oZD~{X{Xok?eAcC#NxloB(15i{>3J?QiF1_W7-i_>M^N7(!b@5q z>15wfAFRoa@*mn`53KRg~3Rnk& z3*Bd#%B8??uzs+=x6jm|4OFbY-yi;#xisGiVxdTja7CO6OBAoK9PF6Chx^i9qK+}9 z;~2spdQxfZ|FXN7V@Z({UJ;hi)8DDrkY#P6ISQwfm2KR7Pnkp_Z_@LacIWqart z8hzB5bgeJ?DIdy4&%7A4mDZaSyzx!aSGb>+eb|MSw-tUQe2lHT830=i>aViORyhBW}kj%002Z(LuKYH-_7J#lC;hm(^SKc_09 zILrUV`W6`6Zc>vbys>!tDjh{&u~ZSXWW~2p@*^^cJ#3DD$U9EEj83ApB&@HjU_Q-a z$L1~$5$nC^==`7VLfH(?Ers#Fe=?zdol>%lB&ImkL*hQ>K-Zh@0V$tnPTp3)TeVL@ z^Pv3DCcm$o`UsK}KIA&2=G7=BA8nGPQfQdGAc7MVKz-o*QOijIq$;`3(Rwfp2i7M7 z*{do7P+8)76|LmUW~*Yv*8PwPCo+ItLTKuDolrYtB+zun_{h%y4=&xdUP* zz0XSmN%SkExqh+|kD7_9Fc}f7|thhF)6B2 z;Bn_y?Jv6GA%~875q_ftS#JmEU+EtPRsg{wCd!sReLbGaFs<4ad2Wm;2Y5E?l^G|bz7foQ^c>bpz*BY zRi5We09SLPr+mXJrH#u~zQRH(L%SfghLhJyO3c&ml(O2E8rQeACH2cM-@}w=8@0xL z4YY(YV7pbf#SOLO);n|EddKjVys1CeuJ*t}B-}O#V_Gl(`kRj+bOnKBf{5e@HQ9zXk}C>u5FKXjySMOq_1cvk8zOn+fykCN=>V?Zk*LM zgoYUtyndq_I`=a?K}kk`|@M64xd$FZtoa}lMNQBox5X!sKb z{>v3&0HLj+fi!HNz@6=S+ zkhKydc}{}I`(h@mrYSL@&p=#oH4o=+2v)Z%IDgv3$6VDc_gj^5q|;hKIhKU)-m)Kd zTJ+Z~dTU-|IqZ$Sb8j(ic@B2mC^hH-Y+j^Aa0?9k`coDPUGd{HTw>a2%o*HDS*^6M zXN^AVujHAhh8`vx2R{7g_^t;!K5IPSwB?U;k0MSS-7odx?`xhrrDNGh7ZVPwpCQ7R zS!-$Z0aZQRQ_i98MKg%<;~=^HpuqRx=nZTR2)Y zya_Ss=M4B51Z`zpg(kg3Ay-TI!t+eMDogO(jy}n1l@s;b9JhgG3qi6_;w+}2HG`TQ 
zQ;Vw|^k}9#yn+E!(}$#jmR;ac#*4VsvotQcjmPX>ZQG^CM^hY1wLufV!6|pctF?}4 z%_lPt7h0E4#!~k3t@wtQ8u{I-0@55bWYt7gH_m7+e;;*1J~p8YWnai|K`r`>5E#uy z8844L(B{lYcm=UFFyy$h(mKL>wpW6|b$7XNiO#?F;f=Thtf^k|FWu#ayPnu+B+zzP z*Cuyr(|wJGBhM30Fr(@S$qm0GB>ot*JT7`%Km1U)`Tw4qpVIxX(YlNo8Xnb&$ub#nqR1wyR2Cx157%c2WS<}odHP6P2Y#bGeQVx-22Z~)lDKX38H+MQ z%;M~;>n7;BXBoyr&FEmepW@*ZyrzbzLLnm zFgezJ>^Bl}V^`n)Tb7;{((CTF1M&8dNt+XR_S zVYbkE5!pcsU#ae7f?F4371n9Hw_c4J@bzvc(IZ4M{;rxeD)vq8Dk-gRL!E_=(J_AD z{%j{rxAGTq^-ltPHh?$iq4&|ozHg7=ay_^bDC0~*>)5KCu_yICb5JM;UQy0j(1k#Z z)}(2~cm6c<)A#M!V(59o-L0_t69lzuZ}9NassAdUzJ8nl@X-B9E6Nj9>I=Ub-wr$u z?9Vo5#T3W)@kCO%UCmRPF6fIAaInypZh^)1ZtbBCdzz9 zXKRuKSj$cf*CGCzg0WP$G;8}yN@2d@B4n}~t+$pn&=CYU$1uU& zU?+Q{8IiL$?cuB1m1i_)ho5`E3rE)BefqAL{X`F3vgB8soa}dl+qbr(^|tEp{qd`+ zO!aaI+Wxn338LZ0?B4NX05ILiLE>DQ0UiP0;bH7nkXAq_#E=){bGFRz7In#A0$ME+ zHw1EA@Emf+w3T!P|E@2$C@1L{Ugu%$V`JZ_x7r$jI!sE(-5gO&`OVscJQIZH*kdjH}hi-8ugnwHjJ|vY7z$nHb zkHOzU^lJwgQu?m84zfnZasfF+YtXAiqnP^*AXCH(x?K=!3scsk8gG} zQJM5P`1urhrDst{k<*Y|G~Yb-{m} znZD`M++RMm>Ieyq*ni+%wqD^7OOrDoP(JRD_w|u7rbNF@x`8>^D&ndsZf7bm`_IPG zoR{r8f?+`kvSFs-oQenfXK4Q7QR7=~ASi7||G69_uA2Km&kWLX!oeBim_i<6>{5ZJ zCfo;pznM=>;*4JUolD0V{9d_Gm!uVv~GAQ^KC?+9xQI?1H+^Ddja@#I4vT_t*;cgLWRp)w~mz6H%^|a{`y|CQ}4~5 z%P*$6HSG30E;KHxip>}aYhue}+gVP^>9Fj`F?5y;fiw7sEx5Z-iP6%E=-f+EA;ldN z^3pcB!GHAPdf*VbRGzj8;%C+)J%X%5occO$iB1<&c&@f!aO zD_V;k=O+@yJx-<39F!2&VzHn2c*(XX-xaW1QmI6B;!yVK@8R&n${Tg#BdQuK*I9Oc zx}kFjdkKZ=M{AWJ#B4I_&!5u%C~kFTVToxA%-cMUM9CXmm3jE+-QvFF6wk$e@my8; z<9_Y31NpO3wv<{;lbtZo{RS7(7;f=ZBc z9Z7m5!qAOAX7bn0tDltNRhKMeeKJ_#`;bfU1%Z^?c+`Lk12v(ERz`6h39+xsyPMVI zv8QZ~6&~7|+ZX@X--);;^a%m-yLnLQZ3>3_T!!S@;lBOwv=SO1;pcocr z>-mS5*7;jkBua;Ojsc#d?L<)O;Rf>>kEkV;SK~_Yqs{$<%X*yJiVs94{V1agPP`4` zyuGGXmKt=$LVU8+1$wER+v5S%I5wBHEh>MPDg8ssAuNs)O^~kZ1I0xx*FDTn#-M)k z>aA=Wl#J#CEM;b90Wy#EU6!(?+&x*DsGwYueB?VE<l3$ov&4HdN`ar~EhC~w|Lx}n@f2a0&mi4CgIQ315xT&%;$^9$LCNesT=@&N9D5^B zBrH)p|EOloeg3tTJ~k{PHpA3^nmofxgEVE($NhfX_v4cSw+cs;pypoH1w!g;1(x(@ 
z$SwYJzV>xM;mIYfWp>vD&p9tVo(cW_N}N32uYmN&ty|p3?z=ZoCB!=26R!5M%$IJ< z&_0S+P1Ud6`TXc3%g0KpD)k%n)yEg{&kb{toGyh>Vm58fFIyJ(l;C_rpfxouhioF@ zBCF{eb;_?H_qP_4Uh(CG*Yvv}s3P4^Vc`LW?HLbXE<@chCvvHU<8umr4?)`2GQ@IsIBFqSKed11zkTpJMG^i6 zhts09b-;u~<%(><#S~5taOD+7SWraDHl`nwk{lYeb|;gi$ETL-NeeRlrp4L@lj>qN z9M->y;+Tt?>r3v4B5(UU?4Tx45=Dg)LLXWyO$0jYdGF5aQ_#uYHBVrYg1+*w`K6i| zqse z@TuF9fwp`&nxv|DB+cx z^`y&r>K$y*=HZEF4F3)7`1zbDyLFC9`@*K?_Yge|V{&(oA+o2ovVK`UqQk^UMY7n? zAQSTndW#$V66@7KQ`bory!8R%=QDzU!>W|CH~l}cdN}2e5h-LNA-INh^q5>OhZP_+ zTZ12uP4KDpP$>@|Nd+GgZ$D?f51^R2*92Ixk7Nx@$^_Xo#1U~Wi$j} zj!x&_%FLd7I>@00Jka9(*<^@Ms2cvnc+uDv{Cbr4^d@MVUOq3>t&!VZL>H4O$JB(;g_RF{V3eo^EJ zB3c|E{hUT@45OHqP?8X#olVt?tt7Ec5$6CS9w|-C5nI#pKvE@5gs+H+1U*fJjaJCH z#}Ip68#c@O6L#T?jCm}7?>-2Rj$9d;fvqdJB&fbUWYxq@X}n#2((7W!^Z8)|LL^0qpEEBM^3Rzx zLMRPtMj@lmoVWT&1^Dfn<*sJ^lJ^Ay&;671cbinlrZ7L9CquP zq3lOk{#dCZ9K?gg)wMU+^P=@jb{)8*Q9iamlj$Cg>6Ahp5ptcC7$On`nwUMGw+?vY z-o5H`LG0hzy9Q1jKj@=BLhjC3WsYAA_UE9utnb27y%TaBZZJSb*&ER`{7)^!F~K(_ z&O zCh}Hkf~Po7I*8z>$Nsignz0ON;D3%Qw|DCJsz-gZH_zHM*T&sG`ONWty$7%k1Q=rU+0XDI|9aVUh#_0vn4M&;Mv?Q zDq1J7xkV(k7^m_u?g+R~E%JrzeN1KVKBzqH4vl4Qr9qXYBX2mMy+maUcP-dq7k7>K zP|$7XEf=DwA*0Xe*zST)m|g}!ry7t^lSXwa<$4Mrfowat zbb;T^yZCUyUnx=5Ci%|DDf(!FZA4GC=DXJJ!g^l$wWNXlO-JJC15<^4!i_P?ke?v@ z-ci%6>JB;kYch3sAg^()0j0h~U*ETvFxWe|Qc$;S=fYt+`YlarIg!KuLja(w_abT7 z<%tXee_;p@hcRbY7@|aD%o!;#&et@MR$AByjpv-Ic^S5)oh~XFG|hybwpEvkO;-DN6w!xoVW5J#1d`?UpmiKPUG?PYsoTBA58-- z@E)d9NY*)=aLKBG9j{~ASMmo|SEd$D8nnqac23D9dz;(mbn#DcdQL;nQZS>l+RXGbgz66jCwomnsYN{@;@LKN9(W2A0EoT>C%F z+W(pVPmuj%DcI}(SpT0O{Rd?K8T@~r2fz537x_Qu|1&`SpZEI@(EiKEwf=wR@<;z( z|4$3T;lIcKtUvzu7%$*oX7i2zkdS@br`L@ZCT|YuDb?XS)e%cysF@k8HP{rPBMH@z zM`Y53#C{cV#jdUHO+4vzdx4_oj+UeQM|8eklspM@S`5Oq4>Pv8ct(42`*=wQggGur zx#*F$9^7HgI}%cyZjW^9Kc=TL>FDdIna1R|3bVa*dtR9qc%xE7xx15R8p_c$-~s9Ngi?9Ah!a!DilY5YA+NgAQMbzoF6EIx+{xWkt9v&owgd+S#d00Cvhw zI)}2!b^GiruJ_`}3?68Z;_Y=wrFxU=3{S*NUW>3(0JF`#fYlwN0zj&|gR4xjX$Zxs z`T3ju!xh8|j&%#S9kYsROnk=xR_q=x)J|?kXCmuPv&2LxKb9NsPiPHPupbV__FmigIWdV~J5v--RT|wtm 
ziX$sdKqFzG9L0s#Ve`*EZ>rvFve62nB@BNRPx_X%@<{TS6cNld2P*o^tLTPv~cZTP^~nw6#9Oc{W*~PAPLx{x(mHR zy&=jA+6jfY$%3rN=9IKrCMBF>m7SB7{{kY$ts53KY>r}v%B9V(uo}r$F(P4g1{1g$ zZ4`!yXRk4KyFH;wdTD2EGNe{D+ZXDd>J;0(tN2denL?4`nkc3=yVeNWqhcjn-hXiT zF{@H9r6t69=DRj3HPSX9sLDvx43oWJn4(0BD;wS3#peN4=HCRKl(^ub$8yIpw~{Tb zJlFUmRw?2FEqFU`{U;cih-<9BvoSw3U)9%{+4TPgh!hm|N*0k8;2*!c+M`hm0v+aERsh z(MqlRV6y@AD2^RYpdg0FYiP{bf9qYtK_nzs6T8|$i~k-;v>OPu>En!LjFu)iSQOO1 zoyt~;TMaOyMqAs!n&8qaa0wbL_sJ+O|2oBG9r5@WVC;=-mqd|t8D5TV8=r& z`x-?@p<~l{;0a`Ly)3U|HbFSs_{U-uGUUfDJNrp(7VKfQ;h$3pW7%=cN|Kc`Avg36 zv5)O!0Rxpq&j?sjsbqCfX}F^~pUPhPhG>^m+oxrD5#E*+#AC!4S?LOusD>eUONDsc zb$x0b7X3n$6pRb+Vv<(suL~JPaeUVQ*ulNT1SmrFFl`|9RKxo6-#0sJC+KHkO)TLH zzMd$4H%qn6?b%R5`C>yFmeyuJMXW7y-8e9gkOTD!dN0Nhg>!Iq|6+2#+7ZLpjH_`V zT7r;;;nLA3=oigv8+4-T=Eht0=A(Rx7Z}rLA1K~;f)Q3zj4S=oXYoY-k-mGMVr^DG zP6sGcC?9X}RIMlaryTvEA7+^wy$p~%4Y}NtCLspbe_K1pi z;h^{p1h7Pv9fUW|U$1Y%uIFrm-{{&cRI=>DTd85cycs?DgblI9a+`n;dQ?lgrD!eb zZ@+w4Q65Kx18n5G&);tFW972mDLw8EJN>T^EoM9W2Qu`fFw2YTP0yED!*ZF4=U z#L`XLL~jExxmEE<}=vzJ+pE9pHPe&K|-+B|~IZ)R1` zXcJ+TGeMEd$7VEKVDm{!Y+h-7inJ$DJ%sY#mA{okk5Q{?DBoopz1=CCN^WnFyIk;S zG$@1^+oPz>SpOOC>#S4wTy3W+$DVx8gn2Vmc1Jo_d!ITvsS-jpC*R3>#Ia`b9YJW) zI$}yR&A@%OuKHe5<6oqAj+#T32!?nT*N z?AlDPtF&4WqJX`3^9R`JxIZ)&A^zuT=pqAOmt z8G4NJ+paUCsqXg^I5m4qq{=UXWRjS}AGQUb%VPlBxN0$AyX$2yiGGipVy$u(ZbMx^ zt4l%jfCaP|OY1Qk@~-3I?tQ{aFC$!l&ksX8`H)!xx-pgB27ZQ%i51JBNh=58VV*4> zl_XnDnd^J)n|rIT#6(g|4WnArt8j&AxlYWqbz|x{y@iavuG$~r8+<`I3*C;HPS9Vc zLe=nCFQypoIuS&h#;*qvIO*a{yC)f6Eobz3@H%!mZGf>CUE;{0*V@MQ&j?)aL#Oyc zI23+<13!3-xW032OlFK)zLsaHo>mu~dYUM8Yc4ZPq8$22rT8TNe%D+zVvAtL1-efY zdbG27RJ$0TxGxO|@n`)uvMxDzdF855aE7F}zq)Wfw%*eUFa3(eUDBYd86QIT;{n3$ z^1myug?~{7^^cRv9bm)Gl@@r`oZXu3aZ-%SHD9Eo=I*ZmA?Vwq$2VTf&^p`7@|h*6 zz?1(M2dSyoH`E7w#mCBAx9cHjGde-UetT^X8B_Pm+98QGw`P=u*Mzuh_VpLk!fyEd zB*P5q@mo^Hw!P_Bh)i5&n(It<uAjOD-L`OvP#xF&`pgx?AKMBO6LjKYnwAHD!0BU|Cz2vcT}M}!+-q>9)n)Z z&sF5aIv24#nBrriMs|L2$nGbV`1BV~9Q%0G3!lOa^VIbvkRJGl3zcslle`^uoPW3M 
zJk5{%D9<|e0Z|)3SEy#fI6?|IAF+=(vP3&1LGYYk)qz_}#DhXV;Sw=hYw*(%t@32_ za{&iHOY@xkx-++8O0Rs>_p3g~R|khLy49)LfOzzWW_6lGr;DdNu$6m%G${gm- ze6ho~m^cJ7D-Ai~0{Nonoc{Xy&g}URm`w^b-0XxAgfR`C+@J9v5meO#Hatzn+88=& zf10!$qcQ$I2@)68KsbU=z_F*<8(FE@zw%x+0-9#~JT2Lk4%5k}4jA{Y`MnX~mZt6`jrtFOSc&#yM+DD(P_yFz)etf&5AU--E0e zjqUP>)>f$CwWv+^ffuOU#<{b5{L~(7Fp_Qvj zYWITihe_e5b9Ytx2Xtqg06KoMY}App)Zp0KKDhhZcfusDDA{yb(zsG-?0(Tuzi>v$ zTr|U=lnpz5iAO!$x1rR(^oqmZE%5RE{;S6P+c%UzJIl+Rs`*!LxyFwk+^ZiJ&i&y= znai^)U*SXvd$)e*hkm3aW7$8HlV||I;743A=*)C(8<}mCVsVsh<^o>jc8@8Q@rFde za`czUyVm-he3sZlm>#+|Uzxp`BWXV|a2{@-(t6SbP&Yfxe2s8xDu0Y*3Lr<^$wBLt zw7Uwi00fH!0LmQ}vPiEd6x4xw_D(qN@5y0sMhP);Z}PR2^#nBe5LAQI6ipTh#0C(= zVm0f{jhE)8fA%cq1-PFj4DK*OOh*a1+YENsm*%iBo;x!hbGpl4_CaScC%Ok$9?XLr zDdC_ADZ1F)u+dpv$05f}^V9b)djvRTT{+e&m#-twGNcv2yS}79<*efwL<`BE3JR1t ze$~&2+D&o(Dzf>4y*#r#fF))&^BK~5biD8D*U+eX*65rQ=07E}Sq)8+R!fk}(Lvm_ z&uf-%NvnStY1lC)+%IB&(%{@PJ$rsbh4DLbW|>a(gs8PyxP5&mTVq$#GV0iDs=w*L zcb;?+O=RcI7bUhDdclv{HSO$bL6B#T;3wE-BHK%l(ima%Y!vByOpu3JYS329VjA3i z0A@3_J`0BH3(7S;cJV;*&Kze?CIvs=Q%pszJqmkRd3w~7T|S;Lj(_}g$NRY3QGZ1g z3c*S*R+svRL)RgdDz31>>d8RPDtBCzyumm}$>6gUvq0KO{s`yMo}ER5Jg9DCdq?L_ zysr1}#K%$334PT?3ilVeFQ_p~*4-$*;6Re;h*pj(HSKt7N@Bu*Q{)VEGbtW9`Y{MP8IXTo%AD2`s3VwSc&dUCx)GF< zC6-AB9DFUlYG%i?Nl;))va!MVj%RRG>c+A^KF zxs~4V0)b+Sp@O{ASuEx)awyvwVxEJ|G4(azh7+o7QLqt=^8qk7u=;Ft{bqP^+Kaox z4P`b)&>5jnzhbqG3FsM)WLkRsCPyDFRSLrqC@Ru2?t(?eZljZ<3P2lBq0LBGw$u{m z1MyNzKHG#-mFyo~2|1P{-XXW9hymiurk1lS&7O;X7i4M@ACQNN%`H?$M&&-u+SnLq zn}sl)zqgDDSr2FRm4Swg&8qY6ih{DnI2H13-8pii?5uhEH~+*1cQtuPJ91OJBzRw? 
zj(d2fah>egze7mV+3JK5sB4>Vv}^S_P+xr8PeaJS_JJDgbz(Pv>4^?EHj;>3 z?L1yaA%eH|**9;MZE&nPg6A~&G2QxW!N?50@Gq2PGfarQJm0g8j{2$n^^42Rf{(5( zj*mT*^w-Y_j-wrTtZ{hQf0Y6HPq*)R;* z@n}R*=H>1DIj7U^)sy0_ypr!sB;>+VbqH#~1UdyV=3!1nJs_66JZbHe3x5dxAVOD4 zU?pq71l-o!QS9(ZJS4&;2Ki#b74iEgw{i!=?gMlhuCXS^i+PfA#3$Ya8JF7-u=zDC zyVvg+Y;uUl9a@pD3C(Q`-<8IHirLhZ1TY7%czb@oW;1#u2%F&7iC9WR!qqjH_1=z= zzMo3X6`t85p2P0`40ocv!nmqne`$#dAUlN`b5z~^l|x>Uu(NxmIxW&oT29h*RtFOdc=WABCQduOL%; zFN`7TxA>A{n;)wiK{O##d)%YqO_`_EZjVpnsKT~wR9rAk77HOUY@L^!V%pN;?X+=C zw2hNGVU&I~!DQ^xS4u5r#Lhyx|i)hX$SK`x{R;#LvCfjK#S}%L0r;RMBx{};!5gUs?4WJo*R2}I)c9ip7ppe1zDFX{MPY%GC*I~g+R%OAkQO6W} z;g)jD)4YfAvny20!)q0806YF*^37ZtyO6w?j2l^?&UyC={T>l#X9Jv-;mjq8Lmb5UTRHo8Wp|>MX=|( zRI#UXKnA*c3s*4Z@&+W`;l~brL6I*5W}ZidDJ9fW5bg8L-=@R8$L@HC<0;Q-ej7)t zip{sGscBk7_iSG`+^PM6gB@lX3H#FJ`tYfHL15Y0Raa5W$i7s)`kSV!(Zj_qv83dP zi8bxQ?OY6H!6$xJPEK%&5eOIHpsito%hn|w06@dmY8HtBd3WtM{+bka@KCzZ$rH}T}i^?z&t7stOI_f!{- zu0o=AqLD2a@6IKg zjyqYnV4IC=2UMe4=~!LGu63sSl$~2S=Yo-Tsf?rn!#P+#Q@?1*oOh9KEyk_Cb`?Jlsm- zyKZu0x|HiPjKapbTZ^3W&9l-;MuWKN?(lt{<`LaF$19hOg#`v>G6a=rbK5^=+U;}+ z4-OD07*XbZe0sc3pK==FJOg^I2@l#2HHX#J&Yv9UiI2K_U({A<=nzihYIVdmNP$U&!zy9=ALDnj?j)q`nv^CkVR-42k;V! zt7OCiLEl>X%?$@)Di04Ixw{@xfye2&Q?C`G+XWsE$BWo{`F%Y(W6gln9ee9}EhQq{ z>+sPVM2Z`{WO0tBox`wT>GU!PvD@0IX+^SBQ2ng|y?b_KWeP>TQN6ib2+CVB8@je7 zpVUZjVc6WdyW8DbwYaHGWd(OmcICa^-c=eW_M92gu<~!;Paod}O~Dnq9}h3Y-;8C! 
zSfU=y^!4u*HM>d?L8Qhm)Z%gFgpm>poChYRj7N)4Dp1nADTC(C-VEyR^IkGj2Y?|=RU&6fKbQ%`>@M5v4N)hEe|m$VW^fS zkLv9gX@ptrIMf9Kw>w>X8#9?WQGe~P<6Q(*4)4F;UGlyil?rPHL#=N4-UOVYy(%5B zQq&WWI+63(j>%|!P6w6V0E$Pg!xKe%;A19ic>Jjw@Zauxo)iMsNm_I4vG)XRqMWJ231k5u+m?{mk@->^kF)> zbDnI#HChlGFWRIHQC1kcQ%7)fTzN!WumY|dUpODYdgQ#myq3Vu_VU6Ga>V0N!@ifx zEsjC56#-9om38bYX_e-PD9S9mRHI;)uIV>73nNiD!o)lbquc9_iJ4eon+Q>rXsCV> zYD8o5S)-o(^@jeZ8S}4|ji0&AuB3=1ohp4q7Yh|Lv*CvW>x3b443k1io==%*b4UQ9 z-RrfAAVWeBqJ+m83az%x@T;!vvODSzm*T)bv57Y^6y9WPjxxS^!ecnXy7<8M3QyNa z+t=O9nyvWXCU~HA42U>}D`&;woWwYiR?{J7x#ImsBpy8(ceqYpxTnj z{?vN@9rpL&M{DA>>=|O=>M+!EP3BOKgSeKreB?Z_96eLq`sR1>R(j0+clMKwnLSmV zPy)h}oWs*7<45stGnEhnoG4i<8SD~rgtBE?Mmr!v+U|N=IwUSC4ak^`O!h-|fZ#nr zfJ;t~DK2&9UXMvCuMB)%HtQr^O!fZo4h%~iKeK|tjM=#3s$a{TkPbNG*Q3eyx>Pn; zL3k+>cTM{Zehw9A!0W4D(B|ZxTmL<(JzQM-naor4?E6#+&qyFhR*!sB5`*%T~^Lv(wOf)pI=&=UP(+hTPtEf~Y z(=xt{M3MgLiw%TBc#Q z#FxDE=pc={gSXZ5rfq%XLU5S=jjsZ$jXqh~y(tE11;z&*T@y67Z3yWrOfT9-mW zY#V)9FJrSSn{#s7=aVg#98hc=Ufp6S2Y9mea-vYbr672D2*>Ct?_zo_-qum2_C{$C~R4@w>EKPBgne+Bzbiv5F#|DO`{XQO{h!v9Fb_x~P)e-Q2e zQ}X`LJazC7xf|?1sP})?|A$2WH!mFw`0wleGyk8|{09LK_L15C-><*(pC$i$^e?Ji z>;D{s#r}JY&-O2B{q}Fgv?r0sB1z4V>iXP9QjV3Yg-VUJMkJT0D@I_FoQzC3V1!>e zV#WU$K{rc$yTdrA6TI){D1_e)a{Lgh+TC)Ct-#^eb4XT`DaXCj8=h%h_gcdV;oGkN zirn#qu^QNJp{vB>GKQP^Sb?|rC42E2zfReK;)|{1>)SCn@XU_{u%*@FlfR=IakOUa zUQmaL%Lm4I%nhVQd_UjXD`q2rIB%s^8{FJIX26t}I4EW(_c7`-?Roub(TvD+9nIa@ z$pn=SD4xF0!NX@Fc`eL~bSuL1^iCJ(bj^n2-5z`+A82N$aO|fpdWM|uQ z3Ln}l|5KA9ZCs)N;hp)3J}}FoO0BuUl>G}@x1CId+=;tzGOG#m$R^cF0j={dju^dh zhU9PZwCO-Lh||z<08E6O@2gg9GYQjHxDr;qo<^l1Tu z-oscPjUVP+Shn%8L|X;wE6GZZ81UU29-GZoXQul95=Y=aGEW2|%&}a>HA?$$cP@;Y(zJxXKi^ItV?M7p^9M?i$OzSMQDHwIv&CuJg3US z5eddvDS-#Y8N)V(KqyT%K_$0Fz#R^1m={F}(cMc=$lzV_eyRNVMG^z?*8EF ze#e=|)b3_)ZNh^r1TjQiqn!RAT4~%alM1np(x*&v=M?)6!rmfAch6G&18oDX_wX69Cq&$ z_4UvH7RGo#s$SACdlk~AOFXp8T1AqJjY9$xhb-JpUXtJYeqjn4HxjO%5`EjoaT8L1 zBNU4kNi&w5uYB1CQzV4B^M>r7Ovq>;?K_R##TwCYIO*mRzz9=F6eH1G=0WhsklXWz 
zLHvFUE$(|gRC2n)$jsHHd&V05v9*dNiA}U$JULuW9Y}p8lwkB&i^UpfOKg*~zyq=y zWS76&5J6;LkUT3VN)}kl69tA#Q@0U!Y@nohrRv=5X9eBqx$J7-)N>|D&aJqh4JOG) z=AX026hL%ZO5jkZ_auo!HcDSTF6%uUrGN4fRHWd!jTONHHfSMPGX9`CV%@zJnk$nD!;wIey#!aq;qAKek3L`>Tj_Rhs&q; zsw+MJgA|*!3O?3x=8>-sfqI5bH>`!9Io@d!uZSBe%qA;G2P*h=$iar6sL|zsGYF-V0Q_AIu|Y4^NYj=JDRsj;#rvJT&H2Q_VgS`b?Z>ZMPOlmxOoMu!TTX|;i>XNVOx1Q`C zjg??_B>eEZSqX36VH_m&iJ)%3y}idfC4equ;ELp8oS91JFONvRnzJ!-R5Y%yj#gdQ zG~*gBtW89hDEhOr#J^eDmM=BnC|EXMAlt|4&&4Jm)o-);6&Rs`me%)}ex%_zplW~K zl*8gv@nOhAO>RKNX!!r}^%p>KbZxgVj7vyxcMEO_?(XjH?he7-g1bX-cMIsFCKr^4!2Ht@|C`0(4N^5w%Fpeol^%-lHI5U{Kzw+vDRo! z(5nF|!$4QAVApy&)p0svXW+|O1X}6rRY09?vVatC^?rG6GmqN?~lr$tTAoqH2 zUVqyst3DhSS$kVKIw>Zn*A3>a9L6qeKx4Imsq!b8PqNv3DFq1MuhLwe zpROY1DvMDa$S&w#*A({%XVrkf;vHq{9`O!$z*INqrff&OIE_?i@z7G0Uze#J6YCqG zPES{XOo6vvppi#fri?ut2%x9H;tv^P_bs-5(y!(+1B~US&3docuPJkG-WILEH|$cY zc=(;(dHzM*&q3B7O(2acFrrA{ocG$Yo_BiX?$a_{dHJS|@F+Ia4coOE8lwD+hh13n z;b=3*v4iP0Q+Jz;;F(sin}1c`!VW-=1Ic*$jeQ=_`u)h~aal$SjQ9x~iU&-M__9}; zc(05?IMJ%^d2~2jHZEeP*5pKq(!c1a+b&tumC!#RR!eXwMUqOWPV}+t?KckdDpTU< zXm)#@w-K%PEaB4cg#j#ZsqO?_?!dNey37WKFt zNLUx*>zrB~PHFe{lDWMt@Z=EfT@a3c^pl??=kMT+noFKA5S_Se($Cz=s+MqF5~11;7`O=hf`fWYW}m^yzsVzkSlOIj}I&s&j#LHz; zB=7$=ahI$gCvR8Z&X_g+>k1zE;oHM_DF&)7)&8&2#GRY_44YFA2O`B5(N#7=yxVl= z+dpS-Iz@?@!W0c5+(V>}C<<-E7`hDw$H~ch38C;j;s{?$8$Yet+hGJcTUgK%e0?JF z(}cV!5Sz8ur;LFBU;?72U79bT+^n@rL2xLs;+Yopu(NO@3gY;CFXh_ZQlq zW?8FNor9SrtKqxvq(hrX3}4vx{`2uG`9rrRE5*|cVTcA`kVFvYuyu-nnV<9&w|&b_ zLsw``wXcG4$FBk@cb49B@Lij+G7HD?Croj%iKdVec=6!RIW7OcZZLm=kSZVqh|`lq ztiiXxJh|y{Ho42uzKkVQ;W6?Ec*g?oJ96Dvv{ra3=EXsW3h##=D<{R)@BOG5+8;t@ z`VPU9v4le9pI(3#OzNN{2SMfqSv-OPMN$J6Z6r=Xl8AF?sIUEe*XtWg65F!1Zn%)f z#Dk|Qqf?~|`MC9r3hp4_6vm+HRikW})zrv-=zNmNkm>#hjQ55pD*HPnNkxHZO5(V6 znmNk4{UTSS>-(1QQBC(&JkYLl+DlqYT`1tp9=C|Obp-zI_r(qhHd)tW97q1xit3EY zkIzfgM?M&1E##KDk3KGxfU+BV4uDLr&75`$OMq|Sw4G1-AQfZ|@ha_ukHbL;gO%`C zXYw?}P6go8-Lv)f}AvLE3O2w)DPCF^Bu6eyY32GSM|%US=c8c~n7c z6G6_HYX5*MOO8tbBw%m@>g;TgeMez!pEiU;>v$M}*Ea&4Q;?XS+HLUrXQFQhoN&*m 
z%9M@0RHx{wJxwLq(!a`7#2RnE8O0&fW9ad_Uz1vVxiNx@8&3ukweCUMPCVsMS>a-= zuXJ!`D9}=Do%hZbf^m|c_dGN2?uw7?Cp6f7(QP{NuEM8P@!y|V^SC#ExMQn%)2CnB zPL>ulU+oZj!(VDVnccqO_6&eXN-{oRbMaMTd3ZIKXq;)Zd%er&0Ny_fy}@)!*){ zJEkdJrSWTKKhbOjz|A06ltV5z_6{#(z>v^Tq=m8dut4e2F`Mn)B*#xFv$)EBudp(U zQ00l7Ej1|oO=lk3cA8K-NZ`pEL*qx&>!9UnX3RIGxn@mgX>zKNT5k=ndW~c(zD@VM z_5e-yd91^=Q@>`iH7n)tIgh)q__yHDux6#VC8RUe8ObP{l|eJSN;v)<#d!`@;nD)l zt!ai;vueWgRvmvonZ34Sk zhwAmQydB{+wKw5eq3oD)G91|qsT@&-{d)YtHi`3X!bwmVPw>>8<0au)f`5fAu&OmR z#A-s8-TnINAd^S&>%ee-(JmiF)x0)M{N&B@1Q2&`d$+jQ`qf}Na9Wqr@G_%h)%jhO zG6*>%0VPDJOOLI5LUsF+)-cAilVD3|K3Nj5)%>RBN4fU;_4|ek$f_Y|j*D==Sf_pJ z=#C+&OmJhh-~X-P^u_09<{{il(N{R4$*Jb+UY|Uu9!n1HLSn8zJUz=(WS=w*8>k6Q zGmN#wb;wb}=M~vj;JT`@WNo!~}5b5MGU!GKf)R(+*!X2R3f`soyv0%x)&mrTNDW{ra@E!ErH z>U8-$`EcGX9;h|*h#QL1yrm2l!E?MA9y=j#j?+L1jMA|dn%l2$yU31KIOm^dAd2O! zXx#b!n`6b5tA*#`=kzM`Z?iT%=%$~O*f0zm&M3eJ(L_c5VVmS|tKz|>3c}E}!fquw z^&{hN$?qigMpG$CaZGd%yi04iAKv!5F<#uhR(Pvg`hZ*Ng?sAWBC2L~{!a`n zsYc^yA?YN+BXMoJpOb_PiPa5HKXJlrElBjZr_1kCLVc8u2;7vL>9T0YGL;GBMxHy- zs}9rTNIVdJMablSH1XkGO}T|{Z9|H<&_DCMAFS0@b100IKs~Jza6)`riXXJGTp(ze zT`+fi7pqw8KH1wCWsK2;CBLEVOZC%fgy(#@v(4_$ueSAr@*Z_tia3`$suUQ;cOrmo z&$|yc-1PyM7+_X`$UMre2g(3twO{oAoN2(u4dK14u$gS~gK!0p#q>N~5DneUa-)Mi z(v+l0E#PV$o5_ymfcNVXCPJ#!O^tz7CmO@g@zA%RZh%?a86^>Psvyoq&C$hKsw3I5 zo^tIrvkS-<@8tM&+Grw$9D0!>J<)WFf-&00Gu zK(6T-w}id5!8b0*;W+W_t_AqIRRdTD*v@nrmT(?<92CuIRnrK$)LtYR0oN(>%m2BQ zuaSTr+<;-0hR{VMatZEFEx4zrC0ffmI%TR^chMHG$!QP|?Y;qUPT%w3ri)^shjkw_ z=+mf=za`REI=(~5!NUYSpi#4QW)K)o{viT*SZh8t$uL^(oxhHbn@w_vl10<^@YC5D3z9MT5&hyR-2ibF(G;@WshdGH6WfigOCWgH-bXJaT@Y&a2 zkdFo3bg^$GKyQ9VCcpu_$tO{#zTZycPnT7G!JbL!-m_s$cfzjf^E5B9*fgH=#meGN zyxQ-PksfXdeTnz5)_$nLvFlp!cwt2D9nPihC}$sLu$~glg={xa{!Kxs^qe00ojZkl z{PQqj1)CfnqM%Z&V|T*xtZkWHC^IpbQ+J?R-W3vLdcY#l%pNDglxmxKmG`h^FOQk7 z0X3-7USVo0Ct29?`OIP8<4*4&F37yatqAX)WYjHVDS* zin9{1WH0D62(V3s;2l&^xH8MR(JuHIRlB($J6_www>RQ<@y=RlNLxYH%_g%dd7Jw3 
zO@gv#vSAs(y`f6_on>q$pV$gsFLG^ft`kb|kh_iyPcVG1^Oe|WH+oFip_=iHCfcNik-ifz_{xNvnp`Sx`+P8qNBAcJQ&?Gin*1g#b_-cKg7fxQvlXTPrG;s zpIJ}ahtt6i28}O=`{b=;Q08Ip-V6$PkxI+8NM+XTPQRpZQslu{|Kz3H?9H(f=xQNr zq*O)v7ldp2MW|+g081h}EUxboyj5&J%RXC6<@%m8-JLOg@-_7;F zVnBVBqp|TuFEvCFk$xj=p}oi*HS4`|d`YI&pnZ)IN7FdFyGd~A4bA|%7zlQn*d2$9*R#0u?;PuUQS&`w7)E`) zsuXeuqoUgZ?R|OKmlzmO-;&O(KCS~$m~^sPrT12q>n|J@1Q#`hsrPQ&2kEy_M>w0A zenn^`E_Jn^=x6r?ihLyp)5v>eKk@L2b#3OH(8G(#Z7y`u820tq zTfXRX+kfY8`p|0Rgi{Sy&Js2*AZai}L}?~zE|m8ekeJ^F(38i;4q9wYkItVQPiI?HTiOQm#Y z*5sW*(bB^<@@ODmlvEokJDDt`OSX5!iAh8A3z1$4-`jTKomIj)n4A#mxwu(Q&{l=v zF)HwfUz$@Q`~4jIeQ6E$p@u{G)MViUp4nl^V^kt$qgRjBchC}X-wk!b z?VxXd9`8{3nbAMwC^~jwe@E7)En~PGdj_(+~Zd#dE4LzWVv5|{J7fr!B##( zz7((J=g>>Ea5D-kt|T{aC|i^mgh=fVlQhenv1V^xmzlgjAXwaogwzg4g_X19(`MK9 z7MsQzS|ge|rxwqW>OC*g;H+7ha^}=6`#jjFX{63C58{AbJeVc+z2}ymu628gjq4Yl zc2X~&E>aqlDEZP~^LR*YqZbCvyr|73`Qg9ofDq^Z4RuEWZU6t{u>KtIAJqAOL){@k zh&%Y^e^KdwP;BM@Cno+^3=V2P|F0Od3cmKQ_%E^h|0Qn!Tl*cPEe~P-ulAtT|I5++ zZxz75kN0;!?*H8${9oGd|NP#+AOGj|PyFeBqW_@q;Cla!2~z(LaeNXGYzWGgQ(;8# zJJpz@j~{2yJd`apnaNCU|4A_A&r6yW87F3)5y|cG>u)w;SKceT9su*en)ong@bFtN zzF6GQ4HI)2aOV$7c~Neq!t&OICLO{W2k^aB3J+!knags*R-Z1jWbw7V=fqC&3%aCb zL|8)>Xzl9kr6O))WAxJ}pj;(T%E^bGC@pBH zvpl=jS)AvO=C!h)d?xI{6q=q>v8_%bvdsYf6ZXygg zOVm6&^i>MLOkd|WKHNH7+axlv_agyDQyRLIWEAP~*vl}1k2caie z>V$U6Yyds7xBGja@B`D#?MbJxrA7p@z1^OsxE>fSSL{P55GE?C>ypcJ378mk=fDdx zen+U(C@4jvE1BH?u<07lk8iI?Z=6Gj&asC!JiPZcc9JseNX8<{VR{rSU>>uzrrT%T zQ;p&ce9&xj#oW%TXm(&@536P^kQJNh8z*JO$_7{@i5-(s(|*S8GAe&Ia~&@SfW-9L z`vw8>-58P;iQ_}2QId!QPU*`7Y8E3gk??_wdTQHPtcj5=?5r=eB@WAo|G``TF-Xo^ zHIPB3+)>%T)7Brtb!=|Savds~JmNm6!Z|^jrY_~5;wXCwTtfL?8g4WiN0O*yZMb1Y zVHy`i7aW4-0LkL0(zYw{aifTDHIgTW0pxowFa%FX(4bc!UiwuYM^9m=Zy?~E8 z_&w7gIOztA@rTv62W++9N8RLIT5%V0GFLk1=Iz@6Tc);XJGhORwhyil(^E#xz&GrP z2r*A6j(+t{sY9h92B%A&^81c%Q;=`>H9#uCejr8Ndhn$4@SDYvaaCNT?W~RVybvlb zvd%ugkQcZY!+lGK*Xiq6tq*^P6v$@xlY714+)G2_@nHHE*$V@64R$vbWi@ z^Lf+mSZDtIWU?duGXjnv*&R_9=}x9ENcY#18d1r)k0_w^^$&ZcflhU4~ 
zlpTIK5zT3IIsxa`>p!OCU_J%a&hoS>#HZPmJp{H_8i_%65K~ZIAFV2ca8IMWDMda; zEsm~$>1x4B_V?g*B9!(;J1*sh_O*&DG$%uuvbZnz2k(b;@a>hbs#e z-)}9G(nfNzic6HNB2Gk>+&Xa^zC1Vw{*SDfC194y$;lm-WaN5PC;gjS)H;EiMZ{MT z#ED$k#u0oz-Uu2r6V$Fq?rbf(f;zqxdUclr9mE)Lvke?mDz9zsO9q62-7 z)JZLhCf6Fg6A@!U;uQ_lQS^CQUR+;#);sPgkvH@FAwTrD#$F=ALp_8GdO{x>$n39* z9N)qGh~5-&UbCfXksLt`C0LAi1<&SJF z)xwi^+YLn9Z2$rCm8x3K+^9x2t(szsJEJ-SHm2i0E49M zO_tlVCsx$8QOq_b?sXbd<;>Pv6MS*)?4fHa>m>6ch&3nX*_2uOY0csRG^dk=ne!%_ zS6MxjkcLpdbp%c4$^|n-Xzy>+o~{-iOa2z9r@<-*3dg>{g2u-IFz7RJ{cyy?k^{$N z$&UJgt{kgpol*mS2*XhNo*N2>q5B{DX`lIh9S%uK6G?O&zdeEQw9(|S@{S*Y@Xx59 zLyTawW9>i5cF7TtBGEOW>L(^O3_{^&G0H#N*Y7x$`qb>EJm?2LX~OdGuVq=g=H4be zSu}pT(6jT~QHebkG5-1a$7u+rD^Z2Bx~|=`K_(%u^=nJG3!rX;lZVjj=m(saSP>HG zrKAl`h2+)KP^_Tg)r+V*zWmt214UBQs$XhkWe8)s`wG**Iv2>YHsPUC`tzy+dkZ}~lEZ>0>?*yot9&A-BLn=1IAepxNEbKpFm7wF{WV#)2pY$vQ=}-moajuTk_))4g8JO;fQ)D3? z_fSmD%CZ)3-cXhqFSObgd7OsG0TV*k6u!BLhpgqCfVgs%fGqZ$Nn7pA6bS&B3*dQ!%_5az4l{7dU1< z6-?){AmN^q^FYoS%uQa%&(h~@v?F{wj~(s<$HcZYGtX1MVQbD3g7dx#G&Mt>Iz zoqxT_v48YKY#dcW;B{m;t_lNo3zgG?8KKH`{)*^o#r;62$?j;a=#n3O6OOJ`d{sLV zS&rvlu2qfNxv9ZdjNP_7XGm1 z90I75AA4=3G@0(`23^05Uo~D9Mhv;dvG^MAw$dcnDj05b*jKo9^4D)e?%6iAaK7pe z=Wo2Z>TmK*`-Ne1VTKSpFs<1mmklJfPm2X8Qu5j>L2uz=GtnPyaHXQQ-!RU&+N`4K z`0Kl*9mJa=Ra$)i6R+XnkUaG5fy{aOihge_(<1+yZ=)fkGLpZhbEOS}DqjR=zgddZ zg9Wb2fLv#jOksanYvF;@Rt=@+D6R(g-kCE72J>lzY55ntX>NT4W!WP222{O!C>jf| zpIt}Tw><--sHQ%NAMf}qQL86cY1u`!YZbEug8@~jgG{U&DAj*4b5KDRNTVPKvOq(F ziVV*5I)->5ORw!TORuR+QB~)NrsX#b;E z)Ht-<$&BG_S<~w!TT&onMv|ml1?z{k2YO_faDu1L8=0bMb=q!p22_RsC-afu-DYz+N^$g ze8;q%DZl;9MLtteu`51%8#}eRiIE)^t7)I1NZH&ZxmTi-G4Nxmd zg_wPuP@ZO_M;UO&I-?U{HJQDWP5WG~|E1H<>L9Q8cT0ErP1jNSplGObdzT-%I36Bq zkONnHUZM|ynQRD~QliU-s?2uP;LGH2Bj4UPaqnkai_7HI99rp(z_+_hR%Jrx>DTO> zdx|_I``060Jo8gidVE}zkkBkK$obl%gpTuU2cIT+7ELgt=1tHy&NF+JwD^-;HlFobL@DSt4X?FUpcLThE2ZR#e@-t2k@fM14bREf9W zMr|dl>SDxfmfG$%UDZP=$qmvO_1+>Ep4gIC@Z_ZP=zHRY+Iy0p^kt%Bl8)O^XL7mP6N?9xdLqI~WKW8)> zE|={Mm-W>4V(n<=>RY-TPpWicz>Z>?X#9y$Juq*md>x|6H2OxQQj{e$u3i|=xV&zi 
z>Ku$w(WyEMA)H1P5ljDMDRV(yA|hc8$Tb9)|xaTFhpN!bjeq^l%Br56= z;V?(m*S%U)Lab-($0znN&fhI8A^uI-|dSzU7>= zH`>aNZo{o^U>?S2U7IXXnS#0N0O{KnJD&U&XQVC0uGZ0ZM!+y4>`I>O`wgMs#;692 zn2W=~x8`41cQnbw)k@A~N@E+21m4lk_u=@Cb3B#NUz2U*&-S+8=CmoMKe%I)UEczJ z^H0Cm+@pe}@Y|9r7>)BX53XU=zQ7*x3OCytA>%Z;ig?G@0aS$u*Sxl#yZ!FkVT#+| zU>=&C_$NKSI900X(A}}o0;@qJ#HEm>JbqL`nad+n^AO`08gTu$_LlnCn&e`cC+b&=l*1xxvRCTcwpf>xXXJ3K=>)#|XU3dP ztHd+S*7k1tYCOEvoYEgQt{agL-b<<7dm^P%hl(=_7Zsxc5-en3I%1)xN)jFEJS7`6 zT+y&R3NJ8O43z?)+kxfcCMOQa9Il0OFn`H??ZKYE4+J3d3D{U_`i51BjMj%P+R29* zUD>5)%O0i!2ROKrd`W8h?0VQhNhmVneTg!>K$V^dx2w>jC~s{9!pL*p(YNz+qEgTE zdQJf~e`CFXLqh6CSl~;gDwWL$^R?slRW(&PUH15qsy@VJw;h1y7#X28IZz^LAF;eg zHLtqY4R({h*8ut#>>W!kX`TO z5!*_9ESQ0r@1k~mF2pQxp*dVqX(nK8J*x^j75W zzN0ByWn~IrH4()jmf{*9MUqJ4r`53?{S}(b5gBRlwYq@WeN!7W_00Plj%_2OxRP+A z>jI$i1VPVl-Rq z%^jBI+2Fd{RYGzjo#+90{YCh&fB87J4dC5^%@3PQ(~-W+$h}MgGsqFzI%|c!Ho@WUQi{ylrm<5VK-BL}aBo$}Aw1?KUd)dcGA zM6aolUU1ywsgvJ$K;O9Xx#VpQ|AHZ?Rg61BRnF%h!JGI(8KNclK^~K9Dj~;F40jTF zm>b(uF}6QG>a&>&e;e&5f&tZ0b2Ng4vahW-s2>y-J{@0wOB%}CtLv$vbEYSL(9oW& z)LFWNZc+KO?V9`Yn<(U+ofQ6Z!wV-kI)Ai|sbL#HSXB^|CG4IZ%P=3`GWsXBrm#9z zzq_)hMVS{C?mD@-aA_p)oyC)rErk4r7?%z{(EtlpFd9DVOA=Q4<6Af?sO)Yz3lfnL z&i{EAf1N~km2Zp1Frq6jy(_P*E3Z|oqx=J3dH6ngsowEP>gFxBnCN)iZ~OGX^}ZC! 
zsHER?R;Vr0ZD+k-7I=?YfV7&@6#E_?rzM}xZA3frEZFqW*{=jMep)Vm#Z_|bWTF34 z!h!hiG5YHL*FD4XiB@<*4{szz-D+l<@mIwbntA{hN_i|K(*=o*#B|!mHos?fr&vbn z_a>=&u**n)|1`4Cl~8k!)U>u|Cj4aT_sg~xed2_19eJ&7S}@^Jek6qlv)%~i%8EEq1!yY*Jcc0iX>@;1_02u_$FRrA0`5I(8jaDe^CvsZDaHGrB3d)S> zgq#GdT5@{->?&%WP&63F&t?Ll3^(xgKa!4xuvyU}O>)R!3bH6)|IO;yMKXo#a@h~O|j%^a8 zZ1KzwexYIl<091E2;!4q^DXf7pTZimjc;uYVPF@HYhp1#lqj8UpgQsFS!Haz-SSCf zdi{GDRmPH2Fr*MSxwg>nmG}7b`I9?kyoC>NYY|(f7nK}kLEz=FcNWNVfTO>C z(B;-iTe}|%wywJ3i-=?vwg$jsXRRTs$}aP)F<^szV4ykg-Pdw&9EO303D*?;vLmyq zxUk+lI3%T9sP$DrUJh3_PM{p=dv)aXwGP|L1hFw8%ds)8EW}Im$hHuFUO9_QoOJ)o?N0YGI{!;IonKv!wrQ9tX<5G z)d?aj2rBJ2|I8Jmd=mUmhXP1+Gr9kBs)NqfBy7uG$t#g|>JhcXUT(&})w6XJEWz}JH*!B6=GD~+h& zyho+=mMbZrDAM#REa>b9EHIL=dnvXqb{m< zP@V#g0qT_D<1XY2?~1ytg}&G`6=YLzqWETFP(0nDWSQ-Yvz9@{ey#^pZN`LTgXQ0r z1rHHP?m;x7ouV%N^m3z#{gwLdL$Xy4oJFsOWFLJ>q%fDI#b+JtbQLEt`f8dSsz~?G zi8VTC9iNd;v~~AXOS6t%=2GLsp^3!uD}gG}+1Hfep1c8#$zfuQAsGV}`XP%b7?Czw z`b_%yYP#@eBcb?IdhMFEB46s1R*GR7nwApi8EgX7XrZpM8ZDoPz?sD0x?d^7@?`@O zVI6W_6ZE;S7=k(*8PXo~`8F0Id}EW12p8+CkRK-1ngHeKvWi`PF}Fj8r$krYZ~WA; zOY8tlfj~#@EW5L_*S243c{(ke&Iz_p&1{h(BONFu?#VStb(k`!KPfj9(O_1xWj_<4 zY{TYVgU;%HSQ6cN9{s=5ec3-_AP`@#UVZoE>>g+KbpvE?E?lA?rQiLb;BmB#8X?t& zu|^boE<^OnjmvW*cQwrRiy5O!K0h(+oG9WYJBO-U$V>hBVM7de#q;(Tl-C2})7|6S zbu6;s4qhpfStZrbY($}0BBK3n#4|u(a<#` z+;5TwMP4u*)n$-f?9p@1#F=rhWKA0U%qr>fXg9-R+I&hYD3bHqooi_e<*e6m{ zgGb7_a8W%$tcRmswO>>F#C<4alGFW(4JuhsXk$AOJEhz9Ry<0fe;8^yRCn!rEkrwU z=Ijn-(4d7dFOk#Z$s(szX2N?@#4y1E5SBsWB&d1JQ>41zXtcO;p6Q^SnDK?+B@8}` zeX2(`zy49PQN&^|sh}1<8oex8WVG$z2;^He3j25#@ryfheW*Ky>WEJD6LA1b1V{7$ z)#icV@SvTMzd$$uiUd47E)W@6AZPPif5t&I#}w}B5&z`1v#^yx@i@^l>bFs-?jdp% zR2}NHm3t@&#$@03wQH^1eZQO6*xE<&BaXFO{lN38@mqe6djj2ut0rZ|X)6H2<2jFZ zy5DnLz=>w;OLxG*rvbiUJa6(l66+7wc3F+oc6u_hQXvuWb?yYlcuz!u-?bm27kZ85 zG={dP?NJ8_&ia3za^@ECEx$Rdf2H{>tq)1`~LrbU? 
znA*$lc^?GW4F3c}oe|Wxdgm(v*n0A5zFcWASQ-XSgdgH^p?Pyd+kgsPL004cE3AzQ z+W!9=)`q~=_z$f6zrosnVPNo%|H8iiSdLTtH~t4E*8Nuuf~_HV{}qE)|1ZmP5bO;B zdjHSspW}P{PXvOQ|H9%1|B6AY|8iUhg9H)B{~iDH{r_za_D@Xt-wjY0p!V$livJ$( z{~?n@5dL>Rp~U}zwM7BK;3&1FmE>@U7}CmHoa-+eI!!#k_o^Jr?b(|fA;*Zp5Qw01 zK1uR?KP!=jf6nrsBgGn(i-*Tf{^i?CU0}7w)u+ggIE+gio_vvYRU95nn7vcWyTJw9 z`UCOil~shX#7~OdM;9zfQ8?%&GuQguE3&DBYl7l<- zxZH+b{9S+dhR}0q6$)K~n^H-}ql6}`+-G^j+E7|x%w}t~0=Q=U(=)X{HuMqftYag> z$LqTcUiTGOU=fm(so}>s5yW9yeXS%Ep;zb-_b>>wY>3cytY|VueGcr@AB)eZ@OR{0@p!()iuA-jK=sdJyi)mGBGj2-0{EM+Ch@tV*p}bL&i6kg*8xLH2)f5%9T@G3_}8{lNOW;GkB%gNo;5#5N?!hm z;Cb)K`MD9Zi)n^DF-hrk0_z_je4q2|&UX9zo~IOv?u4;B(W$FY(1BrKyzjCrqrT9> z9&dF|a|?eo_k9b`JPN-B#szWWZesDk4@Cq~pTbDBZCE()1jg(x0&C_CZKv&0++t6D z;9Wobj_i44uK}|C<7!1jfWNxAJ8?xKoVQ_j{jI4E`-olN4OTTC%pwjbx>tUp)0&y7 zaF8`k-g7wJwW*)@_ITBGH_F?7$8UNt;m*F_v$(a962`VHlmig2k$tG;(N>b-&n*O6 z&*C!?Ist=g`HGU{ORfoj3vTng?;OYCI_mB9CgUDaqNP;_Xv}ipG-0Njk?HNL7A3%k zrGUq5xlx!IX~A$E!4=WEQ(k*G9MW>2BT z-mL4Q^};LDJ2!NKgASOuZ{b?Ale}UX>p5cwGG@M1j_|Lui1`E=MJ8D=TeP6J!FOBB z?}ZO+xJGcpTt97%+Nd*nJyIM{fa!CWJi4awctIleUGY2@8+Y71WSIZD&)jTT&COi= zp^vk>=kJxFpa!OV=|te{fc4HBX`jPQ5=`(PM-fgq#mM-HT7xto0e3AWWKJk`uJr) zf)u?miVeOpwq(Wt- zp7hLYf0(U9QI#V?c@ZEfxPr{>6oSo}Cbe(%(t_u1(ZMS!zdE{1GVkY>8y_y6IcR>W zoSU7Fnk9Z=Kk5uB>K2CUZ+D9L0)}J+)Tfjl*sn1tgYa~LW#aJ2q-!}ebZy2vA(8;_ z$^+I8Pmvz8^>cwpMkfckhXBHvwg~x~qQ0LrHPyK>lgg?EtsC@d$I~qpPJBz&b;sXM zqie0GxGY6E8Vj3O4II!}2<=#_nDe}1+kdlu(WQQNzH=PYI2jBs*q)QCiFMVbH9Wo= zeK^09kPqG_fM+uM;`+2D6LEIcFl%}~?fBx8k67?V!SNI;cR?gk`yKgJ9U%|ua`V-v zr99{6wS?$s6IAZ0_Z0TQJL0YU(~9AGxaS6G>Vl7`>it9SX<7M0tYRx$_<>$@Lq;dF zpE;yu|4njF*K(??&=R9{r=BGl-s$-Cdz`xitbNJ)Ti)ab|5Tb2pu&OUs^O!yC9%Jq zk2%TVzVsQz>9T{K7$>Mi_oX~7h6iwTJ+}L%T~u$>+J=%td=hZeMcg{&cYSvF6INFbz__!XWRqF1?Swe7dE@9ck$aj_jE+Le2e z(W3g0VB((QzcSvP*dHHtN33e&&A+f4KC$nb{^IDwjF(t57LBnP{<0js?F_Bfgvxi6 zR7&DeQgl=yf=b_pBht>sKHJeg@BOlbE@7qkXsiEbs||JVBBM6VBm8JeKfCm&YVYjj zG7yW;r3in0&zy1NRXyFP?N9+4y*v$Skn|_YckC=QHPRJqIKOvdi3b#rmj-ZweBGx_ 
zg0w+Y07469talW6e%wk3cev?0nXr$jaWGBv{$m)mqHg?5yT$dcnKi<=Qg!kwFMSe! z*LeRkOxm9I5(}nKnv!#j0l8%9@aT6{lysr7uM2mWQ36xO&G<#YZcERLN@n$m*^NYY z-`jI}iFMp{KY8_?$4Js@?&P4>cbH*Vq}{FebDfu6J(#x-ZIQ*<>juQqmHu;jS%Y)q;DU(Hq%3 zd-lZB;P2E}D_#_`dGEJ=qo+#~N@v@5$0&Zk%uaI&cBb)4yrBtLR;Q0&s{zwxBPUgk ztl`SISb-PX1x!~Ju9N6!iOfZsQ57h_%TTsK<-_}8{~cXc;qbq6(!9SBzWkm$Xz)~l zyjTJ>O5Ly#pdnx?VwfKuI=$KshKDko+E1!$DmE7Fy{igXAU?I>kXhGTyirAGe1RW{ zo7<0F+?!!@p$)4wj}0s7boXobaf-Jtjr=Y^@zhlR9jrqf9#|2^`&41R%;v{`%c@*l znfq(d=6aSg<|Q@9bMIYyUf^WYtl;eOqY>$zjfrTM0-MDQScIrLiKnjnl+D<@oKv@g zZJUfOW!N$&v;RUk)h->%$iE3@{ux5KQ3I)_?FT0ZWdI zFU;#FM@|ajcJb)kpo7Bs78-dhJw@&y@r zMm417jmqgn3c?qN%dkl&imq$MlsGI=ilcwBT$vC4BB#_%ho5tdrk>MjK7Tgvi=z&z zi&2}+0EY(ty;!{xqvPQ~kigc3R})z`b(QeCdp~z`R^#ikO%RDlP1cJ>y9?f>yUZV3 zB&i55xI27xeUiK!B0rUPKgE&2(ZPPgUwcNPKD1o-CBpVNa=Y{X{oN=`lW`wTFxm{p z4Sgm;WPZSxcAj^+3?SxRB|I3Mg>%384p5f8LUq?x+`R9E=zz9J%uz{FEb&Bma&$WUGy{is00FmEri# zG2?O^6rLFQxI!>{Vn>3EP8TV522_MiXvz-MH1LKCk%B`+V9p^+4&CEk1b-L90m?9Q zFC722OmezIwjTbNSzk;ZD~&RD2$_vMv_BI0xFe-@P;Oa5A>LxU+fMYTXHy=DDMzei zTT+Y!{(JT40J$Ow#VjIkADl#%IEdXPszOEy=Pr})vbQ&rIAsKX`JKQ?Ae4+M_-HVH zU*#;09briNTS!KIKJ;;JTZ6D#Um27TxRr)0*S8Hoj9CDEb%Iq`gVyMhd@K~K+~ z?s95@L(*DXYR{Iqz|3r|qH+?rZRX=R!?=jE;4q)~7%9khfb6dfbx!JQuM__1n|k9NUi|E) zql6Uu9Dg6~@^D8T0Rv-bauOeXZC}!`#!qBSQ@iixm-klI6~I){`x@W8P*!8)n!=~d z#aXq}_ec#%`~Oi}L^lQf@rKdhQN{>)ql6ZAcC#abSA2`B1d-fu0n%q`WN=wpCJ7O1 zL_|C*`;i9QcYSa$E|jxRE*+dRxUljTWlSIa$%0sS(X4l|n`LdIYl(h#Z+_jp?vZwJ z^2k-~bsJ4HWjJT;4QjlY$#ntq9}pXRxCI#=B*Q-~KDcw-Dc|l~_YC%|1E=y9bIk`H zR8uYw856I@;>i0L!Onc!w)C8L-IELXKHiEyX34{Cv}gW5>fSOqj%C{#lq^|ji|A@T zMbDiEgF56s-$Gg}&3aWLy0lGHyeLjd0gEb2ahTAzLCX0>hEBvaSw>|C3yL;2QWoPo zcL^!Zh1w4ZcFoxN`5TBz7Kx!N=^nAJHnhUb9qL&5&z~?2fYBoOGrj7A656n#CjI`d zMvhA+SY-p~v{GNN+3@b8O-eH|L4yGd4A|kxX_8~4n$k1xA#pKt-Aj#2_q)4VfA!8B zz6Qj$i4ywjTYndjtuz|_eV;H>0KBh7A}YFS#mKO0<{e>9eF12%b=kDkw4-S`T(Z>O#JJ8p=eS9IKK95Y0d zAmvS4K?6(fdt=$n;1LmceHp-S$RxXAVm}3O81)G-xqPQOBhau9fL`GY;Q@|0KWn|~ zLdjy}$n6nTZI0vd2tDgd?fUs&gsA4KV%+&jN^5K7I7)8~(EgmrKYj2lO;75~tj 
zL{f4kp>L5UqNK1y<}v9-$)k&Af}HOp9<|YZ5#d{y7C?_6PMSgPy~+2+OpK@Z=UI>A z=i0U2voSshBr8fkz`%HX$m%{?O`hxTW{{J`K2t(`k}e@8)6{*U{%Cf( z?beXp-4P3ITjMaih?Cz-0nh3|O_2iRlz+sudSqGf1rj_Ra7$rd62qIs!Z2kIHV&P+ z2dfb-1N~R-tJIQu=pYt+{I$u=WMaECjdEP){gf^UxO7AE8X6?KofN`ui}TNEB)=(& z{f?Ik2r-&Ch=`~@`nGH>*d`c;@xoAQ*xOd}2N>a|klQTn@YYJW<=R+3mEy#7F_k-% zUORCyuY)qq%@G=ewby^6!hF+ypcaSB5Jpzb7H2dveu6?oFVi_cKaWIVrbzbE*kN-C z<{$C1N|Tu+BBtm-4SeYd#!vT|aOQq@O|sDISl_dA=aplDADa(W^|;g3?m zm-9m|&}gbGq5=bhXr2Y#vj6`T7f}1_-UsG^#ipuI;lOyf7K|Ry@H{@KYWJQWyx&GS zvT}AHSPzI*!ZXdT$OZVte9(QTeAY9*indxCxjTH=ZVvez>HNIee z62I5CxjGjFvQh~oRp|0Zoo(_SBwW!NySQHn6s6UrB-~;icb$*QZ3yoRiik&~!3vZ3 zB2$U&xWYD_>sPdwUwp@U?DmzM@a;Xmz3da-BO#*rByfF+=ZxZfx*7KlivqFhV!g{W zUZ>9Plhz!c^M;xr9wYjo`3apXx(mLs`9U7R*~MrLlfr-CGi{-hW6?G;n2-B_aTRBF zi@T>mPkq1}rBZg(eU5M(fvY>w9mD&c;Pu}6wyF{L9GjHKkkm2@dShP zhzlV|{+;#as$kr!3w%L zj*{`ew&;U35Gtr-#zsRn>b~xsY@${{9Mfmt(2<}u1@3CoZ=|=ivzp7T zDs>JkTBh*1>}IU!Ow0UtTlGn2y}}#0X#DH9pI#}g9`=?mjNPtso8gCU8TP@>K84(g z_-`;LT?#V*k% zPuwoJb=rvS3ft#;^IIQX+u&3oIbEiDuV*W{g`?U#V%E$EU?>I-wetHoOnKjHTcN0-P-{R<4u66?t*6t+Fm@T03ZJT`s zcMj0=-E-zG=}BJos>cdfAaGOg7W)`NSBphLTSIx#spYTqZ{81%3K7KV_K1YCA8R?JGZagfS@VUFl_I%` zm->}s*UFuc^ggP03~<3;cO|5Nr(k?@={Ai8qf_xA$z>2Ea9GwE+5i`;$an--jcjR| zl|+?|b4}o$u>7W?j;6n8(!132Y%Ropo7P?;GP!qvNbTR`tX5j>dbn~abadYV$jPTT`bHb)C)OXdpT|^x#e@a$1Tal zvA7}I49^;|Tzwkq7#v-&Zu)LqSDKC%ef_-iuu|=lE7maGzR8YDrq^-sZoB#WumskE z+QUEE!d|JS%u>QDxbixNY4=KAKY_Wcxvq87#|WRj0Cf{ZY(3Cr@qv}V>@mGc8ES4< z=R+yP<=`w47;a*}*U5`&Zr6c@U;gOR2^@_ZZaS`5toK&7S|B}Pz>}Zy$0;~eGQSn%{E$mop?_SVit7`zIO5=%vBPcB!7W!SLr|H>wAKX@h;y% zWOYp$(fzzOplEpHBbm->wwPlIM~=i8`MTot!CqeDU>bT1&5^n<@wJl@K(}cmfLiOA z#QnEre4;SeBdFUHfoyhifBzIfEW3RkKE!Fymbh>nY&)T;X3aW3{^3Ro(h>gTDNcka z$Ius_+$*K98YUer1G{e&$HqeU`MST`XMCOYl>eA3r*Ewl#z$L0$lm~al zN2`v&`kfwy`w8!DP>WX6GudakllFHJv5wMGN0p9%QNbOB_ujk5P0prs(f3zP+*RT_ zohF35CU%8O`YrP`S?|irhDvs(?}1P{>2$v37ngXc1W4;4b6GGOtsL@Af50k3G~jQ$ zd}^Q7DIg-+tX8h!kXo3i+nMxIZUD3l&uZLdd>7zv2t$@~&5qt5+r;xVwoHD>rCV`~ 
zUlV&`g&)>S*Csz!Uz+@8Ue&D;1-68p@8wZto;WV=WdND3o9J%=Q7@vqY(8CIHh0%r z_tsj^_-y}ZT+-FwBXm^__YB)$Xh=8~VP$(hG!} z1AsDZ4mGlQvAg&5*^4>J!n9GDwn*)qJI@DE2^;PS2VN=4PBuJGs@=}o(--inzS1_6 zZH@UIUwAv>(VyDg0$uMDz76nV6yn|{>7?5i$%JvVQ=~6dZfa#m5@F|V;|^W055UGTAHg^3AOGP$}TPz-x%K9BvdQ;Sxa}+2APXY-aK8E zZK`D~UKW*#Y#bewku!>v^j)T$2vsdVp0xBrEXzRAJm z)B9qFA-0=Q(x~(sL~y+N@z>71&4GbO0;yW?mTs-;d2HNBpRTyl9x?d#ZO`&-{_Z+n z$#6G!1G-Z8Gt6G&c5+0)=fzp>m;ql92kNe>qGv+cvQlV&+v14d(_ua z=&6=I`$COk0H@<0YaIL`@{L#@j?1Q<&=m%VdC8s*G$yAB_UbI}t!rZvr0@IbA@Smb3cxvL7jsard8Z9>2U^LTp(wd_k?rI|L#kU z`hUjZIk*06ef7WL@Si|f_CEy@1cLvk2>ahS`hU{H|H8>ZBJ%%~zj5*ZLI3{5`%f(h zcL&kMKhge23_AP-${YBnK>p=hj`mOVFFN?gzhj77P(8>S{h#vp_56pZ``;q*J^$4H z>-?kt5ik8aCeZwssQWrF#IUeNB6=d0T1hR*`)*gO6I^>^NDF0~>kI{(N3m*p^{_2c zUXdq5?P>F}%c2bpImOAXd)^bsbeeE?-2Q2Ph1dGFd-})SgaBkdNZr8C?qvbL zoQl|dG#5|OZsJG&aH^}H(}h-vl49TdV$tCSe;(PxoGTX4RzzEcPg@-Kg7vC3hxx{j zw`6{a$(zFDZSDyFgFb=~o>?0`morC!!-=#^*w&jJjV>6Uwf2L*f@_3D`N00fui0cD zn+8zir~AOI!Y3&1HMiL_dEPy=n8CSYQu?fQi5a{Hczk%c7s+9*w$n2CA-dBBWKRrA zG2k05@@Ta-@#f5a46Hx|01^C6zbsykYjM)c+UR>#zu=76h}LCIPS;f^kX9byJrp_q z;cDv&w8mneIdCkXt2sD_SDWr5+oSV~uh8wFR=Z~QsS~1KW!`NmVU)T!>mfeqVP~6F zvEI*-@u`sQdV%gh&8y=Y>r1?+w`wf~NQM{OMrNC>(q8i|W`8KiKaDg+1dc1ivLa_+ zUyxiI4fq!Oq-sMaE4G;%?Y6yBXTMlwEpihoB+PNEQ@5Bn+eBHj{W^5XuoL{f%<%@{ zwuf$YFVlICKIU#D^_FGk>eID5^TMxa_n}$4EEoWDU*K1*N=PPWc+xl=xu^c4BYrrs zaGJ;Xw_>B{_wfCbu3D@rmsc?G|3cSL=l|*yxk7rkS_N($x?0Cx3oQ#WLH6E}!TwlZ z@X8}dLxY7R>cPz-;2}GnmVT!y~<$2B7ytgc~ zAkX>QP#j%m9MfBlW5UoMC+BBhKl?G^-YX3Bjq|YbL+|jS;K{d=^=Av;O+gt%#H6&~ z-6dK=$hVFmtNLd(*l1K$%QIi}6st9B*rpTxU%|V&nx}cduVK~N_w&}#5g=)=;i~~b z+lf{M&PTbNF?QZiHD^RPr>Y)<#STgM?^c0ZV^$upFOO=qh7ASMCRNVKc^K zuo?+PFh|`=@?H31CZ-p!%gv^c_?23iSy8Wf1luf7MLRKl?QujCJ)7`L9GfFdh1?_? 
ztuE^?bq$|@l!-J*rrGd0)l~Lq&SyP>d1fK7$cR=x7lWT~r}Y3il`(Q;#C^s&wGLFf za>6c~_F6&*-W>6Z3pL3sSXCN!X$e0vx>#ep_O5Qyh@ojSb{^dN-fTMD#zWS$2|KQ; ze!W}C+dK+BM*o7I+~fg_l1@}i^80lOLf5h{uJCweG!Eyp7X<;#F$Eu&JkaV81FxwL zc+!MZR6a)U$|;2}nojHe{Z@ygl3K%>kIFh@zh zPV19RN>#;U zU=Gb+{BnoZ7=#yA+?=)3=xuj+Ob{?yejj=^lwHUp0w;k!l9HJj3Vg6}`{mX1X=^!&t?BLPZ7E`QYY=M^KllUm7RI9w{m%ZImPzCA zq59-Z@Q(?aP(3{uV$faG$j-0N%gxY)*Ew0P4CNay#ReT8t0KzYG2uroki4}UPI^6a zZ=}4amK>fXmcH!ym=dAQJ?C;46Xo5|Hv9O`9$Tba>(38_elAF%qDxp@N6w-je)JHO z$$p5N0KX>MC5eNHPQ_N0jYko zM16bTT3Ekc0(m8C+6D1fgtb^x&7-a#F zrW2*j#{X0-m6R#^5j&Mo&}?^1%${4OJmQk2OkOy&zyW=9x>>p0db#4;B&Z+o-0fOI z=$LZ*2j~~#h+T+iDY=2c95$n?YxJ7Qci~B?*t=cwz;iL!7yR3thw}Aa*4uq%3*D7= z7xx%@opH~;jG3P4J>-i|NkdN0wyFldZ&b(kr&sxJbYpE8ze6tevKs_~UtYR|s@?Y;GR%$pnA#o1OUWCw5tQ> zCfWBCe$3`inMEghf=KD!ce**G^VRP=%2nn+*Gd3-5#Ct|?e2MDz`-6E^a9I=y4_2) z^}q*b{uE9rw@ox-;sS%lm*WO_fSder+kp0x^m3y&)&BZ2Z;pg0(|JGgWc$7RM9C|4 z3(^95-_HI+sHh?-fTq`LYX@0fw+Nh_B)`q>57%Fe43IL5P1L_soU1RTtZM$0s2hvl zTWW{*8h&c<5%DBFgigTvA_K-!f%7L(5r(KzBSq1>m8ok?wZ5U#R~_SvlQ?dI{(VWo z9_m3B^??8>6N zuf30g0tL|%JBgoFhrbxEo(|}nGfoKcBL;vpxClTQ$4Q`kio9hDDI`~FX)UgvzM@t3 z>-1Y6D>Hk})d{c)q$fx*Fo+?ILNEd_h(o^jUYCo6R=SL+gz+~POK`S~=l$yqAHyek z8;xZF+DjHB)15exs(i(s+=21vOV+pUkei3gUBagMdcCxlOJrxUmyom0fR`NMeBovW z1pzYBEuk|?_th{ML3)yIztmXKn<;!;B-GOu7y(Y%qs>2YSllJeKJIu@D$}~MQpIEj z@3aeb4ot*;`)hC9t(8^V$AX#%lMA<(E>7nO=i$uwnDG6@?Wo0mV5h&LhUid^Q)q$bYUNrX)^pDY?|RtAM{n}o_oI1sc9AZIMGaA4t}?T$ zV267>=EH~c)_3)eSG|<%PRBj1dp?tF>%OG!Buo`vcWU!VPvX!wcG+o(RWj>X`|z4C zjXrpYlatr)t9gjilP~2sXfuCb>ABh7GbIa*X)o=+z~mF z=+h!{;*O*5IB0*{`hRwXOAzn9Xyvnc-g2odk*Br$`e8kr@b$T!eWgCmO!$N|&J-eP zYPEKiMSS#fvO27cPG78t)HVdw;>hf9HaNfcj<{c6JelQNe`q@&pGYv;AX~^nqr$aR zWM^NJs68PugwrCDR1Y_I-b61ki7FUfHaA|Xt_oRdS)&oN(>Ab ze=vnqJv{wuf?zh!sZ`6Eu@!*)S97(T>}|>9-2o08OEc2X7Sa z!JWHEN)NL6kx8|veaNP z+n^SDC9L6}#hdp|+DV8WxO}$hKt$MUXXIQ}7xFYN1X7e4t2{D@uee=#_3hPCh9hx* z(>=1KET+U{*I$o=`3AMpgC}O3x*t7DGQhi(iU%cgSpYCyR@Vj03pAmq94} zo{l|YodMPC30G+Zn_}2>cJZLgUiLKpjqA+k`wSuSSd#U7<#_EuK}^|+f}8bf0u>LN 
zIvwEiOIig2|Bibm>Z{XW%LKE9LhlF1dXfChoEh=+lc4WS6uY#ZPsUXzxU^`VMMcMC zQ}=C%)lLnaWDN$JYW!#x^(zH^>Ynf7^hX{M$sjL3ul&F(t5>aSLrjn8atpH_Ssfy- zg4%(=M1o*+ust*d2U?H`PI4wz>Otb_gI2paLlk+3S;1^D!m}SUT<+i6M!5u%%sQZ_ zOyx?5DagOw0u@@kSxp0M6ErS7D=dqyMiULKHQ~fpAh9r>TEY`iJ zrrS|hT*BnuvQDzm_4N_>meB@Qigb?&VzTfwo*6eDs0ctpKTTo zF4!!iJci)1A?=dc)_{+9myWJ(nJ0I)N_X{!+h*|QfQM?FfoA<}cTAz%VB{x#iGvBX zaB-Jni=@87hlzu?)AH66hZZ`7xiOuV^6ImSWwiL4D33!oO~vU+RTd;qU)Jd_b;|~u zyLD;c3zE8z#la-yicVGOS0iZ}nH=@Z4#~Tunt>dBQ6^UeW~(VsBlRjHievUQfctJc zFQ5maA8{i=K*xd&l8;j>V$62!Bfy;$l(G4#dE86(Lg!g+tV;JeGhvXj;}HE9O=Azm zqiR=9FMy+yJYx8?1>b$lmNWB|_r@lBwA#B#^~e+2GFqd8dd;h7gg0O0w41doyqmd!c1qM(q~V8>nhX!NIO!6@^HaR7)=c zOt!YFhklV++|-*JjP#7JW9QwDGgG4O?@f6Pu{zPS+Cyu*j0F^QHLrKy0UvTt^ly2) z@}-GJ@-qOZJi)b;7JmBx2L|I^T*VYeKFZ8@3eqPA|LB9&TtUxT@qx6ZoZ#oK(~a)^ zIqdv{<5RB9l4K|;$dk&9dQXUImLs?!jAF74)=)P=on>@g_DavkYhB#vQiHL)YD1?| z-$#GUlJ_6!fkKiKTc>I{Wy5{TO;CDQU0^HLD?=|KVRx@%;+^C->LILtB14}6!{1bp zC}VtPaBk=)uAVh87k(eYQR!uk_X(5 zrr}#X4t>*0yopg^l>^pI2#0#D>3T_Z6E>&S0;dKTQrZK&#QTi=fj=JTh~5R?W}F)N zwK8CbUj6o}&s6v&<3J?*h^9s*pOmn4Us z(>aFC&e5AY2*)&nHt4Y5{Kjm(zOrR0u5Y7Mt^7m~xn+fM(#RXJb|>St4DicH;oC%E*|Gad8$?iDv(Em`Op;~++R z9K`5QzXH|I-7^+%RcOTZ0}b9@Ez$5ef;vN@0Lj}hO5RHaMhq%T#;;?ZgzEEr=cVn9 zyJ)%4Wj?9?d9uIPntrftR$cI*t55bDkAbN*ZZ8izTvW}tTrLxS zFczC#KF%`BiKlv)DNpAZrHSZiPpdPc8}dKjuPm({8=Q7k=k|0{S3yiaE4ViE5|a5| zW^iXKAnx(Giyq_nCF7+zw;o^K>FFidk^TH^h5);%+Twz6cqdItrpouGiAWH@eq_I~ z?K0Up>2kiku<0|--Mf0wRs5FqGrCAfhIBF#HJ9R&WfAZ0u71p>2^&ksl3SKA2n_Yh zS3_$wF?FShajh_89)CFf6@D2^O9#H!@5h%7us^}`(8Plgsug<(XBa zBFm^-J9dNjG+eQ^h`*AXvy|7`znJ({0S2wL9z_dAZ20`fl0de}%8cSW{G){P^sG6b zYo@ne_R1E9tdRw??{$`=H{QZI!-Cev;cxhrPVHuzFf1mV7lUCcmJU)oc;!c%)U2ks z8W%6~y6P^*q)!fH)ryqUY}#?qYP*H!2&8M8QE}Miz$@Bd&_pLolY12tSTlF0Ui2Kk zNd4U3-Jwhrx_%^HPyj`m3s-WQ*4m z@G0ZdclC_-lST_x5sTrcsj6&8V8I&(?Z|f4@U1NJD}3B~QU$D#!y&ms{*T~mo3XEmRc7$BXvIY=C&$D~ zCCWKpLKSi;Fs-y!Dlt0rITWTbyD?{`?AenJSU9;(}oq14xC|5kR?^)fiU)}5+n2RL);)FCAF5xg5 
zlEZ`yK=ju9v7M#rno#SAU~)bhvrai-2Crff>Fa`4|R@0gbpFOe37zQwG@J`dY!V?HnHs^Gc^nM|Ku*mB8yO^ zZYdhkr5SEH+8^L>;G7n>*>b8IL3edXOTne6FWmtCBuXu8;sV@aqHQZ~WFbi{)~xAz z_6TtWBTQ`O>R2b)74m-Qi*mo8PVnJA%*_n9xp>X}c|Ba03dQ%FL=jn)clAhv!EiV$ zP|>C-`qGm7QMCnhKm)@RiYP9TQo>C-)a)e|0&l4`VP#BNlfW=16e3UGN*toq=w@9YElq*Wu0nrybaGdPc* z{F_Y!K+7K{39iCq{66$MdC8OUH8>Kcu3Xvrz-H?U(*Zs1M0z>ZFhi6_|8&g()fy`s^^4JnNxK1D(KXmxe23e%OW-Ug{D(Z z99wk^I-5j-wg_FNKsr>KLNTQ}ZsVJ#U(R!dcUUfGBvMh!+L$lXP=^~V^4za#(g(jL z>SnmV2^`S%t!Z=CwJf5H@#3PF)Sm@mt=gKiCXUlk{=--NZ1#}}RXmvTmf_$jpd1Aaf zI<2Caw5F1ln=^qN?VdVLo$GglA@`WyvfGb(DjM_rpa`EZ^* zm+}`Ro@(*5=qCCErm3-6DD!I7J~>qQ{dBugJKuNCNaF^V2wzj?UJnOfunwPhE=u_X zP4O06u(fWC_)sgFX(|D}Q@K2d^5LOX<2h2x@Cmt%q)W zZsS9(hs#r$-J-R8=EJf74#{X^{sTAr8QHWFs@GkYl@#x1HCzr1RchkOeL-G}Zh1RZ zcv)~XfBLDSlNs7Ti2uq5P67tN3xj#nD0f)$j03FYTINq9WI%E>4O>_VibMT^Glne1 zE|EBQuu`V_q8P!|4ACx7qMm3*VZw3ekYqvS_G&c0ze??0DSleP+uPmANI0JU8n>j= zP2;ZvIfn6mx1qmUDb4^d=PjW|XloT+x#>`mPQR`%y>$?w0U647XwRr)s8uNd+7~E%Eeg1VFUD^3%x;qC=(MBJOPqf5e~*;3 zNr>Ak?ffZlhAaez)W-JCA%?Vr*%88Po4djtR%j8ShTzI(497c z-Yq<_kmE0TjmBSm>Um0%sN1NS&?iD#)Zf7d=oLDtr}G0)F`1ebv~PdyOV*W*^fbw4 z5L;4uamx)wze>=|H%iPkKg(XV-zm|1W?DLBC-qXY`Yv83ynuw04;xSIHKs@6P7Jad zQE-DtC=n}3_1BjfMda-23C-JcP&qLL4k^rMQN$HohEK@m*BD|FCnhy|m+Iz(=2=dZ z$)PWkT1CLs zQJTK zy4_bh?S;tsUM}YrnNjx|vRkf6 zFfkhBRUv6F)nSs}Fb@a*YgkPshwrSuA5$0DKC(#c6*lNpFAD@ z@spPKXxq4Ln`RBZfEIn1yiWu3(fq}7w$Yu?x{`SQR&UoSd_lC)t7*WzhLi*NEK&I6 z`bv-W$6yj%Uwqc~&?K3}?zp+*GDRc;z2GKqlPH88vQ%nc{L8Yj8#o*eDNAdSR_@Be zeY%F1ESHznCG%2Dr9uZ+=c`ZtNY1w{4;)t>wvIG@hyVq@JDpy5JI^|PG});BGb%CFbW6v+{znWt{7*M|{K$V1U;RQ01?F^}#2@O!3`f z*n>Y&0V5uk84p3QRhr`~w<|QW;aWmG8CDfKmQ3%9>}vcoK`@#R^Nrj}*j(!jerC0Q z+Ru>JB=&a&hJmyTg5ir9BIrIZz4wv|qcvXM?)OpsjA<-&w(mmC!ZH@e2VBNhf8H4< zZ2eji!jwv&C=4@-iwSIM0CzYDNG~M@iky&1fjT?F>>Q5M4YQz)qK4;pVz0ExWp2Kl z&z1d|od8nL;aj8eX`5Na)kO!Uu#GY1*@h{pT$ zu4lj>uN>ACqMrrzA5shJ(S-_x7n+9>aW)X)RZpPnHblCzRpFi#9nTJ#*J>N5+MJ zDfEH1FTVKv_|XdbuNnn%$je)%vOuDr^3X^2R$i^rbgz_enI9S3WSjkAj5$e}4Q|gu 
zRV+%Wbw-Sp(1`HK?`ymTiFSTzk-2z$o}C7z&S*Th@(RaDpu80%!B6BH#4aGT_!}kj zNI)^r{YI0EQ}NX3@a=6(sgxD|ozy7A@ws~NJ^1vU@tX|Mb+0J;I*K}gW0FMOsp9f2rD9@J`E8#`su-&p^TnG>g%H-S5)oLu97N`9^=RM~g zC-Wu0?##o$XnQMR@LBduf;8Uo%BZeSNrkTRBHwt_`fL~qILCc7!pXSv7XM@wfbgvQ z^E17EmiWWw@2BYGzJ|^08)+iKp68}QDE;mA=S9ast96E#3~D8fj`(A@J_cdSaKqs* zE~XYV=S$E~q*JkT%K8Ahj+oF!4l2ET<;$$PHrFogeNCuTK>NU4zrDborM1v3Ib<&o zS5-PitVWGBx<_M>sx~8G5f~j%qr#7inr)nQO}2HL6AY+(=C4f=Bp%m=ZrjZvYpr|m zTm}U6FS$f|HeU}}xqPuy&>-MkLit?;ro1qk0Xx8F*N+!g%_fOq_Oh{J?Quy$fOPO(ju{>@zbGDWdXwmewv zYxk>%Chw15XKxBsoH-wkk~v-7dewsV?}FKvqbu*gl0T$scIq~5%ButQ)0>@X@YU_s zet_W%Sp@5+%T4LrR}N>#$Fm^$9hGx_#{qLriWWOc@|v+Fo3TaV`AyAF9I@Z5D{Jv& z7l*fOP(qbZZDY52{F%*B2X>^F0>AwjBrcea?L`ZGCZp^p#^(C6szJ7l5upwdU@~R! zXDAog{O1)JDS;M|B7hhErTb=$V-e?=?nRDAcacs8?w+qT4q1TZM?w~*M_1fe=IjM^N#Aq5e@oHhac-<&~Oi%h!vZ?Xg_oY9(nrSeqK+_bIF=9KrNTcsK;yQctAm|J=&$P)#0sW{$ z9W~3Wrl`GLp1>7;c%KJ5=kpc+H|ilXUQ2295N(j|I(tyLid~hcpYRQJzSU~C?p6C}DdohU+txqt7pJAS z<1eP}gByNo5iMgJS+x{D9@(#xkI$_EVF%q2_LkoVBwbRrGs_h}SJf@d>jWKld+xAW z^W3JJC>+fh}7_40zu*@Kom;U zl+vV(3zs>wJ{a`yG#^>po4Xv2>Iik2p@u8o3$xvk^rxY4Lk`5d)2M&_zeB-c6gr<#?LMc+xvxj0^LK#!Qgfv=nFjT}icwpDj_rC#+OOM_{#g zAI{G&&%FM%fTe9xtqavW>e43p*b^tL|0c9Y%pe<2XJWQ;sW>@z>kq3Q)n5ol3}`jX z5A^RTWcow=_&O^~Bw(+Spw4JDlF({GPVq&ie|2T0dY4E(MFd1PY;?~$!(If z$C24m9WUIp;PE{gSdZjErC;WrEZA_AM+%t0)``Ga{|e8y(t0>q-oRz==nir-Tl2}T zAi9dys`F1NK6j`6MIA8-Hv2?mW+UfC-DKk<7V_rsedkw{HYudP43)q~K{sGQ4!<1v z@Emf*Z5`3%aiMpqhObpL;HjM>&gFZ<&R$uxs`QSemfA?(`U4f$%+Ab$g`mgT<=aB# z^6R$TevWZzPviO(wBMm|CI8!`vhw@OB_H5{-NzPl>(I54Yh&MIfje&5+Lw76k2j+t z+&A)QTzfKc!D8lhRKdkG{p(`rv1?akrGI@izhJD3nNKf|GMjJ8z-5fZD$s7Z_*G!# zBiOszb)K<+Qyt2vcxkg?GAP2&3*#DkHRY9t*o+))ACD^BW5c5adnyWC(V4l$-ZE;(-M zZ!E_rZVss{N6!9vIoVo2hGlydW0nf5c3QtRw1|_zY?@gjr}Wz{QNFMoa)T{`?O^Wd zS;vF(d*x?#j;LbhvBji1ROw(Zt1kTG7JH0au<~zk!#d5uGqcrtJh!!YoPSO?Q!cMq zn#Z2;?n{5)|C1WW4-7FXV2Y*YQHH^h|=3ou)-One>m^WWpre1u-uQN$DF!`iM&SoAM8bA&j`xgyyK<<^9B0Z+$RPx(5|IbGpgBckKZe`_RzDTdWxw`d#G%3 
zo#cHm-d?fT#2F0TM*?KGrby1jp(>?I_9koP1)%s&!rF9VGK(OEkdBY95nM0Wu4qpg znoY9qviUl5JORCR@gnQ9hob$ z^0!)LX*1SIz)tym16D)MpEc1Vx{zcBxATZUr*6Nb9=0_qV&$UXTZOFkJ|Bp=Cx~+K zRoxYbRo3YTaZ=(LvL?hA1rYnR6%RpXAUjwKrYeI}lX*Zx`7#?=QSR)ukr4 zdYkRmie2=nUaWx&6#4Iiq*TFIQwwm#^lDtJgdE~^xxCj!J?nsnl<2iI2@OP}{bfWm zANilq!QZym1%gZa^lrtnwMkQ7#vVg=GqO<)=bJVRrnA4lW|ORyS4ZzZD^%$tok>W_ zEtV-1Ck%IBc*|$D>mi4_A(#@f9M8$dslkH76?*OU#HqV|!G&2~Dd#Lp5^cx!*k!4p z(-N5D1cd0h(dy_NL>~nsP9`QQvGY`wOHE~!!y+Qk+uo*VnE_$!nSUk`8&A2kJeGI6;ug_q?I zRm`xezqzFUS`@Vid_I~T+_NBn`V+W^Nk>1kJ7j(i7pX?HmDr*^iX*$~6 zoe>DKv6BQfHa*DFx`-)$f~)gNMUh^-KCIHwM9s`e;FLwEh%|f|(DU)I#PASqN)7#q z(tNkO)0u4y!0|QHS{B!<7+UZ+HO11E@|&lb0BOd=`|TLUGc5HJu?@%;p6R|GU_P~# zGg{wbYxR}5^T=QiQuyWZ_`NaCViapM-;FA_TQ%$3T}(mt>mtCE+naxAS)mIbu(Fg^cG!)@8uLf0f(g(C{j7~vSNz3@|qCYnUDjr4C4Vy4e>`j zW^t@aT-hgPgWT^@3Kq+64*DU|K45_tet^onOEHTXt>t`=p$ zRDM;2n5jz%mL(vQ?R|H9wcvu&fQ1pWHE=RsP3oAZUj3}+Uco?oC&}z#pwN9bV(1=y zUr?amLyBbL)2s(_%u@91m+P+!$IRjcVKsR;Urc62ms;Z$ABm=ormBAst)9!U?>q)} zvr2nW-wIpjGzV^&G~OIzr9g)dEu40|RdxHG_01ZejNYC?Xl9ZR+=?qiFFA{x3HCs! 
z@6{w{xvgd_Jot64j;ub1MO`L}90Kgoe>e52KUS`^vt3*9DO6O+F#uGruCa@sy|1a* zua;<}Ya8!%GEzrI8{q=WaXq4>(pp^u3#+%A(wkl8>p7Aq;R?akKoRV1_#I z5cH|A<_DUcwl{T;CT68|7&+Yd84>|kb^wh`+%Q8L-Z%r83?B9dGYRGZ*CdcYz019!@;Zq!!vbHW z23>L>EvAnPb|2Bgw?2-#?XdatBHN&D_RQAr3>}uv;1$uz{&jKMQ+>lOCv)j@_blpkv`@4W^Kfqh(P<9X&Gy>rY2n4ni9&o`6$n~fJ`*8{;<7GjOuDktTbK; z_90&^8C<5*-znTLhsng_)Y>J&Ao2bdb@{GH0cU0;9;Xij zH{6g|I)2jaqXXT*Aq$Z@y62+8?YD#+U^o4#0{CfX$%0qT(Js%es-riLJ_NI|eMG(L zu<9QH+GS~~DgJ|j0LW0``^!%+9LK;#EcXk|k`6$uZ;62MZ|kK+CnZ2M^-?P?+RMaM z|G6Up@M6o<`|6!@JO`P39G|hC?^gS!`;yO(ddB;S$u2v*uhs9kHn?16;ntRM>QRII z%9)Vkm3SS!-xZ^Mcf3W2Wc*2zKo8@y11)2lTbI#k^5c*h=SJu>us-bOYtdER6a~&2 zp?%OBVwrRSFd{i}7@^AiEJ?P4#{D6Q$WlJ-C0J=%eY~kLPnC9TeN%u1LjR<8Mq;1ZQHhO+uWtM`@84<=fr(C-k-4}BUj|gl{+$LM6NZ*9AkK^HIllOQ4l9AD)^UA z?VDpcMHKPq(%;`vWSY0nJ-9wzTRopc>5QAF4+cmh$n>dXGEUfro*wbS@61Y1OWu8# z=}E9j5ED&qNdntQXKU)5ofc0@wp)#qhSu&JG0BYFQonnyDoA-zj1ZZNAEI z9-n36zr-ji6J-rLambkPVs3o&01umWf@k^YAz-bFBJj0r%d&7L*m@de-3zp*Xp;%8 zS--n)vHENa#;~w<0g8D5sm~|}fFv-?N*2~1RpL!p=HlmjV&Rb%`Z?)8Gfqd&wA8(rYJ)S{&u(hS^reNMff&w#E8h4NrO{^pUV5 zL)j53P!O=qQmnC5Bu#{?@dsSc_i`3L;MxGko;Wn+bTfA-L(bd8>?z;))6;JB!1EzAl+ZSnq3g{ey#r7sSy|PP8+{XhC^g=BmgUP1kx{>0)TgrA{H(g{_b?A;;xy=;8KsoQnd}+pw+B+ZP1p3(Y$q${MDpM; zLHyZ`qTkWekJy3Bv}<4#JOJsYaL9c^x*@rF?%Y>2>c~OrOX#`E#=3-{^C@ncnI1wI zT_Ne;SU|0Fb;X8BJNeGe+<#WXB~wc>+)WNnvLDqui!PnMvykZ)+|Vi)dN<{=TKz;o z`L)(2%HlATa@>-h@jB{A$fvwLiHnkN#qT;m6}#5q7sF!pla0C%A103{tow`2(u4rh?uq-y?G>{_=hXZDGcq9*o6E=8cB`xpVKmoNK0wfJWcQ}y+cRC;#|HhH z7DJG+=SFoIy`jag^o#he^MMJ1^keQAmc!$|6mFSQ+Tn>rMWdyM1Y2+V;nu1Q186fI z41_buFK29gz0bp!vE%Py+jbXzzExVo>o?d_K>dqDuB$=^X`j;iuR@3Mq3rHSj2+{Y zTHg_QIOGG&Pe@>3(0Ve!XM5k_SF@nwklRbF_lDetFb(U#pcBq#WweGE!PRYQ?;|<9 zJv}~U)cv?M07Ez*e98es_mouRK2E&~gBBbaWQkGf*mh7BRVo`~yR90xYb19Phg+~+ z#Y;>nhlq^H8KK=wWkw6?0huS}%SL|J+*mw%Pn$L9S z(L0U86M^&;Uf#nR{O!yJfs0=|sZ_{{Gm5CN&Bv8m`zzR@0pIosufo2!&JudGCYsbn zm6OKP?P^*SfH5gf`V85wIzW_e@DAhYL{?gFN!w@C{>D*>Z4u==Z_Qa&M{AH{&}rJC zb!FeI&gzKsjpu<$t-b9>O2^eV?{O&aSNQwpCJaiqb@g!;zC8Q0TD(-g7Co9wFPiV4 
zcNa%T--jJwkjseU=XA-3(d2Thuo?Uy&`=ukOIK@qsc&nakJ3kQe!xKHjdw7HrUVB zHx{i$2U+c?8?9Zu=cpl>RPM_MIXOE-z^y}S-?Q%N3GS1CEx-G?Gu-6#7ROwLYR^GV;Wipp(S1p+GsOYY&=Qk@c9*;rX zurmiTD2CD}xa@>FQ=$1&$}>aDO4P&|zOr{>K+&N7f6T~t0F<~M?iV5trPVT}atfs- z3`$D1VI#-b-({!-L{7oU5UYs0B+};83kO2J_TQbqa4r+XtmtGaX zMUH@7RH5e`6ua)`Ggr4iKz7oBC%T)C6JZ@wxd4DP43dAK=lQd-vHF3KuGO)IDczCv z&f+8NAcdCQ1T}G6kPJW8|F%n7TK47D=_$m~v0BGuN;#U3s>Lfa-bUWNZJ3s(OH{96 zMtT;3WsFN7dpirAn>at(C_j!gaiXr06%8VYq@iP-tx!=zQUOAfR_WMsP>|Ed{Rs0% zW|(P-T^Y#R7<8HgZb!c5QMOILsNVBrk~LoBj{$}R?d`$h#dhb=)Q9|8*D(?nl>zsK z%d#pfbktf)64Tha@{Dfy^BNocUGcnFv~nD;ch$ zIN$%74+3%7{|(CqlEnXCShln9e_^}-E0zt!g+YPD@&Dfb!G?jv@c-Vx?t$1a=zq8V zMdE(?k2dgv{|k%$50dylE;os@P#{~;>Ws^OSEN%2l|xB5{J2_`Z_F5zpJX!{%mx=Ou8O;1hCn&Y_dNP%`7I&S zJ4tL7Z)VelcIow_zt%{X-;gU)=u^4#@kiclLq(4zv#{2d=gTJb5pVDkKIzQ*;=I9G zvh0TaYWdb%O54s4TW=kG9d5hFk4VN@m0L&8t z#|whdu9pGdm)Jv|uAPmN`V7v(5csrloh5+3!P83z`O)xl*yTG!;f=P#aA@BN=a(?n z>68@v0Xq-o)!-tSjAHFaHE1xSfK^(Ky!TKEuByN(;*aIl$eyf4((;Yahq3*#L;<6r zp59xp*4MJr3PJEO6{QOM{p4i#NSq35`RM6amq3%cfG`UrH`vsxSDg?fU! 
zDR(Q&tF7*=k+A{XZFwWyagm6)Zg79ha`za7Zs3jO0K!@vAn3^MqbxkZTUAsbN^?8@ zylIEM>JZQ9t?S>#;}m|hEqLpgA@9Bq9DK~=4fedPJ4JHQB{pEJ8p_Cp$G7I~J~Mw{ z8;mG>3{eHDpg-6s|H%W$N<0TSqJcZ>!+2d7Rl6Dh7)M<)u8ZqS#EW>Bf6-%+R)EMb z-P)$FO$Hza-$zO>Sr=NVp-ob~OB(E{w$u3i7?ZvODDgF~b&s!X7TDM-OC9s+RtCM!ZWKp!ms#!b<@D)Y$&IL5qhPqir$wH*1+;!I6qJlTZ*3h#6M%&cM|Nw`IqS(!Q}5X&7n zLqR!pkOlEcFs0o6sOOk~m;1Hw?t$w`?uJ@Bamp)gnyadZvAQ&T zuYr9|)hKJs8@U-O=2+c3Tp3V78NA#bOO?$=U+FM#huUsW1eGgLo`FFXWpjM7E%fDh zW2XConQgv{%o?VYHDPa&tJDyo=Xi}S!+YaSalc-9QZ%|r*5wy}x}!{LueWb5xC!f> z)<`A`enk#+fWlnrvGU-JLe9j)d0cDT>-PspH9rSYYLU;djaE!|iP9ULh~AdSN7JjM=R0`DX?;P zZ9WKd?A?+N5t&J<)0){LcL6HSH>#}&6wk(%%O!--gfv*{$;hAojc`IdruPu-qs1w< zs{W~Dng7yaPvzQ=A?Ald6V}xwD-R~;sCh$NCg%1zMf(dc&XVl4bQFlY8H>Y_t% z*xi6$1>`xB{!pjcgcIG}dy2oiihP)xCDH^zn%lB_S>ZfSC!#Pop!L!M_~@i$A*4>l0JKr_&y zP13W2)yC4LOs4e*y@9fufqBKmQgNq<cF~AtHB>-j;dy>k3H%OcPuUP zqf-peqlfC&K9q0TXoh?!*~wbGY=jR#GVlYMJJwBh8Q6!sidZTxZgKDo=xXLS+MR?k z*9PSjhFa=(9?{q(6kvjn=My!E(i`jc)RFbAx$F*+cZDnaJER)KiNTY8g@ie0iE!@59rB}RD)X(w%6HKMU)t>U zw?ak(ioQcg^v*YY);I>41K@wv{YgKWZ~$5vwOZJkRW)A9xs==vsC2`FTla_jZ|65n>c~h>xnnnw50;a z9`tMCIm^`<9MWn+_R8mmF0Gj@BhD9UF|V_-sAN82 zzE0(M&rupPd-`0c16d?Cs^U_%8FXL~YDlRmn98zLBMsa$1#eQSw`_zRb~yLb{t7eJ zljs4w>8k2;Rq&9zDz;DYrIzR-&HJ7Jio40B=&{YL7;f}YX;l@XuT^?b*FDZw-Fk%a z;=28_h9u1Oa-Aawq&PYraS=6k{63gFQi?Opl_1X*t7O*XX!CIm+N5?R3JnkV3s018 z?wI{D-TjRuhckh9tx0E@&U;3{QP*>(86axT>2e|7N^`300GGFGYL~+@Hh=IM5|gvV z27y`bUWh>esB3W)lybR!hO*xHwv{KJwt_xS>d>JMlq7>(*ixKXBsFE|1d73rTrD6C z!d&Caed1fDWnF<^W@x~1NRJp$Ii*h5)IW+3mIUR?ay}_XH+@yeCIXuO*3K|NBK9c3 z_44;QxjrgMIcm)c{7X#c9gy6MzFmP?V?bJA{k+&}6llZxI18?bp*G~{*3zHGK6f-*QMjWu_U>^$Pet+H1c$nHLA zwiGxZ$Y}WbIilOfu&~7YILaREX%mfSysj{z7fw7X{t2ZQOOc#;RHxMo`o>^QZYEB$ zlnzs?I9R7Dm0_eYHlS1tAFD~Fw)r~oG$^Yy>hBp!TIkbQd$Fii`0#zH6bJv|;fhWQ zMV<0A4*n%S0CEW(;(rUDA}JsJveF`U6Jb z73BMgO3o~kl&0_CVnkw^!MSt$Wfn|nG&zxcNd~#YIC3g`zKYjudE7}}V!gwYbc=?q; z{#q{l{qVy9qV#gA2dX66zCws6&hG+38`#?S-8*?>6qKs&d8)A$PS?d1yu*5<+O@31OI9w)+#JA6dz=}bn0hP~07(fsLk%#~X 
zm!QPeAop85Ae8XS5nox<%WmZgYYn<}RcTfiJ?$1|ph9_YRn4C?a6psx)GwUI28r2}FPz$ppH^y9|KNHINWUq8o1d2_ zfZN#RS-aP^kz8OUOVdSLwMZR8t?8Q{* zrdPj{I9f(*3@Ove@tgu=-YEF+AM#d0UO~#u-FR{vs`xrYtHt)y-T~Kc2LP*^^tFQY`Ei( zL%QHoY*m^lc$}?q`QAQBE#~jZ<}YtspAk{jEatwaIC9)6>sx8nW@|v zCBVY9aRBg`aE)^>YJ&93=jJu)#*a)dt?2`ucdsyjB(92sqW7ESPXsAB{NUkBMivkd zAmZXmM1?W|tai%&z>nH|mfo+C}JDY`1`|P`KH-^m(%{G2p#Q<1kbq`=#uI??dIM^HbtMhVOoY z;4(goI+(L%R7&(4;}BH&{RKa5Ng>;MHGaUEkNgV8%B5V)c1BgmKeEHR{#cG4k zyK!!HOB{7YK37);?Q5%CYfbGAI@;|_dTuIB#4~RI=LChjPq2kcZsHHDkJv9u8d~ti zggFe1SLI|zjb}Ci=s`*i6*c_jYuoYWsh7?K1P!L26uz}12k26PPMmva>Hz71Iy2|u zyrs@|*;KJDB87@u){Tg(Nd8Ly8)aG=^7KHZ@}W}|Dq;c*ol+Sie1q`Zb7U&@Z;ZQ{ zZ)?p*=^-dK&9x-A2rjL%3uDy)Q8wZq3tc^;W|=&2E2c(onV(2M`FqIr5$PGu@I+`? zg3-)-f6YUCg>{Z=1R8AZ0URsEaxDulo=r^5)#i@W#QInn>!u;efUbqWoWNg_F6Y)S zV1>IL(3(1>Qy+^iY~Wqd^@F=Xa;aXW4M`!2gQ|19N$IBHmYySl-GZA;QR0Hyo$41> zUh=Dh(mj-;b@^8|sQB5`MJ#m$D0mt0Oqiiq>iNZeVnj>2g_GnXRxcB`$EvUR67?ZD zUQ3iN;Mq{3b7x4)_>^xcb=NgXdddT~B1*}o@+~mB!vagTG&99hrL|Ugy=lUY@m)8+ z6qUJ?nKaDlQ1$ESH}mvZiY-eHHpENU{LJH(WcvkhG$c#5g-S^JO*+@PZm2KI{09mo zU5eOPbN$9b6S?gLN)K=8mj*W?qo9Pc5t6TFh0#Ii%-7jZ?jFWmrMl)MeE5>waq{Nc z?n+L`;LNCD&btu8cH^Cgx`R&nC|7(OM+Xk0svkw=9It%EyGtwV_XDMC`5|ZL;X-*i z7E;N$`WkDqU#5SVhHj{}1QYTMbbng{`kenTz~)j*i$E>+DFMXZ5-*>z@&@i3g2}4) zeYXbXG%9BzN)eGJz`;=XF` zsmrxvZR7LP&iiRZn|s4$$|~kIEtW!F=A8;>dS#u=>o;OhH2Ahz2qEd-@GA!D7+748 zjen}zgdyrIl3JW$+>lNjGdK&!E>pVnjd%oCXNre~O;v5duj^j$UD3jJXgw25UjaBx zSwaLspokDc5HKpZ{ze7xC$$dTr{fd&^s7W&Txc&RJr%gTU>wn%c^Z7+o6Q0_&WY4c z!8beG^Tc*0SVZKtxLdU^j2aZOhPO(YYNJ`i!ukD9{R7-GBc$NQVFLpVEk7EtFv;WK zk4F`ttZ9xy2JR*34X+(67pQ6)Wd7~8a^-p+Z z1uC*$tQl5xOZdK@8KX0{RvK9Z-&JddjN!C{5=3!U=A_m&Yp9TGC+Ex*KTk?bAGjSD zV$<2JDHDS;Nn14ToiqFG1Cz5AU=m^2#N@Y$sJ&U3i^m=~U6KJBPdb)mj3`tfGUm0= z)Wfop>(*Q9r1 z)bTd7I;OP;qcHjfZ6WBatLsM)S9tjh|js1jn=Oid`Tk?3@?V^?&M5G zZ(LT}%GEjjms9x*(5akb%hCTYk6SAGPiM*>(53CIu21;TtEsKas)qx?sG)_cPuwss zulMF|LGE5J1adL{o+Uxe!>15E<=UNm#1V2V(Uc}|MPQ~jDdvzXtU`O 
zFb@#zs>K`u!4PLe{8;N_%;wfP=|a$C673QT%Pr1i-Qh{%skz=PpB_mJ!or(MVvI{Vjpr`_8d+984>nc7HUcDN4#? zd}|Svo(h<)R+0+ts=w_-rcOgJNKTGMJN%=B%9o&c z?Q~)Rq*mDRTeq|R=UDWFP~3U_ybjyzh@(6lDzqghhpe+yw%GUHhebVE^V92jC(;x= zp7Nl7Q@SGnzIvd9+2yE+#X*X~7IERI<{o@nb_F(Wep$SNWgb-&k->b&y2Qjp`%7{p zH~3hPvJ>Mrwi%_wg(W|$)|JP=6xiErmh*3j1UdmGGq^l58y zvZ3ep>=J+$c)_sUb;1&qi)tgIymp>mjVsiWb~OP)5TP>YLG-(_mLFanUa@m3wl>_R zVyQrk!%T>xuHNU8ss3nlMA2RH3@zZZudr`9#W&|jpnjtV_pfS7^Eo6ifV8^8ASGTA z|4gz38FYWs7sWD>>^`d?sy^MyJjKb;&eK)?qrAXOWcNkXyGb}JEQ(Fw(DI@xhT$e* zP>V}6ZoH8e&Ms}h6i)MWdEwG_Fi9`oqC=&!-*GZrv~*pLJ%Bvyx;tTBT{%F}Fe}@I zX&RK9Dm6eE-^$`|EsY(J(q;sdU&&QHmLded;H85_?#gft-pNUkF+Ds6}_~ zsD(9heO!^5$_5?pLQ~A1+Np!r!2!B9qKRG{iWsvSp#*<{yy1JCbbi+NO(wG!i74=sK}b zoV#~bvXR!qNNI9`Kpi-=cxwI@c$RV}IB9Ioz7M+HcuB0p^5jNY;4$%hLts6;BpJuO zLW&<6(UvBe^0_!trSyT`RKYtE%4_)e!u#w!-Q5F_<;j9VP)qr9UcY$X%lfgDFrW(6 z!^GH+^4?&3#$fKxWiHZ=m=?UfdDj{%8RL zmd0C!u=a3tN-#`}`L!Sl?sAZ^AKY|iAQpUC)%o!H`jEUdT95s#TK<6Y5g)P+Ut^Q@ z*Y$l$SVsUA8C=%`tk0`#()1K*JI!Vj{KFJ zUT_oS<{H!k$Y8nvx(MXZ3k_}W8kg-^?;L5b7Fbkrb}}1|PuwoXc0%FajfMrt_+(y6 zlZz3$P4+!%sp$vljt7vbk)~vj=)bVf)s@<}8Wl{|jF4xQ#aa=Q+Yt+ppozEL&T}RGY1$ec_;ce%FVG@E zn(NExTYN5ryLjKO`VWBx6*2Ff+z+-(G3)9B0R^?O?K331#K%j_qV-ddvF-ybA#9XA z7vr|iQHh8HIiIXB63%eUub&V@0z;WhwpJAAhJL+wY-LxKyBFyRRgL7c<)Va7=|A_J zM7H>rxaVe7lD4=likT*+igRK~w>E4Ep3}S_6Vn}d&b1uA?$z4#{?Z{j;caJ2X9 z2P@*Q&X_EOWwW+B0^<xdlPk2w}%90-eCx z7!6zEfH^&QIJ5M>h#Fn3!g9grT6Bh|yS{(s^5i9o#n@Akjp`F};%zx+@9T=T#B|J{Ql{onXF==gt}2Oj<>SNyNY zJLvxZIIsBc_W$4g{=xTK|K0y@Jpkx`n#cdGKlqRF{yAUz@BV+e>|Z$k-TtSZK=oID=?SvT&nJk=llTcVzKaE{YN?Pv6TLnY7HvB}~&np3Oc# zbPY&1UTa(U4$sC%ISh+lzq&4Cbtxr`ETFr(2dRzv#=f!=EpW-c*10g8O5-i8Qe5F| zZ5TJQv=wz-V)(edzN*47I|k;>+|@| z&3Wk++=5^|y9B@YmR?3OUuGc9E9=y_U&4y3x-{Om3gYkZP~cv6yxYrvt~s92mK8R1 zeC1yUs&=SCzoQ*ZShYM(f?2*)-8#Bz2FiJA?o;85+>D(C8W`IuiBvy=2jya~B-tyd z@(l5&5&^IpBaxIHt+TSqptTwlUp5UI+l{W67;j$>_%;E5%2x+gYePGNGT(+89cfFJ zoz}JG@E4lIp?I_Pw2S$oD<5de?l9T+tTv5vRN=v= zvfS5oC$fK{ygB@w#?DrCx~n8YuZ_+?w97~K?Of( 
zGbJ(58nAPr21^4ziZgo#iWtTZXfKDwIIDr#P;JQ^h8Z$hA=kZyAq43+6myWR&%{ z-HS;R`tjuOLs8i7>)_G5^Qh!qYClF6sh!P++nH0@m6q5`CMHGDY5b-SqDc2>vqEoC zVu`Lro~-l9YQud-e=2PSZsBnzlvOQyC&ggPlzow`(vaKZNWNE&3$UuNkS?>dt|vjz z009cjBtwrgf7u&zObFhN=Dn&1mvFg_dCYxm>#q!M)CqTvxI{)1cz-*U83!8BJ6Frr5n&26 zyj>pGzYQ|RBKPFT$sy65>|!#j`n83t7kFw!SXfgf#{Q|mQn!T_2>tnm_ymX|cmX-i zxoU)#U;J>LA~?^~?Nj$yhOqY}wm6#7L6yHf2Y&^*J_iK_>jnYQ^p9gOG!k6r}pa^BO)Qs7MrtnYl-O8+t7v1TjLc? zt<@oSgE36_+a9ht&t~5M!w^I=Mn~$V{cC*6L>%a6<&<&mEzUf4@rQ)7n0+AFC}&OF>72_rROr z3kv1OfIke{+bNpbNU(9E&Blb>ImXyUO7%vB@__G$Q6lYsh2y48I@%xv|uM zTVJx}KKf~{ticuEnXu}%fDod2`)nv3vWtmC)W-F_QGdZls=0IVb zFid05MA)^vog!v$g;JYiT>$i2H@bU?V6@!bxr13Y3_3jFOY>{V_#m`J3t8$OFV{`! z7S`pnFptP8KVzHvYnTNimdn|2O!6Q!9DykLb`pO9;pPJ2=f4I4!64#7H3CMBf41*} z4a`(Uvkdz*ZEH)#j~}H?HF*1^OZmDhB=q1jj^Xzjy)~=CdyAd+^I{NWAhRuoxO09W z(NH(b%BW54T|5k*P=_|qeYFN*ZcnnvpdpmbcJ(Sw8VVM5%hhT3x~p@X-W^8-ijVCf z==0A~Ff+B95}hp>F66mUQx(rqz0K#%fx$=TFI=G!hbpbCdZdIJo?3;A>gEx$rAw;= zOKA!b*sm_dP1@|4ykzFXO`xJin9MEVIy1KUK#rBUA6o6 zKH6M2nY*ddyy0W*bb93oP=bxsi);g2T^huFI6tBSB6`!^WxgpY*(yqw<%W7?z>1rO zmP9y0OlELCNOzm<=7=$4R+}t&rl!gRAAi^r5byJp3MlAbMK$Cv?O*)C~ z)soJ6>Oz=gcYBBV-c^^QlI5q zmORZVcrG`H^k1 zEo_CWqb{daLkmpNI#wK$VoodNI1js>Sz|qodP{z&3=x{LZ7-Q#Kam zKvOmIRYpQuAB(#O_R%s3XUCuUr595?o;YONp7fDhDhu;K=s5_EqTeO|5$mUmPRB4q zk{*fRSpW_t1Fx`zBs2uErm5M*N!VczA$Y_wsXW$3fl!(0xM+T^8-*qQ(LZwnL^{*tc2kHPYzmQ*@%v)79e{QyVYLQvL}L zNqSS#G9-ILZ{&)YY-}e%n9*00g7bj08<8az6w3Zx!Vp2`>Eml=Pfj1mfV=1v)8O4% zM9k%Co02ElA|2sVMiHLNg3r_|{S~FyOr1^GotFp|F1OYbURb(W2+HZoGH(Leqbz!{ zF$D@7&8<%6+^=OTzf^rbunPC0HCz8I`^mm*)7N(I-1$qL!I}e>O`$;e)8&^v0gpoF zEbb4X_j#DHE+z6LJs8k}Tjj z@9PJsEVwq55o3xSs_Chgt}waZLR#U1lh+$~2MVa^f8}M;`;X!7m|IAvmT(I$l4$Hw z(xO4N_sXKH4|VT2NTLKb3p_QWgEL}sw=E7&M`reF`gdi|fh7ZhO z!Me_qyBxl;CVPD3`BJqQu7UxQD${N(bYkNH>_i{E>-ohE(Y$+-EX1N*F$?6}lMZE|H+ndP+UP^JR0tne989Z_VOt(>ZL z7ft?l4!<>rGsSIcy?$qb>%-@W0dopZF~|)jJm5TJa3MI`T5!nN&+KpH-$;s)=a*xq zLwjyNOuJ z9{q;3^7NGrM>61ACcc{2XYi9P0&9$aGb_u=X+9uP0|(<=(TVEDTjSmypPTHJe%RAD 
zA7;wzky^Mk4~=Ocx~rj)Xwa)s-7CrV;>OdVnj|Xwt9btxiqMv-P+ZqbArHb_G%@4t?sN`zY<2k3wqp zaz<-)M&To9@hN18p;4!6`za8a4@dhZE$pg9K3vg3FPy(McLG!vuKxH$h5F2S*m+sX zMm~Setwi&=@xyQocr<0*YX12d(q}uA2CM~lLPVJId}oQN4l{>X0GVZ~?`;n0_bAHHta@g?t$2m8q4^r*hyzL}`Fd6M7--)ICo@;s)dCOsnmt*gQ;zDCOVTRL$n{r8gF(0i{dgyHs zHpdpb)!q1Xh(as9e-b4&jR`!i(T=46kl5o6K3DlTEBP=GL zK(9CfZnDA+shk8<#bewq^Rdg$}ly zQU!+9#L{S@D8H~@8a>^2;$+KQe^C{? zKcswEIf&=0in?8knm0wexGiuFH)Q07%a#;edqDKfoC|2)uc@p^A45Y^OMESzO(EOU z{l-jl?=T`oZT0T^mR}Ur2CPS3{OtyQh4D8k-cY&F(0qLnxL*BYAzIGX5?hy0A{=|` zGWI4xLk))QdbrC8Q1(M}RQ=yn>;Zus$D5fm1Q!RmIcuaudD=1Z!6FcroD)1sj$a==?_KRlUauECX=_pLa`Few@M@%O5)0}jT}b%2q@f$t|OFg5K5`7 ziCVPEa;S|y2(9-r%5qqUEs{%z!ji_M=0duJ@f1daDE`!*jPk^?cN9*s&{lgfi21`p zxcUrwD8%BaGy_jkG!$ra#38!#F1ll|Ee#)p z#iZ^=bgT>RO=WjD#(;p0*lh!XM&msp%k~C7k<70*a`Y{Y z4q3z#6-NY^LWb8h`50ohy)ep;!-_8|T=4-WFs=tG)}$3870oN`takPZ5o5o6JHfEQx2{dU=Uqw%Lmi6V z^L10@r3J>8(is8|C$gq^h2jDzQGe?xhG%O5at)|6 z&s`C!=5;H;9NYliY7Mi8|v&wh_<*Qcc&wgJNVy$yB za2Gw-IjZ_lR2ej1S25DtU&X}xs3=0R4I8mm%I7%Bu%iffu_Pgbf9NU|gC)@{1EfH4 zi4TrB!0ZK)ib#^|pkFl6-FTSo8Sy1Wb^Td3o{{MgI!R*;!MyLBx)P>o!K5g|WVe(+ z%s~{7Ca>*V?A{qcGE6e_iCE$QI#w4j{H~%$d@;U+CsBGF*uV4~vY7(%$iDLBp1%?n z7#(luk-h$$ewa?N!!Y3<9z?Q0uMd+isMrRf0P64krGK&je2Kmw@$1Kf=P-Th(!T=@=9C4I{5w<-~CBdy20_ z=(XDc4;+UVBtInlJ(KqIxK(%&X?pdUvMrsCiPsV3@15E-a30%DCNM8nu4lx*axkkg z>I~ODIe$1j)M=KYiskpnou~+*{{}QT?hOJ}{^>>!<`j!u3KDHg|5GDuU!2G$0`SDJ zq4ye9%SDZwI(bG0l21gPoGOJ}B!`GiJALnEji8+fG1$mOwsbf&HF@;ILH`A(5c1&6 z2f}{78{l?4BDC%MPez#Xrtn>M++!*2*GcFXp@3{aj;)qGdUd2|V&KWOKrh0vPC9p< zdy^4t;+s3*L{<*?aPfc-SNfq(0Pzp_cZRQUPIMO5lnrQeGqqdb?RaL!@ZsG@|_{B*MX1w0Cpy0h0wcOwrSA;D#CgWxq zU+5b_9kpQ<(X5+gLxZ2s%ynZvUP7v?f*26 z>}%yzCJ@)(;tJiH^L~0l;&ohmXFne8DZLQfLBbZipSBl9;|RRYIkq4HfhNITM4)P> zsG8Hqbkulr=3}+hqS<@=lhc;iO0cu*9(TQA)YU*x`og>Qr?dQvJkO9R&6_nBjNVl- zzlJjS)X;nNA4?qFX4JmxdbA^}6maP$_`#2L)W%8!KdoixPB5Z>C0=R35yx5F&r zy738qiUR7D%Av4Z3IOQX&cz%3;Ok!_1rStKHuKWi^R8AD-?qi?&)D4-E0o{iVS31JU&C%Acrb!Hqy4X6_nE4)G~tTn>^`LL02Sa3b!ek@lw 
zm8Tg!dyOI=?&#x#x^yLW8q0(Y>bxF4YG6*fX#C{5f-jB}JVeE;oiu;4z{MNM%gVYl zEh0ZJ)tS>lx8;c%xv;BNVq-mir+vAc7~8qF*)_|+sCo3Ebb-?4Vhn8SnYbU}=ep9g z_e#e)X)C9Ua0~O!NmgF8qMjQ`31e)75gzfI1CD92}rFP<(|GTdfc82FBw-xccgM`DXH|tXaIF$ zDceqt@zt~b{TB6s7748jnl>Bjd3E^op#o`%rEdQ~(RSt8{aj_mA^$&`ui%vIz~~3U z<2`@Wj_C=X9gk$v0X^Q1csF!qXOWfTlP-<*V1xal!9v@HT9d)L%}8*wBgdpD;@6!0 z-2aEKw+wG%*|tSvh#|%pV`k=<8DnNwk1@W;s3G2BIlBBB5 z1F-*HKyBs>L`D3$zn0#GeR^|*o1~4LHWnNFO<*7}3zo9+RP~uzy3Ks9H5@@rS#Z-m zC2DRhKlS>)?M{YiDE@;j(euKQ{q_L64jFNr-EaAC1N(c53fMA6J8*DKC5&&+X*Yd% z?9bjl`Ro_zrES-@ZK1C1Q$!Nk;T;%86qqKwRKt#U(@5jrU&p2v=;J0xC2>~i{lRX^@Knx{4|D(+47~@~u9oYI63x9I8X}8Z1=-@;9$RQ7Sv6B{yU! z8!gXtR9n~L)bv{NfDiZ8Up?2db1g0#AedG_*K=Z9=;bi8Q&nE;xUkIrWc zZ>l^cls9EG>#vWIJMXpQ^+Cm3PSdo_lg>px3iWC32JtjMl}Tb=gs}t&0fU;H%|Tj!>F|Omf_WOw84Oe*QcQd75?cxFHFtv82J&uvEt)x(;a| zYa30lo$r5U6YbCb;B^AyK|RZRcjpiIPTo9oI}!C2Oh9XTv#RI)0sU1lV?j+A2M-7mvjDwji=ln97(3=%;i*i1ax8EvO-+fox^a<9qsrgSd|BM zg)w&TC(sz`YR~$P;0kefb`|kDt_M1Ei&!=i>k%|Wyj8wmO6p&tsd&$ z8PdJGPb+IX^V2;SA@&Z-zHHAlp4hRs%Tyepag|-r;~vtAjkSj1;km^Q9|?^=t`Gc* zWiBJApP!=_eTAW9a)yj;tC+bRHT>l!J9DcdoAmngY;Z$$F4~L822mYy$r5^`h-rgv z{dKy%`tCjA-F06VIz%e_Qd-OBx{_x`uLzWmQF?p5usd-BC3?G_T*|d_ydib!kN^!K zh#4LZ?Ai5E3MPQl6wGPil#Y;tilB5kArO`MF-1ncRb2J`D?>AA=e*7ZgbM$kka0}V zc}hon0~65yJ+7mPnGxvq4?F!o40G^$2GB96{GX8VzsNF(ZT?UBizL(j6a9zT4j%Wf z7<2*H;J;$fA=tlF*55(#$bZ$Z{8zm2U-9O@;*X&BAN?x^9sZk%{}(xb`mcHry8lo4 z`+fe4IS=;mPwj^fcqRWYG9J*K&lupAp|;0;)$rB?+HVfszMC(VNga1aYfjLN>^?Se zK}Yjhcz=+VIw^vKbdlc9>{-*K$lZ)nKOy0BuRyNsC$POmVssQNk6;+d>*b40+3@)R z$}^&Zi0g_5j5y@@o6zIny3@Sfd+mX~ zp{-7h)tkoPP7Z%(Qd$-y+H1Rtrd7N5i%8X0`&NQ7$__E;DHV;YvC2tW3`nqg=KW*( ze%{RYsSi=kr1>e5q<9zSzVT6u9k=^3enpg}e6Lo@m^b)YhJ4#&mQA@YO{hZg%ftF+U<+D`^}q@x1Om(c8`JwE;sdotb=;p$^nVBn1Y_& zX`)J%2JtaQgooDghRdaL+kyXq(}m>m18wOjS0(p&4cS8|hMwV?Dc|qHksTkTpAa91 zuS+YA&V_$_9HyzFk^~nR?Ag6f46wn0<2sBnIF!A3w_DtGT;E`|ZJn^E>=M)Ix-+%0Y|JL1ili)I^5~avy-mUmsLLI=u^dI#w zkevG~GK~lb-JY-S*%M17(9#(@(Uz^ZFEMsVi+n@|DRCd5hKz83j2Co54l^SKr@BFu 
z$iDGuIkze#@W9o&1{%L2dYw~qg^-oFB^fE={`j;ybE_zaUo(08(*mFFd3-oRJRn&` z&+pdUvr5Y`bM0|M@>vCg&u5%9>TC;z)7ymVy5@-08EaaW7pJdD_IyutV)BPE7Z`vom-5qm~r+}qQDayuHhez(D6{+szB62cF~_d-goHsL&SD? zx`0((PC$0A4mC`z>E@ur$3xc&C9w$&U+;5k-?=l%De=AKmj>EYI1zw`C#sExs2YlY zxeSEC;;y5nIX)$QSnBLfxL6nE*ACb@yYo|^rs>m@axPS!Y~#!H}*@XkVT28 zxMx3B`A9L<8FAt1?bt<@Q0p%(oORr{FQVwW^8?z&OefZ>a}FPmYI)rx#g{XXS@=VJZ(;Dx@`infS4-Jrio(dZDhlx_Fnm zFVS8N43gcODqu;Ev5g5wc+B-qwnKPn#5Kg8eeDncjkkX?-ojs5a99!ie8DfI+LI_Y zcIFFZK%&44DXRcbqGT5nF?lc&bVooywc(ksPWL*L=Eo3Gl#>c z@j$5l2rsQ62GHp53y;tvcx3^Onowrb`g4~4X;*~{GI2`)ameM= z457?tZdX4}`Iv5b5(-6O5?vFY$>AT?kF#z3HM^6l{uPnFCA~6eY3%W^!8%*jJBUXk zsrgD}R^O*6UaFZPL{WSpQ!kB3MJR~vD83JFIgwT=-|Kxsam7VngAbmTU<~dimn{-w zhe@!**WzcNn2YDCpXuB9Hb;32c?U=zfhaGd{D3e@WP5&d$BWFz-@p5PKSw0xj4%h6M$i~E zDh<{e>j&2IKb+N~YsB^}DKFW~_B0x>H6)NNhA+$c)J@lmp21X%B=~D0*~$9vUli{W zs&4N}u(WHaGlu-6MHD%kxZyPP*>Y_&C~=U6>ciyi)dDINkR!~#bGOuDXf|JSVKwl2 zsRUKew70}2-k@=R%0F4jV;wZej2>}S&s40W_f{I5x{YT$)pPr-If2dGk{vtcmay6& zDV~2v&&`+K`bBf!ZeKp3vSZJ?ain05+Rn7&C(%dYWIU(GbCfcAWm>pp)9|3RVMz}R zfET#q(5I{&9ng~epT5=!nq%sK9C`F7H?2rvAd17Z_HRLH&5fm%rb^nZDjKO;Qm%)7 zA5QFHVHbw%B<~Quga6bXZ#5pxCycsN$^4YVfAu^*;iB<2(8o=3*8I_aDi2Fl{e+rV}c92 z%o*2hW+oKG8psaBqG$gL{zb%BAYj4*#~*EbINNC`iA241B5rWDTo&#k>?x%y5AOle zWPBP-C-aOoOgrq&fN0Rh9A{fz`Kdb`d-p{lH z+EV>#4Pb+ z-IPDlAC?XWD704K)@u&1-L7&c%@v3;@U;^p&$FSmKG;MBzTXu9g0I)6_RgxdcrlgMIpg?ECWk zHb=n^e{4|!&(Puqet5KZI?gjo3mML!|G08(g|@NsT$-EVX;=z2Ri)M=&BkZ`w5Ibc zo&1Do;ZVW~XPGv*`d#FZuFS5T_ox?&uexcWy}|hKB~%=wgzQ8^CsrfQp{}1jc-RCN zQg%EnFFMyb`=!CYK;`&b;%|>We_wBsWr2(fO}D0>*(RDiZ~7n}{nl{k>%|6g!i&n^f*!6En$? 
z#r!%Gs6h`lJ_??2a)?ZpKnOPde$9J+?pk}+K952BPrmv2*p%{ zZd6#J5QdC^DQQw(woNi{4^*2dr>9&imm!JaA}DThRR0+NH1p$>`JqAaiheb-8rYxn z)uB*rd!88>VvV8o&v4xp3|w0$l^T7b-EzxE&6?%DTq1Wh?z2Jj8L;pN`D$ju!-GZh z*T<4;-@)IvvWMW&;3Ds-LIz3po{!*l% zqRA1xt^5~nuRk_jU+rdab!?R})0NH~#hORL zyOY277%~=yOhk4sZYhy9^UyJ-pTu6nw|eU{;n$pBFf~`r5ebXi>~h}~Yd+b?seX$0EhfW&7r&OSW4Kw1pJXqr8` z6k0b*do^|JBsgO<=(Q?SA;)od8J;U z(-rkqiHVZXV$BUFue0Wug^HLVQg$x5!p5~~pdz?CMd-V)2Mg~Cx8CE)u={S?ImvWT ziorp5-gWi8D2e5%G2(a^e30=AS5g`Wua2eLZLFl-hKy?3Gwloky@UGc%;@M`gt{); z%1;az$%l`PQ4%s5Qf0at3g9CDnL~p1o~MIMeHF5b&`hQ>p=nBRJ?t#76Z`XpX_%wk z7pp0RACJJRGUzmvjlxQW5D?(*U{*=789MXkV=>vQ>M9p5>xr=>;)A3h+n zo0D!sYTGA~b3Az95H{Hf)aUnf?#B9r;l5W4L7WQ$JD|7ww0e7Wt&s%dcty*_n&8Yj zYUf&g6Z1U{k6GCW6&S?*?@NH}=^g;~inn!+`&({Lc^9iSwN!Z1^n=5FXQQF}v=e;M zP!BjY_~cK{3QERcLIon^f+<$re6t30LMoB3`Az$a2f< zl`5PDnB!+{i)k1r<-1GmFUv`%G+j96c?@>f`=&Hmh!XjfsZAkU<_ELQgxp)88%oxW45pdmWs4uNP11>{ zBR>#$=4rU)3l$Z3W+ezM>(fM*-=UJC{87T%1pP|Cf%L@v|z4*`voO7#`aD0E2Y>Gwe3Q9D5LCAr{>{T`LiPK z0lqz~IOT@EgK-dNtY`K87t??qT_x%E+IYtx$dGD72{Zbb0IS!kV(^Jzb|*GQ8-qip z5=D;wFyd$F>w>X5_Kt>KB>P?eC!B?7K(X(QxQpuaMY(ZK^@4!|rl^&s9vjucIc=5p z>j-oeb_=U2)T?7Z(Dte`{*=z2@a*}=mqa;n&*mlU>5hX6z?vAn4(y250v*#0V~m!$y}hFbf|k{l^pxjMFEMa_Yo&*vEzbRn>wSF(>*elIMsBUd9P^)Zpi z*PJ4uzdi8tvNx{NG^yE}u*_L^X9PHX%+#9Btx5i@%`Wr;OIl42WJf~{WFOD76~%D@ zO^@s_Bck@qfvMh1hIwWpo>mh~G0Lo|8Jzt82;6||J^aD*7<)p|!KYj#VscC8>!{1e ztdwq*51A+;Dd0Lw4VOYe_9?m_UFb!YMEv|jwCix;`4t``^G7IqsJ;wbUyONWXsweb zes>iT!F+1dyy|TZ6}T?$+(Nza79tD7PWze!^9$XNnIw0T7XC#Cbw5Yfv3#R}WB+ZS zevz|M%ermG9J=D=VDhEjhO1RB` zhbyMNgn5*!4&9^gwp93?ag*N*rRbpjVCiljr<9?BKGk7E>vdhROi(k3?HB?UFlp!oi!|(@yG*JgamljO&4H-tPd|TGsM)0^yozqEV^NrCtN?jc>QffW zfCQ`bvUOjIyLH>KjD&)g{`}wjxs-NGMaR^IM}(zaM1joSHUI!&*Fgg|<@^elxJrQ; z&WN{Xz|8M9$bBp3)xw0HEDb?14%zXzhM5b=${X<>VHnfcQGi3$quOfdxg7$Mbn8)T zjTrf`kx5u7$f4uvg)FF!k|Y*I0^cA@xbrAQ_*)Vg`Yy~Lrjpt19|1eimVKVUBvGzl zOUaSt+H}2&r)9*4bxw?9agkDIe{Sa36U@&<6x}F(7PK!OjyhdKS?9la@JdJGef4PU 
zGH;)BHK3W^OHQFq%w7O|XHTg>`-#my^YvkX62T^U*Lw26L!ScjK@Id#V(Mvo*9cQwrd&5hC(A4Tin#G41-Jlt+5qHHTSqmWG=s+_w zYMX^4r`1D$S_d&UuW6xM0tPJ%;!HK0e9xrK9{8fVlJItL+)SZ+avJ>@hRi78ulc5> z?8RKon<_Q)6iMWN#dQ%RwmO43;`ECvg@2H6gn%{ZDU#cOV|&}z&`p_#U1FGqhNQ%< z>gJ*r>8p=9)o6^fyf~fsDwZ!)+mi)0&0-sZd%_3-ZdaMTHN4;c?}D&sUnGo~L~^eh ztYugQDFzC@k6t3qzZ+2MbxRZbtJhG~ckV}KM_aJQ03uHCh!YMa_BdgI1|191Rqyzr z?BLFjj13u;VOqo%P489pEgkBHAGN=&ecKv$?h(JmejaD+Ycbt1N4vsLZX(rv>kwq| zk_)a&p-%l#SR?EUQWq}5MVgcJPUnzea)yB~)&LJqudx+#Ur}tDrrgU?K?^8}Aj|IL z9KoIp`I}(+N2Vmj7o6M-bK~PhNZ*)@6J0bjbCkdX%xVv6m)`IhnU@MjMV!1C;k4Y~ zbU43U17F{+PqbL)&I8i5bk{&mw0SY6uTFk__ag7o$&Ir?wb)FXA)da#erKfzrmeU8 z{dq>rMmnIq8 zgx8eP9N=OOe$dMsF$gn${O=m5PR_zwvPaO!dEH$ROI)J_2+8XKD7D}Y7Cr>Fp1?g`#qjF z?d7EA`o*vn$zn}GM2PSMc@LpB-C>gTpTDL#6P^wiU+XSNCM+gwynS2Y5J={TUemqb zm@{CR7X@^xl3Y>b?=Zr$2~}IQZ>I$9`;B0wyJH<*=%vPNd?w!fRzcd|JD}en=7h^y5YYt}2KRHflLi70D&aM^-c!nq6`d=8lVTFy2wswyvsl4SVwiwE1&8t??A5 zopil@A7k>tacMjS9dvAA?gbpSVBv6&xT-Ma1?tkw?24IXJ?%#-2(55e1Y zs>^A__5P_Ozs4OA5#yX7W2kk<8(mg%uS%E4Gktt4Rl-W=#qz+|rmv0fc)A@LGtJ88 zjOkF8IYKR<+e}E^pLz5mJRn9%iZ($gJpR>xOgr}kyJDn9?kKp?vLPy_s+wY!s(RUb z?<7{TsBYM?R5XoQ@_;l$Yp=P!j=J7~VRxy52@1Fqsk*_ya1(6w#L5&QW!T(-a!K$B zseVT@ggSsSNscgN4#u4P3qzQG>U~FUe)%7|veVy&2Z>X4#SxlsC>O$u& zm*~A8&7k_6(EKo%HsCJwJ+k0=pSaz?*b5s;UObARzL%~gM>2W1t%{Hi`o7ve&;q5E z`SEe1Fp5)-PQPme?_nFyj>|vaLcr#1Ad?H}j1AWe_;{2QB|9H&o|L|GMWFcd+D4yS zqgVd0AsCslH?CGZ^SiUv-Zif^DGC`zh0zhGl9nTuyl!K_>`ql9DuumhTSu3iqI#^} zyk4^PFeHUP>3d(JRMFM%(F1fEvLxQn;n&*Ff}4jn7g?O_0uvU2Hb+#>q3L9u<(>%+ z&n2f6_{Z;t+NWx7N~SN$5x+Yf+*dERpE4biZg8x@z$#07vt2&y2D&QQXl5~U)+3w)$`e$i z&0d3#WUiNMteTzHn;>N}ZcyWcqzSuGU0f{4YDz;AWLnbokR?q_`h@IJ$t4CF(Lss_ zcws^$&~k_C>k1pMehfW4TB+VhR1GZ<`7LsM*A4Kz+;gY$`a+v{2k0lV>%>9;xHfI> zq!&|z%5~X}mgwCG?cWv(MwtpHh&Xd6D7MVNvVPn^_B-w(o~c!bxjkO(HjVY$0V0ik zuY=K-+}j)YU%eFskZ*L>pWw0S*5bkLZKC(_%}YVlGKXI->l7x?LV(r;-lY`?H(TG+ z`=o>BJpS)v9FLf547;i-TMzD?%nxrWJvKBWxvLIMBt(2?Y;P|a#ew@{dp=mPBRE&i z>;)Y`T_pznZTwT^&D*!-zT4}Kveu)$+oM_|WSl0C+AbK`Kz_jW_?}mMYar^U-4?V! 
zyiRiE^RKW|*nwPAF)lK1kP=+qrNyVUT}x@%kq z^e>qh7|7vs%xY}V%8;hKOj*BnrLD77Ga*MeOw?vf37*Qlz?V*PuRsQeHcr+1q!-YL z{o?~@(7ymR|J?sf?6PB9rzmwT-5m-wJIduvK;o32VY22TKBTjvD4*ncNyyV&t20*_ zL^I{x8XH@1Hb|9!dsuCsdAr?n-o73G-N3w{&|;&v!p#go#(UF5Ww9E2nONQ?=*MgA zwXU+BAg+LryD2bxl^Z!2t$maAezgMDzo$kCR(kr2Ud5+D^AB4-`Jdsb;0Q63{oGGg zggw%$Sf1Ce_yQdyYG*~WY_Ar!hfukL2f%uaJC)|`9z9o^mpp2-%VYos*21DSQ_9PJ z^;4YJK?}8PVkvZe)#F<98A>!P|9PER`|iO)-_62ZI!;&7I_xOOgABLfBt0;d#@X{mY#O}4Ziy@~ z&j;ATF`jq0&lN^=I8Zcn(|In=DDJDyeQC$f$gSr?6c&*ODU5{lPIS>s0JBtbYyrK&bEk3F-aH6G|BD3w;b{MV{J{V1{|(Yi2LtiE|0#cwakPJ;4<8^vC^Oi9Bj@=4TMr3B z-@!nh0C)bEdi|XZX zU}O}arC(%cPjzy+LN15s@f>LVyZJgD5#cf)PAR}Y@@C$%qnVUB2&7^(w4G3{WMv9p zbNcNr;9b32BWMUfoqPQR7en?ZgYeWQwSa>B{J>*d^J+bBO;fP>I zhI!AaTzXAydnIbcO4_V7kGdIt{K)$BF0x~DVIG;1Df;+EVKGAN?a5E*^mJQ8M+6p4 z@M=mO6zPK2Go-tsW><+ltX~&!Ov-r7`)h`vFxWW|FY~F9HtSAOY@cZDqTdUt?LDZe zKz@Qpl%Y~*y)?CYKxN8HOP2YQGRlDkbz}0_Mpvtmxucf(O2)L(z zQMS>Z@#4$9#3w%PJlJv6c?qB3>1f~DV$UuHjc8bY&?WDUfB*wxwa!Nb|F+Hmdv{%%TD2$YNbJ^X19;v9S3SlcK@0 zkk^9G_^?1P{d&kV`rymTPR6nRpiX0RCy6x#B*lg6jHev~;tt-V>McH^?{IcxldZC)Kef3dgFd6xzHD+(_P zyQ>yD=*|^OHcb2h?w@xrnNRT#SHgqIKfXxOR;)Hx=qlL?sT+rLzdHQYt0&WSP*|lc)`iwxoz-sNE8M#_P-I&bm-}fa7NM*rAKwzr0%{c%K^?2D2HEm)8nFBN$<%CM!FZh^()_4Z?TiTJkZ$&@j zOmjW$o+cN8F3eSfxJ;DVI>6z}S>3xfKs_bJB(v&|f&J9TUkN`}NRWizUp8j8uk?LE zru@_EZT$fODn)0@8f{1NHPwxtUI{o_;Ytok4js2)@9~E0LCdJ3uZs-MXuI9Aqbbv2 z`@+*mmCw^-JUBLgEO2O4{iK+M9b>>T4D=IyM`bs0i0<(ylzISjl?{R$wbh!q~DG+~%kLQ9N z!o;X@;jekZv*0toEq6Qf7?*_{rq7s=n`n9!Qvz0&%r(B(*;#hbjLL@vV!rmY9}-@r zyJ@0U;Z1MNzJ01im_=A<(zBOi^x`+wrS(xy4*Jqn=9*}GEUZG8M>^V#y+eaqLW)l= z@kI2~<<`M7kmB^A+odL74F8ztc~CQWF6*F}US!%>=xftWHvBl3$m-YZ5|bx33}=KR zeal8T8yJS7qp}F9p#>xy<>sCP+t?D_h2$BQRzQqe%#r;mm`;!;H^MQ>K6b% z3VUzoG=>-aEVv*f-=Hie|4qdg)!Ang(jDPBnr-vCjNT~Gf5fN5qpRp`-`HwDWSHic;nba&w5dIWLVR z22dAj*%5R)h(25og*tfSb3LTx7V)da`?9?_yDv_Q*tpusszIZfX844!tZ|PVKw(?moW7mAN|W-o+_MgKrI(!y9t_gmsH1&f-{x z6fL3WxP^?=ZnEr*)sJ4Komu|i%9gsw+hYx(XJ0P_EmVCr!k+KoWhuAMX}RZ6C_l@5 
zfXty4BCOPFMTxHGF7^N!uqCcc_zAe8QHXO=A}VY#xu)FhiEUtDcGR;+HNuTONZL|HY}t! zOx$nMml_Oj(y<{AX7 z{n{(tY+y_%Uz=A|lL}LQYOH!se60*~%wfwKMF^y+Exhz7ENpg%wtSZm1I}1Q|r<2YJb+giuK=oeQRR8P%G;j?uedI^!4<&an?gA zmh$z-b1U>e^Xj}m_s@ptZ?xz2ui)2PT;A{*&Y9nszg-)9wf?#0jZ@I9H%_Ln*uipK zEq2|!D*v8xn>n!_(`1{Z%iMoj0d%?!PCjsce@V>NR&-`8Y=%;pXSP{$I)`vB+*>hX zF>$;Dw;1nW3EV4arCXj0O;(D~M{9*ba`BVD;x_>6E6FDZlvuRAlB-!X4!9$l{#aMk z=UZ3FeKgSGg`$;{Q zucIqaP!eHjuF5_({7l;(^+)9y7t|-#K;Fm#zOWlA)M?b30L%*3UTIxbnFy(^cf*?u_sJA~aAU4DYtOaWR~t#F(6f?^0{eo-9P+#3`n=H?a-?xYVo~*^;noM zqHU>oY#({q!}xoIlf!I0Z=eeotBi@xBYI~pb}HFg+4FvTgQXUf3$$fnJy-0qoy^kt zy}NtHiaKm7r&fsyjOD43`AfN<>~rE^FN$hIbY4%g(Dr~DnuVaQU0xnd8^#iJ@pdxT8 zalqKDWRZKs$x=%MH~xz?(z;xJW)CYUQ>6Q1ck>mRc|L3K>7^}4(Y7XI=QUoxvSG+~ z2%pM4k`U1g`qzng!^mbSFB0KcjW2g+#YHKule6W%vLXZB$@=P1guJr>Hw3S2&S^Li z*d;3#K5{n8O)w6Gh2aSf+J@p9s^?F@JEo+S&Qy*|Zb{~@)`8S6y8MZ`hG+##-Lg9P zkMJ6@7K@~1levOQ61IC)%uGhFYj+#bDpOt(){VM`WA%=6I;-`&DR(4^X`hLM*GQoA zFJAxW^hTOV3(mlmWgbg7x)g9>!3>}wpVx`SHs3c1htbFe8yzi>9O;m{Leq_0+nL9O zNZ$p%wO+mjez?rRdSDE<<^1)WJDTIVaD$eU6{1e{q1QDk%X=YS_Awq@Jm91ugB|zU z^P!fdk9oK`mul-ptZ(wd%YAX7t60DE9t$RgmhGU;zVSDLy83LAU}-qzDCGCt zZrXJJY3CJ7U(;wIy zSP`nvR4VWk+UgCOzP*Hc54uWc*X+NYKdo?WwQHy4}NiFsbOP%by zmvOa^TC!_-1whP*9fFNL?o@e;aH8G057t?huUBvvm0AV|YpjE=KDG}TuP2gx(@?#& zQuBEM2<+v~h5I5U>wQ;xUxxS62cFh{-!yxE&Zs-C78_W5#z5k*lqY~$=hXF-u*pshhHo* z41ajBtP#HgI+33{*j9qNXn!1IA1Kx`bi^FuUbl*FOah#6D|%*poPLg|I1OBU&F4ZK z&m7B zSKN@PG6wA^fOc3xbf-T%tiBw~MOO3U;3)PtrQ6d#P3&osuASMzsZe<0ToaOm!J1cD zxlOM`mm|0a^@n}4eu*?xp$!=NZ_?Nfcw*@>E%F~+;G5j>hra~UJupZt<80K|kxk;) zZ(FRNtzv}+?Za1z8nVhY zuD)h3rS!Nt>&SAi%((l`eRzihzI1w>;$75uN7hiTByM;z9I!Qn9!<2qHhLdae%t~4 zqVty?A$cmgXsoXcwG8>8-IG|fr|YzsIq*Yn^YJ9s``X0TrXP_%qqfh}72@Xnx1;EA ziPK2Q?fOFDxhpG4AV=v-V(h)mGF7WMAHdD>#Z<{c^O>g9a^3gPmA>xk5Z$E2-ZtLp zSWK`7$}B{GDq53`Y_?0WutkIJc7&#zgC-@=iH0VgN^9-*S=;K;P@qjY1y9qMf@#C6 z0A!z<^<04?X&n}cw+_YY%$7`+xzYp_xrYz7QZgdFFY`EA&eo=0%K2X7nTK*2+!WDOyE?uj>K7?IsdW;rr`Wu|0$(PO?$VjrJWp4W 
zHt$7;?sWjcSJq19JzpU^8ZS7-tuJ02k+=?i=$6hnSJVhzo5zJUTO;^uacSk%jzj`$tw>L+T|hG=GF~u{1k+NN0~DfA2s1L zJDM{UwsZ!T=6<9xoJ<_@(~+5-YZ$L=T-=S{RIa@(^rxvzcysPx`|agG2u5IL>ews> zd=j5*_GA1ZgJ`0G&wTG%>MiLw$IxC+T@_+l3NmKrOiP)RE|Xn`vi~kCU_(8$eFrFn z9&T?=$mr%y=_GN!u7jkU?!M)nhy&Qw)?mpFwVjvFTd_$8M64yIRLv|Ez-+JSR#U?0 z{4Ufl$tX1)4eLnQ*nX33Oc1}dDt_0=*ell1w%6rd_MGu#)+z3}p+&;pPeh2Q&NQMj zQc|MIKE&X;c-Q;zJYQF871+8W;P9alHlu!6rGuq*XrfY)&rgLe4JBPl?nCmK|0~zm z6u%e|upK|dF*~Eo@W>5oaTz#8{eY6csSKFc_tU*cku?C1yt& zaia~PRX%6vC2QJlrC7w1=`7rv1wqhQ(+GCx_5pPklf*6_oVLUZ>*-u-czd-2l!I}> zW5jIQo}1iwR)9CqxNrNmAUxxMmM7&&?_M*HbjCenFUo0o*pAuc4G{o~9N2a7!l41r zA8`MPyT+u%!RnpzhEhQwymX9xjy0I`>u(<`OqnJGfJE_|n&DNT$Q(~=|LeqUNvpa~ z|0w~>PF*L_?IVld;nG*#upNdWOWmmL(hKR`NgL{1Bjv6tZ?TTLpD4IwCp=gCQ13Y-?-ebTnHP~hn|AXA*;aM7{dURN z6QwEa8ncEJ8%in#e7UV$jDnUqTDoj>Z1+uKu~ann$}XXvbnCxf0JY3Ra&~WRvq55$ z^wNqZk7p3}sr|WT=WS?s87^X~UCtDHxH2udPB|6FiYG|q)iG+mqbtH1Vm9O1)Xu{- zuQH_}$EgR}GH;mcZ?cPPg8SFJziYi~2JP6nEHfOV^Q=a0F0t{ub(xl2Jx$g-2ahoQ zIh`&d>bh^{zF|Nhe|4D*pYQzMSdLS*i%J>7`*mu7*U%1Y3b4}1U^b*g+CZROU(rH* zb>4)E$NEQJ^^fo)>KO<~BhE5Y)}iq;wrJXZtdiNcGP;i$smnkiSUhC5FC5)3=5Q2X z-mvqsC_p-9my@tFvCbz>HIah?&f;nS->8V~jJidjMxpNFx^A#8#ly4kP}y%?+AY?t zTID2W$?AObi@6!nfw9O_Jp93)4P6NmH7y}=ZETQw*E~I<<`MSi=aVNhcom3R)`NBp z?^Z^U<3rnngn>HUcallmB#Hv*YhY_}0&jK;S>^8!fqUh+E*m}V+^5>-1AXW!tQp?^ z)i5qxD3mD)+#LD|)Ki8L_=SNP2roUS@#IzfYX3meQX!jI#x^FWJD?2jXo z-?`KlfE&{unBn#BExTb2dOX*OCM^+d+q84}{W8NDYVxS_B>%2}0jDzx7gne<5?5;hT*NbQn5hdc*~!fc)N>NS;@9n z62zim3L#j`L*rrEdzR_)zF@>t!>_@kNT?`SZ5J+RPLwZjcrQ=>v*Me34mHT>>h_@F zV=HC=(6F@3SL%pU7d%vnC=c3F?>ec6@+{4fDz5ts{3b!ZmG{0ve$AY1mOI0`aU0eC zg*5->u_3L?&|S*pFaQHpVSS5}(Fpyc{iV#Xg<{ejT7|zAshCYq#9*ZLB$aRCdD{?D zsh>DaDUtQQNTcrK8G&>W1jYhy_^3`|-f_n8&iy`&_(m={Ura=;Nownel}h^0lFIOb zlsB(2vLS9@rq;{EK#d;s1x}oT>%9IB1O92rhc;AYP4J_0u?+g^QFW7CU(bmvn#Bj7 zH`94{n>HsC(;ip$*ms<(RIiblI7Z%*F{JHh|HXy|yc*^W?ayB@nD04m@iHEZppP^B zzhd&Z`*aI9>h2uThPtu8t8t7MvuIRk7}?&y5~q*|GT8>;Gz%x@`Ww=c!67d+L!t7H zB-kV3urvvE%{tq37Cgy9(AJ(Lu(mw3bfzA6L`IyZnIhc0^)Q`n5M%-fP#>QwOWjqR 
zX8==<9X&3ic=GvZ1+6tZXREjS8HODYDFYsf)@An&TsAg0boW9gK?4*m8~q*^$~{K$ z@1hXNyH`tIg}?R^QU*TUu%k;H;Rv0FSoSYlU9zo>n5|KK#S4nfsuPANB;h4}K4hBMk1CCx;2&f|XX>`X9h6q1}mR;n?A0)Mc;PXV9MA-3#!aKdg`lP`fKj-w| z?t!x|GG^*>x21%xY)zBznF2xfE7`G7n|()_La!Id>-c?|_*s}mcP9cUL?{q4C+g?T zNI3`8llbYwo#^~QXB!te+__BV92cj0ey29NGd`-P7e)R2iz6>kn@4aHkDNVBtLaSl zl$aFo7f!p2Jyeiu`>=&)PYy8m8cJOK3K;u#Qx2c)J8e4w;465~$UuV(> zJ>H0EXKb*J7{8J7)NU2Dc+FXo+I?y^(Yw6tzeUt%-ZemKbv-F9ICDb4sVeI3Se-qn z+AKPQEQlOeTM%BJtkrs-rQ%%Ekm5_)tQ>tLt#N!Bo*5$j8fU3c2g?avqj++1R~cOb zIHv1~2fhaA3{Vgp-Va0qHpVpTR^+fUhAGzBW_hL*lYk9Zd?m#ieV@CI<22?l^tRgM zo`DS;syhihJ618)Cv-&XDj#bmjGyK=V)s2{)U@_A-A@Kc2LY|lVRhGB6Rb6wmg}KW zCGNOdbj2qF!%uGvId2q$w7Omd4*jy5rz}i&60SF#Q&4PP(v3*q9tegRab4er}b0YlvyTN1f+1;;u%#4D&o zD&`wKW|53<5#$>s4?(MKI2XJsfCB=WL|?E)j_~NyjqGgt=+V50SBGxPB`fbEI}0pV z0;>%V%nKD4!bjcVNH5E|)i$l?mn@ZUj<5&az=IZMD`Khw<1Vh}#Bvwmi60(NGtJyf zr1YJI+yg}{ygd_dA012w-fr0YTc(-=-$rQL8+im)q@hH4NQjItheJ0D5AD@Yr~Rr=S;_3V~5Ef?cc)z+D+4-1{!i6`5_f5o7C{8J!7HvlL5C;HF*z{>wOMq2obdxP-K=Y$@pq-bmFYt~Uf+G*)c5rIU%&r%eb42- z?lbOl?{l8V;dzdMe&uzNFgts7)zRO36ulkCLjZ-_@RDHG40(ZA#X@ zZS3hFKKQL!fjGsz>vayRJ{SFaJ394Fubzc){}O|v<%Kq8f%WRb`o-0orkrmW_UY9Q zda51zA(OROalQ0>fw$4kgh}UDO|l(x2@=I=;&mB6r#M3ECe4VAsXOu`Lvj7W#q#)` zhZoLl7-CeRwMx6L(srbhS_WT2INV3u#M~BliI1YQSC+A7*782T1pd+2G0mZh_zJHW z+oDwET~FS$dNhBR5^{f&U)iv~L?rF`!8x`OxJU9-^FL??@Ef zGvgEV&Ukogd^9MScKB42{l#$8e6C8mb)rYz(w!veMI(CORlNS^k;nHYm$oB1FSoI- zLE9O!JZiO)2Iz#U_vpv*R`&X-u~SkW{&s=;;J92<4|m&%sHy#Y10zZ9KK!D2hfhlA znaQ<1v6%3&6m+7)3H~Tmp3Hvak;qSfvCeP7FU58rQwj0w*M<1%sv_(1=;})}^A_86 zD5qxg@Y8xsU1@BTlGDwa-xeqDf2tj`G@M+boor*IADw>@`Id&#uTi(Z_qkWzZX?FP z`*jhjgZJsqIb?CrgbKb7)_MLcDU$Y@31fLE+m#axS;Zf&rSuazr6k>NSU3&JUEOt; zR-9+bHEA2pOyow`HZfwAMc|l^-5q0G@b|sl2Rx1iZurPqq`;P&;t^PybZJLDQKYy2 zy6oHMx#2u)HLg7dtG7=~x$Dx5*ZCbBTo-9|&!xZby!tnp@#O1ST}lT9hhD9D{5-%{ zTkW}q06sX_w>>4wQdCG_?XC9b!A&kb<`J50Qr!3LzO=l@OBKBJNmt9endFe=J}?x^ z%KMRmwQ%I8NXR(5XrPQfv9X;f6f^XUpR*>ov5C9SX5xO6t-rTu)%J@?wiFrKC)VAP 
zAhwh}7>MtEA9TxAV{t3JSt!-62fi(@6>F#>j<{`@^Hn_K@?K8cF#p9M*RCjk#hW9S z;KKgXUK}Tm>YTeG$ zjZ_K}y6KNMpIGCed5xe}#BkD8?)FwK|ELl<8r+o@7U7(dwwwT$MX|`unzPb=ebPG< z?S+nv9xWZzs=z{P2R*@Fy?}2_ zD)Nl|`XA#5g>pVMC!>`UpC(;&u6ibsCod5Za*fW}OYLsiMuRn$TQ%qSg6jOr?7dvh z4F}Y@ZV@V%HXeAxO#L-{;iFknZ=sXz@#ZPsY^#jg*>uC5Bk!N+I!A4nf9}*PGsP(s zChF18KKE3~f6`fJ_VTVTGEWSwKaGYCUp`T~3E@r{P|Q_ISKsSETL7 z2KCP|5oHG13n6XY;wiTC!(k)TW(@6iS=km*ku*KWHdp6J{L_;h-Tad$Nd9}(=JC=c zhhg2O&l2XYVg|B}c~>^ik5~)4NGH$*4+y;;(n*-_V(`dsc5F!Pa!o3yU(%DNP-(e* z+hD3Pd0w--$J%#8;InjvgZD!_T&}St=YC|$-d5ZzF&z;R)_C_Q73-j3+%>l z=_MjNTNKAB#Z~#Id+xu^d-c3qwfTUCI(Os1pSMTQYhXd#nqdVEb)Um?&ZQ?dB(R7` zly2dFPQ$*Qd4`f@YsB1XsA<%>7x1DT0A@kZ*dmq#LtOPdDv?(O!zOlaU5a}+wPufMZb=gH^zstK!* z&gPc*q01Gl3cma2a}H01Q0{c$luI4S#yeCrE*9Ajb ze79Iq5o^dh=gXDx7kFY92m18JrWP}I(OwG(y2P}*{ySHXyD0m}sgL+(m%tXM19fLh zZV&NK6FY)?(&s}p)OhjMYB|GPB^8?_)p%XwtRBm^d%f=_IOnA>nNF0EA>{3 zR$_u%b-%WxeB3!YGo1N^V&%s6GgEb@auMc-PSIvKt;QNkoGjKrDK*# zEHjZSy2@V4u2e87@|D`RZ@a?WM~hFLb5AJJm-e_BzLmxA%@M0;n`xC7;!1b3W0EB9 zMHwZ$jJY6tq>Dd3Rg$`FAX!i^dAq^`JEv6w#)<2i#3PDwEt_o}@ANBOEmg~T)5NvI z>^rASC++G-u`cv z8vD2OhQQxD(V_O;q>WDRy_?c=rwi&CET=x)eJbMmQf@+daDdU9*Q>uW@ZK85&XR&0 zih1i2JKu1lE@9^$O%_9HI#yZ1HSDrQCYQ=`oFY@J^W{bUY3--6|O zgUqvcX-5x;>Bev+_Ljcb;B2ZQIAMRO@XGGb-d#B%(uJ< z@0Vfyz7W7tlqlq)zu?I*Gqv!O6IVjJ3>Ta;x6i> zxerod?Bf?HV-NfEvz4gr3QT1%-zIm0vd3Ump2xjGVq>mHzKngZ_)fRE0Yb4RWyBND zs<*`n{D;J~BC^)7yfi)U88s%t%PZI9%`K-x`TF)5Cbm%8GI1Ao{Y>T%wW4H&M=HbD zdn@0R$2_{KMX~rrGp)yN&pdlc+|>&46lI>#32ao^(YI5^!IlN9^)0B*)M; zH`ncs*ITbw|K(Nto}SK1ZpUGvo|2Pi9YZ^bOuU{--LK=9IFEWho6$6|nHG}&YT@*P zdp0-vQ&X#q@ebUu&83S&s@wh4X5$Qc6XM=wxzpV2jK9o%{!ptz$hqkOCnY^j?lRhM zajRnmwHvzT^$fV^bUWAWkj_xakhPPRxH%IQKbBkn!NJq^)|{`^C!QNS@+Tg-e;l5X z@;_$$Xl-khNtOsrsB!+V%ShZ{e4b#@e&Ywy#^Psav<0~yj^^dGFC04|>a4k8aILG>{+W9;Y+7T`{GUtgSPlskwsaju#^~RVohU0vy`zED^Cu1MNA#rYq-aq-85aT3^r@EO^!TYO)xKfo5zyZl~B`n0cqbdV;a?1r;h zwa%V<72|4>72GEq0)%IAbDbNcKQ)IN&7Ih$ux*PxZjCk)pc8hgtB z?Ehp_nSUwVU_IXUyG)?@qnuj(`F=)w$s#>>nN~_Z!70^wM!Qx7M?j@@x@yv}?@EP<(uQuU(Uq)f3}022@L-#fK(8 
z@zwg{92umPY(I#YO{Q&*EtPxay}0Dx+;J|b^EB(v3R#-j`qLXTHoercy^*nWM#EO% z5k>V27CVaA@3+ru^=+Hhs`U}ww@oVVmU;D)LpPX%G$$Qwbc!tUfBE70-gC9J6>us?zz&N?ma*T$;pFsJPpbErROSjTdWgeJPB!o4 z@}iFvzSn`nt-lA`6i)DKzO{`L=>vDTZ$BUUA;95syl_)XNwj~%}!4`-W^y)fV{rafh|H@qcxE_*=xkw_&$R)@NY@>yFGYg0tSFl%FBOWM9UP6Up={#d=wJdEU5G}*=Da9U6kVwP>muM7Pys@KuHB{%CJv}T z;pOFpHhm}oGt05yR>7vtFbSxjg}@lJ3J>o`to`cX=t!ammivJm_@ezGgem&T>hNVj zI|)o1D7>y)ei=7-+zK;zLD3;&Ws zt_QLUBZqSR-*pN&A_+Sly?278C-KLQNBeNtI_=+el3%fY{4e@Ykn4x+#PI)Z=kK_X z=&}6ai;jQMqrE)rcyj)0p&v7z0_`PY=qbthp?7Am{m6cpynpl|_dnXZ#L!cWkk!fj zOOl7>hw=iWA1Urweq{Qth!61kYdpC<=$#)7KXU%)y&`NsItvn8C(lDl;Dz-EC3!wf zLO(d`kH5&}lj%o^<~zpz|Cj%;_91&P{8r3sq(6p!MgEh;i!^Vs{K)3Rit*(BLyq`2 zdF1(o&K1SdqZ^^g%OmAEww-)llG{nHH?k8m9__nh^&roq6@DOFUw`+n z@Fz9!#mt8l`Gn>ZMh{Aq@BiixviOkrVa8LSughY`FR!bA@)gAc)4wAB>3;Q-@)X0D zG%v8@{x5oh&_8m??+@hb7#Y76@<29L?-lV!=h?P zbw%8!fgbBWC}YR3$OH0q5asK?jVI4{v@T)vTQP6YeFFN)@2@NFS7dQm(U0y27`@Qh zzF4^{?r-GzMy@xScbGgOU(Z&^2OF^RR@~pn)|=n+@=qTAx44n58!Pe#jmOx(;yH@k zALQ!_@-KEgQtWr!c;Ok+80zQ(8q|)XU&b2%FXt##9niT!efh$l|3!|5tbldE26Br4 zm;irAB*6WQ05K%MeTo2chSUzZL4fBJQX8ySc$k~${AL2YFH7KnI;_#C4ZSam-T@}{ zfgO0rIrPph0oDvW*hzpk5)Z^1bSKrfLLcNZi6`nuvQ(hn3;=5%K^^cn!0doK01!WZ zz(W9E0eS$?qp%EMB-;gW1;BTJPJk|G;{wue5 zK<_e!XJCRYl#!2so+Jy!4z(*Tx3xi8`B#}>3AhNL0|3S1AHb-8H-Ol3doAD=sOv1( z5#Qz5ApV4tP)EK;e((E*2|$l;0w{-m$Poeqlu_)Op}Z4tHegx6UjQ@xYR7j2MzKe> za{$mn8O2`-FtT+y<|N;ty!`ENBG8~1Bi|xlp)%-&zP3*CG4Lnl_y)i*=SVq#d<*$R zaEE#sKs5mJC9(tQfcT?0BKauh2!sDfwjw>i4*U-QI6?>u z4xMg5fbTvKNdJ>!d(p)4EXWhHF(qLnn+Lmb0-G)Ns1bXuv9NY{R{G1IWH0Ub&AE;<}bqZZBMQPXDp{{R*yb&~)9 literal 0 HcmV?d00001 diff --git a/.dotnet/tests/data/stop_sign.png b/.dotnet/tests/data/stop_sign.png new file mode 100644 index 0000000000000000000000000000000000000000..002b3ae1aa1967d4cfd490481f67deff52d92279 GIT binary patch literal 2125 zcmV-T2(tHyP)Px#1ZP1_K>z@;j|==^1poj532;bRa{vG#Hvj-3HvuzPpLzfQ2jWRYK~#8N-CJ91 zOj{V9hbA-AQkRG0CN~v4o z(VJ78i*uZF^mM9o*)yFhX4YBX+H39Evu4k9rk(qjeA#RE`v0}o|Lx1H+cpsjQwH$K 
zZ2s9+Uhe3%TAent+1YD0yL!xK>9JVclpf9jo5kYg^ileIEtUX1f3<>|H7-!FX)w;jNn7BUXifF zLEO7`socF=rTD#bCvorCL6nvjmCemW*}k19+qS9Px|Jwfwh(3WW|d7%MA@{7C>u8t zrLmFG)zCoPd-vj|Wg^f!A-^JM!M}XTf=4MO#H_(EA{yZXVxhUAlu zx;j;a%1VgvD{ppY@Q>9x0hQoCIO292HlPrw7lHmm{l=R=JoHIC^78R06uq*D$75JwA=0sMArqkwB3P|M6XBBxo_0%%DR@E0f(5DwqehXQ zva&yTSgE1UWcm?>pl@D`!3#QSYtt9uFA+Q)oJ0l&3=1wMI%;ZE5k`-O2p@Tv#;}=8 z0=I7QGs$6}IaBjVMF4^RsZ%vSK>$Yx z@XbhoFB2y+qf`N(p$LF0lnzAu(h-1!!YEdD87IEn6RGy>C;(WA_2T7PViJL;G?HFhdV$Ops6Xw0Kv*|ty-ly%h3CR znCCZwY=?P88CYOJo)N_3mPi2Pxz?@YzoHQJ35s**2ZV6tuo$5?!0e9<&%uM5vkaTn z8snd;07megJ)1Dkd-5d9OD2Hm_Ua+(6BOqlKoG-Ttjg#Od>1b=BN;3o!OGP6g4q%P z?LuD01n7kTbm%J!AjZ;0aIIMrV{62e1)x1L8Oa3jU%A5ak_q5kxsv~iLewWH&fyLS z;CV-b^#))v3^qTQtvkSf{W{A_BtYNz@g&^W$A3kkuTzwbBLwhc2#f_vy#a_f?qI;+ zLiPjmB|swR>l9_&e9?s><jhGtvVuLge8p$nf8|5#yh#0N=X1c}^7AXoWl@ zxK^)bd6~Qe+}O#5jZ8*HGSC+{g4~l#RR9dajNmD1Xqj4|Au%fUnFtV+KcEQsvUrzu>+yPF!bdm{Biqr(K zRa7{b0Q>jH8I>}l0$8o@uuKGC#Y8qNMdIa-i%$OmBW!&T3x%2p35K_&LM#D_eg@V> znE*hH2-Vebj-@~rJbmFK9TU6&*3HE{J?7Qq0*aX|c=~tGew$3=c*yaT2QWL?T&tHgS!Si*@zpd6P9?H;xjmOw_%0yVW zFs{NvA`8B~ou#qKn};xbU<