[openai] Updates for Azure OpenAI Assistants (2025-01-01-preview) (#32256)

* [openai] Updates for Azure OpenAI Assistants (2024-12-01-preview)

* Adding suppression

* examples

* removing suppression

* format

* changed directive for file_search

* rename

* moved to 2025-01-01-preview

* updating readme

* optional

* Added option for java emitter to prevent sample emission

---------

Co-authored-by: Jose Alvarez <[email protected]>
Co-authored-by: Travis Wilson <[email protected]>
3 people authored Feb 20, 2025
1 parent f63bea6 commit e50d53d
Showing 101 changed files with 13,843 additions and 90 deletions.
64 changes: 60 additions & 4 deletions specification/ai/OpenAI.Assistants/common/models.tsp
@@ -42,7 +42,7 @@ alias OpenAIListRequestOptions = {
@query
after?: string;

@doc("A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.")
@doc("A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.")
@query
before?: string;
};
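For illustration only (not part of this commit): a list request could supply the `after` cursor as a query parameter alongside the usual path parameters. The IDs below are placeholders, and the JSON shape loosely follows the parameter layout of the example files later in this diff.

{
  "parameters": {
    "endpoint": "{endpoint}",
    "api-version": "2025-01-01-preview",
    "threadId": "thread_abc123",
    "after": "msg_abc123"
  }
}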
@@ -94,9 +94,12 @@ alias OptionalNullableMetadata = {
};

/**
* Specifies the format that the model must output. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
* Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON
* schema. Learn more in the Structured Outputs guide.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message.
* Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,
@@ -124,6 +127,7 @@ union AssistantsApiResponseFormatMode {
"auto",

/** Setting the value to `none` will result in a 400 Bad Request. */
@removed(ServiceApiVersions.v2025_01_01_preview)
"none",
}

@@ -132,11 +136,60 @@ union AssistantsApiResponseFormatMode {
* If `text` the model can return text or any value needed.
*/
@added(ServiceApiVersions.v2024_05_01_preview)
@discriminator("type")
model AssistantsApiResponseFormat {
/** Must be one of `text` or `json_object`. */
/** Must be one of `text`, `json_object`, or `json_schema`. */
type?: ApiResponseFormat = ApiResponseFormat.text;
}

/**
* An object describing expected output of the model as text.
*/
@added(ServiceApiVersions.v2024_05_01_preview)
model AssistantsApiResponseFormatText extends AssistantsApiResponseFormat {
/** The type of response format being defined: `text` */
type: ApiResponseFormat.text;
}

/**
* An object describing expected output of the model as a JSON object.
*/
@added(ServiceApiVersions.v2024_05_01_preview)
model AssistantsApiResponseFormatJsonObject
extends AssistantsApiResponseFormat {
/** The type of response format being defined: `json_object` */
type: ApiResponseFormat.jsonObject;
}

/**
* An object describing expected output of the model to match a JSON schema.
*/
@added(ServiceApiVersions.v2025_01_01_preview)
model AssistantsApiResponseFormatJsonSchema
extends AssistantsApiResponseFormat {
/** The type of response format being defined: `json_schema` */
type: ApiResponseFormat.jsonSchema;

/** The JSON schema that the model must output. */
@encodedName("application/json", "json_schema")
jsonSchema: {
/** A description of what the response format is for, used by the model to determine how to respond in the format. */
description?: string;

/** The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. */
name: string;

/** The schema for the response format, described as a JSON Schema object. */
#suppress "@azure-tools/typespec-azure-core/no-unknown" "External API shape takes an arbitrary json"
schema: unknown;

/**
* Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the Structured Outputs guide.
*/
strict?: boolean;
};
}

/** Possible API response formats. */
@added(ServiceApiVersions.v2024_05_01_preview)
union ApiResponseFormat {
@@ -147,4 +200,7 @@ union ApiResponseFormat {

/** Using `json_object` format will limit the usage of ToolCall to only functions. */
jsonObject: "json_object",

/** Using `json_schema` format will ensure the model matches the supplied JSON schema. */
jsonSchema: "json_schema",
}
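To make the new `json_schema` option concrete, here is a sketch (not taken from this commit) of how a request's `response_format` might be populated per the `AssistantsApiResponseFormatJsonSchema` model above; the schema name and contents are invented for illustration.

{
  "response_format": {
    "type": "json_schema",
    "json_schema": {
      "name": "book_summary",
      "description": "A structured summary of a book.",
      "schema": {
        "type": "object",
        "properties": {
          "title": { "type": "string" },
          "summary": { "type": "string" }
        },
        "required": ["title", "summary"],
        "additionalProperties": false
      },
      "strict": true
    }
  }
}

With `strict` set to `true`, generation is constrained to the supplied schema, which is why only a subset of JSON Schema is accepted in that mode.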
@@ -0,0 +1,49 @@
{
"title": "Cancels a run that is `in_progress`.\n",
"parameters": {
"endpoint": "{endpoint}",
"api-version": "2025-01-01-preview",
"threadId": "thread_abc123",
"runId": "run_abc123"
},
"responses": {
"200": {
"body": {
"id": "run_abc123",
"object": "thread.run",
"created_at": 1699076126,
"assistant_id": "asst_abc123",
"thread_id": "thread_abc123",
"status": "cancelling",
"started_at": 1699076126,
"expires_at": 1699076726,
"cancelled_at": null,
"failed_at": null,
"completed_at": null,
"parallel_tool_calls": true,
"last_error": null,
"model": "gpt-4-turbo",
"instructions": "You summarize books.",
"tools": [
{
"type": "file_search"
}
],
"tool_choice": "auto",
"truncation_strategy": {
"type": "auto",
"last_messages": null
},
"max_completion_tokens": 1000,
"max_prompt_tokens": 1000,
"incomplete_details": null,
"metadata": {},
"usage": null,
"temperature": 1.0,
"top_p": 1.0,
"response_format": "auto"
}
}
},
"operationId": "CancelRun"
}
@@ -0,0 +1,28 @@
{
"title": "Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible.",
"parameters": {
"endpoint": "{endpoint}",
"api-version": "2025-01-01-preview",
"vectorStoreId": "vs_abc123",
"batchId": "vsfb_abc123"
},
"responses": {
"200": {
"body": {
"id": "vsfb_abc123",
"object": "vector_store.files_batch",
"created_at": 1699061776,
"vector_store_id": "vs_abc123",
"status": "cancelling",
"file_counts": {
"in_progress": 12,
"completed": 3,
"failed": 0,
"cancelled": 0,
"total": 15
}
}
}
},
"operationId": "CancelVectorStoreFileBatch"
}
@@ -0,0 +1,41 @@
{
"title": "Create an assistant with a model and instructions.",
"parameters": {
"endpoint": "{endpoint}",
"api-version": "2025-01-01-preview",
"body": {
"name": "Math Tutor",
"instructions": "When a customer asks about a specific math problem, use Python to evaluate their query.",
"tools": [
{
"type": "code_interpreter"
}
],
"model": "gpt-4-1106-preview"
}
},
"responses": {
"200": {
"body": {
"id": "asst_4nsG2qgNzimRPE7MazXTXbU7",
"object": "assistant",
"created_at": 1707295707,
"name": "Math Tutor",
"description": null,
"model": "gpt-4-1106-preview",
"instructions": "When a customer asks about a specific math problem, use Python to evaluate their query.",
"tools": [
{
"type": "code_interpreter"
}
],
"tool_resources": {},
"metadata": {},
"top_p": 1.0,
"temperature": 1.0,
"response_format": "auto"
}
}
},
"operationId": "CreateAssistant"
}
@@ -0,0 +1,41 @@
{
"title": "Create a message.",
"parameters": {
"endpoint": "{endpoint}",
"api-version": "2025-01-01-preview",
"threadId": "thread_v7V4csrNOxtNmgcwGg496Smx",
"body": {
"role": "user",
"content": "What is the cube root of the sum of 12, 14, 1234, 4321, 90000, 123213541223, 443123123124, 5423324234, 234324324234, 653434534545, 200000000, 98237432984, 99999999, 99999999999, 220000000000, 3309587702? Give me the answer rounded to the nearest integer without commas or spaces."
}
},
"responses": {
"200": {
"body": {
"id": "msg_as3XIk1tpVP3hdHjWBGg3uG4",
"object": "thread.message",
"created_at": 1707298421,
"assistant_id": null,
"thread_id": "thread_v7V4csrNOxtNmgcwGg496Smx",
"run_id": null,
"role": "user",
"content": [
{
"type": "text",
"text": {
"value": "What is the cube root of the sum of 12, 14, 1234, 4321, 90000, 123213541223, 443123123124, 5423324234, 234324324234, 653434534545, 200000000, 98237432984, 99999999, 99999999999, 220000000000, 3309587702? Give me the answer rounded to the nearest integer without commas or spaces.",
"annotations": []
}
}
],
"status": "completed",
"incomplete_details": null,
"incomplete_at": null,
"completed_at": 1707298439,
"attachments": [],
"metadata": {}
}
}
},
"operationId": "CreateMessage"
}
@@ -0,0 +1,51 @@
{
"title": "Create a run.",
"parameters": {
"endpoint": "{endpoint}",
"api-version": "2025-01-01-preview",
"threadId": "thread_abc123",
"body": {
"assistant_id": "asst_abc123"
}
},
"responses": {
"200": {
"body": {
"id": "run_abc123",
"object": "thread.run",
"created_at": 1699063290,
"assistant_id": "asst_abc123",
"thread_id": "thread_abc123",
"status": "queued",
"started_at": 1699063290,
"expires_at": null,
"cancelled_at": null,
"failed_at": null,
"completed_at": 1699063291,
"parallel_tool_calls": true,
"last_error": null,
"model": "gpt-4-turbo",
"instructions": "",
"incomplete_details": null,
"tools": [
{
"type": "code_interpreter"
}
],
"metadata": {},
"usage": null,
"temperature": 1.0,
"top_p": 1.0,
"max_prompt_tokens": 1000,
"max_completion_tokens": 1000,
"truncation_strategy": {
"type": "auto",
"last_messages": null
},
"response_format": "auto",
"tool_choice": "auto"
}
}
},
"operationId": "CreateRun"
}
@@ -0,0 +1,20 @@
{
"title": "Creates a thread.",
"parameters": {
"endpoint": "{endpoint}",
"api-version": "2025-01-01-preview",
"body": {}
},
"responses": {
"200": {
"body": {
"id": "thread_v7V4csrNOxtNmgcwGg496Smx",
"object": "thread",
"created_at": 1707297136,
"tool_resources": {},
"metadata": {}
}
}
},
"operationId": "CreateThread"
}
@@ -0,0 +1,55 @@
{
"title": "Create a thread and run it in one request.",
"parameters": {
"endpoint": "{endpoint}",
"api-version": "2025-01-01-preview",
"body": {
"assistant_id": "asst_abc123",
"thread": {
"messages": [
{
"role": "user",
"content": "Explain deep learning to a 5 year old."
}
]
}
}
},
"responses": {
"200": {
"body": {
"id": "run_abc123",
"object": "thread.run",
"created_at": 1699076792,
"assistant_id": "asst_abc123",
"thread_id": "thread_abc123",
"status": "queued",
"started_at": null,
"expires_at": 1699077392,
"cancelled_at": null,
"failed_at": null,
"completed_at": null,
"parallel_tool_calls": true,
"required_action": null,
"last_error": null,
"model": "gpt-4-turbo",
"instructions": "You are a helpful assistant.",
"tools": [],
"metadata": {},
"temperature": 1.0,
"top_p": 1.0,
"max_completion_tokens": null,
"max_prompt_tokens": null,
"truncation_strategy": {
"type": "auto",
"last_messages": null
},
"incomplete_details": null,
"usage": null,
"response_format": "auto",
"tool_choice": "auto"
}
}
},
"operationId": "CreateThreadAndRun"
}