diff --git a/python/src/aiconfig/editor/client/src/shared/prompt_schemas/HuggingFaceTextGenerationParserPromptSchema.ts b/python/src/aiconfig/editor/client/src/shared/prompt_schemas/HuggingFaceTextGenerationParserPromptSchema.ts
new file mode 100644
index 000000000..cc605cf9c
--- /dev/null
+++ b/python/src/aiconfig/editor/client/src/shared/prompt_schemas/HuggingFaceTextGenerationParserPromptSchema.ts
@@ -0,0 +1,73 @@
+import { PromptSchema } from "../../utils/promptUtils";
+
+export const HuggingFaceTextGenerationParserPromptSchema: PromptSchema = {
+  // See https://github.com/huggingface/huggingface_hub/blob/a331e82aad1bc63038194611236db28fa013814c/src/huggingface_hub/inference/_client.py#L1206
+  // for settings and https://huggingface.co/docs/api-inference/detailed_parameters for defaults.
+  // The settings below are supported settings specified in the HuggingFaceTextGenerationParser
+  // refine_chat_completion_params implementation.
+  input: {
+    type: "string",
+  },
+  model_settings: {
+    type: "object",
+    properties: {
+      model: {
+        type: "string",
+      },
+      temperature: {
+        type: "number",
+        minimum: 0,
+        maximum: 1,
+      },
+      top_k: {
+        type: "integer",
+      },
+      top_p: {
+        type: "number",
+        minimum: 0,
+        maximum: 1,
+      },
+      details: {
+        type: "boolean",
+      },
+      stream: {
+        type: "boolean",
+      },
+      do_sample: {
+        type: "boolean",
+      },
+      max_new_tokens: {
+        type: "integer",
+      },
+      best_of: {
+        type: "integer",
+      },
+      repetition_penalty: {
+        type: "number",
+        minimum: 0,
+        maximum: 1,
+      },
+      return_full_text: {
+        type: "boolean",
+      },
+      seed: {
+        type: "integer",
+      },
+      stop_sequences: {
+        type: "array",
+        items: {
+          type: "string",
+        },
+      },
+      truncate: {
+        type: "integer",
+      },
+      typical_p: {
+        type: "number",
+      },
+      watermark: {
+        type: "boolean",
+      },
+    },
+  },
+};
diff --git a/python/src/aiconfig/editor/client/src/shared/prompt_schemas/OpenAIChatModelParserPromptSchema.ts b/python/src/aiconfig/editor/client/src/shared/prompt_schemas/OpenAIChatModelParserPromptSchema.ts
index dfd398ca3..f293cf4fe 100644
--- a/python/src/aiconfig/editor/client/src/shared/prompt_schemas/OpenAIChatModelParserPromptSchema.ts
+++ b/python/src/aiconfig/editor/client/src/shared/prompt_schemas/OpenAIChatModelParserPromptSchema.ts
@@ -38,7 +38,7 @@ export const OpenAIChatModelParserPromptSchema: PromptSchema = {
         items: {
           type: "object",
           required: ["name", "parameters"],
-          parameters: {
+          properties: {
             name: {
               type: "string",
             },
diff --git a/python/src/aiconfig/editor/client/src/shared/prompt_schemas/PaLMChatParserPromptSchema.ts b/python/src/aiconfig/editor/client/src/shared/prompt_schemas/PaLMChatParserPromptSchema.ts
new file mode 100644
index 000000000..1986e99dc
--- /dev/null
+++ b/python/src/aiconfig/editor/client/src/shared/prompt_schemas/PaLMChatParserPromptSchema.ts
@@ -0,0 +1,61 @@
+import { PromptSchema } from "../../utils/promptUtils";
+
+export const PaLMChatParserPromptSchema: PromptSchema = {
+  // See https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/text-chat for settings
+  // and defaults. The settings below are supported settings specified in the PaLMChatParser
+  // refine_chat_completion_params implementation.
+  input: {
+    type: "string",
+  },
+  model_settings: {
+    type: "object",
+    properties: {
+      context: {
+        type: "string",
+      },
+      candidate_count: {
+        type: "integer",
+        minimum: 1,
+        maximum: 4,
+      },
+      temperature: {
+        type: "number",
+        minimum: 0,
+        maximum: 1,
+      },
+      top_p: {
+        type: "number",
+        minimum: 0,
+        maximum: 1,
+      },
+      top_k: {
+        type: "integer",
+        minimum: 1,
+        maximum: 40,
+      },
+      examples: {
+        type: "array",
+        items: {
+          type: "object",
+          required: ["input", "output"],
+          properties: {
+            input: {
+              type: "string",
+            },
+            output: {
+              type: "string",
+            },
+          },
+        },
+      },
+    },
+  },
+  prompt_metadata: {
+    type: "object",
+    properties: {
+      remember_chat_context: {
+        type: "boolean",
+      },
+    },
+  },
+};
diff --git a/python/src/aiconfig/editor/client/src/utils/promptUtils.ts b/python/src/aiconfig/editor/client/src/utils/promptUtils.ts
index 4f6886067..557bf3210 100644
--- a/python/src/aiconfig/editor/client/src/utils/promptUtils.ts
+++ b/python/src/aiconfig/editor/client/src/utils/promptUtils.ts
@@ -3,6 +3,8 @@ import { OpenAIChatModelParserPromptSchema } from "../shared/prompt_schemas/Open
 import { OpenAIChatVisionModelParserPromptSchema } from "../shared/prompt_schemas/OpenAIChatVisionModelParserPromptSchema";
 import { DalleImageGenerationParserPromptSchema } from "../shared/prompt_schemas/DalleImageGenerationParserPromptSchema";
 import { PaLMTextParserPromptSchema } from "../shared/prompt_schemas/PaLMTextParserPromptSchema";
+import { PaLMChatParserPromptSchema } from "../shared/prompt_schemas/PaLMChatParserPromptSchema";
+import { HuggingFaceTextGenerationParserPromptSchema } from "../shared/prompt_schemas/HuggingFaceTextGenerationParserPromptSchema";
 
 /**
  * Get the name of the model for the specified prompt. The name will either be specified in the prompt's
@@ -67,13 +69,13 @@ export const PROMPT_SCHEMAS: Record<string, PromptSchema> = {
   "dall-e-3": DalleImageGenerationParserPromptSchema,
 
   // HuggingFaceTextGenerationParser
-  // "HuggingFaceTextGenerationParser":
+  HuggingFaceTextGenerationParser: HuggingFaceTextGenerationParserPromptSchema,
 
   // PaLMTextParser
   "models/text-bison-001": PaLMTextParserPromptSchema,
 
   // PaLMChatParser
-  // "models/chat-bison-001":
+  "models/chat-bison-001": PaLMChatParserPromptSchema,
 };
 
 export type PromptInputSchema =
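
For context, a minimal sketch of how a schema registered in PROMPT_SCHEMAS would be resolved on the client, assuming only the exported PROMPT_SCHEMAS record and PromptSchema type shown in promptUtils.ts above; the lookup helper and the hard-coded model name below are illustrative, not part of this diff:

import { PROMPT_SCHEMAS, PromptSchema } from "./utils/promptUtils";

// Hypothetical helper: model names registered above map directly to their
// parser's PromptSchema; models without a registered schema return undefined.
function getPromptSchema(modelName: string): PromptSchema | undefined {
  return PROMPT_SCHEMAS[modelName];
}

// "models/chat-bison-001" is registered to PaLMChatParserPromptSchema in this diff,
// so this lookup now succeeds where it previously had no entry.
const schema = getPromptSchema("models/chat-bison-001");
console.log(schema !== undefined); // true after this change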