[editor] Fix Model Search to Ignore Casing (#756)

Previously, the model search used a case-sensitive substring match, so searching 'hu' would not show "HuggingFaceTextGenerationParser".

Fix by lowercasing both the search string and the model names when filtering:

<img width="354" alt="Screenshot 2024-01-04 at 3.35.46 PM" src="https://github.com/lastmile-ai/aiconfig/assets/5060851/10baf717-6192-4de8-bab6-046f678ac9ef">


---
Stack created with [Sapling](https://sapling-scm.com). Best reviewed with [ReviewStack](https://reviewstack.dev/lastmile-ai/aiconfig/pull/756).
* __->__ #756
* #755
* #754
rholinshead authored Jan 4, 2024 · 2 parents 9df8fbb + b38c8a0 · commit e8b72d2
Showing 5 changed files with 144 additions and 4 deletions.
6 changes: 5 additions & 1 deletion python/src/aiconfig/editor/client/src/Editor.tsx
@@ -34,7 +34,11 @@ export default function Editor() {
     const res = await ufetch.get(ROUTE_TABLE.LIST_MODELS);
     const models = res.data;
     if (search && search.length > 0) {
-      return models.filter((model: string) => model.indexOf(search) >= 0);
+      const lowerCaseSearch = search.toLowerCase();
+      return models.filter(
+        (model: string) =>
+          model.toLocaleLowerCase().indexOf(lowerCaseSearch) >= 0
+      );
     }
     return models;
   }, []);
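
One subtlety in the diff above: the search string is lowered with `toLowerCase()` while the model name uses `toLocaleLowerCase()`. For ASCII model names the two agree; a minimal sketch of where they can diverge (using Turkish casing rules purely for illustration):

```typescript
// toLowerCase always applies Unicode default case mappings;
// toLocaleLowerCase can apply locale-specific rules.
console.log("I".toLowerCase()); // "i"
console.log("I".toLocaleLowerCase("tr")); // "ı" (Turkish dotless i)
```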
73 changes: 73 additions & 0 deletions python/src/aiconfig/editor/client/src/shared/prompt_schemas/HuggingFaceTextGenerationParserPromptSchema.ts
@@ -0,0 +1,73 @@
import { PromptSchema } from "../../utils/promptUtils";

export const HuggingFaceTextGenerationParserPromptSchema: PromptSchema = {
  // See https://github.com/huggingface/huggingface_hub/blob/a331e82aad1bc63038194611236db28fa013814c/src/huggingface_hub/inference/_client.py#L1206
  // for settings and https://huggingface.co/docs/api-inference/detailed_parameters for defaults.
  // The settings below are supported settings specified in the HuggingFaceTextGenerationParser
  // refine_chat_completion_params implementation.
  input: {
    type: "string",
  },
  model_settings: {
    type: "object",
    properties: {
      model: {
        type: "string",
      },
      temperature: {
        type: "number",
        minimum: 0,
        maximum: 1,
      },
      top_k: {
        type: "integer",
      },
      top_p: {
        type: "number",
        minimum: 0,
        maximum: 1,
      },
      details: {
        type: "boolean",
      },
      stream: {
        type: "boolean",
      },
      do_sample: {
        type: "boolean",
      },
      max_new_tokens: {
        type: "integer",
      },
      best_of: {
        type: "integer",
      },
      repetition_penalty: {
        type: "number",
        minimum: 0,
        maximum: 1,
      },
      return_full_text: {
        type: "boolean",
      },
      seed: {
        type: "integer",
      },
      stop_sequences: {
        type: "array",
        items: {
          type: "string",
        },
      },
      truncate: {
        type: "integer",
      },
      typical_p: {
        type: "number",
      },
      watermark: {
        type: "boolean",
      },
    },
  },
};
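
For context, a hypothetical `model_settings` value that this schema would describe (the model id and values are illustrative only, not defaults from the parser):

```typescript
const settings = {
  model: "mistralai/Mistral-7B-v0.1", // illustrative model id
  temperature: 0.7,
  top_p: 0.9,
  max_new_tokens: 256,
  stop_sequences: ["\n\n"],
  stream: true,
};
```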
2 changes: 1 addition & 1 deletion python/src/aiconfig/editor/client/src/shared/prompt_schemas/OpenAIChatModelParserPromptSchema.ts
@@ -38,7 +38,7 @@ export const OpenAIChatModelParserPromptSchema: PromptSchema = {
       items: {
         type: "object",
         required: ["name", "parameters"],
-        parameters: {
+        properties: {
           name: {
             type: "string",
           },
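
The one-word change is a correctness fix: in JSON-Schema-style definitions like these prompt schemas, child fields must be nested under `properties`; a `parameters` key at that level would be ignored by schema-driven consumers. A minimal sketch of the corrected shape (the variable name is illustrative):

```typescript
// "name" is declared under "properties", per JSON Schema conventions.
const functionItemSchema = {
  type: "object",
  required: ["name", "parameters"],
  properties: {
    name: { type: "string" },
  },
};
```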
61 changes: 61 additions & 0 deletions python/src/aiconfig/editor/client/src/shared/prompt_schemas/PaLMChatParserPromptSchema.ts
@@ -0,0 +1,61 @@
import { PromptSchema } from "../../utils/promptUtils";

export const PaLMChatParserPromptSchema: PromptSchema = {
  // See https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/text-chat for settings
  // and defaults. The settings below are supported settings specified in the PaLMChatParser
  // refine_chat_completion_params implementation.
  input: {
    type: "string",
  },
  model_settings: {
    type: "object",
    properties: {
      context: {
        type: "string",
      },
      candidate_count: {
        type: "integer",
        minimum: 1,
        maximum: 4,
      },
      temperature: {
        type: "number",
        minimum: 0,
        maximum: 1,
      },
      top_p: {
        type: "number",
        minimum: 0,
        maximum: 1,
      },
      top_k: {
        type: "integer",
        minimum: 1,
        maximum: 40,
      },
      examples: {
        type: "array",
        items: {
          type: "object",
          required: ["input", "output"],
          properties: {
            input: {
              type: "string",
            },
            output: {
              type: "string",
            },
          },
        },
      },
    },
  },
  prompt_metadata: {
    type: "object",
    properties: {
      remember_chat_context: {
        type: "boolean",
      },
    },
  },
};
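
Similarly, hypothetical values this PaLM chat schema would describe (illustrative only):

```typescript
const palmSettings = {
  context: "You are a terse, helpful assistant.",
  candidate_count: 1,
  examples: [{ input: "Hi", output: "Hello! How can I help?" }],
};

// Note prompt_metadata is schema'd separately from model_settings:
const palmPromptMetadata = { remember_chat_context: true };
```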
6 changes: 4 additions & 2 deletions python/src/aiconfig/editor/client/src/utils/promptUtils.ts
@@ -3,6 +3,8 @@ import { OpenAIChatModelParserPromptSchema } from "../shared/prompt_schemas/OpenAIChatModelParserPromptSchema";
 import { OpenAIChatVisionModelParserPromptSchema } from "../shared/prompt_schemas/OpenAIChatVisionModelParserPromptSchema";
 import { DalleImageGenerationParserPromptSchema } from "../shared/prompt_schemas/DalleImageGenerationParserPromptSchema";
 import { PaLMTextParserPromptSchema } from "../shared/prompt_schemas/PaLMTextParserPromptSchema";
+import { PaLMChatParserPromptSchema } from "../shared/prompt_schemas/PaLMChatParserPromptSchema";
+import { HuggingFaceTextGenerationParserPromptSchema } from "../shared/prompt_schemas/HuggingFaceTextGenerationParserPromptSchema";
 /**
  * Get the name of the model for the specified prompt. The name will either be specified in the prompt's
@@ -67,13 +69,13 @@ export const PROMPT_SCHEMAS: Record<string, PromptSchema> = {
   "dall-e-3": DalleImageGenerationParserPromptSchema,

   // HuggingFaceTextGenerationParser
-  // "HuggingFaceTextGenerationParser":
+  HuggingFaceTextGenerationParser: HuggingFaceTextGenerationParserPromptSchema,

   // PaLMTextParser
   "models/text-bison-001": PaLMTextParserPromptSchema,

   // PaLMChatParser
-  // "models/chat-bison-001":
+  "models/chat-bison-001": PaLMChatParserPromptSchema,
 };

 export type PromptInputSchema =
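
With the two entries registered, a lookup sketch (assuming `PROMPT_SCHEMAS` is exported from promptUtils and keyed by model name or parser id, as the diff suggests; the consuming code below is illustrative):

```typescript
import { PROMPT_SCHEMAS } from "./promptUtils";

// Keys are model names ("models/chat-bison-001") or parser ids
// ("HuggingFaceTextGenerationParser"), per the record above.
const schema = PROMPT_SCHEMAS["HuggingFaceTextGenerationParser"];
if (schema?.model_settings?.properties) {
  // e.g. drive an editor settings form from the schema's properties
  console.log(Object.keys(schema.model_settings.properties));
}
```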
