From 798b751f2b1393c2ce9dd718679ab5318a581019 Mon Sep 17 00:00:00 2001 From: Hk-Gosuto Date: Fri, 10 Jan 2025 16:55:31 +0800 Subject: [PATCH] feat: #327 --- README.md | 32 ++++++++++++++++++++++++++------ app/api/config/route.ts | 1 + app/client/platforms/openai.ts | 24 ++++++++++++++---------- app/config/server.ts | 1 + app/store/access.ts | 8 +++++++- 5 files changed, 49 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 0b229d93d51..79deb32ac8f 100644 --- a/README.md +++ b/README.md @@ -282,15 +282,15 @@ Azure Api 版本,你可以在这里找到:[Azure 文档](https://learn.micro 如果你不想让用户使用历史摘要功能,将此环境变量设置为 1 即可。 -### `ANTHROPIC_API_KEY` (optional) +### `ANTHROPIC_API_KEY` (可选) anthropic claude Api Key. -### `ANTHROPIC_API_VERSION` (optional) +### `ANTHROPIC_API_VERSION` (可选) anthropic claude Api version. -### `ANTHROPIC_URL` (optional) +### `ANTHROPIC_URL` (可选) anthropic claude Api Url. @@ -305,11 +305,31 @@ For Azure: use `modelName@azure=deploymentName` to customize model name and depl For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name. > Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list. 
-### `DEFAULT_MODEL` (optional) +### `CUSTOM_MODELS` (可选) -Change default model +> 示例:`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` 表示增加 `qwen-7b-chat` 和 `glm-6b` 到模型列表,而从列表中删除 `gpt-3.5-turbo`,并将 `gpt-4-1106-preview` 模型名字展示为 `gpt-4-turbo`。 +> 如果你想先禁用所有模型,再启用指定模型,可以使用 `-all,+gpt-3.5-turbo`,则表示仅启用 `gpt-3.5-turbo` -### `WHITE_WEBDAV_ENDPOINTS` (optional) +用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。 + +在Azure的模式下,支持使用`modelName@azure=deploymentName`的方式配置模型名称和部署名称(deploy-name) +> 示例:`+gpt-3.5-turbo@azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项。 +> 如果你只能使用Azure模式,那么设置 `-all,+gpt-3.5-turbo@azure=gpt35` 则可以让对话的默认使用 `gpt35(Azure)` + +在ByteDance的模式下,支持使用`modelName@bytedance=deploymentName`的方式配置模型名称和部署名称(deploy-name) +> 示例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx`这个配置会在模型列表显示一个`Doubao-lite-4k(ByteDance)`的选项 + +### `DEFAULT_MODEL` (可选) + +更改默认模型 + +### `USE_REMOTE_MODELS` (可选) + +如果你想使用远程模型列表,可以设置为 1 即可 +可以与 `CUSTOM_MODELS` 参数一起使用 +建议配合 `one-api` 类似的中转项目使用 + +### `WHITE_WEBDAV_ENDPOINTS` (可选) 如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求: - 每一个地址必须是一个完整的 endpoint diff --git a/app/api/config/route.ts b/app/api/config/route.ts index 109345f476d..85f31a6cd27 100644 --- a/app/api/config/route.ts +++ b/app/api/config/route.ts @@ -18,6 +18,7 @@ const DANGER_CONFIG = { edgeTTSVoiceName: serverConfig.edgeTTSVoiceName, isUseOpenAIEndpointForAllModels: serverConfig.isUseOpenAIEndpointForAllModels, disableModelProviderDisplay: serverConfig.disableModelProviderDisplay, + isUseRemoteModels: serverConfig.isUseRemoteModels, }; declare global { diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 2c23dd28609..c1b24cbe2b5 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -51,7 +51,8 @@ export interface OpenAIListModelResponse { data: Array<{ id: string; object: string; - root: string; + created: number; + owned_by: string; }>; } @@ -80,7 +81,7 @@ export interface DalleRequestPayload { } export 
class ChatGPTApi implements LLMApi {
-  private disableListModels = true;
+  private disableListModels = false;
 
   path(path: string, model?: string): string {
     const accessStore = useAccessStore.getState();
@@ -651,7 +652,8 @@ export class ChatGPTApi implements LLMApi {
   }
 
   async models(): Promise<LLMModel[]> {
-    if (this.disableListModels) {
+    const accessStore = useAccessStore.getState();
+    if (!accessStore.isUseRemoteModels) {
       return DEFAULT_MODELS.slice();
     }
 
@@ -663,25 +665,27 @@ export class ChatGPTApi implements LLMApi {
     });
 
     const resJson = (await res.json()) as OpenAIListModelResponse;
-    const chatModels = resJson.data?.filter(
-      (m) => m.id.startsWith("gpt-") || m.id.startsWith("chatgpt-"),
-    );
+    // const chatModels = resJson.data?.filter(
+    //   (m) => m.id.startsWith("gpt-") || m.id.startsWith("chatgpt-"),
+    // );
+    const chatModels = (resJson.data ?? []).slice().sort((a, b) => {
+      return b.created - a.created;
+    });
     console.log("[Models]", chatModels);
 
     if (!chatModels) {
       return [];
     }
 
-    //由于目前 OpenAI 的 disableListModels 默认为 true,所以当前实际不会运行到这场
     let seq = 1000; //同 Constant.ts 中的排序保持一致
     return chatModels.map((m) => ({
       name: m.id,
       available: true,
       sorted: seq++,
       provider: {
-        id: "openai",
-        providerName: "OpenAI",
-        providerType: "openai",
+        id: m.owned_by.toLowerCase(),
+        providerName: m.owned_by,
+        providerType: m.owned_by.toLowerCase(),
         sorted: 1,
       },
     }));
diff --git a/app/config/server.ts b/app/config/server.ts
index 7794a614115..83ba21799a0 100644
--- a/app/config/server.ts
+++ b/app/config/server.ts
@@ -238,5 +238,6 @@ export const getServerSideConfig = () => {
       !!process.env.USE_OPENAI_ENDPOINT_FOR_ALL_MODELS,
     disableModelProviderDisplay: !!process.env.DISABLE_MODEL_PROVIDER_DISPLAY,
+    isUseRemoteModels: !!process.env.USE_REMOTE_MODELS,
   };
 };
diff --git a/app/store/access.ts b/app/store/access.ts
index 0175fca6126..20a8ab1e05e 100644
--- a/app/store/access.ts
+++ b/app/store/access.ts
@@ -130,8 +130,9 @@ const DEFAULT_ACCESS_STATE = {
   edgeTTSVoiceName: "zh-CN-YunxiNeural",
   isUseOpenAIEndpointForAllModels: false,
   disableModelProviderDisplay: false,
+  isUseRemoteModels: false,
 };
 
 export const useAccessStore = createPersistStore(
@@ -156,6 +157,12 @@ export const useAccessStore = createPersistStore(
       return get().isUseOpenAIEndpointForAllModels;
     },
 
+    useRemoteModels() {
+      this.fetch();
+
+      return get().isUseRemoteModels;
+    },
+
     edgeVoiceName() {
       this.fetch();