From f2f8cf7b950a8b70ab95d757b063c5bcd4dfb96a Mon Sep 17 00:00:00 2001
From: chenweiyi <737649321@qq.com>
Date: Wed, 8 Nov 2023 10:36:22 +0800
Subject: [PATCH] feat: optimize config files, drop unofficial API support and
 non-general capabilities, and simplify the frontend config by removing the
 backend proxy address setting
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .env.example                                  |  34 +-
 README.md                                     |  43 ++-
 README_zh.md                                  |  47 ++-
 packages/backend/package.json                 |   4 +-
 packages/backend/src/@types/chatgpt.d.ts      |  35 ++
 packages/backend/src/app.mts                  |   2 -
 .../backend/src/controller/completions.mts    | 341 ------------------
 packages/backend/src/controller/message.mts   | 240 +-----------
 packages/backend/src/routes/completions.mts   |   6 -
 packages/backend/src/routes/index.mts         |   5 +-
 packages/backend/src/service/stream.mts       |  86 +++++
 packages/backend/src/service/thirdparty.mts   |  74 ++++
 packages/backend/src/utils/custom.mts         | 128 -------
 packages/backend/src/utils/util.mts           | 175 +++++++++
 packages/frontend/.env                        |   4 +-
 packages/frontend/.umirc.ts                   |  14 +-
 packages/frontend/package.json                |   1 +
 .../frontend/src/pages/ai/chatgpt/Chatgpt.tsx |   2 +-
 pnpm-lock.yaml                                |  13 +-
 19 files changed, 460 insertions(+), 794 deletions(-)
 create mode 100644 packages/backend/src/@types/chatgpt.d.ts
 delete mode 100644 packages/backend/src/controller/completions.mts
 delete mode 100644 packages/backend/src/routes/completions.mts
 create mode 100644 packages/backend/src/service/stream.mts
 create mode 100644 packages/backend/src/service/thirdparty.mts
 delete mode 100644 packages/backend/src/utils/custom.mts

diff --git a/.env.example b/.env.example
index 2a1fce1..fb91b80 100644
--- a/.env.example
+++ b/.env.example
@@ -1,28 +1,20 @@
-# Server Port
+# Server Port, eg: 3000
 SERVER_PORT=3000
 
-# TIMEOUT Time
-CHATGPT_REQUEST_TIMEOUT=2 * 60 * 1000
+# Request timeout in ms, eg: 120000 (i.e. 2 * 60 * 1000)
+CHATGPT_REQUEST_TIMEOUT=
 
-# Default is OpenAI API Base URL Or use a third-party self-built service address, such as the `endpoint` of api2d
+# Default is the OpenAI API Base URL, or use a third-party self-built service address,
+# such as the `endpoint` of api2d
+# eg: https://openai.api2d.net/v1
 OPENAI_API_BASE_URL=
 
-# Default is OpenAI API Key Or use a third-party self-built service address, such as the `endpoint` of api2d
+# Default is the OpenAI API Key, or use a third-party self-built service key,
+# such as the key of api2d
+# eg: fkxxx
 OPENAI_API_KEY=
 
-# PROXY_ADDRESS represents an http proxy. If left blank or not passed any value, it means that the http proxy is not enabled.
-PROXY_ADDRESS=
-
-# change this to an `accessToken` extracted from the ChatGPT site's `https://chat.openai.com/api/auth/session` response
-OPENAI_ACCESS_TOKEN=
-
-# Reverse Proxy - Available on accessToken
-# Default: https://ai.fakeopen.com/api/conversation
-# More: https://github.com/transitive-bullshit/chatgpt-api#reverse-proxy
-API_REVERSE_PROXY=
-
-# Third-party service API address
-CUSTOM_API_URL=
-# Third-party service API may need cookie
-CUSTOM_COOKIE=
-
+# CUSTOM_PROXY represents an http proxy. If left blank or unset,
+# the http proxy is not enabled.
+# eg: http://127.0.0.1:7890
+CUSTOM_PROXY=
diff --git a/README.md b/README.md
index db2dadf..90c11c7 100644
--- a/README.md
+++ b/README.md
@@ -19,37 +19,36 @@ Node version >= 18 is required.
 
 In root directory, copy an `.env.example` file and rename it to `.env`, then modify its fields:
 
 ```
-# OpenAI API Key - https://platform.openai.com/account/api-keys
-OPENAI_API_KEY=
+# Server Port, eg: 3000
+SERVER_PORT=3000
 
-# PROXY_ADDRESS represents an http proxy. If left blank or not passed any value,
-# it means that the http proxy is not enabled.
-PROXY_ADDRESS=
+# Request timeout in ms, eg: 120000 (i.e. 2 * 60 * 1000)
+CHATGPT_REQUEST_TIMEOUT=
 
-# change this to an `accessToken` extracted from the ChatGPT site's
-# `https://chat.openai.com/api/auth/session` response
-OPENAI_ACCESS_TOKEN=
+# OpenAI API Base URL - https://api.openai.com/docs
+# Or self-built service address, eg api2d: https://openai.api2d.net/v1
+OPENAI_API_BASE_URL=
 
-# Reverse Proxy - Available on accessToken
-# Default: https://ai.fakeopen.com/api/conversation
-API_REVERSE_PROXY=
+# OpenAI API Key - https://platform.openai.com/account/api-keys
+# Or self-built service key, eg api2d: fkxxx
+OPENAI_API_KEY=
 
-# Third-party service API address
-CUSTOM_API_URL=
-# Third-party service API may need cookie
-CUSTOM_COOKIE=
+# CUSTOM_PROXY represents an http proxy. If left blank or unset,
+# the http proxy is not enabled.
+CUSTOM_PROXY=
 ```
 
 - `OPENAI_API_BASE_URL`: Indicates the base URL used for chatgpt, which can also use a third-party self-built service address, such as the `endpoint` of [api2d](https://api2d.com/). The default value here is `https://api.openai.com/v1`.
 - `OPENAI_API_KEY`: Represents the official OpenAI [API key](https://platform.openai.com/account/api-keys) that will be used. Alternatively, you can use a third-party service key, such as [api2d](https://api2d.com/).
-- `PROXY_ADDRESS`: Scientific Internet access proxy configuration, for example: http://xxx.
-- `OPENAI_ACCESS_TOKEN`: OpenAI's [access_token](https://chat.openai.com/api/auth/session), The field usually use with `API_REVERSE_PROXY`,Indicates that unofficial APIs will be used to access chatgpt.
-- `API_REVERSE_PROXY`: Indicates available unofficial reverse proxies. By default it adopts "https://ai.fakeopen.com/api/conversation". For details please refer [transitive-bullshit 大佬](https://github.com/transitive-bullshit/chatgpt-api/tree/main#reverse-proxy).
-- `CUSTOM_API_URL`: Indicates the third party support service url.
-- `CUSTOM_COOKIE`: Indicates the third party may need `cookie` info.
+- ~~`PROXY_ADDRESS`: Proxy configuration for restricted networks, for example: http://xxx.~~
+- ~~`OPENAI_ACCESS_TOKEN`: OpenAI's [access_token](https://chat.openai.com/api/auth/session). This field is usually used together with `API_REVERSE_PROXY` and indicates that unofficial APIs will be used to access chatgpt.~~
+- ~~`API_REVERSE_PROXY`: Indicates available unofficial reverse proxies. By default it adopts "https://ai.fakeopen.com/api/conversation". For details please refer to [transitive-bullshit's chatgpt-api](https://github.com/transitive-bullshit/chatgpt-api/tree/main#reverse-proxy).~~
+- ~~`CUSTOM_API_URL`: Indicates the third-party service URL.~~
+- ~~`CUSTOM_COOKIE`: Indicates the `cookie` info the third-party service may need.~~
+- `CUSTOM_PROXY`: Represents an http proxy. If left blank or unset, the http proxy is not enabled.
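For reference, the backend consumes these variables roughly as follows — a minimal sketch based on the new `service/thirdparty.mts` added later in this patch, not a drop-in replacement for it:

```ts
import { ChatGPTAPI } from 'chatgpt'
import proxy from 'https-proxy-agent'

// Sketch of how the env vars above are applied (see service/thirdparty.mts).
const api = new ChatGPTAPI({
  // Falls back to the official endpoint when OPENAI_API_BASE_URL is unset.
  apiBaseUrl: process.env.OPENAI_API_BASE_URL || 'https://api.openai.com',
  apiKey: process.env.OPENAI_API_KEY,
  // When CUSTOM_PROXY is set, requests go through the http proxy by wrapping
  // the global fetch with an https-proxy-agent, as the patch itself does.
  // @ts-ignore -- the agent option is passed through to fetch, as in the patch
  fetch: process.env.CUSTOM_PROXY
    ? (url, options = {}) =>
        fetch(url, { agent: proxy(process.env.CUSTOM_PROXY), ...options })
    : undefined
})
```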
-> Priority: `OPENAI_API_KEY` > `OPENAI_ACCESS_TOKEN` > `CUSTOM_API_URL`。
+> ~~Priority: `OPENAI_API_KEY` > `OPENAI_ACCESS_TOKEN` > `CUSTOM_API_URL`.~~
 
 ## How to Develop?
 
@@ -68,7 +67,7 @@ Execute `npm start` in the root directory.
 
 1. Start front-end:`pnpm run dev:fe`.
 2. Start back-end:`pnpm run dev:be`.
-3. Access front-end project address:`http://localhost:8000`.
+3. Access the front-end project address, eg: `http://localhost:8000`.
 
 ### Method 3: Get from Docker
 
diff --git a/README_zh.md b/README_zh.md
index f27a7cc..65e3657 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -19,41 +19,38 @@
 在根目录下,复制一个.env.example 文件重命名为.env,修改其中的字段:
 
 ```
-# Default is OpenAI API Base URL Or use a third-party self-built service address, such as the `endpoint` of api2d
-OPENAI_API_BASE_URL=
+# Server Port, eg: 3000
+SERVER_PORT=3000
 
-# Default is OpenAI API Key Or use a third-party self-built service address, such as the `endpoint` of api2d
-OPENAI_API_KEY=
-
-# PROXY_ADDRESS represents an http proxy. If left blank or not passed any value, it means that the http proxy is not enabled.
-PROXY_ADDRESS=
+# Request timeout in ms, eg: 120000 (i.e. 2 * 60 * 1000)
+CHATGPT_REQUEST_TIMEOUT=
 
-# change this to an `accessToken` extracted from the ChatGPT site's `https://chat.openai.com/api/auth/session` response
-OPENAI_ACCESS_TOKEN=
+# OpenAI API Base URL - https://api.openai.com/docs
+# Or self-built service address, eg api2d: https://openai.api2d.net/v1
+OPENAI_API_BASE_URL=
 
-# Reverse Proxy - Available on accessToken
-# Default: https://ai.fakeopen.com/api/conversation
-# More: https://github.com/transitive-bullshit/chatgpt-api#reverse-proxy
-API_REVERSE_PROXY=
+# OpenAI API Key - https://platform.openai.com/account/api-keys
+# Or self-built service key, eg api2d: fkxxx
+OPENAI_API_KEY=
 
-# Third-party service API address
-CUSTOM_API_URL=
-# Third-party service API may need cookie
-CUSTOM_COOKIE=
+# CUSTOM_PROXY represents an http proxy. If left blank or unset,
+# the http proxy is not enabled.
+CUSTOM_PROXY=
 ```
 
 # OpenAI API Base URL, default is https://api.openai.com
-- `OPENAI_API_BASE_URL`: 表示使用的 chatgpt 官方的 baseUrl, 也可以使用第三方自建服务地址,比如[api2d](https://api2d.com/)的`endpoint`. 这里默认使用的是 `https://api.openai.com/v1`.
+- `OPENAI_API_BASE_URL`: 表示使用的 chatgpt 官方的 baseUrl, 也可以使用第三方自建服务地址,比如[api2d](https://openai.api2d.net/v1)的`endpoint`. 这里默认使用的是 `https://api.openai.com/v1`.
 - `OPENAI_API_KEY`: 表示会使用 openAI 的官方 [api-key](https://platform.openai.com/account/api-keys) , 也可以采用自建的第三方服务 key,比如[api2d](https://api2d.com/)
-- `PROXY_ADDRESS`: 科学上网的代理配置,比如:http://xxx
-- `OPENAI_ACCESS_TOKEN`: openAI 的 access_token, [这里](https://chat.openai.com/api/auth/session)是获取方式, 该字段通常和`API_REVERSE_PROXY`字段一起使用,表示会使用非官方 API 访问 chatgpt,如果配置了`OPENAI_API_KEY`字段,则当前字段不生效。
-- `API_REVERSE_PROXY`: 表示可用的非官方反向代理,默认采用 `https://ai.fakeopen.com/api/conversation` , 具体请参考[chatgpt-api](https://github.com/transitive-bullshit/chatgpt-api/tree/main#reverse-proxy)
-- `CUSTOM_API_URL`: 表示第三方服务地址
-- `CUSTOM_COOKIE`: 表示访问第三方服务可能需要的`cookie`字段信息
-
-> 优先级:`OPENAI_API_KEY` > `OPENAI_ACCESS_TOKEN` > `CUSTOM_API_URL`。
+- ~~`PROXY_ADDRESS`: 科学上网的代理配置,比如:http://xxx~~
+- ~~`OPENAI_ACCESS_TOKEN`: openAI 的 access_token, [这里](https://chat.openai.com/api/auth/session)是获取方式, 该字段通常和`API_REVERSE_PROXY`字段一起使用,表示会使用非官方 API 访问 chatgpt,如果配置了`OPENAI_API_KEY`字段,则当前字段不生效。~~
+- ~~`API_REVERSE_PROXY`: 表示可用的非官方反向代理,默认采用 `https://ai.fakeopen.com/api/conversation` , 具体请参考[chatgpt-api](https://github.com/transitive-bullshit/chatgpt-api/tree/main#reverse-proxy)~~
+- ~~`CUSTOM_API_URL`: 表示第三方服务地址~~
+- ~~`CUSTOM_COOKIE`: 表示访问第三方服务可能需要的`cookie`字段信息~~
+- `CUSTOM_PROXY`: 表示 http 代理,如果不配置,则表示没有使用 http 代理
+
+> ~~优先级:`OPENAI_API_KEY` > `OPENAI_ACCESS_TOKEN` > `CUSTOM_API_URL`。~~
 
 ## 如何开发?
 
diff --git a/packages/backend/package.json b/packages/backend/package.json
index bc23e3f..7e4deec 100644
--- a/packages/backend/package.json
+++ b/packages/backend/package.json
@@ -9,8 +9,8 @@
   },
   "scripts": {
     "dev": "nodemon ./src/bin/www.mts",
-    "debug": "cross-env DEBUG=\"server,custom,message,app\" nodemon ./src/bin/www.mts",
-    "start": "ts-node ./src/bin/www.mts",
+    "debug": "cross-env DEBUG=\"server,app,service:*,controller:*\" nodemon ./src/bin/www.mts",
+    "start": "cross-env DEBUG=\"server,app,service:*,controller:*\" ts-node ./src/bin/www.mts",
     "test": "echo \"Error: no test specified\" && exit 1",
     "clean": "rm -rf ./build",
     "build-ts": "tsc",
diff --git a/packages/backend/src/@types/chatgpt.d.ts b/packages/backend/src/@types/chatgpt.d.ts
new file mode 100644
index 0000000..3d6d7cd
--- /dev/null
+++ b/packages/backend/src/@types/chatgpt.d.ts
@@ -0,0 +1,35 @@
+interface IRes {
+  id: string
+  text: string
+  conversationId: string
+  response_first: boolean
+  stream: boolean
+}
+
+type chatResponse = {
+  id: string
+  text: string
+} & {
+  data?: null
+  message?: string
+  status?: string
+}
+
+type IResponseGptCallbacks = {
+  onData?: (data: any) => void
+  onEnd?: () => void
+  onError?: (e: any) => void
+}
+
+type IResponseChatGptCallbacks = {
+  onData?: (data: ChatMessage) => void
+  onEnd?: (data: ChatMessage) => void
+  onError?: (e: any) => void
+}
+
+type ISendChatGptDataProps = {
+  res: IRes
+  finish_reason: string | null
+  msg?: string
+  tojson?: boolean
+}
diff --git a/packages/backend/src/app.mts b/packages/backend/src/app.mts
index 1cf3cf2..30cc280 100644
--- a/packages/backend/src/app.mts
+++ b/packages/backend/src/app.mts
@@ -10,7 +10,6 @@ import onerror from 'koa-onerror'
 import serve from 'koa-static'
 import path from 'path'
 
-import { routerChatgpt } from './routes/completions.mjs'
 import { router as message } from './routes/message.mjs'
 import users from './routes/users.mjs'
 import conditional from './utils/koa-conditional-get.mjs'
@@ -79,7 +78,6 @@ app.use(
 // app.use(users.routes(), users.allowedMethods())
 app.use(users.routes()).use(users.allowedMethods())
 app.use(message.routes()).use(message.allowedMethods())
-app.use(routerChatgpt.routes()).use(routerChatgpt.allowedMethods()) // error-handling app.on('error', (err, ctx) => { diff --git a/packages/backend/src/controller/completions.mts b/packages/backend/src/controller/completions.mts deleted file mode 100644 index c95e5bd..0000000 --- a/packages/backend/src/controller/completions.mts +++ /dev/null @@ -1,341 +0,0 @@ -import axios from 'axios' -import type { ResponseType } from 'axios' -import debugLibrary from 'debug' -import { EventEmitter } from 'events' -import Koa from 'koa' -import type { CreateChatCompletionRequest } from 'openai' -import { PassThrough } from 'stream' - -import { IRes, chatResponse, transformContent } from './message.mjs' - -const debug = debugLibrary('completions') -const events = new EventEmitter() -events.setMaxListeners(0) - -async function getDataFromResponse(params: CreateChatCompletionRequest) { - const res: IRes = { - id: '', - text: '', - conversationId: '' - } - - const requestData = { - url: process.env.CUSTOM_API_URL, - method: 'POST', - headers: { - cookie: process.env.CUSTOM_COOKIE, - 'content-type': 'application/json', - proxy: false - }, - data: { - prompt: transformContent(params.messages), - systemMessage: params.messages.find((item) => item.role === 'system') - ?.content, - options: { - operator: 'openai' - } - }, - responseType: 'stream' as ResponseType - } - - let resolve, reject - - const promise = new Promise((res, rej) => { - resolve = res - reject = rej - }) - - const response = await axios(requestData) - - debug('requst params', requestData) - - const stream_response = response.data - - stream_response.on('data', (buffer: Buffer) => { - const responseText = buffer.toString() - debug('on data...', responseText) - - const lines = responseText.split('\n') - - for (let i = 0; i < lines.length; i++) { - if (lines[i].length <= 0) { - continue - } - // logger.log({responseText: lines[i]}); - const data = JSON.parse(lines[i]) as chatResponse - - if (data.status && data.status === 'Fail') { - res.text = data.message || '' - } else { - if (data.id && data.id.length >= 0) { - res.id = data.id - } - res.text += data.text || '' - } - } - }) - - stream_response.on('end', () => { - debug('on end...') - resolve({ - id: res.id, - object: 'chat.completion', - created: Number(String(new Date().getTime()).slice(0, -3)), - model: 'gpt-3.5-turbo', - choices: [ - { - index: 0, - message: { - role: 'assistant', - content: res.text || '' - }, - delta: { - role: 'assistant', - content: res.text || '' - }, - finish_reason: 'stop' - } - ], - usage: { - prompt_tokens: 9, - completion_tokens: 12, - total_tokens: 21 - } - }) - }) - - stream_response.on('error', (e) => { - debug('on error...') - reject({ - id: res.id, - object: 'chat.completion', - created: Number(String(new Date().getTime()).slice(0, -3)), - model: 'gpt-3.5-turbo', - choices: [ - { - index: 0, - message: { - role: 'assistant', - content: res.text || '' - }, - delta: { - role: 'assistant', - content: res.text || '' - }, - finish_reason: 'stop' - } - ], - usage: { - prompt_tokens: 9, - completion_tokens: 12, - total_tokens: 21 - } - }) - }) - - return promise -} - -export default class Completions { - /** - * 模拟 chatgpt 官方 /v1/chat/completions 接口 - * @param ctx - */ - public static async chat(ctx: Koa.Context) { - const params = ctx.request.body as CreateChatCompletionRequest - debug('entrypoint params', ctx.request.body) - - if (params.stream) { - // 流式响应 - Completions.flowResponse(params, ctx) - } else { - // 非流式响应 - await Completions.normalResponse(params, 
ctx) - } - } - - public static async flowResponse( - params: CreateChatCompletionRequest, - ctx: Koa.Context - ) { - const res: IRes = { - id: '', - text: '', - conversationId: '' - } - - const stream = new PassThrough() - const listener = (str) => { - stream.write(`data: ${str}\n\n`) - } - events.on('data', listener) - stream.on('close', () => { - debug('trigger on close') - events.off('data', listener) - }) - - ctx.req.socket.setTimeout(0) - ctx.req.socket.setNoDelay(true) - ctx.req.socket.setKeepAlive(true) - ctx.set({ - 'Content-Type': 'text/event-stream', - 'Cache-Control': 'no-cache, no-transform', - Connection: 'keep-alive' - }) - - ctx.status = 200 - ctx.body = stream - - const requestData = { - url: process.env.CUSTOM_API_URL, - method: 'POST', - headers: { - cookie: process.env.CUSTOM_COOKIE, - 'content-type': 'application/json', - proxy: false - }, - data: { - prompt: transformContent(params.messages), - systemMessage: params.messages.find((item) => item.role === 'system') - ?.content, - options: { - operator: 'openai' - } - }, - responseType: 'stream' as ResponseType - } - - const response = await axios(requestData) - let response_first = true - - debug('requst params', requestData) - - const stream_response = response.data - stream_response.on('data', (buffer: Buffer) => { - const responseText = buffer.toString() - debug('on data...', responseText) - - const lines = responseText.split('\n') - - for (let i = 0; i < lines.length; i++) { - if (lines[i].length <= 0) { - continue - } - // logger.log({responseText: lines[i]}); - const data = JSON.parse(lines[i]) as chatResponse - - if (data.status && data.status === 'Fail') { - res.text = data.message || '' - } else { - if (data.id && data.id.length >= 0) { - res.id = data.id - } - res.text = data.text || '' - } - - // params.onProgress(res) - events.emit( - 'data', - JSON.stringify({ - id: res.id, - object: 'chat.completion', - created: Number(String(new Date().getTime()).slice(0, -3)), - choices: [ - { - index: 0, - message: { - role: response_first ? 'assistant' : '', - content: res.text || '' - }, - delta: { - role: response_first ? 
'assistant' : '', - content: res.text || '' - }, - finish_reason: '' - } - ], - usage: { - prompt_tokens: 9, - completion_tokens: 12, - total_tokens: 21 - } - }) - ) - response_first = false - } - }) - - stream_response.on('end', () => { - debug('on end...') - events.emit( - 'data', - JSON.stringify({ - id: res.id, - object: 'chat.completion', - created: Number(String(new Date().getTime()).slice(0, -3)), - choices: [ - { - index: 0, - message: { - role: '', - content: res.text || '' - }, - delta: { - role: '', - content: res.text || '' - }, - finish_reason: 'stop' - } - ], - usage: { - prompt_tokens: 9, - completion_tokens: 12, - total_tokens: 21 - } - }) - ) - stream.end() - }) - - stream_response.on('error', (e) => { - debug('on error...') - events.emit( - 'data', - JSON.stringify({ - id: res.id, - object: 'chat.completion', - created: Number(String(new Date().getTime()).slice(0, -3)), - choices: [ - { - index: 0, - message: { - role: '', - content: res.text || '' - }, - delta: { - role: '', - content: res.text || '' - }, - finish_reason: 'stop' - } - ], - usage: { - prompt_tokens: 9, - completion_tokens: 12, - total_tokens: 21 - } - }) - ) - stream.end() - }) - } - - public static async normalResponse( - params: CreateChatCompletionRequest, - ctx: Koa.Context - ) { - const res = await getDataFromResponse(params) - debug('normal response', res) - ctx.body = res - } -} diff --git a/packages/backend/src/controller/message.mts b/packages/backend/src/controller/message.mts index ff367d9..191b64a 100644 --- a/packages/backend/src/controller/message.mts +++ b/packages/backend/src/controller/message.mts @@ -1,120 +1,22 @@ -/* - * @Author: chenweiyi - * @Date: 2023-02-21 15:54:08 - * @Last Modified by: chenweiyi - * @Last Modified time: 2023-02-21 15:54:08 - */ -import { - ChatGPTAPI, - ChatGPTAPIOptions, - ChatGPTUnofficialProxyAPI, - ChatMessage -} from 'chatgpt' import debugLibrary from 'debug' import { EventEmitter } from 'events' -import proxy from 'https-proxy-agent' import Koa from 'koa' -import { isNil } from 'lodash-es' -import fetch from 'node-fetch' -import type { CreateChatCompletionRequest } from 'openai' -import { PassThrough } from 'stream' -import CustomChatGPTAPI from '../utils/custom.mjs' - -export type chatResponse = { - id: string - text: string -} & { - data?: null - message?: string - status?: string -} - -export interface CustomChatMessage { - id: string - text: string - conversationId: string -} - -export interface IRes { - id: string - text: string - conversationId: string -} - -const debug = debugLibrary('message') -const chatgptApiMap = new Map< - string, - ChatGPTAPI | ChatGPTUnofficialProxyAPI | CustomChatGPTAPI ->() +import { flowResponse } from '../service/stream.mjs' +const debug = debugLibrary('controller:completions') const events = new EventEmitter() events.setMaxListeners(0) -function GenerateChatGPTAPI(props: ChatGPTAPIOptions) { - if (props.apiKey) { - return new ChatGPTAPI({ ...props }) - } else if (process.env.OPENAI_ACCESS_TOKEN) { - return new ChatGPTUnofficialProxyAPI({ - accessToken: process.env.OPENAI_ACCESS_TOKEN, - apiReverseProxyUrl: - process.env.API_REVERSE_PROXY || - 'https://ai.fakeopen.com/api/conversation' - }) - } else if (process.env.CUSTOM_API_URL) { - return new CustomChatGPTAPI({ - url: process.env.CUSTOM_API_URL, - cookie: process.env.CUSTOM_COOKIE - }) - } - throw new Error( - 'At least one of the fields in process.env needs to be OPENAI_ACCESS_TOKEN, OPENAI_ACCESS_TOKEN, CUSTOM_API_URL ' - ) -} - -const getRestOptions = ({ - 
parentMessageId, - conversationId -}: { - parentMessageId: string - conversationId: string -}) => { - if (process.env.OPENAI_API_KEY && parentMessageId) { - // OPENAI_API_KEY存在时,不需要conversationId - return { - parentMessageId - } - } else if ( - !process.env.OPENAI_API_KEY && - process.env.OPENAI_ACCESS_TOKEN && - parentMessageId && - conversationId - ) { - // OPENAI_ACCESS_TOKEN存在时,需要conversationId - return { - parentMessageId, - conversationId - } - } else if ( - // CUSTOM_API_URL存在时,需要parentMessageId - !process.env.OPENAI_API_KEY && - !process.env.OPENAI_ACCESS_TOKEN && - process.env.CUSTOM_API_URL - ) { - return { - parentMessageId - } - } - return {} -} - -export const transformContent = ( - messages: CreateChatCompletionRequest['messages'] -) => { - const contents = messages - .filter((item) => item.role === 'user') - .map((item) => item.content) - return contents.join(contents.length > 1 ? ',content is:' : '') +export interface ISSEQuery { + msg: string + ownerId?: string + parentMessageId?: string + conversationId?: string + model?: string + apiKey?: string + temperature?: string + top_p?: string } export default class MessageController { @@ -123,125 +25,9 @@ export default class MessageController { * @param ctx */ public static async sendMsgSSE(ctx: Koa.Context) { - const { - msg, - ownerId, - parentMessageId, - conversationId, - model, - apiKey, - temperature, - top_p - } = ctx.request.query as any + const query = ctx.request.query as unknown as ISSEQuery debug('sendMsgSSE params', JSON.stringify(ctx.request.query)) - if (!chatgptApiMap.get(ownerId)) { - const api = GenerateChatGPTAPI({ - apiBaseUrl: process.env.OPENAI_API_BASE_URL || 'https://api.openai.com', - apiKey: apiKey || process.env.OPENAI_API_KEY, - completionParams: { - model: model || 'gpt-3.5-turbo', - temperature: isNil(temperature) ? 0.8 : +temperature, - top_p: isNil(top_p) ? 1 : +top_p - }, - // @ts-ignore - fetch: process.env.PROXY_ADDRESS - ? 
(url, options = {}) => { - const defaultOptions = { - agent: proxy(process.env.PROXY_ADDRESS) - } - const mergedOptions = { - ...defaultOptions, - ...options - } - // @ts-ignore - return fetch(url, mergedOptions) - } - : undefined - }) - chatgptApiMap.set(ownerId, api) - } - const api = chatgptApiMap.get(ownerId) - const stream = new PassThrough() - const listener = (str) => { - stream.write(`data: ${str}\n\n`) - } - events.on('data', listener) - stream.on('close', () => { - debug('trigger on close') - events.off('data', listener) - }) - try { - debug('execute sendMsgSSE ...') - ctx.req.socket.setTimeout(0) - ctx.req.socket.setNoDelay(true) - ctx.req.socket.setKeepAlive(true) - ctx.set({ - 'Content-Type': 'text/event-stream', - 'Cache-Control': 'no-cache, no-transform', - Connection: 'keep-alive' - }) - - ctx.status = 200 - ctx.body = stream - api - .sendMessage(msg, { - onProgress: (partialResponse: ChatMessage | CustomChatMessage) => { - const data = JSON.stringify({ - text: partialResponse.text, - id: partialResponse.id, - conversationId: partialResponse.conversationId, - done: false, - error: false - }) - // debug('onProgress data:', data) - events.emit('data', data) - }, - timeoutMs: +process.env.CHATGPT_REQUEST_TIMEOUT, - ...getRestOptions({ - parentMessageId, - conversationId - }) - }) - .then((res) => { - events.emit( - 'data', - JSON.stringify({ - text: res.text, - id: res.id, - conversationId: res.conversationId, - done: true, - error: false - }) - ) - stream.end() - }) - .catch((e) => { - debug('request error', e.message) - events.emit( - 'data', - JSON.stringify({ - text: e.message, - id: 'error-' + new Date().getTime() + '', - done: true, - error: true - }) - ) - stream.end() - }) - } catch (e: any) { - debug('catch error:', e) - ctx.body = stream - events.emit( - 'data', - JSON.stringify({ - text: e.message ?? 
'server inner error',
-          id: 'error-' + new Date().getTime() + '',
-          done: true,
-          error: true
-        })
-      )
-      stream.end()
-    }
+    flowResponse(query, ctx, events)
   }
 }
diff --git a/packages/backend/src/routes/completions.mts b/packages/backend/src/routes/completions.mts
deleted file mode 100644
index da777a9..0000000
--- a/packages/backend/src/routes/completions.mts
+++ /dev/null
@@ -1,6 +0,0 @@
-import Completions from '../controller/completions.mjs'
-import { routerChatgpt } from './index.mjs'
-
-routerChatgpt.post('/chat/completions', Completions.chat)
-
-export { routerChatgpt }
diff --git a/packages/backend/src/routes/index.mts b/packages/backend/src/routes/index.mts
index 43b82d6..e901b5c 100644
--- a/packages/backend/src/routes/index.mts
+++ b/packages/backend/src/routes/index.mts
@@ -1,10 +1,7 @@
 import RouterEngine from '@koa/router'
 
 const router = new RouterEngine()
-const routerChatgpt = new RouterEngine()
 
 router.prefix('/q')
-routerChatgpt.prefix('/v1')
-
-export { router, routerChatgpt }
+export { router }
diff --git a/packages/backend/src/service/stream.mts b/packages/backend/src/service/stream.mts
new file mode 100644
index 0000000..8bebe5c
--- /dev/null
+++ b/packages/backend/src/service/stream.mts
@@ -0,0 +1,86 @@
+import { ChatMessage, CreateChatCompletionStreamResponse } from 'chatgpt'
+import debugLibrary from 'debug'
+import { EventEmitter } from 'events'
+import { Context } from 'koa'
+import { CreateChatCompletionRequest } from 'openai'
+import { PassThrough } from 'stream'
+
+import { ISSEQuery } from '../controller/message.mjs'
+import { MAX, logText, sendChatGptData } from '../utils/util.mjs'
+import { responseChatgpt } from './thirdparty.mjs'
+
+const debug = debugLibrary('service:stream')
+
+export async function flowResponse(
+  query: ISSEQuery,
+  ctx: Context,
+  events: EventEmitter
+) {
+  const stream = new PassThrough()
+  const listener = (str) => {
+    stream.write(`data: ${str}\n\n`)
+  }
+
+  events.on('data', listener)
+  stream.on('close', () => {
+    debug('request done!')
+    events.off('data', listener)
+  })
+
+  ctx.req.socket.setTimeout(0)
+  ctx.req.socket.setNoDelay(true)
+  ctx.req.socket.setKeepAlive(true)
+  ctx.set({
+    'Content-Type': 'text/event-stream',
+    'Cache-Control': 'no-cache, no-transform',
+    Connection: 'keep-alive'
+  })
+
+  ctx.status = 200
+  ctx.body = stream
+
+  responseChatgpt(query, {
+    onData: (data: ChatMessage) => {
+      debug(
+        `...in writing, Display up to ${MAX} characters: %s`,
+        logText(data.text ?? '')
+      )
+      const res = JSON.stringify({
+        text: data.text,
+        id: data.id,
+        conversationId: data.conversationId,
+        done: false,
+        error: false
+      })
+      events.emit('data', res)
+    },
+    onEnd: (data: ChatMessage) => {
+      debug(
+        `...write flush over, Display up to ${MAX} characters: %s`,
+        logText(data.text ?? '')
+      )
+      const res = JSON.stringify({
+        text: data.text,
+        id: data.id,
+        conversationId: data.conversationId,
+        done: true,
+        error: false
+      })
+      events.emit('data', res)
+      stream.end()
+    },
+    onError: (e) => {
+      debug('request error', e.message)
+      events.emit(
+        'data',
+        JSON.stringify({
+          text: e.message,
+          id: 'error-' + new Date().getTime() + '',
+          done: true,
+          error: true
+        })
+      )
+      stream.end()
+    }
+  })
+}
diff --git a/packages/backend/src/service/thirdparty.mts b/packages/backend/src/service/thirdparty.mts
new file mode 100644
index 0000000..b6b6de7
--- /dev/null
+++ b/packages/backend/src/service/thirdparty.mts
@@ -0,0 +1,74 @@
+import { ChatGPTAPI, ChatGPTUnofficialProxyAPI, ChatMessage } from 'chatgpt'
+import debugLibrary from 'debug'
+import proxy from 'https-proxy-agent'
+import { isNil } from 'lodash-es'
+
+import { ISSEQuery } from '../controller/message.mjs'
+
+const debug = debugLibrary('service:thirdparty')
+const chatgptApiMap = new Map()
+
+const getRestOptions = ({ parentMessageId }: { parentMessageId: string }) => {
+  return {
+    parentMessageId
+  }
+}
+
+export async function responseChatgpt(
+  query: ISSEQuery,
+  callbacks: IResponseChatGptCallbacks = {}
+) {
+  const {
+    msg,
+    ownerId,
+    parentMessageId,
+    conversationId,
+    model,
+    apiKey,
+    temperature,
+    top_p
+  } = query
+  if (!chatgptApiMap.get(ownerId)) {
+    const api = new ChatGPTAPI({
+      apiBaseUrl: process.env.OPENAI_API_BASE_URL || 'https://api.openai.com',
+      apiKey: apiKey || process.env.OPENAI_API_KEY,
+      completionParams: {
+        model: model || 'gpt-3.5-turbo',
+        temperature: isNil(temperature) ? 0.8 : +temperature,
+        top_p: isNil(top_p) ? 1 : +top_p
+      },
+      // @ts-ignore
+      fetch: process.env.CUSTOM_PROXY
+        ? (url, options = {}) => {
+            const defaultOptions = {
+              agent: proxy(process.env.CUSTOM_PROXY)
+            }
+            const mergedOptions = {
+              ...defaultOptions,
+              ...options
+            }
+            // @ts-ignore
+            return fetch(url, mergedOptions)
+          }
+        : undefined
+    })
+    chatgptApiMap.set(ownerId, api)
+  }
+  const api = chatgptApiMap.get(ownerId)
+  try {
+    debug('...input messages: %o', msg)
+    // @ts-ignore
+    const result = await api.sendMessage(msg, {
+      onProgress: (partialResponse: ChatMessage) => {
+        callbacks.onData?.(partialResponse)
+      },
+      timeoutMs: +process.env.CHATGPT_REQUEST_TIMEOUT,
+      ...getRestOptions({
+        parentMessageId
+      })
+    })
+    callbacks.onEnd?.(result)
+  } catch (e) {
+    callbacks.onError?.(e)
+  }
+}
diff --git a/packages/backend/src/utils/custom.mts b/packages/backend/src/utils/custom.mts
deleted file mode 100644
index ba3b510..0000000
--- a/packages/backend/src/utils/custom.mts
+++ /dev/null
@@ -1,128 +0,0 @@
-import axios from 'axios'
-import type { AxiosInstance } from 'axios'
-import { ChatGPTAPIOptions, SendMessageOptions } from 'chatgpt'
-import debugLibrary from 'debug'
-
-import { CustomChatMessage } from '../controller/message.mjs'
-
-const debug = debugLibrary('custom')
-
-interface ICustomRequestProps {
-  url: string
-  cookie: string
-}
-
-type chatResponse = {
-  id: string
-  text: string
-} & {
-  data?: null
-  message?: string
-  status?: string
-}
-
-interface IRes {
-  id: string
-  text: string
-  conversationId: string
-}
-
-export default class CustomChatGPTAPI {
-  public instance: AxiosInstance
-  public props: ICustomRequestProps
-
-  constructor(props: ICustomRequestProps) {
-    const { cookie } = props
-    debug('into CustomChatGPTAPI...')
-    const instance = axios.create({
-      timeout: 30000,
-      headers: {
-        proxy: false,
-        cookie,
-        'content-type': 'application/json'
-        // Origin: host,
-        // Referer: host
-      }
-    
}) - - this.instance = instance - this.props = props - } - - public async sendMessage( - msg: string, - params: SendMessageOptions & { - onProgress: (res: CustomChatMessage) => void - } - ): Promise { - const currentDate = new Date().toISOString().split('T')[0] - const SystemMessage = `You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible. -Knowledge cutoff: 2021-09-01 -Current date: ${currentDate}` - - let resolve, reject - const promise = new Promise((res, rej) => { - resolve = res - reject = rej - }) - - const res: IRes = { - id: '', - text: '', - conversationId: '' - } - const response = await (this.instance as AxiosInstance).post( - this.props.url, - { - prompt: msg, - systemMessage: SystemMessage, - options: { - operator: 'openai', - parentMessageId: params.parentMessageId - } - }, - { - responseType: 'stream' - } - ) - - const stream = response.data - stream.on('data', (buffer: Buffer) => { - debug('on data...') - const responseText = buffer.toString() - - const lines = responseText.split('\n') - - for (let i = 0; i < lines.length; i++) { - if (lines[i].length <= 0) { - continue - } - // logger.log({responseText: lines[i]}); - const data = JSON.parse(lines[i]) as chatResponse - - if (data.status && data.status === 'Fail') { - res.text = data.message || '' - } else { - if (data.id && data.id.length >= 0) { - res.id = data.id - } - res.text += data.text || '' - } - - params.onProgress(res) - } - }) - - stream.on('end', () => { - debug('on end...') - resolve(res) - }) - - stream.on('error', (e) => { - debug('on error...') - reject(e) - }) - - return promise.then((res: IRes) => res) - } -} diff --git a/packages/backend/src/utils/util.mts b/packages/backend/src/utils/util.mts index 134fd8d..552f72b 100644 --- a/packages/backend/src/utils/util.mts +++ b/packages/backend/src/utils/util.mts @@ -1,5 +1,123 @@ import http from 'http' +export type Role = 'user' | 'assistant' | 'system' + +export type ICreateChatCompletionDeltaResponse = { + id: string + object: 'chat.completion.chunk' + created: number + model: string + choices: [ + { + delta: { + role?: Role + content?: string + } + index: number + finish_reason: string | null + } + ] +} + +export type ICreateChatCompletionResponse = { + /** + * + * @type {string} + * @memberof CreateChatCompletionResponse + */ + id: string + /** + * + * @type {string} + * @memberof CreateChatCompletionResponse + */ + object: string + /** + * + * @type {number} + * @memberof CreateChatCompletionResponse + */ + created: number + /** + * + * @type {string} + * @memberof CreateChatCompletionResponse + */ + model: string + /** + * + * @type {Array} + * @memberof CreateChatCompletionResponse + */ + choices: Array + /** + * + * @type {CreateCompletionResponseUsage} + * @memberof CreateChatCompletionResponse + */ + usage?: CreateCompletionResponseUsage +} + +export interface CreateChatCompletionResponseChoicesInner { + /** + * + * @type {number} + * @memberof CreateChatCompletionResponseChoicesInner + */ + index?: number + /** + * + * @type {ChatCompletionResponseMessage} + * @memberof CreateChatCompletionResponseChoicesInner + */ + message?: ChatCompletionResponseMessage + /** + * + * @type {string} + * @memberof CreateChatCompletionResponseChoicesInner + */ + finish_reason?: string +} + +export interface CreateCompletionResponseUsage { + /** + * + * @type {number} + * @memberof CreateCompletionResponseUsage + */ + prompt_tokens: number + /** + * + * @type {number} + * @memberof CreateCompletionResponseUsage + */ + 
completion_tokens: number
+  /**
+   *
+   * @type {number}
+   * @memberof CreateCompletionResponseUsage
+   */
+  total_tokens: number
+}
+
+export interface ChatCompletionResponseMessage {
+  /**
+   * The role of the author of this message.
+   * @type {string}
+   * @memberof ChatCompletionResponseMessage
+   */
+  role: Role
+  /**
+   * The contents of the message
+   * @type {string}
+   * @memberof ChatCompletionResponseMessage
+   */
+  content: string
+}
+
+// Maximum number of characters to render in logs
+export const MAX = 100
+
 export const getClientIp = function (req: http.IncomingMessage) {
   let ip = req.headers['x-forwarded-for'] || req.socket.remoteAddress || ''
   ip = Array.isArray(ip) ? ip[0] : ip
@@ -9,3 +127,60 @@ export const getClientIp = function (req: http.IncomingMessage) {
 
   return ip
 }
+
+export const logText = (text: string) => {
+  if (text.length > MAX) {
+    return text.slice(0, MAX) + '...'
+  }
+  return text
+}
+
+export function sendChatGptData({
+  res,
+  finish_reason,
+  msg,
+  tojson
+}: ISendChatGptDataProps) {
+  const getChoices = () => {
+    if (res.stream) {
+      let response_first = res.response_first
+      res.response_first = false
+      return [
+        {
+          index: 0,
+          delta: {
+            role: response_first ? 'assistant' : undefined,
+            content: msg || res.text || ''
+          },
+          finish_reason
+        }
+      ]
+    } else {
+      return [
+        {
+          index: 0,
+          message: {
+            role: 'assistant',
+            content: msg || res.text || ''
+          },
+          finish_reason
+        }
+      ]
+    }
+  }
+
+  const result = {
+    id: res.id,
+    object: res.stream ? 'chat.completion.chunk' : 'chat.completion',
+    created: Number(String(new Date().getTime()).slice(0, -3)),
+    choices: getChoices(),
+    model: 'gpt-3.5-turbo',
+    usage: {
+      prompt_tokens: 9,
+      completion_tokens: 12,
+      total_tokens: 21
+    }
+  } as ICreateChatCompletionResponse | ICreateChatCompletionDeltaResponse
+
+  return tojson ? 
result : JSON.stringify(result) +} diff --git a/packages/frontend/.env b/packages/frontend/.env index 1af384a..bf9a2cf 100644 --- a/packages/frontend/.env +++ b/packages/frontend/.env @@ -1,4 +1,2 @@ # 端口 -PORT=8000 -# 代理 -CUSTOM_PROXY_URL=http://localhost:3000 \ No newline at end of file +PORT=8000 \ No newline at end of file diff --git a/packages/frontend/.umirc.ts b/packages/frontend/.umirc.ts index 4f774bd..9670d95 100644 --- a/packages/frontend/.umirc.ts +++ b/packages/frontend/.umirc.ts @@ -1,9 +1,14 @@ +import * as dotenv from 'dotenv' import { defineConfig } from '@umijs/max' +import path from 'path' import { autoImportPlugin } from './auto-import' -const serviceUrl = process.env.CUSTOM_PROXY_URL +const config: any = dotenv.config({ + path: path.resolve('..', '..', '.env') +}) +console.log('config:', config) // console.log('serviceUrl', serviceUrl) export default defineConfig({ @@ -58,12 +63,7 @@ export default defineConfig({ ], proxy: { '/q': { - target: serviceUrl, - changeOrigin: true, - disableHostCheck: true - }, - '/v1': { - target: serviceUrl, + target: `http://localhost:${config.parsed.SERVER_PORT}`, changeOrigin: true, disableHostCheck: true } diff --git a/packages/frontend/package.json b/packages/frontend/package.json index 6b7be02..28af719 100644 --- a/packages/frontend/package.json +++ b/packages/frontend/package.json @@ -52,6 +52,7 @@ "@types/react-dom": "^18.0.0", "@types/react-syntax-highlighter": "^15.5.6", "cross-env": "^7.0.3", + "dotenv": "^16.0.3", "prettier-plugin-organize-imports": "^2", "prettier-plugin-packagejson": "^2", "stylelint": "^15.6.0", diff --git a/packages/frontend/src/pages/ai/chatgpt/Chatgpt.tsx b/packages/frontend/src/pages/ai/chatgpt/Chatgpt.tsx index aeb1072..89780c6 100644 --- a/packages/frontend/src/pages/ai/chatgpt/Chatgpt.tsx +++ b/packages/frontend/src/pages/ai/chatgpt/Chatgpt.tsx @@ -132,7 +132,7 @@ export default function IndexPage() { sessionId ) // 如果是error数据,则关闭EventSource - if (result.error && result.done) { + if (result.error || result.done) { source.close() return } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index d49528b..85171be 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1,5 +1,9 @@ lockfileVersion: '6.0' +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + importers: .: @@ -277,6 +281,9 @@ importers: cross-env: specifier: ^7.0.3 version: registry.npmmirror.com/cross-env@7.0.3 + dotenv: + specifier: ^16.0.3 + version: registry.npmmirror.com/dotenv@16.1.4 prettier-plugin-organize-imports: specifier: ^2 version: registry.npmmirror.com/prettier-plugin-organize-imports@2.3.4(prettier@2.8.8)(typescript@5.1.3) @@ -9398,7 +9405,6 @@ packages: name: dotenv version: 16.1.4 engines: {node: '>=12'} - dev: false registry.npmmirror.com/dotenv@5.0.1: resolution: {integrity: sha512-4As8uPrjfwb7VXC+WnLCbXK7y+Ueb2B3zgNCePYfhxS1PYeaO1YTeplffTEcbfLhvFNGLAz90VvJs9yomG7bow==, registry: https://registry.npm.taobao.org/, tarball: https://registry.npmmirror.com/dotenv/-/dotenv-5.0.1.tgz} @@ -16383,6 +16389,7 @@ packages: resolution: {integrity: sha512-yPw4Sng1gWghHQWj0B3ZggWUm4qVbPwPFcRG8KyxiU7J2OHFSoEHKS+EZ3fv5l1t9CyCiop6l/ZYeWbrgoQejw==, registry: https://registry.npm.taobao.org/, tarball: https://registry.npmmirror.com/prr/-/prr-1.0.1.tgz} name: prr version: 1.0.1 + requiresBuild: true dev: false optional: true @@ -21952,7 +21959,3 @@ packages: name: zwitch version: 2.0.4 dev: false - -settings: - autoInstallPeers: true - excludeLinksFromLockfile: false
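
The SSE payload emitted by the new `flowResponse` service has the shape `{ text, id, conversationId, done, error }`. A browser client can consume it roughly as follows — a minimal sketch, assuming a route such as `/q/sendMsgSSE` (the message routes are mounted under `router.prefix('/q')`, but the exact path is registered in `routes/message.mts`, which this patch does not show) and a hypothetical `render` helper:

```ts
// Sketch: consuming the backend SSE stream from the browser.
// The payload shape comes from service/stream.mts in this patch;
// the route path and the render() helper are assumptions.
const source = new EventSource('/q/sendMsgSSE?msg=hello&ownerId=u1')

source.onmessage = (event: MessageEvent<string>) => {
  const result = JSON.parse(event.data) as {
    text: string
    id: string
    conversationId?: string
    done: boolean
    error: boolean
  }
  // Stop on error OR normal completion, mirroring the Chatgpt.tsx fix above.
  if (result.error || result.done) {
    source.close()
    return
  }
  render(result.text) // render partial text as it streams in
}

// Assumed helper for the sketch.
declare function render(text: string): void
```

Closing the `EventSource` on `done` as well as on `error` matches the one-line change to `Chatgpt.tsx` in this patch, which previously only closed the connection when both flags were set and so kept it open after a normal completion.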