diff --git a/.env.template b/.env.template index 83c440724..a33d2433b 100644 --- a/.env.template +++ b/.env.template @@ -1,6 +1,9 @@ # proxy -AXIOS_PROXY_HOST=127.0.0.1 -AXIOS_PROXY_PORT=7890 +# AXIOS_PROXY_HOST=127.0.0.1 +# AXIOS_PROXY_PORT=7890 +# OPENAI_BASE_URL=https://api.openai.com/v1 +# OPENAI_BASE_URL_AUTH=可选的安全凭证 +# 是否开启队列任务。 1-开启,0-关闭(请求parentUrl去执行任务,单机时直接填1) queueTask=1 parentUrl=https://hostname/api/openapi/startEvents # email @@ -15,7 +18,6 @@ aliTemplateCode=SMS_xxx TOKEN_KEY=xxx # openai OPENAIKEY=sk-xxx -OPENAI_BASE_URL=https://api.openai.com/v1 # db MONGODB_URI=mongodb://username:password@0.0.0.0:27017/test?authSource=admin PG_HOST=0.0.0.0 diff --git a/README.md b/README.md index 039934956..8503d25c6 100644 --- a/README.md +++ b/README.md @@ -6,32 +6,33 @@ Fast GPT 允许你使用自己的 openai API KEY 来快速的调用 openai 接 ![KBProcess](docs/imgs/KBProcess.jpg?raw=true "KBProcess") ## 开发 -复制 .env.template 成 .env.local ,填写核心参数 - +**配置环境变量** ```bash -# proxy(不需要代理可忽略) +# proxy(可选) AXIOS_PROXY_HOST=127.0.0.1 AXIOS_PROXY_PORT=7890 -# 中转方案,修改 openai 的 base url +# openai 中转连接(可选) OPENAI_BASE_URL=https://api.openai.com/v1 -# 是否开启队列任务。 1-开启,0-关闭(请求parentUrl去执行任务,单机时直接填1) +OPENAI_BASE_URL_AUTH=可选的安全凭证 +# 是否开启队列任务。 1-开启,0-关闭(请求 parentUrl 去执行任务,单机时直接填1) queueTask=1 parentUrl=https://hostname/api/openapi/startEvents -# email,参考 nodeMail 获取参数 +# 发送邮箱验证码配置。参考 nodeMail 获取参数,自行百度。 MY_MAIL=xxx@qq.com MAILE_CODE=xxx -# 阿里短信服务 +# 阿里短信服务(邮箱和短信至少二选一) aliAccessKeyId=xxx aliAccessKeySecret=xxx aliSignName=xxx aliTemplateCode=SMS_xxx -# token(随便填,登录凭证) +# token(随便填,作为登录凭证) TOKEN_KEY=xxx # openai key OPENAIKEY=sk-xxx # mongo连接地址 MONGODB_URI=mongodb://username:password@0.0.0.0:27017/test?authSource=admin -MONGODB_NAME=xxx # mongo数据库名称 +# mongo数据库名称 +MONGODB_NAME=xxx # pg 数据库相关内容,和 docker-compose 对上 PG_HOST=0.0.0.0 PG_PORT=8102 @@ -39,13 +40,20 @@ PG_USER=xxx PG_PASSWORD=xxx PG_DB_NAME=xxx ``` -```bash +**运行** +``` pnpm dev ``` -## docker 部署 +## 部署 -### 安装 docker 和 docker-compose +### 代理环境(国外服务器可忽略) +1. [clash 方案](./docs/proxy/clash.md) - 仅需一台服务器(需要有 clash) +2. [nginx 方案](./docs/proxy/nginx.md) - 需要一台国外服务器 +3. [cloudflare 方案](./docs/proxy/cloudflare.md) - 需要有域名(每日免费 10w 次代理请求) + +### docker 部署 +#### 1. 安装 docker 和 docker-compose 这个不同系统略有区别,百度安装下。验证安装成功后进行下一步。下面给出一个例子: ```bash # 安装docker @@ -59,48 +67,12 @@ docker -v docker-compose -v ``` -### 安装 clash 代理(选) -```bash -# 下载包 -curl https://glados.rocks/tools/clash-linux.zip -o clash.zip -# 解压 -unzip clash.zip -# 下载终端配置⽂件(改成自己配置文件路径) -curl https://update.glados-config.com/clash/98980/8f30944/70870/glados-terminal.yaml > config.yaml -# 赋予运行权限 -chmod +x ./clash-linux-amd64-v1.10.0 -# 记得配置端口变量: -export ALL_PROXY=socks5://127.0.0.1:7891 -export http_proxy=http://127.0.0.1:7890 -export https_proxy=http://127.0.0.1:7890 -export HTTP_PROXY=http://127.0.0.1:7890 -export HTTPS_PROXY=http://127.0.0.1:7890 -# 运行脚本: 删除clash - 到 clash 目录 - 删除缓存 - 执行运行. 会生成一个 nohup.out 文件,可以看到 clash 的 logs -OLD_PROCESS=$(pgrep clash) -if [ ! -z "$OLD_PROCESS" ]; then - echo "Killing old process: $OLD_PROCESS" - kill $OLD_PROCESS -fi -sleep 2 -cd **/clash -rm -f ./nohup.out || true -rm -f ./cache.db || true -nohup ./clash-linux-amd64-v1.10.0 -d ./ & -echo "Restart clash" -``` +#### 2. 创建3个初始化文件 +手动创建或者直接把 deploy 里内容复制过去 -### 本地 docker 打包 -```bash -docker build -t imageName:tag . 
-docker push imageName:tag -# 或者直接拉镜像,见下方 -``` - -### 准备初始化文件 **/root/fast-gpt/pg/init.sql** ```sql -#!/bin/bash set -e psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL @@ -121,6 +93,7 @@ CREATE INDEX modelData_userId_index ON modelData USING HASH (user_id); CREATE INDEX modelData_modelId_index ON modelData USING HASH (model_id); EOSQL ``` + **/root/fast-gpt/nginx/nginx.conf** ```conf user nginx; @@ -173,6 +146,7 @@ http { } } ``` + **/root/fast-gpt/docker-compose.yml** ```yml version: "3.3" @@ -183,8 +157,10 @@ services: restart: always container_name: fast-gpt environment: - - AXIOS_PROXY_HOST=127.0.0.1 - - AXIOS_PROXY_PORT=7890 + # - AXIOS_PROXY_HOST=127.0.0.1 + # - AXIOS_PROXY_PORT=7890 + # - OPENAI_BASE_URL=https://api.openai.com/v1 + # - OPENAI_BASE_URL_AUTH=可选的安全凭证 - MY_MAIL=xxxx@qq.com - MAILE_CODE=xxxx - aliAccessKeyId=xxxx @@ -240,7 +216,9 @@ services: - /root/fast-gpt/mongo/logs:/var/log/mongodb - /etc/localtime:/etc/localtime:ro ``` -### 辅助运行脚本 + +#### 3. 运行 docker-compose +下面是一个辅助脚本,也可以直接 docker-compose up -d **run.sh 运行文件** ```bash #!/bin/bash @@ -264,10 +242,15 @@ do done ``` -## Mac 可能的问题 -> 因为教程有部分镜像不兼容arm64,所以写个文档指导新手如何快速在mac上面搭建fast-gpt[如何在mac上面部署fastgpt](./docs/mac.md) -## Git Action 配置 +## 其他优化点 +### Git Action 自动打包镜像 +.github里拥有一个 git 提交到 main 分支时自动打包 amd64 和 arm64 镜像的 actions。你仅需要提前在 git 配置好 session。 + 1. 创建账号 session: 头像 -> settings -> 最底部 Developer settings -> Personal access tokens -> tokens(classic) -> 创建新 session,把一些看起来需要的权限勾上。 2. 添加 session 到仓库: 仓库 -> settings -> Secrets and variables -> Actions -> 创建secret -3. 填写 secret: Name-GH_PAT, Secret-第一步的tokens \ No newline at end of file +3. 填写 secret: Name-GH_PAT, Secret-第一步的tokens + +## 其他问题 +### Mac 可能的问题 +> 因为教程有部分镜像不兼容arm64,所以写个文档指导新手如何快速在mac上面搭建fast-gpt[如何在mac上面部署fastgpt](./docs/mac.md) diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml new file mode 100644 index 000000000..9e47a2141 --- /dev/null +++ b/deploy/docker-compose.yml @@ -0,0 +1,66 @@ +version: '3.3' +services: + fast-gpt: + image: c121914yu/fast-gpt:latest + network_mode: host + restart: always + container_name: fast-gpt + environment: + # - AXIOS_PROXY_HOST=127.0.0.1 + # - AXIOS_PROXY_PORT=7890 + # - OPENAI_BASE_URL=https://api.openai.com/v1 + # - OPENAI_BASE_URL_AUTH=可选的安全凭证 + - MY_MAIL=xxxx@qq.com + - MAILE_CODE=xxxx + - aliAccessKeyId=xxxx + - aliAccessKeySecret=xxxx + - aliSignName=xxxxx + - aliTemplateCode=SMS_xxxx + - TOKEN_KEY=xxxx + - queueTask=1 + - parentUrl=https://hostname/api/openapi/startEvents + - MONGODB_URI=mongodb://username:passsword@0.0.0.0:27017/?authSource=admin + - MONGODB_NAME=xxx + - PG_HOST=0.0.0.0 + - PG_PORT=8100 + - PG_USER=xxx + - PG_PASSWORD=xxx + - PG_DB_NAME=xxx + - OPENAIKEY=sk-xxxxx + nginx: + image: nginx:alpine3.17 + container_name: nginx + restart: always + network_mode: host + volumes: + - /root/fast-gpt/nginx/nginx.conf:/etc/nginx/nginx.conf:ro + - /root/fast-gpt/nginx/logs:/var/log/nginx + - /root/fast-gpt/nginx/ssl/docgpt.key:/ssl/docgpt.key + - /root/fast-gpt/nginx/ssl/docgpt.pem:/ssl/docgpt.pem + pg: + image: ankane/pgvector + container_name: pg + restart: always + ports: + - 8100:5432 + environment: + - POSTGRES_USER=xxx + - POSTGRES_PASSWORD=xxx + - POSTGRES_DB=xxx + volumes: + - /root/fast-gpt/pg/data:/var/lib/postgresql/data + - /root/fast-gpt/pg/init.sql:/docker-entrypoint-initdb.d/init.sh + - /etc/localtime:/etc/localtime:ro + mongodb: + image: mongo:4.0.1 + container_name: mongo + restart: always + ports: + - 27017:27017 + environment: + - 
MONGO_INITDB_ROOT_USERNAME=username + - MONGO_INITDB_ROOT_PASSWORD=password + volumes: + - /root/fast-gpt/mongo/data:/data/db + - /root/fast-gpt/mongo/logs:/var/log/mongodb + - /etc/localtime:/etc/localtime:ro diff --git a/deploy/nginx/nginx.conf b/deploy/nginx/nginx.conf new file mode 100644 index 000000000..bae160c0c --- /dev/null +++ b/deploy/nginx/nginx.conf @@ -0,0 +1,49 @@ +user nginx; +worker_processes auto; +worker_rlimit_nofile 51200; + +events { + worker_connections 1024; +} + +http { + access_log off; + server_names_hash_bucket_size 512; + client_header_buffer_size 32k; + large_client_header_buffers 4 32k; + client_max_body_size 50M; + + gzip on; + gzip_min_length 1k; + gzip_buffers 4 8k; + gzip_http_version 1.1; + gzip_comp_level 6; + gzip_vary on; + gzip_types text/plain application/x-javascript text/css application/javascript application/json application/xml; + gzip_disable "MSIE [1-6]\."; + + open_file_cache max=1000 inactive=1d; + open_file_cache_valid 30s; + open_file_cache_min_uses 8; + open_file_cache_errors off; + + server { + listen 443 ssl; + server_name docgpt.ahapocket.cn; + ssl_certificate /ssl/docgpt.pem; + ssl_certificate_key /ssl/docgpt.key; + ssl_session_timeout 5m; + + location / { + proxy_pass http://localhost:3000; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + } + server { + listen 80; + server_name docgpt.ahapocket.cn; + rewrite ^(.*) https://$server_name$1 permanent; + } +} \ No newline at end of file diff --git a/deploy/pg/init.sql b/deploy/pg/init.sql new file mode 100644 index 000000000..3841e3c2b --- /dev/null +++ b/deploy/pg/init.sql @@ -0,0 +1,19 @@ +set -e +psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL + +CREATE EXTENSION vector; +-- init table +CREATE TABLE modelData ( + id BIGSERIAL PRIMARY KEY, + vector VECTOR(1536), + status VARCHAR(50) NOT NULL, + user_id VARCHAR(50) NOT NULL, + model_id VARCHAR(50) NOT NULL, + q TEXT NOT NULL, + a TEXT NOT NULL +); +-- create index +CREATE INDEX modelData_status_index ON modelData USING HASH (status); +CREATE INDEX modelData_userId_index ON modelData USING HASH (user_id); +CREATE INDEX modelData_modelId_index ON modelData USING HASH (model_id); +EOSQL \ No newline at end of file diff --git a/deploy/run.sh b/deploy/run.sh new file mode 100644 index 000000000..e8fa46203 --- /dev/null +++ b/deploy/run.sh @@ -0,0 +1,19 @@ +#!/bin/bash +docker-compose pull +docker-compose up -d + +echo "Docker Compose 重新拉取镜像完成!" 
+ +# 删除本地旧镜像 +images=$(docker images --format "{{.ID}} {{.Repository}}" | grep fast-gpt) + +# 将镜像 ID 和名称放入数组中 +IFS=$'\n' read -rd '' -a image_array <<<"$images" + +# 遍历数组并删除所有旧的镜像 +for ((i=1; i<${#image_array[@]}; i++)) +do + image=${image_array[$i]} + image_id=${image%% *} + docker rmi $image_id +done \ No newline at end of file diff --git a/docs/proxy/clash.md b/docs/proxy/clash.md new file mode 100644 index 000000000..7819a4fef --- /dev/null +++ b/docs/proxy/clash.md @@ -0,0 +1,68 @@ +# 安装 clash + +clash 会在本机启动代理。对应的,你需要配置项目的两个环境变量: + +``` +AXIOS_PROXY_HOST=127.0.0.1 +AXIOS_PROXY_PORT=7890 +``` + +需要注的是,在你的 config.yaml 文件中,最好仅指定 api.openai.com 走代理,其他请求都直连。 + +**安装clash** +```bash +# 下载包 +curl https://glados.rocks/tools/clash-linux.zip -o clash.zip +# 解压 +unzip clash.zip +# 下载终端配置⽂件(改成自己配置文件路径) +curl https://update.glados-config.com/clash/98980/8f30944/70870/glados-terminal.yaml > config.yaml +# 赋予运行权限 +chmod +x ./clash-linux-amd64-v1.10.0 +``` + +**runClash.sh** +```sh +# 记得配置端口变量: +export ALL_PROXY=socks5://127.0.0.1:7891 +export http_proxy=http://127.0.0.1:7890 +export https_proxy=http://127.0.0.1:7890 +export HTTP_PROXY=http://127.0.0.1:7890 +export HTTPS_PROXY=http://127.0.0.1:7890 + +# 运行脚本: 删除clash - 到 clash 目录 - 删除缓存 - 执行运行. 会生成一个 nohup.out 文件,可以看到 clash 的 logs +OLD_PROCESS=$(pgrep clash) +if [ ! -z "$OLD_PROCESS" ]; then + echo "Killing old process: $OLD_PROCESS" + kill $OLD_PROCESS +fi +sleep 2 +cd **/clash +rm -f ./nohup.out || true +rm -f ./cache.db || true +nohup ./clash-linux-amd64-v1.10.0 -d ./ & +echo "Restart clash" +``` + +**config.yaml配置例子** +```yaml +mixed-port: 7890 +allow-lan: false +bind-address: '*' +mode: rule +log-level: warning +dns: + enable: true + ipv6: false + nameserver: + - 8.8.8.8 + - 8.8.4.4 + cache-size: 400 +proxies: + - +proxy-groups: + - { name: '♻️ 自动选择', type: url-test, proxies: [香港V01×1.5], url: 'https://api.openai.com', interval: 3600} +rules: + - 'DOMAIN-SUFFIX,api.openai.com,♻️ 自动选择' + - 'MATCH,DIRECT' +``` \ No newline at end of file diff --git a/docs/proxy/cloudflare.md b/docs/proxy/cloudflare.md new file mode 100644 index 000000000..ef130a207 --- /dev/null +++ b/docs/proxy/cloudflare.md @@ -0,0 +1,46 @@ +# cloudflare 代理配置 + +[来自 "不做了睡觉" 教程](https://gravel-twister-d32.notion.site/FastGPT-API-ba7bb261d5fd4fd9bbb2f0607dacdc9e) + +**workers 配置文件** + +```js +const TELEGRAPH_URL = 'https://api.openai.com'; + +addEventListener('fetch', (event) => { + event.respondWith(handleRequest(event.request)); +}); + +async function handleRequest(request) { + // 安全校验 + if (request.headers.get('auth') !== 'auth_code') { + return new Response('UnAuthorization', { status: 403 }); + } + + const url = new URL(request.url); + url.host = TELEGRAPH_URL.replace(/^https?:\/\//, ''); + + const modifiedRequest = new Request(url.toString(), { + headers: request.headers, + method: request.method, + body: request.body, + redirect: 'follow' + }); + + const response = await fetch(modifiedRequest); + const modifiedResponse = new Response(response.body, response); + + // 添加允许跨域访问的响应头 + modifiedResponse.headers.set('Access-Control-Allow-Origin', '*'); + + return modifiedResponse; +} +``` + +**对应的环境变量** +务必别忘了填 v1 + +``` +OPENAI_BASE_URL=https://xxxxxx/v1 +OPENAI_BASE_URL_AUTH=auth_code +``` diff --git a/docs/proxy/nginx.md b/docs/proxy/nginx.md new file mode 100644 index 000000000..e8d445dba --- /dev/null +++ b/docs/proxy/nginx.md @@ -0,0 +1,72 @@ +# nginx 反向代理 openai 接口 +如果你有国外的服务器,可以通过配置 nginx 反向代理,转发 openai 相关的请求,从而让国内的服务器可以通过访问该 nginx 去访问 openai 接口。 + +```conf +user nginx; 
+worker_processes auto; +worker_rlimit_nofile 51200; + +events { + worker_connections 1024; +} + +http { + resolver 8.8.8.8; + proxy_ssl_server_name on; + + access_log off; + server_names_hash_bucket_size 512; + client_header_buffer_size 32k; + large_client_header_buffers 4 32k; + client_max_body_size 50M; + + gzip on; + gzip_min_length 1k; + gzip_buffers 4 8k; + gzip_http_version 1.1; + gzip_comp_level 6; + gzip_vary on; + gzip_types text/plain application/x-javascript text/css application/javascript application/json application/xml; + gzip_disable "MSIE [1-6]\."; + + open_file_cache max=1000 inactive=1d; + open_file_cache_valid 30s; + open_file_cache_min_uses 8; + open_file_cache_errors off; + + server { + listen 443 ssl; + server_name your_host; + ssl_certificate /ssl/your_host.pem; + ssl_certificate_key /ssl/your_host.key; + ssl_session_timeout 5m; + + location ~ /openai/(.*) { + # auth check + if ($http_authkey != "xxxxxx") { + return 403; + } + + proxy_pass https://api.openai.com/$1$is_args$args; + proxy_set_header Host api.openai.com; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # 流式响应 + proxy_set_header Connection ''; + proxy_http_version 1.1; + chunked_transfer_encoding off; + proxy_buffering off; + proxy_cache off; + # 一般响应 + proxy_buffer_size 128k; + proxy_buffers 4 256k; + proxy_busy_buffers_size 256k; + } + } + server { + listen 80; + server_name ai.fastgpt.run; + rewrite ^(.*) https://$server_name$1 permanent; + } +} +``` \ No newline at end of file diff --git a/src/pages/chat/index.tsx b/src/pages/chat/index.tsx index 80e62b6d3..0a2d7a82d 100644 --- a/src/pages/chat/index.tsx +++ b/src/pages/chat/index.tsx @@ -57,6 +57,8 @@ const Chat = ({ modelId, chatId }: { modelId: string; chatId: string }) => { // 中断请求 const controller = useRef(new AbortController()); + const isResetPage = useRef(false); + const [chatData, setChatData] = useState({ chatId, modelId, @@ -166,7 +168,9 @@ const Chat = ({ modelId, chatId }: { modelId: string; chatId: string }) => { const resetChat = useCallback( async (modelId = chatData.modelId, chatId = '') => { // 强制中断流 + isResetPage.current = true; controller.current?.abort(); + try { router.replace(`/chat?modelId=${modelId}&chatId=${chatId}`); loadChatInfo({ @@ -199,6 +203,7 @@ const Chat = ({ modelId, chatId }: { modelId: string; chatId: string }) => { // create abort obj const abortSignal = new AbortController(); controller.current = abortSignal; + isResetPage.current = false; const prompt = { obj: prompts.obj, @@ -229,6 +234,11 @@ const Chat = ({ modelId, chatId }: { modelId: string; chatId: string }) => { abortSignal }); + // 重置了页面,说明退出了当前聊天, 不缓存任何内容 + if (isResetPage.current) { + return; + } + let newChatId = ''; // 保存对话信息 try { diff --git a/src/service/utils/auth.ts b/src/service/utils/auth.ts index 73feefc0a..96d4cf7a6 100644 --- a/src/service/utils/auth.ts +++ b/src/service/utils/auth.ts @@ -9,7 +9,7 @@ import mongoose from 'mongoose'; export const getOpenAIApi = (apiKey: string) => { const configuration = new Configuration({ apiKey, - basePath: process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1' + basePath: process.env.OPENAI_BASE_URL }); return new OpenAIApi(configuration); diff --git a/src/service/utils/openai.ts b/src/service/utils/openai.ts index 9c0b7afa4..3fbd23bc1 100644 --- a/src/service/utils/openai.ts +++ b/src/service/utils/openai.ts @@ -146,13 +146,13 @@ export const gpt35StreamResponse = ({ const decoder = new TextDecoder(); try { + const parser = 
createParser(onParse); for await (const chunk of chatResponse.data as any) { if (stream.destroyed) { // 流被中断了,直接忽略后面的内容 break; } - const parser = createParser(onParse); - parser.feed(decoder.decode(chunk)); + parser.feed(decoder.decode(chunk, { stream: true })); } } catch (error) { console.log('pipe error', error); diff --git a/src/service/utils/tools.ts b/src/service/utils/tools.ts index 3ed656880..05cac36e4 100644 --- a/src/service/utils/tools.ts +++ b/src/service/utils/tools.ts @@ -86,7 +86,12 @@ export const authOpenApiKey = async (req: NextApiRequest) => { /* openai axios config */ export const axiosConfig = { - httpsAgent: global.httpsAgent + httpsAgent: global.httpsAgent, + headers: process.env.OPENAI_BASE_URL_AUTH + ? { + auth: process.env.OPENAI_BASE_URL_AUTH + } + : {} }; /* delete invalid symbol */
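
Reviewer note, not part of this diff: the sketch below shows one way to smoke-test the relay variables this PR introduces (`OPENAI_BASE_URL`, `OPENAI_BASE_URL_AUTH`, and the `auth` header that `axiosConfig` in `src/service/utils/tools.ts` now forwards). The standalone script, the `GET /models` endpoint choice, and the helper names are assumptions for illustration only, not code from the repository.

```ts
// Minimal relay smoke test (assumed helper, not part of this PR).
// It mirrors the new axiosConfig: the optional `auth` header is what the
// cloudflare worker in docs/proxy/cloudflare.md checks before forwarding.
import axios from 'axios';

const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1';

const relayHeaders: Record<string, string> = process.env.OPENAI_BASE_URL_AUTH
  ? { auth: process.env.OPENAI_BASE_URL_AUTH }
  : {};

async function checkRelay() {
  // GET /models is a cheap, read-only call to confirm the relay reaches openai
  const res = await axios.get(`${baseUrl}/models`, {
    headers: {
      ...relayHeaders,
      Authorization: `Bearer ${process.env.OPENAIKEY}`
    },
    timeout: 10000
  });
  console.log(`relay ok: ${res.data?.data?.length ?? 0} models visible`);
}

checkRelay().catch((err) => {
  console.error('relay check failed:', err?.response?.status ?? err.message);
  process.exit(1);
});
```

One related observation: the nginx relay in docs/proxy/nginx.md authenticates via `$http_authkey` (an `authkey` request header), while the code added in tools.ts and the cloudflare worker both use a header named `auth`; whichever relay is deployed, the header name checked by the proxy needs to match the one the app sends.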