V4.8.20 feature (#3686)

* Aiproxy (#3649)

* model config

* feat: model config ui

* perf: rename variable

* feat: custom request url

* perf: model buffer

* perf: init model

* feat: json model config

* auto login

* fix: ts

* update packages

* package

* fix: dockerfile

* feat: usage filter & export & dashboard (#3538)

* feat: usage filter & export & dashboard

* adjust ui

* fix tmb scroll

* fix code & select all

* merge

* perf: usages list;perf: move components (#3654)

* perf: usages list

* team sub plan load

* perf: usage dashboard code

* perf: dashboard ui

* perf: move components

* add default model config (#3653)

* 4.8.20 test (#3656)

* provider

* perf: model config

* model perf (#3657)

* fix: model

* dataset quote

* perf: model config

* model tag

* doubao model config

* perf: config model

* feat: model test

* fix: POST 500 error on dingtalk bot (#3655)

* feat: default model (#3662)

* move model config

* feat: default model

* fix: falsely triggered org selection (#3661)

* export usage csv i18n (#3660)

* export usage csv i18n

* fix build

* feat: markdown extension (#3663)

* feat: markdown extension

* media cros

* rerank test

* default price

* perf: default model

* fix: cannot custom provider

* fix: default model select

* update bg

* perf: default model selector

* fix: usage export

* i18n

* fix: rerank

* update init extension

* perf: ip limit check

* doubao model order

* web default model

* perf: tts selector

* perf: tts error

* qrcode package

* reload buffer (#3665)

* reload buffer

* reload buffer

* tts selector

* fix: err tip (#3666)

* fix: err tip

* perf: training queue

* doc

* fix interactive edge (#3659)

* fix interactive edge

* fix

* comment

* add gemini model

* fix: chat model select

* perf: supplement assistant empty response (#3669)

* perf: supplement assistant empty response

* check array

* perf: max_token count; feat: support reasoner output; fix: member scroll (#3681)

* perf: supplement assistant empty response

* check array

* perf: max_token count

* feat: support reasoner output

* member scroll

* update provider order

* i18n

* fix: stream response (#3682)

* perf: supplement assistant empty response

* check array

* fix: stream response

* fix: model config cannot set to null

* fix: reasoning response (#3684)

* perf: supplement assistant empty response

* check array

* fix: reasoning response

* fix: reasoning response

* doc (#3685)

* perf: supplement assistant empty response

* check array

* doc

* lock

* animation

* update doc

* update compose

* doc

* doc

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
Archer 2025-02-05 00:10:47 +08:00 committed by GitHub
parent c393002f1d
commit db2c0a0bdb
496 changed files with 9031 additions and 4726 deletions

View File

@ -58,7 +58,7 @@ jobs:
# Step 4 - Builds the site using Hugo
- name: Build
run: cd docSite && hugo mod get -u github.com/colinwilson/lotusdocs && hugo -v --minify
run: cd docSite && hugo mod get -u github.com/colinwilson/lotusdocs@6d0568e && hugo -v --minify
# Step 5 - Push our generated site to Vercel
- name: Deploy to Vercel

View File

@ -58,7 +58,7 @@ jobs:
# Step 4 - Builds the site using Hugo
- name: Build
run: cd docSite && hugo mod get -u github.com/colinwilson/lotusdocs && hugo -v --minify
run: cd docSite && hugo mod get -u github.com/colinwilson/lotusdocs@6d0568e && hugo -v --minify
# Step 5 - Push our generated site to Vercel
- name: Deploy to Vercel

View File

@ -3,7 +3,7 @@ FROM hugomods/hugo:0.117.0 AS builder
WORKDIR /app
ADD ./docSite hugo
RUN cd /app/hugo && hugo mod get -u github.com/colinwilson/lotusdocs && hugo -v --minify
RUN cd /app/hugo && hugo mod get -u github.com/colinwilson/lotusdocs@6d0568e && hugo -v --minify
FROM fholzer/nginx-brotli:latest

20 binary image files added (new documentation screenshots, 108–393 KiB each; contents not shown)

View File

@ -25,251 +25,6 @@ weight: 707
"qaMaxProcess": 15, // 问答拆分线程数量
"tokenWorkers": 50, // Token 计算线程保持数,会持续占用内存,不能设置太大。
"pgHNSWEfSearch": 100 // 向量搜索参数。越大搜索越精确但是速度越慢。设置为100有99%+精度。
},
"llmModels": [
{
"provider": "OpenAI", // 模型提供商主要用于分类展示目前已经内置提供商包括https://github.com/labring/FastGPT/blob/main/packages/global/core/ai/provider.ts, 可 pr 提供新的提供商,或直接填写 Other
"model": "gpt-4o-mini", // 模型名(对应OneAPI中渠道的模型名)
"name": "gpt-4o-mini", // 模型别名
"maxContext": 125000, // 最大上下文
"maxResponse": 16000, // 最大回复
"quoteMaxToken": 120000, // 最大引用内容
"maxTemperature": 1.2, // 最大温度
"charsPointsPrice": 0, // n积分/1k token商业版
"censor": false, // 是否开启敏感校验(商业版)
"vision": true, // 是否支持图片输入
"datasetProcess": true, // 是否设置为文本理解模型QA务必保证至少有一个为true否则知识库会报错
"usedInClassify": true, // 是否用于问题分类务必保证至少有一个为true
"usedInExtractFields": true, // 是否用于内容提取务必保证至少有一个为true
"usedInToolCall": true, // 是否用于工具调用务必保证至少有一个为true
"usedInQueryExtension": true, // 是否用于问题优化务必保证至少有一个为true
"toolChoice": true, // 是否支持工具选择(分类,内容提取,工具调用会用到。)
"functionCall": false, // 是否支持函数调用(分类,内容提取,工具调用会用到。会优先使用 toolChoice如果为false则使用 functionCall如果仍为 false则使用提示词模式
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
"customExtractPrompt": "", // 自定义内容提取提示词
"defaultSystemChatPrompt": "", // 对话默认携带的系统提示词
"defaultConfig": {}, // 请求API时挟带一些默认配置比如 GLM4 的 top_p
"fieldMap": {} // 字段映射o1 模型需要把 max_tokens 映射为 max_completion_tokens
},
{
"provider": "OpenAI",
"model": "gpt-4o",
"name": "gpt-4o",
"maxContext": 125000,
"maxResponse": 4000,
"quoteMaxToken": 120000,
"maxTemperature": 1.2,
"charsPointsPrice": 0,
"censor": false,
"vision": true,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": true,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"defaultConfig": {},
"fieldMap": {}
},
{
"provider": "OpenAI",
"model": "o1-mini",
"name": "o1-mini",
"maxContext": 125000,
"maxResponse": 65000,
"quoteMaxToken": 120000,
"maxTemperature": 1.2,
"charsPointsPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"defaultConfig": {
"temperature": 1,
"max_tokens": null,
"stream": false
}
},
{
"provider": "OpenAI",
"model": "o1-preview",
"name": "o1-preview",
"maxContext": 125000,
"maxResponse": 32000,
"quoteMaxToken": 120000,
"maxTemperature": 1.2,
"charsPointsPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"defaultConfig": {
"temperature": 1,
"max_tokens": null,
"stream": false
}
}
],
"vectorModels": [
{
"provider": "OpenAI",
"model": "text-embedding-3-small",
"name": "text-embedding-3-small",
"charsPointsPrice": 0,
"defaultToken": 512,
"maxToken": 3000,
"weight": 100
},
{
"provider": "OpenAI",
"model": "text-embedding-3-large",
"name": "text-embedding-3-large",
"charsPointsPrice": 0,
"defaultToken": 512,
"maxToken": 3000,
"weight": 100,
"defaultConfig": {
"dimensions": 1024
}
},
{
"provider": "OpenAI",
"model": "text-embedding-ada-002", // 模型名与OneAPI对应
"name": "Embedding-2", // 模型展示名
"charsPointsPrice": 0, // n积分/1k token
"defaultToken": 700, // 默认文本分割时候的 token
"maxToken": 3000, // 最大 token
"weight": 100, // 优先训练权重
"defaultConfig": {}, // 自定义额外参数。例如,如果希望使用 embedding3-large 的话,可以传入 dimensions:1024来返回1024维度的向量。目前必须小于1536维度
"dbConfig": {}, // 存储时的额外参数(非对称向量模型时候需要用到)
"queryConfig": {} // 参训时的额外参数
}
],
"reRankModels": [],
"audioSpeechModels": [
{
"provider": "OpenAI",
"model": "tts-1",
"name": "OpenAI TTS1",
"charsPointsPrice": 0,
"voices": [
{ "label": "Alloy", "value": "alloy", "bufferId": "openai-Alloy" },
{ "label": "Echo", "value": "echo", "bufferId": "openai-Echo" },
{ "label": "Fable", "value": "fable", "bufferId": "openai-Fable" },
{ "label": "Onyx", "value": "onyx", "bufferId": "openai-Onyx" },
{ "label": "Nova", "value": "nova", "bufferId": "openai-Nova" },
{ "label": "Shimmer", "value": "shimmer", "bufferId": "openai-Shimmer" }
]
}
],
"whisperModel": {
"provider": "OpenAI",
"model": "whisper-1",
"name": "Whisper1",
"charsPointsPrice": 0
}
}
```
## 内置的模型提供商ID
为了方便模型分类展示FastGPT 内置了部分模型提供商的名字和 Logo。如果你期望补充提供商可[提交 Issue](https://github.com/labring/FastGPT/issues),并提供几个信息:
1. 厂商官网地址
2. 厂商 SVG logo建议是正方形图片。
目前已支持的提供商, 复制 "-" 之前的字符串,作为 provider 的值。
- OpenAI
- Claude
- Gemini
- Meta
- MistralAI
- AliCloud - 阿里云
- Qwen - 通义千问
- Doubao - 豆包
- ChatGLM - 智谱
- DeepSeek - 深度求索
- Moonshot - 月之暗面
- MiniMax
- SparkDesk - 讯飞星火
- Hunyuan - 腾讯混元
- Baichuan - 百川
- Yi - 零一万物
- Ernie - 文心一言
- StepFun - 阶跃星辰
- Ollama
- BAAI - 智源研究院
- FishAudio
- Intern - 书生
- Moka - Moka-AI
- Other - 其他
## ReRank 模型接入
由于 OneAPI 不支持 Rerank 模型,所以需要单独配置接入,这里
### 使用硅基流动的在线模型
有免费的 `bge-reranker-v2-m3` 模型可以使用。
1. [点击注册硅基流动账号](https://cloud.siliconflow.cn/i/TR9Ym0c4)
2. 进入控制台,获取 API key: https://cloud.siliconflow.cn/account/ak
3. 修改 FastGPT 配置文件
```json
{
"reRankModels": [
{
"model": "BAAI/bge-reranker-v2-m3", // 这里的model需要对应 siliconflow 的模型名
"name": "BAAI/bge-reranker-v2-m3",
"requestUrl": "https://api.siliconflow.cn/v1/rerank",
"requestAuth": "siliconflow 上申请的 key"
}
]
}
```
### 私有部署模型
请使用 4.6.6-alpha 以上版本,配置文件中的 `reRankModels` 为重排模型虽然是数组不过目前仅有第1个生效。
1. [部署 ReRank 模型](/docs/development/custom-models/bge-rerank/)
1. 找到 FastGPT 的配置文件中的 `reRankModels` 4.6.6 以前是 `ReRankModels`
2. 修改对应的值:
```json
{
"reRankModels": [
{
"model": "bge-reranker-base", // 随意
"name": "检索重排-base", // 随意
"charsPointsPrice": 0,
"requestUrl": "{{host}}/v1/rerank",
"requestAuth": "安全凭证,已自动补 Bearer"
}
]
}
```
```

View File

@ -118,10 +118,17 @@ services:
```
## 接入 FastGPT
参考 [ReRank模型接入](/docs/development/configuration/#rerank-接入)host 变量为部署的域名。
1. 打开 FastGPT 模型配置,新增一个重排模型。
2. 填写模型配置表单:模型 ID 为`bge-reranker-base`,地址填写`{{host}}/v1/rerank`host 为你部署的域名/IP:Port。
![alt text](/imgs/image-102.png)
## QA
### 403报错
FastGPT中自定义请求 Token 和环境变量的 ACCESS_TOKEN 不一致。
### Docker 运行提示 `Bus error (core dumped)`
尝试增加 `docker-compose.yml` 配置项 `shm_size`,以增加容器中的共享内存目录大小。
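下面是一个 compose 片段示意(其中服务名、镜像和 `2g` 的大小均为示例值,请按实际部署替换):
```yaml
services:
  reranker:                                   # 替换为实际的服务名
    image: your-registry/bge-reranker:latest  # 替换为实际使用的镜像
    restart: always
    shm_size: '2g'                            # 增大容器共享内存,缓解 Bus error (core dumped)
```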

View File

@ -144,7 +144,6 @@ curl --location --request POST 'https://<oneapi_url>/v1/chat/completions' \
"usedInClassify": true, // 是否用于问题分类务必保证至少有一个为true
"usedInExtractFields": true, // 是否用于内容提取务必保证至少有一个为true
"usedInToolCall": true, // 是否用于工具调用务必保证至少有一个为true
"usedInQueryExtension": true, // 是否用于问题优化务必保证至少有一个为true
"toolChoice": true, // 是否支持工具选择(分类,内容提取,工具调用会用到。)
"functionCall": false, // 是否支持函数调用(分类,内容提取,工具调用会用到。会优先使用 toolChoice如果为false则使用 functionCall如果仍为 false则使用提示词模式
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型

View File

@ -7,6 +7,13 @@ toc: true
weight: 707
---
## 前置知识
1. 基础的网络知识:端口,防火墙……
2. Docker 和 Docker Compose 基础知识
3. 大模型相关接口和参数
4. RAG 相关知识:向量模型,向量数据库,向量检索
## 部署架构图
![](/imgs/sealos-fastgpt.webp)
@ -202,6 +209,10 @@ docker restart oneapi
首次运行,会自动初始化 root 用户,密码为 `1234`(与环境变量中的`DEFAULT_ROOT_PSW`一致),日志里会提示一次`MongoServerError: Unable to read from a snapshot due to pending collection catalog changes;`可忽略。
### 6. 配置模型
[点击查看模型配置教程](/docs/development/modelConfig/intro/)
## FAQ
### Mongo 副本集自动初始化失败

View File

@ -23,11 +23,11 @@ images: []
![](/imgs/faq1.png)
这是索引模型的长度限制,通过任何方式部署都一样的,但不同索引模型的配置不一样,可以在后台修改参数。
### sealos怎么挂载 小程序配置文件
### 怎么挂载小程序配置文件
新增配置文件/app/projects/app/public/xxxx.txt
将验证文件,挂载到指定位置/app/projects/app/public/xxxx.txt
然后重启。例如:
![](/imgs/faq2.png)

View File

@ -0,0 +1,442 @@
---
title: 'FastGPT 模型配置说明'
description: 'FastGPT 模型配置说明'
icon: 'api'
draft: false
toc: true
weight: 744
---
在 4.8.20 版本以前FastGPT 模型配置在 `config.json` 文件中声明,你可以在 https://github.com/labring/FastGPT/blob/main/projects/app/data/model.json 中找到旧版的配置文件示例。
从 4.8.20 版本开始,你可以直接在 FastGPT 页面中进行模型配置,并且系统内置了大量模型,无需从 0 开始配置。下面介绍模型配置的基本流程:
## 1. 使用 OneAPI 对接模型提供商
可以使用 [OneAPI 接入教程](/docs/development/modelconfig/one-api) 来进行模型聚合,从而可以对接更多模型提供商。你需要先在各服务商申请好 API 接入 OneAPI 后,才能在 FastGPT 中使用这些模型。示例流程如下:
![alt text](/imgs/image-95.png)
除了各模型官方的服务外,还有一些第三方服务商提供模型接入服务,当然你也可以用 Ollama 等来部署本地模型,最终都需要接入 OneAPI下面是一些第三方服务商
{{% alert icon=" " context="info" %}}
- [SiliconCloud(硅基流动)](https://cloud.siliconflow.cn/i/TR9Ym0c4): 提供开源模型调用的平台。
- [Sealos AIProxy](https://hzh.sealos.run/?openapp=system-aiproxy): 提供国内各家模型代理,无需逐一申请 api。
{{% /alert %}}
在 OneAPI 配置好模型后,你就可以打开 FastGPT 页面,启用对应模型了。
## 2. 登录 root 用户
仅 root 用户可以进行模型配置。
## 3. 进入模型配置页面
登录 root 用户后,在`账号-模型提供商-模型配置`中,你可以看到所有内置的模型和自定义模型,以及哪些模型启用了。
![alt text](/imgs/image-90.png)
## 4. 配置介绍
{{% alert icon="🤖 " context="success" %}}
注意:目前语音识别模型和重排模型仅会生效一个,所以配置时候,只需要配置一个即可。
{{% /alert %}}
### 核心配置
- 模型 ID:接口请求时,Body 中 `model` 字段的值,全局唯一。
- 自定义请求地址/Key:如果需要绕过 `OneAPI`,可以设置自定义请求地址和 Token。一般情况下不需要;如果 OneAPI 不支持某些模型,可以使用该特性。
### 模型类型
1. 语言模型 - 进行文本对话,多模态模型支持图片识别。
2. 索引模型 - 对文本块进行索引,用于相关文本检索。
3. 语音合成 - 将文本转换为语音。
4. 语音识别 - 将语音转换为文本。
5. 重排模型 - 对文本进行重排,用于优化文本质量。
### 启用模型
系统内置了目前主流厂商的模型,如果你不熟悉配置,直接点击`启用`即可。需要注意的是,模型 ID 需要和 OneAPI 中渠道的`模型`一致。
| | |
| --- | --- |
| ![alt text](/imgs/image-91.png) | ![alt text](/imgs/image-92.png) |
### 修改模型配置
点击模型右侧的齿轮即可进行模型配置,不同类型模型的配置有区别。
| | |
| --- | --- |
| ![alt text](/imgs/image-93.png) | ![alt text](/imgs/image-94.png) |
### 新增自定义模型
如果系统内置的模型无法满足你的需求,你可以添加自定义模型。自定义模型中,如果`模型 ID`与系统内置的模型 ID 一致,则会被认为是修改系统模型。
| | |
| --- | --- |
| ![alt text](/imgs/image-96.png) | ![alt text](/imgs/image-97.png) |
### 通过配置文件配置
如果你觉得通过页面配置模型比较麻烦,你也可以通过配置文件来配置模型。或者希望快速将一个系统的配置,复制到另一个系统,也可以通过配置文件来实现。
| | |
| --- | --- |
| ![alt text](/imgs/image-98.png) | ![alt text](/imgs/image-99.png) |
**语言模型字段说明:**
```json
{
"model": "模型 ID",
"metadata": {
"isCustom": true, // 是否为自定义模型
"isActive": true, // 是否启用
"provider": "OpenAI", // 模型提供商主要用于分类展示目前已经内置提供商包括https://github.com/labring/FastGPT/blob/main/packages/global/core/ai/provider.ts, 可 pr 提供新的提供商,或直接填写 Other
"model": "gpt-4o-mini", // 模型ID(对应OneAPI中渠道的模型名)
"name": "gpt-4o-mini", // 模型别名
"maxContext": 125000, // 最大上下文
"maxResponse": 16000, // 最大回复
"quoteMaxToken": 120000, // 最大引用内容
"maxTemperature": 1.2, // 最大温度
"charsPointsPrice": 0, // n积分/1k token商业版
"censor": false, // 是否开启敏感校验(商业版)
"vision": true, // 是否支持图片输入
"datasetProcess": true, // 是否设置为文本理解模型QA务必保证至少有一个为true否则知识库会报错
"usedInClassify": true, // 是否用于问题分类务必保证至少有一个为true
"usedInExtractFields": true, // 是否用于内容提取务必保证至少有一个为true
"usedInToolCall": true, // 是否用于工具调用务必保证至少有一个为true
"toolChoice": true, // 是否支持工具选择(分类,内容提取,工具调用会用到。)
"functionCall": false, // 是否支持函数调用(分类,内容提取,工具调用会用到。会优先使用 toolChoice如果为false则使用 functionCall如果仍为 false则使用提示词模式
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
"customExtractPrompt": "", // 自定义内容提取提示词
"defaultSystemChatPrompt": "", // 对话默认携带的系统提示词
"defaultConfig": {}, // 请求API时挟带一些默认配置比如 GLM4 的 top_p
"fieldMap": {} // 字段映射o1 模型需要把 max_tokens 映射为 max_completion_tokens
}
}
```
**索引模型字段说明:**
```json
{
"model": "模型 ID",
"metadata": {
"isCustom": true, // 是否为自定义模型
"isActive": true, // 是否启用
"provider": "OpenAI", // 模型提供商
"model": "text-embedding-3-small", // 模型ID
"name": "text-embedding-3-small", // 模型别名
"charsPointsPrice": 0, // n积分/1k token
"defaultToken": 512, // 默认文本分割时候的 token
"maxToken": 3000 // 最大 token
}
}
```
**重排模型字段说明:**
```json
{
"model": "模型 ID",
"metadata": {
"isCustom": true, // 是否为自定义模型
"isActive": true, // 是否启用
"provider": "BAAI", // 模型提供商
"model": "bge-reranker-v2-m3", // 模型ID
"name": "ReRanker-Base", // 模型别名
"requestUrl": "", // 自定义请求地址
"requestAuth": "", // 自定义请求认证
"type": "rerank" // 模型类型
}
}
```
**语音合成模型字段说明:**
```json
{
"model": "模型 ID",
"metadata": {
"isActive": true, // 是否启用
"isCustom": true, // 是否为自定义模型
"type": "tts", // 模型类型
"provider": "FishAudio", // 模型提供商
"model": "fishaudio/fish-speech-1.5", // 模型ID
"name": "fish-speech-1.5", // 模型别名
"voices": [ // 音色
{
"label": "fish-alex", // 音色名称
"value": "fishaudio/fish-speech-1.5:alex", // 音色ID
},
{
"label": "fish-anna", // 音色名称
"value": "fishaudio/fish-speech-1.5:anna", // 音色ID
}
],
"charsPointsPrice": 0 // n积分/1k token
}
}
```
**语音识别模型字段说明:**
```json
{
"model": "whisper-1",
"metadata": {
"isActive": true, // 是否启用
"isCustom": true, // 是否为自定义模型
"provider": "OpenAI", // 模型提供商
"model": "whisper-1", // 模型ID
"name": "whisper-1", // 模型别名
"charsPointsPrice": 0, // n积分/1k token
"type": "stt" // 模型类型
}
}
```
## 模型测试
FastGPT 页面上提供了每类模型的简单测试,可以初步检查模型是否正常工作,会实际按模板发送一个请求。
![alt text](/imgs/image-105.png)
## 特殊接入示例
### ReRank 模型接入
由于 OneAPI 不支持 Rerank 模型所以需要单独配置。FastGPT 中,模型配置支持自定义请求地址,可以绕过 OneAPI直接向提供商发起请求可以利用这个特性来接入 Rerank 模型。
#### 使用硅基流动的在线模型
有免费的 `bge-reranker-v2-m3` 模型可以使用。
1. [点击注册硅基流动账号](https://cloud.siliconflow.cn/i/TR9Ym0c4)
2. 进入控制台,获取 API key: https://cloud.siliconflow.cn/account/ak
3. 打开 FastGPT 模型配置,新增一个`BAAI/bge-reranker-v2-m3`的重排模型(如果系统内置了,也可以直接变更,无需新增)。
![alt text](/imgs/image-101.png)
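如果更习惯通过配置文件导入,对应的写法大致如下(字段含义见上文「重排模型字段说明」,`requestAuth` 中的 key 为示意值):
```json
{
  "model": "BAAI/bge-reranker-v2-m3",
  "metadata": {
    "isCustom": true,
    "isActive": true,
    "provider": "BAAI",
    "model": "BAAI/bge-reranker-v2-m3",
    "name": "BAAI/bge-reranker-v2-m3",
    "type": "rerank",
    "requestUrl": "https://api.siliconflow.cn/v1/rerank",
    "requestAuth": "sk-xxxx" // 硅基流动上申请的 API Key
  }
}
```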
#### 私有部署模型
[点击查看部署 ReRank 模型教程](/docs/development/custom-models/bge-rerank/)
### 接入语音识别模型
OneAPI 的语音识别接口无法正确地识别其他模型(会始终识别成 whisper-1),所以如果想接入其他模型,可以通过自定义请求地址来实现。例如,接入硅基流动的 `FunAudioLLM/SenseVoiceSmall` 模型,可以参考如下配置:
点击模型编辑:
![alt text](/imgs/image-106.png)
填写硅基流动的地址:`https://api.siliconflow.cn/v1/audio/transcriptions`,并填写硅基流动的 API Key。
![alt text](/imgs/image-107.png)
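若改用配置文件方式,大致对应如下(假设 `requestUrl`/`requestAuth` 字段与上文重排模型的写法一致,仅作参考):
```json
{
  "model": "FunAudioLLM/SenseVoiceSmall",
  "metadata": {
    "isCustom": true,
    "isActive": true,
    "type": "stt",
    "provider": "Siliconflow",
    "model": "FunAudioLLM/SenseVoiceSmall",
    "name": "SenseVoiceSmall",
    "charsPointsPrice": 0,
    "requestUrl": "https://api.siliconflow.cn/v1/audio/transcriptions",
    "requestAuth": "sk-xxxx" // 硅基流动上申请的 API Key
  }
}
```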
## 其他配置项说明
### 自定义请求地址
如果填写了该值,则允许你绕过 OneAPI,直接向自定义请求地址发起请求。需要填写完整的请求地址,例如:
- LLM: {{host}}/v1/chat/completions
- Embedding: {{host}}/v1/embeddings
- STT: {{host}}/v1/audio/transcriptions
- TTS: {{host}}/v1/audio/speech
- Rerank: {{host}}/v1/rerank
自定义请求 Key:向自定义请求地址发起请求时,会携带请求头 `Authorization: Bearer xxx`。
所有接口均遵循 OpenAI 提供的模型格式,可参考 [OpenAI API 文档](https://platform.openai.com/docs/api-reference/introduction) 进行配置。
由于 OpenAI 没有提供 ReRank 模型,遵循的是 Cohere 的格式。[点击查看接口请求示例](/docs/development/faq/#如何检查模型问题)
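作为参考,自定义地址接入 Rerank 时,请求体大致是 Cohere 风格的字段(下面仅为示意,具体字段以上面链接中的接口示例为准):
```json
{
  "model": "bge-reranker-v2-m3",
  "query": "用户的检索问题",
  "documents": ["待重排的文本片段 A", "待重排的文本片段 B"]
}
```
返回结果通常包含每个文档的 index 与 relevance_score,FastGPT 会按分数对召回内容重新排序。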
## 旧版模型配置说明
配置好 OneAPI 后,需要在`config.json`文件中,手动的增加模型配置,并重启。
由于环境变量不利于配置复杂的内容FastGPT 采用了 ConfigMap 的形式挂载配置文件,你可以在 `projects/app/data/config.json` 看到默认的配置文件。可以参考 [docker-compose 快速部署](/docs/development/docker/) 来挂载配置文件。
**开发环境下**,你需要将示例配置文件 `config.json` 复制成 `config.local.json` 文件才会生效。
**Docker部署**,修改`config.json` 文件,需要重启容器。
下面配置文件示例中包含了系统参数和各个模型配置:
```json
{
"feConfigs": {
"lafEnv": "https://laf.dev" // laf环境。 https://laf.run (杭州阿里云) ,或者私有化的laf环境。如果使用 Laf openapi 功能,需要最新版的 laf 。
},
"systemEnv": {
"vectorMaxProcess": 15, // 向量处理线程数量
"qaMaxProcess": 15, // 问答拆分线程数量
"tokenWorkers": 50, // Token 计算线程保持数,会持续占用内存,不能设置太大。
"pgHNSWEfSearch": 100 // 向量搜索参数。越大搜索越精确但是速度越慢。设置为100有99%+精度。
},
"llmModels": [
{
"provider": "OpenAI", // 模型提供商主要用于分类展示目前已经内置提供商包括https://github.com/labring/FastGPT/blob/main/packages/global/core/ai/provider.ts, 可 pr 提供新的提供商,或直接填写 Other
"model": "gpt-4o-mini", // 模型名(对应OneAPI中渠道的模型名)
"name": "gpt-4o-mini", // 模型别名
"maxContext": 125000, // 最大上下文
"maxResponse": 16000, // 最大回复
"quoteMaxToken": 120000, // 最大引用内容
"maxTemperature": 1.2, // 最大温度
"charsPointsPrice": 0, // n积分/1k token商业版
"censor": false, // 是否开启敏感校验(商业版)
"vision": true, // 是否支持图片输入
"datasetProcess": true, // 是否设置为文本理解模型QA务必保证至少有一个为true否则知识库会报错
"usedInClassify": true, // 是否用于问题分类务必保证至少有一个为true
"usedInExtractFields": true, // 是否用于内容提取务必保证至少有一个为true
"usedInToolCall": true, // 是否用于工具调用务必保证至少有一个为true
"toolChoice": true, // 是否支持工具选择(分类,内容提取,工具调用会用到。)
"functionCall": false, // 是否支持函数调用(分类,内容提取,工具调用会用到。会优先使用 toolChoice如果为false则使用 functionCall如果仍为 false则使用提示词模式
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
"customExtractPrompt": "", // 自定义内容提取提示词
"defaultSystemChatPrompt": "", // 对话默认携带的系统提示词
"defaultConfig": {}, // 请求API时挟带一些默认配置比如 GLM4 的 top_p
"fieldMap": {} // 字段映射o1 模型需要把 max_tokens 映射为 max_completion_tokens
},
{
"provider": "OpenAI",
"model": "gpt-4o",
"name": "gpt-4o",
"maxContext": 125000,
"maxResponse": 4000,
"quoteMaxToken": 120000,
"maxTemperature": 1.2,
"charsPointsPrice": 0,
"censor": false,
"vision": true,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"toolChoice": true,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"defaultConfig": {},
"fieldMap": {}
},
{
"provider": "OpenAI",
"model": "o1-mini",
"name": "o1-mini",
"maxContext": 125000,
"maxResponse": 65000,
"quoteMaxToken": 120000,
"maxTemperature": 1.2,
"charsPointsPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"defaultConfig": {
"temperature": 1,
"max_tokens": null,
"stream": false
}
},
{
"provider": "OpenAI",
"model": "o1-preview",
"name": "o1-preview",
"maxContext": 125000,
"maxResponse": 32000,
"quoteMaxToken": 120000,
"maxTemperature": 1.2,
"charsPointsPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"defaultConfig": {
"temperature": 1,
"max_tokens": null,
"stream": false
}
}
],
"vectorModels": [
{
"provider": "OpenAI",
"model": "text-embedding-3-small",
"name": "text-embedding-3-small",
"charsPointsPrice": 0,
"defaultToken": 512,
"maxToken": 3000,
"weight": 100
},
{
"provider": "OpenAI",
"model": "text-embedding-3-large",
"name": "text-embedding-3-large",
"charsPointsPrice": 0,
"defaultToken": 512,
"maxToken": 3000,
"weight": 100,
"defaultConfig": {
"dimensions": 1024
}
},
{
"provider": "OpenAI",
"model": "text-embedding-ada-002", // 模型名与OneAPI对应
"name": "Embedding-2", // 模型展示名
"charsPointsPrice": 0, // n积分/1k token
"defaultToken": 700, // 默认文本分割时候的 token
"maxToken": 3000, // 最大 token
"weight": 100, // 优先训练权重
"defaultConfig": {}, // 自定义额外参数。例如,如果希望使用 embedding3-large 的话,可以传入 dimensions:1024来返回1024维度的向量。目前必须小于1536维度
"dbConfig": {}, // 存储时的额外参数(非对称向量模型时候需要用到)
"queryConfig": {} // 参训时的额外参数
}
],
"reRankModels": [],
"audioSpeechModels": [
{
"provider": "OpenAI",
"model": "tts-1",
"name": "OpenAI TTS1",
"charsPointsPrice": 0,
"voices": [
{ "label": "Alloy", "value": "alloy", "bufferId": "openai-Alloy" },
{ "label": "Echo", "value": "echo", "bufferId": "openai-Echo" },
{ "label": "Fable", "value": "fable", "bufferId": "openai-Fable" },
{ "label": "Onyx", "value": "onyx", "bufferId": "openai-Onyx" },
{ "label": "Nova", "value": "nova", "bufferId": "openai-Nova" },
{ "label": "Shimmer", "value": "shimmer", "bufferId": "openai-Shimmer" }
]
}
],
"whisperModel": {
"provider": "OpenAI",
"model": "whisper-1",
"name": "Whisper1",
"charsPointsPrice": 0
}
}
```

View File

@ -94,70 +94,16 @@ CHAT_API_KEY=sk-xxxxxx
![](/imgs/oneapi-demo1.png)
### 2. 修改 FastGPT 配置文件
### 2. 修改 FastGPT 模型配置
可以在 `/projects/app/src/data/config.json` 里找到配置文件(本地开发需要复制成 config.local.json,按下面内容修改配置文件,最新/更具体的配置说明,可查看[FastGPT 配置文件说明](/docs/development/configuration)
打开 FastGPT 模型配置,启用文心千帆模型;如果模型未内置,可以通过新增模型来配置。
配置模型的关键点在于:`model` 需要与 OneAPI 渠道中的模型一致。
```json
{
"llmModels": [ // 语言模型配置
{
"model": "ERNIE-Bot", // 这里的模型需要对应 One API 的模型
"name": "文心一言", // 对外展示的名称
"avatar": "/imgs/model/openai.svg", // 模型的logo
"maxContext": 16000, // 最大上下文
"maxResponse": 4000, // 最大回复
"quoteMaxToken": 13000, // 最大引用内容
"maxTemperature": 1.2, // 最大温度
"charsPointsPrice": 0,
"censor": false,
"vision": false, // 是否支持图片输入
"datasetProcess": true, // 是否设置为知识库处理模型
"usedInClassify": true, // 是否用于问题分类
"usedInExtractFields": true, // 是否用于字段提取
"usedInToolCall": true, // 是否用于工具调用
"usedInQueryExtension": true, // 是否用于问题优化
"toolChoice": true, // 是否支持工具选择
"functionCall": false, // 是否支持函数调用
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
"customExtractPrompt": "", // 自定义内容提取提示词
"defaultSystemChatPrompt": "", // 对话默认携带的系统提示词
"defaultConfig":{} // 请求API时挟带一些默认配置比如 GLM4 的 top_p
}
],
"vectorModels": [ // 向量模型配置
{
"model": "text-embedding-ada-002",
"name": "Embedding-2",
"avatar": "/imgs/model/openai.svg",
"charsPointsPrice": 0,
"defaultToken": 700,
"maxToken": 3000,
"weight": 100
},
]
}
```
### 3. 重启 FastGPT
**Docker 版本**
```bash
docker-compose down
docker-compose up -d
```
**Sealos 版本**
直接找到 FastGPT 服务,点击重启即可。
![alt text](/imgs/image-103.png)
## 其他服务商接入参考
这章介绍一些提供商接入 OneAPI 的教程,配置后不要忘记修改 FastGPT 配置文件
这章介绍一些提供商接入 OneAPI 的教程,配置后不要忘记在 FastGPT 模型配置中启用。
### 阿里通义千问

View File

@ -27,139 +27,13 @@ OPENAI_BASE_URL=https://api.siliconflow.cn/v1
CHAT_API_KEY=sk-xxxxxx
```
## 3. 修改 FastGPT 配置文件
## 3. 修改 FastGPT 模型配置
我们选取 SiliconCloud 中的模型作为 FastGPT 配置。这里配置了 `Qwen2.5 72b` 的纯语言和视觉模型;选择 `bge-m3` 作为向量模型;选择 `bge-reranker-v2-m3` 作为重排模型。选择 `fish-speech-1.5` 作为语音模型;选择 `SenseVoiceSmall` 作为语音输入模型
系统内置了几个硅基流动的模型进行体验,如果需要其他模型,可以手动添加
注意:ReRank 模型仍需配置一次 API Key。
这里启动了 `Qwen2.5 72b` 的纯语言和视觉模型;选择 `bge-m3` 作为向量模型;选择 `bge-reranker-v2-m3` 作为重排模型。选择 `fish-speech-1.5` 作为语音模型;选择 `SenseVoiceSmall` 作为语音输入模型。
```json
{
"llmModels": [
{
"provider": "Other", // 模型提供商主要用于分类展示目前已经内置提供商包括https://github.com/labring/FastGPT/blob/main/packages/global/core/ai/provider.ts, 可 pr 提供新的提供商,或直接填写 Other
"model": "Qwen/Qwen2.5-72B-Instruct", // 模型名(对应OneAPI中渠道的模型名)
"name": "Qwen2.5-72B-Instruct", // 模型别名
"maxContext": 32000, // 最大上下文
"maxResponse": 4000, // 最大回复
"quoteMaxToken": 30000, // 最大引用内容
"maxTemperature": 1, // 最大温度
"charsPointsPrice": 0, // n积分/1k token商业版
"censor": false, // 是否开启敏感校验(商业版)
"vision": false, // 是否支持图片输入
"datasetProcess": true, // 是否设置为文本理解模型QA务必保证至少有一个为true否则知识库会报错
"usedInClassify": true, // 是否用于问题分类务必保证至少有一个为true
"usedInExtractFields": true, // 是否用于内容提取务必保证至少有一个为true
"usedInToolCall": true, // 是否用于工具调用务必保证至少有一个为true
"usedInQueryExtension": true, // 是否用于问题优化务必保证至少有一个为true
"toolChoice": true, // 是否支持工具选择(分类,内容提取,工具调用会用到。)
"functionCall": false, // 是否支持函数调用(分类,内容提取,工具调用会用到。会优先使用 toolChoice如果为false则使用 functionCall如果仍为 false则使用提示词模式
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
"customExtractPrompt": "", // 自定义内容提取提示词
"defaultSystemChatPrompt": "", // 对话默认携带的系统提示词
"defaultConfig": {}, // 请求API时挟带一些默认配置比如 GLM4 的 top_p
"fieldMap": {} // 字段映射o1 模型需要把 max_tokens 映射为 max_completion_tokens
},
{
"provider": "Other",
"model": "Qwen/Qwen2-VL-72B-Instruct",
"name": "Qwen2-VL-72B-Instruct",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 30000,
"maxTemperature": 1,
"charsPointsPrice": 0,
"censor": false,
"vision": true,
"datasetProcess": false,
"usedInClassify": false,
"usedInExtractFields": false,
"usedInToolCall": false,
"usedInQueryExtension": false,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"defaultConfig": {}
}
],
"vectorModels": [
{
"provider": "Other",
"model": "Pro/BAAI/bge-m3",
"name": "Pro/BAAI/bge-m3",
"charsPointsPrice": 0,
"defaultToken": 512,
"maxToken": 5000,
"weight": 100
}
],
"reRankModels": [
{
"model": "BAAI/bge-reranker-v2-m3", // 这里的model需要对应 siliconflow 的模型名
"name": "BAAI/bge-reranker-v2-m3",
"requestUrl": "https://api.siliconflow.cn/v1/rerank",
"requestAuth": "siliconflow 上申请的 key"
}
],
"audioSpeechModels": [
{
"model": "fishaudio/fish-speech-1.5",
"name": "fish-speech-1.5",
"voices": [
{
"label": "fish-alex",
"value": "fishaudio/fish-speech-1.5:alex",
"bufferId": "fish-alex"
},
{
"label": "fish-anna",
"value": "fishaudio/fish-speech-1.5:anna",
"bufferId": "fish-anna"
},
{
"label": "fish-bella",
"value": "fishaudio/fish-speech-1.5:bella",
"bufferId": "fish-bella"
},
{
"label": "fish-benjamin",
"value": "fishaudio/fish-speech-1.5:benjamin",
"bufferId": "fish-benjamin"
},
{
"label": "fish-charles",
"value": "fishaudio/fish-speech-1.5:charles",
"bufferId": "fish-charles"
},
{
"label": "fish-claire",
"value": "fishaudio/fish-speech-1.5:claire",
"bufferId": "fish-claire"
},
{
"label": "fish-david",
"value": "fishaudio/fish-speech-1.5:david",
"bufferId": "fish-david"
},
{
"label": "fish-diana",
"value": "fishaudio/fish-speech-1.5:diana",
"bufferId": "fish-diana"
}
]
}
],
"whisperModel": {
"model": "FunAudioLLM/SenseVoiceSmall",
"name": "SenseVoiceSmall",
"charsPointsPrice": 0
}
}
```
## 4. 重启 FastGPT
![alt text](/imgs/image-104.png)
## 5. 体验测试

View File

@ -34,7 +34,7 @@ FastGPT 使用了 one-api 项目来管理模型池,其可以兼容 OpenAI 、A
<a href="https://bja.sealos.run/?openapp=system-template%3FtemplateName%3Dfastgpt" rel="external" target="_blank"><img src="https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg" alt="Deploy on Sealos"/></a>
### 开始部署
### 1. 开始部署
由于需要部署数据库,部署完后需要等待 2~4 分钟才能正常访问。默认用了最低配置,首次访问时会有些慢。
@ -52,27 +52,15 @@ FastGPT 使用了 one-api 项目来管理模型池,其可以兼容 OpenAI 、A
![](/imgs/sealos2.png)
### 登录
### 2. 登录
用户名:`root`
密码是刚刚一键部署时设置的`root_password`
### 修改配置文件和环境变量
### 3. 配置模型
在 Sealos 中,你可以打开`应用管理`App Launchpad看到部署的 FastGPT可以打开`数据库`Database看到对应的数据库。
`应用管理`中,选中 FastGPT点击变更可以看到对应的环境变量和配置文件。
![](/imgs/fastgptonsealos1.png)
{{% alert icon="🤖 " context="success" %}}
在 Sealos 上FastGPT 一共运行了 1 个服务和 2 个数据库,如暂停和删除请注意数据库一同操作。(你可以白天启动,晚上暂停它们,省钱大法)
{{% /alert %}}
### 更新
点击变更或重启会自动拉取镜像更新,请确保镜像`tag`正确。建议不要使用`latest`,改成固定版本号。
[点击查看模型配置教程](/docs/development/modelConfig/intro/)
## 收费
@ -88,7 +76,20 @@ FastGPT 商业版共包含了2个应用fastgpt, fastgpt-plus和2个数据
点击右侧的详情,可以查看对应应用的详细信息。
### 修改配置文件和环境变量
在 Sealos 中,你可以打开`应用管理`(App Launchpad)看到部署的 FastGPT,打开`数据库`(Database)看到对应的数据库。
在`应用管理`中,选中 FastGPT,点击变更,可以看到对应的环境变量和配置文件。
![](/imgs/fastgptonsealos1.png)
{{% alert icon="🤖 " context="success" %}}
在 Sealos 上FastGPT 一共运行了 1 个服务和 2 个数据库,如暂停和删除请注意数据库一同操作。(你可以白天启动,晚上暂停它们,省钱大法)
{{% /alert %}}
### 如何更新/升级 FastGPT
[升级脚本文档](https://doc.tryfastgpt.ai/docs/development/upgrading/)先看下文档,看下需要升级哪个版本。注意,不要跨版本升级!!!!!
例如:目前是 4.5 版本,要升级到 4.5.1,就先把镜像版本改成 v4.5.1,执行一下升级脚本,等待完成后再继续升级。如果目标版本不需要执行初始化,则可以跳过。
@ -148,8 +149,6 @@ SYSTEM_FAVICON 可以是一个网络地址
![](/imgs/onsealos8.png)
### 管理后台(已合并到plus)
### 商业版镜像配置文件
```

View File

@ -31,7 +31,6 @@ weight: 813
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
@ -56,7 +55,6 @@ weight: 813
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",

View File

@ -0,0 +1,50 @@
---
title: 'V4.8.20(进行中)'
description: 'FastGPT V4.8.20 更新说明'
icon: 'upgrade'
draft: false
toc: true
weight: 804
---
## 更新指南
### 1. 做好数据库备份
### 2. 更新环境变量
如果是很早版本的用户,配置了`ONEAPI_URL`的,需要统一改成`OPENAI_BASE_URL`。
### 3. 更新镜像:
- 更新 fastgpt 镜像 tag: v4.8.20
- 更新 fastgpt-pro 商业版镜像 tag: v4.8.20
- Sandbox 镜像无需更新
### 4. 运行升级脚本
从任意终端,发起 1 个 HTTP 请求。其中 {{rootkey}} 替换成环境变量里的 `rootkey`,{{host}} 替换成 **FastGPT 域名**。
```bash
curl --location --request POST 'https://{{host}}/api/admin/initv4820' \
--header 'rootkey: {{rootkey}}' \
--header 'Content-Type: application/json'
```
脚本会自动把原配置文件的模型加载到新版模型配置中。
## 完整更新内容
1. 新增 - 可视化模型参数配置。预设超过 100 个模型配置。同时支持所有类型模型的一键测试。(预计下个版本会完全支持在页面上配置渠道)。
2. 新增 - DeepSeek reasoner 模型支持输出思考过程。
3. 新增 - 使用记录导出和仪表盘。
4. 新增 - Markdown 语法扩展,支持音视频(代码块 audio 和 video)。
5. 新增 - 调整 max_tokens 计算逻辑。优先保证 max_tokens 为配置值,如超出最大上下文,则减少历史记录。例如:如果申请 8000 的 max_tokens,则上下文长度会相应减少 8000。
6. 优化 - 问题优化增加上下文过滤,避免超出上下文。
7. 优化 - 页面组件抽离,减少页面组件路由。
8. 优化 - 全文检索,忽略大小写。
9. 优化 - 问答生成和增强索引改成流输出,避免部分模型超时。
10. 优化 - 自动给 assistant 空 content 补充 null,同时合并连续的 text assistant,避免部分模型抛错。
11. 优化 - 调整图片 Host:取消上传时补充 FE_DOMAIN,改成发送对话前补充,避免替换域名后原图片无法正常使用。
12. 修复 - 部分场景成员列表无法触底加载。
13. 修复 - 工作流递归执行,部分条件下无法正常运行。

View File

@ -114,15 +114,15 @@ services:
# fastgpt
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.8.17 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.17 # 阿里云
image: ghcr.io/labring/fastgpt-sandbox:v4.8.20 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.20 # 阿里云
networks:
- fastgpt
restart: always
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.8.17 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.17 # 阿里云
image: ghcr.io/labring/fastgpt:v4.8.20 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.20 # 阿里云
ports:
- 3000:3000
networks:

View File

@ -72,15 +72,15 @@ services:
# fastgpt
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.8.17 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.17 # 阿里云
image: ghcr.io/labring/fastgpt-sandbox:v4.8.20 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.20 # 阿里云
networks:
- fastgpt
restart: always
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.8.17 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.17 # 阿里云
image: ghcr.io/labring/fastgpt:v4.8.20 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.20 # 阿里云
ports:
- 3000:3000
networks:

View File

@ -53,15 +53,15 @@ services:
wait $$!
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.8.17 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.17 # 阿里云
image: ghcr.io/labring/fastgpt-sandbox:v4.8.20 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.20 # 阿里云
networks:
- fastgpt
restart: always
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.8.17 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.17 # 阿里云
image: ghcr.io/labring/fastgpt:v4.8.20 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.20 # 阿里云
ports:
- 3000:3000
networks:

View File

@ -23,7 +23,6 @@ data:
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": true,
"functionCall": false,
"customCQPrompt": "",
@ -45,7 +44,6 @@ data:
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": true,
"functionCall": false,
"customCQPrompt": "",
@ -67,7 +65,6 @@ data:
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": true,
"functionCall": false,
"customCQPrompt": "",
@ -89,7 +86,6 @@ data:
"usedInClassify": false,
"usedInExtractFields": false,
"usedInToolCall": false,
"usedInQueryExtension": false,
"toolChoice": true,
"functionCall": false,
"customCQPrompt": "",

View File

@ -2,12 +2,22 @@ import { ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* team: 503000 */
export enum UserErrEnum {
notUser = 'notUser',
userExist = 'userExist',
unAuthRole = 'unAuthRole',
account_psw_error = 'account_psw_error',
balanceNotEnough = 'balanceNotEnough',
unAuthSso = 'unAuthSso'
}
const errList = [
{
statusText: UserErrEnum.notUser,
message: i18nT('common:code_error.account_not_found')
},
{
statusText: UserErrEnum.userExist,
message: i18nT('common:code_error.account_exist')
},
{
statusText: UserErrEnum.account_psw_error,
message: i18nT('common:code_error.account_error')

View File

@ -3,7 +3,7 @@ import type {
ChatModelItemType,
FunctionModelItemType,
LLMModelItemType,
VectorModelItemType,
EmbeddingModelItemType,
AudioSpeechModels,
STTModelType,
ReRankModelItemType
@ -31,11 +31,13 @@ export type FastGPTConfigFileType = {
feConfigs: FastGPTFeConfigsType;
systemEnv: SystemEnvType;
subPlans?: SubPlanType;
llmModels: ChatModelItemType[];
vectorModels: VectorModelItemType[];
reRankModels: ReRankModelItemType[];
audioSpeechModels: AudioSpeechModelType[];
whisperModel: STTModelType;
// Abandon
llmModels?: ChatModelItemType[];
vectorModels?: EmbeddingModelItemType[];
reRankModels?: ReRankModelItemType[];
audioSpeechModels?: TTSModelType[];
whisperModel?: STTModelType;
};
export type FastGPTFeConfigsType = {

View File

@ -15,15 +15,13 @@ export enum LLMModelTypeEnum {
all = 'all',
classify = 'classify',
extractFields = 'extractFields',
toolCall = 'toolCall',
queryExtension = 'queryExtension'
toolCall = 'toolCall'
}
export const llmModelTypeFilterMap = {
[LLMModelTypeEnum.all]: 'model',
[LLMModelTypeEnum.classify]: 'usedInClassify',
[LLMModelTypeEnum.extractFields]: 'usedInExtractFields',
[LLMModelTypeEnum.toolCall]: 'usedInToolCall',
[LLMModelTypeEnum.queryExtension]: 'usedInQueryExtension'
[LLMModelTypeEnum.toolCall]: 'usedInToolCall'
};
export enum EmbeddingTypeEnm {

View File

@ -1,3 +1,4 @@
import { ModelTypeEnum } from './model';
import type { ModelProviderIdType } from './provider';
type PriceType = {
@ -7,68 +8,74 @@ type PriceType = {
inputPrice?: number; // 1k tokens=n points
outputPrice?: number; // 1k tokens=n points
};
export type LLMModelItemType = PriceType & {
type BaseModelItemType = {
provider: ModelProviderIdType;
model: string;
name: string;
avatar?: string; // model icon, from provider
maxContext: number;
maxResponse: number;
quoteMaxToken: number;
maxTemperature: number;
censor?: boolean;
vision?: boolean;
isActive?: boolean;
isCustom?: boolean;
isDefault?: boolean;
// diff function model
datasetProcess?: boolean; // dataset
usedInClassify?: boolean; // classify
usedInExtractFields?: boolean; // extract fields
usedInToolCall?: boolean; // tool call
usedInQueryExtension?: boolean; // query extension
functionCall: boolean;
toolChoice: boolean;
customCQPrompt: string;
customExtractPrompt: string;
defaultSystemChatPrompt?: string;
defaultConfig?: Record<string, any>;
fieldMap?: Record<string, string>;
// If has requestUrl, it will request the model directly
requestUrl?: string;
requestAuth?: string;
};
export type VectorModelItemType = PriceType & {
provider: ModelProviderIdType;
model: string; // model name
name: string; // show name
avatar?: string;
defaultToken: number; // split text default token
maxToken: number; // model max token
weight: number; // training weight
hidden?: boolean; // Disallow creation
defaultConfig?: Record<string, any>; // post request config
dbConfig?: Record<string, any>; // Custom parameters for storage
queryConfig?: Record<string, any>; // Custom parameters for query
};
export type LLMModelItemType = PriceType &
BaseModelItemType & {
type: ModelTypeEnum.llm;
maxContext: number;
maxResponse: number;
quoteMaxToken: number;
maxTemperature?: number;
export type ReRankModelItemType = PriceType & {
provider: ModelProviderIdType;
model: string;
name: string;
requestUrl: string;
requestAuth: string;
};
censor?: boolean;
vision?: boolean;
reasoning?: boolean;
export type AudioSpeechModelType = PriceType & {
provider: ModelProviderIdType;
model: string;
name: string;
voices: { label: string; value: string; bufferId: string }[];
};
// diff function model
datasetProcess?: boolean; // dataset
usedInClassify?: boolean; // classify
usedInExtractFields?: boolean; // extract fields
usedInToolCall?: boolean; // tool call
export type STTModelType = PriceType & {
provider: ModelProviderIdType;
model: string;
name: string;
};
functionCall: boolean;
toolChoice: boolean;
customCQPrompt: string;
customExtractPrompt: string;
defaultSystemChatPrompt?: string;
defaultConfig?: Record<string, any>;
fieldMap?: Record<string, string>;
};
export type EmbeddingModelItemType = PriceType &
BaseModelItemType & {
type: ModelTypeEnum.embedding;
defaultToken: number; // split text default token
maxToken: number; // model max token
weight: number; // training weight
hidden?: boolean; // Disallow creation
defaultConfig?: Record<string, any>; // post request config
dbConfig?: Record<string, any>; // Custom parameters for storage
queryConfig?: Record<string, any>; // Custom parameters for query
};
export type ReRankModelItemType = PriceType &
BaseModelItemType & {
type: ModelTypeEnum.rerank;
};
export type TTSModelType = PriceType &
BaseModelItemType & {
type: ModelTypeEnum.tts;
voices: { label: string; value: string }[];
};
export type STTModelType = PriceType &
BaseModelItemType & {
type: ModelTypeEnum.stt;
};

View File

@ -1,9 +1,18 @@
import { i18nT } from '../../../web/i18n/utils';
import type { LLMModelItemType, STTModelType, VectorModelItemType } from './model.d';
import type { LLMModelItemType, STTModelType, EmbeddingModelItemType } from './model.d';
import { getModelProvider, ModelProviderIdType } from './provider';
export enum ModelTypeEnum {
llm = 'llm',
embedding = 'embedding',
tts = 'tts',
stt = 'stt',
rerank = 'rerank'
}
export const defaultQAModels: LLMModelItemType[] = [
{
type: ModelTypeEnum.llm,
provider: 'OpenAI',
model: 'gpt-4o-mini',
name: 'gpt-4o-mini',
@ -24,8 +33,9 @@ export const defaultQAModels: LLMModelItemType[] = [
}
];
export const defaultVectorModels: VectorModelItemType[] = [
export const defaultVectorModels: EmbeddingModelItemType[] = [
{
type: ModelTypeEnum.embedding,
provider: 'OpenAI',
model: 'text-embedding-3-small',
name: 'Embedding-2',
@ -36,12 +46,15 @@ export const defaultVectorModels: VectorModelItemType[] = [
}
];
export const defaultWhisperModel: STTModelType = {
provider: 'OpenAI',
model: 'whisper-1',
name: 'whisper-1',
charsPointsPrice: 0
};
export const defaultSTTModels: STTModelType[] = [
{
type: ModelTypeEnum.stt,
provider: 'OpenAI',
model: 'whisper-1',
name: 'whisper-1',
charsPointsPrice: 0
}
];
export const getModelFromList = (
modelList: { provider: ModelProviderIdType; name: string; model: string }[],
@ -55,15 +68,10 @@ export const getModelFromList = (
};
};
export enum ModelTypeEnum {
chat = 'chat',
embedding = 'embedding',
tts = 'tts',
stt = 'stt'
}
export const modelTypeList = [
{ label: i18nT('common:model.type.chat'), value: ModelTypeEnum.chat },
{ label: i18nT('common:model.type.chat'), value: ModelTypeEnum.llm },
{ label: i18nT('common:model.type.embedding'), value: ModelTypeEnum.embedding },
{ label: i18nT('common:model.type.tts'), value: ModelTypeEnum.tts },
{ label: i18nT('common:model.type.stt'), value: ModelTypeEnum.stt }
{ label: i18nT('common:model.type.stt'), value: ModelTypeEnum.stt },
{ label: i18nT('common:model.type.reRank'), value: ModelTypeEnum.rerank }
];

View File

@ -7,11 +7,12 @@ export type ModelProviderIdType =
| 'Meta'
| 'MistralAI'
| 'Groq'
| 'Grok'
| 'AliCloud'
| 'Qwen'
| 'Doubao'
| 'ChatGLM'
| 'DeepSeek'
| 'ChatGLM'
| 'Ernie'
| 'Moonshot'
| 'MiniMax'
@ -20,6 +21,7 @@ export type ModelProviderIdType =
| 'Baichuan'
| 'StepFun'
| 'Yi'
| 'Siliconflow'
| 'Ollama'
| 'BAAI'
| 'FishAudio'
@ -29,7 +31,7 @@ export type ModelProviderIdType =
export type ModelProviderType = {
id: ModelProviderIdType;
name: string;
name: any;
avatar: string;
};
@ -59,6 +61,11 @@ export const ModelProviderList: ModelProviderType[] = [
name: 'MistralAI',
avatar: 'model/mistral'
},
{
id: 'Grok',
name: 'Grok',
avatar: 'model/grok'
},
{
id: 'Groq',
name: 'Groq',
@ -155,6 +162,11 @@ export const ModelProviderList: ModelProviderType[] = [
name: i18nT('common:model_moka'),
avatar: 'model/moka'
},
{
id: 'Siliconflow',
name: i18nT('common:model_siliconflow'),
avatar: 'model/siliconflow'
},
{
id: 'Other',
name: i18nT('common:model_other'),
@ -165,6 +177,7 @@ export const ModelProviderMap = Object.fromEntries(
ModelProviderList.map((item, index) => [item.id, { ...item, order: index }])
);
export const getModelProvider = (provider: ModelProviderIdType) => {
export const getModelProvider = (provider?: ModelProviderIdType) => {
if (!provider) return ModelProviderMap.Other;
return ModelProviderMap[provider] ?? ModelProviderMap.Other;
};

View File

@ -80,6 +80,7 @@ export type AppSimpleEditFormType = {
maxToken?: number;
isResponseAnswerText: boolean;
maxHistories: number;
[NodeInputKeyEnum.aiChatReasoning]?: boolean;
};
dataset: {
datasets: SelectedDatasetType;
@ -117,6 +118,7 @@ export type SettingAIDataType = {
isResponseAnswerText?: boolean;
maxHistories?: number;
[NodeInputKeyEnum.aiChatVision]?: boolean; // Is open vision mode
[NodeInputKeyEnum.aiChatReasoning]?: boolean; // Is open reasoning mode
};
// variable

View File

@ -16,7 +16,8 @@ export const getDefaultAppForm = (): AppSimpleEditFormType => {
temperature: 0,
isResponseAnswerText: true,
maxHistories: 6,
maxToken: 4000
maxToken: 4000,
aiChatReasoning: true
},
dataset: {
datasets: [],

View File

@ -25,7 +25,8 @@ export enum ChatItemValueTypeEnum {
text = 'text',
file = 'file',
tool = 'tool',
interactive = 'interactive'
interactive = 'interactive',
reasoning = 'reasoning'
}
export enum ChatSourceEnum {
@ -75,5 +76,3 @@ export enum ChatStatusEnum {
running = 'running',
finish = 'finish'
}
export const MARKDOWN_QUOTE_SIGN = 'QUOTE SIGN';

View File

@ -70,14 +70,23 @@ export type SystemChatItemType = {
obj: ChatRoleEnum.System;
value: SystemChatItemValueItemType[];
};
export type AIChatItemValueItemType = {
type: ChatItemValueTypeEnum.text | ChatItemValueTypeEnum.tool | ChatItemValueTypeEnum.interactive;
type:
| ChatItemValueTypeEnum.text
| ChatItemValueTypeEnum.reasoning
| ChatItemValueTypeEnum.tool
| ChatItemValueTypeEnum.interactive;
text?: {
content: string;
};
reasoning?: {
content: string;
};
tools?: ToolModuleResponseItemType[];
interactive?: WorkflowInteractiveResponseType;
};
export type AIChatItemType = {
obj: ChatRoleEnum.AI;
value: AIChatItemValueItemType[];

View File

@ -1,4 +1,4 @@
import type { LLMModelItemType, VectorModelItemType } from '../../core/ai/model.d';
import type { LLMModelItemType, EmbeddingModelItemType } from '../../core/ai/model.d';
import { PermissionTypeEnum } from '../../support/permission/constant';
import { PushDatasetDataChunkProps } from './api';
import {
@ -152,7 +152,7 @@ export type DatasetSimpleItemType = {
_id: string;
avatar: string;
name: string;
vectorModel: VectorModelItemType;
vectorModel: EmbeddingModelItemType;
};
export type DatasetListItemType = {
_id: string;
@ -163,14 +163,14 @@ export type DatasetListItemType = {
intro: string;
type: `${DatasetTypeEnum}`;
permission: DatasetPermission;
vectorModel: VectorModelItemType;
vectorModel: EmbeddingModelItemType;
inheritPermission: boolean;
private?: boolean;
sourceMember?: SourceMemberType;
};
export type DatasetItemType = Omit<DatasetSchemaType, 'vectorModel' | 'agentModel'> & {
vectorModel: VectorModelItemType;
vectorModel: EmbeddingModelItemType;
agentModel: LLMModelItemType;
permission: DatasetPermission;
};

View File

@ -1,4 +1,4 @@
import { VectorModelItemType } from '../ai/model.d';
import { EmbeddingModelItemType } from '../ai/model.d';
import { NodeInputKeyEnum } from './constants';
export type SelectedDatasetType = { datasetId: string }[];

View File

@ -141,6 +141,7 @@ export enum NodeInputKeyEnum {
aiChatDatasetQuote = 'quoteQA',
aiChatVision = 'aiChatVision',
stringQuoteText = 'stringQuoteText',
aiChatReasoning = 'aiChatReasoning',
// dataset
datasetSelectList = 'datasets',
@ -220,7 +221,8 @@ export enum NodeOutputKeyEnum {
// common
userChatInput = 'userChatInput',
history = 'history',
answerText = 'answerText', // module answer. the value will be show and save to history
answerText = 'answerText', // node answer. the value will be show and save to history
reasoningText = 'reasoningText', // node reasoning. the value will be show but not save to history
success = 'success',
failed = 'failed',
error = 'error',

View File

@ -220,6 +220,7 @@ export type AIChatNodeProps = {
[NodeInputKeyEnum.aiChatMaxToken]?: number;
[NodeInputKeyEnum.aiChatIsResponseText]: boolean;
[NodeInputKeyEnum.aiChatVision]?: boolean;
[NodeInputKeyEnum.aiChatReasoning]?: boolean;
[NodeInputKeyEnum.aiChatQuoteRole]?: AiChatQuoteRoleType;
[NodeInputKeyEnum.aiChatQuoteTemplate]?: string;

View File

@ -176,6 +176,7 @@ export const checkNodeRunStatus = ({
}
visited.add(edge.source);
// 递归检测后面的 edge如果有其中一个成环则返回 true
const nextEdges = allEdges.filter((item) => item.target === edge.source);
return nextEdges.some((nextEdge) => checkIsCircular(nextEdge, new Set(visited)));
};
@ -207,7 +208,23 @@ export const checkNodeRunStatus = ({
currentNode: node
});
// check skip其中一组边全 skip
// check active其中一组边至少有一个 active且没有 waiting 即可运行)
if (
commonEdges.length > 0 &&
commonEdges.some((item) => item.status === 'active') &&
commonEdges.every((item) => item.status !== 'waiting')
) {
return 'run';
}
if (
recursiveEdges.length > 0 &&
recursiveEdges.some((item) => item.status === 'active') &&
recursiveEdges.every((item) => item.status !== 'waiting')
) {
return 'run';
}
// check skip其中一组边全是 skiped 则跳过运行)
if (commonEdges.length > 0 && commonEdges.every((item) => item.status === 'skipped')) {
return 'skip';
}
@ -215,14 +232,6 @@ export const checkNodeRunStatus = ({
return 'skip';
}
// check active有一类边不全是 wait 即可运行)
if (commonEdges.length > 0 && commonEdges.every((item) => item.status !== 'waiting')) {
return 'run';
}
if (recursiveEdges.length > 0 && recursiveEdges.every((item) => item.status !== 'waiting')) {
return 'run';
}
return 'wait';
};
@ -355,12 +364,14 @@ export function replaceEditorVariable({
export const textAdaptGptResponse = ({
text,
reasoning_content,
model = '',
finish_reason = null,
extraData = {}
}: {
model?: string;
text: string | null;
text?: string | null;
reasoning_content?: string | null;
finish_reason?: null | 'stop';
extraData?: Object;
}) => {
@ -372,10 +383,11 @@ export const textAdaptGptResponse = ({
model,
choices: [
{
delta:
text === null
? {}
: { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: text },
delta: {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: text,
...(reasoning_content && { reasoning_content })
},
index: 0,
finish_reason
}

View File

@ -63,14 +63,14 @@ export const AiChatModule: FlowNodeTemplateType = {
key: NodeInputKeyEnum.aiChatTemperature,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: 0,
value: undefined,
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatMaxToken,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: 2000,
value: undefined,
valueType: WorkflowIOValueTypeEnum.number
},
@ -91,6 +91,13 @@ export const AiChatModule: FlowNodeTemplateType = {
valueType: WorkflowIOValueTypeEnum.boolean,
value: true
},
{
key: NodeInputKeyEnum.aiChatReasoning,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.boolean,
value: true
},
// settings modal ---
{
...Input_Template_System_Prompt,

View File

@ -31,10 +31,7 @@ export const AiQueryExtension: FlowNodeTemplateType = {
showStatus: true,
version: '481',
inputs: [
{
...Input_Template_SelectAIModel,
llmModelType: LLMModelTypeEnum.queryExtension
},
Input_Template_SelectAIModel,
{
key: NodeInputKeyEnum.aiSystemPrompt,
renderTypeList: [FlowNodeInputTypeEnum.textarea, FlowNodeInputTypeEnum.reference],

View File

@ -43,14 +43,14 @@ export const ToolModule: FlowNodeTemplateType = {
key: NodeInputKeyEnum.aiChatTemperature,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: 0,
value: undefined,
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatMaxToken,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: 2000,
value: undefined,
valueType: WorkflowIOValueTypeEnum.number
},
{

View File

@ -6,6 +6,22 @@ export type CreateTrainingUsageProps = {
datasetId: string;
};
export type GetUsageProps = {
dateStart: Date;
dateEnd: Date;
sources?: UsageSourceEnum[];
teamMemberIds?: string[];
projectName?: string;
};
export type GetUsageDashboardProps = GetUsageProps & {
unit: 'day' | 'month';
};
export type GetUsageDashboardResponseItem = {
date: Date;
totalPoints: number;
};
export type ConcatUsageProps = UsageListItemCountType & {
teamId: string;
tmbId: string;

View File

@ -18,30 +18,30 @@ export const UsageSourceMap = {
label: i18nT('common:core.chat.logs.online')
},
[UsageSourceEnum.api]: {
label: 'Api'
label: 'API'
},
[UsageSourceEnum.shareLink]: {
label: i18nT('common:core.chat.logs.free_login')
},
[UsageSourceEnum.training]: {
label: 'dataset.Training Name'
label: i18nT('common:dataset.Training Name')
},
[UsageSourceEnum.cronJob]: {
label: i18nT('common:cron_job_run_app')
},
[UsageSourceEnum.feishu]: {
label: i18nT('user:usage.feishu')
label: i18nT('account_usage:feishu')
},
[UsageSourceEnum.official_account]: {
label: i18nT('user:usage.official_account')
label: i18nT('account_usage:official_account')
},
[UsageSourceEnum.share]: {
label: i18nT('user:usage.share')
label: i18nT('account_usage:share')
},
[UsageSourceEnum.wecom]: {
label: i18nT('user:usage.wecom')
label: i18nT('account_usage:wecom')
},
[UsageSourceEnum.dingtalk]: {
label: i18nT('user:usage.dingtalk')
label: i18nT('account_usage:dingtalk')
}
};

View File

@ -1,3 +1,4 @@
import { SourceMemberType } from '../../../support/user/type';
import { CreateUsageProps } from './api';
import { UsageSourceEnum } from './constants';
@ -10,6 +11,7 @@ export type UsageListItemCountType = {
// deprecated
tokens?: number;
};
export type UsageListItemType = UsageListItemCountType & {
moduleName: string;
amount: number;
@ -28,4 +30,5 @@ export type UsageItemType = {
source: UsageSchemaType['source'];
totalPoints: number;
list: UsageSchemaType['list'];
sourceMember: SourceMemberType;
};

View File

@ -40,7 +40,7 @@ export async function uploadMongoImg({
expiredTime: forever ? undefined : addHours(new Date(), 1)
});
return `${process.env.FE_DOMAIN || ''}${process.env.NEXT_PUBLIC_BASE_URL || ''}${imageBaseUrl}${String(_id)}.${extension}`;
return `${process.env.NEXT_PUBLIC_BASE_URL || ''}${imageBaseUrl}${String(_id)}.${extension}`;
}
const getIdFromPath = (path?: string) => {

View File

@ -1,6 +1,5 @@
import { ApiRequestProps } from '../../type/next';
import requestIp from 'request-ip';
import { ERROR_ENUM } from '@fastgpt/global/common/error/errorCode';
import { authFrequencyLimit } from '../system/frequencyLimit/utils';
import { addSeconds } from 'date-fns';
import { NextApiResponse } from 'next';
@ -9,7 +8,17 @@ import { jsonRes } from '../response';
// unit: times/s
// how to use?
// export default NextAPI(useQPSLimit(10), handler); // limit 10 times per second for a ip
export function useReqFrequencyLimit(seconds: number, limit: number, force = false) {
export function useIPFrequencyLimit({
id,
seconds,
limit,
force = false
}: {
id: string;
seconds: number;
limit: number;
force?: boolean;
}) {
return async (req: ApiRequestProps, res: NextApiResponse) => {
const ip = requestIp.getClientIp(req);
if (!ip || (process.env.USE_IP_LIMIT !== 'true' && !force)) {
@ -17,14 +26,14 @@ export function useReqFrequencyLimit(seconds: number, limit: number, force = fal
}
try {
await authFrequencyLimit({
eventId: 'ip-qps-limit' + ip,
eventId: `ip-qps-limit-${id}-` + ip,
maxAmount: limit,
expiredTime: addSeconds(new Date(), seconds)
});
} catch (_) {
jsonRes(res, {
code: 429,
error: ERROR_ENUM.tooManyRequest
error: `Too many request, request ${limit} times every ${seconds} seconds`
});
}
};

View File

@ -33,7 +33,15 @@ export const jsonRes = <T = any>(
addLog.error(`Api response error: ${url}`, ERROR_RESPONSE[errResponseKey]);
return res.status(code).json(ERROR_RESPONSE[errResponseKey]);
res.status(code);
if (message) {
res.send(message);
} else {
res.json(ERROR_RESPONSE[errResponseKey]);
}
return;
}
// another error

View File

@ -6,8 +6,7 @@ import { FastGPTProUrl } from '../constants';
export const getFastGPTConfigFromDB = async () => {
if (!FastGPTProUrl) {
return {
config: {} as FastGPTConfigFileType,
configId: undefined
config: {} as FastGPTConfigFileType
};
}
@ -18,9 +17,35 @@ export const getFastGPTConfigFromDB = async () => {
});
const config = res?.value || {};
// 利用配置文件的创建时间(更新时间)来做缓存,如果前端命中缓存,则不需要再返回配置文件
global.systemInitBufferId = res ? res.createTime.getTime().toString() : undefined;
return {
configId: res ? String(res._id) : undefined,
config: config as FastGPTConfigFileType
};
};
export const updateFastGPTConfigBuffer = async () => {
const res = await MongoSystemConfigs.findOne({
type: SystemConfigsTypeEnum.fastgpt
}).sort({
createTime: -1
});
if (!res) return;
res.createTime = new Date();
await res.save();
global.systemInitBufferId = res.createTime.getTime().toString();
};
export const reloadFastGPTConfigBuffer = async () => {
const res = await MongoSystemConfigs.findOne({
type: SystemConfigsTypeEnum.fastgpt
}).sort({
createTime: -1
});
if (!res) return;
global.systemInitBufferId = res.createTime.getTime().toString();
};
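
The buffer id acts as a cache tag: it is the createTime of the latest fastgpt config document, bumped by updateFastGPTConfigBuffer and re-read by reloadFastGPTConfigBuffer. A hedged sketch of how an init endpoint might use it (the handler name and response shape are assumptions, not part of this diff):

// Sketch only: compares the client's cached id against global.systemInitBufferId.
async function getInitData(clientBufferId?: string) {
  if (clientBufferId && clientBufferId === global.systemInitBufferId) {
    // Cache hit: the client already holds the latest config, so skip resending it.
    return { bufferId: global.systemInitBufferId };
  }
  const { config } = await getFastGPTConfigFromDB();
  return { bufferId: global.systemInitBufferId, config };
}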

View File

@ -13,15 +13,6 @@ export const initFastGPTConfig = (config?: FastGPTConfigFileType) => {
global.feConfigs = config.feConfigs;
global.systemEnv = config.systemEnv;
global.subPlans = config.subPlans;
global.llmModels = config.llmModels;
global.llmModelPriceType = global.llmModels.some((item) => typeof item.inputPrice === 'number')
? 'IO'
: 'Tokens';
global.vectorModels = config.vectorModels;
global.audioSpeechModels = config.audioSpeechModels;
global.whisperModel = config.whisperModel;
global.reRankModels = config.reRankModels;
};
export const systemStartCb = () => {

View File

@ -2,7 +2,7 @@
import { PgVectorCtrl } from './pg/class';
import { getVectorsByText } from '../../core/ai/embedding';
import { InsertVectorProps } from './controller.d';
import { VectorModelItemType } from '@fastgpt/global/core/ai/model.d';
import { EmbeddingModelItemType } from '@fastgpt/global/core/ai/model.d';
import { MILVUS_ADDRESS, PG_ADDRESS } from './constants';
import { MilvusCtrl } from './milvus/class';
@ -28,7 +28,7 @@ export const insertDatasetDataVector = async ({
...props
}: InsertVectorProps & {
query: string;
model: VectorModelItemType;
model: EmbeddingModelItemType;
}) => {
const { vectors, tokens } = await getVectorsByText({
model,

View File

@ -1,5 +1,6 @@
import type { NextApiResponse } from 'next';
import { getAIApi } from '../config';
import { getTTSModel } from '../model';
export async function text2Speech({
res,
@ -18,15 +19,26 @@ export async function text2Speech({
voice: string;
speed?: number;
}) {
const modelData = getTTSModel(model)!;
const ai = getAIApi();
const response = await ai.audio.speech.create({
model,
// @ts-ignore
voice,
input,
response_format: 'mp3',
speed
});
const response = await ai.audio.speech.create(
{
model,
// @ts-ignore
voice,
input,
response_format: 'mp3',
speed
},
modelData.requestUrl && modelData.requestAuth
? {
path: modelData.requestUrl,
headers: {
Authorization: `Bearer ${modelData.requestAuth}`
}
}
: {}
);
const readableStream = response.body as unknown as NodeJS.ReadableStream;
readableStream.pipe(res);
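
For context, a TTS model entry only takes the custom request branch above when it carries both requestUrl and requestAuth; a hedged example of such an entry (URL and token are placeholders, field names follow the model-config format introduced in this PR):

// Placeholder values; only requestUrl/requestAuth matter for the branch above.
const customTTSModel = {
  type: 'tts',
  model: 'fish-speech-1.5',
  name: 'fish-speech-1.5',
  voices: [{ label: 'alex', value: 'alex' }],
  requestUrl: 'https://example.com/v1/audio/speech',
  requestAuth: 'sk-placeholder'
};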

View File

@ -2,6 +2,7 @@ import fs from 'fs';
import { getAxiosConfig } from '../config';
import axios from 'axios';
import FormData from 'form-data';
import { getSTTModel } from '../model';
export const aiTranscriptions = async ({
model,
@ -14,13 +15,21 @@ export const aiTranscriptions = async ({
data.append('model', model);
data.append('file', fileStream);
const modelData = getSTTModel(model);
const aiAxiosConfig = getAxiosConfig();
const { data: result } = await axios<{ text: string }>({
method: 'post',
baseURL: aiAxiosConfig.baseUrl,
url: '/audio/transcriptions',
...(modelData.requestUrl
? { url: modelData.requestUrl }
: {
baseURL: aiAxiosConfig.baseUrl,
url: modelData.requestUrl || '/audio/transcriptions'
}),
headers: {
Authorization: aiAxiosConfig.authorization,
Authorization: modelData.requestAuth
? `Bearer ${modelData.requestAuth}`
: aiAxiosConfig.authorization,
...data.getHeaders()
},
data: data

View File

@ -7,14 +7,14 @@ import { getErrText } from '@fastgpt/global/common/error/utils';
import { addLog } from '../../common/system/log';
import { i18nT } from '../../../web/i18n/utils';
import { OpenaiAccountType } from '@fastgpt/global/support/user/team/type';
import { getLLMModel } from './model';
export const openaiBaseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1';
export const getAIApi = (props?: { userKey?: OpenaiAccountType; timeout?: number }) => {
const { userKey, timeout } = props || {};
const baseUrl =
userKey?.baseUrl || global?.systemEnv?.oneapiUrl || process.env.ONEAPI_URL || openaiBaseUrl;
const baseUrl = userKey?.baseUrl || global?.systemEnv?.oneapiUrl || openaiBaseUrl;
const apiKey = userKey?.key || global?.systemEnv?.chatApiKey || process.env.CHAT_API_KEY || '';
return new OpenAI({
@ -29,8 +29,7 @@ export const getAIApi = (props?: { userKey?: OpenaiAccountType; timeout?: number
export const getAxiosConfig = (props?: { userKey?: OpenaiAccountType }) => {
const { userKey } = props || {};
const baseUrl =
userKey?.baseUrl || global?.systemEnv?.oneapiUrl || process.env.ONEAPI_URL || openaiBaseUrl;
const baseUrl = userKey?.baseUrl || global?.systemEnv?.oneapiUrl || openaiBaseUrl;
const apiKey = userKey?.key || global?.systemEnv?.chatApiKey || process.env.CHAT_API_KEY || '';
return {
@ -63,12 +62,23 @@ export const createChatCompletion = async <T extends CompletionsBodyType>({
getEmptyResponseTip: () => string;
}> => {
try {
const modelConstantsData = getLLMModel(body.model);
const formatTimeout = timeout ? timeout : body.stream ? 60000 : 600000;
const ai = getAIApi({
userKey,
timeout: formatTimeout
});
const response = await ai.chat.completions.create(body, options);
const response = await ai.chat.completions.create(body, {
...options,
...(modelConstantsData.requestUrl ? { path: modelConstantsData.requestUrl } : {}),
headers: {
...options?.headers,
...(modelConstantsData.requestAuth
? { Authorization: `Bearer ${modelConstantsData.requestAuth}` }
: {})
}
});
const isStreamResponse =
typeof response === 'object' &&
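
The same requestUrl/requestAuth override applies to chat completions: when the resolved model config has both fields, the request is routed to that path with its own bearer token instead of the shared base URL. A hedged call sketch (the model name and message are illustrative, and the return value is only partially shown):

// Sketch only: assumes 'my-self-hosted-llm' is configured with requestUrl/requestAuth.
const { response, isStreamResponse } = await createChatCompletion({
  body: {
    model: 'my-self-hosted-llm',
    messages: [{ role: 'user', content: 'ping' }],
    stream: false
  }
});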

View File

@ -1,11 +0,0 @@
{
"provider": "OpenAI",
"model": "text-embedding-ada-002",
"name": "text-embedding-ada-002",
"defaultToken": 512, // token
"maxToken": 3000, // token
"weight": 0, //
"charsPointsPrice": 0 // /1k token
}

View File

@ -1,33 +0,0 @@
{
"provider": "OpenAI",
"model": "gpt-4o-mini",
"name": "GPT-4o-mini", // alias
"maxContext": 125000, //
"maxResponse": 16000, //
"quoteMaxToken": 60000, //
"maxTemperature": 1.2, //
"presencePenaltyRange": [-2, 2], //
"frequencyPenaltyRange": [-2, 2], //
"responseFormatList": ["text", "json_object", "json_schema"], //
"showStopSign": true, //
"vision": true, //
"toolChoice": true, //
"functionCall": false, // false
"defaultSystemChatPrompt": "", //
"datasetProcess": true, //
"usedInClassify": true, //
"customCQPrompt": "", //
"usedInExtractFields": true, //
"customExtractPrompt": "", //
"usedInToolCall": true, //
"usedInQueryExtension": true, //
"defaultConfig": {}, // body
"fieldMap": {}, // body
"censor": false, //
"charsPointsPrice": 0 // n /1k token
}

View File

@ -0,0 +1,4 @@
{
"provider": "AliCloud",
"list": []
}

View File

@ -0,0 +1,17 @@
{
"provider": "BAAI",
"list": [
{
"model": "bge-m3",
"name": "bge-m3",
"defaultToken": 512,
"maxToken": 8000,
"type": "embedding"
},
{
"model": "bge-reranker-v2-m3",
"name": "bge-reranker-v2-m3",
"type": "rerank"
}
]
}

View File

@ -0,0 +1,4 @@
{
"provider": "Baichuan",
"list": []
}

View File

@ -0,0 +1,147 @@
{
"provider": "ChatGLM",
"list": [
{
"model": "glm-4-air",
"name": "glm-4-air",
"maxContext": 128000,
"maxResponse": 4000,
"quoteMaxToken": 120000,
"maxTemperature": 0.99,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "glm-4-flash",
"name": "glm-4-flash",
"maxContext": 128000,
"maxResponse": 4000,
"quoteMaxToken": 120000,
"maxTemperature": 0.99,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "glm-4-long",
"name": "glm-4-long",
"maxContext": 1000000,
"maxResponse": 4000,
"quoteMaxToken": 900000,
"maxTemperature": 0.99,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "glm-4-plus",
"name": "GLM-4-plus",
"maxContext": 128000,
"maxResponse": 4000,
"quoteMaxToken": 120000,
"maxTemperature": 0.99,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "glm-4v-flash",
"name": "glm-4v-flash",
"maxContext": 8000,
"maxResponse": 1000,
"quoteMaxToken": 6000,
"maxTemperature": 0.99,
"vision": true,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "glm-4v-plus",
"name": "GLM-4v-plus",
"maxContext": 8000,
"maxResponse": 1000,
"quoteMaxToken": 6000,
"maxTemperature": 0.99,
"vision": true,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "embedding-3",
"name": "embedding-3",
"defaultToken": 512,
"maxToken": 8000,
"defaultConfig": {
"dimensions": 1024
},
"type": "embedding"
}
]
}

View File

@ -0,0 +1,93 @@
{
"provider": "Claude",
"list": [
{
"model": "claude-3-5-haiku-20241022",
"name": "claude-3-5-haiku-20241022",
"maxContext": 200000,
"maxResponse": 8000,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "claude-3-5-sonnet-20240620",
"name": "Claude-3-5-sonnet-20240620",
"maxContext": 200000,
"maxResponse": 8000,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "claude-3-5-sonnet-20241022",
"name": "Claude-3-5-sonnet-20241022",
"maxContext": 200000,
"maxResponse": 8000,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "claude-3-opus-20240229",
"name": "claude-3-opus-20240229",
"maxContext": 200000,
"maxResponse": 4096,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
}
]
}

View File

@ -0,0 +1,48 @@
{
"provider": "DeepSeek",
"list": [
{
"model": "deepseek-chat",
"name": "Deepseek-chat",
"maxContext": 64000,
"maxResponse": 4096,
"quoteMaxToken": 60000,
"maxTemperature": 1.5,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"type": "llm"
},
{
"model": "deepseek-reasoner",
"name": "Deepseek-reasoner",
"maxContext": 64000,
"maxResponse": 4096,
"quoteMaxToken": 60000,
"maxTemperature": null,
"vision": false,
"reasoning": true,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
}
]
}

View File

@ -0,0 +1,195 @@
{
"provider": "Doubao",
"list": [
{
"model": "Doubao-lite-4k",
"name": "Doubao-lite-4k",
"maxContext": 4000,
"maxResponse": 4000,
"quoteMaxToken": 4000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-lite-32k",
"name": "Doubao-lite-32k",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-lite-128k",
"name": "Doubao-lite-128k",
"maxContext": 128000,
"maxResponse": 4000,
"quoteMaxToken": 120000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-vision-lite-32k",
"name": "Doubao-vision-lite-32k",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"vision": true,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-pro-4k",
"name": "Doubao-pro-4k",
"maxContext": 4000,
"maxResponse": 4000,
"quoteMaxToken": 4000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-pro-32k",
"name": "Doubao-pro-32k",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-pro-128k",
"name": "Doubao-pro-128k",
"maxContext": 128000,
"maxResponse": 4000,
"quoteMaxToken": 120000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-vision-pro-32k",
"name": "Doubao-vision-pro-32k",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"vision": true,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-embedding-large",
"name": "Doubao-embedding-large",
"defaultToken": 512,
"maxToken": 4096,
"type": "embedding"
},
{
"model": "Doubao-embedding",
"name": "Doubao-embedding",
"defaultToken": 512,
"maxToken": 4096,
"type": "embedding"
}
]
}

View File

@ -0,0 +1,107 @@
{
"provider": "Ernie",
"list": [
{
"model": "ERNIE-4.0-8K",
"name": "ERNIE-4.0-8K",
"maxContext": 8000,
"maxResponse": 2048,
"quoteMaxToken": 5000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "ERNIE-4.0-Turbo-8K",
"name": "ERNIE-4.0-Turbo-8K",
"maxContext": 8000,
"maxResponse": 2048,
"quoteMaxToken": 5000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "ERNIE-Lite-8K",
"name": "ERNIE-lite-8k",
"maxContext": 8000,
"maxResponse": 2048,
"quoteMaxToken": 6000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "ERNIE-Speed-128K",
"name": "ERNIE-Speed-128K",
"maxContext": 128000,
"maxResponse": 4096,
"quoteMaxToken": 120000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Embedding-V1",
"name": "Embedding-V1",
"defaultToken": 512,
"maxToken": 1000,
"type": "embedding"
},
{
"model": "tao-8k",
"name": "tao-8k",
"defaultToken": 512,
"maxToken": 8000,
"type": "embedding"
}
]
}

View File

@ -0,0 +1,4 @@
{
"provider": "FishAudio",
"list": []
}

View File

@ -0,0 +1,144 @@
{
"provider": "Gemini",
"list": [
{
"model": "gemini-1.5-flash",
"name": "gemini-1.5-flash",
"maxContext": 1000000,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "gemini-1.5-pro",
"name": "gemini-1.5-pro",
"maxContext": 2000000,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "gemini-2.0-flash-exp",
"name": "gemini-2.0-flash-exp",
"maxContext": 1000000,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "gemini-2.0-flash-thinking-exp-1219",
"name": "gemini-2.0-flash-thinking-exp-1219",
"maxContext": 1000000,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "gemini-2.0-flash-thinking-exp-01-21",
"name": "gemini-2.0-flash-thinking-exp-01-21",
"maxContext": 1000000,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "gemini-exp-1206",
"name": "gemini-exp-1206",
"maxContext": 128000,
"maxResponse": 8000,
"quoteMaxToken": 120000,
"maxTemperature": 1,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "text-embedding-004",
"name": "text-embedding-004",
"defaultToken": 512,
"maxToken": 2000,
"type": "embedding"
}
]
}

View File

@ -0,0 +1,4 @@
{
"provider": "Grok",
"list": []
}

View File

@ -0,0 +1,47 @@
{
"provider": "Groq",
"list": [
{
"model": "llama-3.1-8b-instant",
"name": "Groq-llama-3.1-8b-instant",
"maxContext": 128000,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"type": "llm"
},
{
"model": "llama-3.3-70b-versatile",
"name": "Groq-llama-3.3-70b-versatile",
"maxContext": 128000,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"type": "llm"
}
]
}

View File

@ -0,0 +1,166 @@
{
"provider": "Hunyuan",
"list": [
{
"model": "hunyuan-large",
"name": "hunyuan-large",
"maxContext": 28000,
"maxResponse": 4000,
"quoteMaxToken": 20000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "hunyuan-lite",
"name": "hunyuan-lite",
"maxContext": 250000,
"maxResponse": 6000,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "hunyuan-pro",
"name": "hunyuan-pro",
"maxContext": 28000,
"maxResponse": 4000,
"quoteMaxToken": 28000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "hunyuan-standard",
"name": "hunyuan-standard",
"maxContext": 32000,
"maxResponse": 2000,
"quoteMaxToken": 20000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "hunyuan-turbo-vision",
"name": "hunyuan-turbo-vision",
"maxContext": 6000,
"maxResponse": 2000,
"quoteMaxToken": 6000,
"maxTemperature": 1,
"vision": true,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "hunyuan-turbo",
"name": "hunyuan-turbo",
"maxContext": 28000,
"maxResponse": 4000,
"quoteMaxToken": 20000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "hunyuan-vision",
"name": "hunyuan-vision",
"maxContext": 6000,
"maxResponse": 2000,
"quoteMaxToken": 4000,
"maxTemperature": 1,
"vision": true,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "hunyuan-embedding",
"name": "hunyuan-embedding",
"defaultToken": 512,
"maxToken": 1024,
"type": "embedding"
}
]
}

View File

@ -0,0 +1,49 @@
{
"provider": "Intern",
"list": [
{
"model": "internlm2-pro-chat",
"name": "internlm2-pro-chat",
"maxContext": 32000,
"maxResponse": 8000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "internlm3-8b-instruct",
"name": "internlm3-8b-instruct",
"maxContext": 32000,
"maxResponse": 8000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
}
]
}

View File

@ -0,0 +1,4 @@
{
"provider": "Meta",
"list": []
}

View File

@ -0,0 +1,240 @@
{
"provider": "MiniMax",
"list": [
{
"model": "MiniMax-Text-01",
"name": "MiniMax-Text-01",
"maxContext": 1000000,
"maxResponse": 1000000,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "abab6.5s-chat",
"name": "MiniMax-abab6.5s",
"maxContext": 245000,
"maxResponse": 10000,
"quoteMaxToken": 240000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "speech-01-turbo",
"name": "speech-01-turbo",
"voices": [
{
"label": "male-qn-qingse",
"value": "male-qn-qingse"
},
{
"label": "male-qn-jingying",
"value": "male-qn-jingying"
},
{
"label": "male-qn-badao",
"value": "male-qn-badao"
},
{
"label": "male-qn-daxuesheng",
"value": "male-qn-daxuesheng"
},
{
"label": "female-shaonv",
"value": "female-shaonv"
},
{
"label": "female-yujie",
"value": "female-yujie"
},
{
"label": "female-chengshu",
"value": "female-chengshu"
},
{
"label": "female-tianmei",
"value": "female-tianmei"
},
{
"label": "presenter_male",
"value": "presenter_male"
},
{
"label": "presenter_female",
"value": "presenter_female"
},
{
"label": "audiobook_male_1",
"value": "audiobook_male_1"
},
{
"label": "audiobook_male_2",
"value": "audiobook_male_2"
},
{
"label": "audiobook_female_1",
"value": "audiobook_female_1"
},
{
"label": "audiobook_female_2",
"value": "audiobook_female_2"
},
{
"label": "male-qn-qingse-jingpin",
"value": "male-qn-qingse-jingpin"
},
{
"label": "male-qn-jingying-jingpin",
"value": "male-qn-jingying-jingpin"
},
{
"label": "male-qn-badao-jingpin",
"value": "male-qn-badao-jingpin"
},
{
"label": "male-qn-daxuesheng-jingpin",
"value": "male-qn-daxuesheng-jingpin"
},
{
"label": "female-shaonv-jingpin",
"value": "female-shaonv-jingpin"
},
{
"label": "female-yujie-jingpin",
"value": "female-yujie-jingpin"
},
{
"label": "female-chengshu-jingpin",
"value": "female-chengshu-jingpin"
},
{
"label": "female-tianmei-jingpin",
"value": "female-tianmei-jingpin"
},
{
"label": "clever_boy",
"value": "clever_boy"
},
{
"label": "cute_boy",
"value": "cute_boy"
},
{
"label": "lovely_girl",
"value": "lovely_girl"
},
{
"label": "cartoon_pig",
"value": "cartoon_pig"
},
{
"label": "bingjiao_didi",
"value": "bingjiao_didi"
},
{
"label": "junlang_nanyou",
"value": "junlang_nanyou"
},
{
"label": "chunzhen_xuedi",
"value": "chunzhen_xuedi"
},
{
"label": "lengdan_xiongzhang",
"value": "lengdan_xiongzhang"
},
{
"label": "badao_shaoye",
"value": "badao_shaoye"
},
{
"label": "tianxin_xiaoling",
"value": "tianxin_xiaoling"
},
{
"label": "qiaopi_mengmei",
"value": "qiaopi_mengmei"
},
{
"label": "wumei_yujie",
"value": "wumei_yujie"
},
{
"label": "diadia_xuemei",
"value": "diadia_xuemei"
},
{
"label": "danya_xuejie",
"value": "danya_xuejie"
},
{
"label": "Santa_Claus",
"value": "Santa_Claus"
},
{
"label": "Grinch",
"value": "Grinch"
},
{
"label": "Rudolph",
"value": "Rudolph"
},
{
"label": "Arnold",
"value": "Arnold"
},
{
"label": "Charming_Santa",
"value": "Charming_Santa"
},
{
"label": "Charming_Lady",
"value": "Charming_Lady"
},
{
"label": "Sweet_Girl",
"value": "Sweet_Girl"
},
{
"label": "Cute_Elf",
"value": "Cute_Elf"
},
{
"label": "Attractive_Girl",
"value": "Attractive_Girl"
},
{
"label": "Serene_Woman",
"value": "Serene_Woman"
}
],
"type": "tts"
}
]
}

View File

@ -0,0 +1,93 @@
{
"provider": "MistralAI",
"list": [
{
"model": "ministral-3b-latest",
"name": "Ministral-3b-latest",
"maxContext": 130000,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "ministral-8b-latest",
"name": "Ministral-8b-latest",
"maxContext": 130000,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "mistral-large-latest",
"name": "Mistral-large-latest",
"maxContext": 130000,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "mistral-small-latest",
"name": "Mistral-small-latest",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1.2,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
}
]
}

View File

@ -0,0 +1,4 @@
{
"provider": "Moka",
"list": []
}

View File

@ -0,0 +1,71 @@
{
"provider": "Moonshot",
"list": [
{
"model": "moonshot-v1-8k",
"name": "moonshot-v1-8k",
"maxContext": 8000,
"maxResponse": 4000,
"quoteMaxToken": 6000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "moonshot-v1-32k",
"name": "moonshot-v1-32k",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "moonshot-v1-128k",
"name": "moonshot-v1-128k",
"maxContext": 128000,
"maxResponse": 4000,
"quoteMaxToken": 60000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
}
]
}

View File

@ -0,0 +1,4 @@
{
"provider": "Ollama",
"list": []
}

View File

@ -0,0 +1,252 @@
{
"provider": "OpenAI",
"list": [
{
"model": "gpt-4o-mini",
"name": "GPT-4o-mini",
"maxContext": 128000,
"maxResponse": 16000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"vision": true,
"toolChoice": true,
"functionCall": true,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "gpt-4o",
"name": "GPT-4o",
"maxContext": 128000,
"maxResponse": 4000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"vision": true,
"toolChoice": true,
"functionCall": true,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "o3-mini",
"name": "o3-mini",
"maxContext": 200000,
"maxResponse": 100000,
"quoteMaxToken": 120000,
"maxTemperature": null,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {
"stream": false
},
"fieldMap": {
"max_tokens": "max_completion_tokens"
},
"type": "llm"
},
{
"model": "o1-mini",
"name": "o1-mini",
"maxContext": 128000,
"maxResponse": 4000,
"quoteMaxToken": 120000,
"maxTemperature": null,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {
"stream": false
},
"fieldMap": {
"max_tokens": "max_completion_tokens"
},
"type": "llm"
},
{
"model": "o1",
"name": "o1",
"maxContext": 195000,
"maxResponse": 8000,
"quoteMaxToken": 120000,
"maxTemperature": null,
"vision": true,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {
"stream": false
},
"fieldMap": {
"max_tokens": "max_completion_tokens"
},
"type": "llm"
},
{
"model": "o1-preview",
"name": "o1-preview",
"maxContext": 128000,
"maxResponse": 4000,
"quoteMaxToken": 120000,
"maxTemperature": null,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {
"stream": false
},
"fieldMap": {
"max_tokens": "max_completion_tokens"
},
"type": "llm"
},
{
"model": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo",
"maxContext": 16000,
"maxResponse": 4000,
"quoteMaxToken": 13000,
"maxTemperature": 1.2,
"vision": false,
"toolChoice": true,
"functionCall": true,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"type": "llm"
},
{
"model": "gpt-4-turbo",
"name": "gpt-4-turbo",
"maxContext": 128000,
"maxResponse": 4000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"vision": true,
"toolChoice": true,
"functionCall": true,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"type": "llm"
},
{
"model": "text-embedding-3-large",
"name": "text-embedding-3-large",
"defaultToken": 512,
"maxToken": 8000,
"defaultConfig": {
"dimensions": 1024
},
"type": "embedding"
},
{
"model": "text-embedding-3-small",
"name": "text-embedding-3-small",
"defaultToken": 512,
"maxToken": 8000,
"type": "embedding"
},
{
"model": "text-embedding-ada-002",
"name": "text-embedding-ada-002",
"defaultToken": 512,
"maxToken": 8000,
"type": "embedding"
},
{
"model": "tts-1",
"name": "TTS1",
"voices": [
{
"label": "Alloy",
"value": "alloy"
},
{
"label": "Echo",
"value": "echo"
},
{
"label": "Fable",
"value": "fable"
},
{
"label": "Onyx",
"value": "onyx"
},
{
"label": "Nova",
"value": "nova"
},
{
"label": "Shimmer",
"value": "shimmer"
}
],
"type": "tts"
},
{
"model": "whisper-1",
"name": "whisper-1",
"type": "stt"
}
]
}

View File

@ -0,0 +1,4 @@
{
"provider": "Other",
"list": []
}

View File

@ -0,0 +1,223 @@
{
"provider": "Qwen",
"list": [
{
"model": "qwen-turbo",
"name": "Qwen-turbo",
"maxContext": 128000,
"maxResponse": 8000,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "qwen-plus",
"name": "Qwen-plus",
"maxContext": 64000,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "qwen-vl-plus",
"name": "qwen-vl-plus",
"maxContext": 32000,
"maxResponse": 2000,
"quoteMaxToken": 20000,
"maxTemperature": 1.2,
"vision": true,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"type": "llm"
},
{
"model": "qwen-max",
"name": "Qwen-max",
"maxContext": 8000,
"maxResponse": 4000,
"quoteMaxToken": 6000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "qwen-vl-max",
"name": "qwen-vl-max",
"maxContext": 32000,
"maxResponse": 2000,
"quoteMaxToken": 20000,
"maxTemperature": 1.2,
"vision": true,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "qwen-coder-turbo",
"name": "qwen-coder-turbo",
"maxContext": 128000,
"maxResponse": 8000,
"quoteMaxToken": 50000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "qwen2.5-7b-instruct",
"name": "qwen2.5-7b-instruct",
"maxContext": 128000,
"maxResponse": 8000,
"quoteMaxToken": 50000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "qwen2.5-14b-instruct",
"name": "qwen2.5-14b-instruct",
"maxContext": 128000,
"maxResponse": 8000,
"quoteMaxToken": 50000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "qwen2.5-32b-instruct",
"name": "qwen2.5-32b-instruct",
"maxContext": 128000,
"maxResponse": 8000,
"quoteMaxToken": 50000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "qwen2.5-72b-instruct",
"name": "Qwen2.5-72B-instruct",
"maxContext": 128000,
"maxResponse": 8000,
"quoteMaxToken": 50000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
}
]
}

View File

@ -0,0 +1,204 @@
{
"provider": "Siliconflow",
"list": [
{
"model": "Qwen/Qwen2.5-72B-Instruct",
"name": "Qwen/Qwen2.5-72B-Instruct",
"maxContext": 128000,
"maxResponse": 8000,
"quoteMaxToken": 50000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Qwen/Qwen2-VL-72B-Instruct",
"name": "Qwen/Qwen2-VL-72B-Instruct",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"censor": false,
"vision": true,
"datasetProcess": false,
"usedInClassify": false,
"usedInExtractFields": false,
"usedInToolCall": false,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"defaultConfig": {},
"type": "llm"
},
{
"model": "deepseek-ai/DeepSeek-V2.5",
"name": "deepseek-ai/DeepSeek-V2.5",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "BAAI/bge-m3",
"name": "BAAI/bge-m3",
"defaultToken": 512,
"maxToken": 8000,
"type": "embedding"
},
{
"model": "FunAudioLLM/CosyVoice2-0.5B",
"name": "FunAudioLLM/CosyVoice2-0.5B",
"voices": [
{
"label": "alex",
"value": "FunAudioLLM/CosyVoice2-0.5B:alex"
},
{
"label": "anna",
"value": "FunAudioLLM/CosyVoice2-0.5B:anna"
},
{
"label": "bella",
"value": "FunAudioLLM/CosyVoice2-0.5B:bella"
},
{
"label": "benjamin",
"value": "FunAudioLLM/CosyVoice2-0.5B:benjamin"
},
{
"label": "charles",
"value": "FunAudioLLM/CosyVoice2-0.5B:charles"
},
{
"label": "claire",
"value": "FunAudioLLM/CosyVoice2-0.5B:claire"
},
{
"label": "david",
"value": "FunAudioLLM/CosyVoice2-0.5B:david"
},
{
"label": "diana",
"value": "FunAudioLLM/CosyVoice2-0.5B:diana"
}
],
"type": "tts"
},
{
"model": "RVC-Boss/GPT-SoVITS",
"name": "RVC-Boss/GPT-SoVITS",
"voices": [
{
"label": "alex",
"value": "RVC-Boss/GPT-SoVITS:alex"
},
{
"label": "anna",
"value": "RVC-Boss/GPT-SoVITS:anna"
},
{
"label": "bella",
"value": "RVC-Boss/GPT-SoVITS:bella"
},
{
"label": "benjamin",
"value": "RVC-Boss/GPT-SoVITS:benjamin"
},
{
"label": "charles",
"value": "RVC-Boss/GPT-SoVITS:charles"
},
{
"label": "claire",
"value": "RVC-Boss/GPT-SoVITS:claire"
},
{
"label": "david",
"value": "RVC-Boss/GPT-SoVITS:david"
},
{
"label": "diana",
"value": "RVC-Boss/GPT-SoVITS:diana"
}
],
"type": "tts"
},
{
"model": "fishaudio/fish-speech-1.5",
"name": "fish-speech-1.5",
"voices": [
{
"label": "alex",
"value": "fishaudio/fish-speech-1.5:alex"
},
{
"label": "anna",
"value": "fishaudio/fish-speech-1.5:anna"
},
{
"label": "bella",
"value": "fishaudio/fish-speech-1.5:bella"
},
{
"label": "benjamin",
"value": "fishaudio/fish-speech-1.5:benjamin"
},
{
"label": "charles",
"value": "fishaudio/fish-speech-1.5:charles"
},
{
"label": "claire",
"value": "fishaudio/fish-speech-1.5:claire"
},
{
"label": "david",
"value": "fishaudio/fish-speech-1.5:david"
},
{
"label": "diana",
"value": "fishaudio/fish-speech-1.5:diana"
}
],
"type": "tts"
},
{
"model": "FunAudioLLM/SenseVoiceSmall",
"name": "FunAudioLLM/SenseVoiceSmall",
"type": "stt"
},
{
"model": "BAAI/bge-reranker-v2-m3",
"name": "BAAI/bge-reranker-v2-m3",
"type": "rerank"
}
]
}

View File

@ -0,0 +1,129 @@
{
"provider": "SparkDesk",
"list": [
{
"model": "lite",
"name": "SparkDesk-lite",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "generalv3",
"name": "SparkDesk-Pro",
"maxContext": 8000,
"maxResponse": 8000,
"quoteMaxToken": 8000,
"maxTemperature": 1,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "pro-128k",
"name": "SparkDesk-Pro-128k",
"maxContext": 128000,
"maxResponse": 4000,
"quoteMaxToken": 128000,
"maxTemperature": 1,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "generalv3.5",
"name": "SparkDesk-max",
"maxContext": 8000,
"maxResponse": 8000,
"quoteMaxToken": 8000,
"maxTemperature": 1,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "max-32k",
"name": "SparkDesk-max-32k",
"maxContext": 32000,
"maxResponse": 8000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "4.0Ultra",
"name": "SparkDesk-v4.0 Ultra",
"maxContext": 8000,
"maxResponse": 8000,
"quoteMaxToken": 8000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
}
]
}

View File

@ -0,0 +1,308 @@
{
"provider": "StepFun",
"list": [
{
"model": "step-1-flash",
"name": "step-1-flash",
"maxContext": 8000,
"maxResponse": 4000,
"quoteMaxToken": 6000,
"maxTemperature": 2,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "step-1-8k",
"name": "step-1-8k",
"maxContext": 8000,
"maxResponse": 8000,
"quoteMaxToken": 8000,
"maxTemperature": 2,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "step-1-32k",
"name": "step-1-32k",
"maxContext": 32000,
"maxResponse": 8000,
"quoteMaxToken": 32000,
"maxTemperature": 2,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "step-1-128k",
"name": "step-1-128k",
"maxContext": 128000,
"maxResponse": 8000,
"quoteMaxToken": 128000,
"maxTemperature": 2,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "step-1-256k",
"name": "step-1-256k",
"maxContext": 256000,
"maxResponse": 8000,
"quoteMaxToken": 256000,
"maxTemperature": 2,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "step-1o-vision-32k",
"name": "step-1o-vision-32k",
"maxContext": 32000,
"quoteMaxToken": 32000,
"maxResponse": 8000,
"maxTemperature": 2,
"vision": true,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "step-1v-8k",
"name": "step-1v-8k",
"maxContext": 8000,
"maxResponse": 8000,
"quoteMaxToken": 8000,
"maxTemperature": 2,
"vision": true,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "step-1v-32k",
"name": "step-1v-32k",
"maxContext": 32000,
"quoteMaxToken": 32000,
"maxResponse": 8000,
"maxTemperature": 2,
"vision": true,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "step-2-mini",
"name": "step-2-mini",
"maxContext": 8000,
"maxResponse": 4000,
"quoteMaxToken": 6000,
"maxTemperature": 2,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "step-2-16k",
"name": "step-2-16k",
"maxContext": 16000,
"maxResponse": 4000,
"quoteMaxToken": 4000,
"maxTemperature": 2,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "step-2-16k-exp",
"name": "step-2-16k-exp",
"maxContext": 16000,
"maxResponse": 4000,
"quoteMaxToken": 4000,
"maxTemperature": 2,
"vision": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
},
{
"model": "step-tts-mini",
"name": "step-tts-mini",
"voices": [
{
"label": "cixingnansheng",
"value": "cixingnansheng"
},
{
"label": "zhengpaiqingnian",
"value": "zhengpaiqingnian"
},
{
"label": "yuanqinansheng",
"value": "yuanqinansheng"
},
{
"label": "qingniandaxuesheng",
"value": "qingniandaxuesheng"
},
{
"label": "boyinnansheng",
"value": "boyinnansheng"
},
{
"label": "ruyananshi",
"value": "ruyananshi"
},
{
"label": "shenchennanyin",
"value": "shenchennanyin"
},
{
"label": "qinqienvsheng",
"value": "qinqienvsheng"
},
{
"label": "wenrounvsheng",
"value": "wenrounvsheng"
},
{
"label": "jilingshaonv",
"value": "jilingshaonv"
},
{
"label": "yuanqishaonv",
"value": "yuanqishaonv"
},
{
"label": "ruanmengnvsheng",
"value": "ruanmengnvsheng"
},
{
"label": "youyanvsheng",
"value": "youyanvsheng"
},
{
"label": "lengyanyujie",
"value": "lengyanyujie"
},
{
"label": "shuangkuaijiejie",
"value": "shuangkuaijiejie"
},
{
"label": "wenjingxuejie",
"value": "wenjingxuejie"
},
{
"label": "linjiajiejie",
"value": "linjiajiejie"
},
{
"label": "linjiameimei",
"value": "linjiameimei"
},
{
"label": "zhixingjiejie",
"value": "zhixingjiejie"
}
],
"type": "tts"
}
]
}

View File

@ -0,0 +1,49 @@
{
"provider": "Yi",
"list": [
{
"model": "yi-lightning",
"name": "yi-lightning",
"maxContext": 16000,
"maxResponse": 4000,
"quoteMaxToken": 12000,
"maxTemperature": 1,
"vision": false,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "yi-vision-v2",
"name": "yi-vision-v2",
"maxContext": 16000,
"maxResponse": 4000,
"quoteMaxToken": 12000,
"maxTemperature": 1,
"vision": true,
"toolChoice": false,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
}
]
}

View File

@ -1,6 +0,0 @@
{
"provider": "BAAI",
"model": "bge-reranker-v2-m3",
"name": "bge-reranker-v2-m3",
"charsPointsPrice": 0
}

View File

@ -0,0 +1,21 @@
import { connectionMongo, getMongoModel } from '../../../common/mongo';
const { Schema } = connectionMongo;
import type { SystemModelSchemaType } from '../type';
const SystemModelSchema = new Schema({
model: {
type: String,
required: true,
unique: true
},
metadata: {
type: Object,
required: true,
default: {}
}
});
export const MongoSystemModel = getMongoModel<SystemModelSchemaType>(
'system_models',
SystemModelSchema
);
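
The new system_models collection keys overrides by the unique model string, with everything else stored under metadata. A hedged sketch of persisting one entry (values are placeholders; the metadata fields mirror the JSON config format used elsewhere in this PR):

// Sketch only: upsert an override for a single model.
await MongoSystemModel.updateOne(
  { model: 'gpt-4o-mini' },
  { $set: { metadata: { provider: 'OpenAI', maxContext: 128000, vision: true } } },
  { upsert: true }
);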

View File

@ -1,6 +0,0 @@
{
"provider": "OpenAI",
"model": "whisper-1",
"name": "whisper-1",
"charsPointsPrice": 0
}

Some files were not shown because too many files have changed in this diff.