Compare commits
No commits in common. "main" and "v4.9.8-alpha" have entirely different histories.
main ... v4.9.8-alpha

.vscode/settings.json (vendored, 2 changes)
@@ -21,7 +21,7 @@
   "i18n-ally.namespace": true,
   "i18n-ally.pathMatcher": "{locale}/{namespaces}.json",
   "i18n-ally.extract.targetPickingStrategy": "most-similar-by-key",
-  "i18n-ally.translate.engines": ["deepl","google"],
+  "i18n-ally.translate.engines": ["google"],
   "[typescript]": {
     "editor.defaultFormatter": "esbenp.prettier-vscode"
   },
@@ -132,15 +132,15 @@ services:
   # fastgpt
   sandbox:
     container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10-fix2 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.7-fix2 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.7-fix2 # Alibaba Cloud
    networks:
      - fastgpt
    restart: always
  fastgpt-mcp-server:
    container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10-fix2 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.7-fix2 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.7-fix2 # Alibaba Cloud
    ports:
      - 3005:3000
    networks:
@@ -150,8 +150,8 @@ services:
      - FASTGPT_ENDPOINT=http://fastgpt:3000
  fastgpt:
    container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10-fix2 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt:v4.9.7-fix2 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.7-fix2 # Alibaba Cloud
    ports:
      - 3000:3000
    networks:
@@ -109,15 +109,15 @@ services:
   # fastgpt
   sandbox:
     container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10-fix2 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.7-fix2 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.7-fix2 # Alibaba Cloud
    networks:
      - fastgpt
    restart: always
  fastgpt-mcp-server:
    container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10-fix2 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.7-fix2 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.7-fix2 # Alibaba Cloud
    ports:
      - 3005:3000
    networks:
@@ -127,8 +127,8 @@ services:
      - FASTGPT_ENDPOINT=http://fastgpt:3000
  fastgpt:
    container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10-fix2 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt:v4.9.7-fix2 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.7-fix2 # Alibaba Cloud
    ports:
      - 3000:3000
    networks:
@@ -23,7 +23,7 @@ services:
     volumes:
       - ./pg/data:/var/lib/postgresql/data
     healthcheck:
-      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'postgres']
+      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
      interval: 5s
      timeout: 5s
      retries: 10
@@ -96,15 +96,15 @@ services:
  # fastgpt
  sandbox:
    container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10-fix2 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.7-fix2 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.7-fix2 # Alibaba Cloud
    networks:
      - fastgpt
    restart: always
  fastgpt-mcp-server:
    container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10-fix2 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.7-fix2 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.7-fix2 # Alibaba Cloud
    ports:
      - 3005:3000
    networks:
@@ -114,8 +114,8 @@ services:
      - FASTGPT_ENDPOINT=http://fastgpt:3000
  fastgpt:
    container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10-fix2 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt:v4.9.7-fix2 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.7-fix2 # Alibaba Cloud
    ports:
      - 3000:3000
    networks:
@@ -72,15 +72,15 @@ services:

   sandbox:
     container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10-fix2 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.7-fix2 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.7-fix2 # Alibaba Cloud
    networks:
      - fastgpt
    restart: always
  fastgpt-mcp-server:
    container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10-fix2 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.7-fix2 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.7-fix2 # Alibaba Cloud
    ports:
      - 3005:3000
    networks:
@@ -90,8 +90,8 @@ services:
      - FASTGPT_ENDPOINT=http://fastgpt:3000
  fastgpt:
    container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.10-fix2 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10-fix2 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt:v4.9.7-fix2 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.7-fix2 # Alibaba Cloud
    ports:
      - 3000:3000
    networks:
dev.md (232 changes)

@@ -1,118 +1,114 @@
## Premise

Since FastGPT is managed as a monorepo, it is recommended to install `make` first for development.

Monorepo project names:

- app: main project
- ...

## Dev

```sh
# Give the automation scripts execute permission (on non-Linux systems, you can run the contents of postinstall.sh manually)
chmod -R +x ./scripts/
# Run from the repo root to install all dependencies of the root package, projects, and packages
pnpm i

# Without make
cd projects/app
pnpm dev

# With make
make dev name=app
```

Note: if your Node version is >= 20, you need to pass the `--no-node-snapshot` flag to Node when running `pnpm i`:

```sh
NODE_OPTIONS=--no-node-snapshot pnpm i
```
### Jest

https://fael3z0zfze.feishu.cn/docx/ZOI1dABpxoGhS7xzhkXcKPxZnDL
## I18N

### Install the i18n-ally Plugin

1. Open the Extensions Marketplace in VSCode, then search for and install the `i18n Ally` plugin.

### Code Optimization Examples

#### Fetch Specific Namespace Translations in `getServerSideProps`

```typescript
// pages/yourPage.tsx
export async function getServerSideProps(context: any) {
  return {
    props: {
      currentTab: context?.query?.currentTab || TabEnum.info,
      ...(await serverSideTranslations(context.locale, ['publish', 'user']))
    }
  };
}
```

#### Use the useTranslation Hook in a Page

```typescript
// pages/yourPage.tsx
import { useTranslation } from 'next-i18next';

const YourComponent = () => {
  const { t } = useTranslation();

  return (
    <Button
      variant="outline"
      size="sm"
      mr={2}
      onClick={() => setShowSelected(false)}
    >
      {t('common:close')}
    </Button>
  );
};

export default YourComponent;
```

#### Handle Static File Translations

```typescript
// utils/i18n.ts
import { i18nT } from '@fastgpt/web/i18n/utils';

const staticContent = {
  id: 'simpleChat',
  avatar: 'core/workflow/template/aiChat',
  name: i18nT('app:template.simple_robot'),
};

export default staticContent;
```

### Standardize Translation Format

- Use the `t(namespace:key)` format to ensure consistent naming, as in the sketch below.
- Translation keys should use lowercase letters and underscores, e.g., `common.close`.
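A minimal sketch of the key convention above, reusing a key that appears elsewhere in this guide (`common:close`) plus one hypothetical key (`user:login_title`) for illustration:

```typescript
// Hypothetical component: demonstrates only the t('namespace:key') convention.
import { useTranslation } from 'next-i18next';

const LoginHeader = () => {
  const { t } = useTranslation();

  return (
    <header>
      {/* 'user' is the namespace, 'login_title' the lowercase_underscore key (hypothetical) */}
      <h1>{t('user:login_title')}</h1>
      {/* 'common:close' is a real key used earlier in this guide */}
      <button>{t('common:close')}</button>
    </header>
  );
};

export default LoginHeader;
```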
-## audit
-
-To add an audit event: add the event to `OperationLogEventEnum` in the operationLog/audit ts files, fill in the corresponding i18n entries, and call the `addOpearationLog` function at the location where the log should be recorded.
-
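For illustration, a minimal sketch of what the removed audit note (main-branch only, hence the `-` markers) describes. Everything here other than `OperationLogEventEnum` and the source-spelled `addOpearationLog` helper is hypothetical: the import paths, the enum member, and the call signature are assumptions:

```typescript
// Hypothetical sketch only; paths, enum member, and signature are assumptions.
import { OperationLogEventEnum } from '@fastgpt/global/support/operationLog/constants';
import { addOpearationLog } from '@fastgpt/service/support/operationLog/audit';

// 1. Declare the new event on the enum (hypothetical member):
//    export enum OperationLogEventEnum { ..., EXPORT_DATASET = 'EXPORT_DATASET' }
// 2. Add the matching i18n entry for the event's message.
// 3. Record the log where the action happens:
export async function exportDataset(teamId: string, tmbId: string, datasetName: string) {
  // ... perform the export ...
  addOpearationLog({
    teamId,
    tmbId,
    event: OperationLogEventEnum.EXPORT_DATASET, // hypothetical event
    params: { datasetName }
  });
}
```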
## Build

```sh
# Docker cmd: build the image, no proxy
docker build -f ./projects/app/Dockerfile -t registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.1 . --build-arg name=app
# Make cmd: build the image, no proxy
make build name=app image=registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.1

# Docker cmd: build the image with a proxy
docker build -f ./projects/app/Dockerfile -t registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.1 . --build-arg name=app --build-arg proxy=taobao
# Make cmd: build the image with a proxy
make build name=app image=registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.1 proxy=taobao
```
Binary file not shown. (Before: 386 KiB)
@@ -959,16 +959,10 @@ curl --location --request POST 'http://localhost:3000/api/core/chat/getHistories
 {{< markdownify >}}

 {{% alert icon=" " context="success" %}}
 Currently, only conversations created by the owner of the current API key can be retrieved.

 - appId - application Id
 - offset - offset, i.e. which record to start from
 - pageSize - number of records
 - source - conversation source. source=api returns conversations created through the API (conversations from the web page are not included)
 - startCreateTime - start of the creation-time range (optional)
 - endCreateTime - end of the creation-time range (optional)
 - startUpdateTime - start of the update-time range (optional)
 - endUpdateTime - end of the update-time range (optional)
 {{% /alert %}}

 {{< /markdownify >}}
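The same request as a TypeScript sketch, using the endpoint from the hunk header and the parameters listed above (`YOUR_API_KEY` and the response typing are placeholders, since the response schema is not shown in this excerpt):

```typescript
// Sketch: fetch chat histories via the getHistories endpoint described above.
async function getHistories() {
  const res = await fetch('http://localhost:3000/api/core/chat/getHistories', {
    method: 'POST',
    headers: {
      Authorization: 'Bearer YOUR_API_KEY', // placeholder
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      appId: 'appId', // application Id
      offset: 0,      // start from the first record
      pageSize: 20,   // number of records per page
      source: 'api'   // only conversations created through the API
    })
  });
  return (await res.json()) as unknown; // response schema not shown in this excerpt
}
```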
@@ -645,7 +645,7 @@ data is the collection ID.
 {{< /tab >}}
 {{< /tabs >}}

-### Create an external file library collection (deprecated)
+### Create an external file library collection (commercial edition)

 {{< tabs tabTotal="3" >}}
 {{< tab tabName="Request example" >}}
@@ -1,50 +0,0 @@
----
-title: 'V4.9.10'
-description: 'FastGPT V4.9.10 release notes'
-icon: 'upgrade'
-draft: false
-toc: true
-weight: 790
----
-
-## Upgrade guide
-
-Important: this update rebuilds the full-text index. While the rebuild is running, full-text search returns empty results; on a 4c16g machine, rebuilding 7 million full-text index entries takes roughly 25 minutes. For a seamless upgrade you need to set up table synchronization yourself.
-
-### 1. Back up your data
-
-### 2. Update the image tags
-
-- Update the FastGPT image tag to v4.9.10-fix2
-- Update the FastGPT commercial-edition image tag to v4.9.10-fix2
-- mcp_server does not need updating
-- Sandbox does not need updating
-- AIProxy does not need updating
-
-## 🚀 New
-
-1. PG supports the `systemEnv.hnswMaxScanTuples` parameter to raise the total amount of data scanned during iterative search.
-2. Dataset preprocessing gains a "chunk condition" parameter, which can skip chunking in certain cases.
-3. Dataset preprocessing gains a "paragraph-first" mode with a controllable maximum paragraph depth. The original "length-first" mode no longer embeds paragraph-first logic.
-4. Workflows switch to single-direction in/out connections, with quick adding of the next node.
-5. Feishu and Yuque dataset sources are opened up to the open-source edition.
-6. Presets for the latest gemini and claude models.
-
-## ⚙️ Improvements
-
-1. Larger default timeout for LLM stream calls.
-2. Polished several confirmation interactions.
-3. Renamed the dataset "table dataset" to "backup import", and added export/import of dataset indexes.
-4. Workflow dataset citation limit: if the workflow has no related AI node, the control becomes plain manual input with an upper limit of 10 million.
-5. Voice input: mobile detection now accurately identifies phones rather than just small screens.
-6. Improved context truncation to always keep at least one Human message group.
-
-## 🐛 Fixes
-
-1. Incorrect score ordering in full-text search across multiple datasets.
-2. finish_reason could be captured incorrectly from stream responses.
-3. Tool-call mode did not save reasoning output.
-4. The dataset indexSize parameter did not take effect.
-5. Incorrect preview citations and context when workflows were nested two levels deep.
-6. An extra leading space when converting xlsx to Markdown.
-7. Base64 images were not converted and saved when reading Markdown files.
@@ -1,25 +0,0 @@
----
-title: 'V4.9.11 (in progress)'
-description: 'FastGPT V4.9.11 release notes'
-icon: 'upgrade'
-draft: false
-toc: true
-weight: 789
----
-
-## 🚀 New
-
-1. Node search within workflows.
-2. Sub-workflow version control in workflows: a "keep latest version" option removes the need for manual updates.
-
-## ⚙️ Improvements
-
-1. Raw-text cache moved to gridfs storage, raising the size limit.
-
-## 🐛 Fixes
-
-1. Global system tools declared by an administrator could not be version-managed in workflows.
-2. Broken context when an interactive node preceded a tool-call node.
-3. Backup import could not chunk files shorter than 1000 characters.
-4. Custom PDF parsing could not save base64 images.
@@ -1,5 +1,5 @@
 ---
-title: 'V4.9.8'
+title: 'V4.9.8 (in progress)'
 description: 'FastGPT V4.9.8 release notes'
 icon: 'upgrade'
 draft: false
@@ -7,17 +7,6 @@ toc: true
 weight: 792
 ---

-## Upgrade guide
-
-### 1. Back up your data
-
-### 2. Update the image tags
-
-- Update the FastGPT image tag to v4.9.8
-- Update the FastGPT commercial-edition image tag to v4.9.8
-- mcp_server does not need updating
-- Sandbox does not need updating
-- AIProxy does not need updating
-
 ## 🚀 New
@@ -1,43 +0,0 @@
----
-title: 'V4.9.9'
-description: 'FastGPT V4.9.9 release notes'
-icon: 'upgrade'
-draft: false
-toc: true
-weight: 791
----
-
-## Upgrade guide
-
-### 1. Back up your data
-
-### 2. Commercial-edition users: replace the License
-
-Commercial-edition users can contact the FastGPT support team for a License replacement plan. After replacing it, you can upgrade the system directly; the admin console will prompt for the new License.
-
-### 3. Update the image tags
-
-- Update the FastGPT image tag to v4.9.9
-- Update the FastGPT commercial-edition image tag to v4.9.9
-- mcp_server does not need updating
-- Sandbox does not need updating
-- AIProxy does not need updating
-
-## 🚀 New
-
-1. Login authentication switched from JWT to SessionId, allowing a cap on the number of logged-in clients.
-2. New commercial-edition License management model.
-3. Official-account (WeChat) calls now record chat errors for easier troubleshooting.
-4. API datasets support BasePath selection; an extra API endpoint is required, see [API dataset introduction](/docs/guide/knowledge_base/api_dataset/#4-获取文件详细信息用于获取文件信息)
-
-## ⚙️ Improvements
-
-1. Improved tool calling and the decision logic for new tools.
-2. Adjusted the Cite citation prompt.
-
-## 🐛 Fixes
-
-1. Application history save/publish records could not be retrieved.
-2. Permission issue when members created MCP tools.
-3. Source citation display passed a wrong ID, triggering "no permission to operate this file".
-4. Front-end data error in answer annotation.
@@ -43,7 +43,7 @@ type ResponseType = {
 // The file type of a single item in the file list
 type FileListItem = {
   id: string;
-  parentId: string; // may also be of type null or undefined
+  parentId: string | null;
   name: string;
   type: 'file' | 'folder';
   updateTime: Date;
@@ -59,7 +59,7 @@ type FileListItem = {
 {{< markdownify >}}

 {{% alert icon=" " context="success" %}}
-- parentId - parent id, optional, may also be null | undefined.
+- parentId - parent id, optional, may also be null.
 - searchKey - search keyword, optional
 {{% /alert %}}

@@ -68,7 +68,7 @@ curl --location --request POST '{{baseURL}}/v1/file/list' \
 --header 'Authorization: Bearer {{authorization}}' \
 --header 'Content-Type: application/json' \
 --data-raw '{
-    "parentId": "",
+    "parentId": null,
     "searchKey": ""
 }'
 ```
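The same request as a TypeScript sketch (endpoint and body fields come from the hunks above; `baseURL` and `authorization` are the documented placeholders):

```typescript
// Sketch: list files from an API dataset's file source, mirroring the curl above.
async function listFiles(baseURL: string, authorization: string) {
  const res = await fetch(`${baseURL}/v1/file/list`, {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${authorization}`,
      'Content-Type': 'application/json'
    },
    // parentId: null requests the root level; searchKey filters by keyword.
    body: JSON.stringify({ parentId: null, searchKey: '' })
  });
  return (await res.json()) as unknown; // see FileListItem above for the item shape
}
```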
@@ -185,40 +185,3 @@ curl --location --request GET '{{baseURL}}/v1/file/read?id=xx' \
 {{< /tabs >}}


-### 4. Get file details (used to fetch file information)
-
-{{< tabs tabTotal="2" >}}
-{{< tab tabName="Request example" >}}
-{{< markdownify >}}
-
-id is the file's id.
-
-```bash
-curl --location --request GET '{{baseURL}}/v1/file/detail?id=xx' \
---header 'Authorization: Bearer {{authorization}}'
-```
-
-{{< /markdownify >}}
-{{< /tab >}}
-
-{{< tab tabName="Response example" >}}
-{{< markdownify >}}
-
-```json
-{
-  "code": 200,
-  "success": true,
-  "message": "",
-  "data": {
-    "id": "docs",
-    "parentId": "",
-    "name": "docs"
-  }
-}
-```
-
-{{< /markdownify >}}
-{{< /tab >}}
-{{< /tabs >}}
@@ -28,6 +28,7 @@ FastGPT Commercial Edition is an enhanced version built on the FastGPT open-source edition, adding some exclusive…
 | Application publishing security config | ❌ | ✅ | ✅ |
 | Content moderation | ❌ | ✅ | ✅ |
 | Web site sync | ❌ | ✅ | ✅ |
 | Mainstream document library integration (currently: Yuque, Feishu) | ❌ | ✅ | ✅ |
 | Enhanced training mode | ❌ | ✅ | ✅ |
 | Quick third-party app integration (Feishu, official accounts) | ❌ | ✅ | ✅ |
 | Admin console | ❌ | ✅ | Not needed |
@@ -132,9 +132,7 @@ weight: 506
 ### Official account not responding

-Check the application's conversation log. If there is a conversation log but the WeChat official account does not respond, the IP allowlist did not take effect.
-After adding the allowlist IPs, it usually takes a few minutes for WeChat to update. You can look for error entries in the conversation log.
-
-![](
+After adding the allowlist IPs, it usually takes a few minutes for WeChat to update.

 ### How to start a new chat session
env.d.ts (vendored, 2 changes)
@@ -4,6 +4,7 @@ declare global {
     LOG_DEPTH: string;
     DEFAULT_ROOT_PSW: string;
     DB_MAX_LINK: string;
     TOKEN_KEY: string;
     FILE_TOKEN_KEY: string;
     ROOT_KEY: string;
     OPENAI_BASE_URL: string;
@@ -36,7 +37,6 @@ declare global {
     CONFIG_JSON_PATH?: string;
     PASSWORD_LOGIN_LOCK_SECONDS?: string;
     PASSWORD_EXPIRED_MONTH?: string;
-    MAX_LOGIN_SESSION?: string;
   }
 }
}
@@ -27,7 +27,7 @@ const datasetErr = [
   },
   {
     statusText: DatasetErrEnum.unExist,
-    message: i18nT('common:core.dataset.error.unExistDataset')
+    message: 'core.dataset.error.unExistDataset'
   },
   {
     statusText: DatasetErrEnum.unExistCollection,
@@ -2,28 +2,13 @@ import { type ErrType } from '../errorCode';
 import { i18nT } from '../../../../web/i18n/utils';
 /* dataset: 509000 */
 export enum SystemErrEnum {
-  communityVersionNumLimit = 'communityVersionNumLimit',
-  licenseAppAmountLimit = 'licenseAppAmountLimit',
-  licenseDatasetAmountLimit = 'licenseDatasetAmountLimit',
-  licenseUserAmountLimit = 'licenseUserAmountLimit'
+  communityVersionNumLimit = 'communityVersionNumLimit'
 }

 const systemErr = [
   {
     statusText: SystemErrEnum.communityVersionNumLimit,
     message: i18nT('common:code_error.system_error.community_version_num_limit')
-  },
-  {
-    statusText: SystemErrEnum.licenseAppAmountLimit,
-    message: i18nT('common:code_error.system_error.license_app_amount_limit')
-  },
-  {
-    statusText: SystemErrEnum.licenseDatasetAmountLimit,
-    message: i18nT('common:code_error.system_error.license_dataset_amount_limit')
-  },
-  {
-    statusText: SystemErrEnum.licenseUserAmountLimit,
-    message: i18nT('common:code_error.system_error.license_user_amount_limit')
-  }
+  }
 ];
@@ -5,7 +5,7 @@ export const checkPasswordRule = (password: string) => {
     /[A-Z]/, // Contains uppercase letters
     /[!@#$%^&*()_+=-]/ // Contains special characters
   ];
-  const validChars = /^[\dA-Za-z!@#$%^&*()_+=-]{8,100}$/;
+  const validChars = /^[\dA-Za-z!@#$%^&*()_+=-]{6,100}$/;

   // Check length and valid characters
   if (!validChars.test(password)) return false;
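For context, a minimal self-contained sketch built from the pieces visible in this hunk. The surrounding logic is not shown, so the "must match at least two of the patterns" rule and the first two patterns are assumptions for illustration; only the last two patterns and the `{6,100}` length/charset rule come from the diff:

```typescript
// Sketch, not the real implementation: combines the visible pieces of checkPasswordRule.
const patterns = [
  /\d/,              // contains digits (assumed; not shown in the hunk)
  /[a-z]/,           // contains lowercase letters (assumed)
  /[A-Z]/,           // contains uppercase letters (from the hunk)
  /[!@#$%^&*()_+=-]/ // contains special characters (from the hunk)
];
const validChars = /^[\dA-Za-z!@#$%^&*()_+=-]{6,100}$/; // v4.9.8-alpha side: min length 6

export const checkPasswordRuleSketch = (password: string): boolean => {
  if (!validChars.test(password)) return false; // length and allowed characters
  // Assumption: require at least two character classes.
  return patterns.filter((p) => p.test(password)).length >= 2;
};

// e.g. checkPasswordRuleSketch('abc123') === true under these assumptions,
// while the main branch's {8,100} rule would reject it for length.
```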
@@ -7,10 +7,6 @@ export const CUSTOM_SPLIT_SIGN = '-----CUSTOM_SPLIT_SIGN-----';
 type SplitProps = {
   text: string;
   chunkSize: number;
-
-  paragraphChunkDeep?: number; // Paragraph deep
-  paragraphChunkMinSize?: number; // Paragraph min size, if too small, it will merge
-
   maxSize?: number;
   overlapRatio?: number;
   customReg?: string[];
@@ -112,8 +108,6 @@ const commonSplit = (props: SplitProps): SplitResponse => {
   let {
     text = '',
     chunkSize,
-    paragraphChunkDeep = 5,
-    paragraphChunkMinSize = 100,
     maxSize = defaultMaxChunkSize,
     overlapRatio = 0.15,
     customReg = []
@@ -129,7 +123,7 @@ const commonSplit = (props: SplitProps): SplitResponse => {
   text = text.replace(/(```[\s\S]*?```|~~~[\s\S]*?~~~)/g, function (match) {
     return match.replace(/\n/g, codeBlockMarker);
   });
-  // 2. Markdown table handling - extract tables separately and merge headers
+  // 2. Table handling - extract tables separately and merge headers
   const tableReg =
     /(\n\|(?:(?:[^\n|]+\|){1,})\n\|(?:[:\-\s]+\|){1,}\n(?:\|(?:[^\n|]+\|)*\n?)*)(?:\n|$)/g;
   const tableDataList = text.match(tableReg);
@@ -149,40 +143,25 @@ const commonSplit = (props: SplitProps): SplitResponse => {
   text = text.replace(/(\r?\n|\r){3,}/g, '\n\n\n');

   // The larger maxLen is, the next sentence is less likely to trigger splitting
-  const customRegLen = customReg.length;
-  const markdownIndex = paragraphChunkDeep - 1;
-  const forbidOverlapIndex = customRegLen + markdownIndex + 4;
-
-  const markdownHeaderRules = ((deep?: number): { reg: RegExp; maxLen: number }[] => {
-    if (!deep || deep === 0) return [];
-
-    const maxDeep = Math.min(deep, 8); // Maximum 8 levels
-    const rules: { reg: RegExp; maxLen: number }[] = [];
-
-    for (let i = 1; i <= maxDeep; i++) {
-      const hashSymbols = '#'.repeat(i);
-      rules.push({
-        reg: new RegExp(`^(${hashSymbols}\\s[^\\n]+\\n)`, 'gm'),
-        maxLen: chunkSize
-      });
-    }
-
-    return rules;
-  })(paragraphChunkDeep);
+  const markdownIndex = 4;
+  const forbidOverlapIndex = 8;

   const stepReges: { reg: RegExp | string; maxLen: number }[] = [
     ...customReg.map((text) => ({
       reg: text.replaceAll('\\n', '\n'),
       maxLen: chunkSize
     })),
-    ...markdownHeaderRules,
+    { reg: /^(#\s[^\n]+\n)/gm, maxLen: chunkSize },
+    { reg: /^(##\s[^\n]+\n)/gm, maxLen: chunkSize },
+    { reg: /^(###\s[^\n]+\n)/gm, maxLen: chunkSize },
+    { reg: /^(####\s[^\n]+\n)/gm, maxLen: chunkSize },
+    { reg: /^(#####\s[^\n]+\n)/gm, maxLen: chunkSize },

     { reg: /([\n](```[\s\S]*?```|~~~[\s\S]*?~~~))/g, maxLen: maxSize }, // code block
     // Keep HTML table tags as intact as possible
     {
       reg: /(\n\|(?:(?:[^\n|]+\|){1,})\n\|(?:[:\-\s]+\|){1,}\n(?:\|(?:[^\n|]+\|)*\n)*)/g,
-      maxLen: chunkSize
-    }, // Keep Markdown tables as intact as possible
+      maxLen: Math.min(chunkSize * 1.5, maxSize)
+    }, // Keep tables as intact as possible
     { reg: /(\n{2,})/g, maxLen: chunkSize },
     { reg: /([\n])/g, maxLen: chunkSize },
     // ------ There's no overlap on the top
@@ -193,10 +172,12 @@ const commonSplit = (props: SplitProps): SplitResponse => {
     { reg: /([,]|,\s)/g, maxLen: chunkSize }
   ];

+  const customRegLen = customReg.length;
   const checkIsCustomStep = (step: number) => step < customRegLen;
   const checkIsMarkdownSplit = (step: number) =>
     step >= customRegLen && step <= markdownIndex + customRegLen;
-  const checkForbidOverlap = (step: number) => step <= forbidOverlapIndex;
+  const checkForbidOverlap = (step: number) => step <= forbidOverlapIndex + customRegLen;

   // if use markdown title split, Separate record title
   const getSplitTexts = ({ text, step }: { text: string; step: number }) => {
@@ -320,7 +301,6 @@ const commonSplit = (props: SplitProps): SplitResponse => {
   const splitTexts = getSplitTexts({ text, step });

   const chunks: string[] = [];

   for (let i = 0; i < splitTexts.length; i++) {
     const item = splitTexts[i];

@@ -463,6 +443,7 @@ const commonSplit = (props: SplitProps): SplitResponse => {
 */
 export const splitText2Chunks = (props: SplitProps): SplitResponse => {
   let { text = '' } = props;
   const start = Date.now();
   const splitWithCustomSign = text.split(CUSTOM_SPLIT_SIGN);

   const splitResult = splitWithCustomSign.map((item) => {
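A minimal usage sketch for splitText2Chunks as it appears on the v4.9.8-alpha side (field names come from the SplitProps hunk above; the module path, the sample text, the sizes, and the assumed `chunks` field on SplitResponse are illustrative, since SplitResponse's shape is not shown in this excerpt):

```typescript
// Sketch: split a small Markdown document into chunks.
import { splitText2Chunks } from './textSplitter'; // hypothetical module path

const { chunks } = splitText2Chunks({
  text: '# Title\n\nFirst paragraph.\n\n## Section\n\nSecond paragraph.',
  chunkSize: 512,     // target chunk size
  overlapRatio: 0.15, // default shown in the hunk above
  customReg: []       // extra split patterns, tried before the built-in rules
}) as { chunks: string[] }; // assumed field on SplitResponse
```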
packages/global/common/system/types/index.d.ts (vendored, 25 changes)
@@ -70,9 +70,6 @@ export type FastGPTFeConfigsType = {
   show_publish_dingtalk?: boolean;
   show_publish_offiaccount?: boolean;

-  show_dataset_enhance?: boolean;
-  show_batch_eval?: boolean;
-
   concatMd?: string;
   docUrl?: string;
   openAPIDocUrl?: string;
@@ -130,10 +127,8 @@ export type SystemEnvType = {
   vectorMaxProcess: number;
   qaMaxProcess: number;
   vlmMaxProcess: number;
-  tokenWorkers: number; // token count max worker
-
   hnswEfSearch: number;
-  hnswMaxScanTuples: number;
+  tokenWorkers: number; // token count max worker

   oneapiUrl?: string;
   chatApiKey?: string;
@@ -147,21 +142,3 @@ export type customPdfParseType = {
   doc2xKey?: string;
   price?: number;
 };
-
-export type LicenseDataType = {
-  startTime: string;
-  expiredTime: string;
-  company: string;
-  description?: string; // description
-  hosts?: string[]; // valid domains for the admin console
-  maxUsers?: number; // max number of users; unlimited if unset
-  maxApps?: number; // max number of apps; unlimited if unset
-  maxDatasets?: number; // max number of datasets; unlimited if unset
-  functions: {
-    sso: boolean;
-    pay: boolean;
-    customTemplates: boolean;
-    datasetEnhance: boolean;
-    batchEval: boolean;
-  };
-};
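For illustration, a hypothetical object conforming to the removed LicenseDataType (all values are invented; the field meanings come from the comments above):

```typescript
// Hypothetical example only: shows the shape of the removed LicenseDataType.
const exampleLicense /*: LicenseDataType */ = {
  startTime: '2025-01-01',
  expiredTime: '2026-01-01',
  company: 'Example Co.',
  description: 'Internal deployment', // optional description
  hosts: ['admin.example.com'],       // valid domains for the admin console
  maxUsers: 100,                      // unlimited if unset
  functions: {
    sso: true,
    pay: false,
    customTemplates: true,
    datasetEnhance: true,
    batchEval: false
  }
};
```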
@@ -2,248 +2,6 @@ import { type PromptTemplateItem } from '../type.d';
 import { i18nT } from '../../../../web/i18n/utils';
 import { getPromptByVersion } from './utils';

-export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
-  {
-    title: i18nT('app:template.standard_template'),
-    desc: '',
-    value: {
-      ['4.9.7']: `## 任务描述
-你是一个知识库回答助手,可以使用 <Cites></Cites> 中的内容作为你本次回答的参考。
-同时,为了使回答结果更加可信并且可追溯,你需要在每段话结尾添加引用标记,标识参考了哪些内容。
-
-## 追溯展示规则
-
-- 使用 [id](CITE) 的格式来引用 <Cites></Cites> 中的知识,其中 CITE 是固定常量, id 为引文中的 id。
-- 在 **每段话结尾** 自然地整合引用。例如: "Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)。"。
-- 每段话**至少包含一个引用**,多个引用时按顺序排列,例如:"Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)[67e517e74767063e882d6862](CITE)。\n 它的特点是非常轻量[67e517e74767063e882d6863](CITE)。"
-- 不要把示例作为知识点。
-- 不要伪造 id,返回的 id 必须都存在 <Cites></Cites> 中!
-
-## 通用规则
-
-- 如果你不清楚答案,你需要澄清。
-- 避免提及你是从 <Cites></Cites> 获取的知识。
-- 保持答案与 <Cites></Cites> 中描述的一致。
-- 使用 Markdown 语法优化回答格式。尤其是图片、表格、序列号等内容,需严格完整输出。
-- 使用与问题相同的语言回答。
-
-<Cites>
-{{quote}}
-</Cites>
-
-## 用户问题
-
-{{question}}
-
-## 回答
-`
-    }
-  },
-  {
-    title: i18nT('app:template.qa_template'),
-    desc: '',
-    value: {
-      ['4.9.7']: `## 任务描述
-作为一个问答助手,你会使用 <QA></QA> 标记中的提供的数据对进行内容回答。
-
-## 回答要求
-- 选择其中一个或多个问答对进行回答。
-- 回答的内容应尽可能与 <Answer></Answer> 中的内容一致。
-- 如果没有相关的问答对,你需要澄清。
-- 避免提及你是从 <QA></QA> 获取的知识,只需要回复答案。
-- 使用与问题相同的语言回答。
-
-<QA>
-{{quote}}
-</QA>
-
-## 用户问题
-
-{{question}}
-
-## 回答
-`
-    }
-  },
-  {
-    title: i18nT('app:template.standard_strict'),
-    desc: '',
-    value: {
-      ['4.9.7']: `## 任务描述
-你是一个知识库回答助手,可以使用 <Cites></Cites> 中的内容作为你本次回答的参考。
-同时,为了使回答结果更加可信并且可追溯,你需要在每段话结尾添加引用标记,标识参考了哪些内容。
-
-## 追溯展示规则
-
-- 使用 [id](CITE) 的格式来引用 <Cites></Cites> 中的知识,其中 CITE 是固定常量, id 为引文中的 id。
-- 在 **每段话结尾** 自然地整合引用。例如: "Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)。"。
-- 每段话**至少包含一个引用**,多个引用时按顺序排列,例如:"Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)[67e517e74767063e882d6862](CITE)。\n 它的特点是非常轻量[67e517e74767063e882d6863](CITE)。"
-- 不要把示例作为知识点。
-- 不要伪造 id,返回的 id 必须都存在 <Cites></Cites> 中!
-
-## 通用规则
-
-- 如果你不清楚答案,你需要澄清。
-- 避免提及你是从 <Cites></Cites> 获取的知识。
-- 保持答案与 <Cites></Cites> 中描述的一致。
-- 使用 Markdown 语法优化回答格式。尤其是图片、表格、序列号等内容,需严格完整输出。
-- 使用与问题相同的语言回答。
-
-## 严格要求
-
-你只能使用 <Cites></Cites> 标记中的内容作为参考,不能使用自身的知识,并且回答的内容需严格与 <Cites></Cites> 中的内容一致。
-
-<Cites>
-{{quote}}
-</Cites>
-
-## 用户问题
-
-{{question}}
-
-## 回答
-`
-    }
-  },
-  {
-    title: i18nT('app:template.hard_strict'),
-    desc: '',
-    value: {
-      ['4.9.7']: `## 任务描述
-作为一个问答助手,你会使用 <QA></QA> 标记中的提供的数据对进行内容回答。
-
-## 回答要求
-- 选择其中一个或多个问答对进行回答。
-- 回答的内容应尽可能与 <Answer></Answer> 中的内容一致。
-- 如果没有相关的问答对,你需要澄清。
-- 避免提及你是从 <QA></QA> 获取的知识,只需要回复答案。
-- 使用与问题相同的语言回答。
-
-## 严格要求
-
-你只能使用 <QA></QA> 标记中的内容作为参考,不能使用自身的知识,并且回答的内容需严格与 <QA></QA> 中的内容一致。
-
-<QA>
-{{quote}}
-</QA>
-
-## 用户问题
-
-{{question}}
-
-## 回答
-`
-    }
-  }
-];
-
-export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
-  {
-    title: i18nT('app:template.standard_template'),
-    desc: '',
-    value: {
-      ['4.9.7']: `## 任务描述
-你是一个知识库回答助手,可以使用 <Cites></Cites> 中的内容作为你本次回答的参考。
-同时,为了使回答结果更加可信并且可追溯,你需要在每段话结尾添加引用标记,标识参考了哪些内容。
-
-## 追溯展示规则
-
-- 使用 [id](CITE) 的格式来引用 <Cites></Cites> 中的知识,其中 CITE 是固定常量, id 为引文中的 id。
-- 在 **每段话结尾** 自然地整合引用。例如: "Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)。"。
-- 每段话**至少包含一个引用**,多个引用时按顺序排列,例如:"Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)[67e517e74767063e882d6862](CITE)。\n 它的特点是非常轻量[67e517e74767063e882d6863](CITE)。"
-- 不要把示例作为知识点。
-- 不要伪造 id,返回的 id 必须都存在 <Cites></Cites> 中!
-
-## 通用规则
-
-- 如果你不清楚答案,你需要澄清。
-- 避免提及你是从 <Cites></Cites> 获取的知识。
-- 保持答案与 <Cites></Cites> 中描述的一致。
-- 使用 Markdown 语法优化回答格式。尤其是图片、表格、序列号等内容,需严格完整输出。
-- 使用与问题相同的语言回答。
-
-<Cites>
-{{quote}}
-</Cites>`
-    }
-  },
-  {
-    title: i18nT('app:template.qa_template'),
-    desc: '',
-    value: {
-      ['4.9.8']: `## 任务描述
-作为一个问答助手,你会使用 <QA></QA> 标记中的提供的数据对进行内容回答。
-
-## 回答要求
-- 选择其中一个或多个问答对进行回答。
-- 回答的内容应尽可能与 <Answer></Answer> 中的内容一致。
-- 如果没有相关的问答对,你需要澄清。
-- 避免提及你是从 <QA></QA> 获取的知识,只需要回复答案。
-- 使用与问题相同的语言回答。
-
-<QA>
-{{quote}}
-</QA>`
-    }
-  },
-  {
-    title: i18nT('app:template.standard_strict'),
-    desc: '',
-    value: {
-      ['4.9.7']: `## 任务描述
-你是一个知识库回答助手,可以使用 <Cites></Cites> 中的内容作为你本次回答的参考。
-同时,为了使回答结果更加可信并且可追溯,你需要在每段话结尾添加引用标记,标识参考了哪些内容。
-
-## 追溯展示规则
-
-- 使用 [id](CITE) 的格式来引用 <Cites></Cites> 中的知识,其中 CITE 是固定常量, id 为引文中的 id。
-- 在 **每段话结尾** 自然地整合引用。例如: "Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)。"。
-- 每段话**至少包含一个引用**,多个引用时按顺序排列,例如:"Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)[67e517e74767063e882d6862](CITE)。\n 它的特点是非常轻量[67e517e74767063e882d6863](CITE)。"
-- 不要把示例作为知识点。
-- 不要伪造 id,返回的 id 必须都存在 <Cites></Cites> 中!
-
-## 通用规则
-
-- 如果你不清楚答案,你需要澄清。
-- 避免提及你是从 <Cites></Cites> 获取的知识。
-- 保持答案与 <Cites></Cites> 中描述的一致。
-- 使用 Markdown 语法优化回答格式。尤其是图片、表格、序列号等内容,需严格完整输出。
-- 使用与问题相同的语言回答。
-
-## 严格要求
-
-你只能使用 <Cites></Cites> 标记中的内容作为参考,不能使用自身的知识,并且回答的内容需严格与 <Cites></Cites> 中的内容一致。
-
-<Cites>
-{{quote}}
-</Cites>`
-    }
-  },
-  {
-    title: i18nT('app:template.hard_strict'),
-    desc: '',
-    value: {
-      ['4.9.7']: `## 任务描述
-作为一个问答助手,你会使用 <QA></QA> 标记中的提供的数据对进行内容回答。
-
-## 回答要求
-- 选择其中一个或多个问答对进行回答。
-- 回答的内容应尽可能与 <Answer></Answer> 中的内容一致。
-- 如果没有相关的问答对,你需要澄清。
-- 避免提及你是从 <QA></QA> 获取的知识,只需要回复答案。
-- 使用与问题相同的语言回答。
-
-## 严格要求
-
-你只能使用 <QA></QA> 标记中的内容作为参考,不能使用自身的知识,并且回答的内容需严格与 <QA></QA> 中的内容一致。
-
-<QA>
-{{quote}}
-</QA>`
-    }
-  }
-];
-
 export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
   {
     title: i18nT('app:template.standard_template'),
@@ -252,6 +10,11 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
     ['4.9.7']: `{
   "id": "{{id}}",
   "sourceName": "{{source}}",
   "content": "{{q}}\n{{a}}"
 }
 `,
+    ['4.9.2']: `{
+  "sourceName": "{{source}}",
+  "updateTime": "{{updateTime}}",
+  "content": "{{q}}\n{{a}}"
+}
@@ -262,7 +25,7 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
     title: i18nT('app:template.qa_template'),
     desc: i18nT('app:template.qa_template_des'),
     value: {
-      ['4.9.7']: `<Question>
+      ['4.9.2']: `<Question>
 {{q}}
 </Question>
 <Answer>
@@ -277,6 +40,11 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
     ['4.9.7']: `{
   "id": "{{id}}",
   "sourceName": "{{source}}",
   "content": "{{q}}\n{{a}}"
 }
 `,
+    ['4.9.2']: `{
+  "sourceName": "{{source}}",
+  "updateTime": "{{updateTime}}",
+  "content": "{{q}}\n{{a}}"
+}
@@ -287,7 +55,7 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
     title: i18nT('app:template.hard_strict'),
     desc: i18nT('app:template.hard_strict_des'),
     value: {
-      ['4.9.7']: `<Question>
+      ['4.9.2']: `<Question>
 {{q}}
 </Question>
 <Answer>
@@ -296,12 +64,263 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
   }
 ];

 export const getQuoteTemplate = (version?: string) => {
   const defaultTemplate = Prompt_QuoteTemplateList[0].value;

   return getPromptByVersion(version, defaultTemplate);
 };

+export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
+  {
+    title: i18nT('app:template.standard_template'),
+    desc: '',
+    value: {
+      ['4.9.7']: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
+
+<Reference>
+{{quote}}
+</Reference>
+
+回答要求:
+- 如果你不清楚答案,你需要澄清。
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。
+- 使用 [id](CITE) 格式来引用<Reference></Reference>中的知识,其中 CITE 是固定常量, id 为引文中的 id。
+- 在每段结尾自然地整合引用。例如: "FastGPT 是一个基于大语言模型(LLM)的知识库问答系统[67e517e74767063e882d6861](CITE)。"
+- 每段至少包含一个引用,也可根据内容需要加入多个引用,按顺序排列。`,
+      ['4.9.2']: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
+
+<Reference>
+{{quote}}
+</Reference>
+
+回答要求:
+- 如果你不清楚答案,你需要澄清。
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。
+
+问题:"""{{question}}"""`
+    }
+  },
+  {
+    title: i18nT('app:template.qa_template'),
+    desc: '',
+    value: {
+      ['4.9.2']: `使用 <QA></QA> 标记中的问答对进行回答。
+
+<QA>
+{{quote}}
+</QA>
+
+回答要求:
+- 选择其中一个或多个问答对进行回答。
+- 回答的内容应尽可能与 <答案></答案> 中的内容一致。
+- 如果没有相关的问答对,你需要澄清。
+- 避免提及你是从 QA 获取的知识,只需要回复答案。
+
+问题:"""{{question}}"""`
+    }
+  },
+  {
+    title: i18nT('app:template.standard_strict'),
+    desc: '',
+    value: {
+      ['4.9.7']: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
+
+<Reference>
+{{quote}}
+</Reference>
+
+思考流程:
+1. 判断问题是否与 <Reference></Reference> 标记中的内容有关。
+2. 如果有关,你按下面的要求回答。
+3. 如果无关,你直接拒绝回答本次问题。
+
+回答要求:
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。
+- 使用 [id](CITE) 格式来引用<Reference></Reference>中的知识,其中 CITE 是固定常量, id 为引文中的 id。
+- 在每段结尾自然地整合引用。例如: "FastGPT 是一个基于大语言模型(LLM)的知识库问答系统[67e517e74767063e882d6861](CITE)。"
+- 每段至少包含一个引用,也可根据内容需要加入多个引用,按顺序排列。
+
+问题:"""{{question}}"""`,
+      ['4.9.2']: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
+
+<Reference>
+{{quote}}
+</Reference>
+
+思考流程:
+1. 判断问题是否与 <Reference></Reference> 标记中的内容有关。
+2. 如果有关,你按下面的要求回答。
+3. 如果无关,你直接拒绝回答本次问题。
+
+回答要求:
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。
+
+问题:"""{{question}}"""`
+    }
+  },
+  {
+    title: i18nT('app:template.hard_strict'),
+    desc: '',
+    value: {
+      ['4.9.2']: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。
+
+<QA>
+{{quote}}
+</QA>
+
+思考流程:
+1. 判断问题是否与 <QA></QA> 标记中的内容有关。
+2. 如果无关,你直接拒绝回答本次问题。
+3. 判断是否有相近或相同的问题。
+4. 如果有相同的问题,直接输出对应答案。
+5. 如果只有相近的问题,请把相近的问题和答案一起输出。
+
+回答要求:
+- 如果没有相关的问答对,你需要澄清。
+- 回答的内容应尽可能与 <QA></QA> 标记中的内容一致。
+- 避免提及你是从 QA 获取的知识,只需要回复答案。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。
+
+问题:"""{{question}}"""`
+    }
+  }
+];
+
+export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
+  {
+    title: i18nT('app:template.standard_template'),
+    desc: '',
+    value: {
+      ['4.9.7']: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
+
+<Reference>
+{{quote}}
+</Reference>
+
+回答要求:
+- 如果你不清楚答案,你需要澄清。
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。
+- 使用 [id](CITE) 格式来引用<Reference></Reference>中的知识,其中 CITE 是固定常量, id 为引文中的 id。
+- 在每段结尾自然地整合引用。例如: "FastGPT 是一个基于大语言模型(LLM)的知识库问答系统[67e517e74767063e882d6861](CITE)。"
+- 每段至少包含一个引用,也可根据内容需要加入多个引用,按顺序排列。`,
+      ['4.9.2']: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
+
+<Reference>
+{{quote}}
+</Reference>
+
+回答要求:
+- 如果你不清楚答案,你需要澄清。
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。`
+    }
+  },
+  {
+    title: i18nT('app:template.qa_template'),
+    desc: '',
+    value: {
+      ['4.9.2']: `使用 <QA></QA> 标记中的问答对进行回答。
+
+<QA>
+{{quote}}
+</QA>
+
+回答要求:
+- 选择其中一个或多个问答对进行回答。
+- 回答的内容应尽可能与 <答案></答案> 中的内容一致。
+- 如果没有相关的问答对,你需要澄清。
+- 避免提及你是从 QA 获取的知识,只需要回复答案。`
+    }
+  },
+  {
+    title: i18nT('app:template.standard_strict'),
+    desc: '',
+    value: {
+      ['4.9.7']: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
+
+<Reference>
+{{quote}}
+</Reference>
+
+思考流程:
+1. 判断问题是否与 <Reference></Reference> 标记中的内容有关。
+2. 如果有关,你按下面的要求回答。
+3. 如果无关,你直接拒绝回答本次问题。
+
+回答要求:
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。
+- 使用 [id](CITE) 格式来引用<Reference></Reference>中的知识,其中 CITE 是固定常量, id 为引文中的 id。
+- 在每段结尾自然地整合引用。例如: "FastGPT 是一个基于大语言模型(LLM)的知识库问答系统[67e517e74767063e882d6861](CITE)。"
+- 每段至少包含一个引用,也可根据内容需要加入多个引用,按顺序排列。
+
+问题:"""{{question}}"""`,
+      ['4.9.2']: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
+
+<Reference>
+{{quote}}
+</Reference>
+
+思考流程:
+1. 判断问题是否与 <Reference></Reference> 标记中的内容有关。
+2. 如果有关,你按下面的要求回答。
+3. 如果无关,你直接拒绝回答本次问题。
+
+回答要求:
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。`
+    }
+  },
+  {
+    title: i18nT('app:template.hard_strict'),
+    desc: '',
+    value: {
+      ['4.9.2']: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。
+
+<QA>
+{{quote}}
+</QA>
+
+思考流程:
+1. 判断问题是否与 <QA></QA> 标记中的内容有关。
+2. 如果无关,你直接拒绝回答本次问题。
+3. 判断是否有相近或相同的问题。
+4. 如果有相同的问题,直接输出对应答案。
+5. 如果只有相近的问题,请把相近的问题和答案一起输出。
+
+回答要求:
+- 如果没有相关的问答对,你需要澄清。
+- 回答的内容应尽可能与 <QA></QA> 标记中的内容一致。
+- 避免提及你是从 QA 获取的知识,只需要回复答案。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。`
+    }
+  }
+];
+
@@ -314,7 +333,7 @@ export const getQuotePrompt = (version?: string, role: 'user' | 'system' = 'user') => {
   const quotePromptTemplates =
     role === 'user' ? Prompt_userQuotePromptList : Prompt_systemQuotePromptList;

 // Document quote prompt
 export const getDocumentQuotePrompt = (version?: string) => {
   const promptMap = {
-    ['4.9.7']: `将 <FilesContent></FilesContent> 中的内容作为本次对话的参考:
+    ['4.9.2']: `将 <FilesContent></FilesContent> 中的内容作为本次对话的参考:
 <FilesContent>
 {{quote}}
 </FilesContent>
@@ -1,19 +1,14 @@
 export const getDatasetSearchToolResponsePrompt = () => {
   return `## Role
-你是一个知识库回答助手,可以 "cites" 中的内容作为你本次回答的参考。为了使回答结果更加可信并且可追溯,你需要在每段话结尾添加引用标记,标识参考了哪些内容。
+你是一个知识库回答助手,可以 "quotes" 中的内容作为你本次回答的参考。为了使回答结果更加可信并且可追溯,你需要在每段话结尾添加引用标记。

-## 追溯展示规则
-
-- 使用 **[id](CITE)** 格式来引用 "cites" 中的知识,其中 CITE 是固定常量, id 为引文中的 id。
-- 在 **每段话结尾** 自然地整合引用。例如: "Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)。"。
-- 每段话**至少包含一个引用**,多个引用时按顺序排列,例如:"Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)[67e517e74767063e882d6862](CITE)。\n 它的特点是非常轻量[67e517e74767063e882d6863](CITE)。"
-- 不要把示例作为知识点。
-- 不要伪造 id,返回的 id 必须都存在 cites 中!
-
-## 通用规则
+## Rules
 - 如果你不清楚答案,你需要澄清。
-- 避免提及你是从 "cites" 获取的知识。
-- 保持答案与 "cites" 中描述的一致。
+- 避免提及你是从 "quotes" 获取的知识。
+- 保持答案与 "quotes" 中描述的一致。
 - 使用 Markdown 语法优化回答格式。尤其是图片、表格、序列号等内容,需严格完整输出。
-- 使用与问题相同的语言回答。`;
+- 使用与问题相同的语言回答。
+- 使用 [id](CITE) 格式来引用 "quotes" 中的知识,其中 CITE 是固定常量, id 为引文中的 id。
+- 在每段话结尾自然地整合引用。例如: "FastGPT 是一个基于大语言模型(LLM)的知识库问答系统[67e517e74767063e882d6861](CITE)。"
+- 每段话至少包含一个引用,也可根据内容需要加入多个引用,按顺序排列。`;
 };
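The `[id](CITE)` convention above is what the model is asked to emit. A small sketch of how such citation markers could be pulled out of an answer afterwards; the parsing code is hypothetical, and the 24-character hex id format is inferred from the examples in the prompts, not stated anywhere:

```typescript
// Hypothetical post-processing sketch: extract [id](CITE) markers from an answer.
const CITE_REG = /\[([0-9a-f]{24})\]\(CITE\)/g; // assumes 24-char hex ids, as in the examples

function extractCiteIds(answer: string): string[] {
  const ids = new Set<string>();
  for (const match of answer.matchAll(CITE_REG)) {
    ids.add(match[1]);
  }
  return [...ids];
}

// extractCiteIds('FastGPT 是一个知识库问答系统[67e517e74767063e882d6861](CITE)。')
// => ['67e517e74767063e882d6861']
```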
@@ -60,3 +60,5 @@ export enum AppTemplateTypeEnum {
   // special type
   contribute = 'contribute'
 }
+
+export const defaultDatasetMaxTokens = 16000;
@@ -5,7 +5,7 @@ import {
   FlowNodeTypeEnum
 } from '../../workflow/node/constant';
 import { nanoid } from 'nanoid';
-import { type McpToolConfigType } from '../type';
+import { type ToolType } from '../type';
 import { i18nT } from '../../../../web/i18n/utils';
 import { type RuntimeNodeItemType } from '../../workflow/runtime/type';

@@ -16,7 +16,7 @@ export const getMCPToolSetRuntimeNode = ({
   avatar
 }: {
   url: string;
-  toolList: McpToolConfigType[];
+  toolList: ToolType[];
   name?: string;
   avatar?: string;
 }): RuntimeNodeItemType => {
@@ -45,7 +45,7 @@ export const getMCPToolRuntimeNode = ({
   url,
   avatar = 'core/app/type/mcpToolsFill'
 }: {
-  tool: McpToolConfigType;
+  tool: ToolType;
   url: string;
   avatar?: string;
 }): RuntimeNodeItemType => {
@@ -65,7 +65,7 @@ export const getMCPToolRuntimeNode = ({
   ...Object.entries(tool.inputSchema?.properties || {}).map(([key, value]) => ({
     key,
     label: key,
-    valueType: value.type as WorkflowIOValueTypeEnum, // TODO: a mapping is needed here
+    valueType: value.type as WorkflowIOValueTypeEnum,
     description: value.description,
     toolDescription: value.description || key,
     required: tool.inputSchema?.required?.includes(key) || false,
packages/global/core/app/type.d.ts (vendored, 20 changes)
@@ -16,6 +16,16 @@ import { FlowNodeInputTypeEnum } from '../../core/workflow/node/constant';
 import type { WorkflowTemplateBasicType } from '@fastgpt/global/core/workflow/type';
 import type { SourceMemberType } from '../../support/user/type';

+export type ToolType = {
+  name: string;
+  description: string;
+  inputSchema: {
+    type: string;
+    properties?: Record<string, { type: string; description?: string }>;
+    required?: string[];
+  };
+};
+
 export type AppSchema = {
   _id: string;
   parentId?: ParentIdType;
@@ -107,16 +117,6 @@ export type AppSimpleEditFormType = {
   chatConfig: AppChatConfigType;
 };

-export type McpToolConfigType = {
-  name: string;
-  description: string;
-  inputSchema: {
-    type: string;
-    properties?: Record<string, { type: string; description?: string }>;
-    required?: string[];
-  };
-};
-
 /* app chat config type */
 export type AppChatConfigType = {
   welcomeText?: string;
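For illustration, a hypothetical tool definition conforming to the ToolType shape added above (the tool name, description, and parameters are invented; the type itself comes from the hunk):

```typescript
// Hypothetical example of a ToolType value (inputSchema follows JSON-Schema conventions).
const weatherTool: ToolType = {
  name: 'get_weather',
  description: 'Look up the current weather for a city',
  inputSchema: {
    type: 'object',
    properties: {
      city: { type: 'string', description: 'City name' },
      unit: { type: 'string', description: 'celsius or fahrenheit' }
    },
    required: ['city'] // 'unit' stays optional
  }
};
```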
@@ -9,9 +9,6 @@ import { type WorkflowTemplateBasicType } from '../workflow/type';
 import { AppTypeEnum } from './constants';
 import { AppErrEnum } from '../../common/error/code/app';
 import { PluginErrEnum } from '../../common/error/code/plugin';
-import { i18nT } from '../../../web/i18n/utils';
-import appErrList from '../../common/error/code/app';
-import pluginErrList from '../../common/error/code/plugin';

 export const getDefaultAppForm = (): AppSimpleEditFormType => {
   return {
@@ -192,10 +189,17 @@ export const getAppType = (config?: WorkflowTemplateBasicType | AppSimpleEditFormType) => {
   return '';
 };

-export const formatToolError = (error?: any) => {
-  if (!error || typeof error !== 'string') return;
+export const checkAppUnExistError = (error?: string) => {
+  const unExistError: Array<string> = [
+    AppErrEnum.unAuthApp,
+    AppErrEnum.unExist,
+    PluginErrEnum.unAuth,
+    PluginErrEnum.unExist
+  ];

-  const errorText = appErrList[error]?.message || pluginErrList[error]?.message;
-
-  return errorText || error;
+  if (!!error && unExistError.includes(error)) {
+    return error;
+  } else {
+    return undefined;
+  }
 };
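A brief usage sketch of checkAppUnExistError as defined on the v4.9.8-alpha side (the caller is hypothetical):

```typescript
// Hypothetical caller: treat "app/plugin does not exist" errors specially.
function renderNodeError(error?: string) {
  const unExistError = checkAppUnExistError(error);
  if (unExistError) {
    // One of AppErrEnum.unAuthApp/unExist or PluginErrEnum.unAuth/unExist:
    // e.g. show a "resource missing" placeholder instead of the raw error.
    return `missing: ${unExistError}`;
  }
  return error ?? '';
}
```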
packages/global/core/chat/type.d.ts (vendored, 3 changes)
@@ -26,7 +26,6 @@ export type ChatSchema = {
   teamId: string;
   tmbId: string;
   appId: string;
   createTime: Date;
   updateTime: Date;
   title: string;
   customTitle: string;
@@ -113,7 +112,6 @@ export type ChatItemSchema = (UserChatItemType | SystemChatItemType | AIChatItemType) & {
   appId: string;
   time: Date;
   durationSeconds?: number;
-  errorMsg?: string;
 };

 export type AdminFbkType = {
@@ -145,7 +143,6 @@ export type ChatSiteItemType = (UserChatItemType | SystemChatItemType | AIChatItemType) & {
   responseData?: ChatHistoryItemResType[];
   time?: Date;
   durationSeconds?: number;
-  errorMsg?: string;
 } & ChatBoxInputType &
   ResponseTagItemType;
packages/global/core/dataset/api.d.ts (vendored, 31 changes)
@@ -1,11 +1,9 @@
-import type { ChunkSettingsType, DatasetDataIndexItemType, DatasetSchemaType } from './type';
+import type { DatasetDataIndexItemType, DatasetSchemaType } from './type';
 import type {
   DatasetCollectionTypeEnum,
   DatasetCollectionDataProcessModeEnum,
   ChunkSettingModeEnum,
-  DataChunkSplitModeEnum,
-  ChunkTriggerConfigTypeEnum,
-  ParagraphChunkAIModeEnum
+  DataChunkSplitModeEnum
 } from './constants';
 import type { LLMModelItemType } from '../ai/model.d';
 import type { ParentIdType } from 'common/parentFolder/type';
@@ -34,16 +32,26 @@ export type DatasetUpdateBody = {
 };

 /* ================= collection ===================== */
 // Input + store params
-type DatasetCollectionStoreDataType = ChunkSettingsType & {
+export type DatasetCollectionChunkMetadataType = {
   parentId?: string;
-  metadata?: Record<string, any>;
-
   customPdfParse?: boolean;
+  trainingType?: DatasetCollectionDataProcessModeEnum;
+  imageIndex?: boolean;
+  autoIndexes?: boolean;
+
+  chunkSettingMode?: ChunkSettingModeEnum;
+  chunkSplitMode?: DataChunkSplitModeEnum;
+
+  chunkSize?: number;
+  indexSize?: number;
+
+  chunkSplitter?: string;
+  qaPrompt?: string;
+  metadata?: Record<string, any>;
 };

 // create collection params
-export type CreateDatasetCollectionParams = DatasetCollectionStoreDataType & {
+export type CreateDatasetCollectionParams = DatasetCollectionChunkMetadataType & {
   datasetId: string;
   name: string;
   type: DatasetCollectionTypeEnum;
@@ -64,7 +72,7 @@ export type CreateDatasetCollectionParams = DatasetCollectionChunkMetadataType & {
   nextSyncTime?: Date;
 };

-export type ApiCreateDatasetCollectionParams = DatasetCollectionStoreDataType & {
+export type ApiCreateDatasetCollectionParams = DatasetCollectionChunkMetadataType & {
   datasetId: string;
   tags?: string[];
 };
@@ -82,7 +90,7 @@ export type ApiDatasetCreateDatasetCollectionParams = ApiCreateDatasetCollectionParams & {
 export type FileIdCreateDatasetCollectionParams = ApiCreateDatasetCollectionParams & {
   fileId: string;
 };
-export type reTrainingDatasetFileCollectionParams = DatasetCollectionStoreDataType & {
+export type reTrainingDatasetFileCollectionParams = DatasetCollectionChunkMetadataType & {
   datasetId: string;
   collectionId: string;
 };
@@ -139,7 +147,6 @@ export type PushDatasetDataProps = {
   collectionId: string;
   data: PushDatasetDataChunkProps[];
   trainingType?: DatasetCollectionDataProcessModeEnum;
-  indexSize?: number;
   autoIndexes?: boolean;
   imageIndex?: boolean;
   prompt?: string;
@@ -120,8 +120,6 @@ export const DatasetCollectionSyncResultMap = {
 export enum DatasetCollectionDataProcessModeEnum {
   chunk = 'chunk',
   qa = 'qa',
-  backup = 'backup',
-
   auto = 'auto' // abandon
 }
 export const DatasetCollectionDataProcessModeMap = {
@@ -133,35 +131,21 @@ export const DatasetCollectionDataProcessModeMap = {
     label: i18nT('common:core.dataset.training.QA mode'),
     tooltip: i18nT('common:core.dataset.import.QA Import Tip')
   },
-  [DatasetCollectionDataProcessModeEnum.backup]: {
-    label: i18nT('dataset:backup_mode'),
-    tooltip: i18nT('dataset:backup_mode')
-  },
   [DatasetCollectionDataProcessModeEnum.auto]: {
     label: i18nT('common:core.dataset.training.Auto mode'),
     tooltip: i18nT('common:core.dataset.training.Auto mode Tip')
   }
 };

-export enum ChunkTriggerConfigTypeEnum {
-  minSize = 'minSize',
-  forceChunk = 'forceChunk',
-  maxSize = 'maxSize'
-}
 export enum ChunkSettingModeEnum {
   auto = 'auto',
   custom = 'custom'
 }

 export enum DataChunkSplitModeEnum {
-  paragraph = 'paragraph',
   size = 'size',
   char = 'char'
 }
-export enum ParagraphChunkAIModeEnum {
-  auto = 'auto',
-  force = 'force'
-}

 /* ------------ data -------------- */

@@ -170,6 +154,7 @@ export enum ImportDataSourceEnum {
   fileLocal = 'fileLocal',
   fileLink = 'fileLink',
   fileCustom = 'fileCustom',
+  csvTable = 'csvTable',
   externalFile = 'externalFile',
   apiDataset = 'apiDataset',
   reTraining = 'reTraining'
@@ -32,7 +32,7 @@ export const DatasetDataIndexMap: Record<
     color: 'red'
   },
   [DatasetDataIndexTypeEnum.image]: {
-    label: i18nT('dataset:data_index_image'),
+    label: i18nT('common:data_index_image'),
     color: 'purple'
   }
 };
@ -118,8 +118,9 @@ export const computeChunkSize = (params: {
return getLLMMaxChunkSize(params.llmModel);
}

return Math.min(params.chunkSize ?? chunkAutoChunkSize, getLLMMaxChunkSize(params.llmModel));
return Math.min(params.chunkSize || chunkAutoChunkSize, getLLMMaxChunkSize(params.llmModel));
};

export const computeChunkSplitter = (params: {
chunkSettingMode?: ChunkSettingModeEnum;
chunkSplitMode?: DataChunkSplitModeEnum;
@ -128,21 +129,8 @@ export const computeChunkSplitter = (params: {
if (params.chunkSettingMode === ChunkSettingModeEnum.auto) {
return undefined;
}
if (params.chunkSplitMode !== DataChunkSplitModeEnum.char) {
if (params.chunkSplitMode === DataChunkSplitModeEnum.size) {
return undefined;
}
return params.chunkSplitter;
};
export const computeParagraphChunkDeep = (params: {
chunkSettingMode?: ChunkSettingModeEnum;
chunkSplitMode?: DataChunkSplitModeEnum;
paragraphChunkDeep?: number;
}) => {
if (params.chunkSettingMode === ChunkSettingModeEnum.auto) {
return 5;
}
if (params.chunkSplitMode === DataChunkSplitModeEnum.paragraph) {
return params.paragraphChunkDeep;
}
return 0;
};

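Note on the computeChunkSize change above: the two sides differ only in `??` vs `||` for the chunkSize fallback. A minimal TypeScript sketch of the difference (the chunkAutoChunkSize value here is illustrative):

// `||` falls back on any falsy value, including an explicit 0;
// `??` falls back only on null/undefined.
const chunkAutoChunkSize = 1500; // assumed default, for illustration only
const withOr = (chunkSize?: number) => chunkSize || chunkAutoChunkSize;
const withNullish = (chunkSize?: number) => chunkSize ?? chunkAutoChunkSize;
withOr(0); // 1500 — an explicit 0 is silently replaced
withNullish(0); // 0 — an explicit 0 is respected
withOr(undefined); // 1500, same as withNullish(undefined)
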
48
packages/global/core/dataset/type.d.ts
vendored
@ -8,42 +8,26 @@ import type {
DatasetStatusEnum,
DatasetTypeEnum,
SearchScoreTypeEnum,
TrainingModeEnum,
ChunkSettingModeEnum,
ChunkTriggerConfigTypeEnum
TrainingModeEnum
} from './constants';
import type { DatasetPermission } from '../../support/permission/dataset/controller';
import { Permission } from '../../support/permission/controller';
import type { APIFileServer, FeishuServer, YuqueServer } from './apiDataset';
import type { SourceMemberType } from 'support/user/type';
import type { DatasetDataIndexTypeEnum } from './data/constants';
import type { ChunkSettingModeEnum } from './constants';

export type ChunkSettingsType = {
trainingType?: DatasetCollectionDataProcessModeEnum;

// Chunk trigger
chunkTriggerType?: ChunkTriggerConfigTypeEnum;
chunkTriggerMinSize?: number; // maxSize from agent model, not store

// Data enhance
dataEnhanceCollectionName?: boolean; // Auto add collection name to data

// Index enhance
imageIndex?: boolean;
trainingType: DatasetCollectionDataProcessModeEnum;
autoIndexes?: boolean;
imageIndex?: boolean;

// Chunk setting
chunkSettingMode?: ChunkSettingModeEnum; // system defaults / custom settings
chunkSettingMode?: ChunkSettingModeEnum;
chunkSplitMode?: DataChunkSplitModeEnum;
// Paragraph split
paragraphChunkAIMode?: ParagraphChunkAIModeEnum;
paragraphChunkDeep?: number; // Paragraph deep
paragraphChunkMinSize?: number; // Paragraph min size, if too small, it will merge
// Size split
chunkSize?: number; // chunk/qa chunk size, Paragraph max chunk size.
// Char split
chunkSplitter?: string; // chunk/qa chunk splitter
indexSize?: number;

chunkSize?: number;
indexSize?: number;
chunkSplitter?: string;
qaPrompt?: string;
};

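For orientation, a hypothetical ChunkSettingsType value built only from the fields declared above (all values are illustrative, not taken from the repo):

const exampleChunkSettings: ChunkSettingsType = {
  trainingType: DatasetCollectionDataProcessModeEnum.chunk,
  chunkTriggerType: ChunkTriggerConfigTypeEnum.minSize,
  chunkTriggerMinSize: 1000,
  chunkSettingMode: ChunkSettingModeEnum.custom,
  chunkSplitMode: DataChunkSplitModeEnum.paragraph,
  paragraphChunkAIMode: ParagraphChunkAIModeEnum.auto,
  paragraphChunkDeep: 5, // paragraph depth, per the comment above
  paragraphChunkMinSize: 100, // paragraphs smaller than this get merged
  chunkSize: 1500, // max size per paragraph chunk
  indexSize: 512
};
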
@ -82,7 +66,7 @@ export type DatasetSchemaType = {
defaultPermission?: number;
};

export type DatasetCollectionSchemaType = ChunkSettingsType & {
export type DatasetCollectionSchemaType = {
_id: string;
teamId: string;
tmbId: string;
@ -117,7 +101,18 @@ export type DatasetCollectionSchemaType = ChunkSettingsType & {

// Parse settings
customPdfParse?: boolean;
// Chunk settings
autoIndexes?: boolean;
imageIndex?: boolean;
trainingType: DatasetCollectionDataProcessModeEnum;

chunkSettingMode?: ChunkSettingModeEnum;
chunkSplitMode?: DataChunkSplitModeEnum;

chunkSize?: number;
indexSize?: number;
chunkSplitter?: string;
qaPrompt?: string;
};

export type DatasetCollectionTagsSchemaType = {
@ -180,7 +175,6 @@ export type DatasetTrainingSchemaType = {
q: string;
a: string;
chunkIndex: number;
indexSize?: number;
weight: number;
indexes: Omit<DatasetDataIndexItemType, 'dataId'>[];
retryCount: number;

@ -40,6 +40,5 @@ export function getSourceNameIcon({
export const predictDataLimitLength = (mode: TrainingModeEnum, data: any[]) => {
if (mode === TrainingModeEnum.qa) return data.length * 20;
if (mode === TrainingModeEnum.auto) return data.length * 5;
if (mode === TrainingModeEnum.image) return data.length * 2;
return data.length;
};

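Worked example for predictDataLimitLength: the multiplier estimates how many training-queue items one input record expands into per mode (assuming TrainingModeEnum also has a plain chunk member that takes the default branch):

const data = new Array(10).fill({});
predictDataLimitLength(TrainingModeEnum.qa, data); // 10 * 20 = 200
predictDataLimitLength(TrainingModeEnum.auto, data); // 10 * 5 = 50
predictDataLimitLength(TrainingModeEnum.image, data); // 10 * 2 = 20
// any other mode falls through to data.length = 10
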
@ -7,7 +7,7 @@ import type {
} from '../../chat/type';
import { NodeOutputItemType } from '../../chat/type';
import type { FlowNodeInputItemType, FlowNodeOutputItemType } from '../type/io.d';
import type { NodeToolConfigType, StoreNodeItemType } from '../type/node';
import type { StoreNodeItemType } from '../type/node';
import type { DispatchNodeResponseKeyEnum } from './constants';
import type { StoreEdgeItemType } from '../type/edge';
import type { NodeInputKeyEnum } from '../constants';
@ -102,9 +102,6 @@ export type RuntimeNodeItemType = {

pluginId?: string; // workflow id / plugin id
version?: string;

// tool
toolConfig?: NodeToolConfigType;
};

export type RuntimeEdgeItemType = StoreEdgeItemType & {
@ -117,7 +114,7 @@ export type DispatchNodeResponseType = {
runningTime?: number;
query?: string;
textOutput?: string;
error?: Record<string, any> | string;
error?: Record<string, any>;
customInputs?: Record<string, any>;
customOutputs?: Record<string, any>;
nodeInputs?: Record<string, any>;

16
packages/global/core/workflow/type/node.d.ts
vendored
@ -20,17 +20,11 @@ import { RuntimeNodeItemType } from '../runtime/type';
import { PluginTypeEnum } from '../../plugin/constants';
import { RuntimeEdgeItemType, StoreEdgeItemType } from './edge';
import { NextApiResponse } from 'next';
import type { AppDetailType, AppSchema, McpToolConfigType } from '../../app/type';
import { AppDetailType, AppSchema } from '../../app/type';
import type { ParentIdType } from 'common/parentFolder/type';
import { AppTypeEnum } from '../../app/constants';
import { AppTypeEnum } from 'core/app/constants';
import type { WorkflowInteractiveResponseType } from '../template/system/interactive/type';

export type NodeToolConfigType = {
mcpTool?: McpToolConfigType & {
url: string;
};
};

export type FlowNodeCommonType = {
parentNodeId?: string;
flowNodeType: FlowNodeTypeEnum; // render node card
@ -52,13 +46,12 @@ export type FlowNodeCommonType = {
// plugin data
pluginId?: string;
isFolder?: boolean;
// pluginType?: AppTypeEnum;
pluginData?: PluginDataType;

// tool data
toolData?: NodeToolConfigType;
};

export type PluginDataType = {
version?: string;
diagram?: string;
userGuide?: string;
courseUrl?: string;
@ -125,7 +118,6 @@ export type FlowNodeItemType = FlowNodeTemplateType & {
nodeId: string;
parentNodeId?: string;
isError?: boolean;
searchedText?: string;
debugResult?: {
status: 'running' | 'success' | 'skipped' | 'failed';
message?: string;

@ -1,5 +1,4 @@
export enum OperationLogEventEnum {
//Team
LOGIN = 'LOGIN',
CREATE_INVITATION_LINK = 'CREATE_INVITATION_LINK',
JOIN_TEAM = 'JOIN_TEAM',
@ -12,52 +11,5 @@ export enum OperationLogEventEnum {
RELOCATE_DEPARTMENT = 'RELOCATE_DEPARTMENT',
CREATE_GROUP = 'CREATE_GROUP',
DELETE_GROUP = 'DELETE_GROUP',
ASSIGN_PERMISSION = 'ASSIGN_PERMISSION',
//APP
CREATE_APP = 'CREATE_APP',
UPDATE_APP_INFO = 'UPDATE_APP_INFO',
MOVE_APP = 'MOVE_APP',
DELETE_APP = 'DELETE_APP',
UPDATE_APP_COLLABORATOR = 'UPDATE_APP_COLLABORATOR',
DELETE_APP_COLLABORATOR = 'DELETE_APP_COLLABORATOR',
TRANSFER_APP_OWNERSHIP = 'TRANSFER_APP_OWNERSHIP',
CREATE_APP_COPY = 'CREATE_APP_COPY',
CREATE_APP_FOLDER = 'CREATE_APP_FOLDER',
UPDATE_PUBLISH_APP = 'UPDATE_PUBLISH_APP',
CREATE_APP_PUBLISH_CHANNEL = 'CREATE_APP_PUBLISH_CHANNEL',
UPDATE_APP_PUBLISH_CHANNEL = 'UPDATE_APP_PUBLISH_CHANNEL',
DELETE_APP_PUBLISH_CHANNEL = 'DELETE_APP_PUBLISH_CHANNEL',
EXPORT_APP_CHAT_LOG = 'EXPORT_APP_CHAT_LOG',
//Dataset
CREATE_DATASET = 'CREATE_DATASET',
UPDATE_DATASET = 'UPDATE_DATASET',
DELETE_DATASET = 'DELETE_DATASET',
MOVE_DATASET = 'MOVE_DATASET',
UPDATE_DATASET_COLLABORATOR = 'UPDATE_DATASET_COLLABORATOR',
DELETE_DATASET_COLLABORATOR = 'DELETE_DATASET_COLLABORATOR',
TRANSFER_DATASET_OWNERSHIP = 'TRANSFER_DATASET_OWNERSHIP',
EXPORT_DATASET = 'EXPORT_DATASET',
CREATE_DATASET_FOLDER = 'CREATE_DATASET_FOLDER',
//Collection
CREATE_COLLECTION = 'CREATE_COLLECTION',
UPDATE_COLLECTION = 'UPDATE_COLLECTION',
DELETE_COLLECTION = 'DELETE_COLLECTION',
RETRAIN_COLLECTION = 'RETRAIN_COLLECTION',
//Data
CREATE_DATA = 'CREATE_DATA',
UPDATE_DATA = 'UPDATE_DATA',
DELETE_DATA = 'DELETE_DATA',
//SearchTest
SEARCH_TEST = 'SEARCH_TEST',
//Account
CHANGE_PASSWORD = 'CHANGE_PASSWORD',
CHANGE_NOTIFICATION_SETTINGS = 'CHANGE_NOTIFICATION_SETTINGS',
CHANGE_MEMBER_NAME_ACCOUNT = 'CHANGE_MEMBER_NAME_ACCOUNT',
PURCHASE_PLAN = 'PURCHASE_PLAN',
EXPORT_BILL_RECORDS = 'EXPORT_BILL_RECORDS',
CREATE_INVOICE = 'CREATE_INVOICE',
SET_INVOICE_HEADER = 'SET_INVOICE_HEADER',
CREATE_API_KEY = 'CREATE_API_KEY',
UPDATE_API_KEY = 'UPDATE_API_KEY',
DELETE_API_KEY = 'DELETE_API_KEY'
ASSIGN_PERMISSION = 'ASSIGN_PERMISSION'
}

@ -13,7 +13,6 @@ const staticPluginList = [
'WeWorkWebhook',
'google',
'bing',
'bocha',
'delay'
];
// Run in worker thread (Have npm packages)

@ -1,5 +1,6 @@
{
"author": "",
"version": "4816",
"name": "钉钉 webhook",
"avatar": "plugins/dingding",
"intro": "向钉钉机器人发起 webhook 请求。",

@ -1,5 +1,6 @@
{
"author": "Menghuan1918",
"version": "488",
"name": "PDF识别",
"avatar": "plugins/doc2x",
"intro": "将PDF文件发送至Doc2X进行解析,返回结构化的LaTeX公式的文本(markdown),支持传入String类型的URL或者流程输出中的文件链接变量",

@ -1,5 +1,6 @@
{
"author": "Menghuan1918",
"version": "488",
"name": "Doc2X服务",
"avatar": "plugins/doc2x",
"intro": "将传入的图片或PDF文件发送至Doc2X进行解析,返回带LaTeX公式的markdown格式的文本。",

@ -1,5 +1,6 @@
{
"author": "",
"version": "4816",
"name": "企业微信 webhook",
"avatar": "plugins/qiwei",
"intro": "向企业微信机器人发起 webhook 请求。只能内部群使用。",

@ -1,5 +1,6 @@
{
"author": "",
"version": "4811",
"name": "Bing搜索",
"avatar": "core/workflow/template/bing",
"intro": "在Bing中搜索。",

@ -1,677 +0,0 @@
{
"author": "",
"name": "博查搜索",
"avatar": "core/workflow/template/bocha",
"intro": "使用博查AI搜索引擎进行网络搜索。",
"showStatus": true,
"weight": 10,
"courseUrl": "",
"isTool": true,
"templateType": "search",
"workflow": {
"nodes": [
{
"nodeId": "pluginInput",
"name": "workflow:template.plugin_start",
"intro": "workflow:intro_plugin_input",
"avatar": "core/workflow/template/workflowStart",
"flowNodeType": "pluginInput",
"showStatus": false,
"position": {
"x": 636.3048409085379,
"y": -238.61714728578016
},
"version": "481",
"inputs": [
{
"renderTypeList": [
"input"
],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "apiKey",
"label": "apiKey",
"description": "博查API密钥",
"defaultValue": "",
"required": true
},
{
"renderTypeList": [
"input",
"reference"
],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "query",
"label": "query",
"description": "搜索查询词",
"defaultValue": "",
"required": true,
"toolDescription": "搜索查询词"
},
{
"renderTypeList": [
"input",
"reference"
],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "freshness",
"label": "freshness",
"description": "搜索指定时间范围内的网页。可填值:oneDay(一天内)、oneWeek(一周内)、oneMonth(一个月内)、oneYear(一年内)、noLimit(不限,默认)、YYYY-MM-DD..YYYY-MM-DD(日期范围)、YYYY-MM-DD(指定日期)",
"defaultValue": "noLimit",
"required": false,
"toolDescription": "搜索时间范围"
},
{
"renderTypeList": [
"input",
"reference"
],
"selectedTypeIndex": 0,
"valueType": "boolean",
"canEdit": true,
"key": "summary",
"label": "summary",
"description": "是否显示文本摘要。true显示,false不显示(默认)",
"defaultValue": false,
"required": false,
"toolDescription": "是否显示文本摘要"
},
{
"renderTypeList": [
"input",
"reference"
],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "include",
"label": "include",
"description": "指定搜索的site范围。多个域名使用|或,分隔,最多20个。例如:qq.com|m.163.com",
"defaultValue": "",
"required": false,
"toolDescription": "指定搜索的site范围"
},
{
"renderTypeList": [
"input",
"reference"
],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "exclude",
"label": "exclude",
"description": "排除搜索的网站范围。多个域名使用|或,分隔,最多20个。例如:qq.com|m.163.com",
"defaultValue": "",
"required": false,
"toolDescription": "排除搜索的网站范围"
},
{
"renderTypeList": [
"input",
"reference"
],
"selectedTypeIndex": 0,
"valueType": "number",
"canEdit": true,
"key": "count",
"label": "count",
"description": "返回结果的条数。可填范围:1-50,默认为10",
"defaultValue": 10,
"required": false,
"min": 1,
"max": 50,
"toolDescription": "返回结果条数"
}
],
"outputs": [
{
"id": "apiKey",
"valueType": "string",
"key": "apiKey",
"label": "apiKey",
"type": "hidden"
},
{
"id": "query",
"valueType": "string",
"key": "query",
"label": "query",
"type": "hidden"
},
{
"id": "freshness",
"valueType": "string",
"key": "freshness",
"label": "freshness",
"type": "hidden"
},
{
"id": "summary",
"valueType": "boolean",
"key": "summary",
"label": "summary",
"type": "hidden"
},
{
"id": "include",
"valueType": "string",
"key": "include",
"label": "include",
"type": "hidden"
},
{
"id": "exclude",
"valueType": "string",
"key": "exclude",
"label": "exclude",
"type": "hidden"
},
{
"id": "count",
"valueType": "number",
"key": "count",
"label": "count",
"type": "hidden"
}
]
},
{
"nodeId": "pluginOutput",
"name": "common:core.module.template.self_output",
"intro": "workflow:intro_custom_plugin_output",
"avatar": "core/workflow/template/pluginOutput",
"flowNodeType": "pluginOutput",
"showStatus": false,
"position": {
"x": 2764.1105686698083,
"y": -30.617147285780163
},
"version": "481",
"inputs": [
{
"renderTypeList": [
"reference"
],
"valueType": "object",
"canEdit": true,
"key": "result",
"label": "result",
"isToolOutput": true,
"description": "",
"value": [
"nyA6oA8mF1iW",
"httpRawResponse"
]
}
],
"outputs": []
},
{
"nodeId": "pluginConfig",
"name": "common:core.module.template.system_config",
"intro": "",
"avatar": "core/workflow/template/systemConfig",
"flowNodeType": "pluginConfig",
"position": {
"x": 184.66337662472682,
"y": -216.05298493910115
},
"version": "4811",
"inputs": [],
"outputs": []
},
{
"nodeId": "nyA6oA8mF1iW",
"name": "HTTP 请求",
"intro": "调用博查搜索API",
"avatar": "core/workflow/template/httpRequest",
"flowNodeType": "httpRequest468",
"showStatus": true,
"position": {
"x": 1335.0647252518884,
"y": -455.9043948565971
},
"version": "481",
"inputs": [
{
"key": "system_addInputParam",
"renderTypeList": [
"addInputParam"
],
"valueType": "dynamic",
"label": "",
"required": false,
"description": "common:core.module.input.description.HTTP Dynamic Input",
"customInputConfig": {
"selectValueTypeList": [
"string",
"number",
"boolean",
"object",
"arrayString",
"arrayNumber",
"arrayBoolean",
"arrayObject",
"arrayAny",
"any",
"chatHistory",
"datasetQuote",
"dynamic",
"selectDataset",
"selectApp"
],
"showDescription": false,
"showDefaultValue": true
},
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpMethod",
"renderTypeList": [
"custom"
],
"valueType": "string",
"label": "",
"value": "POST",
"required": true,
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpTimeout",
"renderTypeList": [
"custom"
],
"valueType": "number",
"label": "",
"value": 30,
"min": 5,
"max": 600,
"required": true,
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpReqUrl",
"renderTypeList": [
"hidden"
],
"valueType": "string",
"label": "",
"description": "common:core.module.input.description.Http Request Url",
"placeholder": "https://api.ai.com/getInventory",
"required": false,
"value": "https://api.bochaai.com/v1/web-search",
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpHeader",
"renderTypeList": [
"custom"
],
"valueType": "any",
"value": [
{
"key": "Authorization",
"type": "string",
"value": "Bearer {{$pluginInput.apiKey$}}"
},
{
"key": "Content-Type",
"type": "string",
"value": "application/json"
}
],
"label": "",
"description": "common:core.module.input.description.Http Request Header",
"placeholder": "common:core.module.input.description.Http Request Header",
"required": false,
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpParams",
"renderTypeList": [
"hidden"
],
"valueType": "any",
"value": [],
"label": "",
"required": false,
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpJsonBody",
"renderTypeList": [
"hidden"
],
"valueType": "any",
"value": "{\n \"query\": \"{{query}}\",\n \"freshness\": \"{{freshness}}\",\n \"summary\": {{summary}},\n \"include\": \"{{include}}\",\n \"exclude\": \"{{exclude}}\",\n \"count\": {{count}}\n}",
"label": "",
"required": false,
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpFormBody",
"renderTypeList": [
"hidden"
],
"valueType": "any",
"value": [],
"label": "",
"required": false,
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpContentType",
"renderTypeList": [
"hidden"
],
"valueType": "string",
"value": "json",
"label": "",
"required": false,
"debugLabel": "",
"toolDescription": ""
},
{
"valueType": "string",
"renderTypeList": [
"reference"
],
"key": "query",
"label": "query",
"toolDescription": "博查搜索检索词",
"required": true,
"canEdit": true,
"editField": {
"key": true,
"description": true
},
"customInputConfig": {
"selectValueTypeList": [
"string",
"number",
"boolean",
"object",
"arrayString",
"arrayNumber",
"arrayBoolean",
"arrayObject",
"arrayAny",
"any",
"chatHistory",
"datasetQuote",
"dynamic",
"selectApp",
"selectDataset"
],
"showDescription": false,
"showDefaultValue": true
},
"value": [
"pluginInput",
"query"
]
},
{
"valueType": "string",
"renderTypeList": [
"reference"
],
"key": "freshness",
"label": "freshness",
"toolDescription": "搜索时间范围",
"required": false,
"canEdit": true,
"editField": {
"key": true,
"description": true
},
"customInputConfig": {
"selectValueTypeList": [
"string",
"number",
"boolean",
"object",
"arrayString",
"arrayNumber",
"arrayBoolean",
"arrayObject",
"arrayAny",
"any",
"chatHistory",
"datasetQuote",
"dynamic",
"selectApp",
"selectDataset"
],
"showDescription": false,
"showDefaultValue": true
},
"value": [
"pluginInput",
"freshness"
]
},
{
"valueType": "boolean",
"renderTypeList": [
"reference"
],
"key": "summary",
"label": "summary",
"toolDescription": "是否显示文本摘要",
"required": false,
"canEdit": true,
"editField": {
"key": true,
"description": true
},
"customInputConfig": {
"selectValueTypeList": [
"string",
"number",
"boolean",
"object",
"arrayString",
"arrayNumber",
"arrayBoolean",
"arrayObject",
"arrayAny",
"any",
"chatHistory",
"datasetQuote",
"dynamic",
"selectApp",
"selectDataset"
],
"showDescription": false,
"showDefaultValue": true
},
"value": [
"pluginInput",
"summary"
]
},
{
"valueType": "string",
"renderTypeList": [
"reference"
],
"key": "include",
"label": "include",
"toolDescription": "指定搜索的site范围",
"required": false,
"canEdit": true,
"editField": {
"key": true,
"description": true
},
"customInputConfig": {
"selectValueTypeList": [
"string",
"number",
"boolean",
"object",
"arrayString",
"arrayNumber",
"arrayBoolean",
"arrayObject",
"arrayAny",
"any",
"chatHistory",
"datasetQuote",
"dynamic",
"selectApp",
"selectDataset"
],
"showDescription": false,
"showDefaultValue": true
},
"value": [
"pluginInput",
"include"
]
},
{
"valueType": "string",
"renderTypeList": [
"reference"
],
"key": "exclude",
"label": "exclude",
"toolDescription": "排除搜索的网站范围",
"required": false,
"canEdit": true,
"editField": {
"key": true,
"description": true
},
"customInputConfig": {
"selectValueTypeList": [
"string",
"number",
"boolean",
"object",
"arrayString",
"arrayNumber",
"arrayBoolean",
"arrayObject",
"arrayAny",
"any",
"chatHistory",
"datasetQuote",
"dynamic",
"selectApp",
"selectDataset"
],
"showDescription": false,
"showDefaultValue": true
},
"value": [
"pluginInput",
"exclude"
]
},
{
"valueType": "number",
"renderTypeList": [
"reference"
],
"key": "count",
"label": "count",
"toolDescription": "返回结果条数",
"required": false,
"canEdit": true,
"editField": {
"key": true,
"description": true
},
"customInputConfig": {
"selectValueTypeList": [
"string",
"number",
"boolean",
"object",
"arrayString",
"arrayNumber",
"arrayBoolean",
"arrayObject",
"arrayAny",
"any",
"chatHistory",
"datasetQuote",
"dynamic",
"selectApp",
"selectDataset"
],
"showDescription": false,
"showDefaultValue": true
},
"value": [
"pluginInput",
"count"
]
}
],
"outputs": [
{
"id": "error",
"key": "error",
"label": "workflow:request_error",
"description": "HTTP请求错误信息,成功时返回空",
"valueType": "object",
"type": "static"
},
{
"id": "httpRawResponse",
"key": "httpRawResponse",
"required": true,
"label": "workflow:raw_response",
"description": "HTTP请求的原始响应。只能接受字符串或JSON类型响应数据。",
"valueType": "any",
"type": "static"
},
{
"id": "system_addOutputParam",
"key": "system_addOutputParam",
"type": "dynamic",
"valueType": "dynamic",
"label": "",
"editField": {
"key": true,
"valueType": true
}
}
]
}
],
"edges": [
|
||||
{
|
||||
"source": "pluginInput",
|
||||
"target": "nyA6oA8mF1iW",
|
||||
"sourceHandle": "pluginInput-source-right",
|
||||
"targetHandle": "nyA6oA8mF1iW-target-left"
|
||||
},
|
||||
{
|
||||
"source": "nyA6oA8mF1iW",
|
||||
"target": "pluginOutput",
|
||||
"sourceHandle": "nyA6oA8mF1iW-source-right",
|
||||
"targetHandle": "pluginOutput-target-left"
|
||||
}
|
||||
]
|
||||
},
|
||||
"chatConfig": {}
|
||||
}
|
||||
@ -1,5 +1,6 @@
{
"author": "silencezhang",
"version": "4811",
"name": "数据库连接",
"avatar": "core/workflow/template/datasource",
"intro": "可连接常用数据库,并执行sql",

@ -1,5 +1,6 @@
{
"author": "collin",
"version": "4817",
"name": "流程等待",
"avatar": "core/workflow/template/sleep",
"intro": "让工作流等待指定时间后运行",

@ -1,5 +1,6 @@
{
"author": "silencezhang",
"version": "4817",
"name": "基础图表",
"avatar": "core/workflow/template/baseChart",
"intro": "根据数据生成图表,可根据chartType生成柱状图,折线图,饼图",

@ -1,5 +1,6 @@
{
"author": "silencezhang",
"version": "486",
"name": "BI图表功能",
"avatar": "core/workflow/template/BI",
"intro": "BI图表功能,可以生成一些常用的图表,如饼图,柱状图,折线图等",

@ -1,5 +1,6 @@
{
"author": "",
"version": "486",
"name": "DuckDuckGo 网络搜索",
"avatar": "core/workflow/template/duckduckgo",
"intro": "使用 DuckDuckGo 进行网络搜索",

@ -1,5 +1,6 @@
{
"author": "",
"version": "486",
"name": "DuckDuckGo 图片搜索",
"avatar": "core/workflow/template/duckduckgo",
"intro": "使用 DuckDuckGo 进行图片搜索",

@ -1,5 +1,6 @@
{
"author": "",
"version": "486",
"name": "DuckDuckGo 新闻检索",
"avatar": "core/workflow/template/duckduckgo",
"intro": "使用 DuckDuckGo 进行新闻检索",

@ -1,5 +1,6 @@
{
"author": "",
"version": "486",
"name": "DuckDuckGo 视频搜索",
"avatar": "core/workflow/template/duckduckgo",
"intro": "使用 DuckDuckGo 进行视频搜索",

@ -1,5 +1,6 @@
{
"author": "",
"version": "486",
"name": "DuckDuckGo服务",
"avatar": "core/workflow/template/duckduckgo",
"intro": "DuckDuckGo 服务,包含网络搜索、图片搜索、新闻搜索等。",

@ -1,5 +1,6 @@
{
"author": "",
"version": "488",
"name": "飞书 webhook",
"avatar": "core/app/templates/plugin-feishu",
"intro": "向飞书机器人发起 webhook 请求。",

@ -1,5 +1,6 @@
{
"author": "",
"version": "486",
"name": "网页内容抓取",
"avatar": "core/workflow/template/fetchUrl",
"intro": "可获取一个网页链接内容,并以 Markdown 格式输出,仅支持获取静态网站。",

@ -1,5 +1,6 @@
{
"author": "",
"version": "481",
"templateType": "tools",
"name": "获取当前时间",
"avatar": "core/workflow/template/getTime",

@ -1,5 +1,6 @@
{
"author": "",
"version": "4811",
"name": "Google搜索",
"avatar": "core/workflow/template/google",
"intro": "在google中搜索。",

@ -1,5 +1,6 @@
{
"author": "",
"version": "486",
"name": "数学公式执行",
"avatar": "core/workflow/template/mathCall",
"intro": "用于执行数学表达式的工具,通过 js 的 expr-eval 库运行表达式并返回结果。",

@ -1,5 +1,6 @@
{
"author": "",
"version": "4816",
"name": "Search XNG 搜索",
"avatar": "core/workflow/template/searxng",
"intro": "使用 Search XNG 服务进行搜索。",

@ -1,5 +1,6 @@
{
"author": "cloudpense",
"version": "1.0.0",
"name": "Email 邮件发送",
"avatar": "plugins/email",
"intro": "通过SMTP协议发送电子邮件(nodemailer)",

@ -1,5 +1,6 @@
{
"author": "",
"version": "489",
"name": "文本加工",
"avatar": "/imgs/workflow/textEditor.svg",
"intro": "可对固定或传入的文本进行加工后输出,非字符串类型数据最终会转成字符串类型。",

@ -1,5 +1,6 @@
{
"author": "",
"version": "4811",
"name": "Wiki搜索",
"avatar": "core/workflow/template/wiki",
"intro": "在Wiki中查询释义。",

18
packages/service/common/api/type.d.ts
vendored
@ -6,6 +6,12 @@ import type {
} from '../../core/dataset/search/controller';
import type { AuthOpenApiLimitProps } from '../../support/openapi/auth';
import type { CreateUsageProps, ConcatUsageProps } from '@fastgpt/global/support/wallet/usage/api';
import type {
GetProApiDatasetFileContentParams,
GetProApiDatasetFileDetailParams,
GetProApiDatasetFileListParams,
GetProApiDatasetFilePreviewUrlParams
} from '../../core/dataset/apiDataset/proApi';

declare global {
var textCensorHandler: (params: { text: string }) => Promise<{ code: number; message?: string }>;
@ -13,4 +19,16 @@ declare global {
var authOpenApiHandler: (data: AuthOpenApiLimitProps) => Promise<any>;
var createUsageHandler: (data: CreateUsageProps) => any;
var concatUsageHandler: (data: ConcatUsageProps) => any;

// API dataset
var getProApiDatasetFileList: (data: GetProApiDatasetFileListParams) => Promise<APIFileItem[]>;
var getProApiDatasetFileContent: (
data: GetProApiDatasetFileContentParams
) => Promise<ApiFileReadContentResponse>;
var getProApiDatasetFilePreviewUrl: (
data: GetProApiDatasetFilePreviewUrlParams
) => Promise<string>;
var getProApiDatasetFileDetail: (
data: GetProApiDatasetFileDetailParams
) => Promise<ApiDatasetDetailResponse>;
}

@ -1,178 +0,0 @@
import { retryFn } from '@fastgpt/global/common/system/utils';
import { connectionMongo } from '../../mongo';
import { MongoRawTextBufferSchema, bucketName } from './schema';
import { addLog } from '../../system/log';
import { setCron } from '../../system/cron';
import { checkTimerLock } from '../../system/timerLock/utils';
import { TimerIdEnum } from '../../system/timerLock/constants';

const getGridBucket = () => {
return new connectionMongo.mongo.GridFSBucket(connectionMongo.connection.db!, {
bucketName: bucketName
});
};

export const addRawTextBuffer = async ({
sourceId,
sourceName,
text,
expiredTime
}: {
sourceId: string;
sourceName: string;
text: string;
expiredTime: Date;
}) => {
const gridBucket = getGridBucket();
const metadata = {
sourceId,
sourceName,
expiredTime
};

const buffer = Buffer.from(text);

const fileSize = buffer.length;
// Chunk size: as large as possible, but no more than 14MB and no less than 128KB
const chunkSizeBytes = (() => {
// Ideal chunk size: file size ÷ target chunk count (10); each chunk must stay under 14MB
const idealChunkSize = Math.min(Math.ceil(fileSize / 10), 14 * 1024 * 1024);

// Ensure the chunk size is at least 128KB
const minChunkSize = 128 * 1024; // 128KB

// Take the larger of the ideal and minimum chunk sizes
let chunkSize = Math.max(idealChunkSize, minChunkSize);

// Round the chunk size up to the nearest multiple of 64KB to keep it tidy
chunkSize = Math.ceil(chunkSize / (64 * 1024)) * (64 * 1024);

return chunkSize;
})();

const uploadStream = gridBucket.openUploadStream(sourceId, {
metadata,
chunkSizeBytes
});

return retryFn(async () => {
return new Promise((resolve, reject) => {
uploadStream.end(buffer);
uploadStream.on('finish', () => {
resolve(uploadStream.id);
});
uploadStream.on('error', (error) => {
addLog.error('addRawTextBuffer error', error);
resolve('');
});
});
});
};

export const getRawTextBuffer = async (sourceId: string) => {
const gridBucket = getGridBucket();

return retryFn(async () => {
const bufferData = await MongoRawTextBufferSchema.findOne(
{
'metadata.sourceId': sourceId
},
'_id metadata'
).lean();
if (!bufferData) {
return null;
}

// Read file content
const downloadStream = gridBucket.openDownloadStream(bufferData._id);
const chunks: Buffer[] = [];

return new Promise<{
text: string;
sourceName: string;
} | null>((resolve, reject) => {
downloadStream.on('data', (chunk) => {
chunks.push(chunk);
});

downloadStream.on('end', () => {
const buffer = Buffer.concat(chunks);
const text = buffer.toString('utf8');
resolve({
text,
sourceName: bufferData.metadata?.sourceName || ''
});
});

downloadStream.on('error', (error) => {
addLog.error('getRawTextBuffer error', error);
resolve(null);
});
});
});
};

export const deleteRawTextBuffer = async (sourceId: string): Promise<boolean> => {
const gridBucket = getGridBucket();

return retryFn(async () => {
const buffer = await MongoRawTextBufferSchema.findOne({ 'metadata.sourceId': sourceId });
if (!buffer) {
return false;
}

await gridBucket.delete(buffer._id);
return true;
});
};

export const updateRawTextBufferExpiredTime = async ({
sourceId,
expiredTime
}: {
sourceId: string;
expiredTime: Date;
}) => {
return retryFn(async () => {
return MongoRawTextBufferSchema.updateOne(
{ 'metadata.sourceId': sourceId },
{ $set: { 'metadata.expiredTime': expiredTime } }
);
});
};

export const clearExpiredRawTextBufferCron = async () => {
const clearExpiredRawTextBuffer = async () => {
addLog.debug('Clear expired raw text buffer start');
const gridBucket = getGridBucket();

return retryFn(async () => {
const data = await MongoRawTextBufferSchema.find(
{
'metadata.expiredTime': { $lt: new Date() }
},
'_id'
).lean();

for (const item of data) {
await gridBucket.delete(item._id);
}
addLog.debug('Clear expired raw text buffer end');
});
};

setCron('*/10 * * * *', async () => {
if (
await checkTimerLock({
timerId: TimerIdEnum.clearExpiredRawTextBuffer,
lockMinuted: 9
})
) {
try {
await clearExpiredRawTextBuffer();
} catch (error) {
addLog.error('clearExpiredRawTextBufferCron error', error);
}
}
});
};
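The chunk-size heuristic in the deleted controller above, extracted as a standalone sketch for clarity (illustrative only): aim for roughly 10 GridFS chunks per file, cap each chunk at 14MB, floor it at 128KB, then round up to a 64KB multiple.

const computeGridFsChunkSize = (fileSize: number): number => {
  const idealChunkSize = Math.min(Math.ceil(fileSize / 10), 14 * 1024 * 1024);
  const minChunkSize = 128 * 1024; // 128KB floor
  const chunkSize = Math.max(idealChunkSize, minChunkSize);
  return Math.ceil(chunkSize / (64 * 1024)) * (64 * 1024); // 64KB alignment
};

computeGridFsChunkSize(500 * 1024); // 131072 — the 128KB floor wins
computeGridFsChunkSize(100 * 1024 * 1024); // 10485760 — 100MB / 10 = 10MB per chunk
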
@ -1,22 +1,33 @@
import { getMongoModel, type Types, Schema } from '../../mongo';
import { getMongoModel, Schema } from '../../mongo';
import { type RawTextBufferSchemaType } from './type';

export const bucketName = 'buffer_rawtext';
export const collectionName = 'buffer_rawtexts';

const RawTextBufferSchema = new Schema({
metadata: {
sourceId: { type: String, required: true },
sourceName: { type: String, required: true },
expiredTime: { type: Date, required: true }
}
sourceId: {
type: String,
required: true
},
rawText: {
type: String,
default: ''
},
createTime: {
type: Date,
default: () => new Date()
},
metadata: Object
});
RawTextBufferSchema.index({ 'metadata.sourceId': 'hashed' });
RawTextBufferSchema.index({ 'metadata.expiredTime': -1 });

export const MongoRawTextBufferSchema = getMongoModel<{
_id: Types.ObjectId;
metadata: {
sourceId: string;
sourceName: string;
expiredTime: Date;
};
}>(`${bucketName}.files`, RawTextBufferSchema);
try {
RawTextBufferSchema.index({ sourceId: 1 });
// 20 minutes
RawTextBufferSchema.index({ createTime: 1 }, { expireAfterSeconds: 20 * 60 });
} catch (error) {
console.log(error);
}

export const MongoRawTextBuffer = getMongoModel<RawTextBufferSchemaType>(
collectionName,
RawTextBufferSchema
);
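The alpha-side schema above replaces the cron-driven cleanup with a MongoDB TTL index. A minimal self-contained sketch of the same pattern (names here are illustrative):

import { Schema, model } from 'mongoose';

const DemoBufferSchema = new Schema({
  sourceId: { type: String, required: true },
  createTime: { type: Date, default: () => new Date() }
});
// MongoDB's TTL monitor deletes documents once createTime is older than
// expireAfterSeconds; the monitor runs roughly once a minute, so removal
// is eventual rather than instant.
DemoBufferSchema.index({ createTime: 1 }, { expireAfterSeconds: 20 * 60 });

export const DemoBuffer = model('demo_buffers', DemoBufferSchema);
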
8
packages/service/common/buffer/rawText/type.d.ts
vendored
Normal file
@ -0,0 +1,8 @@
export type RawTextBufferSchemaType = {
sourceId: string;
rawText: string;
createTime: Date;
metadata?: {
filename: string;
};
};
@ -6,13 +6,13 @@ import { type DatasetFileSchema } from '@fastgpt/global/core/dataset/type';
import { MongoChatFileSchema, MongoDatasetFileSchema } from './schema';
import { detectFileEncoding, detectFileEncodingByPath } from '@fastgpt/global/common/file/tools';
import { CommonErrEnum } from '@fastgpt/global/common/error/code/common';
import { MongoRawTextBuffer } from '../../buffer/rawText/schema';
import { readRawContentByFileBuffer } from '../read/utils';
import { gridFsStream2Buffer, stream2Encoding } from './utils';
import { addLog } from '../../system/log';
import { readFromSecondary } from '../../mongo/utils';
import { parseFileExtensionFromUrl } from '@fastgpt/global/common/string/tools';
import { Readable } from 'stream';
import { addRawTextBuffer, getRawTextBuffer } from '../../buffer/rawText/controller';
import { addMinutes } from 'date-fns';

export function getGFSCollection(bucket: `${BucketNameEnum}`) {
MongoDatasetFileSchema;
@ -210,26 +210,28 @@ export const readFileContentFromMongo = async ({
tmbId,
bucketName,
fileId,
customPdfParse = false,
getFormatText
isQAImport = false,
customPdfParse = false
}: {
teamId: string;
tmbId: string;
bucketName: `${BucketNameEnum}`;
fileId: string;
isQAImport?: boolean;
customPdfParse?: boolean;
getFormatText?: boolean; // convert every data type to markdown format where possible
}): Promise<{
rawText: string;
filename: string;
}> => {
const bufferId = `${String(fileId)}-${customPdfParse}`;
const bufferId = `${fileId}-${customPdfParse}`;
// read buffer
const fileBuffer = await getRawTextBuffer(bufferId);
const fileBuffer = await MongoRawTextBuffer.findOne({ sourceId: bufferId }, undefined, {
...readFromSecondary
}).lean();
if (fileBuffer) {
return {
rawText: fileBuffer.text,
filename: fileBuffer?.sourceName
rawText: fileBuffer.rawText,
filename: fileBuffer.metadata?.filename || ''
};
}

@ -252,8 +254,8 @@ export const readFileContentFromMongo = async ({
// Get raw text
const { rawText } = await readRawContentByFileBuffer({
customPdfParse,
getFormatText,
extension,
isQAImport,
teamId,
tmbId,
buffer: fileBuffers,
@ -263,13 +265,16 @@ export const readFileContentFromMongo = async ({
}
});

// Add buffer
addRawTextBuffer({
sourceId: bufferId,
sourceName: file.filename,
text: rawText,
expiredTime: addMinutes(new Date(), 20)
});
// < 14M
if (fileBuffers.length < 14 * 1024 * 1024 && rawText.trim()) {
MongoRawTextBuffer.create({
sourceId: bufferId,
rawText,
metadata: {
filename: file.filename
}
});
}

return {
rawText,

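The cache flow in readFileContentFromMongo above, reduced to a read-through sketch (parseFileFromGridFs is a hypothetical stand-in for the GridFS download and parse steps, not a repo function):

const readWithBuffer = async (fileId: string, customPdfParse: boolean) => {
  const bufferId = `${fileId}-${customPdfParse}`; // parse mode is part of the cache key

  const cached = await MongoRawTextBuffer.findOne({ sourceId: bufferId }).lean();
  if (cached) return cached.rawText;

  const { buffer, rawText } = await parseFileFromGridFs(fileId); // hypothetical helper
  // mirror the < 14MB guard above so oversized payloads skip the cache
  if (buffer.length < 14 * 1024 * 1024 && rawText.trim()) {
    await MongoRawTextBuffer.create({ sourceId: bufferId, rawText });
  }
  return rawText;
};
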
@ -1,16 +1,16 @@
import { Schema, getMongoModel } from '../../mongo';

const DatasetFileSchema = new Schema({
metadata: Object
});
const ChatFileSchema = new Schema({
metadata: Object
});
const DatasetFileSchema = new Schema({});
const ChatFileSchema = new Schema({});

DatasetFileSchema.index({ uploadDate: -1 });
try {
DatasetFileSchema.index({ uploadDate: -1 });

ChatFileSchema.index({ uploadDate: -1 });
ChatFileSchema.index({ 'metadata.chatId': 1 });
ChatFileSchema.index({ uploadDate: -1 });
ChatFileSchema.index({ 'metadata.chatId': 1 });
} catch (error) {
console.log(error);
}

export const MongoDatasetFileSchema = getMongoModel('dataset.files', DatasetFileSchema);
export const MongoChatFileSchema = getMongoModel('chat.files', ChatFileSchema);

@ -1,57 +1,5 @@
import { detectFileEncoding } from '@fastgpt/global/common/file/tools';
import { PassThrough } from 'stream';
import { getGridBucket } from './controller';
import { type BucketNameEnum } from '@fastgpt/global/common/file/constants';
import { retryFn } from '@fastgpt/global/common/system/utils';

export const createFileFromText = async ({
bucket,
filename,
text,
metadata
}: {
bucket: `${BucketNameEnum}`;
filename: string;
text: string;
metadata: Record<string, any>;
}) => {
const gridBucket = getGridBucket(bucket);

const buffer = Buffer.from(text);

const fileSize = buffer.length;
// Chunk size: as large as possible, but no more than 14MB and no less than 128KB
const chunkSizeBytes = (() => {
// Ideal chunk size: file size ÷ target chunk count (10); each chunk must stay under 14MB
const idealChunkSize = Math.min(Math.ceil(fileSize / 10), 14 * 1024 * 1024);

// Ensure the chunk size is at least 128KB
const minChunkSize = 128 * 1024; // 128KB

// Take the larger of the ideal and minimum chunk sizes
let chunkSize = Math.max(idealChunkSize, minChunkSize);

// Round the chunk size up to the nearest multiple of 64KB to keep it tidy
chunkSize = Math.ceil(chunkSize / (64 * 1024)) * (64 * 1024);

return chunkSize;
})();

const uploadStream = gridBucket.openUploadStream(filename, {
metadata,
chunkSizeBytes
});

return retryFn(async () => {
return new Promise<{ fileId: string }>((resolve, reject) => {
uploadStream.end(buffer);
uploadStream.on('finish', () => {
resolve({ fileId: String(uploadStream.id) });
});
uploadStream.on('error', reject);
});
});
};

export const gridFsStream2Buffer = (stream: NodeJS.ReadableStream) => {
return new Promise<Buffer>((resolve, reject) => {

@ -16,7 +16,6 @@ export type readRawTextByLocalFileParams = {
path: string;
encoding: string;
customPdfParse?: boolean;
getFormatText?: boolean;
metadata?: Record<string, any>;
};
export const readRawTextByLocalFile = async (params: readRawTextByLocalFileParams) => {
@ -28,8 +27,8 @@ export const readRawTextByLocalFile = async (params: readRawTextByLocalFileParam

return readRawContentByFileBuffer({
extension,
isQAImport: false,
customPdfParse: params.customPdfParse,
getFormatText: params.getFormatText,
teamId: params.teamId,
tmbId: params.tmbId,
encoding: params.encoding,
@ -47,7 +46,7 @@ export const readRawContentByFileBuffer = async ({
encoding,
metadata,
customPdfParse = false,
getFormatText = true
isQAImport = false
}: {
teamId: string;
tmbId: string;
@ -58,10 +57,8 @@ export const readRawContentByFileBuffer = async ({
metadata?: Record<string, any>;

customPdfParse?: boolean;
getFormatText?: boolean;
}): Promise<{
rawText: string;
}> => {
isQAImport: boolean;
}): Promise<ReadFileResponse> => {
const systemParse = () =>
runWorker<ReadFileResponse>(WorkerNameEnum.readFile, {
extension,
@ -110,7 +107,7 @@ export const readRawContentByFileBuffer = async ({

return {
rawText: text,
formatText: text,
formatText: rawText,
imageList
};
};
@ -152,7 +149,7 @@ export const readRawContentByFileBuffer = async ({
return await systemParse();
})();

addLog.debug(`Parse file success, time: ${Date.now() - start}ms. `);
addLog.debug(`Parse file success, time: ${Date.now() - start}ms. Uploading file image.`);

// markdown data format
if (imageList) {
@ -179,7 +176,16 @@ export const readRawContentByFileBuffer = async ({
});
}

addLog.debug(`Upload file success, time: ${Date.now() - start}ms`);
if (['csv', 'xlsx'].includes(extension)) {
// qa data
if (isQAImport) {
rawText = rawText || '';
} else {
rawText = formatText || rawText;
}
}

return { rawText: getFormatText ? formatText || rawText : rawText };
addLog.debug(`Upload file image success, time: ${Date.now() - start}ms`);

return { rawText, formatText, imageList };
};

@ -1,10 +1,7 @@
import { getGlobalRedisConnection } from './index';
import { getGlobalRedisCacheConnection } from './index';
import { addLog } from '../system/log';
import { retryFn } from '@fastgpt/global/common/system/utils';

const redisPrefix = 'cache:';
const getCacheKey = (key: string) => `${redisPrefix}${key}`;

export enum CacheKeyEnum {
team_vector_count = 'team_vector_count'
}
@ -16,12 +13,12 @@ export const setRedisCache = async (
) => {
return await retryFn(async () => {
try {
const redis = getGlobalRedisConnection();
const redis = getGlobalRedisCacheConnection();

if (expireSeconds) {
await redis.set(getCacheKey(key), data, 'EX', expireSeconds);
await redis.set(key, data, 'EX', expireSeconds);
} else {
await redis.set(getCacheKey(key), data);
await redis.set(key, data);
}
} catch (error) {
addLog.error('Set cache error:', error);
@ -31,11 +28,11 @@ export const setRedisCache = async (
};

export const getRedisCache = async (key: string) => {
const redis = getGlobalRedisConnection();
return await retryFn(() => redis.get(getCacheKey(key)));
const redis = getGlobalRedisCacheConnection();
return await retryFn(() => redis.get(key));
};

export const delRedisCache = async (key: string) => {
const redis = getGlobalRedisConnection();
await retryFn(() => redis.del(getCacheKey(key)));
const redis = getGlobalRedisCacheConnection();
await retryFn(() => redis.del(key));
};

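Usage sketch for the cache helpers above (main side): keys pass through getCacheKey, so a logical key such as "team_vector_count:team1" is stored as "fastgpt:cache:team_vector_count:team1" once the client-level keyPrefix is also applied (the team id is illustrative):

await setRedisCache(`${CacheKeyEnum.team_vector_count}:team1`, '42', 30 * 60);
const cached = await getRedisCache(`${CacheKeyEnum.team_vector_count}:team1`); // '42' | null
await delRedisCache(`${CacheKeyEnum.team_vector_count}:team1`);
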
@ -27,26 +27,17 @@ export const newWorkerRedisConnection = () => {
return redis;
};

export const FASTGPT_REDIS_PREFIX = 'fastgpt:';
export const getGlobalRedisConnection = () => {
if (global.redisClient) return global.redisClient;
export const getGlobalRedisCacheConnection = () => {
if (global.redisCache) return global.redisCache;

global.redisClient = new Redis(REDIS_URL, { keyPrefix: FASTGPT_REDIS_PREFIX });
global.redisCache = new Redis(REDIS_URL, { keyPrefix: 'fastgpt:cache:' });

global.redisClient.on('connect', () => {
global.redisCache.on('connect', () => {
addLog.info('Redis connected');
});
global.redisClient.on('error', (error) => {
global.redisCache.on('error', (error) => {
addLog.error('Redis connection error', error);
});

return global.redisClient;
};

export const getAllKeysByPrefix = async (key: string) => {
const redis = getGlobalRedisConnection();
const keys = (await redis.keys(`${FASTGPT_REDIS_PREFIX}${key}:*`)).map((key) =>
key.replace(FASTGPT_REDIS_PREFIX, '')
);
return keys;
return global.redisCache;
};

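One design note on getAllKeysByPrefix above: ioredis applies keyPrefix to the key arguments of ordinary commands but not to KEYS/SCAN patterns, which is presumably why the pattern is prefixed by hand and the prefix stripped back out of the results:

const keys = await getAllKeysByPrefix('cache:team_vector_count');
// e.g. ['cache:team_vector_count:team1', ...] — FASTGPT_REDIS_PREFIX already removed
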
2
packages/service/common/redis/type.d.ts
vendored
@ -1,5 +1,5 @@
import type Redis from 'ioredis';

declare global {
var redisClient: Redis | null;
var redisCache: Redis | null;
}

@ -10,7 +10,6 @@ let jieba: Jieba | undefined;
})();

const stopWords = new Set([
'\n',
'--',
'?',
'“',
@ -1520,7 +1519,8 @@ ]);
]);

export async function jiebaSplit({ text }: { text: string }) {
text = text.replace(/[#*`_~>[\](){}|]|\S*https?\S*/g, '').trim();
text = text.replace(/[#*`_~>[\](){}|]/g, '').replace(/\S*https?\S*/gi, '');

const tokens = (await jieba!.cutAsync(text, true)) as string[];

return (

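Behavior of the alpha-side cleanup in jiebaSplit, shown on a sample string (the /i flag now also catches uppercase scheme names, and the trailing .trim() is gone):

const sample = ' See [docs](x) at HTTPS://example.com today ';
sample.replace(/[#*`_~>[\](){}|]/g, '').replace(/\S*https?\S*/gi, '');
// → ' See docsx at  today '
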
@ -2,44 +2,26 @@ import { SystemConfigsTypeEnum } from '@fastgpt/global/common/system/config/cons
import { MongoSystemConfigs } from './schema';
import { type FastGPTConfigFileType } from '@fastgpt/global/common/system/types';
import { FastGPTProUrl } from '../constants';
import { type LicenseDataType } from '@fastgpt/global/common/system/types';

export const getFastGPTConfigFromDB = async (): Promise<{
fastgptConfig: FastGPTConfigFileType;
licenseData?: LicenseDataType;
}> => {
export const getFastGPTConfigFromDB = async () => {
if (!FastGPTProUrl) {
return {
fastgptConfig: {} as FastGPTConfigFileType
config: {} as FastGPTConfigFileType
};
}

const [fastgptConfig, licenseConfig] = await Promise.all([
MongoSystemConfigs.findOne({
type: SystemConfigsTypeEnum.fastgpt
}).sort({
createTime: -1
}),
MongoSystemConfigs.findOne({
type: SystemConfigsTypeEnum.license
}).sort({
createTime: -1
})
]);
const res = await MongoSystemConfigs.findOne({
type: SystemConfigsTypeEnum.fastgpt
}).sort({
createTime: -1
});

const config = fastgptConfig?.value || {};
const licenseData = licenseConfig?.value?.data as LicenseDataType | undefined;

const fastgptConfigTime = fastgptConfig?.createTime.getTime().toString();
const licenseConfigTime = licenseConfig?.createTime.getTime().toString();
const config = res?.value || {};
// Use the config file's create time (i.e. its update time) as a cache key; if the client hits the cache, the config file does not need to be returned again
global.systemInitBufferId = fastgptConfigTime
? `${fastgptConfigTime}-${licenseConfigTime}`
: undefined;
global.systemInitBufferId = res ? res.createTime.getTime().toString() : undefined;

return {
fastgptConfig: config as FastGPTConfigFileType,
licenseData
config: config as FastGPTConfigFileType
};
};


@ -57,19 +57,14 @@ export const addLog = {

level === LogLevelEnum.error && console.error(obj);

// store log
// store
if (level >= STORE_LOG_LEVEL && connectionMongo.connection.readyState === 1) {
(async () => {
try {
await getMongoLog().create({
text: msg,
level,
metadata: obj
});
} catch (error) {
console.error('store log error', error);
}
})();
// store log
getMongoLog().create({
text: msg,
level,
metadata: obj
});
}
},
debug(msg: string, obj?: Record<string, any>) {

@ -5,8 +5,7 @@ export enum TimerIdEnum {
clearExpiredSubPlan = 'clearExpiredSubPlan',
updateStandardPlan = 'updateStandardPlan',
scheduleTriggerApp = 'scheduleTriggerApp',
notification = 'notification',
clearExpiredRawTextBuffer = 'clearExpiredRawTextBuffer'
notification = 'notification'
}

export enum LockNotificationEnum {

@ -188,7 +188,6 @@ export class PgVectorCtrl {
const results: any = await PgClient.query(
`BEGIN;
SET LOCAL hnsw.ef_search = ${global.systemEnv?.hnswEfSearch || 100};
SET LOCAL hnsw.max_scan_tuples = ${global.systemEnv?.hnswMaxScanTuples || 100000};
SET LOCAL hnsw.iterative_scan = relaxed_order;
WITH relaxed_results AS MATERIALIZED (
select id, collection_id, vector <#> '[${vector}]' AS score
@ -200,7 +199,7 @@ export class PgVectorCtrl {
) SELECT id, collection_id, score FROM relaxed_results ORDER BY score;
COMMIT;`
);
const rows = results?.[results.length - 2]?.rows as PgSearchRawType[];
const rows = results?.[3]?.rows as PgSearchRawType[];

if (!Array.isArray(rows)) {
return {

@ -78,7 +78,7 @@ export const createChatCompletion = async ({
}
body.model = modelConstantsData.model;

const formatTimeout = timeout ? timeout : 600000;
const formatTimeout = timeout ? timeout : body.stream ? 60000 : 600000;
const ai = getAIApi({
userKey,
timeout: formatTimeout

@ -1,54 +1,6 @@
{
"provider": "Claude",
"list": [
{
"model": "claude-sonnet-4-20250514",
"name": "claude-sonnet-4-20250514",
"maxContext": 200000,
"maxResponse": 8000,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"showTopP": true,
"showStopSign": true,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "claude-opus-4-20250514",
"name": "claude-opus-4-20250514",
"maxContext": 200000,
"maxResponse": 4096,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"showTopP": true,
"showStopSign": true,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "claude-3-7-sonnet-20250219",
"name": "claude-3-7-sonnet-20250219",

@ -25,30 +25,6 @@
      "showTopP": true,
      "showStopSign": true
    },
    {
      "model": "gemini-2.5-flash-preview-04-17",
      "name": "gemini-2.5-flash-preview-04-17",
      "maxContext": 1000000,
      "maxResponse": 8000,
      "quoteMaxToken": 60000,
      "maxTemperature": 1,
      "vision": true,
      "toolChoice": true,
      "functionCall": false,
      "defaultSystemChatPrompt": "",
      "datasetProcess": true,
      "usedInClassify": true,
      "customCQPrompt": "",
      "usedInExtractFields": true,
      "usedInQueryExtension": true,
      "customExtractPrompt": "",
      "usedInToolCall": true,
      "defaultConfig": {},
      "fieldMap": {},
      "type": "llm",
      "showTopP": true,
      "showStopSign": true
    },
    {
      "model": "gemini-2.0-flash",
      "name": "gemini-2.0-flash",

@ -18,17 +18,15 @@ import json5 from 'json5';
 */
export const computedMaxToken = ({
  maxToken,
  model,
  min
  model
}: {
  maxToken?: number;
  model: LLMModelItemType;
  min?: number;
}) => {
  if (maxToken === undefined) return;

  maxToken = Math.min(maxToken, model.maxResponse);
  return Math.max(maxToken, min || 0);
  return maxToken;
};

// FastGPT temperature range: [0,10], ai temperature: [0,2], (0,1]……
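For reference, the main-branch version first caps maxToken at the model's maxResponse and then raises it to the optional min floor; the older tag dropped the floor. A small sketch of the same clamp in isolation:

type LLMModel = { maxResponse: number };

// Clamp a requested token budget into [min, model.maxResponse]
const computedMaxToken = ({
  maxToken,
  model,
  min
}: {
  maxToken?: number;
  model: LLMModel;
  min?: number;
}) => {
  if (maxToken === undefined) return;
  maxToken = Math.min(maxToken, model.maxResponse);
  return Math.max(maxToken, min || 0);
};

console.log(computedMaxToken({ maxToken: 16000, model: { maxResponse: 8000 }, min: 2000 })); // 8000
console.log(computedMaxToken({ maxToken: 100, model: { maxResponse: 8000 }, min: 2000 })); // 2000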
@ -137,14 +135,12 @@ export const llmStreamResponseToAnswerText = async (

  // Tool calls
  if (responseChoice?.tool_calls?.length) {
    responseChoice.tool_calls.forEach((toolCall, i) => {
      const index = toolCall.index ?? i;
    responseChoice.tool_calls.forEach((toolCall) => {
      const index = toolCall.index;

      // Call new tool
      const hasNewTool = toolCall?.function?.name || callingTool;
      if (hasNewTool) {
        // Has a function name: this is a new tool call
        if (toolCall?.function?.name) {
      if (toolCall.id || callingTool) {
        // Has an id: this is a new tool call
        if (toolCall.id) {
          callingTool = {
            name: toolCall.function?.name || '',
            arguments: toolCall.function?.arguments || ''
@ -180,7 +176,7 @@ export const llmStreamResponseToAnswerText = async (
    }
  }
  return {
    text: removeDatasetCiteText(parseReasoningContent(answer)[1], false),
    text: parseReasoningContent(answer)[1],
    usage,
    toolCalls
  };
@ -194,9 +190,8 @@ export const llmUnStreamResponseToAnswerText = async (
}> => {
  const answer = response.choices?.[0]?.message?.content || '';
  const toolCalls = response.choices?.[0]?.message?.tool_calls;

  return {
    text: removeDatasetCiteText(parseReasoningContent(answer)[1], false),
    text: answer,
    usage: response.usage,
    toolCalls
  };
@ -226,9 +221,7 @@ export const parseReasoningContent = (text: string): [string, string] => {
};

export const removeDatasetCiteText = (text: string, retainDatasetCite: boolean) => {
  return retainDatasetCite
    ? text.replace(/\[id\]\(CITE\)/g, '')
    : text.replace(/\[([a-f0-9]{24})\](?:\([^\)]*\)?)?/g, '').replace(/\[id\]\(CITE\)/g, '');
  return retainDatasetCite ? text : text.replace(/\[([a-f0-9]{24})\](?:\([^\)]*\)?)?/g, '');
};

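The regex targets citation markers of the form [24-hex-char-id](...) that dataset search injects into answers; main additionally strips the literal [id](CITE) placeholder even when citations are retained. A quick sketch of the main-branch behavior with an illustrative input:

const removeDatasetCiteText = (text: string, retainDatasetCite: boolean) =>
  retainDatasetCite
    ? text.replace(/\[id\]\(CITE\)/g, '')
    : text.replace(/\[([a-f0-9]{24})\](?:\([^\)]*\)?)?/g, '').replace(/\[id\]\(CITE\)/g, '');

const answer = 'See [65f1a2b3c4d5e6f7a8b9c0d1](CITE) and [id](CITE) for details.';
console.log(removeDatasetCiteText(answer, false)); // 'See  and  for details.'
console.log(removeDatasetCiteText(answer, true)); // 'See [65f1a2b3c4d5e6f7a8b9c0d1](CITE) and  for details.'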
// Parse llm stream part
@ -243,12 +236,6 @@ export const parseLLMStreamResponse = () => {
  let citeBuffer = '';
  const maxCiteBufferLength = 32; // total length of [Object](CITE) is 32

  // Buffer
  let buffer_finishReason: CompletionFinishReason = null;
  let buffer_usage: CompletionUsage = getLLMDefaultUsage();
  let buffer_reasoningContent = '';
  let buffer_content = '';

  /*
    parseThinkTag - only controls whether to actively parse <think></think>; if the API has already parsed it, skip.
    retainDatasetCite -
@ -266,7 +253,6 @@ export const parseLLMStreamResponse = () => {
      };
      finish_reason?: CompletionFinishReason;
    }[];
    usage?: CompletionUsage;
  };
  parseThinkTag?: boolean;
  retainDatasetCite?: boolean;
@ -276,71 +262,72 @@ export const parseLLMStreamResponse = () => {
    responseContent: string;
    finishReason: CompletionFinishReason;
  } => {
    const data = (() => {
      buffer_usage = part.usage || buffer_usage;
      const finishReason = part.choices?.[0]?.finish_reason || null;
      const content = part.choices?.[0]?.delta?.content || '';
      // @ts-ignore
      const reasoningContent = part.choices?.[0]?.delta?.reasoning_content || '';
      const isStreamEnd = !!finishReason;

    const finishReason = part.choices?.[0]?.finish_reason || null;
    buffer_finishReason = finishReason || buffer_finishReason;
    // Parse think
    const { reasoningContent: parsedThinkReasoningContent, content: parsedThinkContent } = (() => {
      if (reasoningContent || !parseThinkTag) {
        isInThinkTag = false;
        return { reasoningContent, content };
      }

    const content = part.choices?.[0]?.delta?.content || '';
    // @ts-ignore
    const reasoningContent = part.choices?.[0]?.delta?.reasoning_content || '';
    const isStreamEnd = !!buffer_finishReason;
      if (!content) {
        return {
          reasoningContent: '',
          content: ''
        };
      }

    // Parse think
    const { reasoningContent: parsedThinkReasoningContent, content: parsedThinkContent } =
      (() => {
        if (reasoningContent || !parseThinkTag) {
          isInThinkTag = false;
          return { reasoningContent, content };
        }
      // If not inside a think tag, or reasoningContent exists (already parsed by the API), return reasoningContent and content
      if (isInThinkTag === false) {
        return {
          reasoningContent: '',
          content
        };
      }

        // If not inside a think tag, or reasoningContent exists (already parsed by the API), return reasoningContent and content
        if (isInThinkTag === false) {
      // Check whether the data starts with a think tag
      if (isInThinkTag === undefined) {
        // Parse content think and answer
        startTagBuffer += content;
        // Too little content; skip parsing for now
        if (startTagBuffer.length < thinkStartChars.length) {
          if (isStreamEnd) {
            const tmpContent = startTagBuffer;
            startTagBuffer = '';
            return {
              reasoningContent: '',
              content
              content: tmpContent
            };
          }
          return {
            reasoningContent: '',
            content: ''
          };
        }

        // Check whether the data starts with a think tag
        if (isInThinkTag === undefined) {
          // Parse content think and answer
          startTagBuffer += content;
          // Too little content; skip parsing for now
          if (startTagBuffer.length < thinkStartChars.length) {
            if (isStreamEnd) {
              const tmpContent = startTagBuffer;
              startTagBuffer = '';
              return {
                reasoningContent: '',
                content: tmpContent
              };
            }
            return {
              reasoningContent: '',
              content: ''
            };
          }
        if (startTagBuffer.startsWith(thinkStartChars)) {
          isInThinkTag = true;
          return {
            reasoningContent: startTagBuffer.slice(thinkStartChars.length),
            content: ''
          };
        }

          if (startTagBuffer.startsWith(thinkStartChars)) {
            isInThinkTag = true;
            return {
              reasoningContent: startTagBuffer.slice(thinkStartChars.length),
              content: ''
            };
          }
        // No think tag matched: treat as outside a think tag and return the buffer as content
        isInThinkTag = false;
        return {
          reasoningContent: '',
          content: startTagBuffer
        };
      }

        // No think tag matched: treat as outside a think tag and return the buffer as content
        isInThinkTag = false;
        return {
          reasoningContent: '',
          content: startTagBuffer
        };
      }

      // Confirmed think-tag content: start returning think content while watching for </think>
      /*
      // Confirmed think-tag content: start returning think content while watching for </think>
      /*
        How </think> detection works:
        buffer everything that might be </think> until a complete </think> tag is found or the buffer outgrows the tag length.
        The content return value covers these cases:
@ -351,145 +338,124 @@ export const parseLLMStreamResponse = () => {
          </think>abc - full end-tag match
          k>abc - partial end-tag match
      */
      // endTagBuffer records content suspected to be the end tag
      if (endTagBuffer) {
        endTagBuffer += content;
        if (endTagBuffer.includes(thinkEndChars)) {
          isInThinkTag = false;
          const answer = endTagBuffer.slice(thinkEndChars.length);
          return {
            reasoningContent: '',
            content: answer
          };
        } else if (endTagBuffer.length >= thinkEndChars.length) {
          // The buffer exceeds the end-tag length without matching </think>: this guess failed, still in the think phase.
          const tmp = endTagBuffer;
          endTagBuffer = '';
          return {
            reasoningContent: tmp,
            content: ''
          };
        }
        return {
          reasoningContent: '',
          content: ''
        };
      } else if (content.includes(thinkEndChars)) {
        // Full </think> match in the content: finish immediately
        isInThinkTag = false;
        const [think, answer] = content.split(thinkEndChars);
        return {
          reasoningContent: think,
          content: answer
        };
      } else {
        // No buffer and no </think> match yet: start probing for a partial </think>.
        for (let i = 1; i < thinkEndChars.length; i++) {
          const partialEndTag = thinkEndChars.slice(0, i);
          // Partial end-tag match
          if (content.endsWith(partialEndTag)) {
            const think = content.slice(0, -partialEndTag.length);
            endTagBuffer += partialEndTag;
            return {
              reasoningContent: think,
              content: ''
            };
          }
        }
      }

      // No end-tag match at all: still in the think phase.
      // endTagBuffer records content suspected to be the end tag
      if (endTagBuffer) {
        endTagBuffer += content;
        if (endTagBuffer.includes(thinkEndChars)) {
          isInThinkTag = false;
          const answer = endTagBuffer.slice(thinkEndChars.length);
          return {
            reasoningContent: content,
            reasoningContent: '',
            content: answer
          };
        } else if (endTagBuffer.length >= thinkEndChars.length) {
          // The buffer exceeds the end-tag length without matching </think>: this guess failed, still in the think phase.
          const tmp = endTagBuffer;
          endTagBuffer = '';
          return {
            reasoningContent: tmp,
            content: ''
          };
    })();

    // Parse dataset cite
    if (retainDatasetCite) {
        }
        return {
          reasoningContent: parsedThinkReasoningContent,
          content: parsedThinkContent,
          responseContent: parsedThinkContent,
          finishReason: buffer_finishReason
          reasoningContent: '',
          content: ''
        };
      }

    // Buffer strings containing [ and flush them once maxCiteBufferLength is exceeded
    const parseCite = (text: string) => {
      // On stream end, return all remaining content
      if (isStreamEnd) {
        const content = citeBuffer + text;
        return {
          content: removeDatasetCiteText(content, false)
        };
      }

      // New content contains [: initialize the buffer
      if (text.includes('[')) {
        const index = text.indexOf('[');
        const beforeContent = citeBuffer + text.slice(0, index);
        citeBuffer = text.slice(index);

        // beforeContent may be a plain string or one containing [
        return {
          content: removeDatasetCiteText(beforeContent, false)
        };
      }
      // Inside the cite buffer: check whether the flush conditions are met
      else if (citeBuffer) {
        citeBuffer += text;

        // Check whether the buffer has reached the full quote length or the stream has ended
        if (citeBuffer.length >= maxCiteBufferLength) {
          const content = removeDatasetCiteText(citeBuffer, false);
          citeBuffer = '';

      } else if (content.includes(thinkEndChars)) {
        // Full </think> match in the content: finish immediately
        isInThinkTag = false;
        const [think, answer] = content.split(thinkEndChars);
        return {
          reasoningContent: think,
          content: answer
        };
      } else {
        // No buffer and no </think> match yet: start probing for a partial </think>.
        for (let i = 1; i < thinkEndChars.length; i++) {
          const partialEndTag = thinkEndChars.slice(0, i);
          // Partial end-tag match
          if (content.endsWith(partialEndTag)) {
            const think = content.slice(0, -partialEndTag.length);
            endTagBuffer += partialEndTag;
            return {
              content
              reasoningContent: think,
              content: ''
            };
          } else {
            // Withhold content for now
            return { content: '' };
          }
        }
      }

      return {
        content: text
      };
    };
    const { content: pasedCiteContent } = parseCite(parsedThinkContent);

      // No end-tag match at all: still in the think phase.
      return {
        reasoningContent: parsedThinkReasoningContent,
        content: parsedThinkContent,
        responseContent: pasedCiteContent,
        finishReason: buffer_finishReason
        reasoningContent: content,
        content: ''
      };
    })();

    buffer_reasoningContent += data.reasoningContent;
    buffer_content += data.content;
    // Parse dataset cite
    if (retainDatasetCite) {
      return {
        reasoningContent: parsedThinkReasoningContent,
        content: parsedThinkContent,
        responseContent: parsedThinkContent,
        finishReason
      };
    }

    return data;
  };
    // Buffer strings containing [ and flush them once maxCiteBufferLength is exceeded
    const parseCite = (text: string) => {
      // On stream end, return all remaining content
      if (isStreamEnd) {
        const content = citeBuffer + text;
        return {
          content: removeDatasetCiteText(content, false)
        };
      }

      // New content contains [: initialize the buffer
      if (text.includes('[')) {
        const index = text.indexOf('[');
        const beforeContent = citeBuffer + text.slice(0, index);
        citeBuffer = text.slice(index);

        // beforeContent may be a plain string or one containing [
        return {
          content: removeDatasetCiteText(beforeContent, false)
        };
      }
      // Inside the cite buffer: check whether the flush conditions are met
      else if (citeBuffer) {
        citeBuffer += text;

        // Check whether the buffer has reached the full quote length or the stream has ended
        if (citeBuffer.length >= maxCiteBufferLength) {
          const content = removeDatasetCiteText(citeBuffer, false);
          citeBuffer = '';

          return {
            content
          };
        } else {
          // Withhold content for now
          return { content: '' };
        }
      }

      return {
        content: text
      };
    };
    const { content: pasedCiteContent } = parseCite(parsedThinkContent);

    const getResponseData = () => {
      return {
        finish_reason: buffer_finishReason,
        usage: buffer_usage,
        reasoningContent: buffer_reasoningContent,
        content: buffer_content
        reasoningContent: parsedThinkReasoningContent,
        content: parsedThinkContent,
        responseContent: pasedCiteContent,
        finishReason
      };
    };

    const updateFinishReason = (finishReason: CompletionFinishReason) => {
      buffer_finishReason = finishReason;
    };

    return {
      parsePart,
      getResponseData,
      updateFinishReason
      parsePart
    };
  };
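The parser keeps partial </think> candidates in endTagBuffer: any chunk suffix that matches a prefix of the end tag is withheld until the next chunk confirms or refutes it. A compact sketch of just that suffix check, written as a standalone helper rather than the inline loop above:

const thinkEndChars = '</think>';

// Split a chunk into [safeThinkText, suspectedEndTagPrefix]
const splitSuspectedEndTag = (chunk: string): [string, string] => {
  // Check the longest candidate prefix first
  for (let i = thinkEndChars.length - 1; i >= 1; i--) {
    const partialEndTag = thinkEndChars.slice(0, i);
    if (chunk.endsWith(partialEndTag)) {
      return [chunk.slice(0, -partialEndTag.length), partialEndTag];
    }
  }
  return [chunk, ''];
};

console.log(splitSuspectedEndTag('reasoning ...</th')); // ['reasoning ...', '</th']
console.log(splitSuspectedEndTag('no tag here')); // ['no tag here', '']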
@ -11,6 +11,40 @@ export const beforeUpdateAppFormat = <T extends AppSchema['modules'] | undefined
  nodes: T;
  isPlugin: boolean;
}) => {
  if (nodes) {
    // Check dataset maxTokens
    if (isPlugin) {
      let maxTokens = 16000;

      nodes.forEach((item) => {
        if (
          item.flowNodeType === FlowNodeTypeEnum.chatNode ||
          item.flowNodeType === FlowNodeTypeEnum.tools
        ) {
          const model =
            item.inputs.find((item) => item.key === NodeInputKeyEnum.aiModel)?.value || '';
          const chatModel = getLLMModel(model);
          const quoteMaxToken = chatModel.quoteMaxToken || 16000;

          maxTokens = Math.max(maxTokens, quoteMaxToken);
        }
      });

      nodes.forEach((item) => {
        if (item.flowNodeType === FlowNodeTypeEnum.datasetSearchNode) {
          item.inputs.forEach((input) => {
            if (input.key === NodeInputKeyEnum.datasetMaxTokens) {
              const val = input.value as number;
              if (val > maxTokens) {
                input.value = maxTokens;
              }
            }
          });
        }
      });
    }
  }

  return {
    nodes
  };
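Main's addition clamps every datasetSearchNode's datasetMaxTokens to the largest quoteMaxToken among the workflow's chat/tool models, so a saved app cannot request more quote context than its models support. A reduced sketch of the two-pass clamp, with a simplified hypothetical Node shape:

type Node = { type: 'chat' | 'datasetSearch'; quoteMaxToken?: number; datasetMaxTokens?: number };

const clampDatasetMaxTokens = (nodes: Node[]) => {
  // Pass 1: find the largest quote budget any model supports (16000 floor)
  let maxTokens = 16000;
  for (const node of nodes) {
    if (node.type === 'chat') maxTokens = Math.max(maxTokens, node.quoteMaxToken ?? 16000);
  }
  // Pass 2: clamp dataset search nodes down to that budget
  for (const node of nodes) {
    if (node.type === 'datasetSearch' && (node.datasetMaxTokens ?? 0) > maxTokens) {
      node.datasetMaxTokens = maxTokens;
    }
  }
  return nodes;
};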
@ -1,7 +1,7 @@
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js';
import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js';
import { type McpToolConfigType } from '@fastgpt/global/core/app/type';
import { type ToolType } from '@fastgpt/global/core/app/type';
import { addLog } from '../../common/system/log';
import { retryFn } from '@fastgpt/global/common/system/utils';

@ -41,7 +41,7 @@ export class MCPClient {
 * Get available tools list
 * @returns List of tools
 */
public async getTools(): Promise<McpToolConfigType[]> {
public async getTools(): Promise<ToolType[]> {
  try {
    const client = await this.getConnection();
    const response = await client.listTools();
@ -22,7 +22,8 @@ import {
import { type PluginRuntimeType } from '@fastgpt/global/core/plugin/type';
import { MongoSystemPlugin } from './systemPluginSchema';
import { PluginErrEnum } from '@fastgpt/global/common/error/code/plugin';
import { Types } from 'mongoose';
import { MongoAppVersion } from '../version/schema';
import { i18nT } from '../../../../web/i18n/utils';

/*
plugin id rule:
@ -30,7 +31,8 @@ import { Types } from 'mongoose';
community: community-id
commercial: commercial-id
*/
export function splitCombineToolId(id: string) {

export async function splitCombinePluginId(id: string) {
  const splitRes = id.split('-');
  if (splitRes.length === 1) {
    // app id
@ -41,7 +43,7 @@ export function splitCombineToolId(id: string) {
  }

  const [source, pluginId] = id.split('-') as [PluginSourceEnum, string];
  if (!source || !pluginId) throw new Error('pluginId not found');
  if (!source || !pluginId) return Promise.reject('pluginId not found');

  return { source, pluginId: id };
}
@ -53,7 +55,7 @@ const getSystemPluginTemplateById = async (
  versionId?: string
): Promise<ChildAppType> => {
  const item = getSystemPluginTemplates().find((plugin) => plugin.id === pluginId);
  if (!item) return Promise.reject(PluginErrEnum.unExist);
  if (!item) return Promise.reject(PluginErrEnum.unAuth);

  const plugin = cloneDeep(item);

@ -63,10 +65,10 @@ const getSystemPluginTemplateById = async (
    { pluginId: plugin.id, 'customConfig.associatedPluginId': plugin.associatedPluginId },
    'associatedPluginId'
  ).lean();
  if (!systemPlugin) return Promise.reject(PluginErrEnum.unExist);
  if (!systemPlugin) return Promise.reject(PluginErrEnum.unAuth);

  const app = await MongoApp.findById(plugin.associatedPluginId).lean();
  if (!app) return Promise.reject(PluginErrEnum.unExist);
  if (!app) return Promise.reject(PluginErrEnum.unAuth);

  const version = versionId
    ? await getAppVersionById({
@ -76,12 +78,6 @@ const getSystemPluginTemplateById = async (
      })
    : await getAppLatestVersion(plugin.associatedPluginId, app);
  if (!version.versionId) return Promise.reject('App version not found');
  const isLatest = version.versionId
    ? await checkIsLatestVersion({
        appId: plugin.associatedPluginId,
        versionId: version.versionId
      })
    : true;

  return {
    ...plugin,
@ -90,19 +86,12 @@ const getSystemPluginTemplateById = async (
      edges: version.edges,
      chatConfig: version.chatConfig
    },
    version: versionId ? version?.versionId : '',
    versionLabel: version?.versionName,
    isLatestVersion: isLatest,
    version: versionId || String(version.versionId),
    teamId: String(app.teamId),
    tmbId: String(app.tmbId)
  };
}

return {
  ...plugin,
  version: undefined,
  isLatestVersion: true
};
return plugin;
};

/* Format plugin to workflow preview node data */
@ -114,21 +103,27 @@ export async function getChildAppPreviewNode({
  versionId?: string;
}): Promise<FlowNodeTemplateType> {
  const app: ChildAppType = await (async () => {
    const { source, pluginId } = splitCombineToolId(appId);
    const { source, pluginId } = await splitCombinePluginId(appId);

    if (source === PluginSourceEnum.personal) {
      const item = await MongoApp.findById(appId).lean();
      if (!item) return Promise.reject(PluginErrEnum.unExist);
      if (!item) return Promise.reject('plugin not found');

      const version = await getAppVersionById({ appId, versionId, app: item });

      const isLatest =
        version.versionId && Types.ObjectId.isValid(version.versionId)
          ? await checkIsLatestVersion({
              appId,
              versionId: version.versionId
            })
          : true;
      if (!version.versionId) return Promise.reject(i18nT('common:app_not_version'));

      const versionData = await MongoAppVersion.findById(
        version.versionId,
        '_id versionName appId time'
      ).lean();

      const isLatest = versionData
        ? await checkIsLatestVersion({
            appId,
            versionId: versionData._id
          })
        : true;

      return {
        id: String(item._id),
@ -144,8 +139,8 @@ export async function getChildAppPreviewNode({
        },
        templateType: FlowNodeTemplateTypeEnum.teamApp,

        version: versionId ? version?.versionId : '',
        versionLabel: version?.versionName,
        version: version.versionId,
        versionLabel: versionData?.versionName || '',
        isLatestVersion: isLatest,

        originCost: 0,
@ -154,7 +149,7 @@ export async function getChildAppPreviewNode({
        pluginOrder: 0
      };
    } else {
      return getSystemPluginTemplateById(pluginId, versionId);
      return getSystemPluginTemplateById(pluginId);
    }
  })();

@ -228,12 +223,12 @@ export async function getChildAppRuntimeById(
  id: string,
  versionId?: string
): Promise<PluginRuntimeType> {
  const app = await (async () => {
    const { source, pluginId } = splitCombineToolId(id);
  const app: ChildAppType = await (async () => {
    const { source, pluginId } = await splitCombinePluginId(id);

    if (source === PluginSourceEnum.personal) {
      const item = await MongoApp.findById(id).lean();
      if (!item) return Promise.reject(PluginErrEnum.unExist);
      if (!item) return Promise.reject('plugin not found');

      const version = await getAppVersionById({
        appId: id,
@ -256,6 +251,8 @@ export async function getChildAppRuntimeById(
        },
        templateType: FlowNodeTemplateTypeEnum.teamApp,

        // Not used
        version: item?.pluginData?.nodeVersion,
        originCost: 0,
        currentCost: 0,
        hasTokenFee: false,

@ -1,6 +1,6 @@
import { type ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { type PluginRuntimeType } from '@fastgpt/global/core/plugin/type';
import { splitCombineToolId } from './controller';
import { splitCombinePluginId } from './controller';
import { PluginSourceEnum } from '@fastgpt/global/core/plugin/constants';

/*
@ -20,7 +20,7 @@ export const computedPluginUsage = async ({
  childrenUsage: ChatNodeUsageType[];
  error?: boolean;
}) => {
  const { source } = splitCombineToolId(plugin.id);
  const { source } = await splitCombinePluginId(plugin.id);
  const childrenUsages = childrenUsage.reduce((sum, item) => sum + (item.totalPoints || 0), 0);

  if (source !== PluginSourceEnum.personal) {

@ -119,7 +119,6 @@ const AppSchema = new Schema({
  defaultPermission: Number
});

AppSchema.index({ type: 1 });
AppSchema.index({ teamId: 1, updateTime: -1 });
AppSchema.index({ teamId: 1, type: 1 });
AppSchema.index(

@ -1,13 +1,14 @@
import { MongoDataset } from '../dataset/schema';
import { getEmbeddingModel } from '../ai/model';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import {
  AppNodeFlowNodeTypeMap,
  FlowNodeTypeEnum
} from '@fastgpt/global/core/workflow/node/constant';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
import { getChildAppPreviewNode, splitCombineToolId } from './plugin/controller';
import { PluginSourceEnum } from '@fastgpt/global/core/plugin/constants';
import { authAppByTmbId } from '../../support/permission/app/auth';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { MongoAppVersion } from './version/schema';
import { checkIsLatestVersion } from './version/controller';
import { Types } from '../../common/mongo';

export async function listAppDatasetDataByTeamIdAndDatasetIds({
  teamId,
@ -32,58 +33,52 @@ export async function listAppDatasetDataByTeamIdAndDatasetIds({
export async function rewriteAppWorkflowToDetail({
  nodes,
  teamId,
  isRoot,
  ownerTmbId
  isRoot
}: {
  nodes: StoreNodeItemType[];
  teamId: string;
  isRoot: boolean;
  ownerTmbId: string;
}) {
  const datasetIdSet = new Set<string>();

  /* Add node(App Type) versionLabel and latest sign ==== */
  await Promise.all(
    nodes.map(async (node) => {
      if (!node.pluginId) return;
      const { source } = splitCombineToolId(node.pluginId);
  // Add node(App Type) versionLabel and latest sign
  const appNodes = nodes.filter((node) => AppNodeFlowNodeTypeMap[node.flowNodeType]);
  const versionIds = appNodes
    .filter((node) => node.version && Types.ObjectId.isValid(node.version))
    .map((node) => node.version);
  if (versionIds.length > 0) {
    const versionDataList = await MongoAppVersion.find(
      {
        _id: { $in: versionIds }
      },
      '_id versionName appId time'
    ).lean();

      try {
        const [preview] = await Promise.all([
          getChildAppPreviewNode({
            appId: node.pluginId,
            versionId: node.version
          }),
          ...(source === PluginSourceEnum.personal
            ? [
                authAppByTmbId({
                  tmbId: ownerTmbId,
                  appId: node.pluginId,
                  per: ReadPermissionVal
                })
              ]
            : [])
        ]);
    const versionMap: Record<string, any> = {};

        node.pluginData = {
          diagram: preview.diagram,
          userGuide: preview.userGuide,
          courseUrl: preview.courseUrl,
          name: preview.name,
          avatar: preview.avatar
        };
        node.versionLabel = preview.versionLabel;
        node.isLatestVersion = preview.isLatestVersion;
        node.version = preview.version;
      } catch (error) {
        node.pluginData = {
          error: getErrText(error)
        };
    const isLatestChecks = await Promise.all(
      versionDataList.map(async (version) => {
        const isLatest = await checkIsLatestVersion({
          appId: version.appId,
          versionId: version._id
        });

        return { versionId: String(version._id), isLatest };
      })
    );
    const isLatestMap = new Map(isLatestChecks.map((item) => [item.versionId, item.isLatest]));
    versionDataList.forEach((version) => {
      versionMap[String(version._id)] = version;
    });
    appNodes.forEach((node) => {
      if (!node.version) return;
      const versionData = versionMap[String(node.version)];
      if (versionData) {
        node.versionLabel = versionData.versionName;
        node.isLatestVersion = isLatestMap.get(String(node.version)) || false;
      }
    })
  );

  /* Add node(App Type) versionLabel and latest sign ==== */
  });
}

// Get all dataset ids from nodes
nodes.forEach((node) => {

@ -15,7 +15,6 @@ export const getAppLatestVersion = async (appId: string, app?: AppSchema) => {
  if (version) {
    return {
      versionId: version._id,
      versionName: version.versionName,
      nodes: version.nodes,
      edges: version.edges,
      chatConfig: version.chatConfig || app?.chatConfig || {}
@ -23,7 +22,6 @@ export const getAppLatestVersion = async (appId: string, app?: AppSchema) => {
  }
  return {
    versionId: app?.pluginData?.nodeVersion,
    versionName: app?.name,
    nodes: app?.modules || [],
    edges: app?.edges || [],
    chatConfig: app?.chatConfig || {}
@ -49,7 +47,6 @@ export const getAppVersionById = async ({
  if (version) {
    return {
      versionId: version._id,
      versionName: version.versionName,
      nodes: version.nodes,
      edges: version.edges,
      chatConfig: version.chatConfig || app?.chatConfig || {}
@ -68,9 +65,6 @@ export const checkIsLatestVersion = async ({
  appId: string;
  versionId: string;
}) => {
  if (!Types.ObjectId.isValid(versionId)) {
    return false;
  }
  const version = await MongoAppVersion.findOne(
    {
      appId,

@ -61,7 +61,6 @@ const ChatItemSchema = new Schema({
    type: Array,
    default: []
  },
  errorMsg: String,
  userGoodFeedback: {
    type: String
  },

@ -34,10 +34,6 @@ const ChatSchema = new Schema({
    ref: AppCollectionName,
    required: true
  },
  createTime: {
    type: Date,
    default: () => new Date()
  },
  updateTime: {
    type: Date,
    default: () => new Date()
@ -32,7 +32,6 @@ type Props = {
  content: [UserChatItemType & { dataId?: string }, AIChatItemType & { dataId?: string }];
  metadata?: Record<string, any>;
  durationSeconds: number; // s
  errorMsg?: string;
};

export async function saveChat({
@ -51,7 +50,6 @@ export async function saveChat({
  outLinkUid,
  content,
  durationSeconds,
  errorMsg,
  metadata = {}
}: Props) {
  if (!chatId || chatId === 'NO_RECORD_HISTORIES') return;
@ -106,8 +104,7 @@ export async function saveChat({
      return {
        ...item,
        [DispatchNodeResponseKeyEnum.nodeResponse]: nodeResponse,
        durationSeconds,
        errorMsg
        durationSeconds
      };
    }
    return item;

@ -65,8 +65,8 @@ export const filterGPTMessageByMaxContext = async ({
    if (lastMessage.role === ChatCompletionRequestMessageRoleEnum.User) {
      const tokens = await countGptMessagesTokens([lastMessage, ...tmpChats]);
      maxContext -= tokens;
      // This round's total tokens exceed the limit, so drop it, but always keep at least one group.
      if (maxContext < 0 && chats.length > 0) {
      // This round's total tokens exceed the limit, so drop it
      if (maxContext < 0) {
        break;
      }
    }

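Context on the guard main adds: the filter walks the history from newest to oldest, subtracting each round's token count from the remaining budget, and stops when the budget is exhausted; the extra chats.length check means the most recent round survives even when it alone overflows the window. A reduced sketch with a hypothetical countTokens:

type Round = { text: string };

// Hypothetical token counter; the real code awaits countGptMessagesTokens
const countTokens = (round: Round) => Math.ceil(round.text.length / 4);

const filterByMaxContext = (rounds: Round[], maxContext: number): Round[] => {
  const kept: Round[] = [];
  // Walk from newest to oldest, spending the token budget
  for (let i = rounds.length - 1; i >= 0; i--) {
    maxContext -= countTokens(rounds[i]);
    // Budget exhausted: stop, but keep at least the newest round
    if (maxContext < 0 && kept.length > 0) break;
    kept.unshift(rounds[i]);
  }
  return kept;
};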
@ -2,9 +2,7 @@ import type {
  APIFileListResponse,
  ApiFileReadContentResponse,
  APIFileReadResponse,
  ApiDatasetDetailResponse,
  APIFileServer,
  APIFileItem
  APIFileServer
} from '@fastgpt/global/core/dataset/apiDataset';
import axios, { type Method } from 'axios';
import { addLog } from '../../../common/system/log';
@ -91,7 +89,7 @@ export const useApiDatasetRequest = ({ apiServer }: { apiServer: APIFileServer }
  `/v1/file/list`,
  {
    searchKey,
    parentId: parentId || apiServer.basePath
    parentId
  },
  'POST'
);
@ -146,8 +144,7 @@ export const useApiDatasetRequest = ({ apiServer }: { apiServer: APIFileServer }
  tmbId,
  url: previewUrl,
  relatedId: apiFileId,
  customPdfParse,
  getFormatText: true
  customPdfParse
});
return {
  title,
@ -167,34 +164,9 @@ export const useApiDatasetRequest = ({ apiServer }: { apiServer: APIFileServer }
  return url;
};

const getFileDetail = async ({
  apiFileId
}: {
  apiFileId: string;
}): Promise<ApiDatasetDetailResponse> => {
  const fileData = await request<ApiDatasetDetailResponse>(
    `/v1/file/detail`,
    {
      id: apiFileId
    },
    'GET'
  );

  if (fileData) {
    return {
      id: fileData.id,
      name: fileData.name,
      parentId: fileData.parentId === null ? '' : fileData.parentId
    };
  }

  return Promise.reject('File not found');
};

return {
  getFileContent,
  listFiles,
  getFilePreviewUrl,
  getFileDetail
  getFilePreviewUrl
};
};

@ -1,27 +0,0 @@
import type {
  APIFileServer,
  YuqueServer,
  FeishuServer
} from '@fastgpt/global/core/dataset/apiDataset';
import { useApiDatasetRequest } from './api';
import { useYuqueDatasetRequest } from '../yuqueDataset/api';
import { useFeishuDatasetRequest } from '../feishuDataset/api';

export const getApiDatasetRequest = async (data: {
  apiServer?: APIFileServer;
  yuqueServer?: YuqueServer;
  feishuServer?: FeishuServer;
}) => {
  const { apiServer, yuqueServer, feishuServer } = data;

  if (apiServer) {
    return useApiDatasetRequest({ apiServer });
  }
  if (yuqueServer) {
    return useYuqueDatasetRequest({ yuqueServer });
  }
  if (feishuServer) {
    return useFeishuDatasetRequest({ feishuServer });
  }
  return Promise.reject('Can not find api dataset server');
};
30
packages/service/core/dataset/apiDataset/proApi.ts
Normal file
@ -0,0 +1,30 @@
import { type ParentIdType } from '@fastgpt/global/common/parentFolder/type';
import { type FeishuServer, type YuqueServer } from '@fastgpt/global/core/dataset/apiDataset';

export enum ProApiDatasetOperationTypeEnum {
  LIST = 'list',
  READ = 'read',
  CONTENT = 'content',
  DETAIL = 'detail'
}

export type ProApiDatasetCommonParams = {
  feishuServer?: FeishuServer;
  yuqueServer?: YuqueServer;
};

export type GetProApiDatasetFileListParams = ProApiDatasetCommonParams & {
  parentId?: ParentIdType;
};

export type GetProApiDatasetFileContentParams = ProApiDatasetCommonParams & {
  apiFileId: string;
};

export type GetProApiDatasetFilePreviewUrlParams = ProApiDatasetCommonParams & {
  apiFileId: string;
};

export type GetProApiDatasetFileDetailParams = ProApiDatasetCommonParams & {
  apiFileId: string;
};
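These types pair each operation with the server credentials it needs. A hypothetical dispatcher sketch showing how a caller might combine the enum and the param types; the proApiDatasetRequest name and transport are illustrative, not from the diff:

import {
  ProApiDatasetOperationTypeEnum,
  type GetProApiDatasetFileContentParams
} from './proApi';

// Hypothetical transport; the real request target is not shown in this diff
const proApiDatasetRequest = async (type: ProApiDatasetOperationTypeEnum, params: unknown) => {
  // e.g. POST { type, ...params } to a pro-API endpoint
  return { rawText: '', title: '' };
};

const getContent = (params: GetProApiDatasetFileContentParams) =>
  proApiDatasetRequest(ProApiDatasetOperationTypeEnum.CONTENT, params);

void getContent({ apiFileId: 'doc-1' });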
@ -34,17 +34,15 @@ import { getTrainingModeByCollection } from './utils';
import {
  computeChunkSize,
  computeChunkSplitter,
  computeParagraphChunkDeep,
  getLLMMaxChunkSize
} from '@fastgpt/global/core/dataset/training/utils';
import { DatasetDataIndexTypeEnum } from '@fastgpt/global/core/dataset/data/constants';

export const createCollectionAndInsertData = async ({
  dataset,
  rawText,
  relatedId,
  createCollectionParams,
  backupParse = false,
  isQAImport = false,
  billId,
  session
}: {
@ -52,8 +50,8 @@ export const createCollectionAndInsertData = async ({
  rawText: string;
  relatedId?: string;
  createCollectionParams: CreateOneCollectionParams;
  backupParse?: boolean;

  isQAImport?: boolean;
  billId?: string;
  session?: ClientSession;
}) => {
@ -75,33 +73,15 @@ export const createCollectionAndInsertData = async ({
    llmModel: getLLMModel(dataset.agentModel)
  });
  const chunkSplitter = computeChunkSplitter(createCollectionParams);
  const paragraphChunkDeep = computeParagraphChunkDeep(createCollectionParams);

  if (
    trainingType === DatasetCollectionDataProcessModeEnum.qa ||
    trainingType === DatasetCollectionDataProcessModeEnum.backup
  ) {
    delete createCollectionParams.chunkTriggerType;
    delete createCollectionParams.chunkTriggerMinSize;
    delete createCollectionParams.dataEnhanceCollectionName;
    delete createCollectionParams.imageIndex;
    delete createCollectionParams.autoIndexes;
    delete createCollectionParams.indexSize;
    delete createCollectionParams.qaPrompt;
  }

  // 1. split chunks
  const chunks = rawText2Chunks({
    rawText,
    chunkTriggerType: createCollectionParams.chunkTriggerType,
    chunkTriggerMinSize: createCollectionParams.chunkTriggerMinSize,
    chunkSize,
    paragraphChunkDeep,
    paragraphChunkMinSize: createCollectionParams.paragraphChunkMinSize,
    maxSize: getLLMMaxChunkSize(getLLMModel(dataset.agentModel)),
    overlapRatio: trainingType === DatasetCollectionDataProcessModeEnum.chunk ? 0.2 : 0,
    customReg: chunkSplitter ? [chunkSplitter] : [],
    backupParse
    isQAImport
  });

  // 2. auth limit
@ -122,7 +102,6 @@ export const createCollectionAndInsertData = async ({
  const { _id: collectionId } = await createOneCollection({
    ...createCollectionParams,
    trainingType,
    paragraphChunkDeep,
    chunkSize,
    chunkSplitter,

@ -178,10 +157,6 @@ export const createCollectionAndInsertData = async ({
    billId: traingBillId,
    data: chunks.map((item, index) => ({
      ...item,
      indexes: item.indexes?.map((text) => ({
        type: DatasetDataIndexTypeEnum.custom,
        text
      })),
      chunkIndex: index
    })),
    session
@ -223,19 +198,46 @@ export type CreateOneCollectionParams = CreateDatasetCollectionParams & {
  tmbId: string;
  session?: ClientSession;
};
export async function createOneCollection({ session, ...props }: CreateOneCollectionParams) {
  const {
    teamId,
    parentId,
    datasetId,
    tags,
export async function createOneCollection({
  teamId,
  tmbId,
  name,
  parentId,
  datasetId,
  type,

    fileId,
    rawLink,
    externalFileId,
    externalFileUrl,
    apiFileId
  } = props;
  createTime,
  updateTime,

  hashRawText,
  rawTextLength,
  metadata = {},
  tags,

  nextSyncTime,

  fileId,
  rawLink,
  externalFileId,
  externalFileUrl,
  apiFileId,

  // Parse settings
  customPdfParse,
  imageIndex,
  autoIndexes,

  // Chunk settings
  trainingType,
  chunkSettingMode,
  chunkSplitMode,
  chunkSize,
  indexSize,
  chunkSplitter,
  qaPrompt,

  session
}: CreateOneCollectionParams) {
  // Create collection tags
  const collectionTags = await createOrGetCollectionTags({ tags, teamId, datasetId, session });

@ -243,18 +245,41 @@ export async function createOneCollection({ session, ...props }: CreateOneCollec
  const [collection] = await MongoDatasetCollection.create(
    [
      {
        ...props,
        teamId,
        tmbId,
        parentId: parentId || null,
        datasetId,
        name,
        type,

        rawTextLength,
        hashRawText,
        tags: collectionTags,
        metadata,

        createTime,
        updateTime,
        nextSyncTime,

        ...(fileId ? { fileId } : {}),
        ...(rawLink ? { rawLink } : {}),
        ...(externalFileId ? { externalFileId } : {}),
        ...(externalFileUrl ? { externalFileUrl } : {}),
        ...(apiFileId ? { apiFileId } : {})
        ...(apiFileId ? { apiFileId } : {}),

        // Parse settings
        customPdfParse,
        imageIndex,
        autoIndexes,

        // Chunk settings
        trainingType,
        chunkSettingMode,
        chunkSplitMode,
        chunkSize,
        indexSize,
        chunkSplitter,
        qaPrompt
      }
    ],
    { session, ordered: true }

@ -34,9 +34,9 @@ const DatasetDataTextSchema = new Schema({

try {
  DatasetDataTextSchema.index(
    { teamId: 1, fullTextToken: 'text' },
    { teamId: 1, datasetId: 1, fullTextToken: 'text' },
    {
      name: 'teamId_1_fullTextToken_text',
      name: 'teamId_1_datasetId_1_fullTextToken_text',
      default_language: 'none'
    }
  );

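Context for the index change: MongoDB allows equality fields ahead of the text field in a compound text index, and every $text query must then filter on those prefix fields. With the alpha-tag index a full-text search is scoped per team and per dataset; main's variant drops datasetId from the prefix. A minimal mongoose sketch of the alpha-style shape, using a hypothetical reduced schema:

import { Schema, model } from 'mongoose';

// Hypothetical reduced schema for illustration
const DataTextSchema = new Schema({
  teamId: Schema.Types.ObjectId,
  datasetId: Schema.Types.ObjectId,
  fullTextToken: String
});
// Compound text index with equality prefixes
DataTextSchema.index(
  { teamId: 1, datasetId: 1, fullTextToken: 'text' },
  { name: 'teamId_1_datasetId_1_fullTextToken_text', default_language: 'none' }
);

const DataText = model('DataText', DataTextSchema);
// Queries must pin both prefix fields for this index to serve $text
const search = (teamId: string, datasetId: string, keyword: string) =>
  DataText.find({ teamId, datasetId, $text: { $search: keyword } }).lean();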
@ -1,208 +0,0 @@
import type {
  APIFileItem,
  ApiFileReadContentResponse,
  ApiDatasetDetailResponse,
  FeishuServer
} from '@fastgpt/global/core/dataset/apiDataset';
import { type ParentIdType } from '@fastgpt/global/common/parentFolder/type';
import axios, { type Method } from 'axios';
import { addLog } from '../../../common/system/log';

type ResponseDataType = {
  success: boolean;
  message: string;
  data: any;
};

type FeishuFileListResponse = {
  files: {
    token: string;
    parent_token: string;
    name: string;
    type: string;
    modified_time: number;
    created_time: number;
    url: string;
    owner_id: string;
  }[];
  has_more: boolean;
  next_page_token: string;
};

const feishuBaseUrl = process.env.FEISHU_BASE_URL || 'https://open.feishu.cn';

export const useFeishuDatasetRequest = ({ feishuServer }: { feishuServer: FeishuServer }) => {
  const instance = axios.create({
    baseURL: feishuBaseUrl,
    timeout: 60000
  });

  // Add a request interceptor
  instance.interceptors.request.use(async (config) => {
    if (!config.headers.Authorization) {
      const { data } = await axios.post<{ tenant_access_token: string }>(
        `${feishuBaseUrl}/open-apis/auth/v3/tenant_access_token/internal`,
        {
          app_id: feishuServer.appId,
          app_secret: feishuServer.appSecret
        }
      );

      config.headers['Authorization'] = `Bearer ${data.tenant_access_token}`;
      config.headers['Content-Type'] = 'application/json; charset=utf-8';
    }
    return config;
  });

  /**
   * Response data check
   */
  const checkRes = (data: ResponseDataType) => {
    if (data === undefined) {
      addLog.info('yuque dataset data is empty');
      return Promise.reject('服务器异常');
    }
    return data.data;
  };
  const responseError = (err: any) => {
    console.log('error->', '请求错误', err);

    if (!err) {
      return Promise.reject({ message: '未知错误' });
    }
    if (typeof err === 'string') {
      return Promise.reject({ message: err });
    }
    if (typeof err.message === 'string') {
      return Promise.reject({ message: err.message });
    }
    if (typeof err.data === 'string') {
      return Promise.reject({ message: err.data });
    }
    if (err?.response?.data) {
      return Promise.reject(err?.response?.data);
    }
    return Promise.reject(err);
  };

  const request = <T>(url: string, data: any, method: Method): Promise<T> => {
    /* Strip undefined fields */
    for (const key in data) {
      if (data[key] === undefined) {
        delete data[key];
      }
    }

    return instance
      .request({
        url,
        method,
        data: ['POST', 'PUT'].includes(method) ? data : undefined,
        params: !['POST', 'PUT'].includes(method) ? data : undefined
      })
      .then((res) => checkRes(res.data))
      .catch((err) => responseError(err));
  };

  const listFiles = async ({ parentId }: { parentId?: ParentIdType }): Promise<APIFileItem[]> => {
    const fetchFiles = async (pageToken?: string): Promise<FeishuFileListResponse['files']> => {
      const data = await request<FeishuFileListResponse>(
        `/open-apis/drive/v1/files`,
        {
          folder_token: parentId || feishuServer.folderToken,
          page_size: 200,
          page_token: pageToken
        },
        'GET'
      );

      if (data.has_more) {
        const nextFiles = await fetchFiles(data.next_page_token);
        return [...data.files, ...nextFiles];
      }

      return data.files;
    };

    const allFiles = await fetchFiles();

    return allFiles
      .filter((file) => ['folder', 'docx'].includes(file.type))
      .map((file) => ({
        id: file.token,
        parentId: file.parent_token,
        name: file.name,
        type: file.type === 'folder' ? ('folder' as const) : ('file' as const),
        hasChild: file.type === 'folder',
        updateTime: new Date(file.modified_time * 1000),
        createTime: new Date(file.created_time * 1000)
      }));
  };

  const getFileContent = async ({
    apiFileId
  }: {
    apiFileId: string;
  }): Promise<ApiFileReadContentResponse> => {
    const [{ content }, { document }] = await Promise.all([
      request<{ content: string }>(
        `/open-apis/docx/v1/documents/${apiFileId}/raw_content`,
        {},
        'GET'
      ),
      request<{ document: { title: string } }>(
        `/open-apis/docx/v1/documents/${apiFileId}`,
        {},
        'GET'
      )
    ]);

    return {
      title: document?.title,
      rawText: content
    };
  };

  const getFilePreviewUrl = async ({ apiFileId }: { apiFileId: string }): Promise<string> => {
    const { metas } = await request<{ metas: { url: string }[] }>(
      `/open-apis/drive/v1/metas/batch_query`,
      {
        request_docs: [
          {
            doc_token: apiFileId,
            doc_type: 'docx'
          }
        ],
        with_url: true
      },
      'POST'
    );

    return metas[0].url;
  };

  const getFileDetail = async ({
    apiFileId
  }: {
    apiFileId: string;
  }): Promise<ApiDatasetDetailResponse> => {
    const { document } = await request<{ document: { title: string } }>(
      `/open-apis/docx/v1/documents/${apiFileId}`,
      {},
      'GET'
    );

    return {
      name: document?.title,
      parentId: null,
      id: apiFileId
    };
  };

  return {
    getFileContent,
    listFiles,
    getFilePreviewUrl,
    getFileDetail
  };
};

Some files were not shown because too many files have changed in this diff