V4.9.2 feature (#4354)
* feat: custom dataset split sign (#4221) * feat: custom dataset split sign * feat: custom dataset split sign * add external variable debug (#4204) * add external variable debug * fix ui * plugin variables * perf: custom variable (#4225) * fix: invite link (#4229) * fix: invite link * feat: create invite link and copy it directly * feat: sync api collection will refresh title; perf: invite link ux (#4237) * update queue * feat: sync api collection will refresh title * sync collection * remove lock * perf: invite link ux * fix ts (#4239) * sync collection * remove lock * fix ts * fix: ts * Sso (#4235) * feat: redirect url can be inner url (#4138) * fix: update new user sync api (#4145) * feat: post all params to backend (#4151) * perf: sso getAuthURL api (#4172) * perf: sso getAuthURL api * perf: sso * solve the rootorglist (#4234) --------- Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com> * fix variable sync & popover button height (#4227) * fix variable sync & popover button height * required * feat: node prompt version (#4141) * feat: node prompt version * fix * delete unused code * fix * fix code * update prompt version (#4242) * sync collection * remove lock * update prompt version * perf: ai proxy (#4265) * sync collection * remove lock * perf: ai proxy * fix: member count (#4269) * feat: chunk index independent config (#4271) * sync collection * remove lock * feat: chunk index independent config * feat: add max chunk size to split chunk function * remove log * update doc * remove * remove log * fix input form label overflow (#4266) * add model test log (#4272) * sync collection * remove lock * add model test log * update ui * update log * fix: channel test * preview chunk ui * test model ux * test model log * perf: dataset selector * fix: system plugin auth * update nextjs * perf: ai proxy log remove retry log; perf: workflow type auto parse; add chunk splitter test (#4296) * sync collection * remove lock * perf: workflow type auto parse * add chunk splitter test * perf: ai proxy log remove retry log * update ai proxy field * perf: member/org/group list (#4295) * refactor: org api * refactor: org api * perf: member/org/group list * feat: change group owner api * fix: manage org member * perf: member search * tmp org api rewrite (#4304) * sync collection * remove lock * tmp org api rewrite * perf: text splitter (#4313) * sync collection * remove lock * perf: text splitter * update comment * update search filter code (#4317) * sync collection * remove lock * update search filter code * perf: member/group/org (#4316) * feat: change group owner api * perf: member/org/group * fix: member modal select clb * fix: search member when change owner * fix: member list, login button (#4322) * perf: member group (#4324) * sync collection * remove lock * perf: member group * fix: ts (#4325) * sync collection * remove lock * fix: ts * fix: group (#4330) * perf: intro wrap (#4346) * sync collection * remove lock * perf: intro wrap * perf: member list (#4344) * chore: search member new api * chore: permission * fix: ts error * fix: member modal * perf: long org name ui (#4347) * sync collection * remove lock * perf: long org name ui * perf: member table ui (#4353) * fix: ts (#4357) * docs: Add SSO Markdown Doc (#4334) * add sso doc * fix comment * update sso doc (#4358) * perf: useScrollPagination support debounce and throttle. (#4355) * perf: useScrollPagination support debounce and throttle.
* fix: useScrollPagination loading * fix: isLoading * fix: org search path hide * fix: simple app all_app button (#4365) * add qwen long (#4363) --------- Co-authored-by: heheer <heheer@sealos.io> Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com> Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>
BIN docSite/assets/imgs/sso1.png (new file, 133 KiB)
BIN docSite/assets/imgs/sso10.png (new file, 124 KiB)
BIN docSite/assets/imgs/sso11.png (new file, 117 KiB)
BIN docSite/assets/imgs/sso12.png (new file, 79 KiB)
BIN docSite/assets/imgs/sso13.png (new file, 319 KiB)
BIN docSite/assets/imgs/sso14.png (new file, 174 KiB)
BIN docSite/assets/imgs/sso15.png (new file, 3.2 KiB)
BIN docSite/assets/imgs/sso16.png (new file, 2.7 KiB)
BIN docSite/assets/imgs/sso17.png (new file, 3.8 KiB)
BIN docSite/assets/imgs/sso2.png (new file, 137 KiB)
BIN docSite/assets/imgs/sso3.png (new file, 24 KiB)
BIN docSite/assets/imgs/sso4.png (new file, 117 KiB)
BIN docSite/assets/imgs/sso5.png (new file, 86 KiB)
BIN docSite/assets/imgs/sso6.png (new file, 26 KiB)
BIN docSite/assets/imgs/sso7.png (new file, 140 KiB)
BIN docSite/assets/imgs/sso8.png (new file, 108 KiB)
BIN docSite/assets/imgs/sso9.png (new file, 119 KiB)
BIN docSite/assets/imgs/sso_update1.png (new file, 39 KiB)
BIN docSite/assets/imgs/teammode.png (new file, 265 KiB)
@ -11,8 +11,6 @@ weight: 853
| --------------------- | --------------------- |
|  |  |

## Create a Training Order

{{< tabs tabTotal="2" >}}

@ -289,7 +287,7 @@ curl --location --request DELETE 'http://localhost:3000/api/core/dataset/delete?

## Collections

### Common creation parameters
### Common creation parameters (must-read)

**Input parameters**

@ -300,8 +298,11 @@ curl --location --request DELETE 'http://localhost:3000/api/core/dataset/delete?
| trainingType | Data processing mode. chunk: split by text length; qa: Q&A pair extraction | ✅ |
| autoIndexes | Whether to auto-generate indexes (commercial edition only) | |
| imageIndex | Whether to auto-generate image indexes (commercial edition only) | |
| chunkSize | Estimated chunk size | |
| chunkSplitter | Custom top-priority delimiter | |
| chunkSettingMode | Chunking parameter mode. auto: system default parameters; custom: manually specified parameters | |
| chunkSplitMode | Chunk split mode. size: split by length; char: split by character. Not applied when chunkSettingMode=auto. | |
| chunkSize | Chunk size, default 1500. Not applied when chunkSettingMode=auto. | |
| indexSize | Index size, default 512; must be smaller than the index model's max tokens. Not applied when chunkSettingMode=auto. | |
| chunkSplitter | Custom top-priority delimiter; no further splitting is performed unless the file-processing max context is exceeded. Not applied when chunkSettingMode=auto. | |
| qaPrompt | QA split prompt | |
| tags | Collection tags (string array) | |
| createTime | File creation time (Date / String) | |
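As a quick illustration of the new v4.9.2 parameters, a request sketch might look like the following. This is a hypothetical example: the base URL, API key, and exact create-endpoint path are placeholders, and the table above remains the authoritative reference for field semantics.

```typescript
// Hypothetical sketch: create a text collection with custom chunking parameters.
const res = await fetch('http://localhost:3000/api/core/dataset/collection/create/text', {
  method: 'POST',
  headers: {
    Authorization: 'Bearer {{apikey}}',
    'Content-Type': 'application/json'
  },
  body: JSON.stringify({
    datasetId: 'xxx',
    name: 'demo',
    text: '...',
    trainingType: 'chunk',
    chunkSettingMode: 'custom', // 'auto' applies system defaults and ignores the fields below
    chunkSplitMode: 'size', // or 'char' to split on a custom delimiter
    chunkSize: 1500,
    indexSize: 512
  })
});
```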
@ -389,9 +390,8 @@ curl --location --request POST 'http://localhost:3000/api/core/dataset/collectio
"name":"测试训练",
"trainingType": "qa",
"chunkSize":8000,
"chunkSplitter":"",
"qaPrompt":"11",
"chunkSettingMode": "auto",
"qaPrompt":"",
"metadata":{}
}'
@ -409,10 +409,6 @@ curl --location --request POST 'http://localhost:3000/api/core/dataset/collectio
- parentId: parent ID; defaults to the root directory if omitted
- name: collection name (required)
- metadata: metadata (currently unused)
- trainingType: training mode (required)
- chunkSize: length of each chunk (optional). chunk mode: 100–3000; qa mode: 4000 up to the model's max tokens (for 16k models, usually no more than 10000 is recommended)
- chunkSplitter: custom top-priority delimiter (optional)
- qaPrompt: custom QA split prompt (optional)
{{% /alert %}}

{{< /markdownify >}}

@ -462,8 +458,7 @@ curl --location --request POST 'http://localhost:3000/api/core/dataset/collectio
"parentId": null,
"trainingType": "chunk",
"chunkSize":512,
"chunkSplitter":"",
"chunkSettingMode": "auto",
"qaPrompt":"",
"metadata":{
@ -483,10 +478,6 @@ curl --location --request POST 'http://localhost:3000/api/core/dataset/collectio
- datasetId: dataset ID (required)
- parentId: parent ID; defaults to the root directory if omitted
- metadata.webPageSelector: web page selector, used to specify which element of the page is read as text (optional)
- trainingType: training mode (required)
- chunkSize: length of each chunk (optional). chunk mode: 100–3000; qa mode: 4000 up to the model's max tokens (for 16k models, usually no more than 10000 is recommended)
- chunkSplitter: custom top-priority delimiter (optional)
- qaPrompt: custom QA split prompt (optional)
{{% /alert %}}

{{< /markdownify >}}

@ -545,13 +536,7 @@ curl --location --request POST 'http://localhost:3000/api/core/dataset/collectio

{{% alert icon=" " context="success" %}}
- file: the file
- data: dataset info (passed in after JSON serialization)
- datasetId: dataset ID (required)
- parentId: parent ID; defaults to the root directory if omitted
- trainingType: training mode (required)
- chunkSize: length of each chunk (optional). chunk mode: 100–3000; qa mode: 4000 up to the model's max tokens (for 16k models, usually no more than 10000 is recommended)
- chunkSplitter: custom top-priority delimiter (optional)
- qaPrompt: custom QA split prompt (optional)
- data: dataset info (passed in after JSON serialization); see "Common creation parameters" above for parameter details
{{% /alert %}}

{{< /markdownify >}}

55 docSite/content/zh-cn/docs/development/upgrading/492.md Normal file
@ -0,0 +1,55 @@
---
title: 'V4.9.2 (in progress)'
description: 'FastGPT V4.9.2 release notes'
icon: 'upgrade'
draft: false
toc: true
weight: 798
---

## Upgrade Guide

### SSO migration

Commercial-edition users who use SSO or member sync against `DingTalk` or `WeCom` need to migrate their existing SSO configuration:

Follow the configuration in [SSO & External Member Sync](/docs/guide/admin/sso.md) to deploy and configure the `sso-service`.

1. First copy the relevant configuration items out of the original commercial admin console as a backup (for WeCom, copy out the AppId, Secret, etc.), then upgrade the image.
2. Deploy the SSO service and configure the relevant environment variables, following the document above.
3. If your users previously came from WeCom org-structure sync, switch the team mode to "sync mode" in the commercial admin console.

## Important Changes

- The dataset import APIs have changed: the optional parameters `chunkSettingMode`, `chunkSplitMode`, and `indexSize` were added. See the [dataset import API](/docs/development/openapi/dataset) docs for details.
## 🚀 New Features

1. Dataset chunking optimization: chunk size and index size can be configured independently, and extra-large chunks are allowed, trading larger input tokens for complete chunks.
2. Dataset chunking adds preset values for custom delimiters and supports splitting on custom newline characters.
3. External variables renamed to custom variables; they can now be debugged during testing, and are hidden in share links.
4. Collection sync can now also update the title.
5. Team member management refactored: mainstream IM SSO (WeCom, Feishu, DingTalk) is extracted, FastGPT can be integrated via custom SSO, and member sync with external systems is improved.

## ⚙️ Optimizations

1. Member names can be exported when exporting chat logs.
2. Invite link interaction.
3. When copying fails due to a missing SSL certificate, a popup is shown for manual copying.
4. Channels not built into FastGPT's AI proxy still display their names correctly.
5. Upgraded Next.js to 14.2.25.
6. Workflow nodes with array-of-string types automatically adapt string inputs.
7. Workflow nodes with array types automatically JSON-parse string inputs.
8. AI proxy log optimization: retry-failure logs are dropped, keeping only the last error log.
9. Improved display of personal info and notifications.
10. Minor chunking-algorithm adjustments:
    * Stronger continuity across processing delimiters.
    * When splitting code blocks, the LLM context size is used as the chunk size to keep code blocks as intact as possible.
    * When splitting tables, the LLM context size is used as the chunk size to keep tables as intact as possible.

## 🐛 Fixes

1. Feishu and Yuque datasets failed to sync.
2. During channel tests, a model's custom request URL (if configured) is now used instead of the channel request URL.
3. Speech-recognition model tests failed for models that were not enabled.
4. When an admin configured system plugins, authentication failed if a plugin contained other system apps.
5. Removing a custom TTS request URL wrongly required the requestAuth field.
440 docSite/content/zh-cn/docs/guide/admin/sso.md Normal file
@ -0,0 +1,440 @@
---
title: 'SSO & External Member Sync'
description: 'FastGPT external member system integration design and configuration'
icon: ''
draft: false
toc: true
weight: 707
---

If you don't need SSO or member sync, or you only need quick login via GitHub, Google, Microsoft, or WeChat Official Accounts, you can skip this chapter. It is intended for users who need to integrate their own member system or a mainstream office IM.

## Introduction

To make it easy to integrate an **external member system**, FastGPT provides a set of **standard interfaces** plus a FastGPT-SSO-Service image that acts as an **adapter**.

With this standard interface you can implement:

1. SSO login: after the callback from the external system, a user is created in FastGPT.
2. Member and org-structure sync (referred to below simply as member sync).

**How it works**

FastGPT-pro ships a standard set of SSO and member-sync interfaces; the system performs SSO and member sync against these interfaces.

FastGPT-SSO-Service aggregates SSO and member-sync interfaces from different sources and converts them into interfaces that fastgpt-pro can consume.

## System Configuration Guide

### 1. Deploy the SSO-service image

Deploy with docker-compose:

```yaml
fastgpt-sso:
  image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sso-service:v4.9.0
  container_name: fastgpt-sso
  restart: always
  networks:
    - fastgpt
  environment:
    - SSO_PROVIDER=example
    - AUTH_TOKEN=xxxxx # auth credential, used by fastgpt-pro
    # provider-specific environment variables go here
```

Depending on the provider, you need to configure different environment variables. The built-in generic protocols/IMs are:

{{< table "table-hover table-striped-columns" >}}
| Protocol/feature | SSO | Member sync |
|----------------|----------|--------------|
| Feishu | Yes | Yes |
| WeCom | Yes | Yes |
| DingTalk | Yes | No |
| SAML 2.0 | Yes | No |
| OAuth 2.0 | Yes | No |
{{< /table >}}

### 2. Configure fastgpt-pro

#### 1. Configure environment variables

`EXTERNAL_USER_SERVICE_BASE_URL` is an internal network address; for the example above, the environment variables should be set to:

```yaml
EXTERNAL_USER_SERVICE_BASE_URL=http://fastgpt-sso:3000
EXTERNAL_USER_SERVICE_AUTH_TOKEN=xxxxx
```

#### 2. Configure button text, icons, etc. in the commercial admin console

{{< table "table-hover table-striped-columns" >}}
| <div style="text-align:center">WeCom</div> | <div style="text-align:center">DingTalk</div> | <div style="text-align:center">Feishu</div> |
|-----------|-----------------|--------------|
|  |  |  |
{{< /table >}}

#### 3. Enable member sync (optional)

If you need to sync members from an external system, you can enable member sync. For details on team modes, see the [team mode documentation](/docs/guide/admin/teamMode).



#### 4. Optional configuration

1. Scheduled automatic member sync

Setting the following fastgpt-pro environment variable enables automatic member sync:

```bash
SYNC_MEMBER_CRON="0 0 * * *" # cron expression; runs daily at 00:00
```

## Built-in Generic Protocol/IM Configuration Examples

### Feishu

#### 1. Obtain parameters

App ID and App Secret

Go to the developer console, open your self-built enterprise app, and check the app credentials on the "Credentials & Basic Info" page.



#### 2. Configure permissions

Go to the developer console, open your self-built enterprise app, and grant permissions on the permission management page under development configuration.



For user SSO login, enable the following user identity permissions:

1. ***Read basic directory info***
2. ***Read basic user info***
3. ***Read user email***
4. ***Read user user ID***

For enterprise sync, the identity permissions are the same as above, but note that they must be enabled as application permissions.

#### 3. Redirect URL

Go to the developer console, open your self-built enterprise app, and set the redirect URL in the security settings under development configuration.


#### 4. yml configuration example

```yaml
fastgpt-sso:
  image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sso-service:v4.9.0
  container_name: fastgpt-sso
  restart: always
  networks:
    - fastgpt
  environment:
    - AUTH_TOKEN=xxxxx
    # Feishu - if you run a private deployment, the URL prefixes below may differ
    - SSO_PROVIDER=feishu
    # oauth endpoint (no change needed for public Feishu)
    - SSO_TARGET_URL=https://accounts.feishu.cn/open-apis/authen/v1/authorize
    # token endpoint (no change needed for public Feishu)
    - FEISHU_TOKEN_URL=https://open.feishu.cn/open-apis/authen/v2/oauth/token
    # user-info endpoint (no change needed for public Feishu)
    - FEISHU_GET_USER_INFO_URL=https://open.feishu.cn/open-apis/authen/v1/user_info
    # redirect URI; required because Feishu validates it when fetching user info
    - FEISHU_REDIRECT_URI=xxx
    # Feishu app ID, usually starts with cli
    - FEISHU_APP_ID=xxx
    # Feishu app secret
    - FEISHU_APP_SECRET=xxx
```

### DingTalk

#### 1. Obtain parameters

CLIENT_ID and CLIENT_SECRET

Go to the DingTalk open platform, click App Development, open your app, and note the Client ID and Client Secret on the "Credentials & Basic Info" page.


#### 2. Configure permissions

Go to the DingTalk open platform, click App Development, open your app, and operate on the permission management page under development configuration. The permissions to enable are:

1. ***Personal phone number info***
2. ***Read access to personal directory info***
3. ***Basic permission to obtain DingTalk open-interface user access credentials***

#### 3. Redirect URL

Go to the DingTalk open platform, click App Development, open your app, and operate on the security settings page under development configuration.
Two items must be filled in:

1. Server egress IPs (the list of server IPs that call the DingTalk server-side API)
2. Redirect URL (callback domain)

#### 4. yml configuration example

```yaml
fastgpt-sso:
  image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sso-service:v4.9.0
  container_name: fastgpt-sso
  restart: always
  networks:
    - fastgpt
  environment:
    - SSO_PROVIDER=dingtalk
    - AUTH_TOKEN=xxxxx
    # oauth endpoint
    - SSO_TARGET_URL=https://login.dingtalk.com/oauth2/auth
    # token endpoint
    - DINGTALK_TOKEN_URL=https://api.dingtalk.com/v1.0/oauth2/userAccessToken
    # user-info endpoint
    - DINGTALK_GET_USER_INFO_URL=https://oapi.dingtalk.com/v1.0/contact/users/me
    # DingTalk app client ID
    - DINGTALK_CLIENT_ID=xxx
    # DingTalk app client secret
    - DINGTALK_CLIENT_SECRET=xxx
```

### WeCom

#### 1. Obtain parameters

1. The enterprise CorpID

   a. Log in to the WeCom admin console with an admin account: `https://work.weixin.qq.com/wework_admin/loginpage_wx`

   b. Open the "My Company" page and check the **Company ID**

   

2. Create an internal app for FastGPT:

   a. Obtain the app's AgentID and Secret

   b. Make sure the app's visibility scope is the whole company (i.e. the root department)

   

   

3. A domain name, which must:

   a. Resolve to a publicly reachable server

   b. Allow serving static files from the root path of that service (for domain-ownership verification; follow the hints on the configuration page — only one static file needs to be served, and it can be deleted after verification)

   c. Be configured for web authorization, JS-SDK, and WeCom authorized login

   d. Optionally, "hide app in workbench" can be enabled at the bottom of the WeCom authorized-login page

   

   

   

4. Obtain the "Directory Sync Assistant" secret

   Fetching the directory and org member IDs requires the "Directory Sync Assistant" secret:

   [Security & Management] -- [Management Tools] -- [Directory Sync]

   

5. Enable API sync

6. Obtain the Secret

7. Configure trusted enterprise IPs

   

#### 2. yml configuration example

```yaml
fastgpt-sso:
  image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sso-service:v4.9.0
  container_name: fastgpt-sso
  restart: always
  networks:
    - fastgpt
  environment:
    - AUTH_TOKEN=xxxxx
    - SSO_PROVIDER=wecom
    # oauth endpoint, used inside the WeCom client
    - WECOM_TARGET_URL_OAUTH=https://open.weixin.qq.com/connect/oauth2/authorize
    # sso endpoint, QR-code login
    - WECOM_TARGET_URL_SSO=https://login.work.weixin.qq.com/wwlogin/sso/login
    # get user id (only returns the id)
    - WECOM_GET_USER_ID_URL=https://qyapi.weixin.qq.com/cgi-bin/auth/getuserinfo
    # get user details (everything except the name)
    - WECOM_GET_USER_INFO_URL=https://qyapi.weixin.qq.com/cgi-bin/auth/getuserdetail
    # get user info (has the name, nothing else)
    - WECOM_GET_USER_NAME_URL=https://qyapi.weixin.qq.com/cgi-bin/user/get
    # get department id list
    - WECOM_GET_DEPARTMENT_LIST_URL=https://qyapi.weixin.qq.com/cgi-bin/department/list
    # get user id list
    - WECOM_GET_USER_LIST_URL=https://qyapi.weixin.qq.com/cgi-bin/user/list_id
    # WeCom CorpId
    - WECOM_CORPID=
    # WeCom app AgentId, usually 1000xxx
    - WECOM_AGENTID=
    # WeCom app Secret
    - WECOM_APP_SECRET=
    # Directory Sync Assistant Secret
    - WECOM_SYNC_SECRET=
```

### Standard OAuth 2.0

#### Required parameters

We provide a standard OAuth 2.0 integration flow. Three addresses are needed:

1. The login/authorization address (after login, the code is passed to redirect_uri)
   - Write the address out in full, except for redirect_uri (which is appended automatically)
2. The address for obtaining the access_token; a GET request with a code parameter

```bash
http://example.com/oauth/access_token?code=xxxx
```

3. The address for obtaining user info

```bash
http://example.com/oauth/user_info
```

#### Configuration example

```yaml
fastgpt-sso:
  image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sso-service:v4.9.0
  container_name: fastgpt-sso
  restart: always
  networks:
    - fastgpt
  environment:
    # OAuth2.0
    - AUTH_TOKEN=xxxxx
    - SSO_PROVIDER=oauth2
    # OAuth2 authorization (redirect) URL
    - OAUTH2_AUTHORIZE_URL=
    # OAuth2 access-token URL
    - OAUTH2_TOKEN_URL=
    # OAuth2 user-info URL
    - OAUTH2_USER_INFO_URL=
    # OAuth2 username field mapping (required)
    - OAUTH2_USERNAME_MAP=
    # OAuth2 avatar field mapping (optional)
    - OAUTH2_AVATAR_MAP=
    # OAuth2 member-name field mapping (optional)
    - OAUTH2_MEMBER_NAME_MAP=
    # OAuth2 contact field mapping (optional)
    - OAUTH2_CONTACT_MAP=
```

## Standard Interface Documentation

Below is the standard SSO and member-sync interface documentation in FastGPT-pro. If you need to integrate a non-standard system, you can use this chapter as a development reference.



FastGPT supports the following standard interfaces:

1. https://example.com/getAuthURL — get the authorization redirect address
2. https://example.com/login/oauth/getUserInfo?code=xxxxx — consume the code and exchange it for user info
3. https://example.com/org/list — get the org list
4. https://example.com/user/list — get the member list

### Get the login redirect address

Returns a redirect login address; fastgpt automatically redirects to it.

redirect_uri is appended automatically to the address's query string.

GET /login/oauth/getAuthURL
```JSON
{
  "success": true,
  "message": "error message",
  "authURL": "https://example.com/somepath/login/oauth?redirect_uri=https%3A%2F%2Ffastgpt.cn%2Flogin%2Fprovider%0A"
}
```

### Get user info

This interface accepts a code parameter for authentication and consumes it to return user info.

https://oauth.example/login/oauth/getUserInfo?code=xxxx

GET /login/oauth/getUserInfo?code=xxxxxx
Returns (JSON):

```JSON
{
  "success": true,
  "message": "error message",
  "username": "username, used to register in fastgpt; globally unique; fastgpt will not prepend any prefix automatically",
  "avatar": "avatar, may be empty",
  "contact": "contact info, preferably not empty"
}
```

### Get orgs

1. Sync orgs: /org/list

GET https://example.com/org/list

⚠️ Note: there can only be one root department. If your system has multiple root departments, handle this first by adding a virtual root department (a sketch follows the type below).
Return type:

```typescript
type OrgListResponseType = {
  message?: string; // error message
  success: boolean;
  orgList: {
    id: string; // unique department id
    name: string; // name
    parentId: string; // parentId; for the root department, pass an empty string
  }[];
}
```
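If your source system does have multiple roots, a minimal pre-processing sketch might look like this. The helper name and virtual-root id are illustrative, not part of the standard interface:

```typescript
type OrgItem = { id: string; name: string; parentId: string };

// Re-parent every root under one synthetic virtual root so that the
// returned org list always contains exactly one root department.
const ensureSingleRoot = (orgs: OrgItem[]): OrgItem[] => {
  const roots = orgs.filter((o) => o.parentId === '');
  if (roots.length <= 1) return orgs;

  const virtualRoot: OrgItem = { id: 'virtual-root', name: 'Root', parentId: '' };
  return [
    virtualRoot,
    ...orgs.map((o) => (o.parentId === '' ? { ...o, parentId: virtualRoot.id } : o))
  ];
};
```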

### Get members

1. Sync users: /user/list

GET https://example.com/user/list

Return type:

```typescript
type UserListResponseListType = {
  message?: string; // error message
  success: boolean;
  userList: {
    username: string; // unique id; must match the username returned by the SSO interface
    // must carry a prefix, e.g. sync-aaaaa, matching the prefix returned by the sso interface
    memberName?: string; // name, used as tmbname
    avatar?: string;
    contact?: string; // email or phone number
    orgs?: string[]; // IDs of the orgs the member belongs to; pass [] if none
  }[];
}
```

## Integrating Non-standard Systems

1. Develop it yourself: implement the standard interfaces provided by fastgpt, and fill the deployed service address into fastgpt-pro (see the sketch below).
   You can start from this template repo: [fastgpt-sso-template](https://github.com/labring/fastgpt-sso-template)
2. Custom development by the fastgpt team:
   a. Provide your system's SSO docs, member/org retrieval docs, and an externally reachable test address.
   b. A corresponding provider and environment variables are added to fastgpt-sso-service, and integration code is written against them.
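For option 1, a minimal adapter sketch, assuming an Express server (all payload values are illustrative; the authoritative shapes are the standard interfaces above):

```typescript
import express from 'express';

const app = express();

// Return the login redirect address; FastGPT redirects the browser here.
app.get('/login/oauth/getAuthURL', (_req, res) => {
  res.json({
    success: true,
    authURL: 'https://idp.example.com/oauth/authorize?client_id=xxx'
  });
});

// Consume the code and return the user info FastGPT expects.
app.get('/login/oauth/getUserInfo', (req, res) => {
  const code = req.query.code as string;
  // ...exchange `code` with your identity provider here...
  res.json({ success: true, username: 'sync-alice', avatar: '', contact: '' });
});

// Member sync: exactly one root org, plus the member list referencing it.
app.get('/org/list', (_req, res) => {
  res.json({ success: true, orgList: [{ id: '1', name: 'Root', parentId: '' }] });
});
app.get('/user/list', (_req, res) => {
  res.json({
    success: true,
    userList: [{ username: 'sync-alice', memberName: 'Alice', orgs: ['1'] }]
  });
});

app.listen(3000);
```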
@ -1,44 +0,0 @@
---
weight: 490
title: 'DingTalk SSO configuration'
description: 'DingTalk SSO login'
icon: 'chat_bubble'
draft: false
images: []
---

## 1. Register a DingTalk app

Log in to the [DingTalk open platform](https://open-dev.dingtalk.com/fe/app?hash=%23%2Fcorp%2Fapp#/corp/app) and create an app.



## 2. Configure the DingTalk app's security settings

Open the created app, go to `Security Settings`, and configure the egress IP (your server IP) and the redirect URL. The redirect URL is:

`{{fastgpt domain}}/login/provider`



## 3. Set the DingTalk app permissions

Open the created app, go to `Permission Settings`, and grant two permissions: `Personal phone number info` and `Read access to personal directory info`



## 4. Publish the app

Open the created app, go to `Version Management & Release`, and create any new version.

## 5. Configure the DingTalk app id in FastGPT Admin

The field names correspond one-to-one; fill them in directly.

| | |
| --- | --- |
| |  |

## 6. Test


81 docSite/content/zh-cn/docs/guide/admin/teamMode.md Normal file
@ -0,0 +1,81 @@
---
title: 'Team Mode Documentation'
description: 'FastGPT team mode documentation'
icon: ''
draft: false
toc: true
weight: 707
---

## Introduction

Currently supported team modes:

1. Multi-team mode (default)
2. Single-team mode (one global team)
3. Member sync mode (all members are synced from an external system)

<table class="table-hover table-striped-columns" style="text-align: center;">
<tr>
<th rowspan="2">Team mode</th>
<th colspan="2">SMS/email registration</th>
<th colspan="2">Added directly by admin</th>
<th colspan="2">SSO registration</th>
</tr>
<tr>
<th>Creates default team</th>
<th>Joins root team</th>
<th>Creates default team</th>
<th>Joins root team</th>
<th>Creates default team</th>
<th>Joins root team</th>
</tr>
<tr>
<td>Single-team mode</td>
<td>❌</td>
<td>✅</td>
<td>❌</td>
<td>✅</td>
<td>❌</td>
<td>✅</td>
</tr>
<tr>
<td>Multi-team mode</td>
<td>✅</td>
<td>❌</td>
<td>✅</td>
<td>❌</td>
<td>✅</td>
<td>❌</td>
</tr>
<tr>
<td>Sync mode</td>
<td>❌</td>
<td>❌</td>
<td>❌</td>
<td>✅</td>
<td>❌</td>
<td>✅</td>
</tr>
</table>

### Multi-team mode (default)

In multi-team mode, every user gets a default team created at sign-up, owned by themselves.

### Single-team mode

Single-team mode is new in v4.9. To simplify user and resource management for enterprises, once single-team mode is enabled, new users no longer get their own default team; instead they join the root user's team.

### Sync mode

With system configuration complete and sync mode enabled, members of the external member system are synced into FastGPT automatically.

For the concrete sync method and rules, see [SSO & External Member Sync](/docs/guide/admin/sso.md).


## Configuration

Team mode can be configured in `fastgpt-pro` under `System Config - Member Config`.


@ -124,6 +124,7 @@ curl --location --request GET '{{baseURL}}/v1/file/content?id=xx' \
  "success": true,
  "message": "",
  "data": {
    "title": "Document title",
    "content": "FastGPT is an LLM-based knowledge-base Q&A system that offers out-of-the-box data processing, model invocation, and more. Workflows can also be orchestrated visually with Flow to implement complex Q&A scenarios!\n",
    "previewUrl": "xxxx"
  }
@ -131,10 +132,13 @@ curl --location --request GET '{{baseURL}}/v1/file/content?id=xx' \
```

{{% alert icon=" " context="success" %}}
Returned as either-or; if both are returned, content takes priority.

- title - file title.
- content - file content, used as-is.
- previewUrl - file URL; the system requests this address to fetch the file content.

`content` and `previewUrl` are returned as either-or; if both are returned, `content` takes priority. When `previewUrl` is returned, the document content is read by visiting that link.

{{% /alert %}}

{{< /markdownify >}}

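As a sketch, the response contract above can be expressed as a TypeScript type (illustrative only; field semantics follow the alert above):

```typescript
// Either `content` or `previewUrl` is returned; if both are present, `content` wins.
type FileContentResponse = {
  success: boolean;
  message: string;
  data: {
    title?: string; // document title
    content?: string; // document content, used directly
    previewUrl?: string; // link the system fetches to read the document content
  };
};
```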
@ -1,16 +1,17 @@
import { defaultMaxChunkSize } from '../../core/dataset/training/utils';
import { getErrText } from '../error/utils';
import { replaceRegChars } from './tools';

export const CUSTOM_SPLIT_SIGN = '-----CUSTOM_SPLIT_SIGN-----';

type SplitProps = {
  text: string;
  chunkLen: number;
  chunkSize: number;
  maxSize?: number;
  overlapRatio?: number;
  customReg?: string[];
};
export type TextSplitProps = Omit<SplitProps, 'text' | 'chunkLen'> & {
  chunkLen?: number;
export type TextSplitProps = Omit<SplitProps, 'text' | 'chunkSize'> & {
  chunkSize?: number;
};

type SplitResponse = {
@ -56,7 +57,7 @@ const strIsMdTable = (str: string) => {
  return true;
};
const markdownTableSplit = (props: SplitProps): SplitResponse => {
  let { text = '', chunkLen } = props;
  let { text = '', chunkSize } = props;
  const splitText2Lines = text.split('\n');
  const header = splitText2Lines[0];
  const headerSize = header.split('|').length - 2;
@ -72,7 +73,7 @@ ${mdSplitString}
`;

  for (let i = 2; i < splitText2Lines.length; i++) {
    if (chunk.length + splitText2Lines[i].length > chunkLen * 1.2) {
    if (chunk.length + splitText2Lines[i].length > chunkSize * 1.2) {
      chunks.push(chunk);
      chunk = `${header}
${mdSplitString}
@ -99,11 +100,17 @@ ${mdSplitString}
  5. Punctuation split: with overlap
*/
const commonSplit = (props: SplitProps): SplitResponse => {
  let { text = '', chunkLen, overlapRatio = 0.15, customReg = [] } = props;
  let {
    text = '',
    chunkSize,
    maxSize = defaultMaxChunkSize,
    overlapRatio = 0.15,
    customReg = []
  } = props;

  const splitMarker = 'SPLIT_HERE_SPLIT_HERE';
  const codeBlockMarker = 'CODE_BLOCK_LINE_MARKER';
  const overlapLen = Math.round(chunkLen * overlapRatio);
  const overlapLen = Math.round(chunkSize * overlapRatio);

  // replace code block all \n to codeBlockMarker
  text = text.replace(/(```[\s\S]*?```|~~~[\s\S]*?~~~)/g, function (match) {
@ -115,34 +122,38 @@ const commonSplit = (props: SplitProps): SplitResponse => {
  // The larger maxLen is, the next sentence is less likely to trigger splitting
  const markdownIndex = 4;
  const forbidOverlapIndex = 8;
  const stepReges: { reg: RegExp; maxLen: number }[] = [
    ...customReg.map((text) => ({
      reg: new RegExp(`(${replaceRegChars(text)})`, 'g'),
      maxLen: chunkLen * 1.4
    })),
    { reg: /^(#\s[^\n]+\n)/gm, maxLen: chunkLen * 1.2 },
    { reg: /^(##\s[^\n]+\n)/gm, maxLen: chunkLen * 1.4 },
    { reg: /^(###\s[^\n]+\n)/gm, maxLen: chunkLen * 1.6 },
    { reg: /^(####\s[^\n]+\n)/gm, maxLen: chunkLen * 1.8 },
    { reg: /^(#####\s[^\n]+\n)/gm, maxLen: chunkLen * 1.8 },

    { reg: /([\n]([`~]))/g, maxLen: chunkLen * 4 }, // code block
    { reg: /([\n](?=\s*[0-9]+\.))/g, maxLen: chunkLen * 2 }, // Enlarge the chunk to keep a complete paragraph where possible. (?![\*\-|>`0-9]): markdown special chars
    { reg: /(\n{2,})/g, maxLen: chunkLen * 1.6 },
    { reg: /([\n])/g, maxLen: chunkLen * 1.2 },
  const stepReges: { reg: RegExp | string; maxLen: number }[] = [
    ...customReg.map((text) => ({
      reg: text.replaceAll('\\n', '\n'),
      maxLen: chunkSize
    })),
    { reg: /^(#\s[^\n]+\n)/gm, maxLen: chunkSize },
    { reg: /^(##\s[^\n]+\n)/gm, maxLen: chunkSize },
    { reg: /^(###\s[^\n]+\n)/gm, maxLen: chunkSize },
    { reg: /^(####\s[^\n]+\n)/gm, maxLen: chunkSize },
    { reg: /^(#####\s[^\n]+\n)/gm, maxLen: chunkSize },

    { reg: /([\n](```[\s\S]*?```|~~~[\s\S]*?~~~))/g, maxLen: maxSize }, // code block
    {
      reg: /(\n\|(?:(?:[^\n|]+\|){1,})\n\|(?:[:\-\s]+\|){1,}\n(?:\|(?:[^\n|]+\|)*\n)*)/g,
      maxLen: maxSize
    }, // Table: keep it as intact as possible
    { reg: /(\n{2,})/g, maxLen: chunkSize },
    { reg: /([\n])/g, maxLen: chunkSize },
    // ------ There's no overlap on the top
    { reg: /([。]|([a-zA-Z])\.\s)/g, maxLen: chunkLen * 1.2 },
    { reg: /([!]|!\s)/g, maxLen: chunkLen * 1.2 },
    { reg: /([?]|\?\s)/g, maxLen: chunkLen * 1.4 },
    { reg: /([;]|;\s)/g, maxLen: chunkLen * 1.6 },
    { reg: /([,]|,\s)/g, maxLen: chunkLen * 2 }
    { reg: /([。]|([a-zA-Z])\.\s)/g, maxLen: chunkSize },
    { reg: /([!]|!\s)/g, maxLen: chunkSize },
    { reg: /([?]|\?\s)/g, maxLen: chunkSize },
    { reg: /([;]|;\s)/g, maxLen: chunkSize },
    { reg: /([,]|,\s)/g, maxLen: chunkSize }
  ];

  const customRegLen = customReg.length;
  const checkIsCustomStep = (step: number) => step < customRegLen;
  const checkIsMarkdownSplit = (step: number) =>
    step >= customRegLen && step <= markdownIndex + customRegLen;

  const checkForbidOverlap = (step: number) => step <= forbidOverlapIndex + customRegLen;

  // if use markdown title split, Separate record title
@ -151,7 +162,8 @@ const commonSplit = (props: SplitProps): SplitResponse => {
      return [
        {
          text,
          title: ''
          title: '',
          chunkMaxSize: chunkSize
        }
      ];
    }
@ -159,27 +171,46 @@ const commonSplit = (props: SplitProps): SplitResponse => {
    const isCustomStep = checkIsCustomStep(step);
    const isMarkdownSplit = checkIsMarkdownSplit(step);

    const { reg } = stepReges[step];
    const { reg, maxLen } = stepReges[step];

    const splitTexts = text
      .replace(
    const replaceText = (() => {
      if (typeof reg === 'string') {
        let tmpText = text;
        reg.split('|').forEach((itemReg) => {
          tmpText = tmpText.replaceAll(
            itemReg,
            (() => {
              if (isCustomStep) return splitMarker;
              if (isMarkdownSplit) return `${splitMarker}$1`;
              return `$1${splitMarker}`;
            })()
          );
        });
        return tmpText;
      }

      return text.replace(
        reg,
        (() => {
          if (isCustomStep) return splitMarker;
          if (isMarkdownSplit) return `${splitMarker}$1`;
          return `$1${splitMarker}`;
        })()
      )
      .split(`${splitMarker}`)
      .filter((part) => part.trim());
      );
    })();

    const splitTexts = replaceText.split(splitMarker).filter((part) => part.trim());

    return splitTexts
      .map((text) => {
        const matchTitle = isMarkdownSplit ? text.match(reg)?.[0] || '' : '';
        // If a chunk has no match, use the default chunk size; otherwise use the max chunk size
        const chunkMaxSize = text.match(reg) === null ? chunkSize : maxLen;

        return {
          text: isMarkdownSplit ? text.replace(matchTitle, '') : text,
          title: matchTitle
          title: matchTitle,
          chunkMaxSize
        };
      })
      .filter((item) => !!item.title || !!item.text?.trim());
@ -188,7 +219,7 @@ const commonSplit = (props: SplitProps): SplitResponse => {
  /* Gets the overlap at the end of a text as the beginning of the next block */
  const getOneTextOverlapText = ({ text, step }: { text: string; step: number }): string => {
    const forbidOverlap = checkForbidOverlap(step);
    const maxOverlapLen = chunkLen * 0.4;
    const maxOverlapLen = chunkSize * 0.4;

    // step >= stepReges.length: Do not overlap incomplete sentences
    if (forbidOverlap || overlapLen === 0 || step >= stepReges.length) return '';
@ -229,15 +260,15 @@ const commonSplit = (props: SplitProps): SplitResponse => {
    const isCustomStep = checkIsCustomStep(step);
    const forbidConcat = isCustomStep; // when forbid=true, lastText is guaranteed to be empty

    // oversize
    // Over step
    if (step >= stepReges.length) {
      if (text.length < chunkLen * 3) {
      if (text.length < maxSize) {
        return [text];
      }
      // use slice-chunkLen to split text
      // use slice-chunkSize to split text
      const chunks: string[] = [];
      for (let i = 0; i < text.length; i += chunkLen - overlapLen) {
        chunks.push(text.slice(i, i + chunkLen));
      for (let i = 0; i < text.length; i += chunkSize - overlapLen) {
        chunks.push(text.slice(i, i + chunkSize));
      }
      return chunks;
    }
@ -245,19 +276,18 @@ const commonSplit = (props: SplitProps): SplitResponse => {
    // split text by special char
    const splitTexts = getSplitTexts({ text, step });

    const maxLen = splitTexts.length > 1 ? stepReges[step].maxLen : chunkLen;
    const minChunkLen = chunkLen * 0.7;

    const chunks: string[] = [];
    for (let i = 0; i < splitTexts.length; i++) {
      const item = splitTexts[i];

      const maxLen = item.chunkMaxSize; // max length of the current chunk

      const lastTextLen = lastText.length;
      const currentText = item.text;
      const newText = lastText + currentText;
      const newTextLen = newText.length;

      // In Markdown mode, the smallest chunks are forcibly split out, and at the last title all titles (including parent titles) are prepended to each small chunk
      // In Markdown mode, the smallest chunks are forcibly split out, and at the deepest title level all titles (including parent titles) are prepended to each small chunk
      if (isMarkdownStep) {
        // split new Text, split chunks must will greater 1 (small lastText)
        const innerChunks = splitTextRecursively({
@ -267,11 +297,13 @@ const commonSplit = (props: SplitProps): SplitResponse => {
          parentTitle: parentTitle + item.title
        });

        // Title only, no content.
        if (innerChunks.length === 0) {
          chunks.push(`${parentTitle}${item.title}`);
          continue;
        }

        // When merging at the deepest title level, the titles must be added back
        chunks.push(
          ...innerChunks.map(
            (chunk) =>
@ -282,9 +314,18 @@ const commonSplit = (props: SplitProps): SplitResponse => {
        continue;
      }

      // newText is too large(now, The lastText must be smaller than chunkLen)
      // newText is too large(now, The lastText must be smaller than chunkSize)
      if (newTextLen > maxLen) {
        // lastText greater minChunkLen, direct push it to chunks, not add to next chunk. (large lastText)
        const minChunkLen = maxLen * 0.8; // min length of the current chunk
        const maxChunkLen = maxLen * 1.2; // max length of the current chunk

        // The new text is not overly large; treat it directly as a new chunk
        if (newTextLen < maxChunkLen) {
          chunks.push(newText);
          lastText = getOneTextOverlapText({ text: newText, step }); // next chunk will start with overlayText
          continue;
        }
        // The previous text is already fairly large; make it a chunk of its own
        if (lastTextLen > minChunkLen) {
          chunks.push(lastText);

@ -294,13 +335,13 @@ const commonSplit = (props: SplitProps): SplitResponse => {
          continue;
        }

        // The new text block is relatively large and needs further splitting
        // The current text is relatively large and needs further splitting

        // split new Text, split chunks must will greater 1 (small lastText)
        // Split the new text block and append it to latestText
        const innerChunks = splitTextRecursively({
          text: newText,
          text: currentText,
          step: step + 1,
          lastText: '',
          lastText,
          parentTitle: parentTitle + item.title
        });
        const lastChunk = innerChunks[innerChunks.length - 1];
@ -328,16 +369,16 @@ const commonSplit = (props: SplitProps): SplitResponse => {

      // Not overlap
      if (forbidConcat) {
        chunks.push(item.text);
        chunks.push(currentText);
        continue;
      }

      lastText += item.text;
      lastText = newText;
    }

    /* If the last chunk is independent, it needs to be push chunks. */
    if (lastText && chunks[chunks.length - 1] && !chunks[chunks.length - 1].endsWith(lastText)) {
      if (lastText.length < chunkLen * 0.4) {
      if (lastText.length < chunkSize * 0.4) {
        chunks[chunks.length - 1] = chunks[chunks.length - 1] + lastText;
      } else {
        chunks.push(lastText);
@ -371,9 +412,9 @@ const commonSplit = (props: SplitProps): SplitResponse => {

/**
 * text split into chunks
 * chunkLen - one chunk len. max: 3500
 * chunkSize - one chunk len. max: 3500
 * overlapLen - The size of the before and after Text
 * chunkLen > overlapLen
 * chunkSize > overlapLen
 * markdown
 */
export const splitText2Chunks = (props: SplitProps): SplitResponse => {

@ -56,7 +56,7 @@ export const replaceSensitiveText = (text: string) => {
};

/* Make sure the first letter is definitely lowercase */
export const getNanoid = (size = 12) => {
export const getNanoid = (size = 16) => {
  const firstChar = customAlphabet('abcdefghijklmnopqrstuvwxyz', 1)();

  if (size === 1) return firstChar;

@ -84,11 +84,6 @@ export type FastGPTFeConfigsType = {
  github?: string;
  google?: string;
  wechat?: string;
  dingtalk?: string;
  wecom?: {
    corpid?: string;
    agentid?: string;
  };
  microsoft?: {
    clientId?: string;
    tenantId?: string;

@ -1,54 +1,70 @@
import { PromptTemplateItem } from '../type.d';
import { i18nT } from '../../../../web/i18n/utils';
import { getPromptByVersion } from './utils';

export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
  {
    title: i18nT('app:template.standard_template'),
    desc: i18nT('app:template.standard_template_des'),
    value: `{
    value: {
      ['4.9.2']: `{
  "sourceName": "{{source}}",
  "updateTime": "{{updateTime}}",
  "content": "{{q}}\n{{a}}"
}
`
    }
  },
  {
    title: i18nT('app:template.qa_template'),
    desc: i18nT('app:template.qa_template_des'),
    value: `<Question>
    value: {
      ['4.9.2']: `<Question>
{{q}}
</Question>
<Answer>
{{a}}
</Answer>`
    }
  },
  {
    title: i18nT('app:template.standard_strict'),
    desc: i18nT('app:template.standard_strict_des'),
    value: `{
    value: {
      ['4.9.2']: `{
  "sourceName": "{{source}}",
  "updateTime": "{{updateTime}}",
  "content": "{{q}}\n{{a}}"
}
`
    }
  },
  {
    title: i18nT('app:template.hard_strict'),
    desc: i18nT('app:template.hard_strict_des'),
    value: `<Question>
    value: {
      ['4.9.2']: `<Question>
{{q}}
</Question>
<Answer>
{{a}}
</Answer>`
    }
  }
];

export const getQuoteTemplate = (version?: string) => {
  const defaultTemplate = Prompt_QuoteTemplateList[0].value;

  return getPromptByVersion(version, defaultTemplate);
};

export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
  {
    title: i18nT('app:template.standard_template'),
    desc: '',
    value: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
    value: {
      ['4.9.2']: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考:

<Reference>
{{quote}}
@ -62,11 +78,13 @@ export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
- 使用与问题相同的语言回答。

问题:"""{{question}}"""`
    }
  },
  {
    title: i18nT('app:template.qa_template'),
    desc: '',
    value: `使用 <QA></QA> 标记中的问答对进行回答。
    value: {
      ['4.9.2']: `使用 <QA></QA> 标记中的问答对进行回答。

<QA>
{{quote}}
@ -79,11 +97,13 @@ export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
- 避免提及你是从 QA 获取的知识,只需要回复答案。

问题:"""{{question}}"""`
    }
  },
  {
    title: i18nT('app:template.standard_strict'),
    desc: '',
    value: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
    value: {
      ['4.9.2']: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考:

<Reference>
{{quote}}
@ -101,11 +121,13 @@ export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
- 使用与问题相同的语言回答。

问题:"""{{question}}"""`
    }
  },
  {
    title: i18nT('app:template.hard_strict'),
    desc: '',
    value: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。
    value: {
      ['4.9.2']: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。

<QA>
{{quote}}
@ -126,6 +148,7 @@ export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
- 使用与问题相同的语言回答。

问题:"""{{question}}"""`
    }
  }
];

@ -133,7 +156,8 @@ export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
  {
    title: i18nT('app:template.standard_template'),
    desc: '',
    value: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
    value: {
      ['4.9.2']: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考:

<Reference>
{{quote}}
@ -145,11 +169,13 @@ export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
- 保持答案与 <Reference></Reference> 中描述的一致。
- 使用 Markdown 语法优化回答格式。
- 使用与问题相同的语言回答。`
    }
  },
  {
    title: i18nT('app:template.qa_template'),
    desc: '',
    value: `使用 <QA></QA> 标记中的问答对进行回答。
    value: {
      ['4.9.2']: `使用 <QA></QA> 标记中的问答对进行回答。

<QA>
{{quote}}
@ -160,11 +186,13 @@ export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
- 回答的内容应尽可能与 <答案></答案> 中的内容一致。
- 如果没有相关的问答对,你需要澄清。
- 避免提及你是从 QA 获取的知识,只需要回复答案。`
    }
  },
  {
    title: i18nT('app:template.standard_strict'),
    desc: '',
    value: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
    value: {
      ['4.9.2']: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考:

<Reference>
{{quote}}
@ -180,11 +208,13 @@ export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
- 保持答案与 <Reference></Reference> 中描述的一致。
- 使用 Markdown 语法优化回答格式。
- 使用与问题相同的语言回答。`
    }
  },
  {
    title: i18nT('app:template.hard_strict'),
    desc: '',
    value: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。
    value: {
      ['4.9.2']: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。

<QA>
{{quote}}
@ -203,12 +233,28 @@ export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
- 避免提及你是从 QA 获取的知识,只需要回复答案。
- 使用 Markdown 语法优化回答格式。
- 使用与问题相同的语言回答。`
    }
  }
];

export const getQuotePrompt = (version?: string, role: 'user' | 'system' = 'user') => {
  const quotePromptTemplates =
    role === 'user' ? Prompt_userQuotePromptList : Prompt_systemQuotePromptList;

  const defaultTemplate = quotePromptTemplates[0].value;

  return getPromptByVersion(version, defaultTemplate);
};

// Document quote prompt
export const Prompt_DocumentQuote = `将 <FilesContent></FilesContent> 中的内容作为本次对话的参考:
<FilesContent>
{{quote}}
</FilesContent>
`;
export const getDocumentQuotePrompt = (version: string) => {
  const promptMap = {
    ['4.9.2']: `将 <FilesContent></FilesContent> 中的内容作为本次对话的参考:
<FilesContent>
{{quote}}
</FilesContent>
`
  };

  return getPromptByVersion(version, promptMap);
};

@ -1,3 +1,5 @@
import { getPromptByVersion } from './utils';

export const Prompt_AgentQA = {
  description: `<Context></Context> 标记中是一段文本,学习和分析它,并整理学习成果:
- 提出问题并给出每个问题的答案。
@ -25,7 +27,9 @@ A2:
`
};

export const Prompt_ExtractJson = `你可以从 <对话记录></对话记录> 中提取指定 Json 信息,你仅需返回 Json 字符串,无需回答问题。
export const getExtractJsonPrompt = (version?: string) => {
  const promptMap: Record<string, string> = {
    ['4.9.2']: `你可以从 <对话记录></对话记录> 中提取指定 Json 信息,你仅需返回 Json 字符串,无需回答问题。
<提取要求>
{{description}}
</提取要求>
@ -44,9 +48,31 @@ export const Prompt_ExtractJson = `你可以从 <对话记录></对话记录>
{{text}}
</对话记录>

提取的 json 字符串:`;
提取的 json 字符串:`
  };

export const Prompt_CQJson = `请帮我执行一个“问题分类”任务,将问题分类为以下几种类型之一:
  return getPromptByVersion(version, promptMap);
};

export const getExtractJsonToolPrompt = (version?: string) => {
  const promptMap: Record<string, string> = {
    ['4.9.2']: `我正在执行一个函数,需要你提供一些参数,请以 JSON 字符串格式返回这些参数,要求:
"""
- {{description}}
- 不是每个参数都是必须生成的,如果没有合适的参数值,不要生成该参数,或返回空字符串。
- 需要结合前面的对话内容,一起生成合适的参数。
"""

本次输入内容: """{{content}}"""
`
  };

  return getPromptByVersion(version, promptMap);
};

export const getCQPrompt = (version?: string) => {
  const promptMap: Record<string, string> = {
    ['4.9.2']: `请帮我执行一个"问题分类"任务,将问题分类为以下几种类型之一:

"""
{{typeList}}
@ -64,9 +90,13 @@ export const Prompt_CQJson = `请帮我执行一个“问题分类”任务,

问题:"{{question}}"
类型ID=
`;
`
  };

export const PROMPT_QUESTION_GUIDE = `You are an AI assistant tasked with predicting the user's next question based on the conversation history. Your goal is to generate 3 potential questions that will guide the user to continue the conversation. When generating these questions, adhere to the following rules:
  return getPromptByVersion(version, promptMap);
};

export const QuestionGuidePrompt = `You are an AI assistant tasked with predicting the user's next question based on the conversation history. Your goal is to generate 3 potential questions that will guide the user to continue the conversation. When generating these questions, adhere to the following rules:

1. Use the same language as the user's last question in the conversation history.
2. Keep each question under 20 characters in length.
@ -74,4 +104,5 @@ export const PROMPT_QUESTION_GUIDE = `You are an AI assistant tasked with predic
Analyze the conversation history provided to you and use it as context to generate relevant and engaging follow-up questions. Your predictions should be logical extensions of the current topic or related areas that the user might be interested in exploring further.

Remember to maintain consistency in tone and style with the existing conversation while providing diverse options for the user to choose from. Your goal is to keep the conversation flowing naturally and help the user delve deeper into the subject matter or explore related topics.`;
export const PROMPT_QUESTION_GUIDE_FOOTER = `Please strictly follow the format rules: \nReturn questions in JSON format: ['Question 1', 'Question 2', 'Question 3']. Your output: `;

export const QuestionGuideFooterPrompt = `Please strictly follow the format rules: \nReturn questions in JSON format: ['Question 1', 'Question 2', 'Question 3']. Your output: `;

19 packages/global/core/ai/prompt/utils.ts Normal file
@ -0,0 +1,19 @@
export const getPromptByVersion = (version?: string, promptMap: Record<string, string> = {}) => {
  const versions = Object.keys(promptMap).sort((a, b) => {
    const [majorA, minorA, patchA] = a.split('.').map(Number);
    const [majorB, minorB, patchB] = b.split('.').map(Number);

    if (majorA !== majorB) return majorB - majorA;
    if (minorA !== minorB) return minorB - minorA;
    return patchB - patchA;
  });

  if (!version) {
    return promptMap[versions[0]];
  }

  if (version in promptMap) {
    return promptMap[version];
  }
  return promptMap[versions[versions.length - 1]];
};
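A usage sketch of the resolution rules above (the version keys are illustrative): versions are sorted newest-first, so omitting the version yields the newest prompt, an exact match wins, and an unknown version falls back to the oldest entry.

```typescript
const promptMap = {
  '4.9.0': 'prompt for 4.9.0',
  '4.9.2': 'prompt for 4.9.2'
};

getPromptByVersion(undefined, promptMap); // no version requested -> newest: 'prompt for 4.9.2'
getPromptByVersion('4.9.0', promptMap); // exact match -> 'prompt for 4.9.0'
getPromptByVersion('4.8.0', promptMap); // unknown version -> oldest entry: 'prompt for 4.9.0'
```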
2 packages/global/core/ai/type.d.ts vendored
@ -80,5 +80,5 @@ export * from 'openai';
export type PromptTemplateItem = {
  title: string;
  desc: string;
  value: string;
  value: Record<string, string>;
};

@ -1,4 +1,3 @@
import { PROMPT_QUESTION_GUIDE } from '../ai/prompt/agent';
import {
  AppTTSConfigType,
  AppFileSelectConfigType,

13 packages/global/core/dataset/api.d.ts vendored
@ -1,5 +1,10 @@
import { DatasetDataIndexItemType, DatasetSchemaType } from './type';
import { DatasetCollectionTypeEnum, DatasetCollectionDataProcessModeEnum } from './constants';
import {
  DatasetCollectionTypeEnum,
  DatasetCollectionDataProcessModeEnum,
  ChunkSettingModeEnum,
  DataChunkSplitModeEnum
} from './constants';
import type { LLMModelItemType } from '../ai/model.d';
import { ParentIdType } from 'common/parentFolder/type';

@ -33,7 +38,13 @@ export type DatasetCollectionChunkMetadataType = {
  trainingType?: DatasetCollectionDataProcessModeEnum;
  imageIndex?: boolean;
  autoIndexes?: boolean;

  chunkSettingMode?: ChunkSettingModeEnum;
  chunkSplitMode?: DataChunkSplitModeEnum;

  chunkSize?: number;
  indexSize?: number;

  chunkSplitter?: string;
  qaPrompt?: string;
  metadata?: Record<string, any>;

8 packages/global/core/dataset/apiDataset.d.ts vendored
@ -1,3 +1,5 @@
import { RequireOnlyOne } from '../../common/type/utils';

export type APIFileItem = {
  id: string;
  parentId: string | null;
@ -15,9 +17,9 @@ export type APIFileServer = {

export type APIFileListResponse = APIFileItem[];

export type APIFileContentResponse = {
  content?: string;
  previewUrl?: string;
export type ApiFileReadContentResponse = {
  title?: string;
  rawText: string;
};

export type APIFileReadResponse = {

@ -16,3 +16,7 @@ export const getCollectionSourceData = (collection?: DatasetCollectionSchemaType
export const checkCollectionIsFolder = (type: DatasetCollectionTypeEnum) => {
  return type === DatasetCollectionTypeEnum.folder || type === DatasetCollectionTypeEnum.virtual;
};

export const collectionCanSync = (type: DatasetCollectionTypeEnum) => {
  return [DatasetCollectionTypeEnum.link, DatasetCollectionTypeEnum.apiFile].includes(type);
};

@ -13,38 +13,38 @@ export enum DatasetTypeEnum {
export const DatasetTypeMap = {
  [DatasetTypeEnum.folder]: {
    icon: 'common/folderFill',
    label: 'folder_dataset',
    collectionLabel: 'common.Folder'
    label: i18nT('dataset:folder_dataset'),
    collectionLabel: i18nT('common:Folder')
  },
  [DatasetTypeEnum.dataset]: {
    icon: 'core/dataset/commonDatasetOutline',
    label: 'common_dataset',
    collectionLabel: 'common.File'
    label: i18nT('dataset:common_dataset'),
    collectionLabel: i18nT('common:common.File')
  },
  [DatasetTypeEnum.websiteDataset]: {
    icon: 'core/dataset/websiteDatasetOutline',
    label: 'website_dataset',
    collectionLabel: 'common.Website'
    label: i18nT('dataset:website_dataset'),
    collectionLabel: i18nT('common:common.Website')
  },
  [DatasetTypeEnum.externalFile]: {
    icon: 'core/dataset/externalDatasetOutline',
    label: 'external_file',
    collectionLabel: 'common.File'
    label: i18nT('dataset:external_file'),
    collectionLabel: i18nT('common:common.File')
  },
  [DatasetTypeEnum.apiDataset]: {
    icon: 'core/dataset/externalDatasetOutline',
    label: 'api_file',
    collectionLabel: 'common.File'
    label: i18nT('dataset:api_file'),
    collectionLabel: i18nT('common:common.File')
  },
  [DatasetTypeEnum.feishu]: {
    icon: 'core/dataset/feishuDatasetOutline',
    label: 'feishu_dataset',
    collectionLabel: 'common.File'
    label: i18nT('dataset:feishu_dataset'),
    collectionLabel: i18nT('common:common.File')
  },
  [DatasetTypeEnum.yuque]: {
    icon: 'core/dataset/yuqueDatasetOutline',
    label: 'yuque_dataset',
    collectionLabel: 'common.File'
    label: i18nT('dataset:yuque_dataset'),
    collectionLabel: i18nT('common:common.File')
  }
};

@ -129,6 +129,16 @@ export const DatasetCollectionDataProcessModeMap = {
  }
};

export enum ChunkSettingModeEnum {
  auto = 'auto',
  custom = 'custom'
}

export enum DataChunkSplitModeEnum {
  size = 'size',
  char = 'char'
}

/* ------------ data -------------- */

/* ------------ training -------------- */

1 packages/global/core/dataset/controller.d.ts vendored
@ -13,6 +13,7 @@ export type CreateDatasetDataProps = {

export type UpdateDatasetDataProps = {
  dataId: string;

  q?: string;
  a?: string;
  indexes?: (Omit<DatasetDataIndexItemType, 'dataId'> & {

@ -15,6 +15,8 @@ export type PushDataToTrainingQueueProps = {
  vectorModel: string;
  vlmModel?: string;

  indexSize?: number;

  billId?: string;
  session?: ClientSession;
};

packages/global/core/dataset/training/utils.ts (new file, +136)
@@ -0,0 +1,136 @@
import { EmbeddingModelItemType, LLMModelItemType } from '../../../core/ai/model.d';
import {
  ChunkSettingModeEnum,
  DataChunkSplitModeEnum,
  DatasetCollectionDataProcessModeEnum
} from '../constants';

export const minChunkSize = 64; // min index and chunk size

// Chunk size
export const chunkAutoChunkSize = 1500;
export const getMaxChunkSize = (model: LLMModelItemType) => {
  return Math.max(model.maxContext - model.maxResponse, 2000);
};

// QA
export const defaultMaxChunkSize = 8000;
export const getLLMDefaultChunkSize = (model?: LLMModelItemType) => {
  if (!model) return defaultMaxChunkSize;
  return Math.max(Math.min(model.maxContext - model.maxResponse, defaultMaxChunkSize), 2000);
};

export const getLLMMaxChunkSize = (model?: LLMModelItemType) => {
  if (!model) return 8000;
  return Math.max(model.maxContext - model.maxResponse, 2000);
};

// Index size
export const getMaxIndexSize = (model?: EmbeddingModelItemType) => {
  return model?.maxToken || 512;
};
export const getAutoIndexSize = (model?: EmbeddingModelItemType) => {
  return model?.defaultToken || 512;
};

const indexSizeSelectList = [
  { label: '64', value: 64 },
  { label: '128', value: 128 },
  { label: '256', value: 256 },
  { label: '512', value: 512 },
  { label: '768', value: 768 },
  { label: '1024', value: 1024 },
  { label: '1536', value: 1536 },
  { label: '2048', value: 2048 },
  { label: '3072', value: 3072 },
  { label: '4096', value: 4096 },
  { label: '5120', value: 5120 },
  { label: '6144', value: 6144 },
  { label: '7168', value: 7168 },
  { label: '8192', value: 8192 }
];
export const getIndexSizeSelectList = (max = 512) => {
  return indexSizeSelectList.filter((item) => item.value <= max);
};

// Compute
export const computeChunkSize = (params: {
  trainingType: DatasetCollectionDataProcessModeEnum;
  chunkSettingMode?: ChunkSettingModeEnum;
  chunkSplitMode?: DataChunkSplitModeEnum;
  llmModel?: LLMModelItemType;
  chunkSize?: number;
}) => {
  if (params.trainingType === DatasetCollectionDataProcessModeEnum.qa) {
    if (params.chunkSettingMode === ChunkSettingModeEnum.auto) {
      return getLLMDefaultChunkSize(params.llmModel);
    }
  } else {
    // chunk
    if (params.chunkSettingMode === ChunkSettingModeEnum.auto) {
      return chunkAutoChunkSize;
    }
  }

  if (params.chunkSplitMode === DataChunkSplitModeEnum.char) {
    return getLLMMaxChunkSize(params.llmModel);
  }

  return Math.min(params.chunkSize || chunkAutoChunkSize, getLLMMaxChunkSize(params.llmModel));
};

export const computeChunkSplitter = (params: {
  chunkSettingMode?: ChunkSettingModeEnum;
  chunkSplitMode?: DataChunkSplitModeEnum;
  chunkSplitter?: string;
}) => {
  if (params.chunkSettingMode === ChunkSettingModeEnum.auto) {
    return undefined;
  }
  if (params.chunkSplitMode === DataChunkSplitModeEnum.size) {
    return undefined;
  }
  return params.chunkSplitter;
};
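A rough usage sketch of the new chunk helpers (the model literal and import paths are illustrative, not part of this diff):

import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import {
  computeChunkSize,
  computeChunkSplitter
} from '@fastgpt/global/core/dataset/training/utils';
import {
  ChunkSettingModeEnum,
  DataChunkSplitModeEnum,
  DatasetCollectionDataProcessModeEnum
} from '@fastgpt/global/core/dataset/constants';

// Hypothetical LLM: 16k context with a 4k response budget
const llmModel = { maxContext: 16000, maxResponse: 4000 } as LLMModelItemType;

// Auto mode + chunk training falls back to the fixed 1500 default
computeChunkSize({
  trainingType: DatasetCollectionDataProcessModeEnum.chunk,
  chunkSettingMode: ChunkSettingModeEnum.auto,
  llmModel
}); // => 1500

// A custom size is clamped to the model window: max(16000 - 4000, 2000) = 12000
computeChunkSize({
  trainingType: DatasetCollectionDataProcessModeEnum.chunk,
  chunkSettingMode: ChunkSettingModeEnum.custom,
  chunkSplitMode: DataChunkSplitModeEnum.size,
  chunkSize: 20000,
  llmModel
}); // => 12000

// A custom splitter only survives char-split mode; size mode drops it
computeChunkSplitter({
  chunkSettingMode: ChunkSettingModeEnum.custom,
  chunkSplitMode: DataChunkSplitModeEnum.char,
  chunkSplitter: '##'
}); // => '##'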
packages/global/core/dataset/type.d.ts (vendored, 9 changes)
@@ -2,6 +2,7 @@ import type { LLMModelItemType, EmbeddingModelItemType } from '../../core/ai/mod
import { PermissionTypeEnum } from '../../support/permission/constant';
import { PushDatasetDataChunkProps } from './api';
import {
+ DataChunkSplitModeEnum,
  DatasetCollectionDataProcessModeEnum,
  DatasetCollectionTypeEnum,
  DatasetStatusEnum,
@@ -14,6 +15,7 @@ import { Permission } from '../../support/permission/controller';
import { APIFileServer, FeishuServer, YuqueServer } from './apiDataset';
import { SourceMemberType } from 'support/user/type';
import { DatasetDataIndexTypeEnum } from './data/constants';
+import { ChunkSettingModeEnum } from './constants';

export type DatasetSchemaType = {
  _id: string;
@@ -88,7 +90,12 @@ export type DatasetCollectionSchemaType = {
  autoIndexes?: boolean;
  imageIndex?: boolean;
  trainingType: DatasetCollectionDataProcessModeEnum;
- chunkSize: number;
+
+ chunkSettingMode?: ChunkSettingModeEnum;
+ chunkSplitMode?: DataChunkSplitModeEnum;
+
+ chunkSize?: number;
+ indexSize?: number;
  chunkSplitter?: string;
  qaPrompt?: string;
};
@@ -1,7 +1,6 @@
import { TrainingModeEnum, DatasetCollectionTypeEnum } from './constants';
import { getFileIcon } from '../../common/file/icon';
import { strIsLink } from '../../common/string/tools';
-import { DatasetDataIndexTypeEnum } from './data/constants';

export function getCollectionIcon(
  type: DatasetCollectionTypeEnum = DatasetCollectionTypeEnum.file,
@@ -38,26 +37,6 @@ export function getSourceNameIcon({
  return 'file/fill/file';
}

-/* get dataset data default index */
-export function getDefaultIndex(props?: { q?: string; a?: string }) {
-  const { q = '', a } = props || {};
-
-  return [
-    {
-      text: q,
-      type: DatasetDataIndexTypeEnum.default
-    },
-    ...(a
-      ? [
-          {
-            text: a,
-            type: DatasetDataIndexTypeEnum.default
-          }
-        ]
-      : [])
-  ];
-}

export const predictDataLimitLength = (mode: TrainingModeEnum, data: any[]) => {
  if (mode === TrainingModeEnum.qa) return data.length * 20;
  if (mode === TrainingModeEnum.auto) return data.length * 5;
packages/global/core/plugin/type.d.ts (vendored, 2 changes)
@@ -41,6 +41,8 @@ export type PluginTemplateType = PluginRuntimeType & {
export type PluginRuntimeType = {
  id: string;
+ teamId?: string;
+ tmbId?: string;

  name: string;
  avatar: string;
  showStatus?: boolean;

@@ -20,6 +20,7 @@ export enum WorkflowIOValueTypeEnum {
  number = 'number',
  boolean = 'boolean',
  object = 'object',

  arrayString = 'arrayString',
  arrayNumber = 'arrayNumber',
  arrayBoolean = 'arrayBoolean',
@@ -76,16 +76,9 @@ export const Input_Template_Text_Quote: FlowNodeInputItemType = {
  valueType: WorkflowIOValueTypeEnum.string
};

-export const Input_Template_File_Link_Prompt: FlowNodeInputItemType = {
-  key: NodeInputKeyEnum.fileUrlList,
-  renderTypeList: [FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.input],
-  label: i18nT('app:file_quote_link'),
-  debugLabel: i18nT('app:file_quote_link'),
-  valueType: WorkflowIOValueTypeEnum.arrayString
-};
export const Input_Template_File_Link: FlowNodeInputItemType = {
  key: NodeInputKeyEnum.fileUrlList,
- renderTypeList: [FlowNodeInputTypeEnum.reference],
+ renderTypeList: [FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.input],
  label: i18nT('app:workflow.user_file_input'),
  debugLabel: i18nT('app:workflow.user_file_input'),
  description: i18nT('app:workflow.user_file_input_desc'),
@@ -17,7 +17,7 @@ import {
  Input_Template_History,
  Input_Template_System_Prompt,
  Input_Template_UserChatInput,
- Input_Template_File_Link_Prompt
+ Input_Template_File_Link
} from '../../input';
import { chatNodeSystemPromptTip, systemPromptTip } from '../../tip';
import { getHandleConfig } from '../../utils';
@@ -55,7 +55,7 @@ export const AiChatModule: FlowNodeTemplateType = {
  showStatus: true,
  isTool: true,
  courseUrl: '/docs/guide/workbench/workflow/ai_chat/',
- version: '490',
+ version: '4.9.0',
  inputs: [
    Input_Template_SettingAiModel,
    // --- settings modal
@@ -129,7 +129,7 @@ export const AiChatModule: FlowNodeTemplateType = {
  },
  Input_Template_History,
  Input_Template_Dataset_Quote,
- Input_Template_File_Link_Prompt,
+ Input_Template_File_Link,
  { ...Input_Template_UserChatInput, toolDescription: i18nT('workflow:user_question') }
],
outputs: [
@@ -30,7 +30,7 @@ export const ClassifyQuestionModule: FlowNodeTemplateType = {
  name: i18nT('workflow:question_classification'),
  intro: i18nT('workflow:intro_question_classification'),
  showStatus: true,
- version: '481',
+ version: '4.9.2',
  courseUrl: '/docs/guide/workbench/workflow/question_classify/',
  inputs: [
    {

@@ -27,7 +27,7 @@ export const ContextExtractModule: FlowNodeTemplateType = {
  showStatus: true,
  isTool: true,
  courseUrl: '/docs/guide/workbench/workflow/content_extract/',
- version: '481',
+ version: '4.9.2',
  inputs: [
    {
      ...Input_Template_SelectAIModel,

@@ -31,7 +31,7 @@ export const DatasetSearchModule: FlowNodeTemplateType = {
  showStatus: true,
  isTool: true,
  courseUrl: '/docs/guide/workbench/workflow/dataset_search/',
- version: '481',
+ version: '4.9.2',
  inputs: [
    {
      key: NodeInputKeyEnum.datasetSelectList,

@@ -23,7 +23,7 @@ export const ReadFilesNode: FlowNodeTemplateType = {
  name: i18nT('app:workflow.read_files'),
  intro: i18nT('app:workflow.read_files_tip'),
  showStatus: true,
- version: '4812',
+ version: '4.9.2',
  isTool: false,
  courseUrl: '/docs/guide/course/fileinput/',
  inputs: [
@@ -20,7 +20,7 @@ import { chatNodeSystemPromptTip, systemPromptTip } from '../tip';
import { LLMModelTypeEnum } from '../../../ai/constants';
import { getHandleConfig } from '../utils';
import { i18nT } from '../../../../../web/i18n/utils';
-import { Input_Template_File_Link_Prompt } from '../input';
+import { Input_Template_File_Link } from '../input';

export const ToolModule: FlowNodeTemplateType = {
  id: FlowNodeTypeEnum.tools,
@@ -33,7 +33,7 @@ export const ToolModule: FlowNodeTemplateType = {
  intro: i18nT('workflow:template.tool_call_intro'),
  showStatus: true,
  courseUrl: '/docs/guide/workbench/workflow/tool/',
- version: '4813',
+ version: '4.9.2',
  inputs: [
    {
      ...Input_Template_SettingAiModel,
@@ -97,7 +97,7 @@ export const ToolModule: FlowNodeTemplateType = {
      placeholder: chatNodeSystemPromptTip
    },
    Input_Template_History,
-   Input_Template_File_Link_Prompt,
+   Input_Template_File_Link,
    Input_Template_UserChatInput
  ],
  outputs: [
@@ -10,7 +10,7 @@
  "js-yaml": "^4.1.0",
  "jschardet": "3.1.1",
  "nanoid": "^5.1.3",
- "next": "14.2.24",
+ "next": "14.2.25",
  "openai": "4.61.0",
  "openapi-types": "^12.1.3",
  "json5": "^2.2.3",
packages/global/support/permission/memberGroup/api.d.ts (new file, vendored, +4)
@@ -0,0 +1,4 @@
export type GetGroupListBody = {
  searchKey?: string;
  withMembers?: boolean;
};
@@ -1,6 +1,7 @@
import { TeamMemberItemType } from 'support/user/team/type';
import { TeamPermission } from '../user/controller';
import { GroupMemberRole } from './constant';
+import { Permission } from '../controller';

type MemberGroupSchemaType = {
  _id: string;
@@ -16,12 +17,28 @@ type GroupMemberSchemaType = {
  role: `${GroupMemberRole}`;
};

-type MemberGroupType = MemberGroupSchemaType & {
-  members: {
-    tmbId: string;
-    role: `${GroupMemberRole}`;
-  }[]; // member info is available from other APIs; only the tmbId is needed here
-  permission: TeamPermission;
+type MemberGroupListItemType<T extends boolean | undefined> = MemberGroupSchemaType & {
+  members: T extends true
+    ? {
+        tmbId: string;
+        name: string;
+        avatar: string;
+      }[]
+    : undefined;
+  count: T extends true ? number : undefined;
+  owner?: T extends true
+    ? {
+        tmbId: string;
+        name: string;
+        avatar: string;
+      }
+    : undefined;
+  permission: T extends true ? Permission : undefined;
};

-type MemberGroupListType = MemberGroupType[];
type GroupMemberItemType = {
  tmbId: string;
  name: string;
  avatar: string;
  role: `${GroupMemberRole}`;
};
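A type-level sketch of the new group list contract (the sample values are illustrative, not part of this diff):

import type { GetGroupListBody } from 'support/permission/memberGroup/api';

const body: GetGroupListBody = { searchKey: 'dev', withMembers: true };

// withMembers: true populates members/count/owner/permission on each item;
// with false or undefined, those fields are typed as undefined.
type RichItem = MemberGroupListItemType<true>;  // members: { tmbId; name; avatar }[]
type SlimItem = MemberGroupListItemType<false>; // members: undefined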
packages/global/support/user/api.d.ts (vendored, 7 changes)
@@ -1,4 +1,7 @@
-import { MemberGroupSchemaType, MemberGroupType } from 'support/permission/memberGroup/type';
+import {
+  MemberGroupSchemaType,
+  MemberGroupListItemType
+} from 'support/permission/memberGroup/type';
import { OAuthEnum } from './constant';
import { TrackRegisterParams } from './login/api';
import { TeamMemberStatusEnum } from './team/constant';
@@ -12,8 +15,8 @@ export type PostLoginProps = {

export type OauthLoginProps = {
  type: `${OAuthEnum}`;
  code: string;
  callbackUrl: string;
  props: Record<string, string>;
} & TrackRegisterParams;

export type WxLoginProps = {

@@ -16,7 +16,5 @@ export enum OAuthEnum {
  google = 'google',
  wechat = 'wechat',
  microsoft = 'microsoft',
  dingtalk = 'dingtalk',
  wecom = 'wecom',
  sso = 'sso'
}
@@ -1,12 +1,12 @@
export type postCreateOrgData = {
  name: string;
- parentId: string;
  description?: string;
  avatar?: string;
+ orgId?: string;
};

export type putUpdateOrgMembersData = {
- orgId: string;
+ orgId?: string;
  members: {
    tmbId: string;
    // role: `${OrgMemberRole}`;
@@ -14,7 +14,7 @@ export type putUpdateOrgMembersData = {
};

export type putUpdateOrgData = {
- orgId: string;
+ orgId: string; // can not be undefined because the root org can not be updated
  name?: string;
  avatar?: string;
  description?: string;
@@ -22,7 +22,7 @@ export type putUpdateOrgData = {

export type putMoveOrgType = {
  orgId: string;
- targetOrgId: string;
+ targetOrgId?: string; // '' ===> move to root org
};

// type putChnageOrgOwnerData = {
@@ -3,7 +3,10 @@ import { OrgSchemaType } from './type';
export const OrgCollectionName = 'team_orgs';
export const OrgMemberCollectionName = 'team_org_members';

-export const getOrgChildrenPath = (org: OrgSchemaType) => `${org.path}/${org.pathId}`;
+export const getOrgChildrenPath = (org: OrgSchemaType) => {
+  if (org.path === '' && org.pathId === '') return '';
+  return `${org.path ?? ''}/${org.pathId}`;
+};

export enum SyncOrgSourceEnum {
  wecom = 'wecom'
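A behavior sketch for the reworked path helper (the org literals are illustrative):

const rootOrg = { path: '', pathId: '' } as OrgSchemaType;
const childOrg = { path: '/root', pathId: 'dev' } as OrgSchemaType;

getOrgChildrenPath(rootOrg);  // '' (no stray '/' for the root org anymore)
getOrgChildrenPath(childOrg); // '/root/dev'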
packages/global/support/user/team/org/type.d.ts (vendored, 13 changes)
@@ -1,5 +1,6 @@
-import type { TeamPermission } from 'support/permission/user/controller';
+import type { TeamPermission } from '../../../permission/user/controller';
import { ResourcePermissionType } from '../type';
import { SourceMemberType } from '../../type';

type OrgSchemaType = {
  _id: string;
@@ -7,7 +8,7 @@ type OrgSchemaType = {
  pathId: string;
  path: string;
  name: string;
- avatar?: string;
+ avatar: string;
  description?: string;
  updateTime: Date;
};
@@ -19,8 +20,14 @@ type OrgMemberSchemaType = {
  tmbId: string;
};

-type OrgType = Omit<OrgSchemaType, 'avatar'> & {
+export type OrgListItemType = OrgSchemaType & {
+  permission?: TeamPermission;
+  total: number; // members + children orgs
+};
+
+export type OrgType = Omit<OrgSchemaType, 'avatar'> & {
+  avatar: string;
  permission: TeamPermission;
  members: OrgMemberSchemaType[];
  total: number; // members + children orgs
};
packages/global/support/user/team/type.d.ts (vendored, 25 changes)
@@ -70,7 +70,13 @@ export type TeamTmbItemType = {
  permission: TeamPermission;
} & ThirdPartyAccountType;

-export type TeamMemberItemType = {
+export type TeamMemberItemType<
+  Options extends {
+    withPermission?: boolean;
+    withOrgs?: boolean;
+    withGroupRole?: boolean;
+  } = { withPermission: true; withOrgs: true; withGroupRole: false }
+> = {
  userId: string;
  tmbId: string;
  teamId: string;
@@ -78,11 +84,24 @@
  avatar: string;
  role: `${TeamMemberRoleEnum}`;
  status: `${TeamMemberStatusEnum}`;
- permission: TeamPermission;
  contact?: string;
  createTime: Date;
  updateTime?: Date;
-};
+} & (Options extends { withPermission: true }
+  ? {
+      permission: TeamPermission;
+    }
+  : {}) &
+  (Options extends { withOrgs: true }
+    ? {
+        orgs?: string[]; // full path name, pattern: /teamName/orgname1/orgname2
+      }
+    : {}) &
+  (Options extends { withGroupRole: true }
+    ? {
+        groupRole?: `${GroupMemberRole}`;
+      }
+    : {});
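A type-level sketch of how the Options generic shapes the member item (no runtime code; names come from the type above):

type DefaultMember = TeamMemberItemType; // withPermission + withOrgs by default
type WithGroupRole = TeamMemberItemType<{ withGroupRole: true }>;

type P = DefaultMember['permission']; // TeamPermission, present because withPermission defaults to true
type G = WithGroupRole['groupRole'];  // `${GroupMemberRole}` | undefined, only on the withGroupRole variant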

export type TeamTagItemType = {
  label: string;
@@ -30,11 +30,11 @@ export async function text2Speech({
    response_format: 'mp3',
    speed
  },
- modelData.requestUrl && modelData.requestAuth
+ modelData.requestUrl
    ? {
        path: modelData.requestUrl,
        headers: {
-         Authorization: `Bearer ${modelData.requestAuth}`
+         ...(modelData.requestAuth ? { Authorization: `Bearer ${modelData.requestAuth}` } : {})
        }
      }
    : {}
@@ -3,21 +3,25 @@ import { getAxiosConfig } from '../config';
import axios from 'axios';
import FormData from 'form-data';
-import { getSTTModel } from '../model';
+import { STTModelType } from '@fastgpt/global/core/ai/model.d';

export const aiTranscriptions = async ({
- model,
+ model: modelData,
  fileStream,
  headers
}: {
- model: string;
+ model: STTModelType;
  fileStream: fs.ReadStream;
  headers?: Record<string, string>;
}) => {
+ if (!modelData) {
+   return Promise.reject('no model');
+ }

  const data = new FormData();
- data.append('model', model);
+ data.append('model', modelData.model);
  data.append('file', fileStream);

- const modelData = getSTTModel(model);
  const aiAxiosConfig = getAxiosConfig();

  const { data: result } = await axios<{ text: string }>({

@@ -296,6 +296,30 @@
  "showStopSign": true,
  "responseFormatList": ["text", "json_object"]
},
+{
+  "model": "qwen-long",
+  "name": "qwen-long",
+  "maxContext": 100000,
+  "maxResponse": 6000,
+  "quoteMaxToken": 10000,
+  "maxTemperature": 1,
+  "vision": false,
+  "toolChoice": false,
+  "functionCall": false,
+  "defaultSystemChatPrompt": "",
+  "datasetProcess": false,
+  "usedInClassify": false,
+  "customCQPrompt": "",
+  "usedInExtractFields": false,
+  "usedInQueryExtension": false,
+  "customExtractPrompt": "",
+  "usedInToolCall": false,
+  "defaultConfig": {},
+  "fieldMap": {},
+  "type": "llm",
+  "showTopP": false,
+  "showStopSign": false
+},
{
  "model": "text-embedding-v3",
  "name": "text-embedding-v3",
@@ -4,8 +4,8 @@ import { countGptMessagesTokens, countPromptTokens } from '../../../common/strin
import { loadRequestMessages } from '../../chat/utils';
import { llmCompletionsBodyFormat } from '../utils';
import {
- PROMPT_QUESTION_GUIDE,
- PROMPT_QUESTION_GUIDE_FOOTER
+ QuestionGuidePrompt,
+ QuestionGuideFooterPrompt
} from '@fastgpt/global/core/ai/prompt/agent';
import { addLog } from '../../../common/system/log';
import json5 from 'json5';
@@ -27,7 +27,7 @@ export async function createQuestionGuide({
  ...messages,
  {
    role: 'user',
-   content: `${customPrompt || PROMPT_QUESTION_GUIDE}\n${PROMPT_QUESTION_GUIDE_FOOTER}`
+   content: `${customPrompt || QuestionGuidePrompt}\n${QuestionGuideFooterPrompt}`
  }
];
const requestMessages = await loadRequestMessages({

@@ -65,6 +65,7 @@ export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(

const requestBody: T = {
  ...body,
  model: modelData.model,
  temperature:
    typeof body.temperature === 'number'
      ? computedTemperature({
@@ -37,11 +37,12 @@ export async function splitCombinePluginId(id: string) {
  return { source, pluginId: id };
}

-type ChildAppType = SystemPluginTemplateItemType & { teamId?: string };
+type ChildAppType = SystemPluginTemplateItemType & { teamId?: string; tmbId?: string };

const getSystemPluginTemplateById = async (
  pluginId: string,
  versionId?: string
-): Promise<SystemPluginTemplateItemType> => {
+): Promise<ChildAppType> => {
  const item = getSystemPluginTemplates().find((plugin) => plugin.id === pluginId);
  if (!item) return Promise.reject(PluginErrEnum.unAuth);

@@ -67,12 +68,17 @@ const getSystemPluginTemplateById = async (
    : await getAppLatestVersion(plugin.associatedPluginId, app);
  if (!version.versionId) return Promise.reject('App version not found');

-   plugin.workflow = {
-     nodes: version.nodes,
-     edges: version.edges,
-     chatConfig: version.chatConfig
+   return {
+     ...plugin,
+     workflow: {
+       nodes: version.nodes,
+       edges: version.edges,
+       chatConfig: version.chatConfig
+     },
+     version: versionId || String(version.versionId),
+     teamId: String(app.teamId),
+     tmbId: String(app.tmbId)
    };
-   plugin.version = versionId || String(version.versionId);
  }
  return plugin;
};
@@ -168,6 +174,7 @@ export async function getChildAppRuntimeById(
  return {
    id: String(item._id),
    teamId: String(item.teamId),
+   tmbId: String(item.tmbId),
    name: item.name,
    avatar: item.avatar,
    intro: item.intro,
@@ -187,6 +194,7 @@
    pluginOrder: 0
  };
} else {
  // System
  return getSystemPluginTemplateById(pluginId, versionId);
}
})();
@@ -194,6 +202,7 @@
  return {
    id: app.id,
    teamId: app.teamId,
+   tmbId: app.tmbId,
    name: app.name,
    avatar: app.avatar,
    showStatus: app.showStatus,
@@ -1,6 +1,6 @@
import type {
- APIFileContentResponse,
  APIFileListResponse,
+ ApiFileReadContentResponse,
  APIFileReadResponse,
  APIFileServer
} from '@fastgpt/global/core/dataset/apiDataset';
@@ -8,6 +8,7 @@ import axios, { Method } from 'axios';
import { addLog } from '../../../common/system/log';
import { readFileRawTextByUrl } from '../read';
import { ParentIdType } from '@fastgpt/global/common/parentFolder/type';
+import { RequireOnlyOne } from '@fastgpt/global/common/type/utils';

type ResponseDataType = {
  success: boolean;
@@ -118,17 +119,24 @@ export const useApiDatasetRequest = ({ apiServer }: { apiServer: APIFileServer }
  tmbId: string;
  apiFileId: string;
  customPdfParse?: boolean;
-}) => {
- const data = await request<APIFileContentResponse>(
-   `/v1/file/content`,
-   { id: apiFileId },
-   'GET'
- );
+}): Promise<ApiFileReadContentResponse> => {
+ const data = await request<
+   {
+     title?: string;
+   } & RequireOnlyOne<{
+     content: string;
+     previewUrl: string;
+   }>
+ >(`/v1/file/content`, { id: apiFileId }, 'GET');
+ const title = data.title;
  const content = data.content;
  const previewUrl = data.previewUrl;

  if (content) {
-   return content;
+   return {
+     title,
+     rawText: content
+   };
  }
  if (previewUrl) {
    const rawText = await readFileRawTextByUrl({
@@ -138,7 +146,10 @@
      relatedId: apiFileId,
      customPdfParse
    });
-   return rawText;
+   return {
+     title,
+     rawText
+   };
  }
  return Promise.reject('Invalid content type: content or previewUrl is required');
};
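A sketch of the response contract this enforces, assuming RequireOnlyOne makes exactly one of the listed keys present:

type FileContentResponse = { title?: string } & RequireOnlyOne<{
  content: string;
  previewUrl: string;
}>;

const inline: FileContentResponse = { title: 'doc', content: 'raw text' }; // ok
const linked: FileContentResponse = { previewUrl: 'https://example.com/f.pdf' }; // ok
// Supplying both content and previewUrl fails to type-check.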
@@ -27,6 +27,11 @@ import { addDays } from 'date-fns';
import { MongoDatasetDataText } from '../data/dataTextSchema';
import { retryFn } from '@fastgpt/global/common/system/utils';
import { getTrainingModeByCollection } from './utils';
+import {
+  computeChunkSize,
+  computeChunkSplitter,
+  getLLMMaxChunkSize
+} from '@fastgpt/global/core/dataset/training/utils';

export const createCollectionAndInsertData = async ({
  dataset,
@@ -54,18 +59,22 @@

  const teamId = createCollectionParams.teamId;
  const tmbId = createCollectionParams.tmbId;
- // Chunk split params

+ // Set default params
  const trainingType =
    createCollectionParams.trainingType || DatasetCollectionDataProcessModeEnum.chunk;
- const chunkSize = createCollectionParams.chunkSize || 512;
- const chunkSplitter = createCollectionParams.chunkSplitter;
- const qaPrompt = createCollectionParams.qaPrompt;
- const usageName = createCollectionParams.name;
+ const chunkSize = computeChunkSize({
+   ...createCollectionParams,
+   trainingType,
+   llmModel: getLLMModel(dataset.agentModel)
+ });
+ const chunkSplitter = computeChunkSplitter(createCollectionParams);

  // 1. split chunks
  const chunks = rawText2Chunks({
    rawText,
-   chunkLen: chunkSize,
+   chunkSize,
+   maxSize: getLLMMaxChunkSize(getLLMModel(dataset.agentModel)),
    overlapRatio: trainingType === DatasetCollectionDataProcessModeEnum.chunk ? 0.2 : 0,
    customReg: chunkSplitter ? [chunkSplitter] : [],
    isQAImport
@@ -76,7 +85,7 @@
  teamId,
  insertLen: predictDataLimitLength(
    getTrainingModeByCollection({
-     trainingType,
+     trainingType: trainingType,
      autoIndexes: createCollectionParams.autoIndexes,
      imageIndex: createCollectionParams.imageIndex
    }),
@@ -88,6 +97,9 @@
  // 3. create collection
  const { _id: collectionId } = await createOneCollection({
    ...createCollectionParams,
+   trainingType,
+   chunkSize,
+   chunkSplitter,

    hashRawText: hashStr(rawText),
    rawTextLength: rawText.length,
@@ -111,7 +123,7 @@
  const { billId: newBillId } = await createTrainingUsage({
    teamId,
    tmbId,
-   appName: usageName,
+   appName: createCollectionParams.name,
    billSource: UsageSourceEnum.training,
    vectorModel: getEmbeddingModel(dataset.vectorModel)?.name,
    agentModel: getLLMModel(dataset.agentModel)?.name,
@@ -130,12 +142,13 @@
  agentModel: dataset.agentModel,
  vectorModel: dataset.vectorModel,
  vlmModel: dataset.vlmModel,
+ indexSize: createCollectionParams.indexSize,
  mode: getTrainingModeByCollection({
-   trainingType,
+   trainingType: trainingType,
    autoIndexes: createCollectionParams.autoIndexes,
    imageIndex: createCollectionParams.imageIndex
  }),
- prompt: qaPrompt,
+ prompt: createCollectionParams.qaPrompt,
  billId: traingBillId,
  data: chunks.map((item, index) => ({
    ...item,
@@ -207,11 +220,14 @@ export async function createOneCollection({
  // Parse settings
  customPdfParse,
  imageIndex,
+ autoIndexes,

  // Chunk settings
- trainingType = DatasetCollectionDataProcessModeEnum.chunk,
- autoIndexes,
- chunkSize = 512,
+ trainingType,
+ chunkSettingMode,
+ chunkSplitMode,
+ chunkSize,
+ indexSize,
  chunkSplitter,
  qaPrompt,

@@ -249,11 +265,14 @@
  // Parse settings
  customPdfParse,
  imageIndex,
+ autoIndexes,

  // Chunk settings
  trainingType,
- autoIndexes,
+ chunkSettingMode,
+ chunkSplitMode,
  chunkSize,
  indexSize,
  chunkSplitter,
  qaPrompt
}
@@ -3,7 +3,9 @@ const { Schema, model, models } = connectionMongo;
import { DatasetCollectionSchemaType } from '@fastgpt/global/core/dataset/type.d';
import {
  DatasetCollectionTypeMap,
- DatasetCollectionDataProcessModeEnum
+ DatasetCollectionDataProcessModeEnum,
+ ChunkSettingModeEnum,
+ DataChunkSplitModeEnum
} from '@fastgpt/global/core/dataset/constants';
import { DatasetCollectionName } from '../schema';
import {
@@ -94,11 +96,18 @@ const DatasetCollectionSchema = new Schema({
    type: String,
    enum: Object.values(DatasetCollectionDataProcessModeEnum)
  },
- chunkSize: {
-   type: Number,
-   required: true
+ chunkSettingMode: {
+   type: String,
+   enum: Object.values(ChunkSettingModeEnum)
+ },
+ chunkSplitMode: {
+   type: String,
+   enum: Object.values(DataChunkSplitModeEnum)
  },
+ chunkSize: Number,
  chunkSplitter: String,

+ indexSize: Number,
  qaPrompt: String
});
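A sketch of what the relaxed schema stores per collection (field values illustrative):

const customChunkCollection = {
  trainingType: DatasetCollectionDataProcessModeEnum.chunk,
  chunkSettingMode: ChunkSettingModeEnum.custom,
  chunkSplitMode: DataChunkSplitModeEnum.size,
  chunkSize: 1024,
  indexSize: 512
};
// In auto mode the per-collection size fields can simply be omitted,
// which is presumably why chunkSize dropped its `required: true`.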
@@ -11,7 +11,6 @@ import {
  DatasetCollectionSyncResultEnum,
  DatasetCollectionTypeEnum,
  DatasetSourceReadTypeEnum,
- DatasetTypeEnum,
  TrainingModeEnum
} from '@fastgpt/global/core/dataset/constants';
import { DatasetErrEnum } from '@fastgpt/global/common/error/code/dataset';
@@ -19,6 +18,7 @@ import { readDatasetSourceRawText } from '../read';
import { hashStr } from '@fastgpt/global/common/string/tools';
import { mongoSessionRun } from '../../../common/mongo/sessionRun';
import { createCollectionAndInsertData, delCollection } from './controller';
+import { collectionCanSync } from '@fastgpt/global/core/dataset/collection/utils';

/**
 * get all collection by top collectionId
@@ -137,10 +137,7 @@ export const collectionTagsToTagLabel = async ({
export const syncCollection = async (collection: CollectionWithDatasetType) => {
  const dataset = collection.dataset;

- if (
-   collection.type !== DatasetCollectionTypeEnum.link &&
-   dataset.type !== DatasetTypeEnum.apiDataset
- ) {
+ if (!collectionCanSync(collection.type)) {
    return Promise.reject(DatasetErrEnum.notSupportSync);
  }

@@ -155,15 +152,20 @@ export const syncCollection = async (collection: CollectionWithDatasetType) => {
    };
  }

- if (!collection.apiFileId) return Promise.reject('apiFileId is missing');
- if (!dataset.apiServer) return Promise.reject('apiServer not found');
+ const sourceId = collection.apiFileId;
+
+ if (!sourceId) return Promise.reject('apiFileId is missing');

  return {
    type: DatasetSourceReadTypeEnum.apiFile,
-   sourceId: collection.apiFileId,
-   apiServer: dataset.apiServer
+   sourceId,
+   apiServer: dataset.apiServer,
+   feishuServer: dataset.feishuServer,
+   yuqueServer: dataset.yuqueServer
  };
})();

- const rawText = await readDatasetSourceRawText({
+ const { title, rawText } = await readDatasetSourceRawText({
    teamId: collection.teamId,
    tmbId: collection.tmbId,
    ...sourceReadType
@@ -196,7 +198,7 @@
  createCollectionParams: {
    teamId: collection.teamId,
    tmbId: collection.tmbId,
-   name: collection.name,
+   name: title || collection.name,
    datasetId: collection.datasetId,
    parentId: collection.parentId,
    type: collection.type,
@@ -79,9 +79,12 @@ export const readDatasetSourceRawText = async ({
  apiServer?: APIFileServer; // api dataset
  feishuServer?: FeishuServer; // feishu dataset
  yuqueServer?: YuqueServer; // yuque dataset
-}): Promise<string> => {
+}): Promise<{
+  title?: string;
+  rawText: string;
+}> => {
  if (type === DatasetSourceReadTypeEnum.fileLocal) {
-   const { rawText } = await readFileContentFromMongo({
+   const { filename, rawText } = await readFileContentFromMongo({
      teamId,
      tmbId,
      bucketName: BucketNameEnum.dataset,
@@ -89,14 +92,20 @@
      isQAImport,
      customPdfParse
    });
-   return rawText;
+   return {
+     title: filename,
+     rawText
+   };
  } else if (type === DatasetSourceReadTypeEnum.link) {
    const result = await urlsFetch({
      urlList: [sourceId],
      selector
    });

-   return result[0]?.content || '';
+   return {
+     title: result[0]?.title,
+     rawText: result[0]?.content || ''
+   };
  } else if (type === DatasetSourceReadTypeEnum.externalFile) {
    if (!externalFileId) return Promise.reject('FileId not found');
    const rawText = await readFileRawTextByUrl({
@@ -106,9 +115,11 @@
      relatedId: externalFileId,
      customPdfParse
    });
-   return rawText;
+   return {
+     rawText
+   };
  } else if (type === DatasetSourceReadTypeEnum.apiFile) {
-   const rawText = await readApiServerFileContent({
+   const { title, rawText } = await readApiServerFileContent({
      apiServer,
      feishuServer,
      yuqueServer,
@@ -116,9 +127,15 @@
      teamId,
      tmbId
    });
-   return rawText;
+   return {
+     title,
+     rawText
+   };
  }
- return '';
+ return {
+   title: '',
+   rawText: ''
+ };
};

export const readApiServerFileContent = async ({
@@ -137,7 +154,10 @@
  teamId: string;
  tmbId: string;
  customPdfParse?: boolean;
-}) => {
+}): Promise<{
+  title?: string;
+  rawText: string;
+}> => {
  if (apiServer) {
    return useApiDatasetRequest({ apiServer }).getFileContent({
      teamId,
@@ -148,7 +168,10 @@
  }

  if (feishuServer || yuqueServer) {
-   return POST<string>(`/core/dataset/systemApiDataset`, {
+   return POST<{
+     title?: string;
+     rawText: string;
+   }>(`/core/dataset/systemApiDataset`, {
      type: 'content',
      feishuServer,
      yuqueServer,
@@ -162,7 +185,7 @@
export const rawText2Chunks = ({
  rawText,
  isQAImport,
- chunkLen = 512,
+ chunkSize = 512,
  ...splitProps
}: {
  rawText: string;
@@ -175,7 +198,7 @@

  const { chunks } = splitText2Chunks({
    text: rawText,
-   chunkLen,
+   chunkSize,
    ...splitProps
  });
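Callers now destructure the richer return shape instead of a bare string; a usage sketch (argument values illustrative):

const { title, rawText } = await readDatasetSourceRawText({
  teamId,
  tmbId,
  type: DatasetSourceReadTypeEnum.link,
  sourceId: 'https://example.com/page'
});
// title feeds collection renaming on sync; rawText goes on to rawText2Chunks.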
@@ -134,12 +134,10 @@ export const filterDatasetDataByMaxTokens = async (
  let totalTokens = 0;

  for await (const item of tokensScoreFilter) {
-   results.push(item);
-
    totalTokens += item.tokens;
-
-   if (totalTokens > maxTokens + 500) {
-     break;
-   }
+   results.push(item);
+   if (totalTokens > maxTokens) {
+     break;
+   }
@@ -12,6 +12,10 @@ import { getCollectionWithDataset } from '../controller';
import { mongoSessionRun } from '../../../common/mongo/sessionRun';
import { PushDataToTrainingQueueProps } from '@fastgpt/global/core/dataset/training/type';
import { i18nT } from '../../../../web/i18n/utils';
+import {
+  getLLMDefaultChunkSize,
+  getLLMMaxChunkSize
+} from '../../../../global/core/dataset/training/utils';

export const lockTrainingDataByTeamId = async (teamId: string): Promise<any> => {
  try {
@@ -55,6 +59,7 @@ export async function pushDataListToTrainingQueue({
  prompt,
  billId,
  mode = TrainingModeEnum.chunk,
+ indexSize,
  session
}: PushDataToTrainingQueueProps): Promise<PushDatasetDataResponse> {
  const getImageChunkMode = (data: PushDatasetDataChunkProps, mode: TrainingModeEnum) => {
@@ -68,38 +73,41 @@
  }
  return mode;
};

+ const vectorModelData = getEmbeddingModel(vectorModel);
+ if (!vectorModelData) {
+   return Promise.reject(i18nT('common:error_embedding_not_config'));
+ }
+ const agentModelData = getLLMModel(agentModel);
+ if (!agentModelData) {
+   return Promise.reject(i18nT('common:error_llm_not_config'));
+ }
+ if (mode === TrainingModeEnum.chunk || mode === TrainingModeEnum.auto) {
+   prompt = undefined;
+ }

  const { model, maxToken, weight } = await (async () => {
    if (mode === TrainingModeEnum.chunk) {
-     const vectorModelData = getEmbeddingModel(vectorModel);
-     if (!vectorModelData) {
-       return Promise.reject(i18nT('common:error_embedding_not_config'));
-     }
      return {
-       maxToken: vectorModelData.maxToken * 1.5,
+       maxToken: getLLMMaxChunkSize(agentModelData),
        model: vectorModelData.model,
        weight: vectorModelData.weight
      };
    }

    if (mode === TrainingModeEnum.qa || mode === TrainingModeEnum.auto) {
-     const agentModelData = getLLMModel(agentModel);
-     if (!agentModelData) {
-       return Promise.reject(i18nT('common:error_llm_not_config'));
-     }
      return {
-       maxToken: agentModelData.maxContext * 0.8,
+       maxToken: getLLMMaxChunkSize(agentModelData),
        model: agentModelData.model,
        weight: 0
      };
    }

    if (mode === TrainingModeEnum.image) {
      const vllmModelData = getVlmModel(vlmModel);
      if (!vllmModelData) {
        return Promise.reject(i18nT('common:error_vlm_not_config'));
      }
      return {
-       maxToken: vllmModelData.maxContext * 0.8,
+       maxToken: getLLMMaxChunkSize(vllmModelData),
        model: vllmModelData.model,
        weight: 0
      };
@@ -107,10 +115,6 @@

    return Promise.reject(`Training mode "${mode}" is inValid`);
  })();
- // Filter redundant params
- if (mode === TrainingModeEnum.chunk || mode === TrainingModeEnum.auto) {
-   prompt = undefined;
- }

  // filter repeat or equal content
  const set = new Set();
@@ -143,13 +147,13 @@

  const text = item.q + item.a;

  // Oversize llm tokens
  if (text.length > maxToken) {
    filterResult.overToken.push(item);
    return;
  }

  if (set.has(text)) {
    console.log('repeat', item);
    filterResult.repeat.push(item);
  } else {
    filterResult.success.push(item);
@@ -182,6 +186,7 @@
  q: item.q,
  a: item.a,
  chunkIndex: item.chunkIndex ?? 0,
+ indexSize,
  weight: weight ?? 0,
  indexes: item.indexes,
  retryCount: 5

@@ -76,6 +76,7 @@ const TrainingDataSchema = new Schema({
    type: Number,
    default: 0
  },
+ indexSize: Number,
  weight: {
    type: Number,
    default: 0
@@ -10,8 +10,7 @@ import type { ClassifyQuestionAgentItemType } from '@fastgpt/global/core/workflo
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
-import { replaceVariable } from '@fastgpt/global/common/string/tools';
-import { Prompt_CQJson } from '@fastgpt/global/core/ai/prompt/agent';
+import { getCQPrompt } from '@fastgpt/global/core/ai/prompt/agent';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getLLMModel } from '../../../ai/model';
import { getHistories } from '../utils';
@@ -23,6 +22,7 @@ import { loadRequestMessages } from '../../../chat/utils';
import { llmCompletionsBodyFormat } from '../../../ai/utils';
import { addLog } from '../../../../common/system/log';
import { ModelTypeEnum } from '../../../../../global/core/ai/model';
+import { replaceVariable } from '@fastgpt/global/common/string/tools';

type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.aiModel]: string;
@@ -99,7 +99,8 @@ const completions = async ({
  cqModel,
  externalProvider,
  histories,
- params: { agents, systemPrompt = '', userChatInput }
+ params: { agents, systemPrompt = '', userChatInput },
+ node: { version }
}: ActionProps) => {
  const messages: ChatItemType[] = [
    {
@@ -108,7 +109,7 @@
    {
      type: ChatItemValueTypeEnum.text,
      text: {
-       content: replaceVariable(cqModel.customCQPrompt || Prompt_CQJson, {
+       content: replaceVariable(cqModel.customCQPrompt || getCQPrompt(version), {
          systemPrompt: systemPrompt || 'null',
          typeList: agents
            .map((item) => `{"类型ID":"${item.key}", "问题类型":"${item.value}"}`)
@@ -16,7 +16,6 @@ import {
} from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
-import { Prompt_ExtractJson } from '@fastgpt/global/core/ai/prompt/agent';
import { replaceVariable, sliceJsonStr } from '@fastgpt/global/common/string/tools';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getHistories } from '../utils';
@@ -33,6 +32,10 @@ import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/ty
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { llmCompletionsBodyFormat } from '../../../ai/utils';
import { ModelTypeEnum } from '../../../../../global/core/ai/model';
+import {
+  getExtractJsonPrompt,
+  getExtractJsonToolPrompt
+} from '@fastgpt/global/core/ai/prompt/agent';

type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.history]?: ChatItemType[];
@@ -154,7 +157,8 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
const getFunctionCallSchema = async ({
  extractModel,
  histories,
- params: { content, extractKeys, description }
+ params: { content, extractKeys, description },
+ node: { version }
}: ActionProps) => {
  const messages: ChatItemType[] = [
    ...histories,
@@ -164,15 +168,10 @@ const getFunctionCallSchema = async ({
    {
      type: ChatItemValueTypeEnum.text,
      text: {
-       content: `我正在执行一个函数,需要你提供一些参数,请以 JSON 字符串格式返回这些参数,要求:
-"""
-${description ? `- ${description}` : ''}
-- 不是每个参数都是必须生成的,如果没有合适的参数值,不要生成该参数,或返回空字符串。
-- 需要结合前面的对话内容,一起生成合适的参数。
-"""
-
-本次输入内容: """${content}"""
-`
+       content: replaceVariable(getExtractJsonToolPrompt(version), {
+         description,
+         content
+       })
      }
    }
  ]
@@ -334,7 +333,8 @@ const completions = async ({
  extractModel,
  externalProvider,
  histories,
- params: { content, extractKeys, description = 'No special requirements' }
+ params: { content, extractKeys, description = 'No special requirements' },
+ node: { version }
}: ActionProps) => {
  const messages: ChatItemType[] = [
    {
@@ -343,23 +343,26 @@
    {
      type: ChatItemValueTypeEnum.text,
      text: {
-       content: replaceVariable(extractModel.customExtractPrompt || Prompt_ExtractJson, {
-         description,
-         json: extractKeys
-           .map((item) => {
-             const valueType = item.valueType || 'string';
-             if (valueType !== 'string' && valueType !== 'number') {
-               item.enum = undefined;
-             }
+       content: replaceVariable(
+         extractModel.customExtractPrompt || getExtractJsonPrompt(version),
+         {
+           description,
+           json: extractKeys
+             .map((item) => {
+               const valueType = item.valueType || 'string';
+               if (valueType !== 'string' && valueType !== 'number') {
+                 item.enum = undefined;
+               }

-             return `{"type":${item.valueType || 'string'}, "key":"${item.key}", "description":"${item.desc}" ${
-               item.enum ? `, "enum":"[${item.enum.split('\n')}]"` : ''
-             }}`;
-           })
-           .join('\n'),
-         text: `${histories.map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`).join('\n')}
+               return `{"type":${item.valueType || 'string'}, "key":"${item.key}", "description":"${item.desc}" ${
+                 item.enum ? `, "enum":"[${item.enum.split('\n')}]"` : ''
+               }}`;
+             })
+             .join('\n'),
+           text: `${histories.map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`).join('\n')}
Human: ${content}`
-       })
+         }
+       )
      }
    }
  ]
@@ -28,10 +28,10 @@ import { filterToolResponseToPreview } from './utils';
import { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { getFileContentFromLinks, getHistoryFileLinks } from '../../tools/readFiles';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
-import { Prompt_DocumentQuote } from '@fastgpt/global/core/ai/prompt/AIChat';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { postTextCensor } from '../../../../../common/api/requestPlusApi';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
+import { getDocumentQuotePrompt } from '@fastgpt/global/core/ai/prompt/AIChat';

type Response = DispatchNodeResultType<{
  [NodeOutputKeyEnum.answerText]: string;
@@ -40,7 +40,7 @@

export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
  const {
-   node: { nodeId, name, isEntry },
+   node: { nodeId, name, isEntry, version },
    runtimeNodes,
    runtimeEdges,
    histories,
@@ -118,7 +118,7 @@
  toolModel.defaultSystemChatPrompt,
  systemPrompt,
  documentQuoteText
-   ? replaceVariable(Prompt_DocumentQuote, {
+   ? replaceVariable(getDocumentQuotePrompt(version), {
        quote: documentQuoteText
      })
    : ''
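The prompt getters threaded through these dispatchers appear to be version-keyed lookups; a minimal sketch of the assumed pattern (the table contents are hypothetical):

const documentQuotePrompts: Record<string, string> = {
  '4.9.2': 'Document quote:\n"""\n{{quote}}\n"""' // hypothetical template text
};
const getDocumentQuotePromptSketch = (version: string) =>
  documentQuotePrompts[version] ?? documentQuotePrompts['4.9.2'];

// Each node stores the version it was saved with, so old workflows keep the
// prompt wording they were built against while new nodes pick up revisions.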
@@ -24,10 +24,9 @@ import {
  runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import {
- Prompt_DocumentQuote,
- Prompt_userQuotePromptList,
- Prompt_QuoteTemplateList,
- Prompt_systemQuotePromptList
+ getQuoteTemplate,
+ getQuotePrompt,
+ getDocumentQuotePrompt
} from '@fastgpt/global/core/ai/prompt/AIChat';
import type { AIChatNodeProps } from '@fastgpt/global/core/workflow/runtime/type.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
@@ -70,7 +69,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
  stream = false,
  externalProvider,
  histories,
- node: { name },
+ node: { name, version },
  query,
  runningUserInfo,
  workflowStreamResponse,
@@ -115,7 +114,7 @@
  filterDatasetQuote({
    quoteQA,
    model: modelConstantsData,
-   quoteTemplate
+   quoteTemplate: quoteTemplate || getQuoteTemplate(version)
  }),
  getMultiInput({
    histories: chatHistories,
@@ -147,6 +146,7 @@
  datasetQuoteText,
  aiChatQuoteRole,
  datasetQuotePrompt: quotePrompt,
+ version,
  userChatInput,
  systemPrompt,
  userFiles,
@@ -326,10 +326,10 @@ async function filterDatasetQuote({
}: {
  quoteQA: ChatProps['params']['quoteQA'];
  model: LLMModelItemType;
- quoteTemplate?: string;
+ quoteTemplate: string;
}) {
  function getValue(item: SearchDataResponseItemType, index: number) {
-   return replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
+   return replaceVariable(quoteTemplate, {
      id: item.id,
      q: item.q,
      a: item.a,
@@ -425,6 +425,7 @@ async function getChatMessages({
  datasetQuotePrompt = '',
  datasetQuoteText,
  useDatasetQuote,
+ version,
  histories = [],
  systemPrompt,
  userChatInput,
@@ -437,6 +438,7 @@
  aiChatQuoteRole: AiChatQuoteRoleType; // user: replace user prompt; system: replace system prompt
  datasetQuotePrompt?: string;
  datasetQuoteText: string;
+ version: string;

  useDatasetQuote: boolean;
  histories: ChatItemType[];
@@ -451,11 +453,7 @@
  const quoteRole =
    aiChatQuoteRole === 'user' || datasetQuotePrompt.includes('{{question}}') ? 'user' : 'system';

- const datasetQuotePromptTemplate = datasetQuotePrompt
-   ? datasetQuotePrompt
-   : quoteRole === 'user'
-     ? Prompt_userQuotePromptList[0].value
-     : Prompt_systemQuotePromptList[0].value;
+ const datasetQuotePromptTemplate = datasetQuotePrompt || getQuotePrompt(version, quoteRole);

  // Reset user input, add dataset quote to user input
  const replaceInputValue =
@@ -477,7 +475,7 @@
    })
    : '',
  documentQuoteText
-   ? replaceVariable(Prompt_DocumentQuote, {
+   ? replaceVariable(getDocumentQuotePrompt(version), {
        quote: documentQuoteText
      })
    : ''
@ -88,9 +88,9 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
|
||||
: {}),
|
||||
runningAppInfo: {
|
||||
id: String(plugin.id),
|
||||
// 如果是系统插件,则使用当前团队的 teamId 和 tmbId
|
||||
// 如果系统插件有 teamId 和 tmbId,则使用系统插件的 teamId 和 tmbId(管理员指定了插件作为系统插件)
|
||||
teamId: plugin.teamId || runningAppInfo.teamId,
|
||||
tmbId: pluginData?.tmbId || runningAppInfo.tmbId
|
||||
tmbId: plugin.tmbId || runningAppInfo.tmbId
|
||||
},
|
||||
variables: runtimeVariables,
|
||||
query: getPluginRunUserQuery({
|
||||
|
||||
@ -117,6 +117,9 @@ export const valueTypeFormat = (value: any, type?: WorkflowIOValueTypeEnum) => {
|
||||
return Boolean(value);
|
||||
}
|
||||
try {
|
||||
if (WorkflowIOValueTypeEnum.arrayString && typeof value === 'string') {
|
||||
return [value];
|
||||
}
|
||||
if (
|
||||
type &&
|
||||
[
|
||||
@ -124,7 +127,12 @@ export const valueTypeFormat = (value: any, type?: WorkflowIOValueTypeEnum) => {
|
||||
WorkflowIOValueTypeEnum.chatHistory,
|
||||
WorkflowIOValueTypeEnum.datasetQuote,
|
||||
WorkflowIOValueTypeEnum.selectApp,
|
||||
WorkflowIOValueTypeEnum.selectDataset
|
||||
WorkflowIOValueTypeEnum.selectDataset,
|
||||
WorkflowIOValueTypeEnum.arrayString,
|
||||
WorkflowIOValueTypeEnum.arrayNumber,
|
||||
WorkflowIOValueTypeEnum.arrayBoolean,
|
||||
WorkflowIOValueTypeEnum.arrayObject,
|
||||
WorkflowIOValueTypeEnum.arrayAny
|
||||
].includes(type) &&
|
||||
typeof value !== 'object'
|
||||
) {
|
||||
|
||||
@ -26,7 +26,7 @@
|
||||
"mammoth": "^1.6.0",
|
||||
"mongoose": "^8.10.1",
|
||||
"multer": "1.4.5-lts.1",
|
||||
"next": "14.2.24",
|
||||
"next": "14.2.25",
|
||||
"nextjs-cors": "^2.2.0",
|
||||
"node-cron": "^3.0.3",
|
||||
"node-xlsx": "^0.24.0",
|
||||
|
||||
@ -4,14 +4,14 @@ import { TeamErrEnum } from '@fastgpt/global/common/error/code/team';
|
||||
import { authUserPer } from '../user/auth';
|
||||
import { ManagePermissionVal } from '@fastgpt/global/support/permission/constant';
|
||||
|
||||
/*
|
||||
/*
|
||||
Team manager can control org
|
||||
*/
|
||||
export const authOrgMember = async ({
|
||||
orgIds,
|
||||
...props
|
||||
}: {
|
||||
orgIds: string | string[];
|
||||
orgIds?: string | string[];
|
||||
} & AuthModeType): Promise<AuthResponseType> => {
|
||||
const result = await authUserPer({
|
||||
...props,
|
||||
|
||||
@ -90,6 +90,6 @@ export async function createRootOrg({
|
||||
path: ''
|
||||
}
|
||||
],
|
||||
{ session }
|
||||
{ session, ordered: true }
|
||||
);
|
||||
}
|
||||
|
||||
@ -55,6 +55,14 @@ async function getTeamMember(match: Record<string, any>): Promise<TeamTmbItemTyp
|
||||
};
|
||||
}
|
||||
|
||||
export const getTeamOwner = async (teamId: string) => {
|
||||
const tmb = await MongoTeamMember.findOne({
|
||||
teamId,
|
||||
role: TeamMemberRoleEnum.owner
|
||||
}).lean();
|
||||
return tmb;
|
||||
};
|
||||
|
||||
export async function getTmbInfoByTmbId({ tmbId }: { tmbId: string }) {
|
||||
if (!tmbId) {
|
||||
return Promise.reject('tmbId or userId is required');
|
||||
|
||||
@ -1,49 +0,0 @@
|
||||
import {
|
||||
TeamCollectionName,
|
||||
TeamMemberCollectionName
|
||||
} from '@fastgpt/global/support/user/team/constant';
|
||||
import { connectionMongo, getMongoModel } from '../../../../common/mongo';
|
||||
import { InvitationSchemaType } from './type';
|
||||
import addDays from 'date-fns/esm/fp/addDays/index.js';
|
||||
const { Schema } = connectionMongo;
|
||||
|
||||
export const InvitationCollectionName = 'team_invitation_links';
|
||||
|
||||
const InvitationSchema = new Schema({
|
||||
teamId: {
|
||||
type: Schema.Types.ObjectId,
|
||||
ref: TeamCollectionName,
|
||||
required: true
|
||||
},
|
||||
usedTimesLimit: {
|
||||
type: Number,
|
||||
default: 1,
|
||||
enum: [1, -1]
|
||||
},
|
||||
forbidden: Boolean,
|
||||
expires: Date,
|
||||
description: String,
|
||||
members: {
|
||||
type: [String],
|
||||
default: []
|
||||
}
|
||||
});
|
||||
|
||||
InvitationSchema.virtual('team', {
|
||||
ref: TeamCollectionName,
|
||||
localField: 'teamId',
|
||||
foreignField: '_id',
|
||||
justOne: true
|
||||
});
|
||||
|
||||
try {
|
||||
InvitationSchema.index({ teamId: 1 });
|
||||
InvitationSchema.index({ expires: 1 }, { expireAfterSeconds: 30 * 24 * 60 * 60 });
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
}
|
||||
|
||||
export const MongoInvitationLink = getMongoModel<InvitationSchemaType>(
|
||||
InvitationCollectionName,
|
||||
InvitationSchema
|
||||
);
|
||||
@ -2,6 +2,7 @@ import { TeamMemberSchema } from '@fastgpt/global/support/user/team/type';
|
||||
|
||||
export type InvitationSchemaType = {
|
||||
_id: string;
|
||||
linkId: string;
|
||||
teamId: string;
|
||||
usedTimesLimit?: number;
|
||||
forbidden?: boolean;
|
||||
@ -25,11 +26,10 @@ export type InvitationLinkCreateType = {
|
||||
expires: InvitationLinkExpiresType;
|
||||
usedTimesLimit: 1 | -1;
|
||||
};
|
||||
export type InvitationLinkUpdateType = Partial<
|
||||
Omit<InvitationSchemaType, 'members' | 'teamId' | '_id'>
|
||||
> & {
|
||||
linkId: string;
|
||||
};
|
||||
|
||||
// export type InvitationLinkUpdateType = Partial<
|
||||
// Omit<InvitationSchemaType, 'members' | 'teamId' | '_id'>
|
||||
// >;
|
||||
|
||||
export type InvitationInfoType = InvitationSchemaType & {
|
||||
teamAvatar: string;
|
||||
|
||||
8
packages/web/common/zustand/index.ts
Normal file
@ -0,0 +1,8 @@
|
||||
import zustandNpm from 'zustand';
|
||||
|
||||
export * from 'zustand';
|
||||
export * from 'zustand/middleware';
|
||||
export * from 'zustand/middleware/immer';
|
||||
|
||||
export * from 'zustand';
|
||||
export default zustandNpm;
|
||||

@@ -10,7 +10,16 @@ import { Box, Flex } from '@chakra-ui/react';
  * @param [groupId] - group id to make the key unique
  * @returns
  */
-function AvatarGroup({ avatars, max = 3 }: { max?: number; avatars: string[] }) {
+function AvatarGroup({
+  avatars,
+  max = 3,
+  total
+}: {
+  max?: number;
+  avatars: string[];
+  total?: number;
+}) {
+  const remain = (total ?? avatars.length) - max;
   return (
     <Flex position="relative">
       {avatars.slice(0, max).map((avatar, index) => (
@@ -24,10 +33,10 @@ function AvatarGroup({ avatars, max = 3 }: { max?: number; avatars: string[] })
           borderRadius={'50%'}
         />
       ))}
-      {avatars.length > max && (
+      {remain > 0 && (
         <Box
           position="relative"
-          left={`${(max - 1) * 15}px`}
+          left={`${(max - 1) * 15 + 15}px`}
           w={'24px'}
           h={'24px'}
           borderRadius="50%"
@@ -37,7 +46,7 @@ function AvatarGroup({ avatars, max = 3 }: { max?: number; avatars: string[] })
           fontSize="sm"
           color="myGray.500"
         >
-          +{avatars.length - max}
+          +{String(remain)}
         </Box>
       )}
     </Flex>
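
The new `total` prop decouples the "+N" badge from the number of avatars actually fetched: `remain = (total ?? avatars.length) - max`, so a list that only loads a few member avatars can still show the real overflow count. A hypothetical call (variable names are illustrative):

// Only 3 avatars fetched, but the group has 12 members:
<AvatarGroup avatars={firstThreeAvatars} max={3} total={12} />
// renders three avatars plus a "+9" badge, since remain = 12 - 3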

@@ -72,7 +72,7 @@ const EditFolderModal = ({
           {...register('name', { required: true })}
           bg={'myGray.50'}
           autoFocus
-          maxLength={20}
+          maxLength={100}
         />
       </Box>
       <Box mt={4}>

packages/web/components/common/Radio/RadioGroup.tsx (new file, 67 lines)
@@ -0,0 +1,67 @@
+import React from 'react';
+import { Box, Flex, Grid, type GridProps, HStack } from '@chakra-ui/react';
+import { useTranslation } from 'next-i18next';
+import QuestionTip from '../MyTooltip/QuestionTip';
+
+type Props<T> = Omit<GridProps, 'onChange'> & {
+  list: {
+    title: string;
+    value: T;
+    tooltip?: string;
+  }[];
+  value: T;
+  defaultBg?: string;
+  activeBg?: string;
+  onChange: (e: T) => void;
+};
+
+const RadioGroup = <T = any,>({ list, value, onChange, ...props }: Props<T>) => {
+  const { t } = useTranslation();
+
+  return (
+    <Flex gap={[3, 5]} fontSize={['sm', 'md']} alignItems={'center'} {...props}>
+      {list.map((item) => (
+        <Flex
+          alignItems={'center'}
+          key={item.value as any}
+          cursor={'pointer'}
+          userSelect={'none'}
+          gap={1}
+          onClick={() => onChange(item.value)}
+        >
+          <Box
+            w={'18px'}
+            h={'18px'}
+            borderWidth={'2.4px'}
+            borderColor={value === item.value ? 'primary.015' : 'transparent'}
+            borderRadius={'50%'}
+          >
+            <Flex
+              w={'100%'}
+              h={'100%'}
+              borderWidth={'1px'}
+              borderColor={value === item.value ? 'primary.600' : 'borderColor.high'}
+              bg={value === item.value ? 'primary.1' : 'transparent'}
+              borderRadius={'50%'}
+              alignItems={'center'}
+              justifyContent={'center'}
+            >
+              <Box
+                w={'5px'}
+                h={'5px'}
+                borderRadius={'50%'}
+                bg={value === item.value ? 'primary.600' : 'transparent'}
+              />
+            </Flex>
+          </Box>
+          <HStack spacing={1} color={'myGray.900'} whiteSpace={'nowrap'} fontSize={'sm'}>
+            <Box>{typeof item.title === 'string' ? t(item.title as any) : item.title}</Box>
+            {!!item.tooltip && <QuestionTip label={item.tooltip} color={'myGray.600'} />}
+          </HStack>
+        </Flex>
+      ))}
+    </Flex>
+  );
+};
+
+export default RadioGroup;
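
RadioGroup is generic over the option value, so `value` and `onChange` stay type-safe for union types. A hypothetical usage (the state and labels are illustrative, not from this PR):

const [trainingMode, setTrainingMode] = useState<'chunk' | 'qa'>('chunk');

<RadioGroup<'chunk' | 'qa'>
  list={[
    { title: 'Chunk', value: 'chunk' },
    { title: 'QA', value: 'qa', tooltip: 'Split into question/answer pairs' }
  ]}
  value={trainingMode}
  onChange={setTrainingMode}
/>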

@@ -3,6 +3,11 @@ import { useToast } from './useToast';
 import { useCallback } from 'react';
 import { hasHttps } from '../common/system/utils';
 import { isProduction } from '@fastgpt/global/common/system/constants';
+import MyModal from '../components/common/MyModal';
+import React from 'react';
+import { Box, ModalBody } from '@chakra-ui/react';
+import Tag from '../components/common/Tag';
+import { useCommonStore } from '../store/useCommonStore';
 
 /**
  * copy text data
@@ -10,11 +15,12 @@ import { isProduction } from '@fastgpt/global/common/system/constants';
 export const useCopyData = () => {
   const { t } = useTranslation();
   const { toast } = useToast();
+  const { setCopyContent } = useCommonStore();
 
   const copyData = useCallback(
     async (
       data: string,
-      title: string | null = t('common:common.Copy Successful'),
+      title: string | null | undefined = t('common:common.Copy Successful'),
       duration = 1000
     ) => {
       data = data.trim();
@@ -22,37 +28,18 @@ export const useCopyData = () => {
       try {
         if ((hasHttps() || !isProduction) && navigator.clipboard) {
           await navigator.clipboard.writeText(data);
+          if (title) {
+            toast({
+              title,
+              status: 'success',
+              duration
+            });
+          }
         } else {
           throw new Error('');
         }
       } catch (error) {
-        // console.log(error);
-
-        const textarea = document.createElement('textarea');
-        textarea.value = data;
-        textarea.style.position = 'absolute';
-        textarea.style.opacity = '0';
-        document.body.appendChild(textarea);
-
-        textarea.select();
-        const res = document.execCommand('copy');
-        document.body.removeChild(textarea);
-
-        if (!res) {
-          return toast({
-            title: t('common:common.Copy_failed'),
-            status: 'error',
-            duration
-          });
-        }
+        setCopyContent(data);
       }
-
-      if (title) {
-        toast({
-          title,
-          status: 'success',
-          duration
-        });
-      }
     },
     [t, toast]
@@ -62,3 +49,29 @@ export const useCopyData = () => {
     copyData
   };
 };
+
+export const ManualCopyModal = () => {
+  const { t } = useTranslation();
+  const { copyContent, setCopyContent } = useCommonStore();
+
+  return (
+    <MyModal
+      isOpen={!!copyContent}
+      iconSrc="copy"
+      iconColor="primary.600"
+      title={t('common:common.Copy')}
+      maxW={['90vw', '500px']}
+      w={'100%'}
+      onClose={() => setCopyContent(undefined)}
+    >
+      <ModalBody>
+        <Tag w={'100%'} colorSchema="blue">
+          {t('common:can_copy_content_tip')}
+        </Tag>
+        <Box mt={3} borderRadius={'md'} p={3} border={'base'} userSelect={'all'}>
+          {copyContent}
+        </Box>
+      </ModalBody>
+    </MyModal>
+  );
+};
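
The rewrite drops the old `document.execCommand('copy')` textarea fallback: when the Clipboard API is unavailable (e.g. plain-HTTP production), `copyData` now stashes the text in the common store and `ManualCopyModal` displays it with `userSelect={'all'}` for manual copying. A sketch of the intended wiring; where the modal is mounted is an assumption, not shown in this diff:

// Mount the modal once near the app root so any copyData() call can open it.
const AppRoot = ({ children }: { children: React.ReactNode }) => (
  <>
    {children}
    <ManualCopyModal />
  </>
);

// In any component (Button is an illustrative Chakra element):
const { copyData } = useCopyData();
// On HTTPS this writes the clipboard and toasts; otherwise the manual-copy modal opens.
<Button onClick={() => copyData('some text')}>Copy</Button>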

@@ -14,6 +14,7 @@ import {
 } from 'ahooks';
 import MyBox from '../components/common/MyBox';
 import { useTranslation } from 'next-i18next';
+import { useRequest2 } from './useRequest';
 
 type ItemHeight<T> = (index: number, data: T) => number;
 const thresholdVal = 100;
@@ -183,22 +184,21 @@ export function useScrollPagination<
 >(
   api: (data: TParams) => Promise<TData>,
   {
     refreshDeps,
     scrollLoadType = 'bottom',
-
     pageSize = 10,
     params = {},
     EmptyTip,
-    showErrorToast = true
+    showErrorToast = true,
+    ...props
   }: {
     refreshDeps?: any[];
     scrollLoadType?: 'top' | 'bottom';
-
     pageSize?: number;
     params?: Record<string, any>;
     EmptyTip?: React.JSX.Element;
     showErrorToast?: boolean;
-  }
+  } & Parameters<typeof useRequest2>[1]
 ) {
   const { t } = useTranslation();
   const { toast } = useToast();
@@ -213,6 +213,7 @@ export function useScrollPagination<
   const loadData = useLockFn(
     async (init = false, ScrollContainerRef?: RefObject<HTMLDivElement>) => {
       if (noMore && !init) return;
+      setTrue();
 
       if (init) {
         setData([]);
@@ -221,8 +222,6 @@ export function useScrollPagination<
 
       const offset = init ? 0 : data.length;
 
-      setTrue();
-
       try {
         const res = await api({
           offset,
@@ -274,7 +273,7 @@ export function useScrollPagination<
     ({
       children,
       ScrollContainerRef,
-      isLoading,
+      isLoading: isLoadingProp,
       ...props
     }: {
       isLoading?: boolean;
@@ -283,7 +282,7 @@ export function useScrollPagination<
     } & BoxProps) => {
       const ref = ScrollContainerRef || ScrollRef;
       const loadText = useMemo(() => {
-        if (isLoading) return t('common:common.is_requesting');
+        if (isLoading || isLoadingProp) return t('common:common.is_requesting');
        if (noMore) return t('common:common.request_end');
         return t('common:common.request_more');
       }, [isLoading, noMore]);
@@ -338,13 +337,13 @@ export function useScrollPagination<
   );
 
   // Reload data
-  useRequest(
+  useRequest2(
     async () => {
       loadData(true);
     },
     {
       manual: false,
-      refreshDeps
+      refreshDeps,
+      ...props
     }
   );
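
Forwarding `...props` into `useRequest2` is what lets callers pass any of its options straight through `useScrollPagination`; this is how the PR adds debounce/throttle to `refreshDeps`-triggered reloads, e.g. for a search box. A hypothetical call; the returned names and the `debounceWait` pass-through (an ahooks useRequest option, which useRequest2 wraps) are assumptions:

const { ScrollList, isLoading } = useScrollPagination(getMemberList, {
  pageSize: 20,
  params: { searchKey },
  refreshDeps: [searchKey],
  debounceWait: 300 // forwarded via ...props: reloads are debounced while typing
});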

@@ -12,6 +12,7 @@
   "channel_status_unknown": "unknown",
   "channel_type": "Manufacturer",
   "clear_model": "Clear the model",
+  "confirm_delete_channel": "Confirm the deletion of the [{{name}}] channel?",
   "copy_model_id_success": "Copied model id",
   "create_channel": "Added channels",
   "default_url": "Default address",
@@ -36,6 +37,7 @@
   "model_tokens": "Input/Output tokens",
   "request_at": "Request time",
+  "request_duration": "Request duration: {{duration}}s",
   "retry_times": "Number of retry times",
   "running_test": "In testing",
   "search_model": "Search for models",
   "select_channel": "Select a channel name",

@@ -17,11 +17,11 @@
   "create_sub_org": "Create sub-organization",
   "delete": "delete",
   "delete_org": "Delete organization",
-  "edit_member": "Edit user",
-  "edit_member_tip": "username",
+  "edit_info": "Edit information",
+  "edit_member": "Edit user",
+  "edit_member_tip": "Name",
   "edit_org_info": "Edit organization information",
-  "expires": "Expiration",
+  "expires": "Expiration time",
   "forbid_hint": "After forbidden, this invitation link will become invalid. This action is irreversible. Are you sure you want to deactivate?",
   "forbid_success": "Forbid success",
   "forbidden": "Forbidden",
@@ -62,4 +62,4 @@
   "user_team_leave_team": "Leave the team",
   "user_team_leave_team_failed": "Failed to leave the team",
   "waiting": "To be accepted"
-}
+}

@@ -59,6 +59,7 @@
   "to_dataset": "Go to the Knowledge Base",
   "unsupported_file_type": "Unsupported file types",
   "upload": "Upload",
+  "variable_invisable_in_share": "Custom variables are not visible in login-free links",
   "view_citations": "View References",
   "web_site_sync": "Web Site Sync"
 }

@@ -37,6 +37,7 @@
   "add_new_param": "Add new param",
   "app.templateMarket.templateTags.Writing": "Writing",
   "back": "Back",
+  "can_copy_content_tip": "The browser cannot copy automatically; please copy the following content manually",
   "chose_condition": "Choose Condition",
   "chosen": "Chosen",
   "classification": "Classification",
@@ -128,7 +129,6 @@
   "common.Continue_Adding": "Continue adding",
   "common.Copy": "Copy",
   "common.Copy Successful": "Copied Successfully",
-  "common.Copy_failed": "Copy Failed, Please Copy Manually",
   "common.Create Failed": "Creation Failed",
   "common.Create Success": "Created Successfully",
   "common.Create Time": "Creation Time",
@@ -569,8 +569,6 @@
   "core.dataset.import.Custom process": "Custom Rules",
   "core.dataset.import.Custom process desc": "Customize segmentation and preprocessing rules",
   "core.dataset.import.Custom prompt": "Custom Prompt",
-  "core.dataset.import.Custom split char": "Custom Separator",
-  "core.dataset.import.Custom split char Tips": "Allows you to segment based on custom separators. Usually used for pre-processed data, using specific separators for precise segmentation.",
   "core.dataset.import.Custom text": "Custom Text",
   "core.dataset.import.Custom text desc": "Manually enter a piece of text as a dataset",
   "core.dataset.import.Data process params": "Data Processing Parameters",
@@ -646,11 +644,11 @@
   "core.dataset.training.Auto mode": "Auto index",
   "core.dataset.training.Auto mode Tip": "Increase the semantic richness of data blocks by generating related questions and summaries through sub-indexes and calling models, making it more conducive to retrieval. Requires more storage space and increases AI call times.",
   "core.dataset.training.Chunk mode": "Chunk",
-  "core.dataset.training.Full": "Estimated Over 5 Minutes",
+  "core.dataset.training.Full": "Estimated over 20 minutes",
   "core.dataset.training.Leisure": "Idle",
   "core.dataset.training.QA mode": "QA",
   "core.dataset.training.Vector queue": "Index Queue",
-  "core.dataset.training.Waiting": "Estimated 5 Minutes",
+  "core.dataset.training.Waiting": "Estimated 20 minutes",
   "core.dataset.training.Website Sync": "Website Sync",
   "core.dataset.training.tag": "Queue Status",
   "core.dataset.website.Base Url": "Base URL",

@@ -5,6 +5,7 @@
   "api_url": "API Url",
   "auto_indexes": "Automatically generate supplementary indexes",
   "auto_indexes_tips": "Additional index generation is performed through large models to improve semantic richness and improve retrieval accuracy.",
   "auto_training_queue": "Enhanced index queueing",
+  "chunk_max_tokens": "max_tokens",
   "close_auto_sync": "Are you sure you want to turn off automatic sync?",
   "collection.Create update time": "Creation/Update Time",
@@ -25,7 +26,7 @@
   "core.dataset.import.Adjust parameters": "Adjust parameters",
   "custom_data_process_params": "Custom",
   "custom_data_process_params_desc": "Customize data processing rules",
-  "data.ideal_chunk_length": "ideal block length",
+  "custom_split_sign_tip": "Allows you to chunk according to custom delimiters. Usually used for pre-processed data, using specific delimiters for precise chunking. You can use the | symbol to combine multiple delimiters, e.g. \"。|.\" matches both the Chinese and English period. Try to avoid regex-special symbols such as * ( ) [ ] { }.",
   "data_amount": "{{dataAmount}} Data, {{indexAmount}} Indexes",
   "data_index_num": "Index {{index}}",
   "data_process_params": "Params",
@@ -51,10 +52,9 @@
   "file_model_function_tip": "Enhances indexing and QA generation",
   "filename": "Filename",
   "folder_dataset": "Folder",
-  "ideal_chunk_length": "ideal block length",
-  "ideal_chunk_length_tips": "Segment according to the end symbol and combine multiple segments into one block. This value determines the estimated size of the block, if there is any fluctuation.",
   "image_auto_parse": "Automatic image indexing",
   "image_auto_parse_tips": "Call VLM to automatically label the pictures in the document and generate additional search indexes",
   "image_training_queue": "Queue of image processing",
   "import.Auto mode Estimated Price Tips": "The text understanding model needs to be called, which requires more points: {{price}} points/1K tokens",
   "import.Embedding Estimated Price Tips": "Only use the index model and consume a small amount of AI points: {{price}} points/1K tokens",
   "import_confirm": "Confirm upload",
@@ -65,6 +65,8 @@
   "import_param_setting": "Parameter settings",
   "import_select_file": "Select a file",
   "import_select_link": "Enter link",
+  "index_size": "Index size",
+  "index_size_tips": "When vectorized, the system will automatically further segment the blocks according to this size.",
   "is_open_schedule": "Enable scheduled synchronization",
   "keep_image": "Keep the picture",
   "move.hint": "After moving, the selected knowledge base/folder will inherit the permission settings of the new folder, and the original permission settings will become invalid.",
@@ -78,7 +80,7 @@
   "permission.des.write": "Ability to add and change knowledge base content",
   "preview_chunk": "Preview chunks",
   "preview_chunk_empty": "Unable to read the contents of the file",
-  "preview_chunk_intro": "Display up to 10 pieces",
+  "preview_chunk_intro": "A total of {{total}} blocks, up to 10 shown",
   "preview_chunk_not_selected": "Click on the file on the left to preview",
   "rebuild_embedding_start_tip": "Index model switching task has started",
   "rebuilding_index_count": "Number of indexes being rebuilt: {{count}}",
@@ -86,6 +88,16 @@
   "retain_collection": "Adjust Training Parameters",
   "retrain_task_submitted": "The retraining task has been submitted",
   "same_api_collection": "The same API set exists",
+  "split_chunk_char": "Chunk by specified delimiter",
+  "split_chunk_size": "Chunk by length",
+  "split_sign_break": "1 newline character",
+  "split_sign_break2": "2 newline characters",
+  "split_sign_custom": "Customize",
+  "split_sign_exclamatiob": "Exclamation mark",
+  "split_sign_null": "Not set",
+  "split_sign_period": "Period",
+  "split_sign_question": "Question mark",
+  "split_sign_semicolon": "Semicolon",
   "start_sync_website_tip": "Confirm to start synchronizing data? The old data will be deleted and re-fetched. Please confirm!",
   "sync_collection_failed": "Collection synchronization failed; please check whether the source file is accessible",
   "sync_schedule": "Timing synchronization",
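
The new `split_sign_*` entries back the custom split-sign picker, and `custom_split_sign_tip` documents the `|` convention for combining delimiters (e.g. "。|." to match both Chinese and English periods). A sketch of that convention only, not FastGPT's actual splitter implementation:

// Hypothetical helper: turn a user-supplied sign like "。|." into a regex
// alternation and split text on it.
function splitByCustomSign(text: string, sign: string): string[] {
  // Escape regex metacharacters in each delimiter (the tip warns about * ( ) [ ] { }).
  const escaped = sign.split('|').map((s) => s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'));
  return text
    .split(new RegExp(escaped.join('|')))
    .map((s) => s.trim())
    .filter(Boolean);
}

// splitByCustomSign('你好。Hello. Bye', '。|.') -> ['你好', 'Hello', 'Bye']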

@@ -12,6 +12,7 @@
   "channel_status_unknown": "未知",
   "channel_type": "厂商",
   "clear_model": "清空模型",
+  "confirm_delete_channel": "确认删除 【{{name}}】渠道?",
   "copy_model_id_success": "已复制模型id",
   "create_channel": "新增渠道",
   "default_url": "默认地址",
@@ -36,6 +37,7 @@
   "model_tokens": "输入/输出 Tokens",
   "request_at": "请求时间",
+  "request_duration": "请求时长: {{duration}}s",
   "retry_times": "重试次数",
   "running_test": "测试中",
   "search_model": "搜索模型",
   "select_channel": "选择渠道名",

@@ -20,11 +20,11 @@
   "delete_from_org": "移出部门",
   "delete_from_team": "移出团队",
   "delete_org": "删除部门",
-  "edit_member": "编辑用户",
-  "edit_member_tip": "用户名",
+  "edit_info": "编辑信息",
+  "edit_member": "编辑用户",
+  "edit_member_tip": "成员名",
   "edit_org_info": "编辑部门信息",
-  "expires": "有效期",
+  "expires": "过期时间",
   "export_members": "导出成员",
   "forbid_hint": "停用后,该邀请链接将失效。 该操作不可撤销,是否确认停用?",
   "forbid_success": "停用成功",
@@ -78,4 +78,4 @@
   "user_team_leave_team": "离开团队",
   "user_team_leave_team_failed": "离开团队失败",
   "waiting": "待接受"
-}
+}

@@ -59,6 +59,7 @@
   "to_dataset": "前往知识库",
   "unsupported_file_type": "不支持的文件类型",
   "upload": "上传",
+  "variable_invisable_in_share": "自定义变量在免登录链接中不可见",
   "view_citations": "查看引用",
   "web_site_sync": "Web站点同步"
 }