Compare commits

..

89 Commits

Author SHA1 Message Date
f22c22a635 记录个人部署的过程
Some checks failed
Build FastGPT images in Personal warehouse / build-fastgpt-images (push) Has been cancelled
2025-06-09 15:23:09 +08:00
yangxin
59b7c608fd 更改一下systemTitle
Some checks failed
Build FastGPT images in Personal warehouse / build-fastgpt-images (push) Has been cancelled
2025-06-06 21:10:06 +08:00
gggaaallleee
9fb5d05865
add audit (#4923)
* add audit

* update audit

* update audit
2025-06-03 21:28:26 +08:00
dependabot[bot]
b974574157
chore(deps): bump tar-fs in /plugins/webcrawler/SPIDER (#4945)
Bumps [tar-fs](https://github.com/mafintosh/tar-fs) from 3.0.8 to 3.0.9.
- [Commits](https://github.com/mafintosh/tar-fs/compare/v3.0.8...v3.0.9)

---
updated-dependencies:
- dependency-name: tar-fs
  dependency-version: 3.0.9
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-03 16:02:05 +08:00
gggaaallleee
5a5367d30b
add Bocha search template (#4933)
* add bocha

* Delete packages/service/support/operationLog/util.ts
2025-05-30 21:07:49 +08:00
Archer
8ed35ffe7e
Update dataset.md (#4927) 2025-05-29 18:25:59 +08:00
Archer
0f866fc552
feat: text collecion auto save for a txt file (#4924) 2025-05-29 17:57:27 +08:00
Archer
05c7ba4483
feat: Workflow node search (#4920)
* add node find (#4902)

* add node find

* plugin header

* fix

* fix

* remove

* type

* add searched status

* optimize

* perf: search nodes

---------

Co-authored-by: heheer <heheer@sealos.io>
2025-05-29 14:29:28 +08:00
heheer
fa80ce3a77
fix child app external variables (#4919) 2025-05-29 13:37:59 +08:00
Archer
830358aa72
remove invalid code (#4915) 2025-05-28 22:11:40 +08:00
Archer
02b214b3ec
feat: remove buffer;fix: custom pdf parse (#4914)
* fix: doc

* fix: remove buffer

* fix: pdf parse
2025-05-28 21:48:10 +08:00
Archer
a171c7b11c
perf: buffer;fix: back up split (#4913)
* perf: buffer

* fix: back up split

* fix: app limit

* doc
2025-05-28 18:18:25 +08:00
heheer
802de11363
fix runtool empty message (#4911)
* fix runtool empty message

* del unused code

* fix
2025-05-28 17:48:30 +08:00
Archer
b4ecfb0b79
Feat: Node latest version (#4905)
* node versions add keep the latest option (#4899)

* node versions add keep the latest option

* i18n

* perf: version code

* fix: ts

* hide system version

* hide system version

* hide system version

* fix: ts

* fix: ts

---------

Co-authored-by: heheer <heheer@sealos.io>
2025-05-28 10:46:32 +08:00
heheer
331b851a78
fix has tool node condition (#4907) 2025-05-28 10:34:02 +08:00
Archer
50d235c42a
fix: i18n (#4898) 2025-05-27 10:45:25 +08:00
Archer
9838593451
version doc (#4897) 2025-05-27 10:33:35 +08:00
Archer
c25cd48e72
perf: chunk trigger and paragraph split (#4893)
* perf: chunk trigger and paragraph split

* update max size computed

* perf: i18n

* remove table
2025-05-26 18:57:22 +08:00
Archer
874300a56a
fix: chinese name export (#4890)
* fix: chinese name export

* fix: xlsx white space

* doc

* doc
2025-05-25 21:19:29 +08:00
Archer
1dea2b71b4
perf: human check;perf: recursion get node response (#4888)
* perf: human check

* version

* perf: recursion get node response
2025-05-25 20:55:29 +08:00
Archer
a8673344b1
Test add menu (#4887)
* Feature: Add additional dataset options and their descriptions, updat… (#4874)

* Feature: Add additional dataset options and their descriptions, update menu components to support submenu functionality

* Optimize the menu component by removing the sub-menu position attribute, introducing the MyPopover component to support sub-menu functionality, and adding new dataset options and their descriptions in the dataset list.

---------

Co-authored-by: dreamer6680 <146868355@qq.com>

* api dataset tip

* remove invalid code

---------

Co-authored-by: dreamer6680 <1468683855@qq.com>
Co-authored-by: dreamer6680 <146868355@qq.com>
2025-05-25 20:16:03 +08:00
Archer
9709ae7a4f
feat: The workflow quickly adds applications (#4882)
* feat: add node by handle (#4860)

* feat: add node by handle

* fix

* fix edge filter

* fix

* move utils

* move context

* scale handle

* move postion to handle params & optimize handle scale (#4878)

* move position to handle params

* close button scale

* perf: node template ui

* remove handle scale (#4880)

* feat: handle connect

* add mouse down duration check (#4881)

* perf: long press time

* tool handle size

* optimize add node by handle (#4883)

---------

Co-authored-by: heheer <heheer@sealos.io>
2025-05-23 19:20:12 +08:00
Archer
fae76e887a
perf: dataset import params code (#4875)
* perf: dataset import params code

* perf: api dataset code

* model
2025-05-23 10:40:25 +08:00
dreamer6680
9af92d1eae
Open Yufu Feishu Knowledge Base Permissions (#4867)
* add feishu yuque dataset

* Open Yufu Feishu Knowledge Base Permissions

* Refactor the dataset request module, optimize the import path, and fix the type definition

---------

Co-authored-by: dreamer6680 <146868355@qq.com>
2025-05-22 23:19:55 +08:00
Archer
6a6719e93d
perf: isPc check;perf: dataset max token checker (#4872)
* perf: isPc check

* perf: dataset max token checker

* perf: dataset max token checker
2025-05-22 18:40:29 +08:00
Compasafe
50481f4ca8
fix: 修改语音组件中判断isPc的逻辑 (#4854)
* fix: 修改语音组件中判断isPc的逻辑

* fix: 修改语音组件中判断isPc的逻辑
2025-05-22 16:29:53 +08:00
Archer
88bd3aaa9e
perf: backup import (#4866)
* i18n

* remove invalid code

* perf: backup import

* backup tip

* fix: indexsize invalid
2025-05-22 15:53:51 +08:00
Archer
dd3c251603
fix: stream response (#4853) 2025-05-21 10:21:20 +08:00
Archer
aa55f059d4
perf: chat history api;perf: full text error (#4852)
* perf: chat history api

* perf: i18n

* perf: full text
2025-05-20 22:31:32 +08:00
dreamer6680
89c9a02650
change ui of price (#4851)
Co-authored-by: dreamer6680 <146868355@qq.com>
2025-05-20 20:51:07 +08:00
heheer
0f3bfa280a
fix quote reader duplicate rendering (#4845) 2025-05-20 20:21:00 +08:00
dependabot[bot]
593ebfd269
chore(deps): bump multer from 1.4.5-lts.1 to 2.0.0 (#4839)
Bumps [multer](https://github.com/expressjs/multer) from 1.4.5-lts.1 to 2.0.0.
- [Release notes](https://github.com/expressjs/multer/releases)
- [Changelog](https://github.com/expressjs/multer/blob/v2.0.0/CHANGELOG.md)
- [Commits](https://github.com/expressjs/multer/compare/v1.4.5-lts.1...v2.0.0)

---
updated-dependencies:
- dependency-name: multer
  dependency-version: 2.0.0
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-20 13:58:47 +08:00
John Chen
f6dc2204f5
fix:修正docker-compose-pgvecto.yml文件中,健康检查参数错误 (#4841) 2025-05-20 13:57:32 +08:00
Archer
d44c338059
perf: confirm ux (#4843)
* perf: delete tip ux

* perf: confirm ux
2025-05-20 13:41:56 +08:00
Archer
1dac2b70ec
perf: stream timeout;feat: hnsw max_scan_tuples config;fix: fulltext search merge error (#4838)
* perf: stream timeout

* feat: hnsw max_scan_tuples config

* fix: fulltext search merge error

* perf: jieba code
2025-05-20 09:59:24 +08:00
Archer
9fef3e15fb
Update doc (#4831)
* doc

* doc

* version update
2025-05-18 23:16:31 +08:00
Archer
2d2d0fffe9
Test apidataset (#4830)
* Dataset (#4822)

* apidataset support to basepath

* Resolve the error of the Feishu Knowledge Base modification configuration page not supporting baseurl bug.

* apibasepath

* add

* perf: api dataset

---------

Co-authored-by: dreamer6680 <1468683855@qq.com>
2025-05-17 22:41:10 +08:00
heheer
c6e0b5a1e7
offiaccount welcome text (#4827)
* offiaccount welcome text

* fix

* Update Image.tsx

---------

Co-authored-by: Archer <545436317@qq.com>
2025-05-17 22:03:18 +08:00
dependabot[bot]
932aa28a1f
chore(deps): bump undici in /plugins/webcrawler/SPIDER (#4825)
Bumps [undici](https://github.com/nodejs/undici) from 6.21.1 to 6.21.3.
- [Release notes](https://github.com/nodejs/undici/releases)
- [Commits](https://github.com/nodejs/undici/compare/v6.21.1...v6.21.3)

---
updated-dependencies:
- dependency-name: undici
  dependency-version: 6.21.3
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-17 01:16:31 +08:00
heheer
9c59bc2c17
fix: handle optional indexes in InputDataModal (#4828) 2025-05-16 15:07:33 +08:00
Archer
e145f63554
feat: chat error msg (#4826)
* perf: i18n

* feat: chat error msg

* feat: doc
2025-05-16 12:07:11 +08:00
Archer
554b2ca8dc
perf: mcp tool type (#4820) 2025-05-15 18:14:32 +08:00
Archer
4e83840c14
perf: tool call check (#4818)
* i18n

* tool call

* fix: mcp create permission;Plugin unauth tip

* fix: mcp create permission;Plugin unauth tip

* fix: Cite modal permission

* remove invalide cite

* perf: prompt

* filter fulltext search

* fix: ts

* fix: ts

* fix: ts
2025-05-15 15:51:34 +08:00
heheer
a6c80684d1
fix version match (#4814) 2025-05-14 17:45:31 +08:00
Archer
a4db03a3b7
feat: session id (#4817)
* feat: session id

* feat: Add default index
2025-05-14 17:24:02 +08:00
Archer
cba8f773fe
New license (#4809)
* feat: new-license

* perf: volumn watch

* Set use client
2025-05-14 13:55:09 +08:00
Archer
bd93f28d6f
update doc (#4806) 2025-05-13 21:24:35 +08:00
Archer
2063cb6314
i18n (#4805)
* i18n

* version

* copy node
2025-05-13 18:58:57 +08:00
dreamer6680
12acaf491c
change password rule (#4804)
* change password rule

* change password.tset.ts
2025-05-13 18:20:11 +08:00
heheer
3688842cc7
filter tool type version & fix unpublished version (#4803) 2025-05-13 17:58:51 +08:00
Archer
398d131bac
fix api_dataset.md (#4791) (#4801)
Co-authored-by: dreamer6680 <1468683855@qq.com>
2025-05-13 12:28:50 +08:00
Archer
d5f188a1a4
doc (#4798)
* doc

* fix: i18n

* fix: scroll load
2025-05-13 12:16:32 +08:00
heheer
1edca309c4
remove system plugin node version (#4797) 2025-05-13 11:04:48 +08:00
Archer
1470c37ef1
Test media tag (#4796)
* feat: add html video tag convertion (#4784)

Co-authored-by: Zhenyi Wang <zhenyiwang@intl.zju.edu.cn>

* perf: media tag

---------

Co-authored-by: Zhenyi-Wang <47094597+Zhenyi-Wang@users.noreply.github.com>
Co-authored-by: Zhenyi Wang <zhenyiwang@intl.zju.edu.cn>
2025-05-13 10:46:49 +08:00
heheer
bdb1221d94
optimize editor default value code (#4794) 2025-05-12 23:52:22 +08:00
heheer
cac4b1d435
fix monaco editor default value (#4793)
* fix monaco editor default value

* fix
2025-05-12 23:09:15 +08:00
Archer
0ef3d40296
Test version (#4792)
* plugin node version select (#4760)

* plugin node version select

* type

* fix

* fix

* perf: version list

* fix node version (#4787)

* change my select

* fix-ui

* fix test

* add test

* fix

* remove invalid version field

* filter deprecated field

* fix: claude tool call

* fix: test

---------

Co-authored-by: heheer <heheer@sealos.io>
2025-05-12 22:27:01 +08:00
Theresa
3cc6b8a17a
fix: improve handling of interactive node responses in workflow dispatch (#4786)
* fix: improve handling of interactive node responses in workflow dispatch

* fix: simplify interactive response handling in dispatch functions
2025-05-12 18:18:05 +08:00
Archer
681ec30c38
4.9.8 test (#4790)
* fix: doc url

* doc
2025-05-12 18:13:42 +08:00
Archer
24cd1c98dc
Update official_account.md (#4789) 2025-05-12 17:01:18 +08:00
Archer
eaceabcc43
Update official_account.md (#4788) 2025-05-12 16:56:40 +08:00
dreamer6680
a7f9411dca
feat: Update the system configuration type, add visibility controls for datasets and publishing channels (#4778) 2025-05-12 13:51:58 +08:00
Archer
657fa32217
feat: system config type;fix: retraining permission (#4772)
* feat: system config type

* fix: retraining permission
2025-05-08 22:09:55 +08:00
Archer
12d6948ba7
Feat: prelogin (#4773)
* add prelogin api (#4762)

* add prelogin api

* move type.d.ts

* perf: prelogin code

* doc

* fix: ts

---------

Co-authored-by: dreamer6680 <1468683855@qq.com>
2025-05-08 22:09:02 +08:00
Archer
83d54d046d
perf: replace cite;perf: app card ui (#4768)
* perf: replace cite

* perf: app card ui

* fix: test
2025-05-08 13:35:08 +08:00
Archer
c75f154728
Password security policy (#4765)
* Psw (#4748)

* feat: 添加重置密码功能及相关接口

- 在用户模型中新增 passwordUpdateTime 字段以记录密码更新时间。
- 更新用户模式以支持密码更新时间的存储。
- 新增重置密码的模态框组件,允许用户重置密码。
- 实现重置密码的 API 接口,支持根据用户 ID 更新密码。
- 更新相关国际化文件,添加重置密码的提示信息。

* 更新国际化文件,添加重置密码相关提示信息,并优化重置密码模态框的实现。修复部分代码逻辑,确保用户体验流畅。

* 更新国际化文件,添加重置密码相关提示信息,优化重置密码模态框的实现,修复部分代码逻辑,确保用户体验流畅。新增获取用户密码更新时间的API接口,并调整相关逻辑以支持密码重置功能。

* update

* fix

* fix

* Added environment variables NEXT_PUBLIC_PASSWORD_UPDATETIME to support password update time configuration, update related logic to implement password mandatory update function, and optimize the implementation of reset password modal box to improve user experience.

* update index

* 更新用户密码重置功能,调整相关API接口,优化重置密码模态框的实现,确保用户体验流畅。修复部分代码逻辑,更新国际化提示信息。

* 删除获取用户密码更新时间的API接口,并在布局组件中移除不必要的重置密码模态框。优化代码结构,提升可维护性。

* update

* perf: reset expired password code

* perf: layout child components

* doc

* remove invalid env

* perf: update password code

---------

Co-authored-by: dreamer6680 <1468683855@qq.com>
2025-05-08 12:11:08 +08:00
Archer
96e7dd581e
fix: json schema parse error;fix: retraining image reset (#4757)
* i18n

* fix: json schema parse error

* fix: retraining image reset

* update doc
2025-05-07 15:38:03 +08:00
Theresa
2d3117c5da
feat: update ESLint config with @typescript-eslint/consistent-type-imports (#4746)
* update: Add type

* fix: update import statement for NextApiRequest type

* fix: update imports to use type for LexicalEditor and EditorState

* Refactor imports to use 'import type' for type-only imports across multiple files

- Updated imports in various components and API files to use 'import type' for better clarity and to optimize TypeScript's type checking.
- Ensured consistent usage of type imports in files related to chat, dataset, workflow, and user management.
- Improved code readability and maintainability by distinguishing between value and type imports.

* refactor: remove old ESLint configuration and add new rules

- Deleted the old ESLint configuration file from the app project.
- Added a new ESLint configuration file with updated rules and settings.
- Changed imports to use type-only imports in various files for better clarity and performance.
- Updated TypeScript configuration to remove unnecessary options.
- Added an ESLint ignore file to exclude build and dependency directories from linting.

* fix: update imports to use 'import type' for type-only imports in schema files
2025-05-06 17:33:09 +08:00
Deepturn
5361674a2c
Update dataset.md (#4752)
去除:
2025-05-06 17:07:57 +08:00
Archer
ef537f391c
fix: rerank usage (#4751)
* remove invalid index

* perf: i18n

* fix: rerank usage
2025-05-06 16:23:36 +08:00
dependabot[bot]
367ee517ec
chore(deps): bump transformers in /plugins/model/llm-ChatGLM2 (#4741)
Bumps [transformers](https://github.com/huggingface/transformers) from 4.48.0 to 4.50.0.
- [Release notes](https://github.com/huggingface/transformers/releases)
- [Commits](https://github.com/huggingface/transformers/compare/v4.48.0...v4.50.0)

---
updated-dependencies:
- dependency-name: transformers
  dependency-version: 4.50.0
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-06 14:27:20 +08:00
Deepturn
39cf001358
Update dataset.md (#4747)
知识库上传文件接口,新增控制PDF解析参数
2025-05-06 14:27:01 +08:00
Archer
7b9f935119
update next version (#4743) 2025-05-05 18:44:56 +08:00
Archer
a6fbfac96f
Yuque dataset baseurl (#4742)
* Yuque dataset baseurl (#4512)

* feat: 增加API数据集功能和国际化支持

* 在apiDataset.d.ts中添加uuid、slug、parent_uuid和children字段
* 更新英文、简体中文和繁体中文的dataset.json文件,增加多条提示信息
* 在ApiDatasetForm组件中实现目录选择功能,支持获取Yuque路径
* 新增BaseUrlSelector组件,用于选择根目录
* 实现getpath API,支持根据Yuque服务器获取路径
* 更新相关API调用,确保兼容性和功能完整性

* feat: 更新Yuque服务器的baseUrl处理逻辑

* 在apiDataset.d.ts中将YuqueServer的baseUrl属性改为必填
* 更新ApiDatasetForm组件,调整baseUrl的状态管理和路径加载逻辑
* 新增getcatalog API以支持获取数据集目录
* 修改相关API调用,确保baseUrl的正确传递和使用
* 优化路径返回值为中文“根目录”

* feat: 更新数据集API调用逻辑

* 将getFeishuAndYuqueDatasetFileList替换为getProApiDatasetFileListRequest,统一API调用方式
* 更新相关文件以确保新API的正确使用
* 优化代码结构,提高可读性和维护性

* 清理代码:移除ApiDatasetForm、BaseUrlSelector和相关API中的调试日志

* 删除不必要的console.log语句,提升代码整洁性
* 确保API逻辑的清晰性,避免冗余输出

* 更新数据集相关类型和API逻辑

- 在apiDataset.d.ts中添加ApiDatasetDetailResponse类型,移除不必要的字段。
- 在proApi.ts中新增DETAIL操作类型及相关参数类型。
- 修改ApiDatasetForm.tsx以支持新的API调用逻辑,统一路径获取方式。
- 更新BaseUrlSelector组件,简化目录选择逻辑。
- 优化getpath.ts和getcatalog.ts中的路径处理逻辑,确保API调用的一致性和正确性。
- 清理不必要的代码和注释,提高代码可读性。

* 清理ApiDatasetForm组件中的调试日志,移除console.log语句以提升代码整洁性和可读性。

* fix

* updata apidatasetform

* remove console

* updata

* updata

* updata editapiservermodal

* updata i18n

* add type

* update getpath

* add type

* perf: yuque dataset baseurl

* perf: remove rerank records

* fix: ts

---------

Co-authored-by: dreamer6680 <1468683855@qq.com>
2025-05-05 18:37:14 +08:00
Archer
864eff47c7
perf: i18n (#4740)
* feat: login limit time config

* doc

* perf: code

* i18n update

* update lock

* fix: ts

* update package
2025-05-05 16:16:59 +08:00
Archer
fdd4e9edbd
Test parse cite and add tool call parallel (#4737)
* add quote response filter (#4727)

* chatting

* add quote response filter

* add test

* remove comment

* perf: cite hidden

* perf: format llm response

* feat: comment

* update default chunk size

* update default chunk size

---------

Co-authored-by: heheer <heheer@sealos.io>
2025-04-30 17:43:50 +08:00
dependabot[bot]
683ab6c17d
chore(deps): bump transformers in /plugins/model/llm-Baichuan2 (#4733)
Bumps [transformers](https://github.com/huggingface/transformers) from 4.48.0 to 4.50.0.
- [Release notes](https://github.com/huggingface/transformers/releases)
- [Commits](https://github.com/huggingface/transformers/compare/v4.48.0...v4.50.0)

---
updated-dependencies:
- dependency-name: transformers
  dependency-version: 4.50.0
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-30 12:08:12 +08:00
Finley Ge
9f21add7b5
fix: app/dataset list folders' side menu permission bug (#4734) 2025-04-30 12:07:53 +08:00
Archer
4b8db293ce
perf: init token worker (#4726)
* perf: init token worker

* init worker

* preload worker

* preload worker

* remove invalid code
2025-04-29 16:00:35 +08:00
Archer
5e3ec4d6f3
Update doc (#4725)
* doc

* doc

* config
2025-04-29 13:29:50 +08:00
Archer
360a2ec392
Remove quote prompt in api request (#4724)
* chat completion add parsequote param (#4720)

* chat completion add parsequote param

* fix

* perf: quote prompt

---------

Co-authored-by: heheer <heheer@sealos.io>
2025-04-29 13:10:17 +08:00
Archer
b0297d2915
Add test (#4721)
* Add unit tests for Markdown utility functions and CodeClassNameEnum. (#4716)

Co-authored-by: gru-agent[bot] <185149714+gru-agent[bot]@users.noreply.github.com>

* Add unit tests for authChatCrud and authCollectionInChat functions in chat service. (#4718)

Co-authored-by: gru-agent[bot] <185149714+gru-agent[bot]@users.noreply.github.com>

---------

Co-authored-by: gru-agent[bot] <185149714+gru-agent[bot]@users.noreply.github.com>
2025-04-29 12:15:07 +08:00
Archer
5023da4489
Feat: Quote auth (#4715)
* fix outlink quote number auth (#4705)

* perf: quote auth

* feat: qwen3 config

---------

Co-authored-by: heheer <heheer@sealos.io>
2025-04-29 12:05:04 +08:00
Finley Ge
26e320b0df
fix: app/dataset list peredit bug (#4714) 2025-04-29 11:12:49 +08:00
Hwting
14ad6aef41
perf: Add Redis Health Check (#4707)
* Update docker.md

Fix a document error

* Update docker.md

* Update docker-compose-milvus.yml

1.Redis Health Check

* Update docker-compose-pgvector.yml

1.Redis Health Check

* Update docker-compose-zilliz.yml

1.Redis Health Check
2025-04-29 10:05:00 +08:00
Archer
d2a32c363d
perf: chat log list (#4704)
* perf: chat log list

* remove log
2025-04-28 18:19:15 +08:00
dreamer6680
e0b85ca4c2
Test (#4701)
* fix utmparams

* add utmparams

* fix

* fix

* Optimized the JsonImportModal shutdown logic to ensure that UTM parameters and workflows are removed when closing.

* remove some import
2025-04-28 17:55:43 +08:00
heheer
3a911c5130
fix chat log list api (#4700) 2025-04-28 17:51:57 +08:00
Archer
433e7ed911
Update doc (#4697)
* perf: share link tip

* udpate doc
2025-04-28 14:19:42 +08:00
1116 changed files with 17114 additions and 9127 deletions

25
.eslintignore Normal file
View File

@ -0,0 +1,25 @@
# 构建输出目录
dist/
build/
.next/
out/
# 依赖目录
node_modules/
# 缓存和生成文件
coverage/
.coverage/
.nyc_output/
*.log
# 其他不需要检查的文件
*.min.js
*.config.js
vitest.config.mts
# 特定目录
bin/
scripts/
deploy/
docSite/

17
.eslintrc.json Normal file
View File

@ -0,0 +1,17 @@
{
"root": true,
"parser": "@typescript-eslint/parser",
"plugins": ["@typescript-eslint"],
"extends": ["next/core-web-vitals"],
"rules": {
"react-hooks/rules-of-hooks": 0,
"@typescript-eslint/consistent-type-imports": [
"error",
{
"prefer": "type-imports",
"disallowTypeAnnotations": false
}
]
},
"ignorePatterns": ["node_modules/", "dist/", "build/", "coverage/"]
}

View File

@ -16,9 +16,6 @@ usageMatchRegex:
# the `{key}` will be placed by a proper keypath matching regex,
# you can ignore it and use your own matching rules as well
- "[^\\w\\d]t\\(['\"`]({key})['\"`]"
- "[^\\w\\d]commonT\\(['\"`]({key})['\"`]"
- "[^\\w\\d]fileT\\(['\"`]({key})['\"`]"
- "[^\\w\\d]workflowT\\(['\"`]({key})['\"`]"
- "[^\\w\\d]i18nT\\(['\"`]({key})['\"`]"
# A RegEx to set a custom scope range. This scope will be used as a prefix when detecting keys

View File

@ -21,7 +21,7 @@
"i18n-ally.namespace": true,
"i18n-ally.pathMatcher": "{locale}/{namespaces}.json",
"i18n-ally.extract.targetPickingStrategy": "most-similar-by-key",
"i18n-ally.translate.engines": ["google"],
"i18n-ally.translate.engines": ["deepl","google"],
"[typescript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},

View File

@ -120,21 +120,27 @@ services:
restart: always
command: |
redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
healthcheck:
test: ['CMD', 'redis-cli', '-a', 'mypassword', 'ping']
interval: 10s
timeout: 3s
retries: 3
start_period: 30s
volumes:
- ./redis/data:/data
# fastgpt
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.9.7 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.7 # 阿里云
image: ghcr.io/labring/fastgpt-sandbox:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10-fix2 # 阿里云
networks:
- fastgpt
restart: always
fastgpt-mcp-server:
container_name: fastgpt-mcp-server
image: ghcr.io/labring/fastgpt-mcp_server:v4.9.7 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.7 # 阿里云
image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10-fix2 # 阿里云
ports:
- 3005:3000
networks:
@ -144,8 +150,8 @@ services:
- FASTGPT_ENDPOINT=http://fastgpt:3000
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.9.7 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.7 # 阿里云
image: ghcr.io/labring/fastgpt:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10-fix2 # 阿里云
ports:
- 3000:3000
networks:

View File

@ -99,19 +99,25 @@ services:
redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
volumes:
- ./redis/data:/data
healthcheck:
test: ['CMD', 'redis-cli', '-a', 'mypassword', 'ping']
interval: 10s
timeout: 3s
retries: 3
start_period: 30s
# fastgpt
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.9.7 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.7 # 阿里云
image: ghcr.io/labring/fastgpt-sandbox:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10-fix2 # 阿里云
networks:
- fastgpt
restart: always
fastgpt-mcp-server:
container_name: fastgpt-mcp-server
image: ghcr.io/labring/fastgpt-mcp_server:v4.9.7 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.7 # 阿里云
image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10-fix2 # 阿里云
ports:
- 3005:3000
networks:
@ -121,8 +127,8 @@ services:
- FASTGPT_ENDPOINT=http://fastgpt:3000
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.9.7 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.7 # 阿里云
image: ghcr.io/labring/fastgpt:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10-fix2 # 阿里云
ports:
- 3000:3000
networks:

View File

@ -22,6 +22,11 @@ services:
- POSTGRES_DB=postgres
volumes:
- ./pg/data:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'postgres']
interval: 5s
timeout: 5s
retries: 10
mongo:
image: mongo:5.0.18 # dockerhub
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mongo:5.0.18 # 阿里云
@ -79,21 +84,27 @@ services:
restart: always
command: |
redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
healthcheck:
test: ['CMD', 'redis-cli', '-a', 'mypassword', 'ping']
interval: 10s
timeout: 3s
retries: 3
start_period: 30s
volumes:
- ./redis/data:/data
# fastgpt
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.9.7 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.7 # 阿里云
image: ghcr.io/labring/fastgpt-sandbox:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10-fix2 # 阿里云
networks:
- fastgpt
restart: always
fastgpt-mcp-server:
container_name: fastgpt-mcp-server
image: ghcr.io/labring/fastgpt-mcp_server:v4.9.7 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.7 # 阿里云
image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10-fix2 # 阿里云
ports:
- 3005:3000
networks:
@ -103,8 +114,8 @@ services:
- FASTGPT_ENDPOINT=http://fastgpt:3000
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.9.7 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.7 # 阿里云
image: ghcr.io/labring/fastgpt:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10-fix2 # 阿里云
ports:
- 3000:3000
networks:

View File

@ -61,20 +61,26 @@ services:
restart: always
command: |
redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
healthcheck:
test: ['CMD', 'redis-cli', '-a', 'mypassword', 'ping']
interval: 10s
timeout: 3s
retries: 3
start_period: 30s
volumes:
- ./redis/data:/data
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.9.7 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.7 # 阿里云
image: ghcr.io/labring/fastgpt-sandbox:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10-fix2 # 阿里云
networks:
- fastgpt
restart: always
fastgpt-mcp-server:
container_name: fastgpt-mcp-server
image: ghcr.io/labring/fastgpt-mcp_server:v4.9.7 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.7 # 阿里云
image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10-fix2 # 阿里云
ports:
- 3005:3000
networks:
@ -84,8 +90,8 @@ services:
- FASTGPT_ENDPOINT=http://fastgpt:3000
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.9.7 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.7 # 阿里云
image: ghcr.io/labring/fastgpt:v4.9.10-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10-fix2 # 阿里云
ports:
- 3000:3000
networks:

232
dev.md
View File

@ -1,114 +1,118 @@
## Premise
Since FastGPT is managed in the same way as monorepo, it is recommended to install make first during development.
monorepo Project Name:
- app: main project
-......
## Dev
```sh
# Give automatic script code execution permission (on non-Linux systems, you can manually execute the postinstall.sh file content)
chmod -R +x ./scripts/
# Executing under the code root directory installs all dependencies within the root package, projects, and packages
pnpm i
# Not make cmd
cd projects/app
pnpm dev
# Make cmd
make dev name=app
```
Note: If the Node version is >= 20, you need to pass the `--no-node-snapshot` parameter to Node when running `pnpm i`
```sh
NODE_OPTIONS=--no-node-snapshot pnpm i
```
### Jest
https://fael3z0zfze.feishu.cn/docx/ZOI1dABpxoGhS7xzhkXcKPxZnDL
## I18N
### Install i18n-ally Plugin
1. Open the Extensions Marketplace in VSCode, search for and install the `i18n Ally` plugin.
### Code Optimization Examples
#### Fetch Specific Namespace Translations in `getServerSideProps`
```typescript
// pages/yourPage.tsx
export async function getServerSideProps(context: any) {
return {
props: {
currentTab: context?.query?.currentTab || TabEnum.info,
...(await serverSideTranslations(context.locale, ['publish', 'user']))
}
};
}
```
#### Use useTranslation Hook in Page
```typescript
// pages/yourPage.tsx
import { useTranslation } from 'next-i18next';
const YourComponent = () => {
const { t } = useTranslation();
return (
<Button
variant="outline"
size="sm"
mr={2}
onClick={() => setShowSelected(false)}
>
{t('common:close')}
</Button>
);
};
export default YourComponent;
```
#### Handle Static File Translations
```typescript
// utils/i18n.ts
import { i18nT } from '@fastgpt/web/i18n/utils';
const staticContent = {
id: 'simpleChat',
avatar: 'core/workflow/template/aiChat',
name: i18nT('app:template.simple_robot'),
};
export default staticContent;
```
### Standardize Translation Format
- Use the t(namespace:key) format to ensure consistent naming.
- Translation keys should use lowercase letters and underscores, e.g., common.close.
## Build
```sh
# Docker cmd: Build image, not proxy
docker build -f ./projects/app/Dockerfile -t registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.1 . --build-arg name=app
# Make cmd: Build image, not proxy
make build name=app image=registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.1
# Docker cmd: Build image with proxy
docker build -f ./projects/app/Dockerfile -t registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.1 . --build-arg name=app --build-arg proxy=taobao
# Make cmd: Build image with proxy
make build name=app image=registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.1 proxy=taobao
```
## Premise
Since FastGPT is managed in the same way as monorepo, it is recommended to install make first during development.
monorepo Project Name:
- app: main project
-......
## Dev
```sh
# Give automatic script code execution permission (on non-Linux systems, you can manually execute the postinstall.sh file content)
chmod -R +x ./scripts/
# Executing under the code root directory installs all dependencies within the root package, projects, and packages
pnpm i
# Not make cmd
cd projects/app
pnpm dev
# Make cmd
make dev name=app
```
Note: If the Node version is >= 20, you need to pass the `--no-node-snapshot` parameter to Node when running `pnpm i`
```sh
NODE_OPTIONS=--no-node-snapshot pnpm i
```
### Jest
https://fael3z0zfze.feishu.cn/docx/ZOI1dABpxoGhS7xzhkXcKPxZnDL
## I18N
### Install i18n-ally Plugin
1. Open the Extensions Marketplace in VSCode, search for and install the `i18n Ally` plugin.
### Code Optimization Examples
#### Fetch Specific Namespace Translations in `getServerSideProps`
```typescript
// pages/yourPage.tsx
export async function getServerSideProps(context: any) {
return {
props: {
currentTab: context?.query?.currentTab || TabEnum.info,
...(await serverSideTranslations(context.locale, ['publish', 'user']))
}
};
}
```
#### Use useTranslation Hook in Page
```typescript
// pages/yourPage.tsx
import { useTranslation } from 'next-i18next';
const YourComponent = () => {
const { t } = useTranslation();
return (
<Button
variant="outline"
size="sm"
mr={2}
onClick={() => setShowSelected(false)}
>
{t('common:close')}
</Button>
);
};
export default YourComponent;
```
#### Handle Static File Translations
```typescript
// utils/i18n.ts
import { i18nT } from '@fastgpt/web/i18n/utils';
const staticContent = {
id: 'simpleChat',
avatar: 'core/workflow/template/aiChat',
name: i18nT('app:template.simple_robot'),
};
export default staticContent;
```
### Standardize Translation Format
- Use the t(namespace:key) format to ensure consistent naming.
- Translation keys should use lowercase letters and underscores, e.g., common.close.
## audit
To add an audit log: add the new event to `OperationLogEventEnum` and add the corresponding audit function in the `operationLog/audit` ts file, fill in the matching i18n entries at the corresponding positions, and then call the `addOperationLog` function at the location where the log should be recorded.
## Build
```sh
# Docker cmd: Build image, not proxy
docker build -f ./projects/app/Dockerfile -t registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.1 . --build-arg name=app
# Make cmd: Build image, not proxy
make build name=app image=registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.1
# Docker cmd: Build image with proxy
docker build -f ./projects/app/Dockerfile -t registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.1 . --build-arg name=app --build-arg proxy=taobao
# Make cmd: Build image with proxy
make build name=app image=registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.1 proxy=taobao
```

Binary file not shown.

After

Width:  |  Height:  |  Size: 386 KiB

View File

@ -959,10 +959,16 @@ curl --location --request POST 'http://localhost:3000/api/core/chat/getHistories
{{< markdownify >}}
{{% alert icon=" " context="success" %}}
目前仅能获取到当前 API key 的创建者的对话。
- appId - 应用 Id
- offset - 偏移量,即从第几条数据开始取
- pageSize - 记录数量
- source - 对话源。source=api表示获取通过 API 创建的对话(不会获取到页面上的对话记录)
- startCreateTime - 开始创建时间(可选)
- endCreateTime - 结束创建时间(可选)
- startUpdateTime - 开始更新时间(可选)
- endUpdateTime - 结束更新时间(可选)
{{% /alert %}}
{{< /markdownify >}}

View File

@ -296,6 +296,7 @@ curl --location --request DELETE 'http://localhost:3000/api/core/dataset/delete?
| datasetId | 知识库ID | ✅ |
| parentId | 父级ID不填则默认为根目录 | |
| trainingType | 数据处理方式。chunk: 按文本长度进行分割;qa: 问答对提取 | ✅ |
| customPdfParse | PDF增强解析。true: 开启PDF增强解析;不填则默认为false | |
| autoIndexes | 是否自动生成索引(仅商业版支持) | |
| imageIndex | 是否自动生成图片索引(仅商业版支持) | |
| chunkSettingMode | 分块参数模式。auto: 系统默认参数; custom: 手动指定参数 | |
@ -644,7 +645,7 @@ data 为集合的 ID。
{{< /tab >}}
{{< /tabs >}}
### 创建一个外部文件库集合(商业版
### 创建一个外部文件库集合(弃用
{{< tabs tabTotal="3" >}}
{{< tab tabName="请求示例" >}}

View File

@ -0,0 +1,50 @@
---
title: 'V4.9.10'
description: 'FastGPT V4.9.10 更新说明'
icon: 'upgrade'
draft: false
toc: true
weight: 790
---
## 升级指南
重要提示本次更新会重新构建全文索引构建期间全文检索结果会为空4c16g 700 万组全文索引大致消耗 25 分钟。如需无缝升级,需自行做表同步工程。
### 1. 做好数据备份
### 2. 更新镜像 tag
- 更新 FastGPT 镜像 tag: v4.9.10-fix2
- 更新 FastGPT 商业版镜像 tag: v4.9.10-fix2
- mcp_server 无需更新
- Sandbox 无需更新
- AIProxy 无需更新
## 🚀 新增内容
1. 支持 PG 设置`systemEnv.hnswMaxScanTuples`参数,提高迭代搜索的数据总量。
2. 知识库预处理参数增加 “分块条件”,可控制某些情况下不进行分块处理。
3. 知识库预处理参数增加 “段落优先” 模式,可控制最大段落深度。原“长度优先”模式,不再内嵌段落优先逻辑。
4. 工作流调整为单向接入和接出,支持快速的添加下一步节点。
5. 开放飞书和语雀知识库到开源版。
6. gemini 和 claude 最新模型预设。
## ⚙️ 优化
1. LLM stream调用默认超时调大。
2. 部分确认交互优化。
3. 纠正原先知识库的“表格数据集”名称,改成“备份导入”。同时支持知识库索引的导出和导入。
4. 工作流知识库引用上限,如果工作流中没有相关 AI 节点,则交互模式改成纯手动输入,并且上限为 1000万。
5. 语音输入,移动端判断逻辑,准确判断是否为手机,而不是小屏。
6. 优化上下文截取算法,至少保证留下一组 Human 信息。
## 🐛 修复
1. 全文检索多知识库时排序得分排序不正确。
2. 流响应捕获 finish_reason 可能不正确。
3. 工具调用模式,未保存思考输出。
4. 知识库 indexSize 参数未生效。
5. 工作流嵌套 2 层后,获取预览引用、上下文不正确。
6. xlsx 转成 Markdown 时候,前面会多出一个空格。
7. 读取 Markdown 文件时Base64 图片未进行额外抓换保存。

View File

@ -0,0 +1,25 @@
---
title: 'V4.9.11(进行中)'
description: 'FastGPT V4.9.11 更新说明'
icon: 'upgrade'
draft: false
toc: true
weight: 789
---
## 🚀 新增内容
1. 工作流中增加节点搜索功能。
2. 工作流中,子流程版本控制,可选择“保持最新版本”,无需手动更新。
## ⚙️ 优化
1. 原文缓存改用 gridfs 存储,提高上限。
## 🐛 修复
1. 工作流中,管理员声明的全局系统工具,无法进行版本管理。
2. 工具调用节点前,有交互节点时,上下文异常。
3. 修复备份导入,小于 1000 字时,无法分块问题。
4. 自定义 PDF 解析,无法保存 base64 图片。

View File

@ -13,7 +13,7 @@ weight: 793
### 2. 更新镜像 tag
- 更新 FastGPT 镜像 tag: v4.9.7
- 更新 FastGPT 镜像 tag: v4.9.7-fix2
- 更新 FastGPT 商业版镜像 tag: v4.9.7
- mcp_server 无需更新
- Sandbox 无需更新
@ -40,6 +40,7 @@ weight: 793
3. 对话时间统计,准确返回工作流整体运行时间。
4. 从 ai_proxy 获取音频解析时长。
5. AI 模型 Token 值均优先采用 API usage确保 tokens 值准确,若为空,则再采用 GPT3.5 的估算方式。
6. 优化对话日志 list 接口,适配单个对话框,大量对话的场景。
## 🐛 修复
@ -54,4 +55,5 @@ weight: 793
9. 文本内容提取节点,默认值赋值逻辑。
10. 分享链接中,会强制返回嵌套应用中的引用内容。
11. 知识库集合元数据过滤时,不同知识库的同名标签使用 $and 筛选无法获取结果。
12. 修复应用列表,权限配置可能出现 index 刷新问题。

View File

@ -1,5 +1,5 @@
---
title: 'V4.9.8(进行中)'
title: 'V4.9.8'
description: 'FastGPT V4.9.8 更新说明'
icon: 'upgrade'
draft: false
@ -7,14 +7,43 @@ toc: true
weight: 792
---
## 升级指南
### 1. 做好数据备份
### 2. 更新镜像 tag
- 更新 FastGPT 镜像 tag: v4.9.8
- 更新 FastGPT 商业版镜像 tag: v4.9.8
- mcp_server 无需更新
- Sandbox 无需更新
- AIProxy 无需更新
## 🚀 新增内容
1. 支持 Streamable http mcp 模式,无需额外安装 mcp_server 即可以 MCP 方式对外提供 fastgpt 调用。
1. 支持 Toolcalls 并行执行。
2. 将所有内置任务,从非 stream 模式调整成 stream 模式,避免部分模型不支持非 stream 模式。如需覆盖,则可以在模型`额外 Body`参数中,强制指定`stream=false`
3. qwen3 模型预设
4. 语雀知识库支持设置根目录。
5. 可配置密码过期时间,过期后下次登录会强制要求修改密码。
6. 密码登录增加 preLogin 临时密钥校验。
7. 支持 Admin 后台配置发布渠道和第三方知识库的显示隐藏。
## ⚙️ 优化
1. Chat log list 优化,避免大数据时超出内存限制。
2. 预加载 token 计算 worker避免主任务中并发创建导致线程阻塞。
3. 工作流节点版本控制交互优化。
4. 网络获取以及 html2md 优化,支持视频和音频标签的转换。
## 🐛 修复
1. 应用列表/知识库列表,删除行权限展示问题。
2. 打开知识库搜索参数后,重排选项自动被打开。
3. LLM json_schema 模式 API 请求格式错误。
4. 重新训练时,图片过期索引未成功清除,导致图片会丢失。
5. 重新训练权限问题。
6. 文档链接地址。
7. Claude 工具调用,由于 index 为空,导致工具调用失败。
8. 嵌套工作流,工具调用下包含交互节点时,流程异常。

View File

@ -0,0 +1,43 @@
---
title: 'V4.9.9'
description: 'FastGPT V4.9.9 更新说明'
icon: 'upgrade'
draft: false
toc: true
weight: 791
---
## 升级指南
### 1. 做好数据备份
### 2. 商业版用户替换新 License
商业版用户可以联系 FastGPT 团队支持同学,获取 License 替换方案。替换后,可以直接升级系统,管理后台会提示输入新 License。
### 3. 更新镜像 tag
- 更新 FastGPT 镜像 tag: v4.9.9
- 更新 FastGPT 商业版镜像 tag: v4.9.9
- mcp_server 无需更新
- Sandbox 无需更新
- AIProxy 无需更新
## 🚀 新增内容
1. 切换 SessionId 来替代 JWT 实现登录鉴权,可控制最大登录客户端数量。
2. 新的商业版 License 管理模式。
3. 公众号调用,显示记录 chat 对话错误,方便排查。
4. API 知识库支持 BasePath 选择,需增加 API 接口,具体可见[API 知识库介绍](/docs/guide/knowledge_base/api_dataset/#4-获取文件详细信息用于获取文件信息)
## ⚙️ 优化
1. 优化工具调用,新工具的判断逻辑。
2. 调整 Cite 引用提示词。
## 🐛 修复
1. 无法正常获取应用历史保存/发布记录。
2. 成员创建 MCP 工具权限问题。
3. 来源引用展示,存在 ID 传递错误,导致提示无权操作该文件。
4. 回答标注前端数据报错。

View File

@ -43,7 +43,7 @@ type ResponseType = {
// 文件列表中,单项的文件类型
type FileListItem = {
id: string;
parentId: string | null;
parentId: string //也可能为 null 或者 undefined 类型;
name: string;
type: 'file' | 'folder';
updateTime: Date;
@ -59,7 +59,7 @@ type FileListItem = {
{{< markdownify >}}
{{% alert icon=" " context="success" %}}
- parentId - 父级 id可选或者 null。
- parentId - 父级 id可选或者 null | undefined
- searchKey - 检索词,可选
{{% /alert %}}
@ -68,7 +68,7 @@ curl --location --request POST '{{baseURL}}/v1/file/list' \
--header 'Authorization: Bearer {{authorization}}' \
--header 'Content-Type: application/json' \
--data-raw '{
"parentId": null,
"parentId": "",
"searchKey": ""
}'
```
@ -185,3 +185,40 @@ curl --location --request GET '{{baseURL}}/v1/file/read?id=xx' \
{{< /tabs >}}
### 4. 获取文件详细信息(用于获取文件信息)
{{< tabs tabTotal="2" >}}
{{< tab tabName="请求示例" >}}
{{< markdownify >}}
id 为文件的 id。
```bash
curl --location --request GET '{{baseURL}}/v1/file/detail?id=xx' \
--header 'Authorization: Bearer {{authorization}}'
```
{{< /markdownify >}}
{{< /tab >}}
{{< tab tabName="响应示例" >}}
{{< markdownify >}}
```json
{
"code": 200,
"success": true,
"message": "",
"data": {
"id": "docs",
"parentId": "",
"name": "docs"
}
}
```
{{< /markdownify >}}
{{< /tab >}}
{{< /tabs >}}

View File

@ -28,7 +28,6 @@ FastGPT 商业版是基于 FastGPT 开源版的增强版本,增加了一些独
| 应用发布安全配置 | ❌ | ✅ | ✅ |
| 内容审核 | ❌ | ✅ | ✅ |
| web站点同步 | ❌ | ✅ | ✅ |
| 主流文档库接入(目前支持:语雀、飞书) | ❌ | ✅ | ✅ |
| 增强训练模式 | ❌ | ✅ | ✅ |
| 第三方应用快速接入(飞书、公众号) | ❌ | ✅ | ✅ |
| 管理后台 | ❌ | ✅ | 不需要 |

View File

@ -563,7 +563,7 @@ HTTP模块中需要设置 3 个工具参数:
"hidden"
],
"label": "",
"value": 1500,
"value": 5000,
"valueType": "number"
},
{

View File

@ -95,6 +95,10 @@ weight: 506
121.196.228.45
121.43.126.202
120.26.144.37
47.98.196.113
47.97.204.90
118.31.41.236
118.178.185.61
```
## 4. 获取AES Key选择加密方式
@ -125,6 +129,13 @@ weight: 506
## FAQ
### 公众号没响应
检查应用对话日志,如果有对话日志,但是微信公众号无响应,则是白名单 IP未成功。
添加白名单IP 后,通常需要等待几分钟微信更新。可以在对话日志中,找点错误日志。
![](/imgs/official_account_faq.png)
### 如何新开一个聊天记录
如果你想重置你的聊天记录,可以给机器人发送 `Reset` 消息(注意大小写),机器人会新开一个聊天记录。
如果你想重置你的聊天记录,可以给机器人发送 `Reset` 消息(注意大小写),机器人会新开一个聊天记录。

4
env.d.ts vendored
View File

@ -4,7 +4,6 @@ declare global {
LOG_DEPTH: string;
DEFAULT_ROOT_PSW: string;
DB_MAX_LINK: string;
TOKEN_KEY: string;
FILE_TOKEN_KEY: string;
ROOT_KEY: string;
OPENAI_BASE_URL: string;
@ -35,6 +34,9 @@ declare global {
ALLOWED_ORIGINS?: string;
SHOW_COUPON?: string;
CONFIG_JSON_PATH?: string;
PASSWORD_LOGIN_LOCK_SECONDS?: string;
PASSWORD_EXPIRED_MONTH?: string;
MAX_LOGIN_SESSION?: string;
}
}
}

View File

@ -13,12 +13,18 @@
"previewIcon": "node ./scripts/icon/index.js",
"api:gen": "tsc ./scripts/openapi/index.ts && node ./scripts/openapi/index.js && npx @redocly/cli build-docs ./scripts/openapi/openapi.json -o ./projects/app/public/openapi/index.html",
"create:i18n": "node ./scripts/i18n/index.js",
"lint": "eslint \"**/*.{ts,tsx}\" --ignore-path .eslintignore",
"lint:fix": "eslint \"**/*.{ts,tsx}\" --fix --ignore-path .eslintignore",
"test": "vitest run",
"test:workflow": "vitest run workflow"
},
"devDependencies": {
"@chakra-ui/cli": "^2.4.1",
"@typescript-eslint/eslint-plugin": "^6.21.0",
"@typescript-eslint/parser": "^6.21.0",
"@vitest/coverage-v8": "^3.0.9",
"eslint": "^8.57.0",
"eslint-config-next": "^14.1.0",
"husky": "^8.0.3",
"i18next": "23.16.8",
"lint-staged": "^13.3.0",
@ -31,7 +37,10 @@
"zhlint": "^0.7.4"
},
"lint-staged": {
"./**/**/*.{ts,tsx,scss}": "npm run format-code",
"./**/**/*.{ts,tsx,scss}": [
"npm run format-code",
"npm run lint:fix"
],
"./docSite/**/**/*.md": "npm run format-doc && npm run gen:llms"
},
"resolutions": {

View File

@ -1,4 +1,4 @@
import { ErrType } from '../errorCode';
import { type ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* dataset: 502000 */
export enum AppErrEnum {

View File

@ -1,4 +1,4 @@
import { ErrType } from '../errorCode';
import { type ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* dataset: 504000 */
export enum ChatErrEnum {

View File

@ -1,5 +1,5 @@
import { i18nT } from '../../../../web/i18n/utils';
import { ErrType } from '../errorCode';
import { type ErrType } from '../errorCode';
/* dataset: 507000 */
const startCode = 507000;

View File

@ -1,5 +1,5 @@
import { i18nT } from '../../../../web/i18n/utils';
import { ErrType } from '../errorCode';
import { type ErrType } from '../errorCode';
/* dataset: 501000 */
export enum DatasetErrEnum {
@ -27,7 +27,7 @@ const datasetErr = [
},
{
statusText: DatasetErrEnum.unExist,
message: 'core.dataset.error.unExistDataset'
message: i18nT('common:core.dataset.error.unExistDataset')
},
{
statusText: DatasetErrEnum.unExistCollection,

View File

@ -1,4 +1,4 @@
import { ErrType } from '../errorCode';
import { type ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* dataset: 506000 */
export enum OpenApiErrEnum {

View File

@ -1,4 +1,4 @@
import { ErrType } from '../errorCode';
import { type ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* dataset: 505000 */
export enum OutLinkErrEnum {

View File

@ -1,4 +1,4 @@
import { ErrType } from '../errorCode';
import { type ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* dataset: 508000 */
export enum PluginErrEnum {

View File

@ -1,14 +1,29 @@
import { ErrType } from '../errorCode';
import { type ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* dataset: 509000 */
export enum SystemErrEnum {
communityVersionNumLimit = 'communityVersionNumLimit'
communityVersionNumLimit = 'communityVersionNumLimit',
licenseAppAmountLimit = 'licenseAppAmountLimit',
licenseDatasetAmountLimit = 'licenseDatasetAmountLimit',
licenseUserAmountLimit = 'licenseUserAmountLimit'
}
const systemErr = [
{
statusText: SystemErrEnum.communityVersionNumLimit,
message: i18nT('common:code_error.system_error.community_version_num_limit')
},
{
statusText: SystemErrEnum.licenseAppAmountLimit,
message: i18nT('common:code_error.system_error.license_app_amount_limit')
},
{
statusText: SystemErrEnum.licenseDatasetAmountLimit,
message: i18nT('common:code_error.system_error.license_dataset_amount_limit')
},
{
statusText: SystemErrEnum.licenseUserAmountLimit,
message: i18nT('common:code_error.system_error.license_user_amount_limit')
}
];

View File

@ -1,4 +1,4 @@
import { ErrType } from '../errorCode';
import { type ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* team: 503000 */
export enum UserErrEnum {

View File

@ -1,4 +1,4 @@
import { OutLinkChatAuthProps } from '../../support/permission/chat.d';
import type { OutLinkChatAuthProps } from '../../support/permission/chat.d';
export type preUploadImgProps = OutLinkChatAuthProps & {
// expiredTime?: Date;

View File

@ -1,7 +1,7 @@
import { detect } from 'jschardet';
import { documentFileType } from './constants';
import { ChatFileTypeEnum } from '../../core/chat/constants';
import { UserChatItemValueItemType } from '../../core/chat/type';
import { type UserChatItemValueItemType } from '../../core/chat/type';
import * as fs from 'fs';
export const formatFileSize = (bytes: number): string => {

View File

@ -1,4 +1,4 @@
import { BucketNameEnum } from './constants';
import type { BucketNameEnum } from './constants';
export type FileTokenQuery = {
bucketName: `${BucketNameEnum}`;

View File

@ -1,4 +1,4 @@
import { TrackEnum } from './constants';
import type { TrackEnum } from './constants';
import { OAuthEnum } from '../../../support/user/constant';
import { AppTypeEnum } from '../../../core/app/constants';

View File

@ -1,4 +1,4 @@
import { ParentIdType } from './type';
import { type ParentIdType } from './type';
export const parseParentIdInMongo = (parentId: ParentIdType) => {
if (parentId === undefined) return {};

View File

@ -0,0 +1,18 @@
/**
 * Validate a password against the system password policy.
 *
 * A password is accepted when BOTH hold:
 * 1. It is 8-100 characters long and contains only digits, ASCII letters,
 *    or the special characters !@#$%^&*()_+=-
 * 2. It draws from at least two of the four character classes:
 *    digits, lowercase letters, uppercase letters, special characters.
 *
 * @param password - the raw password string to check
 * @returns true when the password satisfies the policy, false otherwise
 */
export const checkPasswordRule = (password: string) => {
  // Whole-string check: allowed characters only, length between 8 and 100
  const allowedCharset = /^[\dA-Za-z!@#$%^&*()_+=-]{8,100}$/;
  if (!allowedCharset.test(password)) return false;

  // The four recognized character classes
  const characterClasses = [
    /\d/, // digits
    /[a-z]/, // lowercase letters
    /[A-Z]/, // uppercase letters
    /[!@#$%^&*()_+=-]/ // special characters
  ];

  // Count how many distinct classes appear in the password;
  // the policy requires at least two.
  const satisfiedCount = characterClasses.reduce(
    (count, cls) => (cls.test(password) ? count + 1 : count),
    0
  );
  return satisfiedCount >= 2;
};

View File

@ -7,6 +7,10 @@ export const CUSTOM_SPLIT_SIGN = '-----CUSTOM_SPLIT_SIGN-----';
type SplitProps = {
text: string;
chunkSize: number;
paragraphChunkDeep?: number; // Paragraph deep
paragraphChunkMinSize?: number; // Paragraph min size, if too small, it will merge
maxSize?: number;
overlapRatio?: number;
customReg?: string[];
@ -108,6 +112,8 @@ const commonSplit = (props: SplitProps): SplitResponse => {
let {
text = '',
chunkSize,
paragraphChunkDeep = 5,
paragraphChunkMinSize = 100,
maxSize = defaultMaxChunkSize,
overlapRatio = 0.15,
customReg = []
@ -123,7 +129,7 @@ const commonSplit = (props: SplitProps): SplitResponse => {
text = text.replace(/(```[\s\S]*?```|~~~[\s\S]*?~~~)/g, function (match) {
return match.replace(/\n/g, codeBlockMarker);
});
// 2. 表格处理 - 单独提取表格出来,进行表头合并
// 2. Markdown 表格处理 - 单独提取表格出来,进行表头合并
const tableReg =
/(\n\|(?:(?:[^\n|]+\|){1,})\n\|(?:[:\-\s]+\|){1,}\n(?:\|(?:[^\n|]+\|)*\n?)*)(?:\n|$)/g;
const tableDataList = text.match(tableReg);
@ -143,25 +149,40 @@ const commonSplit = (props: SplitProps): SplitResponse => {
text = text.replace(/(\r?\n|\r){3,}/g, '\n\n\n');
// The larger maxLen is, the next sentence is less likely to trigger splitting
const markdownIndex = 4;
const forbidOverlapIndex = 8;
const customRegLen = customReg.length;
const markdownIndex = paragraphChunkDeep - 1;
const forbidOverlapIndex = customRegLen + markdownIndex + 4;
const markdownHeaderRules = ((deep?: number): { reg: RegExp; maxLen: number }[] => {
if (!deep || deep === 0) return [];
const maxDeep = Math.min(deep, 8); // Maximum 8 levels
const rules: { reg: RegExp; maxLen: number }[] = [];
for (let i = 1; i <= maxDeep; i++) {
const hashSymbols = '#'.repeat(i);
rules.push({
reg: new RegExp(`^(${hashSymbols}\\s[^\\n]+\\n)`, 'gm'),
maxLen: chunkSize
});
}
return rules;
})(paragraphChunkDeep);
const stepReges: { reg: RegExp | string; maxLen: number }[] = [
...customReg.map((text) => ({
reg: text.replaceAll('\\n', '\n'),
maxLen: chunkSize
})),
{ reg: /^(#\s[^\n]+\n)/gm, maxLen: chunkSize },
{ reg: /^(##\s[^\n]+\n)/gm, maxLen: chunkSize },
{ reg: /^(###\s[^\n]+\n)/gm, maxLen: chunkSize },
{ reg: /^(####\s[^\n]+\n)/gm, maxLen: chunkSize },
{ reg: /^(#####\s[^\n]+\n)/gm, maxLen: chunkSize },
...markdownHeaderRules,
{ reg: /([\n](```[\s\S]*?```|~~~[\s\S]*?~~~))/g, maxLen: maxSize }, // code block
// HTML Table tag 尽可能保障完整
{
reg: /(\n\|(?:(?:[^\n|]+\|){1,})\n\|(?:[:\-\s]+\|){1,}\n(?:\|(?:[^\n|]+\|)*\n)*)/g,
maxLen: Math.min(chunkSize * 1.5, maxSize)
}, // Table 尽可能保证完整性
maxLen: chunkSize
}, // Markdown Table 尽可能保证完整性
{ reg: /(\n{2,})/g, maxLen: chunkSize },
{ reg: /([\n])/g, maxLen: chunkSize },
// ------ There's no overlap on the top
@ -172,12 +193,10 @@ const commonSplit = (props: SplitProps): SplitResponse => {
{ reg: /([]|,\s)/g, maxLen: chunkSize }
];
const customRegLen = customReg.length;
const checkIsCustomStep = (step: number) => step < customRegLen;
const checkIsMarkdownSplit = (step: number) =>
step >= customRegLen && step <= markdownIndex + customRegLen;
const checkForbidOverlap = (step: number) => step <= forbidOverlapIndex + customRegLen;
const checkForbidOverlap = (step: number) => step <= forbidOverlapIndex;
// if use markdown title split, Separate record title
const getSplitTexts = ({ text, step }: { text: string; step: number }) => {
@ -301,6 +320,7 @@ const commonSplit = (props: SplitProps): SplitResponse => {
const splitTexts = getSplitTexts({ text, step });
const chunks: string[] = [];
for (let i = 0; i < splitTexts.length; i++) {
const item = splitTexts[i];
@ -443,7 +463,6 @@ const commonSplit = (props: SplitProps): SplitResponse => {
*/
export const splitText2Chunks = (props: SplitProps): SplitResponse => {
let { text = '' } = props;
const start = Date.now();
const splitWithCustomSign = text.split(CUSTOM_SPLIT_SIGN);
const splitResult = splitWithCustomSign.map((item) => {

View File

@ -1,7 +1,8 @@
export enum SystemConfigsTypeEnum {
fastgpt = 'fastgpt',
fastgptPro = 'fastgptPro',
systemMsgModal = 'systemMsgModal'
systemMsgModal = 'systemMsgModal',
license = 'license'
}
export const SystemConfigsTypeMap = {
@ -13,5 +14,8 @@ export const SystemConfigsTypeMap = {
},
[SystemConfigsTypeEnum.systemMsgModal]: {
label: 'systemMsgModal'
},
[SystemConfigsTypeEnum.license]: {
label: 'license'
}
};

View File

@ -1,8 +1,8 @@
import { SystemConfigsTypeEnum } from "./constants";
import type { SystemConfigsTypeEnum } from './constants';
export type SystemConfigsType = {
_id: string;
type: `${SystemConfigsTypeEnum}`;
value: Record<string, any>;
createTime: Date;
};
};

View File

@ -1,4 +1,5 @@
import { StandSubPlanLevelMapType, SubPlanType } from '../../../support/wallet/sub/type';
import type { SubPlanType } from '../../../support/wallet/sub/type';
import { StandSubPlanLevelMapType } from '../../../support/wallet/sub/type';
import type {
ChatModelItemType,
FunctionModelItemType,
@ -63,6 +64,15 @@ export type FastGPTFeConfigsType = {
show_coupon?: boolean;
concatMd?: string;
show_dataset_feishu?: boolean;
show_dataset_yuque?: boolean;
show_publish_feishu?: boolean;
show_publish_dingtalk?: boolean;
show_publish_offiaccount?: boolean;
show_dataset_enhance?: boolean;
show_batch_eval?: boolean;
concatMd?: string;
docUrl?: string;
openAPIDocUrl?: string;
@ -120,9 +130,11 @@ export type SystemEnvType = {
vectorMaxProcess: number;
qaMaxProcess: number;
vlmMaxProcess: number;
hnswEfSearch: number;
tokenWorkers: number; // token count max worker
hnswEfSearch: number;
hnswMaxScanTuples: number;
oneapiUrl?: string;
chatApiKey?: string;
@ -135,3 +147,21 @@ export type customPdfParseType = {
doc2xKey?: string;
price?: number;
};
export type LicenseDataType = {
startTime: string;
expiredTime: string;
company: string;
description?: string; // 描述
hosts?: string[]; // 管理端有效域名
maxUsers?: number; // 最大用户数,不填默认不上限
maxApps?: number; // 最大应用数,不填默认不上限
maxDatasets?: number; // 最大数据集数,不填默认不上限
functions: {
sso: boolean;
pay: boolean;
customTemplates: boolean;
datasetEnhance: boolean;
batchEval: boolean;
};
};

View File

@ -1,4 +1,4 @@
import { ModelTypeEnum } from './model';
import type { ModelTypeEnum } from './model';
import type { ModelProviderIdType } from './provider';
type PriceType = {

View File

@ -1,6 +1,6 @@
import { i18nT } from '../../../web/i18n/utils';
import type { LLMModelItemType, STTModelType, EmbeddingModelItemType } from './model.d';
import { getModelProvider, ModelProviderIdType } from './provider';
import { getModelProvider, type ModelProviderIdType } from './provider';
export enum ModelTypeEnum {
llm = 'llm',

View File

@ -1,7 +1,249 @@
import { PromptTemplateItem } from '../type.d';
import { type PromptTemplateItem } from '../type.d';
import { i18nT } from '../../../../web/i18n/utils';
import { getPromptByVersion } from './utils';
export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
{
title: i18nT('app:template.standard_template'),
desc: '',
value: {
['4.9.7']: `## 任务描述
使 <Cites></Cites>
使
##
- 使 [id](CITE) <Cites></Cites> CITE , id id
- **** : "Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)。"
- ****"Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)[67e517e74767063e882d6862](CITE)。\n 它的特点是非常轻量[67e517e74767063e882d6863](CITE)。"
-
- id id <Cites></Cites>
##
-
- <Cites></Cites>
- <Cites></Cites>
- 使 Markdown
- 使
<Cites>
{{quote}}
</Cites>
##
{{question}}
##
`
}
},
{
title: i18nT('app:template.qa_template'),
desc: '',
value: {
['4.9.7']: `## 任务描述
使 <QA></QA>
##
-
- <Answer></Answer>
-
- <QA></QA>
- 使
<QA>
{{quote}}
</QA>
##
{{question}}
##
`
}
},
{
title: i18nT('app:template.standard_strict'),
desc: '',
value: {
['4.9.7']: `## 任务描述
使 <Cites></Cites>
使
##
- 使 [id](CITE) <Cites></Cites> CITE , id id
- **** : "Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)。"
- ****"Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)[67e517e74767063e882d6862](CITE)。\n 它的特点是非常轻量[67e517e74767063e882d6863](CITE)。"
-
- id id <Cites></Cites>
##
-
- <Cites></Cites>
- <Cites></Cites>
- 使 Markdown
- 使
##
使 <Cites></Cites> 使 <Cites></Cites>
<Cites>
{{quote}}
</Cites>
##
{{question}}
##
`
}
},
{
title: i18nT('app:template.hard_strict'),
desc: '',
value: {
['4.9.7']: `## 任务描述
使 <QA></QA>
##
-
- <Answer></Answer>
-
- <QA></QA>
- 使
##
使 <QA></QA> 使 <QA></QA>
<QA>
{{quote}}
</QA>
##
{{question}}
##
`
}
}
];
export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
{
title: i18nT('app:template.standard_template'),
desc: '',
value: {
['4.9.7']: `## 任务描述
使 <Cites></Cites>
使
##
- 使 [id](CITE) <Cites></Cites> CITE , id id
- **** : "Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)。"
- ****"Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)[67e517e74767063e882d6862](CITE)。\n 它的特点是非常轻量[67e517e74767063e882d6863](CITE)。"
-
- id id <Cites></Cites>
##
-
- <Cites></Cites>
- <Cites></Cites>
- 使 Markdown
- 使
<Cites>
{{quote}}
</Cites>`
}
},
{
title: i18nT('app:template.qa_template'),
desc: '',
value: {
['4.9.8']: `## 任务描述
使 <QA></QA>
##
-
- <Answer></Answer>
-
- <QA></QA>
- 使
<QA>
{{quote}}
</QA>`
}
},
{
title: i18nT('app:template.standard_strict'),
desc: '',
value: {
['4.9.7']: `## 任务描述
使 <Cites></Cites>
使
##
- 使 [id](CITE) <Cites></Cites> CITE , id id
- **** : "Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)。"
- ****"Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)[67e517e74767063e882d6862](CITE)。\n 它的特点是非常轻量[67e517e74767063e882d6863](CITE)。"
-
- id id <Cites></Cites>
##
-
- <Cites></Cites>
- <Cites></Cites>
- 使 Markdown
- 使
##
使 <Cites></Cites> 使 <Cites></Cites>
<Cites>
{{quote}}
</Cites>`
}
},
{
title: i18nT('app:template.hard_strict'),
desc: '',
value: {
['4.9.7']: `## 任务描述
使 <QA></QA>
##
-
- <Answer></Answer>
-
- <QA></QA>
- 使
##
使 <QA></QA> 使 <QA></QA>
<QA>
{{quote}}
</QA>`
}
}
];
export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
{
title: i18nT('app:template.standard_template'),
@ -10,11 +252,6 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
['4.9.7']: `{
"id": "{{id}}",
"sourceName": "{{source}}",
"content": "{{q}}\n{{a}}"
}
`,
['4.9.2']: `{
"sourceName": "{{source}}",
"updateTime": "{{updateTime}}",
"content": "{{q}}\n{{a}}"
}
@ -25,7 +262,7 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
title: i18nT('app:template.qa_template'),
desc: i18nT('app:template.qa_template_des'),
value: {
['4.9.2']: `<Question>
['4.9.7']: `<Question>
{{q}}
</Question>
<Answer>
@ -40,11 +277,6 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
['4.9.7']: `{
"id": "{{id}}",
"sourceName": "{{source}}",
"content": "{{q}}\n{{a}}"
}
`,
['4.9.2']: `{
"sourceName": "{{source}}",
"updateTime": "{{updateTime}}",
"content": "{{q}}\n{{a}}"
}
@ -55,7 +287,7 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
title: i18nT('app:template.hard_strict'),
desc: i18nT('app:template.hard_strict_des'),
value: {
['4.9.2']: `<Question>
['4.9.7']: `<Question>
{{q}}
</Question>
<Answer>
@ -64,263 +296,12 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
}
}
];
export const getQuoteTemplate = (version?: string) => {
const defaultTemplate = Prompt_QuoteTemplateList[0].value;
return getPromptByVersion(version, defaultTemplate);
};
export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
{
title: i18nT('app:template.standard_template'),
desc: '',
value: {
['4.9.7']: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
<Reference>
{{quote}}
</Reference>
-
- <Reference></Reference>
- <Reference></Reference>
- 使 Markdown
- 使
- 使 [id](QUOTE) <Reference></Reference> QUOTE , id id
- : "FastGPT 是一个基于大语言模型(LLM)的知识库问答系统[67e517e74767063e882d6861](QUOTE)。"
- `,
['4.9.2']: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
<Reference>
{{quote}}
</Reference>
-
- <Reference></Reference>
- <Reference></Reference>
- 使 Markdown
- 使
:"""{{question}}"""`
}
},
{
title: i18nT('app:template.qa_template'),
desc: '',
value: {
['4.9.2']: `使用 <QA></QA> 标记中的问答对进行回答。
<QA>
{{quote}}
</QA>
-
- <答案></答案>
-
- QA
:"""{{question}}"""`
}
},
{
title: i18nT('app:template.standard_strict'),
desc: '',
value: {
['4.9.7']: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
<Reference>
{{quote}}
</Reference>
1. <Reference></Reference>
2.
3.
- <Reference></Reference>
- <Reference></Reference>
- 使 Markdown
- 使
- 使 [id](QUOTE) <Reference></Reference> QUOTE , id id
- : "FastGPT 是一个基于大语言模型(LLM)的知识库问答系统[67e517e74767063e882d6861](QUOTE)。"
-
:"""{{question}}"""`,
['4.9.2']: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
<Reference>
{{quote}}
</Reference>
1. <Reference></Reference>
2.
3.
- <Reference></Reference>
- <Reference></Reference>
- 使 Markdown
- 使
:"""{{question}}"""`
}
},
{
title: i18nT('app:template.hard_strict'),
desc: '',
value: {
['4.9.2']: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。
<QA>
{{quote}}
</QA>
1. <QA></QA>
2.
3.
4.
5.
-
- <QA></QA>
- QA
- 使 Markdown
- 使
:"""{{question}}"""`
}
}
];
export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
{
title: i18nT('app:template.standard_template'),
desc: '',
value: {
['4.9.7']: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
<Reference>
{{quote}}
</Reference>
-
- <Reference></Reference>
- <Reference></Reference>
- 使 Markdown
- 使
- 使 [id](QUOTE) <Reference></Reference> QUOTE , id id
- : "FastGPT 是一个基于大语言模型(LLM)的知识库问答系统[67e517e74767063e882d6861](QUOTE)。"
- `,
['4.9.2']: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
<Reference>
{{quote}}
</Reference>
-
- <Reference></Reference>
- <Reference></Reference>
- 使 Markdown
- 使`
}
},
{
title: i18nT('app:template.qa_template'),
desc: '',
value: {
['4.9.2']: `使用 <QA></QA> 标记中的问答对进行回答。
<QA>
{{quote}}
</QA>
-
- <答案></答案>
-
- QA `
}
},
{
title: i18nT('app:template.standard_strict'),
desc: '',
value: {
['4.9.7']: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
<Reference>
{{quote}}
</Reference>
1. <Reference></Reference>
2.
3.
- <Reference></Reference>
- <Reference></Reference>
- 使 Markdown
- 使
- 使 [id](QUOTE) <Reference></Reference> QUOTE , id id
- : "FastGPT 是一个基于大语言模型(LLM)的知识库问答系统[67e517e74767063e882d6861](QUOTE)。"
-
:"""{{question}}"""`,
['4.9.2']: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考:
<Reference>
{{quote}}
</Reference>
1. <Reference></Reference>
2.
3.
- <Reference></Reference>
- <Reference></Reference>
- 使 Markdown
- 使`
}
},
{
title: i18nT('app:template.hard_strict'),
desc: '',
value: {
['4.9.2']: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。
<QA>
{{quote}}
</QA>
1. <QA></QA>
2.
3.
4.
5.
-
- <QA></QA>
- QA
- 使 Markdown
- 使`
}
}
];
export const getQuotePrompt = (version?: string, role: 'user' | 'system' = 'user') => {
const quotePromptTemplates =
role === 'user' ? Prompt_userQuotePromptList : Prompt_systemQuotePromptList;
@ -331,9 +312,9 @@ export const getQuotePrompt = (version?: string, role: 'user' | 'system' = 'user
};
// Document quote prompt
export const getDocumentQuotePrompt = (version: string) => {
export const getDocumentQuotePrompt = (version?: string) => {
const promptMap = {
['4.9.2']: `将 <FilesContent></FilesContent> 中的内容作为本次对话的参考:
['4.9.7']: `将 <FilesContent></FilesContent> 中的内容作为本次对话的参考:
<FilesContent>
{{quote}}
</FilesContent>

View File

@ -60,7 +60,7 @@ export const getExtractJsonToolPrompt = (version?: string) => {
"""
- {{description}}
-
-
-
"""
: """{{content}}"""

View File

@ -1,14 +1,19 @@
export const getDatasetSearchToolResponsePrompt = () => {
return `## Role
"quotes" 使
"cites" 使
## Rules
##
- 使 **[id](CITE)** "cites" CITE , id id
- **** : "Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)。"
- ****"Nginx是一款轻量级的Web服务器、反向代理服务器[67e517e74767063e882d6861](CITE)[67e517e74767063e882d6862](CITE)。\n 它的特点是非常轻量[67e517e74767063e882d6863](CITE)。"
-
- id id cites
##
-
- "quotes"
- "quotes"
- "cites"
- "cites"
- 使 Markdown
- 使
- 使 [id](QUOTE) "quotes" QUOTE , id id
- : "FastGPT 是一个基于大语言模型(LLM)的知识库问答系统[67e517e74767063e882d6861](QUOTE)。"
- `;
- 使`;
};

View File

@ -1,4 +1,5 @@
export const getPromptByVersion = (version?: string, promptMap: Record<string, string> = {}) => {
// 版本号大的在前面
const versions = Object.keys(promptMap).sort((a, b) => {
const [majorA, minorA, patchA] = a.split('.').map(Number);
const [majorB, minorB, patchB] = b.split('.').map(Number);
@ -15,5 +16,5 @@ export const getPromptByVersion = (version?: string, promptMap: Record<string, s
if (version in promptMap) {
return promptMap[version];
}
return promptMap[versions[versions.length - 1]];
return promptMap[versions[0]];
};

View File

@ -9,8 +9,8 @@ import type {
ChatCompletionAssistantMessageParam as SdkChatCompletionAssistantMessageParam
} from 'openai/resources';
import { ChatMessageTypeEnum } from './constants';
import { WorkflowInteractiveResponseType } from '../workflow/template/system/interactive/type';
import { Stream } from 'openai/streaming';
import type { WorkflowInteractiveResponseType } from '../workflow/template/system/interactive/type';
import type { Stream } from 'openai/streaming';
export * from 'openai/resources';
// Extension of ChatCompletionMessageParam, Add file url type
@ -60,6 +60,7 @@ export type ChatCompletionAssistantToolParam = {
tool_calls: ChatCompletionMessageToolCall[];
};
export type ChatCompletionMessageToolCall = ChatCompletionMessageToolCall & {
index?: number;
toolName?: string;
toolAvatar?: string;
};

View File

@ -1,9 +1,9 @@
import {
AppTTSConfigType,
AppFileSelectConfigType,
AppWhisperConfigType,
AppAutoExecuteConfigType,
AppQGConfigType
type AppTTSConfigType,
type AppFileSelectConfigType,
type AppWhisperConfigType,
type AppAutoExecuteConfigType,
type AppQGConfigType
} from './type';
export enum AppTypeEnum {
@ -60,5 +60,3 @@ export enum AppTemplateTypeEnum {
// special type
contribute = 'contribute'
}
export const defaultDatasetMaxTokens = 16000;

View File

@ -1,6 +1,6 @@
import { ParentIdType } from 'common/parentFolder/type';
import { AppSchema } from './type';
import { AppTypeEnum } from './constants';
import type { ParentIdType } from 'common/parentFolder/type';
import type { AppSchema } from './type';
import type { AppTypeEnum } from './constants';
export type CreateAppProps = {
parentId?: ParentIdType;

View File

@ -1,19 +1,19 @@
import { getNanoid } from '../../../common/string/tools';
import { OpenApiJsonSchema } from './type';
import { type OpenApiJsonSchema } from './type';
import yaml from 'js-yaml';
import { OpenAPIV3 } from 'openapi-types';
import { FlowNodeInputItemType, FlowNodeOutputItemType } from '../../workflow/type/io';
import type { OpenAPIV3 } from 'openapi-types';
import { type FlowNodeInputItemType, type FlowNodeOutputItemType } from '../../workflow/type/io';
import { FlowNodeInputTypeEnum, FlowNodeOutputTypeEnum } from '../../workflow/node/constant';
import { WorkflowIOValueTypeEnum } from '../../workflow/constants';
import { PluginInputModule } from '../../workflow/template/system/pluginInput';
import { PluginOutputModule } from '../../workflow/template/system/pluginOutput';
import { HttpNode468 } from '../../workflow/template/system/http468';
import { HttpParamAndHeaderItemType } from '../../workflow/api';
import { StoreNodeItemType } from '../../workflow/type/node';
import { type HttpParamAndHeaderItemType } from '../../workflow/api';
import { type StoreNodeItemType } from '../../workflow/type/node';
import { HttpImgUrl } from '../../../common/file/image/constants';
import SwaggerParser from '@apidevtools/swagger-parser';
import { getHandleId } from '../../workflow/utils';
import { CreateHttpPluginChildrenPros } from '../controller';
import { type CreateHttpPluginChildrenPros } from '../controller';
import { AppTypeEnum } from '../constants';
import type { StoreEdgeItemType } from '../../workflow/type/edge';

View File

@ -5,9 +5,9 @@ import {
FlowNodeTypeEnum
} from '../../workflow/node/constant';
import { nanoid } from 'nanoid';
import { ToolType } from '../type';
import { type McpToolConfigType } from '../type';
import { i18nT } from '../../../../web/i18n/utils';
import { RuntimeNodeItemType } from '../../workflow/runtime/type';
import { type RuntimeNodeItemType } from '../../workflow/runtime/type';
export const getMCPToolSetRuntimeNode = ({
url,
@ -16,7 +16,7 @@ export const getMCPToolSetRuntimeNode = ({
avatar
}: {
url: string;
toolList: ToolType[];
toolList: McpToolConfigType[];
name?: string;
avatar?: string;
}): RuntimeNodeItemType => {
@ -45,7 +45,7 @@ export const getMCPToolRuntimeNode = ({
url,
avatar = 'core/app/type/mcpToolsFill'
}: {
tool: ToolType;
tool: McpToolConfigType;
url: string;
avatar?: string;
}): RuntimeNodeItemType => {
@ -65,7 +65,7 @@ export const getMCPToolRuntimeNode = ({
...Object.entries(tool.inputSchema?.properties || {}).map(([key, value]) => ({
key,
label: key,
valueType: value.type as WorkflowIOValueTypeEnum,
valueType: value.type as WorkflowIOValueTypeEnum, // TODO: 这里需要做一个映射
description: value.description,
toolDescription: value.description || key,
required: tool.inputSchema?.required?.includes(key) || false,

View File

@ -1,5 +1,5 @@
import { StoreNodeItemType } from '../../workflow/type/node';
import { FlowNodeInputItemType } from '../../workflow/type/io';
import { type StoreNodeItemType } from '../../workflow/type/node';
import { type FlowNodeInputItemType } from '../../workflow/type/io';
import { FlowNodeTypeEnum } from '../../workflow/node/constant';
export const getPluginInputsFromStoreNodes = (nodes: StoreNodeItemType[]) => {

View File

@ -1,30 +1,20 @@
import type { FlowNodeTemplateType, StoreNodeItemType } from '../workflow/type/node';
import { AppTypeEnum } from './constants';
import type { AppTypeEnum } from './constants';
import { PermissionTypeEnum } from '../../support/permission/constant';
import {
import type {
NodeInputKeyEnum,
VariableInputEnum,
WorkflowIOValueTypeEnum
} from '../workflow/constants';
import { SelectedDatasetType } from '../workflow/api';
import { DatasetSearchModeEnum } from '../dataset/constants';
import type { SelectedDatasetType } from '../workflow/api';
import type { DatasetSearchModeEnum } from '../dataset/constants';
import { TeamTagSchema as TeamTagsSchemaType } from '@fastgpt/global/support/user/team/type.d';
import { StoreEdgeItemType } from '../workflow/type/edge';
import { AppPermission } from '../../support/permission/app/controller';
import { ParentIdType } from '../../common/parentFolder/type';
import type { StoreEdgeItemType } from '../workflow/type/edge';
import type { AppPermission } from '../../support/permission/app/controller';
import type { ParentIdType } from '../../common/parentFolder/type';
import { FlowNodeInputTypeEnum } from '../../core/workflow/node/constant';
import { WorkflowTemplateBasicType } from '@fastgpt/global/core/workflow/type';
import { SourceMemberType } from '../../support/user/type';
export type ToolType = {
name: string;
description: string;
inputSchema: {
type: string;
properties?: Record<string, { type: string; description?: string }>;
required?: string[];
};
};
import type { WorkflowTemplateBasicType } from '@fastgpt/global/core/workflow/type';
import type { SourceMemberType } from '../../support/user/type';
export type AppSchema = {
_id: string;
@ -117,6 +107,16 @@ export type AppSimpleEditFormType = {
chatConfig: AppChatConfigType;
};
export type McpToolConfigType = {
name: string;
description: string;
inputSchema: {
type: string;
properties?: Record<string, { type: string; description?: string }>;
required?: string[];
};
};
/* app chat config type */
export type AppChatConfigType = {
welcomeText?: string;

View File

@ -3,12 +3,15 @@ import { FlowNodeTypeEnum } from '../workflow/node/constant';
import { NodeInputKeyEnum, FlowNodeTemplateTypeEnum } from '../workflow/constants';
import type { FlowNodeInputItemType } from '../workflow/type/io.d';
import { getAppChatConfig } from '../workflow/utils';
import { StoreNodeItemType } from '../workflow/type/node';
import { type StoreNodeItemType } from '../workflow/type/node';
import { DatasetSearchModeEnum } from '../dataset/constants';
import { WorkflowTemplateBasicType } from '../workflow/type';
import { type WorkflowTemplateBasicType } from '../workflow/type';
import { AppTypeEnum } from './constants';
import { AppErrEnum } from '../../common/error/code/app';
import { PluginErrEnum } from '../../common/error/code/plugin';
import { i18nT } from '../../../web/i18n/utils';
import appErrList from '../../common/error/code/app';
import pluginErrList from '../../common/error/code/plugin';
export const getDefaultAppForm = (): AppSimpleEditFormType => {
return {
@ -189,17 +192,10 @@ export const getAppType = (config?: WorkflowTemplateBasicType | AppSimpleEditFor
return '';
};
export const checkAppUnExistError = (error?: string) => {
const unExistError: Array<string> = [
AppErrEnum.unAuthApp,
AppErrEnum.unExist,
PluginErrEnum.unAuth,
PluginErrEnum.unExist
];
export const formatToolError = (error?: any) => {
if (!error || typeof error !== 'string') return;
if (!!error && unExistError.includes(error)) {
return error;
} else {
return undefined;
}
const errorText = appErrList[error]?.message || pluginErrList[error]?.message;
return errorText || error;
};

View File

@ -1,7 +1,8 @@
import { TeamMemberStatusEnum } from 'support/user/team/constant';
import { StoreEdgeItemType } from '../workflow/type/edge';
import { AppChatConfigType, AppSchema } from './type';
import { SourceMemberType } from 'support/user/type';
import type { AppSchema } from './type';
import { AppChatConfigType } from './type';
import type { SourceMemberType } from 'support/user/type';
export type AppVersionSchemaType = {
_id: string;

View File

@ -1,4 +1,4 @@
import { OutLinkChatAuthProps } from '../../support/permission/chat';
import type { OutLinkChatAuthProps } from '../../support/permission/chat';
export type UpdateChatFeedbackProps = OutLinkChatAuthProps & {
appId: string;

View File

@ -1,22 +1,23 @@
import { ClassifyQuestionAgentItemType } from '../workflow/template/system/classifyQuestion/type';
import { SearchDataResponseItemType } from '../dataset/type';
import {
import type { SearchDataResponseItemType } from '../dataset/type';
import type {
ChatFileTypeEnum,
ChatItemValueTypeEnum,
ChatRoleEnum,
ChatSourceEnum,
ChatStatusEnum
} from './constants';
import { FlowNodeTypeEnum } from '../workflow/node/constant';
import { NodeOutputKeyEnum } from '../workflow/constants';
import { DispatchNodeResponseKeyEnum } from '../workflow/runtime/constants';
import { AppChatConfigType, AppSchema, VariableItemType } from '../app/type';
import type { FlowNodeTypeEnum } from '../workflow/node/constant';
import type { NodeOutputKeyEnum } from '../workflow/constants';
import type { DispatchNodeResponseKeyEnum } from '../workflow/runtime/constants';
import type { AppSchema, VariableItemType } from '../app/type';
import { AppChatConfigType } from '../app/type';
import type { AppSchema as AppType } from '@fastgpt/global/core/app/type.d';
import { DatasetSearchModeEnum } from '../dataset/constants';
import { DispatchNodeResponseType } from '../workflow/runtime/type.d';
import { ChatBoxInputType } from '../../../../projects/app/src/components/core/chat/ChatContainer/ChatBox/type';
import { WorkflowInteractiveResponseType } from '../workflow/template/system/interactive/type';
import { FlowNodeInputItemType } from '../workflow/type/io';
import type { DispatchNodeResponseType } from '../workflow/runtime/type.d';
import type { ChatBoxInputType } from '../../../../projects/app/src/components/core/chat/ChatContainer/ChatBox/type';
import type { WorkflowInteractiveResponseType } from '../workflow/template/system/interactive/type';
import type { FlowNodeInputItemType } from '../workflow/type/io';
export type ChatSchema = {
_id: string;
@ -25,6 +26,7 @@ export type ChatSchema = {
teamId: string;
tmbId: string;
appId: string;
createTime: Date;
updateTime: Date;
title: string;
customTitle: string;
@ -111,6 +113,7 @@ export type ChatItemSchema = (UserChatItemType | SystemChatItemType | AIChatItem
appId: string;
time: Date;
durationSeconds?: number;
errorMsg?: string;
};
export type AdminFbkType = {
@ -142,6 +145,7 @@ export type ChatSiteItemType = (UserChatItemType | SystemChatItemType | AIChatIt
responseData?: ChatHistoryItemResType[];
time?: Date;
durationSeconds?: number;
errorMsg?: string;
} & ChatBoxInputType &
ResponseTagItemType;

View File

@ -1,9 +1,15 @@
import { DispatchNodeResponseType } from '../workflow/runtime/type';
import { type DispatchNodeResponseType } from '../workflow/runtime/type';
import { FlowNodeTypeEnum } from '../workflow/node/constant';
import { ChatItemValueTypeEnum, ChatRoleEnum, ChatSourceEnum } from './constants';
import { ChatHistoryItemResType, ChatItemType, UserChatItemValueItemType } from './type.d';
import {
type AIChatItemValueItemType,
type ChatHistoryItemResType,
type ChatItemType,
type UserChatItemValueItemType
} from './type.d';
import { sliceStrStartEnd } from '../../common/string/tools';
import { PublishChannelEnum } from '../../support/outLink/constant';
import { removeDatasetCiteText } from '../../../service/core/ai/utils';
// Concat 2 -> 1, and sort by role
export const concatHistories = (histories1: ChatItemType[], histories2: ChatItemType[]) => {
@ -77,6 +83,7 @@ export const getHistoryPreview = (
});
};
// Filter workflow public response
export const filterPublicNodeResponseData = ({
flowResponses = [],
responseDetail = false
@ -112,6 +119,40 @@ export const filterPublicNodeResponseData = ({
});
};
// Remove dataset cite in ai response
export const removeAIResponseCite = <T extends AIChatItemValueItemType[] | string>(
value: T,
retainCite: boolean
): T => {
if (retainCite) return value;
if (typeof value === 'string') {
return removeDatasetCiteText(value, false) as T;
}
return value.map<AIChatItemValueItemType>((item) => {
if (item.text?.content) {
return {
...item,
text: {
...item.text,
content: removeDatasetCiteText(item.text.content, false)
}
};
}
if (item.reasoning?.content) {
return {
...item,
reasoning: {
...item.reasoning,
content: removeDatasetCiteText(item.reasoning.content, false)
}
};
}
return item;
}) as T;
};
export const removeEmptyUserInput = (input?: UserChatItemValueItemType[]) => {
return (
input?.filter((item) => {

View File

@ -1,12 +1,14 @@
import { DatasetDataIndexItemType, DatasetSchemaType } from './type';
import {
import type { ChunkSettingsType, DatasetDataIndexItemType, DatasetSchemaType } from './type';
import type {
DatasetCollectionTypeEnum,
DatasetCollectionDataProcessModeEnum,
ChunkSettingModeEnum,
DataChunkSplitModeEnum
DataChunkSplitModeEnum,
ChunkTriggerConfigTypeEnum,
ParagraphChunkAIModeEnum
} from './constants';
import type { LLMModelItemType } from '../ai/model.d';
import { ParentIdType } from 'common/parentFolder/type';
import type { ParentIdType } from 'common/parentFolder/type';
/* ================= dataset ===================== */
export type DatasetUpdateBody = {
@ -32,26 +34,16 @@ export type DatasetUpdateBody = {
};
/* ================= collection ===================== */
export type DatasetCollectionChunkMetadataType = {
// Input + store params
type DatasetCollectionStoreDataType = ChunkSettingsType & {
parentId?: string;
customPdfParse?: boolean;
trainingType?: DatasetCollectionDataProcessModeEnum;
imageIndex?: boolean;
autoIndexes?: boolean;
chunkSettingMode?: ChunkSettingModeEnum;
chunkSplitMode?: DataChunkSplitModeEnum;
chunkSize?: number;
indexSize?: number;
chunkSplitter?: string;
qaPrompt?: string;
metadata?: Record<string, any>;
customPdfParse?: boolean;
};
// create collection params
export type CreateDatasetCollectionParams = DatasetCollectionChunkMetadataType & {
export type CreateDatasetCollectionParams = DatasetCollectionStoreDataType & {
datasetId: string;
name: string;
type: DatasetCollectionTypeEnum;
@ -72,7 +64,7 @@ export type CreateDatasetCollectionParams = DatasetCollectionChunkMetadataType &
nextSyncTime?: Date;
};
export type ApiCreateDatasetCollectionParams = DatasetCollectionChunkMetadataType & {
export type ApiCreateDatasetCollectionParams = DatasetCollectionStoreDataType & {
datasetId: string;
tags?: string[];
};
@ -90,7 +82,7 @@ export type ApiDatasetCreateDatasetCollectionParams = ApiCreateDatasetCollection
export type FileIdCreateDatasetCollectionParams = ApiCreateDatasetCollectionParams & {
fileId: string;
};
export type reTrainingDatasetFileCollectionParams = DatasetCollectionChunkMetadataType & {
export type reTrainingDatasetFileCollectionParams = DatasetCollectionStoreDataType & {
datasetId: string;
collectionId: string;
};
@ -147,6 +139,7 @@ export type PushDatasetDataProps = {
collectionId: string;
data: PushDatasetDataChunkProps[];
trainingType?: DatasetCollectionDataProcessModeEnum;
indexSize?: number;
autoIndexes?: boolean;
imageIndex?: boolean;
prompt?: string;

View File

@ -1,8 +1,9 @@
import { RequireOnlyOne } from '../../common/type/utils';
import type { ParentIdType } from '../../common/parentFolder/type.d';
export type APIFileItem = {
id: string;
parentId: string | null;
parentId: ParentIdType;
name: string;
type: 'file' | 'folder';
updateTime: Date;
@ -10,10 +11,24 @@ export type APIFileItem = {
hasChild?: boolean;
};
// Api dataset config
export type APIFileServer = {
baseUrl: string;
authorization: string;
authorization?: string;
basePath?: string;
};
export type FeishuServer = {
appId: string;
appSecret?: string;
folderToken: string;
};
export type YuqueServer = {
userId: string;
token?: string;
basePath?: string;
};
// Api dataset api
export type APIFileListResponse = APIFileItem[];
@ -26,13 +41,8 @@ export type APIFileReadResponse = {
url: string;
};
export type FeishuServer = {
appId: string;
appSecret: string;
folderToken: string;
};
export type YuqueServer = {
userId: string;
token: string;
export type ApiDatasetDetailResponse = {
id: string;
name: string;
parentId: ParentIdType;
};

View File

@ -1,6 +1,6 @@
import { UpdateClbPermissionProps } from '../../support/permission/collaborator';
import type { UpdateClbPermissionProps } from '../../support/permission/collaborator';
import { PermissionValueType } from '../../support/permission/type';
import { RequireOnlyOne } from '../../common/type/utils';
import type { RequireOnlyOne } from '../../common/type/utils';
export type UpdateDatasetCollaboratorBody = UpdateClbPermissionProps & {
datasetId: string;

View File

@ -1,5 +1,5 @@
import { DatasetCollectionTypeEnum } from '../constants';
import { DatasetCollectionSchemaType } from '../type';
import { type DatasetCollectionSchemaType } from '../type';
export const getCollectionSourceData = (collection?: DatasetCollectionSchemaType) => {
return {

View File

@ -19,32 +19,32 @@ export const DatasetTypeMap = {
[DatasetTypeEnum.dataset]: {
icon: 'core/dataset/commonDatasetOutline',
label: i18nT('dataset:common_dataset'),
collectionLabel: i18nT('common:common.File')
collectionLabel: i18nT('common:File')
},
[DatasetTypeEnum.websiteDataset]: {
icon: 'core/dataset/websiteDatasetOutline',
label: i18nT('dataset:website_dataset'),
collectionLabel: i18nT('common:common.Website')
collectionLabel: i18nT('common:Website')
},
[DatasetTypeEnum.externalFile]: {
icon: 'core/dataset/externalDatasetOutline',
label: i18nT('dataset:external_file'),
collectionLabel: i18nT('common:common.File')
collectionLabel: i18nT('common:File')
},
[DatasetTypeEnum.apiDataset]: {
icon: 'core/dataset/externalDatasetOutline',
label: i18nT('dataset:api_file'),
collectionLabel: i18nT('common:common.File')
collectionLabel: i18nT('common:File')
},
[DatasetTypeEnum.feishu]: {
icon: 'core/dataset/feishuDatasetOutline',
label: i18nT('dataset:feishu_dataset'),
collectionLabel: i18nT('common:common.File')
collectionLabel: i18nT('common:File')
},
[DatasetTypeEnum.yuque]: {
icon: 'core/dataset/yuqueDatasetOutline',
label: i18nT('dataset:yuque_dataset'),
collectionLabel: i18nT('common:common.File')
collectionLabel: i18nT('common:File')
}
};
@ -120,6 +120,8 @@ export const DatasetCollectionSyncResultMap = {
export enum DatasetCollectionDataProcessModeEnum {
chunk = 'chunk',
qa = 'qa',
backup = 'backup',
auto = 'auto' // abandon
}
export const DatasetCollectionDataProcessModeMap = {
@ -131,21 +133,35 @@ export const DatasetCollectionDataProcessModeMap = {
label: i18nT('common:core.dataset.training.QA mode'),
tooltip: i18nT('common:core.dataset.import.QA Import Tip')
},
[DatasetCollectionDataProcessModeEnum.backup]: {
label: i18nT('dataset:backup_mode'),
tooltip: i18nT('dataset:backup_mode')
},
[DatasetCollectionDataProcessModeEnum.auto]: {
label: i18nT('common:core.dataset.training.Auto mode'),
tooltip: i18nT('common:core.dataset.training.Auto mode Tip')
}
};
export enum ChunkTriggerConfigTypeEnum {
minSize = 'minSize',
forceChunk = 'forceChunk',
maxSize = 'maxSize'
}
export enum ChunkSettingModeEnum {
auto = 'auto',
custom = 'custom'
}
export enum DataChunkSplitModeEnum {
paragraph = 'paragraph',
size = 'size',
char = 'char'
}
export enum ParagraphChunkAIModeEnum {
auto = 'auto',
force = 'force'
}
/* ------------ data -------------- */
@ -154,7 +170,6 @@ export enum ImportDataSourceEnum {
fileLocal = 'fileLocal',
fileLink = 'fileLink',
fileCustom = 'fileCustom',
csvTable = 'csvTable',
externalFile = 'externalFile',
apiDataset = 'apiDataset',
reTraining = 'reTraining'

View File

@ -32,7 +32,7 @@ export const DatasetDataIndexMap: Record<
color: 'red'
},
[DatasetDataIndexTypeEnum.image]: {
label: i18nT('common:data_index_image'),
label: i18nT('dataset:data_index_image'),
color: 'purple'
}
};

View File

@ -1,5 +1,5 @@
import { SearchScoreTypeEnum } from '../constants';
import { SearchDataResponseItemType } from '../type';
import { type SearchDataResponseItemType } from '../type';
/* dataset search result concat */
export const datasetSearchResultConcat = (

View File

@ -1,5 +1,5 @@
import { PushDatasetDataChunkProps } from '../api';
import { TrainingModeEnum } from '../constants';
import type { PushDatasetDataChunkProps } from '../api';
import type { TrainingModeEnum } from '../constants';
export type PushDataToTrainingQueueProps = {
teamId: string;

View File

@ -1,4 +1,4 @@
import { EmbeddingModelItemType, LLMModelItemType } from '../../../core/ai/model.d';
import { type EmbeddingModelItemType, type LLMModelItemType } from '../../../core/ai/model.d';
import {
ChunkSettingModeEnum,
DataChunkSplitModeEnum,
@ -8,7 +8,7 @@ import {
export const minChunkSize = 64; // min index and chunk size
// Chunk size
export const chunkAutoChunkSize = 1500;
export const chunkAutoChunkSize = 1000;
export const getMaxChunkSize = (model: LLMModelItemType) => {
return Math.max(model.maxContext - model.maxResponse, 2000);
};
@ -118,9 +118,8 @@ export const computeChunkSize = (params: {
return getLLMMaxChunkSize(params.llmModel);
}
return Math.min(params.chunkSize || chunkAutoChunkSize, getLLMMaxChunkSize(params.llmModel));
return Math.min(params.chunkSize ?? chunkAutoChunkSize, getLLMMaxChunkSize(params.llmModel));
};
export const computeChunkSplitter = (params: {
chunkSettingMode?: ChunkSettingModeEnum;
chunkSplitMode?: DataChunkSplitModeEnum;
@ -129,8 +128,21 @@ export const computeChunkSplitter = (params: {
if (params.chunkSettingMode === ChunkSettingModeEnum.auto) {
return undefined;
}
if (params.chunkSplitMode === DataChunkSplitModeEnum.size) {
if (params.chunkSplitMode !== DataChunkSplitModeEnum.char) {
return undefined;
}
return params.chunkSplitter;
};
export const computeParagraphChunkDeep = (params: {
chunkSettingMode?: ChunkSettingModeEnum;
chunkSplitMode?: DataChunkSplitModeEnum;
paragraphChunkDeep?: number;
}) => {
if (params.chunkSettingMode === ChunkSettingModeEnum.auto) {
return 5;
}
if (params.chunkSplitMode === DataChunkSplitModeEnum.paragraph) {
return params.paragraphChunkDeep;
}
return 0;
};

View File

@ -1,33 +1,49 @@
import type { LLMModelItemType, EmbeddingModelItemType } from '../../core/ai/model.d';
import { PermissionTypeEnum } from '../../support/permission/constant';
import { PushDatasetDataChunkProps } from './api';
import {
import type {
DataChunkSplitModeEnum,
DatasetCollectionDataProcessModeEnum,
DatasetCollectionTypeEnum,
DatasetStatusEnum,
DatasetTypeEnum,
SearchScoreTypeEnum,
TrainingModeEnum
TrainingModeEnum,
ChunkSettingModeEnum,
ChunkTriggerConfigTypeEnum
} from './constants';
import { DatasetPermission } from '../../support/permission/dataset/controller';
import { Permission } from '../../support/permission/controller';
import { APIFileServer, FeishuServer, YuqueServer } from './apiDataset';
import { SourceMemberType } from 'support/user/type';
import { DatasetDataIndexTypeEnum } from './data/constants';
import { ChunkSettingModeEnum } from './constants';
import type { DatasetPermission } from '../../support/permission/dataset/controller';
import type { APIFileServer, FeishuServer, YuqueServer } from './apiDataset';
import type { SourceMemberType } from 'support/user/type';
import type { DatasetDataIndexTypeEnum } from './data/constants';
export type ChunkSettingsType = {
trainingType: DatasetCollectionDataProcessModeEnum;
autoIndexes?: boolean;
trainingType?: DatasetCollectionDataProcessModeEnum;
// Chunk trigger
chunkTriggerType?: ChunkTriggerConfigTypeEnum;
chunkTriggerMinSize?: number; // maxSize from agent model, not store
// Data enhance
dataEnhanceCollectionName?: boolean; // Auto add collection name to data
// Index enhance
imageIndex?: boolean;
autoIndexes?: boolean;
chunkSettingMode?: ChunkSettingModeEnum;
// Chunk setting
chunkSettingMode?: ChunkSettingModeEnum; // 系统参数/自定义参数
chunkSplitMode?: DataChunkSplitModeEnum;
chunkSize?: number;
// Paragraph split
paragraphChunkAIMode?: ParagraphChunkAIModeEnum;
paragraphChunkDeep?: number; // Paragraph deep
paragraphChunkMinSize?: number; // Paragraph min size, if too small, it will merge
// Size split
chunkSize?: number; // chunk/qa chunk size, Paragraph max chunk size.
// Char split
chunkSplitter?: string; // chunk/qa chunk splitter
indexSize?: number;
chunkSplitter?: string;
qaPrompt?: string;
};
@ -66,7 +82,7 @@ export type DatasetSchemaType = {
defaultPermission?: number;
};
export type DatasetCollectionSchemaType = {
export type DatasetCollectionSchemaType = ChunkSettingsType & {
_id: string;
teamId: string;
tmbId: string;
@ -101,18 +117,7 @@ export type DatasetCollectionSchemaType = {
// Parse settings
customPdfParse?: boolean;
// Chunk settings
autoIndexes?: boolean;
imageIndex?: boolean;
trainingType: DatasetCollectionDataProcessModeEnum;
chunkSettingMode?: ChunkSettingModeEnum;
chunkSplitMode?: DataChunkSplitModeEnum;
chunkSize?: number;
indexSize?: number;
chunkSplitter?: string;
qaPrompt?: string;
};
export type DatasetCollectionTagsSchemaType = {
@ -175,6 +180,7 @@ export type DatasetTrainingSchemaType = {
q: string;
a: string;
chunkIndex: number;
indexSize?: number;
weight: number;
indexes: Omit<DatasetDataIndexItemType, 'dataId'>[];
retryCount: number;

View File

@ -40,5 +40,6 @@ export function getSourceNameIcon({
export const predictDataLimitLength = (mode: TrainingModeEnum, data: any[]) => {
if (mode === TrainingModeEnum.qa) return data.length * 20;
if (mode === TrainingModeEnum.auto) return data.length * 5;
if (mode === TrainingModeEnum.image) return data.length * 2;
return data.length;
};

View File

@ -1,6 +1,6 @@
import { StoreEdgeItemType } from 'core/workflow/type/edge';
import type { StoreEdgeItemType } from 'core/workflow/type/edge';
import type { StoreNodeItemType } from '../workflow/type/node';
import { PluginTypeEnum } from './constants';
import type { PluginTypeEnum } from './constants';
import { HttpAuthMethodType } from '../app/httpPlugin/type';
export type CreateOnePluginParams = {

View File

@ -1,9 +1,9 @@
import { StoreEdgeItemType } from 'core/workflow/type/edge';
import type { StoreEdgeItemType } from 'core/workflow/type/edge';
import { ModuleTemplateTypeEnum } from '../workflow/constants';
import type { StoreNodeItemType } from '../workflow/type/node';
import { PluginSourceEnum, PluginTypeEnum } from './constants';
import type { PluginSourceEnum, PluginTypeEnum } from './constants';
import { MethodType } from './controller';
import { FlowNodeTemplateType } from '../workflow/type/node';
import type { FlowNodeTemplateType } from '../workflow/type/node';
export type PluginItemSchema = {
_id: string;

View File

@ -1,4 +1,4 @@
import { EmbeddingModelItemType } from '../ai/model.d';
import type { EmbeddingModelItemType } from '../ai/model.d';
import { NodeInputKeyEnum } from './constants';
export type SelectedDatasetType = {

View File

@ -218,7 +218,6 @@ export const FlowValueTypeMap: Record<
};
export const EDGE_TYPE = 'default';
export const defaultNodeVersion = '481';
export const chatHistoryValueDesc = `{
obj: System | Human | AI;
@ -236,3 +235,10 @@ export const datasetQuoteValueDesc = `{
export const datasetSelectValueDesc = `{
datasetId: string;
}[]`;
export const AppNodeFlowNodeTypeMap: Record<any, boolean> = {
[FlowNodeTypeEnum.pluginModule]: true,
[FlowNodeTypeEnum.appModule]: true,
[FlowNodeTypeEnum.tool]: true,
[FlowNodeTypeEnum.toolSet]: true
};

View File

@ -1,4 +1,4 @@
import { FlowNodeInputItemType } from '../../type/io';
import { type FlowNodeInputItemType } from '../../type/io';
export const getInputComponentProps = (input: FlowNodeInputItemType) => {
return {

View File

@ -1,30 +1,32 @@
import { ChatNodeUsageType } from '../../../support/wallet/bill/type';
import {
import type { ChatNodeUsageType } from '../../../support/wallet/bill/type';
import type {
ChatItemType,
UserChatItemValueItemType,
ToolRunResponseItemType,
NodeOutputItemType,
AIChatItemValueItemType
} from '../../chat/type';
import { FlowNodeInputItemType, FlowNodeOutputItemType } from '../type/io.d';
import { StoreNodeItemType } from '../type/node';
import { DispatchNodeResponseKeyEnum } from './constants';
import { StoreEdgeItemType } from '../type/edge';
import { NodeInputKeyEnum } from '../constants';
import { ClassifyQuestionAgentItemType } from '../template/system/classifyQuestion/type';
import { NextApiResponse } from 'next';
import { NodeOutputItemType } from '../../chat/type';
import type { FlowNodeInputItemType, FlowNodeOutputItemType } from '../type/io.d';
import type { NodeToolConfigType, StoreNodeItemType } from '../type/node';
import type { DispatchNodeResponseKeyEnum } from './constants';
import type { StoreEdgeItemType } from '../type/edge';
import type { NodeInputKeyEnum } from '../constants';
import type { ClassifyQuestionAgentItemType } from '../template/system/classifyQuestion/type';
import type { NextApiResponse } from 'next';
import { UserModelSchema } from '../../../support/user/type';
import { AppDetailType, AppSchema } from '../../app/type';
import { RuntimeNodeItemType } from '../runtime/type';
import { RuntimeEdgeItemType } from './edge';
import { ReadFileNodeResponse } from '../template/system/readFiles/type';
import type { AppSchema } from '../../app/type';
import { AppDetailType } from '../../app/type';
import type { RuntimeNodeItemType } from '../runtime/type';
import type { RuntimeEdgeItemType } from './edge';
import type { ReadFileNodeResponse } from '../template/system/readFiles/type';
import { UserSelectOptionType } from '../template/system/userSelect/type';
import { WorkflowResponseType } from '../../../../service/core/workflow/dispatch/type';
import { AiChatQuoteRoleType } from '../template/system/aiChat/type';
import { LafAccountType, OpenaiAccountType } from '../../../support/user/team/type';
import { CompletionFinishReason } from '../../ai/type';
import { WorkflowInteractiveResponseType } from '../template/system/interactive/type';
import { SearchDataResponseItemType } from '../../dataset/type';
import type { WorkflowResponseType } from '../../../../service/core/workflow/dispatch/type';
import type { AiChatQuoteRoleType } from '../template/system/aiChat/type';
import type { OpenaiAccountType } from '../../../support/user/team/type';
import { LafAccountType } from '../../../support/user/team/type';
import type { CompletionFinishReason } from '../../ai/type';
import type { WorkflowInteractiveResponseType } from '../template/system/interactive/type';
import type { SearchDataResponseItemType } from '../../dataset/type';
export type ExternalProviderType = {
openaiAccount?: OpenaiAccountType;
externalWorkflowVariables?: Record<string, string>;
@ -58,6 +60,7 @@ export type ChatDispatchProps = {
chatConfig: AppSchema['chatConfig'];
lastInteractive?: WorkflowInteractiveResponseType; // last interactive response
stream: boolean;
retainDatasetCite?: boolean;
maxRunTimes: number;
isToolCall?: boolean;
workflowStreamResponse?: WorkflowResponseType;
@ -98,7 +101,10 @@ export type RuntimeNodeItemType = {
outputs: FlowNodeOutputItemType[];
pluginId?: string; // workflow id / plugin id
version: string;
version?: string;
// tool
toolConfig?: NodeToolConfigType;
};
export type RuntimeEdgeItemType = StoreEdgeItemType & {
@ -111,7 +117,7 @@ export type DispatchNodeResponseType = {
runningTime?: number;
query?: string;
textOutput?: string;
error?: Record<string, any>;
error?: Record<string, any> | string;
customInputs?: Record<string, any>;
customOutputs?: Record<string, any>;
nodeInputs?: Record<string, any>;

View File

@ -1,19 +1,19 @@
import { ChatCompletionRequestMessageRoleEnum } from '../../ai/constants';
import { NodeInputKeyEnum, NodeOutputKeyEnum, WorkflowIOValueTypeEnum } from '../constants';
import { FlowNodeTypeEnum } from '../node/constant';
import { StoreNodeItemType } from '../type/node';
import { StoreEdgeItemType } from '../type/edge';
import { RuntimeEdgeItemType, RuntimeNodeItemType } from './type';
import { type StoreNodeItemType } from '../type/node';
import { type StoreEdgeItemType } from '../type/edge';
import { type RuntimeEdgeItemType, type RuntimeNodeItemType } from './type';
import { VARIABLE_NODE_ID } from '../constants';
import { isValidReferenceValueFormat } from '../utils';
import { FlowNodeOutputItemType, ReferenceValueType } from '../type/io';
import { ChatItemType, NodeOutputItemType } from '../../../core/chat/type';
import { type FlowNodeOutputItemType, type ReferenceValueType } from '../type/io';
import { type ChatItemType, type NodeOutputItemType } from '../../../core/chat/type';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '../../../core/chat/constants';
import { replaceVariable, valToStr } from '../../../common/string/tools';
import json5 from 'json5';
import {
InteractiveNodeResponseType,
WorkflowInteractiveResponseType
type InteractiveNodeResponseType,
type WorkflowInteractiveResponseType
} from '../template/system/interactive/type';
export const extractDeepestInteractive = (

View File

@ -2,7 +2,7 @@ import { NodeInputKeyEnum } from '../constants';
import { FlowNodeInputTypeEnum } from '../node/constant';
import { WorkflowIOValueTypeEnum } from '../constants';
import { chatNodeSystemPromptTip, systemPromptTip } from './tip';
import { FlowNodeInputItemType } from '../type/io';
import { type FlowNodeInputItemType } from '../type/io';
import { i18nT } from '../../../../web/i18n/utils';
export const Input_Template_History: FlowNodeInputItemType = {

View File

@ -4,7 +4,7 @@ import {
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../../../node/constant';
import { FlowNodeTemplateType } from '../../../../type/node';
import { type FlowNodeTemplateType } from '../../../../type/node';
import {
WorkflowIOValueTypeEnum,
NodeInputKeyEnum,
@ -25,7 +25,6 @@ export const RunAppModule: FlowNodeTemplateType = {
name: i18nT('workflow:application_call'),
intro: i18nT('workflow:select_another_application_to_call'),
showStatus: true,
version: '481',
isTool: true,
inputs: [
{

View File

@ -4,7 +4,7 @@ import {
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../../node/constant';
import { FlowNodeTemplateType } from '../../../type/node';
import { type FlowNodeTemplateType } from '../../../type/node';
import {
WorkflowIOValueTypeEnum,
NodeInputKeyEnum,
@ -19,7 +19,6 @@ import {
Input_Template_UserChatInput,
Input_Template_File_Link
} from '../../input';
import { chatNodeSystemPromptTip, systemPromptTip } from '../../tip';
import { getHandleConfig } from '../../utils';
import { i18nT } from '../../../../../../web/i18n/utils';
@ -54,7 +53,7 @@ export const AiChatModule: FlowNodeTemplateType = {
intro: i18nT('workflow:template.ai_chat_intro'),
showStatus: true,
isTool: true,
courseUrl: '/docs/guide/workbench/workflow/ai_chat/',
courseUrl: '/docs/guide/dashboard/workflow/ai_chat/',
version: '4.9.7',
inputs: [
Input_Template_SettingAiModel,
@ -121,12 +120,7 @@ export const AiChatModule: FlowNodeTemplateType = {
valueType: WorkflowIOValueTypeEnum.string
},
// settings modal ---
{
...Input_Template_System_Prompt,
label: i18nT('common:core.ai.Prompt'),
description: systemPromptTip,
placeholder: chatNodeSystemPromptTip
},
Input_Template_System_Prompt,
Input_Template_History,
Input_Template_Dataset_Quote,
Input_Template_File_Link,

View File

@ -1,5 +1,5 @@
import { FlowNodeInputTypeEnum, FlowNodeTypeEnum } from '../../node/constant';
import { FlowNodeTemplateType } from '../../type/node.d';
import { type FlowNodeTemplateType } from '../../type/node.d';
import {
WorkflowIOValueTypeEnum,
NodeInputKeyEnum,
@ -17,8 +17,7 @@ export const AssignedAnswerModule: FlowNodeTemplateType = {
avatar: 'core/workflow/template/reply',
name: i18nT('workflow:assigned_reply'),
intro: i18nT('workflow:intro_assigned_reply'),
courseUrl: '/docs/guide/workbench/workflow/reply/',
version: '481',
courseUrl: '/docs/guide/dashboard/workflow/reply/',
isTool: true,
inputs: [
{

View File

@ -3,7 +3,7 @@ import {
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../../node/constant';
import { FlowNodeTemplateType } from '../../../type/node';
import { type FlowNodeTemplateType } from '../../../type/node';
import {
WorkflowIOValueTypeEnum,
NodeInputKeyEnum,
@ -31,7 +31,7 @@ export const ClassifyQuestionModule: FlowNodeTemplateType = {
intro: i18nT('workflow:intro_question_classification'),
showStatus: true,
version: '4.9.2',
courseUrl: '/docs/guide/workbench/workflow/question_classify/',
courseUrl: '/docs/guide/dashboard/workflow/question_classify/',
inputs: [
{
...Input_Template_SelectAIModel,

View File

@ -1,5 +1,5 @@
import { FlowNodeTypeEnum } from '../../node/constant';
import { FlowNodeTemplateType } from '../../type/node.d';
import { type FlowNodeTemplateType } from '../../type/node.d';
import {
FlowNodeTemplateTypeEnum,
NodeInputKeyEnum,

View File

@ -3,7 +3,7 @@ import {
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../../node/constant';
import { FlowNodeTemplateType } from '../../../type/node';
import { type FlowNodeTemplateType } from '../../../type/node';
import {
WorkflowIOValueTypeEnum,
NodeInputKeyEnum,
@ -26,7 +26,7 @@ export const ContextExtractModule: FlowNodeTemplateType = {
intro: i18nT('workflow:intro_text_content_extraction'),
showStatus: true,
isTool: true,
courseUrl: '/docs/guide/workbench/workflow/content_extract/',
courseUrl: '/docs/guide/dashboard/workflow/content_extract/',
version: '4.9.2',
inputs: [
{

View File

@ -1,4 +1,4 @@
import { WorkflowIOValueTypeEnum } from '../../../constants';
import type { WorkflowIOValueTypeEnum } from '../../../constants';
export type ContextExtractAgentItemType = {
valueType:

View File

@ -1,5 +1,5 @@
import { FlowNodeInputTypeEnum, FlowNodeTypeEnum } from '../../node/constant';
import { FlowNodeTemplateType } from '../../type/node.d';
import { type FlowNodeTemplateType } from '../../type/node.d';
import {
WorkflowIOValueTypeEnum,
FlowNodeTemplateTypeEnum,
@ -17,8 +17,7 @@ export const CustomFeedbackNode: FlowNodeTemplateType = {
avatar: 'core/workflow/template/customFeedback',
name: i18nT('workflow:custom_feedback'),
intro: i18nT('workflow:intro_custom_feedback'),
courseUrl: '/docs/guide/workbench/workflow/custom_feedback/',
version: '486',
courseUrl: '/docs/guide/dashboard/workflow/custom_feedback/',
inputs: [
{
key: NodeInputKeyEnum.textareaInput,

View File

@ -4,7 +4,7 @@ import {
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../node/constant';
import { FlowNodeTemplateType } from '../../type/node';
import { type FlowNodeTemplateType } from '../../type/node';
import {
WorkflowIOValueTypeEnum,
NodeInputKeyEnum,
@ -13,7 +13,7 @@ import {
} from '../../constants';
import { getNanoid } from '../../../../common/string/tools';
import { getHandleConfig } from '../utils';
import { FlowNodeInputItemType } from '../../type/io.d';
import { type FlowNodeInputItemType } from '../../type/io.d';
import { i18nT } from '../../../../../web/i18n/utils';
export const getOneQuoteInputTemplate = ({
@ -42,8 +42,7 @@ export const DatasetConcatModule: FlowNodeTemplateType = {
intro: i18nT('workflow:intro_knowledge_base_search_merge'),
showStatus: false,
version: '486',
courseUrl: '/docs/guide/workbench/workflow/knowledge_base_search_merge/',
courseUrl: '/docs/guide/dashboard/workflow/knowledge_base_search_merge/',
inputs: [
{
key: NodeInputKeyEnum.datasetMaxTokens,

View File

@ -5,7 +5,7 @@ import {
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../node/constant';
import { FlowNodeTemplateType } from '../../type/node';
import { type FlowNodeTemplateType } from '../../type/node';
import {
WorkflowIOValueTypeEnum,
NodeInputKeyEnum,
@ -30,7 +30,7 @@ export const DatasetSearchModule: FlowNodeTemplateType = {
intro: Dataset_SEARCH_DESC,
showStatus: true,
isTool: true,
courseUrl: '/docs/guide/workbench/workflow/dataset_search/',
courseUrl: '/docs/guide/dashboard/workflow/dataset_search/',
version: '4.9.2',
inputs: [
{
@ -54,7 +54,7 @@ export const DatasetSearchModule: FlowNodeTemplateType = {
key: NodeInputKeyEnum.datasetMaxTokens,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
value: 1500,
value: 5000,
valueType: WorkflowIOValueTypeEnum.number
},
{

View File

@ -1,5 +1,5 @@
import { FlowNodeTypeEnum } from '../../node/constant';
import { FlowNodeTemplateType } from '../../type/node';
import { type FlowNodeTemplateType } from '../../type/node';
import { FlowNodeTemplateTypeEnum } from '../../constants';
import { getHandleConfig } from '../utils';

View File

@ -3,7 +3,7 @@ import {
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../node/constant';
import { FlowNodeTemplateType } from '../../type/node.d';
import { type FlowNodeTemplateType } from '../../type/node.d';
import {
WorkflowIOValueTypeEnum,
NodeInputKeyEnum,
@ -27,8 +27,7 @@ export const HttpNode468: FlowNodeTemplateType = {
intro: i18nT('workflow:intro_http_request'),
showStatus: true,
isTool: true,
courseUrl: '/docs/guide/workbench/workflow/http/',
version: '481',
courseUrl: '/docs/guide/dashboard/workflow/http/',
inputs: [
{
...Input_Template_DynamicInput,

View File

@ -10,7 +10,7 @@ import {
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../../node/constant';
import { FlowNodeTemplateType } from '../../../type/node';
import { type FlowNodeTemplateType } from '../../../type/node';
import { getHandleConfig } from '../../utils';
export const IfElseNode: FlowNodeTemplateType = {
@ -23,8 +23,7 @@ export const IfElseNode: FlowNodeTemplateType = {
name: i18nT('workflow:condition_checker'),
intro: i18nT('workflow:execute_different_branches_based_on_conditions'),
showStatus: true,
courseUrl: '/docs/guide/workbench/workflow/tfswitch/',
version: '481',
courseUrl: '/docs/guide/dashboard/workflow/tfswitch/',
inputs: [
{
key: NodeInputKeyEnum.ifElseList,

View File

@ -1,5 +1,5 @@
import { ReferenceItemValueType } from '../../../type/io';
import { VariableConditionEnum } from './constant';
import type { ReferenceItemValueType } from '../../../type/io';
import type { VariableConditionEnum } from './constant';
export type IfElseConditionType = 'AND' | 'OR';
export type ConditionListItemType = {

View File

@ -10,7 +10,7 @@ import {
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../../node/constant';
import { FlowNodeTemplateType } from '../../../type/node';
import { type FlowNodeTemplateType } from '../../../type/node';
import { getHandleConfig } from '../../utils';
export const FormInputNode: FlowNodeTemplateType = {
@ -23,7 +23,6 @@ export const FormInputNode: FlowNodeTemplateType = {
name: i18nT('app:workflow.form_input'),
intro: i18nT(`app:workflow.form_input_tip`),
isTool: true,
version: '4811',
inputs: [
{
key: NodeInputKeyEnum.description,

View File

@ -1,7 +1,7 @@
import type { NodeOutputItemType } from '../../../../chat/type';
import type { FlowNodeOutputItemType } from '../../../type/io';
import { FlowNodeInputTypeEnum } from 'core/workflow/node/constant';
import { WorkflowIOValueTypeEnum } from 'core/workflow/constants';
import type { FlowNodeInputTypeEnum } from 'core/workflow/node/constant';
import type { WorkflowIOValueTypeEnum } from 'core/workflow/constants';
import type { ChatCompletionMessageParam } from '../../../../ai/type';
type InteractiveBasicType = {

View File

@ -10,7 +10,7 @@ import {
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../../node/constant';
import { FlowNodeTemplateType } from '../../../type/node.d';
import { type FlowNodeTemplateType } from '../../../type/node.d';
import { getHandleConfig } from '../../utils';
export const UserSelectNode: FlowNodeTemplateType = {
@ -24,8 +24,7 @@ export const UserSelectNode: FlowNodeTemplateType = {
name: i18nT('app:workflow.user_select'),
intro: i18nT(`app:workflow.user_select_tip`),
isTool: true,
version: '489',
courseUrl: '/docs/guide/workbench/workflow/user-selection/',
courseUrl: '/docs/guide/dashboard/workflow/user-selection/',
inputs: [
{
key: NodeInputKeyEnum.description,

View File

@ -3,7 +3,7 @@ import {
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../node/constant';
import { FlowNodeTemplateType } from '../../type/node.d';
import { type FlowNodeTemplateType } from '../../type/node.d';
import {
WorkflowIOValueTypeEnum,
NodeInputKeyEnum,
@ -32,8 +32,7 @@ export const LafModule: FlowNodeTemplateType = {
intro: i18nT('workflow:intro_laf_function_call'),
showStatus: true,
isTool: true,
courseUrl: '/docs/guide/workbench/workflow/laf/',
version: '481',
courseUrl: '/docs/guide/dashboard/workflow/laf/',
inputs: [
{
...Input_Template_DynamicInput,

View File

@ -3,7 +3,7 @@ import {
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../../node/constant';
import { FlowNodeTemplateType } from '../../../type/node';
import { type FlowNodeTemplateType } from '../../../type/node';
import {
FlowNodeTemplateTypeEnum,
NodeInputKeyEnum,
@ -29,8 +29,7 @@ export const LoopNode: FlowNodeTemplateType = {
name: i18nT('workflow:loop'),
intro: i18nT('workflow:intro_loop'),
showStatus: true,
version: '4811',
courseUrl: '/docs/guide/workbench/workflow/loop/',
courseUrl: '/docs/guide/dashboard/workflow/loop/',
inputs: [
{
key: NodeInputKeyEnum.loopInputArray,

View File

@ -5,7 +5,7 @@ import {
WorkflowIOValueTypeEnum
} from '../../../constants';
import { FlowNodeInputTypeEnum, FlowNodeTypeEnum } from '../../../node/constant';
import { FlowNodeTemplateType } from '../../../type/node';
import { type FlowNodeTemplateType } from '../../../type/node';
import { getHandleConfig } from '../../utils';
export const LoopEndNode: FlowNodeTemplateType = {
@ -19,7 +19,6 @@ export const LoopEndNode: FlowNodeTemplateType = {
avatar: 'core/workflow/template/loopEnd',
name: i18nT('workflow:loop_end'),
showStatus: false,
version: '4811',
inputs: [
{
key: NodeInputKeyEnum.loopEndInput,

View File

@ -3,7 +3,7 @@ import {
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../../node/constant';
import { FlowNodeTemplateType } from '../../../type/node.d';
import { type FlowNodeTemplateType } from '../../../type/node.d';
import {
FlowNodeTemplateTypeEnum,
NodeInputKeyEnum,
@ -24,7 +24,6 @@ export const LoopStartNode: FlowNodeTemplateType = {
unique: true,
forbidDelete: true,
showStatus: false,
version: '4811',
inputs: [
{
key: NodeInputKeyEnum.loopStartInput,

Some files were not shown because too many files have changed in this diff Show More