feat: model select support close params; perf: dataset params slider; update doc (#3453)

* feat: model select support close params

* perf: dataset params slider

* update doc

* update doc

* add delete log

* perf: ai config overflow

* test

* test

* test

* delete collection tags

* delete collection tags
Archer committed 2024-12-23 23:47:33 +08:00 (committed by GitHub)
parent a7f25994d5
commit f646ef8595
25 changed files with 256 additions and 188 deletions

```diff
@@ -0,0 +1,15 @@
+---
+title: 'V4.8.17(进行中)'
+description: 'FastGPT V4.8.17 更新说明'
+icon: 'upgrade'
+draft: false
+toc: true
+weight: 807
+---
+
+## 完整更新内容
+
+1.
+2. 新增 - LLM 模型参数支持关闭 max_tokens 和 temperature。
+3. 优化 - 知识库搜索参数,滑动条支持输入模式,可以更精准的控制。
```
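The core of the release is making `max_tokens` and `temperature` optional end to end: a parameter switched off in the UI is stored as `undefined`, and the request builder then omits it entirely so the model provider's own defaults apply. A minimal TypeScript sketch of that contract (`buildChatBody` is illustrative, not a function from this repo; the real logic lives in `llmCompletionsBodyFormat`, diffed below):

```ts
type ChatParams = { model: string; temperature?: number; maxToken?: number };

// Illustrative only: drop undefined params so the provider's defaults apply,
// instead of forcing temperature = 0 / max_tokens = 4000 as before.
const buildChatBody = ({ model, temperature, maxToken }: ChatParams) => ({
  model,
  ...(typeof temperature === 'number' ? { temperature } : {}),
  ...(typeof maxToken === 'number' ? { max_tokens: maxToken } : {})
});

console.log(buildChatBody({ model: 'gpt-4o-mini' }));
// => { model: 'gpt-4o-mini' } — both parameters "closed"
```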

```diff
@@ -44,9 +44,9 @@ weight: 104
 被放置在上下文数组的最前面role 为 system用于引导模型。
-### 最大对话轮数(仅简易模式)
+### 记忆轮数(仅简易模式)
-可以配置模型支持的最大对话轮数,如果模型的超出上下文,系统会自动截断,尽可能保证不超模型上下文。
+可以配置模型支持的记忆轮数,如果模型的超出上下文,系统会自动截断,尽可能保证不超模型上下文。
 所以尽管配置 30 轮对话,实际运行时候,不一定会达到 30 轮。
```
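This doc section describes the renamed "memory rounds" setting: the model carries at most N past dialogue rounds, and the system still truncates further if the context window would overflow, so the configured 30 rounds may not all survive at runtime. A rough sketch of that two-stage trim (a hypothetical helper with a crude token estimate, not FastGPT's actual implementation):

```ts
type ChatItem = { role: 'user' | 'assistant'; content: string };

// Hypothetical sketch: keep at most `maxHistories` rounds (user+assistant
// pairs), then drop the oldest rounds while a rough token estimate still
// exceeds the model context. The real system counts tokens precisely.
const trimHistories = (
  histories: ChatItem[],
  maxHistories: number,
  maxContextTokens: number
): ChatItem[] => {
  const estimateTokens = (items: ChatItem[]) =>
    items.reduce((sum, i) => sum + Math.ceil(i.content.length * 1.7), 0);

  let kept = histories.slice(-maxHistories * 2);
  while (kept.length > 2 && estimateTokens(kept) > maxContextTokens) {
    kept = kept.slice(2); // drop the oldest round
  }
  return kept;
};
```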

````diff
@@ -17,9 +17,11 @@ weight: 506
 ![图片](/imgs/offiaccount-1.png)
-## 2. 登录微信公众平台,获取 AppID 、 Secret和Token
+## 2. 获取 AppID 、 Secret和Token
-### 1. https://mp.weixin.qq.com 登录微信公众平台,选择您的公众号。
+### 1. 登录微信公众平台,选择您的公众号。
+
+打开微信公众号官网https://mp.weixin.qq.com
 **只支持通过验证的公众号,未通过验证的公众号暂不支持。**
@@ -28,6 +30,7 @@ weight: 506
 ![图片](/imgs/offiaccount-2.png)
 ### 2. 把3个参数填入 FastGPT 配置弹窗中。
+
 ![图片](/imgs/offiaccount-3.png)
 ## 3. 在 IP 白名单中加入 FastGPT 的 IP
@@ -36,7 +39,7 @@ weight: 506
 私有部署的用户可自行查阅自己的 IP 地址。
-海外版用户cloud.tryfastgpt.ai)可以填写下面的 IP 白名单:
+海外版用户cloud.tryfastgpt.ai可以填写下面的 IP 白名单:
 ```
 35.240.227.100
````

```diff
@@ -73,8 +73,8 @@ export type AppSimpleEditFormType = {
   aiSettings: {
     model: string;
     systemPrompt?: string | undefined;
-    temperature: number;
-    maxToken: number;
+    temperature?: number;
+    maxToken?: number;
     isResponseAnswerText: boolean;
    maxHistories: number;
  };
@@ -109,8 +109,8 @@ export type AppChatConfigType = {
 };
 export type SettingAIDataType = {
   model: string;
-  temperature: number;
-  maxToken: number;
+  temperature?: number;
+  maxToken?: number;
   isResponseAnswerText?: boolean;
   maxHistories?: number;
   [NodeInputKeyEnum.aiChatVision]?: boolean; // Is open vision mode
```
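With `temperature` and `maxToken` now optional in `SettingAIDataType`, a caller expresses "parameter closed" simply by omitting the field. For instance (the model name is a hypothetical value; the remaining fields were already optional here):

```ts
import type { SettingAIDataType } from '@fastgpt/global/core/app/type.d';

// Both tunables omitted: downstream request building skips them entirely.
const settings: SettingAIDataType = {
  model: 'gpt-4o-mini'
};
```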

```diff
@@ -201,13 +201,14 @@ export type DispatchNodeResultType<T = {}> = {
 export type AIChatNodeProps = {
   [NodeInputKeyEnum.aiModel]: string;
   [NodeInputKeyEnum.aiSystemPrompt]?: string;
-  [NodeInputKeyEnum.aiChatTemperature]: number;
-  [NodeInputKeyEnum.aiChatMaxToken]: number;
+  [NodeInputKeyEnum.aiChatTemperature]?: number;
+  [NodeInputKeyEnum.aiChatMaxToken]?: number;
   [NodeInputKeyEnum.aiChatIsResponseText]: boolean;
+  [NodeInputKeyEnum.aiChatVision]?: boolean;
   [NodeInputKeyEnum.aiChatQuoteRole]?: AiChatQuoteRoleType;
   [NodeInputKeyEnum.aiChatQuoteTemplate]?: string;
   [NodeInputKeyEnum.aiChatQuotePrompt]?: string;
-  [NodeInputKeyEnum.aiChatVision]?: boolean;
   [NodeInputKeyEnum.stringQuoteText]?: string;
   [NodeInputKeyEnum.fileUrlList]?: string[];
```

```diff
@@ -12,10 +12,12 @@ export const computedMaxToken = async ({
   model,
   filterMessages = []
 }: {
-  maxToken: number;
+  maxToken?: number;
   model: LLMModelItemType;
   filterMessages: ChatCompletionMessageParam[];
 }) => {
+  if (maxToken === undefined) return;
+
   maxToken = Math.min(maxToken, model.maxResponse);
   const tokensLimit = model.maxContext;
@@ -63,12 +65,13 @@ export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
   const requestBody: T = {
     ...body,
-    temperature: body.temperature
-      ? computedTemperature({
-          model: modelData,
-          temperature: body.temperature
-        })
-      : undefined,
+    temperature:
+      typeof body.temperature === 'number'
+        ? computedTemperature({
+            model: modelData,
+            temperature: body.temperature
+          })
+        : undefined,
     ...modelData?.defaultConfig
   };
```
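Note the guard change in `llmCompletionsBodyFormat`: the old truthiness check `body.temperature ? … : undefined` also discarded a legitimate user-set temperature of `0`, while `typeof body.temperature === 'number'` only treats a genuinely missing value as "closed". A reduced, standalone illustration (not the repo's code, and it skips the `computedTemperature` scaling):

```ts
const oldGuard = (t?: number) => (t ? { temperature: t } : {});
const newGuard = (t?: number) => (typeof t === 'number' ? { temperature: t } : {});

console.log(oldGuard(0)); // {}                 — 0 was silently dropped
console.log(newGuard(0)); // { temperature: 0 } — 0 is preserved
console.log(newGuard());  // {}                 — only undefined means "closed"
```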

```diff
@@ -71,19 +71,10 @@ export async function delDatasetRelevantData({
   const teamId = datasets[0].teamId;
   if (!teamId) {
-    return Promise.reject('teamId is required');
+    return Promise.reject('TeamId is required');
   }
-  const datasetIds = datasets.map((item) => String(item._id));
+  const datasetIds = datasets.map((item) => item._id);
 
-  // Get _id, teamId, fileId, metadata.relatedImgId for all collections
-  const collections = await MongoDatasetCollection.find(
-    {
-      teamId,
-      datasetId: { $in: datasetIds }
-    },
-    '_id teamId datasetId fileId metadata'
-  ).lean();
 
   // delete training data
   await MongoDatasetTraining.deleteMany({
@@ -91,20 +82,27 @@ export async function delDatasetRelevantData({
     datasetId: { $in: datasetIds }
   });
 
-  // image and file
-  await delCollectionRelatedSource({ collections, session });
-
-  // delete dataset.datas
-  await MongoDatasetData.deleteMany({ teamId, datasetId: { $in: datasetIds } }, { session });
-
-  // delete collections
-  await MongoDatasetCollection.deleteMany(
+  // Get _id, teamId, fileId, metadata.relatedImgId for all collections
+  const collections = await MongoDatasetCollection.find(
     {
       teamId,
       datasetId: { $in: datasetIds }
     },
+    '_id teamId datasetId fileId metadata',
     { session }
-  );
+  ).lean();
+
+  // image and file
+  await delCollectionRelatedSource({ collections, session });
+
+  // delete collections
+  await MongoDatasetCollection.deleteMany({
+    teamId,
+    datasetId: { $in: datasetIds }
+  }).session(session);
+
+  // delete dataset.datas(Not need session)
+  await MongoDatasetData.deleteMany({ teamId, datasetId: { $in: datasetIds } });
 
   // no session delete: delete files, vector data
   await deleteDatasetDataVector({ teamId, datasetIds });
```
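Two behavioral details ride along with this reshuffle: the collection query now runs inside the session (so it reads a snapshot consistent with the transaction), and `MongoDatasetData.deleteMany` moved out of the transaction per the new comment. A plausible reading, not stated in the commit, is that the bulk row delete is too large to hold in one Mongo transaction and is safe to re-run. A sketch of the resulting pattern, with hypothetical model names:

```ts
import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun';

// Hypothetical models and id, for illustration only.
declare const MetadataModel: { deleteMany: (filter: object) => any };
declare const BulkDataModel: { deleteMany: (filter: object) => Promise<unknown> };
declare const datasetId: string;

export const removeDataset = async () => {
  await mongoSessionRun(async (session) => {
    // Small metadata deletes stay transactional and roll back together.
    await MetadataModel.deleteMany({ datasetId }).session(session);
  });

  // The huge bulk delete runs outside the transaction: it is idempotent,
  // so an interrupted run can simply be retried or swept up later.
  await BulkDataModel.deleteMany({ datasetId });
};
```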

```diff
@@ -46,7 +46,7 @@ export const runToolWithFunctionCall = async (
     user,
     stream,
     workflowStreamResponse,
-    params: { temperature = 0, maxToken = 4000, aiChatVision }
+    params: { temperature, maxToken, aiChatVision }
   } = workflowProps;
 
   // Interactive
```

```diff
@@ -54,7 +54,7 @@ export const runToolWithPromptCall = async (
     user,
     stream,
     workflowStreamResponse,
-    params: { temperature = 0, maxToken = 4000, aiChatVision }
+    params: { temperature, maxToken, aiChatVision }
   } = workflowProps;
 
   if (interactiveEntryToolParams) {
```

```diff
@@ -94,7 +94,7 @@ export const runToolWithToolChoice = async (
     stream,
     user,
     workflowStreamResponse,
-    params: { temperature = 0, maxToken = 4000, aiChatVision }
+    params: { temperature, maxToken, aiChatVision }
   } = workflowProps;
 
   if (maxRunToolTimes <= 0 && response) {
```

```diff
@@ -71,8 +71,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     chatConfig,
     params: {
       model,
-      temperature = 0,
-      maxToken = 4000,
+      temperature,
+      maxToken,
       history = 6,
       quoteQA,
       userChatInput,
```

```diff
@@ -7,13 +7,15 @@ const InputSlider = ({
   value,
   max = 100,
   min = 0,
-  step = 1
+  step = 1,
+  isDisabled
 }: {
-  value: number;
+  value?: number;
   onChange: (index: number) => void;
   max: number;
   min: number;
   step?: number;
+  isDisabled?: boolean;
 }) => {
   const markList = useMemo(() => {
     const valLen = max - min;
@@ -36,6 +38,7 @@ const InputSlider = ({
         value={value}
         focusThumbOnChange={false}
         onChange={onChange}
+        isDisabled={isDisabled}
       >
         <SliderTrack bg={'myGray.100'} h={'4px'} />
         {markList.map((val, i) => (
@@ -67,6 +70,7 @@ const InputSlider = ({
         max={max}
         step={step}
         value={value}
+        isDisabled={isDisabled}
         onChange={(e) => onChange(e ?? min)}
       />
     </HStack>
```
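`InputSlider` now accepts an optional `value` and an `isDisabled` flag, which is what lets the settings modal grey out a slider whose parameter has been switched off. A minimal usage sketch (the surrounding component and its state are hypothetical; the import path matches the one used in `DatasetParamsModal` below):

```tsx
import React, { useState } from 'react';
import InputSlider from '@fastgpt/web/components/common/MySlider/InputSlider';

const MaxTokenField = () => {
  // undefined means the parameter is "closed"; the slider stays visible
  // but cannot be changed until a Switch re-seeds it with a number.
  const [maxToken, setMaxToken] = useState<number | undefined>(undefined);

  return (
    <InputSlider
      min={100}
      max={4000}
      step={200}
      value={maxToken}
      isDisabled={maxToken === undefined}
      onChange={setMaxToken}
    />
  );
};

export default MaxTokenField;
```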

```diff
@@ -79,6 +79,12 @@
   "look_ai_point_price": "View all model billing standards",
   "mark_count": "Number of Marked Answers",
   "max_histories_number": "Max histories",
+  "max_histories_number_tip": "The maximum number of rounds of dialogue that the model can carry into memory. If the memory exceeds the model context, the system will force truncation. \nTherefore, even if 30 rounds of dialogue are configured, the actual number may not reach 30 rounds during operation.",
+  "max_quote_tokens": "Max quote",
+  "max_quote_tokens_tips": "The maximum number of tokens in a single search, about 1 character in Chinese = 1.7 tokens, and about 1 character in English = 1 token",
+  "max_tokens": "Max tokens",
+  "min_similarity": "lowest correlation",
+  "min_similarity_tip": "The relevance of different index models is different. Please select the appropriate value through search testing. \nWhen using Result Rearrange , use the rearranged results for filtering.",
   "module.Custom Title Tip": "This title will be displayed during the conversation.",
   "module.No Modules": "No Plugins Found",
   "module.type": "\"{{type}}\" type\n{{description}}",
@@ -109,6 +115,8 @@
   "setting_plugin": "Workflow",
   "stream_response": "Stream",
   "stream_response_tip": "Turning this switch off forces the model to use non-streaming mode and will not output content directly. \nIn the output of the AI reply, the content output by this model can be obtained for secondary processing.",
+  "temperature": "Temperature",
+  "temperature_tip": "Range 0~10. \nThe larger the value, the more divergent the model's answer is; the smaller the value, the more rigorous the answer.",
   "template.hard_strict": "Strict Q&A template",
   "template.hard_strict_des": "Based on the question and answer template, stricter requirements are imposed on the model's answers.",
   "template.qa_template": "Q&A template",
```

```diff
@@ -281,7 +281,6 @@
   "core.app.Interval timer run": "Scheduled Execution",
   "core.app.Interval timer tip": "Can Execute App on Schedule",
   "core.app.Make a brief introduction of your app": "Give Your AI App an Introduction",
-  "core.app.Max tokens": "Max response",
   "core.app.Name and avatar": "Avatar & Name",
   "core.app.Publish": "Publish",
   "core.app.Publish Confirm": "Confirm to Publish App? This Will Immediately Update the App Status on All Publishing Channels.",
@@ -308,7 +307,6 @@
   "core.app.TTS Tip": "After enabling, you can use the voice playback function after each conversation. Using this feature may incur additional costs.",
   "core.app.TTS start": "Read Content",
   "core.app.Team tags": "Team Tags",
-  "core.app.Temperature": "Temperature",
   "core.app.Tool call": "Tool Call",
   "core.app.ToolCall.No plugin": "No Available Plugins",
   "core.app.ToolCall.Parameter setting": "Input Parameters",
@@ -599,10 +597,6 @@
   "core.dataset.search.Dataset Search Params": "Dataset Search Configuration",
   "core.dataset.search.Empty result response": "Empty Search Response",
   "core.dataset.search.Filter": "Search Filter",
-  "core.dataset.search.Max Tokens": "Quote Limit",
-  "core.dataset.search.Max Tokens Tips": "The maximum number of tokens for a single search. About 1 Chinese character = 1.7 tokens, 1 English word = 1 token",
-  "core.dataset.search.Min Similarity": "Minimum Similarity",
-  "core.dataset.search.Min Similarity Tips": "The similarity of different index models varies. Please choose an appropriate value through search testing. When using Re-rank, the similarity may be very low.",
   "core.dataset.search.No support similarity": "Only supported when using result re-rank or semantic search",
   "core.dataset.search.Nonsupport": "Not Supported",
   "core.dataset.search.Params Setting": "Search Parameter Settings",
```

```diff
@@ -78,7 +78,13 @@
   "logs_title": "标题",
   "look_ai_point_price": "查看所有模型计费标准",
   "mark_count": "标注答案数量",
-  "max_histories_number": "最大对话轮数",
+  "max_histories_number": "记忆轮数",
+  "max_histories_number_tip": "模型最多携带多少轮对话进入记忆中,如果记忆超出模型上下文,系统会强制截断。所以尽管配置 30 轮对话,实际运行时候,不一定会达到 30 轮。",
+  "max_quote_tokens": "引用上限",
+  "max_quote_tokens_tips": "单次搜索最大的 token 数量,中文约 1 字=1.7 tokens英文约 1 字=1 token",
+  "max_tokens": "回复上限",
+  "min_similarity": "最低相关度",
+  "min_similarity_tip": "不同索引模型的相关度有区别,请通过搜索测试来选择合适的数值。使用 结果重排 时,使用重排结果进行过滤。",
   "module.Custom Title Tip": "该标题名字会展示在对话过程中",
   "module.No Modules": "没找到插件",
   "module.type": "\"{{type}}\"类型\n{{description}}",
@@ -109,6 +115,8 @@
   "setting_plugin": "插件配置",
   "stream_response": "流输出",
   "stream_response_tip": "关闭该开关,可以强制模型使用非流模式,并且不会直接进行内容输出。可以在 AI 回复的输出中,获取本次模型输出的内容进行二次处理。",
+  "temperature": "温度",
+  "temperature_tip": "范围 010。值越大代表模型回答越发散值越小代表回答越严谨。",
   "template.hard_strict": "严格问答模板",
   "template.hard_strict_des": "在问答模板基础上,对模型的回答做更严格的要求。",
   "template.qa_template": "问答模板",
```

```diff
@@ -280,7 +280,6 @@
   "core.app.Interval timer run": "定时执行",
   "core.app.Interval timer tip": "可定时执行应用",
   "core.app.Make a brief introduction of your app": "给你的 AI 应用一个介绍",
-  "core.app.Max tokens": "回复上限",
   "core.app.Name and avatar": "头像 & 名称",
   "core.app.Publish": "发布",
   "core.app.Publish Confirm": "确认发布应用?会立即更新所有发布渠道的应用状态。",
@@ -307,7 +306,6 @@
   "core.app.TTS Tip": "开启后,每次对话后可使用语音播放功能。使用该功能可能产生额外费用。",
   "core.app.TTS start": "朗读内容",
   "core.app.Team tags": "团队标签",
-  "core.app.Temperature": "温度",
   "core.app.Tool call": "工具调用",
   "core.app.ToolCall.No plugin": "没有可用的插件",
   "core.app.ToolCall.Parameter setting": "输入参数",
@@ -598,10 +596,6 @@
   "core.dataset.search.Dataset Search Params": "知识库搜索配置",
   "core.dataset.search.Empty result response": "空搜索回复",
   "core.dataset.search.Filter": "搜索过滤",
-  "core.dataset.search.Max Tokens": "引用上限",
-  "core.dataset.search.Max Tokens Tips": "单次搜索最大的 token 数量,中文约 1 字=1.7 tokens英文约 1 字=1 token",
-  "core.dataset.search.Min Similarity": "最低相关度",
-  "core.dataset.search.Min Similarity Tips": "不同索引模型的相关度有区别,请通过搜索测试来选择合适的数值,使用 Rerank 时,相关度可能会很低。",
   "core.dataset.search.No support similarity": "仅使用结果重排或语义检索时,支持相关度过滤",
   "core.dataset.search.Nonsupport": "不支持",
   "core.dataset.search.Params Setting": "搜索参数设置",
```

```diff
@@ -78,7 +78,13 @@
   "logs_title": "標題",
   "look_ai_point_price": "查看所有模型計費標準",
   "mark_count": "標記答案數量",
-  "max_histories_number": "最大對話輪數",
+  "max_histories_number": "記憶輪數",
+  "max_histories_number_tip": "模型最多攜帶多少輪對話進入記憶中,如果記憶超出模型上下文,系統會強制截斷。\n所以儘管配置 30 輪對話,實際運行時候,不一定會達到 30 輪。",
+  "max_quote_tokens": "引用上限",
+  "max_quote_tokens_tips": "單次搜尋最大的 token 數量,中文約 1 字=1.7 tokens英文約 1 字=1 token",
+  "max_tokens": "回覆上限",
+  "min_similarity": "最低相關度",
+  "min_similarity_tip": "不同索引模型的相關度有區別,請透過搜尋測試來選擇合適的數值。\n使用 結果重排 時,使用重排結果過濾。",
   "module.Custom Title Tip": "這個標題會顯示在對話過程中",
   "module.No Modules": "找不到外掛",
   "module.type": "\"{{type}}\" 類型\n{{description}}",
@@ -109,6 +115,8 @@
   "setting_plugin": "外掛設定",
   "stream_response": "流輸出",
   "stream_response_tip": "關閉該開關​​,可以強制模型使用非流模式,並且不會直接進行內容輸出。\n可在 AI 回覆的輸出中,取得本次模型輸出的內容進行二次處理。",
+  "temperature": "溫度",
+  "temperature_tip": "範圍 010。\n值越大代表模型回答越發散值越小代表回答越嚴謹。",
   "template.hard_strict": "嚴格問答範本",
   "template.hard_strict_des": "在問答範本基礎上,對模型的回答做出更嚴格的要求。",
   "template.qa_template": "問答範本",
```

```diff
@@ -281,7 +281,6 @@
   "core.app.Interval timer run": "排程執行",
   "core.app.Interval timer tip": "可排程執行應用程式",
   "core.app.Make a brief introduction of your app": "為您的 AI 應用程式寫一段介紹",
-  "core.app.Max tokens": "回應上限",
   "core.app.Name and avatar": "頭像與名稱",
   "core.app.Publish": "發布",
   "core.app.Publish Confirm": "確認發布應用程式?這將立即更新所有發布管道的應用程式狀態。",
@@ -308,7 +307,6 @@
   "core.app.TTS Tip": "開啟後,每次對話後可使用語音播放功能。使用此功能可能會產生額外費用。",
   "core.app.TTS start": "朗讀內容",
   "core.app.Team tags": "團隊標籤",
-  "core.app.Temperature": "溫度",
   "core.app.Tool call": "工具呼叫",
   "core.app.ToolCall.No plugin": "沒有可用的外掛程式",
   "core.app.ToolCall.Parameter setting": "輸入參數",
@@ -599,10 +597,6 @@
   "core.dataset.search.Dataset Search Params": "知識庫搜尋設定",
   "core.dataset.search.Empty result response": "空搜尋回應",
   "core.dataset.search.Filter": "搜尋篩選",
-  "core.dataset.search.Max Tokens": "引用上限",
-  "core.dataset.search.Max Tokens Tips": "單次搜尋最大的 Token 數量,中文約 1 字=1.7 tokens英文約 1 字=1 token",
-  "core.dataset.search.Min Similarity": "最低相關度",
-  "core.dataset.search.Min Similarity Tips": "不同索引模型的相關度有所差異,請透過搜尋測試來選擇合適的數值。使用重新排名時,相關度可能會很低。",
   "core.dataset.search.No support similarity": "僅使用結果重新排名或語意搜尋時,支援相關度篩選",
   "core.dataset.search.Nonsupport": "不支援",
   "core.dataset.search.Params Setting": "搜尋參數設定",
```

```diff
@@ -18,10 +18,10 @@ import {
   Thead,
   Tr,
   Table,
-  useDisclosure
+  useDisclosure,
+  FlexProps
 } from '@chakra-ui/react';
 import { useSystemStore } from '@/web/common/system/useSystemStore';
-import MySlider from '@/components/Slider';
 import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
 import type { SettingAIDataType } from '@fastgpt/global/core/app/type.d';
 import { getDocPath } from '@/web/common/system/doc';
@@ -37,16 +37,33 @@ const AiPointsModal = dynamic(() =>
   import('@/pages/price/components/Points').then((mod) => mod.AiPointsModal)
 );
 
+const FlexItemStyles: FlexProps = {
+  mt: 5,
+  alignItems: 'center',
+  h: '35px'
+};
+const LabelStyles: BoxProps = {
+  display: 'flex',
+  alignItems: 'center',
+  justifyContent: 'space-between',
+  fontSize: 'sm',
+  color: 'myGray.900',
+  width: '9rem',
+  mr: 5
+};
+
+export type AIChatSettingsModalProps = {};
+
 const AIChatSettingsModal = ({
   onClose,
   onSuccess,
   defaultData,
   llmModels = []
-}: {
+}: AIChatSettingsModalProps & {
   onClose: () => void;
   onSuccess: (e: SettingAIDataType) => void;
   defaultData: SettingAIDataType;
-  llmModels?: LLMModelItemType[];
+  llmModels: LLMModelItemType[];
 }) => {
   const { t } = useTranslation();
   const [refresh, setRefresh] = useState(false);
@@ -59,7 +76,11 @@ const AIChatSettingsModal = ({
   const showResponseAnswerText = watch(NodeInputKeyEnum.aiChatIsResponseText) !== undefined;
   const showVisionSwitch = watch(NodeInputKeyEnum.aiChatVision) !== undefined;
   const showMaxHistoriesSlider = watch('maxHistories') !== undefined;
+  const maxToken = watch('maxToken');
+  const temperature = watch('temperature');
   const useVision = watch('aiChatVision');
   const selectedModel = getWebLLMModel(model);
   const llmSupportVision = !!selectedModel?.vision;
@@ -79,14 +100,6 @@ const AIChatSettingsModal = ({
     setRefresh(!refresh);
   };
 
-  const LabelStyles: BoxProps = {
-    display: 'flex',
-    alignItems: 'center',
-    fontSize: 'sm',
-    color: 'myGray.900',
-    width: '7rem'
-  };
-
   const {
     isOpen: isOpenAiPointsModal,
     onClose: onCloseAiPointsModal,
@@ -116,9 +129,9 @@ const AIChatSettingsModal = ({
       }
       w={'500px'}
     >
-      <ModalBody overflowY={'auto'}>
+      <ModalBody overflowY={'auto'} overflowX={'hidden'}>
         <Flex alignItems={'center'}>
-          <Box {...LabelStyles} mr={2}>
+          <Box {...LabelStyles} w={'5rem'}>
             {t('common:core.ai.Model')}
           </Box>
           <Box flex={'1 0 0'}>
@@ -184,44 +197,13 @@ const AIChatSettingsModal = ({
           </Table>
         </TableContainer>
 
-        <Flex>
-          <Box {...LabelStyles} mr={2}>
-            {t('common:core.app.Temperature')}
-          </Box>
-          <Box flex={'1 0 0'}>
-            <InputSlider
-              min={0}
-              max={10}
-              step={1}
-              value={getValues(NodeInputKeyEnum.aiChatTemperature)}
-              onChange={(e) => {
-                setValue(NodeInputKeyEnum.aiChatTemperature, e);
-                setRefresh(!refresh);
-              }}
-            />
-          </Box>
-        </Flex>
-        <Flex mt={5}>
-          <Box {...LabelStyles} mr={2}>
-            {t('common:core.app.Max tokens')}
-          </Box>
-          <Box flex={'1 0 0'}>
-            <InputSlider
-              min={100}
-              max={tokenLimit}
-              step={200}
-              value={getValues(NodeInputKeyEnum.aiChatMaxToken)}
-              onChange={(val) => {
-                setValue(NodeInputKeyEnum.aiChatMaxToken, val);
-                setRefresh(!refresh);
-              }}
-            />
-          </Box>
-        </Flex>
         {showMaxHistoriesSlider && (
-          <Flex mt={5}>
-            <Box {...LabelStyles} mr={2}>
-              {t('app:max_histories_number')}
+          <Flex {...FlexItemStyles}>
+            <Box {...LabelStyles}>
+              <Flex alignItems={'center'}>
+                <Box>{t('app:max_histories_number')}</Box>
+                <QuestionTip label={t('app:max_histories_number_tip')} />
+              </Flex>
             </Box>
             <Box flex={'1 0 0'}>
               <InputSlider
@@ -237,15 +219,71 @@ const AIChatSettingsModal = ({
             </Box>
           </Flex>
         )}
+        <Flex {...FlexItemStyles}>
+          <Box {...LabelStyles}>
+            <Box>{t('app:max_tokens')}</Box>
+            <Switch
+              isChecked={maxToken !== undefined}
+              size={'sm'}
+              onChange={(e) => {
+                setValue('maxToken', e.target.checked ? tokenLimit / 2 : undefined);
+              }}
+            />
+          </Box>
+          <Box flex={'1 0 0'}>
+            <InputSlider
+              min={100}
+              max={tokenLimit}
+              step={200}
+              isDisabled={maxToken === undefined}
+              value={maxToken}
+              onChange={(val) => {
+                setValue(NodeInputKeyEnum.aiChatMaxToken, val);
+                setRefresh(!refresh);
+              }}
+            />
+          </Box>
+        </Flex>
+        <Flex {...FlexItemStyles}>
+          <Box {...LabelStyles}>
+            <Flex alignItems={'center'}>
+              {t('app:temperature')}
+              <QuestionTip label={t('app:temperature_tip')} />
+            </Flex>
+            <Switch
+              isChecked={temperature !== undefined}
+              size={'sm'}
+              onChange={(e) => {
+                setValue('temperature', e.target.checked ? 0 : undefined);
+              }}
+            />
+          </Box>
+          <Box flex={'1 0 0'}>
+            <InputSlider
+              min={0}
+              max={10}
+              step={1}
+              value={temperature}
+              isDisabled={temperature === undefined}
+              onChange={(e) => {
+                setValue(NodeInputKeyEnum.aiChatTemperature, e);
+                setRefresh(!refresh);
+              }}
+            />
+          </Box>
+        </Flex>
         {showResponseAnswerText && (
-          <Flex mt={5} alignItems={'center'}>
+          <Flex {...FlexItemStyles} h={'25px'}>
             <Box {...LabelStyles}>
-              {t('app:stream_response')}
-              <QuestionTip ml={1} label={t('app:stream_response_tip')}></QuestionTip>
-            </Box>
-            <Box flex={1}>
+              <Flex alignItems={'center'}>
+                {t('app:stream_response')}
+                <QuestionTip ml={1} label={t('app:stream_response_tip')}></QuestionTip>
+              </Flex>
               <Switch
                 isChecked={getValues(NodeInputKeyEnum.aiChatIsResponseText)}
+                size={'sm'}
                 onChange={(e) => {
                   const value = e.target.checked;
                   setValue(NodeInputKeyEnum.aiChatIsResponseText, value);
@@ -256,15 +294,17 @@ const AIChatSettingsModal = ({
           </Flex>
         )}
         {showVisionSwitch && (
-          <Flex mt={5} alignItems={'center'}>
+          <Flex {...FlexItemStyles} h={'25px'}>
             <Box {...LabelStyles}>
-              {t('app:llm_use_vision')}
-              <QuestionTip ml={1} label={t('app:llm_use_vision_tip')}></QuestionTip>
-            </Box>
-            <Box flex={1}>
+              <Flex alignItems={'center'}>
+                {t('app:llm_use_vision')}
+                <QuestionTip ml={1} label={t('app:llm_use_vision_tip')}></QuestionTip>
+              </Flex>
               {llmSupportVision ? (
                 <Switch
                   isChecked={useVision}
+                  size={'sm'}
                   onChange={(e) => {
                     const value = e.target.checked;
                     setValue(NodeInputKeyEnum.aiChatVision, value);
```

```diff
@@ -3,7 +3,7 @@ import { useSystemStore } from '@/web/common/system/useSystemStore';
 import { LLMModelTypeEnum, llmModelTypeFilterMap } from '@fastgpt/global/core/ai/constants';
 import { Box, css, HStack, IconButton, useDisclosure } from '@chakra-ui/react';
 import type { SettingAIDataType } from '@fastgpt/global/core/app/type.d';
-import AISettingModal from '@/components/core/ai/AISettingModal';
+import AISettingModal, { AIChatSettingsModalProps } from '@/components/core/ai/AISettingModal';
 import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
 import { useTranslation } from 'next-i18next';
 import MyIcon from '@fastgpt/web/components/common/Icon';
@@ -17,7 +17,12 @@ type Props = {
   bg?: string;
 };
 
-const SettingLLMModel = ({ llmModelType = LLMModelTypeEnum.all, defaultData, onChange }: Props) => {
+const SettingLLMModel = ({
+  llmModelType = LLMModelTypeEnum.all,
+  defaultData,
+  onChange,
+  ...props
+}: AIChatSettingsModalProps & Props) => {
   const { t } = useTranslation();
   const { llmModelList } = useSystemStore();
@@ -95,6 +100,7 @@ const SettingLLMModel = ({ llmModelType = LLMModelTypeEnum.all, defaultData, onC
           }}
           defaultData={defaultData}
           llmModels={modelList}
+          {...props}
         />
       )}
     </Box>
```

```diff
@@ -30,6 +30,7 @@ import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';
 import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel';
 import MyTextarea from '@/components/common/Textarea/MyTextarea';
 import { defaultDatasetMaxTokens } from '@fastgpt/global/core/app/constants';
+import InputSlider from '@fastgpt/web/components/common/MySlider/InputSlider';
 
 export type DatasetParamsProps = {
   searchMode: `${DatasetSearchModeEnum}`;
@@ -224,19 +225,12 @@ const DatasetParamsModal = ({
         <Box pt={5}>
           {limit !== undefined && (
             <Box display={['block', 'flex']}>
-              <Flex flex={'0 0 120px'} mb={[8, 0]}>
-                <FormLabel>{t('common:core.dataset.search.Max Tokens')}</FormLabel>
-                <QuestionTip
-                  ml={1}
-                  label={t('common:core.dataset.search.Max Tokens Tips')}
-                ></QuestionTip>
+              <Flex flex={'0 0 120px'} alignItems={'center'} mb={[5, 0]}>
+                <FormLabel>{t('app:max_quote_tokens')}</FormLabel>
+                <QuestionTip label={t('app:max_quote_tokens_tips')} />
               </Flex>
-              <Box flex={1} mx={4}>
-                <MySlider
-                  markList={[
-                    { label: '100', value: 100 },
-                    { label: maxTokens, value: maxTokens }
-                  ]}
+              <Box flex={'1 0 0'}>
+                <InputSlider
                   min={100}
                   max={maxTokens}
                   step={maxTokenStep}
@@ -249,21 +243,14 @@ const DatasetParamsModal = ({
              </Box>
            </Box>
          )}
-          <Box display={['block', 'flex']} mt={10}>
-            <Flex flex={'0 0 120px'} mb={[8, 0]}>
-              <FormLabel>{t('common:core.dataset.search.Min Similarity')}</FormLabel>
-              <QuestionTip
-                ml={1}
-                label={t('common:core.dataset.search.Min Similarity Tips')}
-              ></QuestionTip>
+          <Box display={['block', 'flex']} mt={[6, 10]} mb={4}>
+            <Flex flex={'0 0 120px'} alignItems={'center'} mb={[5, 0]}>
+              <FormLabel>{t('app:min_similarity')}</FormLabel>
+              <QuestionTip label={t('app:min_similarity_tip')} />
             </Flex>
-            <Box flex={1} mx={4}>
+            <Box flex={'1 0 0'}>
               {showSimilarity ? (
-                <MySlider
-                  markList={[
-                    { label: '0', value: 0 },
-                    { label: '1', value: 1 }
-                  ]}
+                <InputSlider
                   min={0}
                   max={1}
                   step={0.01}
```

```diff
@@ -62,8 +62,8 @@ const SearchParamsTip = ({
       <Thead>
         <Tr bg={'transparent !important'}>
           <Th fontSize={'mini'}>{t('common:core.dataset.search.search mode')}</Th>
-          <Th fontSize={'mini'}>{t('common:core.dataset.search.Max Tokens')}</Th>
-          <Th fontSize={'mini'}>{t('common:core.dataset.search.Min Similarity')}</Th>
+          <Th fontSize={'mini'}>{t('app:max_quote_tokens')}</Th>
+          <Th fontSize={'mini'}>{t('app:min_similarity')}</Th>
           {hasReRankModel && <Th fontSize={'mini'}>{t('common:core.dataset.search.ReRank')}</Th>}
           <Th fontSize={'mini'}>{t('common:core.module.template.Query extension')}</Th>
           {hasEmptyResponseMode && (
```

```diff
@@ -16,8 +16,8 @@ import { findAppAndAllChildren } from '@fastgpt/service/core/app/controller';
 import { MongoResourcePermission } from '@fastgpt/service/support/permission/schema';
 import { ClientSession } from '@fastgpt/service/common/mongo';
 import { deleteChatFiles } from '@fastgpt/service/core/chat/controller';
-import { getAppLatestVersion } from '@fastgpt/service/core/app/version/controller';
 import { pushTrack } from '@fastgpt/service/common/middle/tracks/utils';
+import { MongoOpenApi } from '@fastgpt/service/support/openapi/schema';
 
 async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
   const { appId } = req.query as { appId: string };
@@ -77,34 +77,31 @@ export const onDelOneApp = async ({
     },
     { session }
   );
 
   // 删除分享链接
-  await MongoOutLink.deleteMany(
-    {
-      appId
-    },
-    { session }
-  );
+  await MongoOutLink.deleteMany({
+    appId
+  }).session(session);
+
+  // Openapi
+  await MongoOpenApi.deleteMany({
+    appId
+  }).session(session);
 
   // delete version
-  await MongoAppVersion.deleteMany(
-    {
-      appId
-    },
-    { session }
-  );
-  await MongoChatInputGuide.deleteMany(
-    {
-      appId
-    },
-    { session }
-  );
-  await MongoResourcePermission.deleteMany(
-    {
-      resourceType: PerResourceTypeEnum.app,
-      teamId,
-      resourceId: appId
-    },
-    { session }
-  );
+  await MongoAppVersion.deleteMany({
+    appId
+  }).session(session);
+
+  await MongoChatInputGuide.deleteMany({
+    appId
+  }).session(session);
+
+  await MongoResourcePermission.deleteMany({
+    resourceType: PerResourceTypeEnum.app,
+    teamId,
+    resourceId: appId
+  }).session(session);
 
   // delete app
   await MongoApp.deleteOne(
     {
```

```diff
@@ -7,6 +7,7 @@ import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun';
 import { NextAPI } from '@/service/middleware/entry';
 import { OwnerPermissionVal } from '@fastgpt/global/support/permission/constant';
 import { CommonErrEnum } from '@fastgpt/global/common/error/code/common';
+import { MongoDatasetCollectionTags } from '@fastgpt/service/core/dataset/tag/schema';
 
 async function handler(req: NextApiRequest) {
   const { id: datasetId } = req.query as {
@@ -30,14 +31,23 @@ async function handler(req: NextApiRequest) {
     teamId,
     datasetId
   });
+  const datasetIds = datasets.map((d) => d._id);
 
   // delete all dataset.data and pg data
   await mongoSessionRun(async (session) => {
     // delete dataset data
     await delDatasetRelevantData({ datasets, session });
+
+    // delete collection.tags
+    await MongoDatasetCollectionTags.deleteMany({
+      teamId,
+      datasetId: { $in: datasetIds }
+    }).session(session);
+
+    // delete dataset
     await MongoDataset.deleteMany(
       {
-        _id: { $in: datasets.map((d) => d._id) }
+        _id: { $in: datasetIds }
       },
       { session }
     );
```
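The deletes added throughout this commit bind to the transaction via Mongoose's query-level `Query.prototype.session()` chaining rather than the `{ session }` options argument; both attach the operation to the same transaction, the chained form just reads more uniformly. A generic sketch (the model, schema, and filter are placeholders, not FastGPT code):

```ts
import mongoose from 'mongoose';

// Placeholder schema/model purely for illustration.
const Tag = mongoose.model('tag', new mongoose.Schema({ teamId: String }));

export const deleteTags = async (teamId: string) => {
  const session = await mongoose.startSession();
  await session.withTransaction(async () => {
    // Two equivalent ways to bind a delete to the transaction:
    await Tag.deleteMany({ teamId }, { session });
    await Tag.deleteMany({ teamId }).session(session);
  });
  await session.endSession();
};
```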

```diff
@@ -32,10 +32,8 @@ const SelectAiModelRender = ({ item, inputs = [], nodeId }: RenderInputProps) =>
   const llmModelData: SettingAIDataType = useMemo(
     () => ({
       model: inputs.find((input) => input.key === NodeInputKeyEnum.aiModel)?.value ?? '',
-      maxToken:
-        inputs.find((input) => input.key === NodeInputKeyEnum.aiChatMaxToken)?.value ?? 2048,
-      temperature:
-        inputs.find((input) => input.key === NodeInputKeyEnum.aiChatTemperature)?.value ?? 1,
+      maxToken: inputs.find((input) => input.key === NodeInputKeyEnum.aiChatMaxToken)?.value,
+      temperature: inputs.find((input) => input.key === NodeInputKeyEnum.aiChatTemperature)?.value,
       isResponseAnswerText: inputs.find(
         (input) => input.key === NodeInputKeyEnum.aiChatIsResponseText
       )?.value,
```