perf: max_tokens count; feat: support reasoner output; fix: member scroll (#3681)

* perf: supplement assistant empty response
* check array
* perf: max_tokens count
* feat: support reasoner output
* member scroll
* update provider order
* i18n
This commit is contained in:
parent 9e0379382f
commit 54defd8a3c
@@ -31,9 +31,14 @@ curl --location --request POST 'https://{{host}}/api/admin/initv4820' \
 ## 完整更新内容
 
 1. 新增 - 可视化模型参数配置。预设超过 100 个模型配置。同时支持所有类型模型的一键测试。(预计下个版本会完全支持在页面上配置渠道)。
-2. 新增 - 使用记录导出和仪表盘。
-3. 新增 - markdown 语法扩展,支持音视频(代码块 audio 和 video)。
-4. 优化 - 页面组件抽离,减少页面组件路由。
-5. 优化 - 全文检索,忽略大小写。
-6. 优化 - 问答生成和增强索引改成流输出,避免部分模型超时。
-7. 优化 - 自动给 assistant 空 content,补充 null,同时合并连续的 text assistant,避免部分模型抛错。
+2. 新增 - DeepSeek reasoner 模型支持输出思考过程。
+3. 新增 - 使用记录导出和仪表盘。
+4. 新增 - markdown 语法扩展,支持音视频(代码块 audio 和 video)。
+5. 新增 - 调整 max_tokens 计算逻辑。优先保证 max_tokens 为配置值,如超出最大上下文,则减少历史记录。例如:如果申请 8000 的 max_tokens,则上下文长度会减少 8000。
+6. 优化 - 问题优化增加上下文过滤,避免超出上下文。
+7. 优化 - 页面组件抽离,减少页面组件路由。
+8. 优化 - 全文检索,忽略大小写。
+9. 优化 - 问答生成和增强索引改成流输出,避免部分模型超时。
+10. 优化 - 自动给 assistant 空 content,补充 null,同时合并连续的 text assistant,避免部分模型抛错。
+11. 优化 - 调整图片 Host,取消上传时补充 FE_DOMAIN,改成发送对话前。
+12. 修复 - 部分场景成员列表无法触底加载。
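Item 5 is the behavioural core of this commit: the configured max_tokens is honoured as-is, and the chat history is trimmed to make room for it. A minimal sketch of that logic, reusing the helper names introduced in the diffs below (computedMaxToken, filterGPTMessageByMaxContext) but with placeholder types and a placeholder token counter rather than the real implementations:

```ts
type LLMModel = { maxContext: number; maxResponse: number };
type ChatMessage = { role: string; content: string };

// The requested completion budget is only clamped to the model's maxResponse;
// it is no longer shrunk to make the prompt fit.
const computedMaxToken = (model: LLMModel, maxToken?: number) =>
  maxToken === undefined ? undefined : Math.min(maxToken, model.maxResponse);

// History is trimmed instead: the filter receives maxContext minus the
// reserved completion budget, keeping the newest turns and dropping the oldest.
const filterByMaxContext = (
  messages: ChatMessage[],
  maxContext: number,
  countTokens: (m: ChatMessage) => number
): ChatMessage[] => {
  const kept: ChatMessage[] = [];
  let budget = maxContext;
  for (const msg of [...messages].reverse()) {
    budget -= countTokens(msg);
    if (budget < 0) break; // oldest turns are dropped first
    kept.unshift(msg);
  }
  return kept;
};
```

So a request that reserves 8000 completion tokens hands the history filter a budget of model.maxContext - 8000, which matches the example in the changelog item.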
packages/global/core/ai/model.d.ts (vendored, 3 changed lines)

@@ -29,10 +29,11 @@ export type LLMModelItemType = PriceType &
   maxContext: number;
   maxResponse: number;
   quoteMaxToken: number;
-  maxTemperature: number;
+  maxTemperature?: number;
 
   censor?: boolean;
   vision?: boolean;
+  reasoning?: boolean;
 
   // diff function model
   datasetProcess?: boolean; // dataset

@@ -11,8 +11,8 @@ export type ModelProviderIdType =
   | 'AliCloud'
   | 'Qwen'
   | 'Doubao'
-  | 'ChatGLM'
   | 'DeepSeek'
+  | 'ChatGLM'
   | 'Ernie'
   | 'Moonshot'
   | 'MiniMax'
packages/global/core/app/type.d.ts (vendored, 1 changed line)

@@ -117,6 +117,7 @@ export type SettingAIDataType = {
   isResponseAnswerText?: boolean;
   maxHistories?: number;
   [NodeInputKeyEnum.aiChatVision]?: boolean; // Is open vision mode
+  [NodeInputKeyEnum.aiChatReasoning]?: boolean; // Is open reasoning mode
 };
 
 // variable

@@ -25,7 +25,8 @@ export enum ChatItemValueTypeEnum {
   text = 'text',
   file = 'file',
   tool = 'tool',
-  interactive = 'interactive'
+  interactive = 'interactive',
+  reasoning = 'reasoning'
 }
 
 export enum ChatSourceEnum {
packages/global/core/chat/type.d.ts (vendored, 11 changed lines)

@@ -70,14 +70,23 @@ export type SystemChatItemType = {
   obj: ChatRoleEnum.System;
   value: SystemChatItemValueItemType[];
 };
 
 export type AIChatItemValueItemType = {
-  type: ChatItemValueTypeEnum.text | ChatItemValueTypeEnum.tool | ChatItemValueTypeEnum.interactive;
+  type:
+    | ChatItemValueTypeEnum.text
+    | ChatItemValueTypeEnum.reasoning
+    | ChatItemValueTypeEnum.tool
+    | ChatItemValueTypeEnum.interactive;
   text?: {
     content: string;
   };
+  reasoning?: {
+    content: string;
+  };
   tools?: ToolModuleResponseItemType[];
   interactive?: WorkflowInteractiveResponseType;
 };
 
 export type AIChatItemType = {
   obj: ChatRoleEnum.AI;
   value: AIChatItemValueItemType[];

@@ -141,6 +141,7 @@ export enum NodeInputKeyEnum {
   aiChatDatasetQuote = 'quoteQA',
   aiChatVision = 'aiChatVision',
   stringQuoteText = 'stringQuoteText',
+  aiChatReasoning = 'aiChatReasoning',
 
   // dataset
   datasetSelectList = 'datasets',

@@ -220,7 +221,8 @@ export enum NodeOutputKeyEnum {
   // common
   userChatInput = 'userChatInput',
   history = 'history',
-  answerText = 'answerText', // module answer. the value will be show and save to history
+  answerText = 'answerText', // node answer. the value will be show and save to history
+  reasoningText = 'reasoningText', // node reasoning. the value will be show but not save to history
   success = 'success',
   failed = 'failed',
   error = 'error',

@@ -220,6 +220,7 @@ export type AIChatNodeProps = {
   [NodeInputKeyEnum.aiChatMaxToken]?: number;
   [NodeInputKeyEnum.aiChatIsResponseText]: boolean;
   [NodeInputKeyEnum.aiChatVision]?: boolean;
+  [NodeInputKeyEnum.aiChatReasoning]?: boolean;
 
   [NodeInputKeyEnum.aiChatQuoteRole]?: AiChatQuoteRoleType;
   [NodeInputKeyEnum.aiChatQuoteTemplate]?: string;
@@ -364,12 +364,14 @@ export function replaceEditorVariable({
 
 export const textAdaptGptResponse = ({
   text,
+  reasoning_content,
   model = '',
   finish_reason = null,
   extraData = {}
 }: {
   model?: string;
-  text: string | null;
+  text?: string | null;
+  reasoning_content?: string | null;
   finish_reason?: null | 'stop';
   extraData?: Object;
 }) => {

@@ -381,10 +383,11 @@ export const textAdaptGptResponse = ({
     model,
     choices: [
       {
-        delta:
-          text === null
-            ? {}
-            : { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: text },
+        delta: {
+          role: ChatCompletionRequestMessageRoleEnum.Assistant,
+          content: text,
+          ...(reasoning_content && { reasoning_content })
+        },
         index: 0,
         finish_reason
       }
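For illustration only, the delta built by the updated helper now carries an optional reasoning field next to the normal assistant content. A sketch of its shape, with placeholder values (reasoning_content is only spread in when it is a non-empty string):

```ts
// Hypothetical delta as produced after this change; not taken verbatim from the commit.
const exampleDelta: {
  role: 'assistant';
  content?: string | null;
  reasoning_content?: string;
} = {
  role: 'assistant',
  content: 'Nginx can be downloaded from…',
  reasoning_content: 'The user is asking about downloads, so…'
};
```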
@@ -63,14 +63,14 @@ export const AiChatModule: FlowNodeTemplateType = {
       key: NodeInputKeyEnum.aiChatTemperature,
       renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
       label: '',
-      value: 0,
+      value: undefined,
       valueType: WorkflowIOValueTypeEnum.number
     },
     {
       key: NodeInputKeyEnum.aiChatMaxToken,
       renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
       label: '',
-      value: 2000,
+      value: undefined,
       valueType: WorkflowIOValueTypeEnum.number
     },
 

@@ -91,6 +91,13 @@ export const AiChatModule: FlowNodeTemplateType = {
       valueType: WorkflowIOValueTypeEnum.boolean,
       value: true
     },
+    {
+      key: NodeInputKeyEnum.aiChatReasoning,
+      renderTypeList: [FlowNodeInputTypeEnum.hidden],
+      label: '',
+      valueType: WorkflowIOValueTypeEnum.boolean,
+      value: true
+    },
     // settings modal ---
     {
       ...Input_Template_System_Prompt,
@@ -43,14 +43,14 @@ export const ToolModule: FlowNodeTemplateType = {
       key: NodeInputKeyEnum.aiChatTemperature,
       renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
       label: '',
-      value: 0,
+      value: undefined,
       valueType: WorkflowIOValueTypeEnum.number
     },
     {
       key: NodeInputKeyEnum.aiChatMaxToken,
       renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
       label: '',
-      value: 2000,
+      value: undefined,
       valueType: WorkflowIOValueTypeEnum.number
     },
     {
@@ -40,7 +40,7 @@ export async function uploadMongoImg({
     expiredTime: forever ? undefined : addHours(new Date(), 1)
   });
 
-  return `${process.env.FE_DOMAIN || ''}${process.env.NEXT_PUBLIC_BASE_URL || ''}${imageBaseUrl}${String(_id)}.${extension}`;
+  return `${process.env.NEXT_PUBLIC_BASE_URL || ''}${imageBaseUrl}${String(_id)}.${extension}`;
 }
 
 const getIdFromPath = (path?: string) => {
@@ -27,8 +27,9 @@
     "maxContext": 64000,
     "maxResponse": 4096,
     "quoteMaxToken": 60000,
-    "maxTemperature": 1.5,
+    "maxTemperature": null,
     "vision": false,
+    "reasoning": true,
     "toolChoice": false,
     "functionCall": false,
     "defaultSystemChatPrompt": "",

@@ -39,9 +40,7 @@
     "usedInQueryExtension": true,
     "customExtractPrompt": "",
     "usedInToolCall": true,
-    "defaultConfig": {
-      "temperature": null
-    },
+    "defaultConfig": {},
     "fieldMap": {},
     "type": "llm"
   }
@@ -50,10 +50,10 @@
     "maxContext": 128000,
     "maxResponse": 4000,
     "quoteMaxToken": 120000,
-    "maxTemperature": 1.2,
+    "maxTemperature": null,
     "vision": false,
     "toolChoice": false,
-    "functionCall": true,
+    "functionCall": false,
     "defaultSystemChatPrompt": "",
     "datasetProcess": true,
     "usedInClassify": true,

@@ -63,8 +63,10 @@
     "customExtractPrompt": "",
     "usedInToolCall": true,
     "defaultConfig": {
-      "temperature": 1,
-      "max_tokens": null
+      "stream": false
+    },
+    "fieldMap": {
+      "max_tokens": "max_completion_tokens"
     },
     "type": "llm"
   },
@@ -74,10 +76,10 @@
     "maxContext": 128000,
     "maxResponse": 4000,
     "quoteMaxToken": 120000,
-    "maxTemperature": 1.2,
+    "maxTemperature": null,
     "vision": false,
     "toolChoice": false,
-    "functionCall": true,
+    "functionCall": false,
     "defaultSystemChatPrompt": "",
     "datasetProcess": true,
     "usedInClassify": true,

@@ -87,10 +89,11 @@
     "customExtractPrompt": "",
     "usedInToolCall": true,
     "defaultConfig": {
-      "temperature": 1,
-      "max_tokens": null,
       "stream": false
     },
+    "fieldMap": {
+      "max_tokens": "max_completion_tokens"
+    },
     "type": "llm"
   },
   {
@@ -99,10 +102,10 @@
     "maxContext": 195000,
     "maxResponse": 8000,
     "quoteMaxToken": 120000,
-    "maxTemperature": 1.2,
+    "maxTemperature": null,
     "vision": false,
     "toolChoice": false,
-    "functionCall": true,
+    "functionCall": false,
     "defaultSystemChatPrompt": "",
     "datasetProcess": true,
     "usedInClassify": true,

@@ -112,10 +115,11 @@
     "customExtractPrompt": "",
     "usedInToolCall": true,
     "defaultConfig": {
-      "temperature": 1,
-      "max_tokens": null,
       "stream": false
     },
+    "fieldMap": {
+      "max_tokens": "max_completion_tokens"
+    },
     "type": "llm"
   },
   {
@@ -2,10 +2,12 @@ import { replaceVariable } from '@fastgpt/global/common/string/tools';
 import { createChatCompletion } from '../config';
 import { ChatItemType } from '@fastgpt/global/core/chat/type';
 import { countGptMessagesTokens, countPromptTokens } from '../../../common/string/tiktoken/index';
-import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
+import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
 import { getLLMModel } from '../model';
 import { llmCompletionsBodyFormat } from '../utils';
 import { addLog } from '../../../common/system/log';
+import { filterGPTMessageByMaxContext } from '../../chat/utils';
+import json5 from 'json5';
 
 /*
   query extension - 问题扩展
@@ -13,72 +15,73 @@ import { addLog } from '../../../common/system/log';
 */
 
 const title = global.feConfigs?.systemTitle || 'FastAI';
-const defaultPrompt = `作为一个向量检索助手,你的任务是结合历史记录,从不同角度,为“原问题”生成个不同版本的“检索词”,从而提高向量检索的语义丰富度,提高向量检索的精度。
+const defaultPrompt = `## 你的任务
+你作为一个向量检索助手,你的任务是结合历史记录,从不同角度,为“原问题”生成个不同版本的“检索词”,从而提高向量检索的语义丰富度,提高向量检索的精度。
 生成的问题要求指向对象清晰明确,并与“原问题语言相同”。
 
-参考 <Example></Example> 标中的示例来完成任务。
+## 参考示例
 
-<Example>
 历史记录:
 """
+null
 """
 原问题: 介绍下剧情。
 检索词: ["介绍下故事的背景。","故事的主题是什么?","介绍下故事的主要人物。"]
 ----------------
 历史记录:
 """
-Q: 对话背景。
-A: 当前对话是关于 Nginx 的介绍和使用等。
+user: 对话背景。
+assistant: 当前对话是关于 Nginx 的介绍和使用等。
 """
 原问题: 怎么下载
 检索词: ["Nginx 如何下载?","下载 Nginx 需要什么条件?","有哪些渠道可以下载 Nginx?"]
 ----------------
 历史记录:
 """
-Q: 对话背景。
-A: 当前对话是关于 Nginx 的介绍和使用等。
-Q: 报错 "no connection"
-A: 报错"no connection"可能是因为……
+user: 对话背景。
+assistant: 当前对话是关于 Nginx 的介绍和使用等。
+user: 报错 "no connection"
+assistant: 报错"no connection"可能是因为……
 """
 原问题: 怎么解决
 检索词: ["Nginx报错"no connection"如何解决?","造成'no connection'报错的原因。","Nginx提示'no connection',要怎么办?"]
 ----------------
 历史记录:
 """
-Q: 护产假多少天?
-A: 护产假的天数根据员工所在的城市而定。请提供您所在的城市,以便我回答您的问题。
+user: How long is the maternity leave?
+assistant: The number of days of maternity leave depends on the city in which the employee is located. Please provide your city so that I can answer your questions.
 """
-原问题: 沈阳
-检索词: ["沈阳的护产假多少天?","沈阳的护产假政策。","沈阳的护产假标准。"]
+原问题: ShenYang
+检索词: ["How many days is maternity leave in Shenyang?","Shenyang's maternity leave policy.","The standard of maternity leave in Shenyang."]
 ----------------
 历史记录:
 """
-Q: 作者是谁?
-A: ${title} 的作者是 labring。
+user: 作者是谁?
+assistant: ${title} 的作者是 labring。
 """
 原问题: Tell me about him
 检索词: ["Introduce labring, the author of ${title}." ," Background information on author labring." "," Why does labring do ${title}?"]
 ----------------
 历史记录:
 """
-Q: 对话背景。
-A: 关于 ${title} 的介绍和使用等问题。
+user: 对话背景。
+assistant: 关于 ${title} 的介绍和使用等问题。
 """
 原问题: 你好。
 检索词: ["你好"]
 ----------------
 历史记录:
 """
-Q: ${title} 如何收费?
-A: ${title} 收费可以参考……
+user: ${title} 如何收费?
+assistant: ${title} 收费可以参考……
 """
 原问题: 你知道 laf 么?
 检索词: ["laf 的官网地址是多少?","laf 的使用教程。","laf 有什么特点和优势。"]
 ----------------
 历史记录:
 """
-Q: ${title} 的优势
-A: 1. 开源
+user: ${title} 的优势
+assistant: 1. 开源
 2. 简便
 3. 扩展性强
 """
@@ -87,18 +90,20 @@ A: 1. 开源
 ----------------
 历史记录:
 """
-Q: 什么是 ${title}?
-A: ${title} 是一个 RAG 平台。
-Q: 什么是 Laf?
-A: Laf 是一个云函数开发平台。
+user: 什么是 ${title}?
+assistant: ${title} 是一个 RAG 平台。
+user: 什么是 Laf?
+assistant: Laf 是一个云函数开发平台。
 """
 原问题: 它们有什么关系?
 检索词: ["${title}和Laf有什么关系?","介绍下${title}","介绍下Laf"]
-</Example>
 
------
+## 输出要求
 
-下面是正式的任务:
+1. 输出格式为 JSON 数组,数组中每个元素为字符串。无需对输出进行任何解释。
+2. 输出语言与原问题相同。原问题为中文则输出中文;原问题为英文则输出英文。
+
+## 开始任务
+
 历史记录:
 """
@@ -125,26 +130,39 @@ export const queryExtension = async ({
   outputTokens: number;
 }> => {
   const systemFewShot = chatBg
-    ? `Q: 对话背景。
-A: ${chatBg}
+    ? `user: 对话背景。
+assistant: ${chatBg}
 `
     : '';
-  const historyFewShot = histories
-    .map((item) => {
-      const role = item.obj === 'Human' ? 'Q' : 'A';
-      return `${role}: ${chatValue2RuntimePrompt(item.value).text}`;
-    })
-    .join('\n');
-  const concatFewShot = `${systemFewShot}${historyFewShot}`.trim();
 
   const modelData = getLLMModel(model);
+  const filterHistories = await filterGPTMessageByMaxContext({
+    messages: chats2GPTMessages({ messages: histories, reserveId: false }),
+    maxContext: modelData.maxContext - 1000
+  });
+
+  const historyFewShot = filterHistories
+    .map((item) => {
+      const role = item.role;
+      const content = item.content;
+      if ((role === 'user' || role === 'assistant') && content) {
+        if (typeof content === 'string') {
+          return `${role}: ${content}`;
+        } else {
+          return `${role}: ${content.map((item) => (item.type === 'text' ? item.text : '')).join('\n')}`;
+        }
+      }
+    })
+    .filter(Boolean)
+    .join('\n');
+  const concatFewShot = `${systemFewShot}${historyFewShot}`.trim();
 
   const messages = [
     {
       role: 'user',
       content: replaceVariable(defaultPrompt, {
         query: `${query}`,
-        histories: concatFewShot
+        histories: concatFewShot || 'null'
       })
     }
   ] as any;
@@ -154,7 +172,7 @@ A: ${chatBg}
     {
       stream: false,
       model: modelData.model,
-      temperature: 0.01,
+      temperature: 0.1,
       messages
     },
     modelData
@@ -172,22 +190,41 @@ A: ${chatBg}
     };
   }
 
+  const start = answer.indexOf('[');
+  const end = answer.lastIndexOf(']');
+  if (start === -1 || end === -1) {
+    addLog.warn('Query extension failed, not a valid JSON', {
+      answer
+    });
+    return {
+      rawQuery: query,
+      extensionQueries: [],
+      model,
+      inputTokens: 0,
+      outputTokens: 0
+    };
+  }
+
   // Intercept the content of [] and retain []
-  answer = answer.match(/\[.*?\]/)?.[0] || '';
-  answer = answer.replace(/\\"/g, '"');
+  const jsonStr = answer
+    .substring(start, end + 1)
+    .replace(/(\\n|\\)/g, '')
+    .replace(/ /g, '');
 
   try {
-    const queries = JSON.parse(answer) as string[];
+    const queries = json5.parse(jsonStr) as string[];
 
     return {
       rawQuery: query,
-      extensionQueries: Array.isArray(queries) ? queries : [],
+      extensionQueries: (Array.isArray(queries) ? queries : []).slice(0, 5),
       model,
       inputTokens: await countGptMessagesTokens(messages),
       outputTokens: await countPromptTokens(answer)
     };
   } catch (error) {
-    addLog.error(`Query extension error`, error);
+    addLog.warn('Query extension failed, not a valid JSON', {
+      answer
+    });
     return {
       rawQuery: query,
       extensionQueries: [],
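The new parsing path is deliberately lenient: it takes the span from the first '[' to the last ']', strips escapes and spaces, and parses with json5 so trailing commas or single quotes do not break it. A self-contained sketch of the same idea (error handling collapsed into a null return; not the exact code from this file):

```ts
import json5 from 'json5';

// Extract the first '['…last ']' span, normalise it, parse leniently,
// and cap the result at 5 queries, mirroring the slice(0, 5) above.
const parseExtensionQueries = (answer: string): string[] | null => {
  const start = answer.indexOf('[');
  const end = answer.lastIndexOf(']');
  if (start === -1 || end === -1) return null;

  const jsonStr = answer
    .substring(start, end + 1)
    .replace(/(\\n|\\)/g, '')
    .replace(/ /g, '');

  try {
    const queries = json5.parse(jsonStr);
    return Array.isArray(queries) ? queries.slice(0, 5) : [];
  } catch {
    return null;
  }
};

// parseExtensionQueries('检索词: ["你好", "介绍下 FastGPT"]')
// → ['你好', '介绍下FastGPT']  (note: the space stripping also removes spaces inside strings)
```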
@@ -2,33 +2,23 @@ import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import {
   ChatCompletionCreateParamsNonStreaming,
   ChatCompletionCreateParamsStreaming,
-  ChatCompletionMessageParam,
   StreamChatType
 } from '@fastgpt/global/core/ai/type';
-import { countGptMessagesTokens } from '../../common/string/tiktoken';
 import { getLLMModel } from './model';
 
-export const computedMaxToken = async ({
+/*
+  Count response max token
+*/
+export const computedMaxToken = ({
   maxToken,
-  model,
-  filterMessages = []
+  model
 }: {
   maxToken?: number;
   model: LLMModelItemType;
-  filterMessages: ChatCompletionMessageParam[];
 }) => {
   if (maxToken === undefined) return;
 
   maxToken = Math.min(maxToken, model.maxResponse);
-  const tokensLimit = model.maxContext;
-
-  /* count response max token */
-  const promptsToken = await countGptMessagesTokens(filterMessages);
-  maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;
-
-  if (maxToken <= 0) {
-    maxToken = 200;
-  }
   return maxToken;
 };
 

@@ -40,6 +30,7 @@ export const computedTemperature = ({
   model: LLMModelItemType;
   temperature: number;
 }) => {
+  if (typeof model.maxTemperature !== 'number') return undefined;
   temperature = +(model.maxTemperature * (temperature / 10)).toFixed(2);
   temperature = Math.max(temperature, 0.01);
 
@@ -14,36 +14,19 @@ import { serverRequestBaseUrl } from '../../common/api/serverRequest';
 import { i18nT } from '../../../web/i18n/utils';
 import { addLog } from '../../common/system/log';
 
-export const filterGPTMessageByMaxTokens = async ({
+export const filterGPTMessageByMaxContext = async ({
   messages = [],
-  maxTokens
+  maxContext
 }: {
   messages: ChatCompletionMessageParam[];
-  maxTokens: number;
+  maxContext: number;
 }) => {
   if (!Array.isArray(messages)) {
     return [];
   }
-  const rawTextLen = messages.reduce((sum, item) => {
-    if (typeof item.content === 'string') {
-      return sum + item.content.length;
-    }
-    if (Array.isArray(item.content)) {
-      return (
-        sum +
-        item.content.reduce((sum, item) => {
-          if (item.type === 'text') {
-            return sum + item.text.length;
-          }
-          return sum;
-        }, 0)
-      );
-    }
-    return sum;
-  }, 0);
 
   // If the text length is less than half of the maximum token, no calculation is required
-  if (rawTextLen < maxTokens * 0.5) {
+  if (messages.length < 4) {
     return messages;
   }
 

@@ -55,7 +38,7 @@ export const filterGPTMessageByMaxTokens = async ({
   const chatPrompts: ChatCompletionMessageParam[] = messages.slice(chatStartIndex);
 
   // reduce token of systemPrompt
-  maxTokens -= await countGptMessagesTokens(systemPrompts);
+  maxContext -= await countGptMessagesTokens(systemPrompts);
 
   // Save the last chat prompt(question)
   const question = chatPrompts.pop();

@@ -73,9 +56,9 @@ export const filterGPTMessageByMaxTokens = async ({
     }
 
     const tokens = await countGptMessagesTokens([assistant, user]);
-    maxTokens -= tokens;
+    maxContext -= tokens;
     /* 整体 tokens 超出范围,截断 */
-    if (maxTokens < 0) {
+    if (maxContext < 0) {
       break;
     }
 
@@ -1,5 +1,5 @@
 import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
-import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../chat/utils';
+import { filterGPTMessageByMaxContext, loadRequestMessages } from '../../../chat/utils';
 import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
 import {
   countMessagesTokens,

@@ -175,9 +175,9 @@ ${description ? `- ${description}` : ''}
     }
   ];
   const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
-  const filterMessages = await filterGPTMessageByMaxTokens({
+  const filterMessages = await filterGPTMessageByMaxContext({
     messages: adaptMessages,
-    maxTokens: extractModel.maxContext
+    maxContext: extractModel.maxContext
   });
   const requestMessages = await loadRequestMessages({
     messages: filterMessages,
@@ -1,5 +1,5 @@
 import { createChatCompletion } from '../../../../ai/config';
-import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
+import { filterGPTMessageByMaxContext, loadRequestMessages } from '../../../../chat/utils';
 import {
   ChatCompletion,
   StreamChatType,

@@ -172,10 +172,14 @@ export const runToolWithFunctionCall = async (
       };
     });
 
+  const max_tokens = computedMaxToken({
+    model: toolModel,
+    maxToken
+  });
   const filterMessages = (
-    await filterGPTMessageByMaxTokens({
+    await filterGPTMessageByMaxContext({
       messages,
-      maxTokens: toolModel.maxContext - 300 // filter token. not response maxToken
+      maxContext: toolModel.maxContext - (max_tokens || 0) // filter token. not response maxToken
     })
   ).map((item) => {
     if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant && item.function_call) {

@@ -190,16 +194,11 @@ export const runToolWithFunctionCall = async (
     }
     return item;
   });
-  const [requestMessages, max_tokens] = await Promise.all([
+  const [requestMessages] = await Promise.all([
     loadRequestMessages({
       messages: filterMessages,
       useVision: toolModel.vision && aiChatVision,
       origin: requestOrigin
-    }),
-    computedMaxToken({
-      model: toolModel,
-      maxToken,
-      filterMessages
     })
   ]);
   const requestBody = llmCompletionsBodyFormat(
@@ -1,5 +1,5 @@
 import { createChatCompletion } from '../../../../ai/config';
-import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
+import { filterGPTMessageByMaxContext, loadRequestMessages } from '../../../../chat/utils';
 import {
   ChatCompletion,
   StreamChatType,

@@ -196,21 +196,20 @@ export const runToolWithPromptCall = async (
     return Promise.reject('Prompt call invalid input');
   }
 
-  const filterMessages = await filterGPTMessageByMaxTokens({
+  const max_tokens = computedMaxToken({
+    model: toolModel,
+    maxToken
+  });
+  const filterMessages = await filterGPTMessageByMaxContext({
     messages,
-    maxTokens: toolModel.maxContext - 500 // filter token. not response maxToken
+    maxContext: toolModel.maxContext - (max_tokens || 0) // filter token. not response maxToken
   });
 
-  const [requestMessages, max_tokens] = await Promise.all([
+  const [requestMessages] = await Promise.all([
     loadRequestMessages({
       messages: filterMessages,
       useVision: toolModel.vision && aiChatVision,
       origin: requestOrigin
-    }),
-    computedMaxToken({
-      model: toolModel,
-      maxToken,
-      filterMessages
     })
   ]);
   const requestBody = llmCompletionsBodyFormat(
@@ -1,5 +1,5 @@
 import { createChatCompletion } from '../../../../ai/config';
-import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
+import { filterGPTMessageByMaxContext, loadRequestMessages } from '../../../../chat/utils';
 import {
   ChatCompletion,
   ChatCompletionMessageToolCall,

@@ -228,11 +228,16 @@ export const runToolWithToolChoice = async (
       };
     });
 
+  const max_tokens = computedMaxToken({
+    model: toolModel,
+    maxToken
+  });
+
   // Filter histories by maxToken
   const filterMessages = (
-    await filterGPTMessageByMaxTokens({
+    await filterGPTMessageByMaxContext({
       messages,
-      maxTokens: toolModel.maxContext - 300 // filter token. not response maxToken
+      maxContext: toolModel.maxContext - (max_tokens || 0) // filter token. not response maxToken
     })
   ).map((item) => {
     if (item.role === 'assistant' && item.tool_calls) {

@@ -248,16 +253,11 @@ export const runToolWithToolChoice = async (
     return item;
   });
 
-  const [requestMessages, max_tokens] = await Promise.all([
+  const [requestMessages] = await Promise.all([
     loadRequestMessages({
       messages: filterMessages,
       useVision: toolModel.vision && aiChatVision,
       origin: requestOrigin
-    }),
-    computedMaxToken({
-      model: toolModel,
-      maxToken,
-      filterMessages
    })
   ]);
   const requestBody = llmCompletionsBodyFormat(
@@ -1,5 +1,5 @@
 import type { NextApiResponse } from 'next';
-import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../chat/utils';
+import { filterGPTMessageByMaxContext, loadRequestMessages } from '../../../chat/utils';
 import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
 import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
 import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';

@@ -58,6 +58,7 @@ export type ChatProps = ModuleDispatchProps<
 >;
 export type ChatResponse = DispatchNodeResultType<{
   [NodeOutputKeyEnum.answerText]: string;
+  [NodeOutputKeyEnum.reasoningText]?: string;
   [NodeOutputKeyEnum.history]: ChatItemType[];
 }>;
 

@@ -87,22 +88,24 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
       quoteTemplate,
       quotePrompt,
       aiChatVision,
+      aiChatReasoning,
       fileUrlList: fileLinks, // node quote file links
       stringQuoteText //abandon
     }
   } = props;
   const { files: inputFiles } = chatValue2RuntimePrompt(query); // Chat box input files
 
-  stream = stream && isResponseAnswerText;
-
-  const chatHistories = getHistories(history, histories);
-  quoteQA = checkQuoteQAValue(quoteQA);
-
   const modelConstantsData = getLLMModel(model);
   if (!modelConstantsData) {
     return Promise.reject('The chat model is undefined, you need to select a chat model.');
   }
 
+  stream = stream && isResponseAnswerText;
+  aiChatReasoning = !!aiChatReasoning && !!modelConstantsData.reasoning;
+
+  const chatHistories = getHistories(history, histories);
+  quoteQA = checkQuoteQAValue(quoteQA);
+
   const [{ datasetQuoteText }, { documentQuoteText, userFiles }] = await Promise.all([
     filterDatasetQuote({
       quoteQA,
@@ -124,9 +127,15 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     return Promise.reject(i18nT('chat:AI_input_is_empty'));
   }
 
+  const max_tokens = computedMaxToken({
+    model: modelConstantsData,
+    maxToken
+  });
+
   const [{ filterMessages }] = await Promise.all([
     getChatMessages({
       model: modelConstantsData,
+      maxTokens: max_tokens,
       histories: chatHistories,
       useDatasetQuote: quoteQA !== undefined,
       datasetQuoteText,

@@ -137,8 +146,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
       userFiles,
       documentQuoteText
     }),
+    // Censor = true and system key, will check content
     (() => {
-      // censor model and system key
      if (modelConstantsData.censor && !externalProvider.openaiAccount?.key) {
        return postTextCensor({
          text: `${systemPrompt}

@@ -149,18 +158,11 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     })()
   ]);
 
-  const [requestMessages, max_tokens] = await Promise.all([
-    loadRequestMessages({
-      messages: filterMessages,
-      useVision: modelConstantsData.vision && aiChatVision,
-      origin: requestOrigin
-    }),
-    computedMaxToken({
-      model: modelConstantsData,
-      maxToken,
-      filterMessages
-    })
-  ]);
+  const requestMessages = await loadRequestMessages({
+    messages: filterMessages,
+    useVision: modelConstantsData.vision && aiChatVision,
+    origin: requestOrigin
+  });
 
   const requestBody = llmCompletionsBodyFormat(
     {
@@ -183,34 +185,42 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     }
   });
 
-  const { answerText } = await (async () => {
+  const { answerText, reasoningText } = await (async () => {
     if (res && isStreamResponse) {
       // sse response
-      const { answer } = await streamResponse({
+      const { answer, reasoning } = await streamResponse({
         res,
         stream: response,
+        aiChatReasoning,
         workflowStreamResponse
       });
 
       return {
-        answerText: answer
+        answerText: answer,
+        reasoningText: reasoning
       };
     } else {
       const unStreamResponse = response as ChatCompletion;
       const answer = unStreamResponse.choices?.[0]?.message?.content || '';
+      const reasoning = aiChatReasoning
+        ? // @ts-ignore
+          unStreamResponse.choices?.[0]?.message?.reasoning_content || ''
+        : '';
       if (stream) {
         // Some models do not support streaming
-        workflowStreamResponse?.({
-          event: SseResponseEventEnum.fastAnswer,
-          data: textAdaptGptResponse({
-            text: answer
-          })
-        });
+        reasoning &&
+          workflowStreamResponse?.({
+            event: SseResponseEventEnum.fastAnswer,
+            data: textAdaptGptResponse({
+              text: answer,
+              reasoning_content: reasoning
+            })
+          });
       }
 
       return {
-        answerText: answer
+        answerText: answer,
+        reasoningText: reasoning
       };
     }
   })();
@@ -241,6 +251,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
 
   return {
     answerText,
+    reasoningText,
     [DispatchNodeResponseKeyEnum.nodeResponse]: {
       totalPoints: externalProvider.openaiAccount?.key ? 0 : totalPoints,
       model: modelName,

@@ -367,6 +378,7 @@ async function getMultiInput({
 
 async function getChatMessages({
   model,
+  maxTokens = 0,
   aiChatQuoteRole,
   datasetQuotePrompt = '',
   datasetQuoteText,

@@ -378,6 +390,7 @@ async function getChatMessages({
   documentQuoteText
 }: {
   model: LLMModelItemType;
+  maxTokens?: number;
   // dataset quote
   aiChatQuoteRole: AiChatQuoteRoleType; // user: replace user prompt; system: replace system prompt
   datasetQuotePrompt?: string;

@@ -444,9 +457,9 @@ async function getChatMessages({
 
   const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
 
-  const filterMessages = await filterGPTMessageByMaxTokens({
+  const filterMessages = await filterGPTMessageByMaxContext({
     messages: adaptMessages,
-    maxTokens: model.maxContext - 300 // filter token. not response maxToken
+    maxContext: model.maxContext - maxTokens // filter token. not response maxToken
   });
 
   return {
@@ -457,33 +470,43 @@ async function getChatMessages({
 async function streamResponse({
   res,
   stream,
-  workflowStreamResponse
+  workflowStreamResponse,
+  aiChatReasoning
 }: {
   res: NextApiResponse;
   stream: StreamChatType;
   workflowStreamResponse?: WorkflowResponseType;
+  aiChatReasoning?: boolean;
 }) {
   const write = responseWriteController({
     res,
     readStream: stream
   });
   let answer = '';
+  let reasoning = '';
   for await (const part of stream) {
     if (res.closed) {
       stream.controller?.abort();
       break;
     }
 
     const content = part.choices?.[0]?.delta?.content || '';
     answer += content;
+
+    const reasoningContent = aiChatReasoning
+      ? part.choices?.[0]?.delta?.reasoning_content || ''
+      : '';
+    reasoning += reasoningContent;
+
     workflowStreamResponse?.({
       write,
       event: SseResponseEventEnum.answer,
       data: textAdaptGptResponse({
-        text: content
+        text: content,
+        reasoning_content: reasoningContent
       })
     });
   }
 
-  return { answer };
+  return { answer, reasoning };
 }
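On the consuming side, each answer event can now carry both a content delta and a reasoning_content delta. A rough sketch of how a client might accumulate them separately (hypothetical helper, not part of this commit; the field names follow the delta shape shown above):

```ts
// Hypothetical accumulator for streamed deltas.
type StreamDelta = { content?: string | null; reasoning_content?: string | null };

const collectStream = (deltas: StreamDelta[]) => {
  let answer = '';
  let reasoning = '';
  for (const delta of deltas) {
    answer += delta.content ?? '';
    reasoning += delta.reasoning_content ?? '';
  }
  // Per NodeOutputKeyEnum.reasoningText, the reasoning text is shown in the UI
  // but is not saved back into the chat history.
  return { answer, reasoning };
};
```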
@@ -204,6 +204,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
     { inputs = [] }: RuntimeNodeItemType,
     {
       answerText = '',
+      reasoningText,
       responseData,
       nodeDispatchUsages,
       toolResponses,

@@ -213,6 +214,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
     }: Omit<
       DispatchNodeResultType<{
         [NodeOutputKeyEnum.answerText]?: string;
+        [NodeOutputKeyEnum.reasoningText]?: string;
         [DispatchNodeResponseKeyEnum.nodeResponse]?: ChatHistoryItemResType;
       }>,
       'nodeResponse'

@@ -251,6 +253,13 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
         }
       });
     }
+    } else if (reasoningText) {
+      chatAssistantResponse.push({
+        type: ChatItemValueTypeEnum.reasoning,
+        reasoning: {
+          content: reasoningText
+        }
+      });
   }
 
   if (rewriteHistories) {
@@ -178,6 +178,7 @@ export const iconPaths = {
   'core/chat/sideLine': () => import('./icons/core/chat/sideLine.svg'),
   'core/chat/speaking': () => import('./icons/core/chat/speaking.svg'),
   'core/chat/stopSpeech': () => import('./icons/core/chat/stopSpeech.svg'),
+  'core/chat/think': () => import('./icons/core/chat/think.svg'),
   'core/dataset/commonDataset': () => import('./icons/core/dataset/commonDataset.svg'),
   'core/dataset/commonDatasetColor': () => import('./icons/core/dataset/commonDatasetColor.svg'),
   'core/dataset/commonDatasetOutline': () =>
@@ -0,0 +1 @@
+<svg t="1737983662269" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="6134" width="64" height="64"> … </svg>
(new file: the core/chat/think icon, single-line SVG, 4.7 KiB; full path data omitted)
@@ -304,7 +304,7 @@ export function useScrollPagination<
   );
 
   return (
-    <MyBox {...props} ref={ref} overflow={'overlay'} isLoading={isLoading}>
+    <MyBox ref={ref} h={'100%'} overflow={'auto'} isLoading={isLoading} {...props}>
       {scrollLoadType === 'top' && total > 0 && isLoading && (
         <Box mt={2} fontSize={'xs'} color={'blackAlpha.500'} textAlign={'center'}>
           {t('common:common.is_requesting')}
@ -49,10 +49,10 @@
|
|||||||
"model.output_price": "Output price",
|
"model.output_price": "Output price",
|
||||||
"model.output_price_tip": "The language model output price. If this item is configured, the model comprehensive price will be invalid.",
|
"model.output_price_tip": "The language model output price. If this item is configured, the model comprehensive price will be invalid.",
|
||||||
"model.param_name": "Parameter name",
|
"model.param_name": "Parameter name",
|
||||||
"model.request_auth": "Custom token",
|
"model.request_auth": "Custom key",
|
||||||
"model.request_auth_tip": "When making a request to a custom request address, carry the request header: Authorization: Bearer xxx to make the request.",
|
"model.request_auth_tip": "When making a request to a custom request address, carry the request header: Authorization: Bearer xxx to make the request.",
|
||||||
"model.request_url": "Custom url",
|
"model.request_url": "Custom url",
|
||||||
"model.request_url_tip": "If this value is filled in, a request will be made directly to this address without going through OneAPI",
|
"model.request_url_tip": "If you fill in this value, you will initiate a request directly without passing. \nYou need to follow the API format of Openai and fill in the full request address, such as\n\nLLM: {Host}}/v1/Chat/Completions\n\nEmbedding: {host}}/v1/embeddings\n\nSTT: {Host}/v1/Audio/Transcriptions\n\nTTS: {Host}}/v1/Audio/Speech\n\nRERARARARARARARANK: {Host}}/v1/RERARARARARARARARARARANK",
|
||||||
"model.test_model": "Model testing",
|
"model.test_model": "Model testing",
|
||||||
"model.tool_choice": "Tool choice",
|
"model.tool_choice": "Tool choice",
|
||||||
"model.tool_choice_tag": "ToolCall",
|
"model.tool_choice_tag": "ToolCall",
|
||||||
|
|||||||
@ -109,6 +109,7 @@
|
|||||||
"publish_channel": "Publish",
|
"publish_channel": "Publish",
|
||||||
"publish_success": "Publish Successful",
|
"publish_success": "Publish Successful",
|
||||||
"question_guide_tip": "After the conversation, 3 guiding questions will be generated for you.",
|
"question_guide_tip": "After the conversation, 3 guiding questions will be generated for you.",
|
||||||
|
"reasoning_response": "Output thinking",
|
||||||
"saved_success": "Saved successfully! \nTo use this version externally, click Save and Publish",
|
"saved_success": "Saved successfully! \nTo use this version externally, click Save and Publish",
|
||||||
"search_app": "Search apps",
|
"search_app": "Search apps",
|
||||||
"setting_app": "Workflow",
|
"setting_app": "Workflow",
|
||||||
|
|||||||
@ -2,6 +2,7 @@
|
|||||||
"AI_input_is_empty": "The content passed to the AI node is empty",
|
"AI_input_is_empty": "The content passed to the AI node is empty",
|
||||||
"Delete_all": "Clear All Lexicon",
|
"Delete_all": "Clear All Lexicon",
|
||||||
"LLM_model_response_empty": "The model flow response is empty, please check whether the model flow output is normal.",
|
"LLM_model_response_empty": "The model flow response is empty, please check whether the model flow output is normal.",
|
||||||
|
"ai_reasoning": "Thinking process",
|
||||||
"chat_history": "Conversation History",
|
"chat_history": "Conversation History",
|
||||||
"chat_input_guide_lexicon_is_empty": "Lexicon not configured yet",
|
"chat_input_guide_lexicon_is_empty": "Lexicon not configured yet",
|
||||||
"chat_test_app": "Debug-{{name}}",
|
"chat_test_app": "Debug-{{name}}",
|
||||||
|
|||||||
@ -49,10 +49,10 @@
|
|||||||
"model.output_price": "模型输出价格",
|
"model.output_price": "模型输出价格",
|
||||||
"model.output_price_tip": "语言模型输出价格,如果配置了该项,则模型综合价格会失效",
|
"model.output_price_tip": "语言模型输出价格,如果配置了该项,则模型综合价格会失效",
|
||||||
"model.param_name": "参数名",
|
"model.param_name": "参数名",
|
||||||
"model.request_auth": "自定义请求 Tokens",
|
"model.request_auth": "自定义请求 Key",
|
||||||
"model.request_auth_tip": "向自定义请求地址发起请求时候,携带请求头:Authorization: Bearer xxx 进行请求",
|
"model.request_auth_tip": "向自定义请求地址发起请求时候,携带请求头:Authorization: Bearer xxx 进行请求",
|
||||||
"model.request_url": "自定义请求地址",
|
"model.request_url": "自定义请求地址",
|
||||||
"model.request_url_tip": "如果填写该值,则会直接向该地址发起请求,不经过 OneAPI",
|
"model.request_url_tip": "如果填写该值,则会直接向该地址发起请求,不经过 OneAPI。需要遵循 OpenAI 的 API格式,并填写完整请求地址,例如:\nLLM: {{host}}/v1/chat/completions\nEmbedding: {{host}}/v1/embeddings\nSTT: {{host}}/v1/audio/transcriptions\nTTS: {{host}}/v1/audio/speech\nRerank: {{host}}/v1/rerank",
|
||||||
"model.test_model": "模型测试",
|
"model.test_model": "模型测试",
|
||||||
"model.tool_choice": "支持工具调用",
|
"model.tool_choice": "支持工具调用",
|
||||||
"model.tool_choice_tag": "工具调用",
|
"model.tool_choice_tag": "工具调用",
|
||||||
|
|||||||
@ -109,6 +109,7 @@
|
|||||||
"publish_channel": "发布渠道",
|
"publish_channel": "发布渠道",
|
||||||
"publish_success": "发布成功",
|
"publish_success": "发布成功",
|
||||||
"question_guide_tip": "对话结束后,会为你生成 3 个引导性问题。",
|
"question_guide_tip": "对话结束后,会为你生成 3 个引导性问题。",
|
||||||
|
"reasoning_response": "输出思考",
|
||||||
"saved_success": "保存成功!如需在外部使用该版本,请点击“保存并发布”",
|
"saved_success": "保存成功!如需在外部使用该版本,请点击“保存并发布”",
|
||||||
"search_app": "搜索应用",
|
"search_app": "搜索应用",
|
||||||
"setting_app": "应用配置",
|
"setting_app": "应用配置",
|
||||||
|
|||||||
@ -2,6 +2,7 @@
|
|||||||
"AI_input_is_empty": "传入AI 节点的内容为空",
|
"AI_input_is_empty": "传入AI 节点的内容为空",
|
||||||
"Delete_all": "清空词库",
|
"Delete_all": "清空词库",
|
||||||
"LLM_model_response_empty": "模型流响应为空,请检查模型流输出是否正常",
|
"LLM_model_response_empty": "模型流响应为空,请检查模型流输出是否正常",
|
||||||
|
"ai_reasoning": "思考过程",
|
||||||
"chat_history": "聊天记录",
|
"chat_history": "聊天记录",
|
||||||
"chat_input_guide_lexicon_is_empty": "还没有配置词库",
|
"chat_input_guide_lexicon_is_empty": "还没有配置词库",
|
||||||
"chat_test_app": "调试-{{name}}",
|
"chat_test_app": "调试-{{name}}",
|
||||||
|
|||||||
@ -48,10 +48,10 @@
|
|||||||
"model.output_price": "模型輸出價格",
|
"model.output_price": "模型輸出價格",
|
||||||
"model.output_price_tip": "語言模型輸出價格,如果配置了該項,則模型綜合價格會失效",
|
"model.output_price_tip": "語言模型輸出價格,如果配置了該項,則模型綜合價格會失效",
|
||||||
"model.param_name": "參數名",
|
"model.param_name": "參數名",
|
||||||
"model.request_auth": "自訂請求 Tokens",
|
"model.request_auth": "自訂請求 Key",
|
||||||
"model.request_auth_tip": "向自訂請求地址發起請求時候,攜帶請求頭:Authorization: Bearer xxx 進行請求",
|
"model.request_auth_tip": "向自訂請求地址發起請求時候,攜帶請求頭:Authorization: Bearer xxx 進行請求",
|
||||||
"model.request_url": "自訂請求地址",
|
"model.request_url": "自訂請求地址",
|
||||||
"model.request_url_tip": "如果填入該值,則會直接向該位址發起請求,不經過 OneAPI",
|
"model.request_url_tip": "如果填寫該值,則會直接向該地址發起請求,不經過 OneAPI。\n需要遵循 OpenAI 的 API格式,並填寫完整請求地址,例如:\n\nLLM: {{host}}/v1/chat/completions\n\nEmbedding: {{host}}/v1/embeddings\n\nSTT: {{host}}/v1/audio/transcriptions\n\nTTS: {{host}}/v1/audio/speech\n\nRerank: {{host}}/v1/rerank",
|
||||||
"model.test_model": "模型測試",
|
"model.test_model": "模型測試",
|
||||||
"model.tool_choice": "支援工具調用",
|
"model.tool_choice": "支援工具調用",
|
||||||
"model.tool_choice_tag": "工具調用",
|
"model.tool_choice_tag": "工具調用",
|
||||||
|
|||||||
@ -109,6 +109,7 @@
|
|||||||
"publish_channel": "發布通道",
|
"publish_channel": "發布通道",
|
||||||
"publish_success": "發布成功",
|
"publish_success": "發布成功",
|
||||||
"question_guide_tip": "對話結束後,會為你產生 3 個引導性問題。",
|
"question_guide_tip": "對話結束後,會為你產生 3 個引導性問題。",
|
||||||
|
"reasoning_response": "輸出思考",
|
||||||
"saved_success": "保存成功!\n如需在外部使用該版本,請點擊“儲存並發布”",
|
"saved_success": "保存成功!\n如需在外部使用該版本,請點擊“儲存並發布”",
|
||||||
"search_app": "搜尋應用程式",
|
"search_app": "搜尋應用程式",
|
||||||
"setting_app": "應用程式設定",
|
"setting_app": "應用程式設定",
|
||||||
|
|||||||
@ -2,6 +2,7 @@
|
|||||||
"AI_input_is_empty": "傳送至 AI 節點的內容為空",
|
"AI_input_is_empty": "傳送至 AI 節點的內容為空",
|
||||||
"Delete_all": "清除所有詞彙",
|
"Delete_all": "清除所有詞彙",
|
||||||
"LLM_model_response_empty": "模型流程回應為空,請檢查模型流程輸出是否正常",
|
"LLM_model_response_empty": "模型流程回應為空,請檢查模型流程輸出是否正常",
|
||||||
|
"ai_reasoning": "思考過程",
|
||||||
"chat_history": "對話紀錄",
|
"chat_history": "對話紀錄",
|
||||||
"chat_input_guide_lexicon_is_empty": "尚未設定詞彙庫",
|
"chat_input_guide_lexicon_is_empty": "尚未設定詞彙庫",
|
||||||
"chat_test_app": "調試-{{name}}",
|
"chat_test_app": "調試-{{name}}",
|
||||||
|
|||||||
@ -72,6 +72,7 @@ const AIChatSettingsModal = ({
|
|||||||
defaultValues: defaultData
|
defaultValues: defaultData
|
||||||
});
|
});
|
||||||
const model = watch('model');
|
const model = watch('model');
|
||||||
|
const reasoning = watch(NodeInputKeyEnum.aiChatReasoning);
|
||||||
const showResponseAnswerText = watch(NodeInputKeyEnum.aiChatIsResponseText) !== undefined;
|
const showResponseAnswerText = watch(NodeInputKeyEnum.aiChatIsResponseText) !== undefined;
|
||||||
const showVisionSwitch = watch(NodeInputKeyEnum.aiChatVision) !== undefined;
|
const showVisionSwitch = watch(NodeInputKeyEnum.aiChatVision) !== undefined;
|
||||||
const showMaxHistoriesSlider = watch('maxHistories') !== undefined;
|
const showMaxHistoriesSlider = watch('maxHistories') !== undefined;
|
||||||
@ -84,6 +85,8 @@ const AIChatSettingsModal = ({
|
|||||||
return getWebLLMModel(model);
|
return getWebLLMModel(model);
|
||||||
}, [model]);
|
}, [model]);
|
||||||
const llmSupportVision = !!selectedModel?.vision;
|
const llmSupportVision = !!selectedModel?.vision;
|
||||||
|
const llmSupportTemperature = typeof selectedModel?.maxTemperature === 'number';
|
||||||
|
const llmSupportReasoning = !!selectedModel?.reasoning;
|
||||||
|
|
||||||
const tokenLimit = useMemo(() => {
|
const tokenLimit = useMemo(() => {
|
||||||
return selectedModel?.maxResponse || 4096;
|
return selectedModel?.maxResponse || 4096;
|
||||||
@ -258,36 +261,51 @@ const AIChatSettingsModal = ({
|
|||||||
/>
|
/>
|
||||||
</Box>
|
</Box>
|
||||||
</Flex>
|
</Flex>
|
||||||
<Flex {...FlexItemStyles}>
|
{llmSupportTemperature && (
|
||||||
<Box {...LabelStyles}>
|
<Flex {...FlexItemStyles}>
|
||||||
<Flex alignItems={'center'}>
|
<Box {...LabelStyles}>
|
||||||
{t('app:temperature')}
|
<Flex alignItems={'center'}>
|
||||||
<QuestionTip label={t('app:temperature_tip')} />
|
{t('app:temperature')}
|
||||||
</Flex>
|
<QuestionTip label={t('app:temperature_tip')} />
|
||||||
<Switch
|
</Flex>
|
||||||
isChecked={temperature !== undefined}
|
<Switch
|
||||||
size={'sm'}
|
isChecked={temperature !== undefined}
|
||||||
onChange={(e) => {
|
size={'sm'}
|
||||||
setValue('temperature', e.target.checked ? 0 : undefined);
|
onChange={(e) => {
|
||||||
}}
|
setValue('temperature', e.target.checked ? 0 : undefined);
|
||||||
/>
|
}}
|
||||||
</Box>
|
/>
|
||||||
|
</Box>
|
||||||
<Box flex={'1 0 0'}>
|
<Box flex={'1 0 0'}>
|
||||||
<InputSlider
|
<InputSlider
|
||||||
min={0}
|
min={0}
|
||||||
max={10}
|
max={10}
|
||||||
step={1}
|
step={1}
|
||||||
value={temperature}
|
value={temperature}
|
||||||
isDisabled={temperature === undefined}
|
isDisabled={temperature === undefined}
|
||||||
onChange={(e) => {
|
onChange={(e) => {
|
||||||
setValue(NodeInputKeyEnum.aiChatTemperature, e);
|
setValue(NodeInputKeyEnum.aiChatTemperature, e);
|
||||||
setRefresh(!refresh);
|
setRefresh(!refresh);
|
||||||
}}
|
}}
|
||||||
/>
|
/>
|
||||||
</Box>
|
</Box>
|
||||||
</Flex>
|
</Flex>
|
||||||
|
)}
|
||||||
|
{llmSupportReasoning && (
|
||||||
|
<Flex {...FlexItemStyles} h={'25px'}>
|
||||||
|
<Box {...LabelStyles}>
|
||||||
|
<Flex alignItems={'center'}>{t('app:reasoning_response')}</Flex>
|
||||||
|
<Switch
|
||||||
|
isChecked={reasoning || false}
|
||||||
|
size={'sm'}
|
||||||
|
onChange={(e) => {
|
||||||
|
const value = e.target.checked;
|
||||||
|
setValue(NodeInputKeyEnum.aiChatReasoning, value);
|
||||||
|
}}
|
||||||
|
/>
|
||||||
|
</Box>
|
||||||
|
</Flex>
|
||||||
|
)}
|
||||||
{showResponseAnswerText && (
|
{showResponseAnswerText && (
|
||||||
<Flex {...FlexItemStyles} h={'25px'}>
|
<Flex {...FlexItemStyles} h={'25px'}>
|
||||||
<Box {...LabelStyles}>
|
<Box {...LabelStyles}>
|
||||||
|
|||||||
@ -201,6 +201,7 @@ const ChatBox = ({
|
|||||||
({
|
({
|
||||||
event,
|
event,
|
||||||
text = '',
|
text = '',
|
||||||
|
reasoningText,
|
||||||
status,
|
status,
|
||||||
name,
|
name,
|
||||||
tool,
|
tool,
|
||||||
@ -247,6 +248,25 @@ const ChatBox = ({
|
|||||||
value: item.value.slice(0, -1).concat(lastValue)
|
value: item.value.slice(0, -1).concat(lastValue)
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
} else if (event === SseResponseEventEnum.answer && reasoningText) {
|
||||||
|
if (lastValue.type === ChatItemValueTypeEnum.reasoning && lastValue.reasoning) {
|
||||||
|
lastValue.reasoning.content += reasoningText;
|
||||||
|
return {
|
||||||
|
...item,
|
||||||
|
value: item.value.slice(0, -1).concat(lastValue)
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
const val: AIChatItemValueItemType = {
|
||||||
|
type: ChatItemValueTypeEnum.reasoning,
|
||||||
|
reasoning: {
|
||||||
|
content: reasoningText
|
||||||
|
}
|
||||||
|
};
|
||||||
|
return {
|
||||||
|
...item,
|
||||||
|
value: item.value.concat(val)
|
||||||
|
};
|
||||||
|
}
|
||||||
} else if (event === SseResponseEventEnum.toolCall && tool) {
|
} else if (event === SseResponseEventEnum.toolCall && tool) {
|
||||||
const val: AIChatItemValueItemType = {
|
const val: AIChatItemValueItemType = {
|
||||||
type: ChatItemValueTypeEnum.tool,
|
type: ChatItemValueTypeEnum.tool,
|
||||||
|
|||||||
@ -6,6 +6,7 @@ import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/t
|
|||||||
export type generatingMessageProps = {
|
export type generatingMessageProps = {
|
||||||
event: SseResponseEventEnum;
|
event: SseResponseEventEnum;
|
||||||
text?: string;
|
text?: string;
|
||||||
|
reasoningText?: string;
|
||||||
name?: string;
|
name?: string;
|
||||||
status?: 'running' | 'finish';
|
status?: 'running' | 'finish';
|
||||||
tool?: ToolModuleResponseItemType;
|
tool?: ToolModuleResponseItemType;
|
||||||
|
|||||||
@ -8,6 +8,7 @@ import {
|
|||||||
Box,
|
Box,
|
||||||
Button,
|
Button,
|
||||||
Flex,
|
Flex,
|
||||||
|
HStack,
|
||||||
Textarea
|
Textarea
|
||||||
} from '@chakra-ui/react';
|
} from '@chakra-ui/react';
|
||||||
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
|
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
|
||||||
@ -139,6 +140,55 @@ ${toolResponse}`}
|
|||||||
},
|
},
|
||||||
(prevProps, nextProps) => isEqual(prevProps, nextProps)
|
(prevProps, nextProps) => isEqual(prevProps, nextProps)
|
||||||
);
|
);
|
||||||
|
const RenderResoningContent = React.memo(function RenderResoningContent({
|
||||||
|
content,
|
||||||
|
showAnimation
|
||||||
|
}: {
|
||||||
|
content: string;
|
||||||
|
showAnimation: boolean;
|
||||||
|
}) {
|
||||||
|
const { t } = useTranslation();
|
||||||
|
|
||||||
|
return (
|
||||||
|
<Accordion allowToggle defaultIndex={0}>
|
||||||
|
<AccordionItem borderTop={'none'} borderBottom={'none'}>
|
||||||
|
<AccordionButton
|
||||||
|
w={'auto'}
|
||||||
|
bg={'white'}
|
||||||
|
borderRadius={'md'}
|
||||||
|
borderWidth={'1px'}
|
||||||
|
borderColor={'myGray.200'}
|
||||||
|
boxShadow={'1'}
|
||||||
|
pl={3}
|
||||||
|
pr={2.5}
|
||||||
|
py={1}
|
||||||
|
_hover={{
|
||||||
|
bg: 'auto'
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<HStack mr={2} spacing={1}>
|
||||||
|
<MyIcon name={'core/chat/think'} w={'0.85rem'} />
|
||||||
|
<Box fontSize={'sm'}>{t('chat:ai_reasoning')}</Box>
|
||||||
|
</HStack>
|
||||||
|
|
||||||
|
{showAnimation && <MyIcon name={'common/loading'} w={'0.85rem'} />}
|
||||||
|
<AccordionIcon color={'myGray.600'} ml={5} />
|
||||||
|
</AccordionButton>
|
||||||
|
<AccordionPanel
|
||||||
|
py={0}
|
||||||
|
pr={0}
|
||||||
|
pl={3}
|
||||||
|
mt={2}
|
||||||
|
borderLeft={'2px solid'}
|
||||||
|
borderColor={'myGray.300'}
|
||||||
|
color={'myGray.500'}
|
||||||
|
>
|
||||||
|
<Markdown source={content} />
|
||||||
|
</AccordionPanel>
|
||||||
|
</AccordionItem>
|
||||||
|
</Accordion>
|
||||||
|
);
|
||||||
|
});
|
||||||
const RenderUserSelectInteractive = React.memo(function RenderInteractive({
|
const RenderUserSelectInteractive = React.memo(function RenderInteractive({
|
||||||
interactive
|
interactive
|
||||||
}: {
|
}: {
|
||||||
@ -290,6 +340,8 @@ const AIResponseBox = ({ value, isLastResponseValue, isChatting }: props) => {
|
|||||||
return (
|
return (
|
||||||
<RenderText showAnimation={isChatting && isLastResponseValue} text={value.text.content} />
|
<RenderText showAnimation={isChatting && isLastResponseValue} text={value.text.content} />
|
||||||
);
|
);
|
||||||
|
if (value.type === ChatItemValueTypeEnum.reasoning && value.reasoning)
|
||||||
|
return <RenderResoningContent showAnimation={isChatting} content={value.reasoning.content} />;
|
||||||
if (value.type === ChatItemValueTypeEnum.tool && value.tools)
|
if (value.type === ChatItemValueTypeEnum.tool && value.tools)
|
||||||
return <RenderTool showAnimation={isChatting} tools={value.tools} />;
|
return <RenderTool showAnimation={isChatting} tools={value.tools} />;
|
||||||
if (value.type === ChatItemValueTypeEnum.interactive && value.interactive) {
|
if (value.type === ChatItemValueTypeEnum.interactive && value.interactive) {
|
||||||
|
|||||||
@ -803,6 +803,10 @@ const ModelEditModal = ({
|
|||||||
<JsonEditor
|
<JsonEditor
|
||||||
value={JSON.stringify(getValues('defaultConfig'), null, 2)}
|
value={JSON.stringify(getValues('defaultConfig'), null, 2)}
|
||||||
onChange={(e) => {
|
onChange={(e) => {
|
||||||
|
if (!e) {
|
||||||
|
setValue('defaultConfig', undefined);
|
||||||
|
return;
|
||||||
|
}
|
||||||
try {
|
try {
|
||||||
setValue('defaultConfig', JSON.parse(e));
|
setValue('defaultConfig', JSON.parse(e));
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
@ -1009,6 +1013,10 @@ const ModelEditModal = ({
|
|||||||
value={JSON.stringify(getValues('defaultConfig'), null, 2)}
|
value={JSON.stringify(getValues('defaultConfig'), null, 2)}
|
||||||
resize
|
resize
|
||||||
onChange={(e) => {
|
onChange={(e) => {
|
||||||
|
if (!e) {
|
||||||
|
setValue('defaultConfig', undefined);
|
||||||
|
return;
|
||||||
|
}
|
||||||
try {
|
try {
|
||||||
setValue('defaultConfig', JSON.parse(e));
|
setValue('defaultConfig', JSON.parse(e));
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
|
|||||||
@ -14,7 +14,7 @@ import Avatar from '@fastgpt/web/components/common/Avatar';
|
|||||||
import Tag from '@fastgpt/web/components/common/Tag';
|
import Tag from '@fastgpt/web/components/common/Tag';
|
||||||
|
|
||||||
import { useTranslation } from 'next-i18next';
|
import { useTranslation } from 'next-i18next';
|
||||||
import React, { useMemo, useState } from 'react';
|
import React, { useMemo, useRef, useState } from 'react';
|
||||||
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
|
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
|
||||||
import { useContextSelector } from 'use-context-selector';
|
import { useContextSelector } from 'use-context-selector';
|
||||||
import { TeamContext } from '../context';
|
import { TeamContext } from '../context';
|
||||||
@ -50,6 +50,8 @@ function GroupEditModal({ onClose, editGroupId }: { onClose: () => void; editGro
|
|||||||
const refetchMembers = useContextSelector(TeamContext, (v) => v.refetchMembers);
|
const refetchMembers = useContextSelector(TeamContext, (v) => v.refetchMembers);
|
||||||
const MemberScrollData = useContextSelector(TeamContext, (v) => v.MemberScrollData);
|
const MemberScrollData = useContextSelector(TeamContext, (v) => v.MemberScrollData);
|
||||||
const [hoveredMemberId, setHoveredMemberId] = useState<string>();
|
const [hoveredMemberId, setHoveredMemberId] = useState<string>();
|
||||||
|
|
||||||
|
const selectedMembersRef = useRef<HTMLDivElement>(null);
|
||||||
const [members, setMembers] = useState(group?.members || []);
|
const [members, setMembers] = useState(group?.members || []);
|
||||||
|
|
||||||
const [searchKey, setSearchKey] = useState('');
|
const [searchKey, setSearchKey] = useState('');
|
||||||
@ -155,7 +157,7 @@ function GroupEditModal({ onClose, editGroupId }: { onClose: () => void; editGro
|
|||||||
setSearchKey(e.target.value);
|
setSearchKey(e.target.value);
|
||||||
}}
|
}}
|
||||||
/>
|
/>
|
||||||
<MemberScrollData mt={3} flex={'1 0 0'} h={0}>
|
<MemberScrollData mt={3} flexGrow="1" overflow={'auto'} maxH={'400px'}>
|
||||||
{filtered.map((member) => {
|
{filtered.map((member) => {
|
||||||
return (
|
return (
|
||||||
<HStack
|
<HStack
|
||||||
@ -185,7 +187,7 @@ function GroupEditModal({ onClose, editGroupId }: { onClose: () => void; editGro
|
|||||||
</Flex>
|
</Flex>
|
||||||
<Flex borderLeft="1px" borderColor="myGray.200" flexDirection="column" p="4" h={'100%'}>
|
<Flex borderLeft="1px" borderColor="myGray.200" flexDirection="column" p="4" h={'100%'}>
|
||||||
<Box mt={2}>{t('common:chosen') + ': ' + members.length}</Box>
|
<Box mt={2}>{t('common:chosen') + ': ' + members.length}</Box>
|
||||||
<MemberScrollData mt={3} flex={'1 0 0'} h={0}>
|
<MemberScrollData ScrollContainerRef={selectedMembersRef} mt={3} flex={'1 0 0'} h={0}>
|
||||||
{members.map((member) => {
|
{members.map((member) => {
|
||||||
return (
|
return (
|
||||||
<HStack
|
<HStack
|
||||||
|
|||||||
@ -169,8 +169,8 @@ function MemberTable({ Tabs }: { Tabs: React.ReactNode }) {
|
|||||||
</Flex>
|
</Flex>
|
||||||
|
|
||||||
<Box flex={'1 0 0'} overflow={'auto'}>
|
<Box flex={'1 0 0'} overflow={'auto'}>
|
||||||
<TableContainer overflow={'unset'} fontSize={'sm'}>
|
<MemberScrollData>
|
||||||
<MemberScrollData>
|
<TableContainer overflow={'unset'} fontSize={'sm'}>
|
||||||
<Table overflow={'unset'}>
|
<Table overflow={'unset'}>
|
||||||
<Thead>
|
<Thead>
|
||||||
<Tr bgColor={'white !important'}>
|
<Tr bgColor={'white !important'}>
|
||||||
@ -246,9 +246,9 @@ function MemberTable({ Tabs }: { Tabs: React.ReactNode }) {
|
|||||||
))}
|
))}
|
||||||
</Tbody>
|
</Tbody>
|
||||||
</Table>
|
</Table>
|
||||||
</MemberScrollData>
|
<ConfirmRemoveMemberModal />
|
||||||
<ConfirmRemoveMemberModal />
|
</TableContainer>
|
||||||
</TableContainer>
|
</MemberScrollData>
|
||||||
</Box>
|
</Box>
|
||||||
|
|
||||||
<ConfirmLeaveTeamModal />
|
<ConfirmLeaveTeamModal />
|
||||||
|
|||||||
@ -121,36 +121,34 @@ function OrgMemberManageModal({
|
|||||||
setSearchKey(e.target.value);
|
setSearchKey(e.target.value);
|
||||||
}}
|
}}
|
||||||
/>
|
/>
|
||||||
<Flex flexDirection="column" mt={3} flexGrow="1" overflow={'auto'} maxH={'400px'}>
|
<MemberScrollData mt={3} flexGrow="1" overflow={'auto'} maxH={'400px'}>
|
||||||
<MemberScrollData>
|
{filterMembers.map((member) => {
|
||||||
{filterMembers.map((member) => {
|
return (
|
||||||
return (
|
<HStack
|
||||||
<HStack
|
py="2"
|
||||||
py="2"
|
px={3}
|
||||||
px={3}
|
borderRadius={'md'}
|
||||||
borderRadius={'md'}
|
alignItems="center"
|
||||||
alignItems="center"
|
key={member.tmbId}
|
||||||
key={member.tmbId}
|
cursor={'pointer'}
|
||||||
cursor={'pointer'}
|
_hover={{
|
||||||
_hover={{
|
bg: 'myGray.50',
|
||||||
bg: 'myGray.50',
|
...(!isSelected(member.tmbId) ? { svg: { color: 'myGray.50' } } : {})
|
||||||
...(!isSelected(member.tmbId) ? { svg: { color: 'myGray.50' } } : {})
|
}}
|
||||||
}}
|
_notLast={{ mb: 2 }}
|
||||||
_notLast={{ mb: 2 }}
|
onClick={() => handleToggleSelect(member.tmbId)}
|
||||||
onClick={() => handleToggleSelect(member.tmbId)}
|
>
|
||||||
>
|
<Checkbox
|
||||||
<Checkbox
|
isChecked={!!isSelected(member.tmbId)}
|
||||||
isChecked={!!isSelected(member.tmbId)}
|
icon={<CheckboxIcon name={'common/check'} />}
|
||||||
icon={<CheckboxIcon name={'common/check'} />}
|
pointerEvents="none"
|
||||||
pointerEvents="none"
|
/>
|
||||||
/>
|
<Avatar src={member.avatar} w="1.5rem" borderRadius={'50%'} />
|
||||||
<Avatar src={member.avatar} w="1.5rem" borderRadius={'50%'} />
|
<Box>{member.memberName}</Box>
|
||||||
<Box>{member.memberName}</Box>
|
</HStack>
|
||||||
</HStack>
|
);
|
||||||
);
|
})}
|
||||||
})}
|
</MemberScrollData>
|
||||||
</MemberScrollData>
|
|
||||||
</Flex>
|
|
||||||
</Flex>
|
</Flex>
|
||||||
<Flex borderLeft="1px" borderColor="myGray.200" flexDirection="column" p="4" h={'100%'}>
|
<Flex borderLeft="1px" borderColor="myGray.200" flexDirection="column" p="4" h={'100%'}>
|
||||||
<Box mt={2}>{`${t('common:chosen')}:${selectedMembers.length}`}</Box>
|
<Box mt={2}>{`${t('common:chosen')}:${selectedMembers.length}`}</Box>
|
||||||
|
|||||||
@ -38,7 +38,9 @@ const SelectAiModelRender = ({ item, inputs = [], nodeId }: RenderInputProps) =>
|
|||||||
(input) => input.key === NodeInputKeyEnum.aiChatIsResponseText
|
(input) => input.key === NodeInputKeyEnum.aiChatIsResponseText
|
||||||
)?.value,
|
)?.value,
|
||||||
aiChatVision:
|
aiChatVision:
|
||||||
inputs.find((input) => input.key === NodeInputKeyEnum.aiChatVision)?.value ?? true
|
inputs.find((input) => input.key === NodeInputKeyEnum.aiChatVision)?.value ?? true,
|
||||||
|
aiChatReasoning:
|
||||||
|
inputs.find((input) => input.key === NodeInputKeyEnum.aiChatReasoning)?.value ?? true
|
||||||
}),
|
}),
|
||||||
[inputs]
|
[inputs]
|
||||||
);
|
);
|
||||||
|
|||||||
@ -186,6 +186,12 @@ export const streamFetch = ({
|
|||||||
text: item
|
text: item
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const reasoningText = parseJson.choices?.[0]?.delta?.reasoning_content || '';
|
||||||
|
onMessage({
|
||||||
|
event,
|
||||||
|
reasoningText
|
||||||
|
});
|
||||||
} else if (event === SseResponseEventEnum.fastAnswer) {
|
} else if (event === SseResponseEventEnum.fastAnswer) {
|
||||||
const text = parseJson.choices?.[0]?.delta?.content || '';
|
const text = parseJson.choices?.[0]?.delta?.content || '';
|
||||||
pushDataToQueue({
|
pushDataToQueue({
|
||||||
|
|||||||
@ -1,7 +1,7 @@
|
|||||||
import { parseCurl } from '@fastgpt/global/common/string/http';
|
import { parseCurl } from '@fastgpt/global/common/string/http';
|
||||||
import { AppTypeEnum } from '@fastgpt/global/core/app/constants';
|
import { AppTypeEnum } from '@fastgpt/global/core/app/constants';
|
||||||
import { AppSchema } from '@fastgpt/global/core/app/type';
|
import { AppSchema } from '@fastgpt/global/core/app/type';
|
||||||
import { WorkflowIOValueTypeEnum } from '@fastgpt/global/core/workflow/constants';
|
import { NodeInputKeyEnum, WorkflowIOValueTypeEnum } from '@fastgpt/global/core/workflow/constants';
|
||||||
import {
|
import {
|
||||||
FlowNodeInputTypeEnum,
|
FlowNodeInputTypeEnum,
|
||||||
FlowNodeOutputTypeEnum,
|
FlowNodeOutputTypeEnum,
|
||||||
@ -150,7 +150,7 @@ export const emptyTemplates: Record<
|
|||||||
key: 'temperature',
|
key: 'temperature',
|
||||||
renderTypeList: [FlowNodeInputTypeEnum.hidden],
|
renderTypeList: [FlowNodeInputTypeEnum.hidden],
|
||||||
label: '',
|
label: '',
|
||||||
value: 0,
|
value: undefined,
|
||||||
valueType: WorkflowIOValueTypeEnum.number,
|
valueType: WorkflowIOValueTypeEnum.number,
|
||||||
min: 0,
|
min: 0,
|
||||||
max: 10,
|
max: 10,
|
||||||
@ -160,7 +160,7 @@ export const emptyTemplates: Record<
|
|||||||
key: 'maxToken',
|
key: 'maxToken',
|
||||||
renderTypeList: [FlowNodeInputTypeEnum.hidden],
|
renderTypeList: [FlowNodeInputTypeEnum.hidden],
|
||||||
label: '',
|
label: '',
|
||||||
value: 2000,
|
value: undefined,
|
||||||
valueType: WorkflowIOValueTypeEnum.number,
|
valueType: WorkflowIOValueTypeEnum.number,
|
||||||
min: 100,
|
min: 100,
|
||||||
max: 4000,
|
max: 4000,
|
||||||
@ -221,6 +221,13 @@ export const emptyTemplates: Record<
|
|||||||
debugLabel: i18nT('common:core.module.Dataset quote.label'),
|
debugLabel: i18nT('common:core.module.Dataset quote.label'),
|
||||||
description: '',
|
description: '',
|
||||||
valueType: WorkflowIOValueTypeEnum.datasetQuote
|
valueType: WorkflowIOValueTypeEnum.datasetQuote
|
||||||
|
},
|
||||||
|
{
|
||||||
|
key: NodeInputKeyEnum.aiChatReasoning,
|
||||||
|
renderTypeList: [FlowNodeInputTypeEnum.hidden],
|
||||||
|
label: '',
|
||||||
|
valueType: WorkflowIOValueTypeEnum.boolean,
|
||||||
|
value: true
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
outputs: [
|
outputs: [
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user