* model config

* feat: model config ui

* perf: rename variable

* feat: custom request url

* perf: model buffer

* perf: init model

* feat: json model config

* auto login

* fix: ts

* update packages

* package

* fix: dockerfile
Archer 2025-01-22 22:59:28 +08:00 committed by GitHub
parent 16629e32a7
commit e009be51e7
93 changed files with 2361 additions and 564 deletions

View File

@ -42,7 +42,6 @@ weight: 707
"usedInClassify": true, // 是否用于问题分类务必保证至少有一个为true
"usedInExtractFields": true, // 是否用于内容提取务必保证至少有一个为true
"usedInToolCall": true, // 是否用于工具调用务必保证至少有一个为true
"usedInQueryExtension": true, // 是否用于问题优化务必保证至少有一个为true
"toolChoice": true, // 是否支持工具选择(分类,内容提取,工具调用会用到。)
"functionCall": false, // 是否支持函数调用(分类,内容提取,工具调用会用到。会优先使用 toolChoice如果为false则使用 functionCall如果仍为 false则使用提示词模式
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
@ -66,7 +65,6 @@ weight: 707
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": true,
"functionCall": false,
"customCQPrompt": "",
@ -90,7 +88,6 @@ weight: 707
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
@ -117,7 +114,6 @@ weight: 707
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",

View File

@ -144,7 +144,6 @@ curl --location --request POST 'https://<oneapi_url>/v1/chat/completions' \
"usedInClassify": true, // 是否用于问题分类务必保证至少有一个为true
"usedInExtractFields": true, // 是否用于内容提取务必保证至少有一个为true
"usedInToolCall": true, // 是否用于工具调用务必保证至少有一个为true
"usedInQueryExtension": true, // 是否用于问题优化务必保证至少有一个为true
"toolChoice": true, // 是否支持工具选择(分类,内容提取,工具调用会用到。)
"functionCall": false, // 是否支持函数调用(分类,内容提取,工具调用会用到。会优先使用 toolChoice如果为false则使用 functionCall如果仍为 false则使用提示词模式
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型

View File

@ -118,7 +118,6 @@ CHAT_API_KEY=sk-xxxxxx
"usedInClassify": true, // 是否用于问题分类
"usedInExtractFields": true, // 是否用于字段提取
"usedInToolCall": true, // 是否用于工具调用
"usedInQueryExtension": true, // 是否用于问题优化
"toolChoice": true, // 是否支持工具选择
"functionCall": false, // 是否支持函数调用
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型

View File

@ -51,7 +51,6 @@ CHAT_API_KEY=sk-xxxxxx
"usedInClassify": true, // 是否用于问题分类务必保证至少有一个为true
"usedInExtractFields": true, // 是否用于内容提取务必保证至少有一个为true
"usedInToolCall": true, // 是否用于工具调用务必保证至少有一个为true
"usedInQueryExtension": true, // 是否用于问题优化务必保证至少有一个为true
"toolChoice": true, // 是否支持工具选择(分类,内容提取,工具调用会用到。)
"functionCall": false, // 是否支持函数调用(分类,内容提取,工具调用会用到。会优先使用 toolChoice如果为false则使用 functionCall如果仍为 false则使用提示词模式
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
@ -75,7 +74,6 @@ CHAT_API_KEY=sk-xxxxxx
"usedInClassify": false,
"usedInExtractFields": false,
"usedInToolCall": false,
"usedInQueryExtension": false,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",

View File

@ -31,7 +31,6 @@ weight: 813
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
@ -56,7 +55,6 @@ weight: 813
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",

View File

@ -0,0 +1,29 @@
---
title: 'V4.8.20 (in progress)'
description: 'FastGPT V4.8.20 release notes'
icon: 'upgrade'
draft: false
toc: true
weight: 804
---
## Upgrade guide
### 1. Update the image
### 2. Run the upgrade script
From any terminal, send one HTTP request, replacing {{rootkey}} with the `rootkey` from your environment variables and {{host}} with your **FastGPT domain**.
```bash
curl --location --request POST 'https://{{host}}/api/admin/initv4820' \
--header 'rootkey: {{rootkey}}' \
--header 'Content-Type: application/json'
```
The script automatically migrates the models from the original configuration file into the new model configuration.
## Full changelog
1. New - Visual model configuration. More than 100 models are preset to make configuration easier.

View File

@ -23,7 +23,6 @@ data:
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": true,
"functionCall": false,
"customCQPrompt": "",
@ -45,7 +44,6 @@ data:
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": true,
"functionCall": false,
"customCQPrompt": "",
@ -67,7 +65,6 @@ data:
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": true,
"functionCall": false,
"customCQPrompt": "",
@ -89,7 +86,6 @@ data:
"usedInClassify": false,
"usedInExtractFields": false,
"usedInToolCall": false,
"usedInQueryExtension": false,
"toolChoice": true,
"functionCall": false,
"customCQPrompt": "",

View File

@ -3,7 +3,7 @@ import type {
ChatModelItemType,
FunctionModelItemType,
LLMModelItemType,
VectorModelItemType,
EmbeddingModelItemType,
AudioSpeechModels,
STTModelType,
ReRankModelItemType
@ -31,11 +31,13 @@ export type FastGPTConfigFileType = {
feConfigs: FastGPTFeConfigsType;
systemEnv: SystemEnvType;
subPlans?: SubPlanType;
llmModels: ChatModelItemType[];
vectorModels: VectorModelItemType[];
reRankModels: ReRankModelItemType[];
audioSpeechModels: AudioSpeechModelType[];
whisperModel: STTModelType;
// Abandon
llmModels?: ChatModelItemType[];
vectorModels?: EmbeddingModelItemType[];
reRankModels?: ReRankModelItemType[];
audioSpeechModels?: TTSModelType[];
whisperModel?: STTModelType;
};
export type FastGPTFeConfigsType = {

View File

@ -15,15 +15,13 @@ export enum LLMModelTypeEnum {
all = 'all',
classify = 'classify',
extractFields = 'extractFields',
toolCall = 'toolCall',
queryExtension = 'queryExtension'
toolCall = 'toolCall'
}
export const llmModelTypeFilterMap = {
[LLMModelTypeEnum.all]: 'model',
[LLMModelTypeEnum.classify]: 'usedInClassify',
[LLMModelTypeEnum.extractFields]: 'usedInExtractFields',
[LLMModelTypeEnum.toolCall]: 'usedInToolCall',
[LLMModelTypeEnum.queryExtension]: 'usedInQueryExtension'
[LLMModelTypeEnum.toolCall]: 'usedInToolCall'
};
export enum EmbeddingTypeEnm {

View File

@ -1,3 +1,4 @@
import { ModelTypeEnum } from './model';
import type { ModelProviderIdType } from './provider';
type PriceType = {
@ -7,68 +8,72 @@ type PriceType = {
inputPrice?: number; // 1k tokens=n points
outputPrice?: number; // 1k tokens=n points
};
export type LLMModelItemType = PriceType & {
  provider: ModelProviderIdType;
  model: string;
  name: string;
  avatar?: string; // model icon, from provider
  maxContext: number;
  maxResponse: number;
  quoteMaxToken: number;
  maxTemperature: number;
  censor?: boolean;
  vision?: boolean;

  // diff function model
  datasetProcess?: boolean; // dataset
  usedInClassify?: boolean; // classify
  usedInExtractFields?: boolean; // extract fields
  usedInToolCall?: boolean; // tool call
  usedInQueryExtension?: boolean; // query extension

  functionCall: boolean;
  toolChoice: boolean;
  customCQPrompt: string;
  customExtractPrompt: string;
  defaultSystemChatPrompt?: string;
  defaultConfig?: Record<string, any>;
  fieldMap?: Record<string, string>;
};

export type VectorModelItemType = PriceType & {
  provider: ModelProviderIdType;
  model: string; // model name
  name: string; // show name
  avatar?: string;
  defaultToken: number; // split text default token
  maxToken: number; // model max token
  weight: number; // training weight
  hidden?: boolean; // Disallow creation
  defaultConfig?: Record<string, any>; // post request config
  dbConfig?: Record<string, any>; // Custom parameters for storage
  queryConfig?: Record<string, any>; // Custom parameters for query
};

export type ReRankModelItemType = PriceType & {
  provider: ModelProviderIdType;
  model: string;
  name: string;
  requestUrl: string;
  requestAuth: string;
};

export type AudioSpeechModelType = PriceType & {
  provider: ModelProviderIdType;
  model: string;
  name: string;
  voices: { label: string; value: string; bufferId: string }[];
};

export type STTModelType = PriceType & {
  provider: ModelProviderIdType;
  model: string;
  name: string;
};

type BaseModelItemType = {
  provider: ModelProviderIdType;
  model: string;
  name: string;
  avatar?: string; // model icon, from provider
  isActive?: boolean;
  isCustom?: boolean;

  // If has requestUrl, it will request the model directly
  requestUrl?: string;
  requestAuth?: string;
};

export type LLMModelItemType = PriceType &
  BaseModelItemType & {
    type: ModelTypeEnum.llm;
    maxContext: number;
    maxResponse: number;
    quoteMaxToken: number;
    maxTemperature: number;
    censor?: boolean;
    vision?: boolean;

    // diff function model
    datasetProcess?: boolean; // dataset
    usedInClassify?: boolean; // classify
    usedInExtractFields?: boolean; // extract fields
    usedInToolCall?: boolean; // tool call

    functionCall: boolean;
    toolChoice: boolean;
    customCQPrompt: string;
    customExtractPrompt: string;
    defaultSystemChatPrompt?: string;
    defaultConfig?: Record<string, any>;
    fieldMap?: Record<string, string>;
  };

export type EmbeddingModelItemType = PriceType &
  BaseModelItemType & {
    type: ModelTypeEnum.embedding;
    defaultToken: number; // split text default token
    maxToken: number; // model max token
    weight: number; // training weight
    hidden?: boolean; // Disallow creation
    defaultConfig?: Record<string, any>; // post request config
    dbConfig?: Record<string, any>; // Custom parameters for storage
    queryConfig?: Record<string, any>; // Custom parameters for query
  };

export type ReRankModelItemType = PriceType &
  BaseModelItemType & {
    type: ModelTypeEnum.rerank;
  };

export type TTSModelType = PriceType &
  BaseModelItemType & {
    type: ModelTypeEnum.tts;
    voices: { label: string; value: string }[];
  };

export type STTModelType = PriceType &
  BaseModelItemType & {
    type: ModelTypeEnum.stt;
  };
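For orientation, here is a minimal sketch of a model entry under the new discriminated types. The values mirror the gpt-4o-mini presets shown elsewhere in this diff; the object is illustrative, not a complete production config:

```ts
import { ModelTypeEnum } from './model';
import type { LLMModelItemType } from './model.d';

// Illustrative only: an LLM entry under the new BaseModelItemType layout.
// Shared fields (provider/model/name/avatar, requestUrl/requestAuth) now come
// from the base type; `type` is the per-category discriminator.
const gpt4oMini: LLMModelItemType = {
  type: ModelTypeEnum.llm,
  provider: 'OpenAI',
  model: 'gpt-4o-mini',
  name: 'gpt-4o-mini',
  maxContext: 125000,
  maxResponse: 16000,
  quoteMaxToken: 60000,
  maxTemperature: 1.2,
  charsPointsPrice: 0,
  vision: true,
  toolChoice: true,
  functionCall: false,
  customCQPrompt: '',
  customExtractPrompt: ''
};
```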

View File

@ -1,9 +1,18 @@
import { i18nT } from '../../../web/i18n/utils';
import type { LLMModelItemType, STTModelType, VectorModelItemType } from './model.d';
import type { LLMModelItemType, STTModelType, EmbeddingModelItemType } from './model.d';
import { getModelProvider, ModelProviderIdType } from './provider';
export enum ModelTypeEnum {
llm = 'llm',
embedding = 'embedding',
tts = 'tts',
stt = 'stt',
rerank = 'rerank'
}
export const defaultQAModels: LLMModelItemType[] = [
{
type: ModelTypeEnum.llm,
provider: 'OpenAI',
model: 'gpt-4o-mini',
name: 'gpt-4o-mini',
@ -24,8 +33,9 @@ export const defaultQAModels: LLMModelItemType[] = [
}
];
export const defaultVectorModels: VectorModelItemType[] = [
export const defaultVectorModels: EmbeddingModelItemType[] = [
{
type: ModelTypeEnum.embedding,
provider: 'OpenAI',
model: 'text-embedding-3-small',
name: 'Embedding-2',
@ -36,12 +46,15 @@ export const defaultVectorModels: VectorModelItemType[] = [
}
];
export const defaultWhisperModel: STTModelType = {
provider: 'OpenAI',
model: 'whisper-1',
name: 'whisper-1',
charsPointsPrice: 0
};
export const defaultSTTModels: STTModelType[] = [
{
type: ModelTypeEnum.stt,
provider: 'OpenAI',
model: 'whisper-1',
name: 'whisper-1',
charsPointsPrice: 0
}
];
export const getModelFromList = (
modelList: { provider: ModelProviderIdType; name: string; model: string }[],
@ -55,15 +68,10 @@ export const getModelFromList = (
};
};
export enum ModelTypeEnum {
chat = 'chat',
embedding = 'embedding',
tts = 'tts',
stt = 'stt'
}
export const modelTypeList = [
{ label: i18nT('common:model.type.chat'), value: ModelTypeEnum.chat },
{ label: i18nT('common:model.type.chat'), value: ModelTypeEnum.llm },
{ label: i18nT('common:model.type.embedding'), value: ModelTypeEnum.embedding },
{ label: i18nT('common:model.type.tts'), value: ModelTypeEnum.tts },
{ label: i18nT('common:model.type.stt'), value: ModelTypeEnum.stt }
{ label: i18nT('common:model.type.stt'), value: ModelTypeEnum.stt },
{ label: i18nT('common:model.type.reRank'), value: ModelTypeEnum.rerank }
];

View File

@ -29,7 +29,7 @@ export type ModelProviderIdType =
export type ModelProviderType = {
id: ModelProviderIdType;
name: string;
name: any;
avatar: string;
};
@ -165,6 +165,7 @@ export const ModelProviderMap = Object.fromEntries(
ModelProviderList.map((item, index) => [item.id, { ...item, order: index }])
);
export const getModelProvider = (provider: ModelProviderIdType) => {
export const getModelProvider = (provider?: ModelProviderIdType) => {
if (!provider) return ModelProviderMap.Other;
return ModelProviderMap[provider] ?? ModelProviderMap.Other;
};

View File

@ -1,4 +1,4 @@
import type { LLMModelItemType, VectorModelItemType } from '../../core/ai/model.d';
import type { LLMModelItemType, EmbeddingModelItemType } from '../../core/ai/model.d';
import { PermissionTypeEnum } from '../../support/permission/constant';
import { PushDatasetDataChunkProps } from './api';
import {
@ -152,7 +152,7 @@ export type DatasetSimpleItemType = {
_id: string;
avatar: string;
name: string;
vectorModel: VectorModelItemType;
vectorModel: EmbeddingModelItemType;
};
export type DatasetListItemType = {
_id: string;
@ -163,14 +163,14 @@ export type DatasetListItemType = {
intro: string;
type: `${DatasetTypeEnum}`;
permission: DatasetPermission;
vectorModel: VectorModelItemType;
vectorModel: EmbeddingModelItemType;
inheritPermission: boolean;
private?: boolean;
sourceMember?: SourceMemberType;
};
export type DatasetItemType = Omit<DatasetSchemaType, 'vectorModel' | 'agentModel'> & {
vectorModel: VectorModelItemType;
vectorModel: EmbeddingModelItemType;
agentModel: LLMModelItemType;
permission: DatasetPermission;
};

View File

@ -1,4 +1,4 @@
import { VectorModelItemType } from '../ai/model.d';
import { EmbeddingModelItemType } from '../ai/model.d';
import { NodeInputKeyEnum } from './constants';
export type SelectedDatasetType = { datasetId: string }[];

View File

@ -31,10 +31,7 @@ export const AiQueryExtension: FlowNodeTemplateType = {
showStatus: true,
version: '481',
inputs: [
{
...Input_Template_SelectAIModel,
llmModelType: LLMModelTypeEnum.queryExtension
},
Input_Template_SelectAIModel,
{
key: NodeInputKeyEnum.aiSystemPrompt,
renderTypeList: [FlowNodeInputTypeEnum.textarea, FlowNodeInputTypeEnum.reference],

View File

@ -6,8 +6,7 @@ import { FastGPTProUrl } from '../constants';
export const getFastGPTConfigFromDB = async () => {
if (!FastGPTProUrl) {
return {
config: {} as FastGPTConfigFileType,
configId: undefined
config: {} as FastGPTConfigFileType
};
}
@ -18,9 +17,25 @@ export const getFastGPTConfigFromDB = async () => {
});
const config = res?.value || {};
// Use the config document's createTime (effectively its update time) as a cache key: if the frontend hits the cache, the config file does not need to be returned again
global.systemInitBufferId = res ? res.createTime.getTime().toString() : undefined;
return {
configId: res ? String(res._id) : undefined,
config: config as FastGPTConfigFileType
};
};
export const updateFastGPTConfigBuffer = async () => {
const res = await MongoSystemConfigs.findOne({
type: SystemConfigsTypeEnum.fastgpt
}).sort({
createTime: -1
});
if (!res) return;
res.createTime = new Date();
await res.save();
global.systemInitBufferId = res.createTime.getTime().toString();
};
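This is the "model buffer" piece from the commit messages: the config document's `createTime` doubles as a cache id (`systemInitBufferId`), and bumping it invalidates frontend caches. A hedged sketch of the client side, where the endpoint name and response shape are assumptions rather than code from this diff:

```ts
// Sketch: skip re-downloading the system config when the server's buffer id
// matches the one we already hold (hypothetical endpoint and shape).
async function getInitData(cachedBufferId?: string) {
  const res = await fetch(`/api/common/system/getInitData?bufferId=${cachedBufferId ?? ''}`);
  const data = await res.json();
  // Same bufferId -> cached config is still current, nothing to replace.
  return data.bufferId === cachedBufferId ? null : data;
}
```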

View File

@ -13,15 +13,6 @@ export const initFastGPTConfig = (config?: FastGPTConfigFileType) => {
global.feConfigs = config.feConfigs;
global.systemEnv = config.systemEnv;
global.subPlans = config.subPlans;
global.llmModels = config.llmModels;
global.llmModelPriceType = global.llmModels.some((item) => typeof item.inputPrice === 'number')
? 'IO'
: 'Tokens';
global.vectorModels = config.vectorModels;
global.audioSpeechModels = config.audioSpeechModels;
global.whisperModel = config.whisperModel;
global.reRankModels = config.reRankModels;
};
export const systemStartCb = () => {

View File

@ -2,7 +2,7 @@
import { PgVectorCtrl } from './pg/class';
import { getVectorsByText } from '../../core/ai/embedding';
import { InsertVectorProps } from './controller.d';
import { VectorModelItemType } from '@fastgpt/global/core/ai/model.d';
import { EmbeddingModelItemType } from '@fastgpt/global/core/ai/model.d';
import { MILVUS_ADDRESS, PG_ADDRESS } from './constants';
import { MilvusCtrl } from './milvus/class';
@ -28,7 +28,7 @@ export const insertDatasetDataVector = async ({
...props
}: InsertVectorProps & {
query: string;
model: VectorModelItemType;
model: EmbeddingModelItemType;
}) => {
const { vectors, tokens } = await getVectorsByText({
model,

View File

@ -2,6 +2,7 @@ import fs from 'fs';
import { getAxiosConfig } from '../config';
import axios from 'axios';
import FormData from 'form-data';
import { getSTTModel } from '../model';
export const aiTranscriptions = async ({
model,
@ -14,13 +15,21 @@ export const aiTranscriptions = async ({
data.append('model', model);
data.append('file', fileStream);
const modelData = getSTTModel(model);
const aiAxiosConfig = getAxiosConfig();
const { data: result } = await axios<{ text: string }>({
method: 'post',
baseURL: aiAxiosConfig.baseUrl,
url: '/audio/transcriptions',
...(modelData.requestUrl
? { url: modelData.requestUrl }
: {
baseURL: aiAxiosConfig.baseUrl,
url: modelData.requestUrl || '/audio/transcriptions'
}),
headers: {
Authorization: aiAxiosConfig.authorization,
Authorization: modelData.requestAuth
? `Bearer ${modelData.requestAuth}`
: aiAxiosConfig.authorization,
...data.getHeaders()
},
data: data

View File

@ -7,6 +7,7 @@ import { getErrText } from '@fastgpt/global/common/error/utils';
import { addLog } from '../../common/system/log';
import { i18nT } from '../../../web/i18n/utils';
import { OpenaiAccountType } from '@fastgpt/global/support/user/team/type';
import { getLLMModel } from './model';
export const openaiBaseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1';
@ -63,12 +64,23 @@ export const createChatCompletion = async <T extends CompletionsBodyType>({
getEmptyResponseTip: () => string;
}> => {
try {
const modelConstantsData = getLLMModel(body.model);
const formatTimeout = timeout ? timeout : body.stream ? 60000 : 600000;
const ai = getAIApi({
userKey,
timeout: formatTimeout
});
const response = await ai.chat.completions.create(body, options);
const response = await ai.chat.completions.create(body, {
...options,
...(modelConstantsData.requestUrl ? { path: modelConstantsData.requestUrl } : {}),
headers: {
...options?.headers,
...(modelConstantsData.requestAuth
? { Authorization: `Bearer ${modelConstantsData.requestAuth}` }
: {})
}
});
const isStreamResponse =
typeof response === 'object' &&
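These chat-completion changes, together with the matching edits in the transcription and embedding helpers, all key off two new optional fields on the model item. A sketch of how they might be set, with placeholder values:

```ts
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';

// Illustrative only: a model that bypasses the shared OneAPI base URL.
// With requestUrl set, the request goes to that path directly; requestAuth,
// if present, is sent as `Authorization: Bearer <requestAuth>`.
const customEndpointModel: Partial<LLMModelItemType> = {
  model: 'my-proxy-model', // hypothetical model id
  requestUrl: 'https://example.com/v1/chat/completions', // placeholder URL
  requestAuth: 'sk-xxxxxx' // placeholder token
};
```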

View File

@ -0,0 +1,10 @@
{
"provider": "OpenAI",
"model": "text-embedding-3-small",
"name": "text-embedding-3-small",
"defaultToken": 512,
"maxToken": 3000,
"charsPointsPrice": 0
}

View File

@ -3,9 +3,8 @@
"model": "text-embedding-ada-002",
"name": "text-embedding-ada-002",
"defaultToken": 512, // token
"maxToken": 3000, // token
"weight": 0, //
"defaultToken": 512,
"maxToken": 3000,
"charsPointsPrice": 0 // /1k token
"charsPointsPrice": 0
}

View File

@ -1,33 +1,28 @@
{
"provider": "OpenAI",
"model": "gpt-4o-mini",
"name": "GPT-4o-mini", // alias
"name": "GPT-4o-mini",
"maxContext": 125000, //
"maxResponse": 16000, //
"quoteMaxToken": 60000, //
"maxTemperature": 1.2, //
"presencePenaltyRange": [-2, 2], //
"frequencyPenaltyRange": [-2, 2], //
"responseFormatList": ["text", "json_object", "json_schema"], //
"showStopSign": true, //
"censor": false,
"charsPointsPrice": 0,
"vision": true, //
"toolChoice": true, //
"functionCall": false, // false
"defaultSystemChatPrompt": "", //
"maxContext": 125000,
"maxResponse": 16000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"datasetProcess": true, //
"usedInClassify": true, //
"customCQPrompt": "", //
"usedInExtractFields": true, //
"customExtractPrompt": "", //
"usedInToolCall": true, //
"usedInQueryExtension": true, //
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"defaultConfig": {}, // body
"fieldMap": {}, // body
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"censor": false, //
"charsPointsPrice": 0 // n /1k token
"defaultConfig": {},
"fieldMap": {}
}

View File

@ -0,0 +1,21 @@
import { connectionMongo, getMongoModel } from '../../../common/mongo';
const { Schema } = connectionMongo;
import type { SystemModelSchemaType } from '../type';
const SystemModelSchema = new Schema({
model: {
type: String,
required: true,
unique: true
},
metadata: {
type: Object,
required: true,
default: {}
}
});
export const MongoSystemModel = getMongoModel<SystemModelSchemaType>(
'system_models',
SystemModelSchema
);

View File

@ -0,0 +1,118 @@
import path from 'path';
import * as fs from 'fs';
import { SystemModelItemType } from '../type';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import { MongoSystemModel } from './schema';
import {
LLMModelItemType,
EmbeddingModelItemType,
TTSModelType,
STTModelType,
ReRankModelItemType
} from '@fastgpt/global/core/ai/model.d';
import { debounce } from 'lodash';
type FolderBaseType = `${ModelTypeEnum}`;
export const loadSystemModels = async (init = false) => {
const getModelNameList = (base: FolderBaseType) => {
const currentFileUrl = new URL(import.meta.url);
const modelsPath = path.join(path.dirname(currentFileUrl.pathname), base);
return fs.readdirSync(modelsPath) as string[];
};
const pushModel = (model: SystemModelItemType) => {
global.systemModelList.push(model);
if (model.isActive) {
global.systemActiveModelList.push(model);
if (model.type === ModelTypeEnum.llm) {
global.llmModelMap.set(model.model, model);
global.llmModelMap.set(model.name, model);
} else if (model.type === ModelTypeEnum.embedding) {
global.embeddingModelMap.set(model.model, model);
global.embeddingModelMap.set(model.name, model);
} else if (model.type === ModelTypeEnum.tts) {
global.ttsModelMap.set(model.model, model);
global.ttsModelMap.set(model.name, model);
} else if (model.type === ModelTypeEnum.stt) {
global.sttModelMap.set(model.model, model);
global.sttModelMap.set(model.name, model);
} else if (model.type === ModelTypeEnum.rerank) {
global.reRankModelMap.set(model.model, model);
global.reRankModelMap.set(model.name, model);
}
}
};
if (!init && global.systemModelList && global.systemModelList.length > 0) return;
const dbModels = await MongoSystemModel.find({}).lean();
global.systemModelList = [];
global.systemActiveModelList = [];
global.llmModelMap = new Map<string, LLMModelItemType>();
global.embeddingModelMap = new Map<string, EmbeddingModelItemType>();
global.ttsModelMap = new Map<string, TTSModelType>();
global.sttModelMap = new Map<string, STTModelType>();
global.reRankModelMap = new Map<string, ReRankModelItemType>();
const baseList: FolderBaseType[] = [
ModelTypeEnum.llm,
ModelTypeEnum.embedding,
ModelTypeEnum.tts,
ModelTypeEnum.stt,
ModelTypeEnum.rerank
];
// System model
await Promise.all(
baseList.map(async (base) => {
const modelList = getModelNameList(base);
const nameList = modelList.map((name) => `${base}/${name}`);
await Promise.all(
nameList.map(async (name) => {
const fileContent = (await import(`./${name}`))?.default as SystemModelItemType;
const dbModel = dbModels.find((item) => item.model === fileContent.model);
const model: any = {
...fileContent,
...dbModel?.metadata,
type: dbModel?.metadata?.type || base,
isCustom: false
};
pushModel(model);
})
);
})
);
// Custom model
dbModels.forEach((dbModel) => {
if (global.systemModelList.find((item) => item.model === dbModel.model)) return;
pushModel({
...dbModel.metadata,
isCustom: true
});
});
console.log('Load models success', JSON.stringify(global.systemActiveModelList, null, 2));
};
export const watchSystemModelUpdate = () => {
const changeStream = MongoSystemModel.watch();
changeStream.on(
'change',
debounce(async () => {
try {
await loadSystemModels(true);
} catch (error) {}
}, 500)
);
};
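A short sketch of the boot sequence this module implies, assuming the Mongo connection is already open by the time it runs:

```ts
// Sketch: server startup. loadSystemModels() merges the JSON presets on disk
// with per-model metadata stored in MongoDB; watchSystemModelUpdate() then
// reloads whenever the system_models collection changes.
await loadSystemModels();
watchSystemModelUpdate();

// Models are afterwards addressable by id or alias through the global maps:
const llm = global.llmModelMap.get('gpt-4o-mini');
```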

View File

@ -1,11 +1,11 @@
import { VectorModelItemType } from '@fastgpt/global/core/ai/model.d';
import { EmbeddingModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../config';
import { countPromptTokens } from '../../../common/string/tiktoken/index';
import { EmbeddingTypeEnm } from '@fastgpt/global/core/ai/constants';
import { addLog } from '../../../common/system/log';
type GetVectorProps = {
model: VectorModelItemType;
model: EmbeddingModelItemType;
input: string;
type?: `${EmbeddingTypeEnm}`;
};
@ -24,13 +24,23 @@ export async function getVectorsByText({ model, input, type }: GetVectorProps) {
// input text to vector
const result = await ai.embeddings
.create({
...model.defaultConfig,
...(type === EmbeddingTypeEnm.db && model.dbConfig),
...(type === EmbeddingTypeEnm.query && model.queryConfig),
model: model.model,
input: [input]
})
.create(
{
...model.defaultConfig,
...(type === EmbeddingTypeEnm.db && model.dbConfig),
...(type === EmbeddingTypeEnm.query && model.queryConfig),
model: model.model,
input: [input]
},
model.requestUrl && model.requestAuth
? {
path: model.requestUrl,
headers: {
Authorization: `Bearer ${model.requestAuth}`
}
}
: {}
)
.then(async (res) => {
if (!res.data) {
addLog.error('Embedding API is not responding', res);

View File

@ -1,51 +1,54 @@
import { SystemModelItemType } from './type';
export const getFirstLLMModel = () => {
return Array.from(global.llmModelMap.values())[0];
};
export const getLLMModel = (model?: string) => {
return (
global.llmModels.find((item) => item.model === model || item.name === model) ??
global.llmModels[0]
);
if (!model) return getFirstLLMModel();
return global.llmModelMap.get(model) || getFirstLLMModel();
};
export const getDatasetModel = (model?: string) => {
return (
global.llmModels
Array.from(global.llmModelMap.values())
?.filter((item) => item.datasetProcess)
?.find((item) => item.model === model || item.name === model) ?? global.llmModels[0]
?.find((item) => item.model === model || item.name === model) ?? getFirstLLMModel()
);
};
export const getVectorModel = (model?: string) => {
return (
global.vectorModels.find((item) => item.model === model || item.name === model) ||
global.vectorModels[0]
);
export const getFirstEmbeddingModel = () => Array.from(global.embeddingModelMap.values())[0];
export const getEmbeddingModel = (model?: string) => {
if (!model) return getFirstEmbeddingModel();
return global.embeddingModelMap.get(model) || getFirstEmbeddingModel();
};
export function getAudioSpeechModel(model?: string) {
return (
global.audioSpeechModels.find((item) => item.model === model || item.name === model) ||
global.audioSpeechModels[0]
);
export const getFirstTTSModel = () => Array.from(global.ttsModelMap.values())[0];
export function getTTSModel(model?: string) {
if (!model) return getFirstTTSModel();
return global.ttsModelMap.get(model) || getFirstTTSModel();
}
export function getWhisperModel(model?: string) {
return global.whisperModel;
export const getFirstSTTModel = () => Array.from(global.sttModelMap.values())[0];
export function getSTTModel(model?: string) {
if (!model) return getFirstSTTModel();
return global.sttModelMap.get(model) || getFirstSTTModel();
}
export const getFirstReRankModel = () => Array.from(global.reRankModelMap.values())[0];
export function getReRankModel(model?: string) {
return global.reRankModels.find((item) => item.model === model);
if (!model) return getFirstReRankModel();
return global.reRankModelMap.get(model) || getFirstReRankModel();
}
export enum ModelTypeEnum {
llm = 'llm',
vector = 'vector',
audioSpeech = 'audioSpeech',
whisper = 'whisper',
rerank = 'rerank'
}
export const getModelMap = {
[ModelTypeEnum.llm]: getLLMModel,
[ModelTypeEnum.vector]: getVectorModel,
[ModelTypeEnum.audioSpeech]: getAudioSpeechModel,
[ModelTypeEnum.whisper]: getWhisperModel,
[ModelTypeEnum.rerank]: getReRankModel
export const findAIModel = (model: string): SystemModelItemType | undefined => {
return (
global.llmModelMap.get(model) ||
global.embeddingModelMap.get(model) ||
global.ttsModelMap.get(model) ||
global.sttModelMap.get(model) ||
global.reRankModelMap.get(model)
);
};
export const findModelFromAlldata = (model: string) => {
return global.systemModelList.find((item) => item.model === model);
};
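Note the shared fallback behavior: a map miss returns the first registered model of that type instead of `undefined`, while `findAIModel` reports a genuine miss:

```ts
// Lookup semantics after this refactor (per the code above).
getLLMModel('gpt-4o-mini');   // exact hit; aliases (name) are also map keys
getLLMModel('no-such-model'); // miss -> falls back to the first LLM model
findAIModel('no-such-model'); // searched across all five maps -> undefined
```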

View File

@ -1,5 +1,6 @@
import { addLog } from '../../../common/system/log';
import { POST } from '../../../common/api/serverRequest';
import { getFirstReRankModel } from '../model';
type PostReRankResponse = {
id: string;
@ -17,7 +18,7 @@ export function reRankRecall({
query: string;
documents: { id: string; text: string }[];
}): Promise<ReRankCallResult> {
const model = global.reRankModels[0];
const model = getFirstReRankModel();
if (!model || !model?.requestUrl) {
return Promise.reject('no rerank model');

packages/service/core/ai/type.d.ts (new file)
View File

@ -0,0 +1,33 @@
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import {
STTModelType,
ReRankModelItemType,
TTSModelType,
EmbeddingModelItemType,
LLMModelItemType
} from '@fastgpt/global/core/ai/model.d';
export type SystemModelSchemaType = {
_id: string;
model: string;
metadata: SystemModelItemType;
};
export type SystemModelItemType =
| LLMModelItemType
| EmbeddingModelItemType
| TTSModelType
| STTModelType
| ReRankModelItemType;
declare global {
var systemModelList: SystemModelItemType[];
// var systemModelMap: Map<string, SystemModelItemType>;
var llmModelMap: Map<string, LLMModelItemType>;
var embeddingModelMap: Map<string, EmbeddingModelItemType>;
var ttsModelMap: Map<string, TTSModelType>;
var sttModelMap: Map<string, STTModelType>;
var reRankModelMap: Map<string, ReRankModelItemType>;
var systemActiveModelList: SystemModelItemType[];
}

View File

@ -19,7 +19,7 @@ import { predictDataLimitLength } from '../../../../global/core/dataset/utils';
import { mongoSessionRun } from '../../../common/mongo/sessionRun';
import { createTrainingUsage } from '../../../support/wallet/usage/controller';
import { UsageSourceEnum } from '@fastgpt/global/support/wallet/usage/constants';
import { getLLMModel, getVectorModel } from '../../ai/model';
import { getLLMModel, getEmbeddingModel } from '../../ai/model';
import { pushDataListToTrainingQueue } from '../training/controller';
import { MongoImage } from '../../../common/file/image/schema';
import { hashStr } from '@fastgpt/global/common/string/tools';
@ -93,7 +93,7 @@ export const createCollectionAndInsertData = async ({
tmbId,
appName: usageName,
billSource: UsageSourceEnum.training,
vectorModel: getVectorModel(dataset.vectorModel)?.name,
vectorModel: getEmbeddingModel(dataset.vectorModel)?.name,
agentModel: getLLMModel(dataset.agentModel)?.name,
session
});

View File

@ -5,7 +5,7 @@ import {
} from '@fastgpt/global/core/dataset/constants';
import { recallFromVectorStore } from '../../../common/vectorStore/controller';
import { getVectorsByText } from '../../ai/embedding';
import { getVectorModel } from '../../ai/model';
import { getEmbeddingModel, getFirstReRankModel } from '../../ai/model';
import { MongoDatasetData } from '../data/schema';
import {
DatasetDataSchemaType,
@ -67,7 +67,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
/* init params */
searchMode = DatasetSearchModeMap[searchMode] ? searchMode : DatasetSearchModeEnum.embedding;
usingReRank = usingReRank && global.reRankModels.length > 0;
usingReRank = usingReRank && !!getFirstReRankModel();
// Compatible with topk limit
let set = new Set<string>();
@ -253,7 +253,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
filterCollectionIdList?: string[];
}) => {
const { vectors, tokens } = await getVectorsByText({
model: getVectorModel(model),
model: getEmbeddingModel(model),
input: query,
type: 'query'
});

View File

@ -7,7 +7,7 @@ import type {
import { TrainingModeEnum } from '@fastgpt/global/core/dataset/constants';
import { simpleText } from '@fastgpt/global/common/string/tools';
import { ClientSession } from '../../../common/mongo';
import { getLLMModel, getVectorModel } from '../../ai/model';
import { getLLMModel, getEmbeddingModel } from '../../ai/model';
import { addLog } from '../../../common/system/log';
import { getCollectionWithDataset } from '../controller';
import { mongoSessionRun } from '../../../common/mongo/sessionRun';
@ -70,7 +70,7 @@ export async function pushDataListToTrainingQueue({
if (!agentModelData) {
return Promise.reject(`File model ${agentModel} is inValid`);
}
const vectorModelData = getVectorModel(vectorModel);
const vectorModelData = getEmbeddingModel(vectorModel);
if (!vectorModelData) {
return Promise.reject(`Vector model ${vectorModel} is inValid`);
}

View File

@ -13,7 +13,7 @@ import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_CQJson } from '@fastgpt/global/core/ai/prompt/agent';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ModelTypeEnum, getLLMModel } from '../../../ai/model';
import { getLLMModel } from '../../../ai/model';
import { getHistories } from '../utils';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
@ -22,6 +22,7 @@ import { getHandleId } from '@fastgpt/global/core/workflow/utils';
import { loadRequestMessages } from '../../../chat/utils';
import { llmCompletionsBodyFormat } from '../../../ai/utils';
import { addLog } from '../../../../common/system/log';
import { ModelTypeEnum } from '../../../../../global/core/ai/model';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.aiModel]: string;

View File

@ -16,7 +16,7 @@ import { Prompt_ExtractJson } from '@fastgpt/global/core/ai/prompt/agent';
import { replaceVariable, sliceJsonStr } from '@fastgpt/global/common/string/tools';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getHistories } from '../utils';
import { ModelTypeEnum, getLLMModel } from '../../../ai/model';
import { getLLMModel } from '../../../ai/model';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import json5 from 'json5';
import {
@ -28,6 +28,7 @@ import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/co
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { llmCompletionsBodyFormat } from '../../../ai/utils';
import { ModelTypeEnum } from '../../../../../global/core/ai/model';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];

View File

@ -4,7 +4,7 @@ import type {
DispatchNodeResultType,
RuntimeNodeItemType
} from '@fastgpt/global/core/workflow/runtime/type';
import { ModelTypeEnum, getLLMModel } from '../../../../ai/model';
import { getLLMModel } from '../../../../ai/model';
import { filterToolNodeIdByEdges, getHistories } from '../../utils';
import { runToolWithToolChoice } from './toolChoice';
import { DispatchToolModuleProps, ToolNodeItemType } from './type.d';
@ -30,6 +30,7 @@ import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { Prompt_DocumentQuote } from '@fastgpt/global/core/ai/prompt/AIChat';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { postTextCensor } from '../../../../../common/api/requestPlusApi';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;

View File

@ -33,7 +33,7 @@ import type { AIChatNodeProps } from '@fastgpt/global/core/workflow/runtime/type
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { responseWriteController } from '../../../../common/response';
import { getLLMModel, ModelTypeEnum } from '../../../ai/model';
import { getLLMModel } from '../../../ai/model';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
@ -47,6 +47,7 @@ import { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/syst
import { getFileContentFromLinks, getHistoryFileLinks } from '../tools/readFiles';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { i18nT } from '../../../../../web/i18n/utils';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
export type ChatProps = ModuleDispatchProps<
AIChatNodeProps & {

View File

@ -6,7 +6,7 @@ import { formatModelChars2Points } from '../../../../support/wallet/usage/utils'
import type { SelectedDatasetType } from '@fastgpt/global/core/workflow/api.d';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { ModelTypeEnum, getLLMModel, getVectorModel } from '../../../ai/model';
import { getLLMModel, getEmbeddingModel } from '../../../ai/model';
import { searchDatasetData } from '../../../dataset/search/controller';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
@ -18,6 +18,7 @@ import { checkTeamReRankPermission } from '../../../../support/permission/teamLi
import { MongoDataset } from '../../../dataset/schema';
import { i18nT } from '../../../../../web/i18n/utils';
import { filterDatasetsByTmbId } from '../../../dataset/utils';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
type DatasetSearchProps = ModuleDispatchProps<{
[NodeInputKeyEnum.datasetSelectList]: SelectedDatasetType;
@ -110,7 +111,7 @@ export async function dispatchDatasetSearch(
// console.log(concatQueries, rewriteQuery, aiExtensionResult);
// get vector
const vectorModel = getVectorModel(
const vectorModel = getEmbeddingModel(
(await MongoDataset.findById(datasets[0].datasetId, 'vectorModel').lean())?.vectorModel
);
@ -138,7 +139,7 @@ export async function dispatchDatasetSearch(
const { totalPoints, modelName } = formatModelChars2Points({
model: vectorModel.model,
inputTokens: tokens,
modelType: ModelTypeEnum.vector
modelType: ModelTypeEnum.embedding
});
const responseData: DispatchNodeResponseType & { totalPoints: number } = {
totalPoints,

View File

@ -2,12 +2,13 @@ import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { ModelTypeEnum, getLLMModel } from '../../../../core/ai/model';
import { getLLMModel } from '../../../../core/ai/model';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { queryExtension } from '../../../../core/ai/functions/queryExtension';
import { getHistories } from '../utils';
import { hashStr } from '@fastgpt/global/common/string/tools';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.aiModel]: string;

View File

@ -5,6 +5,10 @@ import { TeamErrEnum } from '@fastgpt/global/common/error/code/team';
import { AuthModeType, AuthResponseType } from '../type';
import { NullPermission } from '@fastgpt/global/support/permission/constant';
import { TeamPermission } from '@fastgpt/global/support/permission/user/controller';
import { authCert } from '../auth/common';
import { MongoUser } from '../../user/schema';
import { ERROR_ENUM } from '@fastgpt/global/common/error/errorCode';
import { ApiRequestProps } from '../../../type/next';
/* auth user role */
export async function authUserPer(props: AuthModeType): Promise<
@ -34,3 +38,19 @@ export async function authUserPer(props: AuthModeType): Promise<
tmb
};
}
export const authSystemAdmin = async ({ req }: { req: ApiRequestProps }) => {
try {
const result = await authCert({ req, authToken: true });
const user = await MongoUser.findOne({
_id: result.userId
});
if (user && user.username !== 'root') {
return Promise.reject(ERROR_ENUM.unAuthorization);
}
return result;
} catch (error) {
throw error;
}
};
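A hedged sketch of how `authSystemAdmin` could guard one of the new model-config routes; the route, import paths and handler shape are assumptions, not code from this diff:

```ts
import type { NextApiResponse } from 'next';
// Hypothetical import paths for illustration only.
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
import type { ApiRequestProps } from '@fastgpt/service/type/next';

// Sketch: an admin-only API route, e.g. saving a system model override.
export default async function handler(req: ApiRequestProps, res: NextApiResponse) {
  await authSystemAdmin({ req }); // rejects unless the session user is 'root'
  // ...perform the privileged update here...
  res.json({ ok: true });
}
```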

View File

@ -1,5 +1,5 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ModelTypeEnum, getModelMap } from '../../../core/ai/model';
import { findAIModel } from '../../../core/ai/model';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
export const formatModelChars2Points = ({
model,
@ -14,7 +14,7 @@ export const formatModelChars2Points = ({
modelType: `${ModelTypeEnum}`;
multiple?: number;
}) => {
const modelData = getModelMap?.[modelType]?.(model) as LLMModelItemType;
const modelData = findAIModel(model);
if (!modelData) {
return {
totalPoints: 0,

View File

@ -1,9 +1,9 @@
import { FastGPTFeConfigsType, SystemEnvType } from '@fastgpt/global/common/system/types';
import {
AudioSpeechModelType,
TTSModelType,
ReRankModelItemType,
STTModelType,
VectorModelItemType,
EmbeddingModelItemType,
LLMModelItemType
} from '@fastgpt/global/core/ai/model.d';
import { SubPlanType } from '@fastgpt/global/support/wallet/sub/type';
@ -12,17 +12,11 @@ import { Worker } from 'worker_threads';
declare global {
var systemInitBufferId: string | undefined;
var systemVersion: string;
var feConfigs: FastGPTFeConfigsType;
var systemEnv: SystemEnvType;
var subPlans: SubPlanType | undefined;
var llmModels: LLMModelItemType[];
var llmModelPriceType: 'IO' | 'Tokens';
var vectorModels: VectorModelItemType[];
var audioSpeechModels: AudioSpeechModelType[];
var whisperModel: STTModelType;
var reRankModels: ReRankModelItemType[];
var workerPoll: Record<WorkerNameEnum, WorkerPool>;
}

View File

@ -8,13 +8,13 @@ import {
MenuItem,
MenuItemProps,
MenuList,
useDisclosure,
useOutsideClick
useDisclosure
} from '@chakra-ui/react';
import React, { useRef } from 'react';
import { useTranslation } from 'next-i18next';
import MyTag from '../Tag/index';
import MyIcon from '../Icon';
import MyAvatar from '../Avatar';
export type SelectProps<T = any> = {
value: T[];
@ -26,6 +26,7 @@ export type SelectProps<T = any> = {
}[];
maxH?: number;
onSelect: (val: T[]) => void;
closeable?: boolean;
} & Omit<ButtonProps, 'onSelect'>;
const MultipleSelect = <T = any,>({
@ -35,6 +36,7 @@ const MultipleSelect = <T = any,>({
width = '100%',
maxH = 400,
onSelect,
closeable = false,
...props
}: SelectProps<T>) => {
const { t } = useTranslation();
@ -106,19 +108,24 @@ const MultipleSelect = <T = any,>({
if (!listItem) return null;
return (
<MyTag key={i} colorSchema="blue" type={'borderFill'}>
<MyTag className="tag-icon" key={i} colorSchema="blue" type={'borderFill'}>
{listItem.label}
{/* <MyIcon
name={'common/closeLight'}
ml={1}
w="14px"
cursor={'pointer'}
onClickCapture={(e) => {
console.log(111);
e.stopPropagation();
onclickItem(item);
}}
/> */}
{closeable && (
<MyIcon
name={'common/closeLight'}
ml={1}
w="0.8rem"
cursor={'pointer'}
_hover={{
color: 'red.500'
}}
onClick={(e) => {
console.log(111);
e.stopPropagation();
onclickItem(item);
}}
/>
)}
</MyTag>
);
})}
@ -164,10 +171,11 @@ const MultipleSelect = <T = any,>({
fontSize={'sm'}
gap={2}
>
{item.icon && <MyAvatar src={item.icon} w={'1rem'} borderRadius={'0'} />}
<Box flex={'1 0 0'}>{item.label}</Box>
<Box w={'0.8rem'} lineHeight={1}>
{value.includes(item.value) && <MyIcon name={'price/right'} w={'1rem'} />}
</Box>
<Box>{item.label}</Box>
</MenuItem>
))}
</MenuList>
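Usage sketch for the new `closeable` prop; the import path and list values are assumptions:

```tsx
import React, { useState } from 'react';
// Hypothetical import path for illustration only.
import MultipleSelect from '@fastgpt/web/components/common/MySelect/MultipleSelect';

// Sketch: with `closeable`, each selected tag renders a close icon that
// deselects the item without reopening the menu.
const ModelPicker = () => {
  const [selected, setSelected] = useState<string[]>([]);
  return (
    <MultipleSelect<string>
      list={[{ label: 'gpt-4o-mini', value: 'gpt-4o-mini', icon: '/imgs/model/openai.svg' }]}
      value={selected}
      onSelect={setSelected}
      closeable
    />
  );
};
```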

View File

@ -4,12 +4,60 @@
"api_key": "API key",
"bills_and_invoices": "Bills",
"channel": "Channel",
"config_model": "Model configuration",
"confirm_logout": "Confirm to log out?",
"create_channel": "Add new channel",
"create_model": "Add new model",
"custom_model": "custom model",
"default_model": "Default model",
"logout": "Sign out",
"model.active": "Active",
"model.alias": "Alias",
"model.alias_tip": "The name of the model displayed in the system is convenient for users to understand.",
"model.censor": "Censor check",
"model.censor_tip": "If sensitive verification is required, turn on this switch",
"model.charsPointsPrice": "Chars Price",
"model.charsPointsPrice_tip": "Combine the model input and output for Token billing. If the language model is configured with input and output billing separately, the input and output will be calculated separately.",
"model.custom_cq_prompt": "Custom question classification prompt words",
"model.custom_cq_prompt_tip": "Override the system's default question classification prompt words, which default to:\n\"\"\"\n请帮我执行一个“问题分类”任务将问题分类为以下几种类型之一\n\n\"\"\"\n{{typeList}}\n\"\"\"\n\n## 背景知识\n{{systemPrompt}}\n\n## 对话记录\n{{history}}\n\n## 开始任务\n\n现在我们开始分类我会给你一个\"问题\"请结合背景知识和对话记录将问题分类到对应的类型中并返回类型ID。\n\n问题\"{{question}}\"\n类型ID=\n\"\"\"",
"model.custom_extract_prompt": "Custom content extraction prompt words",
"model.custom_extract_prompt_tip": "Override system prompt word, default is:\n\"\"\"\n你可以从 <对话记录></对话记录> 中提取指定 Json 信息,你仅需返回 Json 字符串,无需回答问题。\n<提取要求>\n{{description}}\n</提取要求>\n\n<提取规则>\n- 本次需提取的 json 字符串,需符合 JsonSchema 的规则。\n- type 代表数据类型; key 代表字段名; description 代表字段的描述; enum 是枚举值,代表可选的 value。\n- 如果没有可提取的内容,忽略该字段。\n</提取规则>\n\n<JsonSchema>\n{{json}}\n</JsonSchema>\n\n<对话记录>\n{{text}}\n</对话记录>\n\n提取的 json 字符串:\n\"\"\"",
"model.dataset_process": "Dataset file parse",
"model.default_config": "Body extra fields",
"model.default_config_tip": "When initiating a conversation request, merge this configuration. \nFor example:\n\"\"\"\n{\n \"temperature\": 1,\n \"max_tokens\": null\n}\n\"\"\"",
"model.default_system_chat_prompt": "Default prompt",
"model.default_system_chat_prompt_tip": "When the model talks, it will carry this default prompt word.",
"model.default_token": "Default tokens",
"model.default_token_tip": "The length of the default text block of the index model must be less than the maximum length above",
"model.delete_model_confirm": "Confirm to delete this model?",
"model.edit_model": "Model parameter editing",
"model.function_call": "Function Call",
"model.function_call_tip": "If the model supports function calling, turn on this switch. \nTool calls have higher priority.",
"model.input_price": "Input price",
"model.input_price_tip": "Language model input price. If this item is configured, the model comprehensive price will be invalid.",
"model.json_config": "File config",
"model.json_config_confirm": "Confirm to use this configuration for override?",
"model.json_config_tip": "Configure the model through the configuration file. After clicking Confirm, the entered configuration will be used for full coverage. Please ensure that the configuration file is entered correctly. \nIt is recommended to copy the current configuration file for backup before operation.",
"model.max_quote": "KB max quote",
"model.max_temperature": "Max temperature",
"model.model_id": "Model ID",
"model.model_id_tip": "The unique identifier of the model, that is, the value of the actual request to the service provider model, needs to correspond to the model in the OneAPI channel.",
"model.output_price": "Output price",
"model.output_price_tip": "The language model output price. If this item is configured, the model comprehensive price will be invalid.",
"model.param_name": "Parameter name",
"model.request_auth": "Custom token",
"model.request_auth_tip": "When making a request to a custom request address, carry the request header: Authorization: Bearer xxx to make the request.",
"model.request_url": "Custom url",
"model.request_url_tip": "If this value is filled in, a request will be made directly to this address without going through OneAPI",
"model.tool_choice": "Tool choice",
"model.tool_choice_tip": "If the model supports tool calling, turn on this switch",
"model.used_in_classify": "Used for problem classification",
"model.used_in_extract_fields": "for text extraction",
"model.used_in_tool_call": "Used for tool call nodes",
"model.vision": "Vision model",
"model.vision_tip": "If the model supports image recognition, turn on this switch.",
"model.voices": "voice role",
"model.voices_tip": "Configure multiple through an array, for example:\n\n[\n {\n \"label\": \"Alloy\",\n \"value\": \"alloy\"\n },\n {\n \"label\": \"Echo\",\n \"value\": \"echo\"\n }\n]",
"model_provider": "Model Provider",
"notifications": "Notify",
"personal_information": "Personal",

View File

@ -922,6 +922,7 @@
"model.search_name_placeholder": "Search by model name",
"model.type.chat": "language model",
"model.type.embedding": "Embedding",
"model.type.reRank": "ReRank",
"model.type.stt": "speech recognition",
"model.type.tts": "TTS",
"model_alicloud": "Ali Cloud",

View File

@ -4,12 +4,61 @@
"api_key": "API 密钥",
"bills_and_invoices": "账单与发票",
"channel": "渠道",
"config_model": "模型配置",
"confirm_logout": "确认退出登录?",
"create_channel": "新增渠道",
"create_model": "新增模型",
"custom_model": "自定义模型",
"default_model": "预设模型",
"logout": "登出",
"model.active": "启用",
"model.alias": "别名",
"model.alias_tip": "模型在系统中展示的名字,方便用户理解",
"model.censor": "启用敏感校验",
"model.censor_tip": "如果需要进行敏感校验,则开启该开关",
"model.charsPointsPrice": "模型综合价格",
"model.charsPointsPrice_tip": "将模型输入和输出合并起来进行 Token 计费,语言模型如果单独配置了输入和输出计费,则按输入和输出分别计算",
"model.custom_cq_prompt": "自定义问题分类提示词",
"model.custom_cq_prompt_tip": "覆盖系统默认的问题分类提示词,默认为:\n",
"model.custom_extract_prompt": "自定义内容提取提示词",
"model.custom_extract_prompt_tip": "覆盖系统的提示词,默认为:\n\"\"\"\n你可以从 <对话记录></对话记录> 中提取指定 Json 信息,你仅需返回 Json 字符串,无需回答问题。\n<提取要求>\n{{description}}\n</提取要求>\n\n<提取规则>\n- 本次需提取的 json 字符串,需符合 JsonSchema 的规则。\n- type 代表数据类型; key 代表字段名; description 代表字段的描述; enum 是枚举值,代表可选的 value。\n- 如果没有可提取的内容,忽略该字段。\n</提取规则>\n\n<JsonSchema>\n{{json}}\n</JsonSchema>\n\n<对话记录>\n{{text}}\n</对话记录>\n\n提取的 json 字符串:\n\"\"\"",
"model.dataset_process": "用于知识库文件处理",
"model.default_config": "Body 额外字段",
"model.default_config_tip": "发起对话请求时候,合并该配置。例如:\n\"\"\"\n{\n \"temperature\": 1,\n \"max_tokens\": null\n}\n\"\"\"",
"model.default_system_chat_prompt": "默认提示词",
"model.default_system_chat_prompt_tip": "模型对话时,都会携带该默认提示词",
"model.default_token": "默认分块长度",
"model.default_token_tip": "索引模型默认文本分块的长度,必须小于最大上文",
"model.delete_model_confirm": "确认删除该模型?",
"model.edit_model": "模型参数编辑",
"model.function_call": "支持函数调用",
"model.function_call_tip": "如果模型支持函数调用,则开启该开关。工具调用优先级更高。",
"model.input_price": "模型输入价格",
"model.input_price_tip": "语言模型输入价格,如果配置了该项,则模型综合价格会失效",
"model.json_config": "配置文件",
"model.json_config_confirm": "确认使用该配置进行覆盖?",
"model.json_config_tip": "通过配置文件配置模型,点击确认后,会使用输入的配置进行全量覆盖,请确保配置文件输入正确。建议操作前,复制当前配置文件进行备份。",
"model.max_quote": "知识库最大引用",
"model.max_temperature": "最大温度",
"model.model_id": "模型ID",
"model.model_id_tip": "模型的唯一标识也就是实际请求到服务商model 的值,需要与 OneAPI 渠道中的模型对应。",
"model.output_price": "模型输出价格",
"model.output_price_tip": "语言模型输出价格,如果配置了该项,则模型综合价格会失效",
"model.param_name": "参数名",
"model.request_auth": "自定义请求 Tokens",
"model.request_auth_tip": "向自定义请求地址发起请求时候携带请求头Authorization: Bearer xxx 进行请求",
"model.request_url": "自定义请求地址",
"model.request_url_tip": "如果填写该值,则会直接向该地址发起请求,不经过 OneAPI",
"model.tool_choice": "支持工具调用",
"model.tool_choice_tip": "如果该模型支持工具调用,则开启该开关",
"model.used_in_classify": "用于问题分类",
"model.used_in_extract_fields": "用于文本提取",
"model.used_in_query_extension": "用于问题优化",
"model.used_in_tool_call": "用于工具调用节点",
"model.vision": "支持图片识别",
"model.vision_tip": "如果模型支持图片识别,则打开该开关。",
"model.voices": "声音角色",
"model.voices_tip": "通过一个数组配置多个,例如:\n[\n {\n \"label\": \"Alloy\",\n \"value\": \"alloy\"\n },\n {\n \"label\": \"Echo\",\n \"value\": \"echo\"\n }\n]",
"model_provider": "模型提供商",
"notifications": "通知",
"personal_information": "个人信息",

View File

@ -925,6 +925,7 @@
"model.search_name_placeholder": "根据模型名搜索",
"model.type.chat": "语言模型",
"model.type.embedding": "索引模型",
"model.type.reRank": "重排模型",
"model.type.stt": "语音识别",
"model.type.tts": "语音合成",
"model_alicloud": "阿里云",

View File

@ -4,12 +4,59 @@
"api_key": "API 金鑰",
"bills_and_invoices": "帳單與發票",
"channel": "頻道",
"config_model": "模型配置",
"confirm_logout": "確認登出登入?",
"create_channel": "新增頻道",
"create_model": "新增模型",
"custom_model": "自訂模型",
"default_model": "預設模型",
"logout": "登出",
"model.active": "啟用",
"model.alias": "別名",
"model.alias_tip": "模型在系統中展示的名字,方便使用者理解",
"model.censor_tip": "如果需要進行敏感校驗,則開啟該開關",
"model.charsPointsPrice": "模型綜合價格",
"model.charsPointsPrice_tip": "將模型輸入和輸出合併起來進行 Token 計費,語言模型如果單獨配置了輸入和輸出計費,則按輸入和輸出分別計算",
"model.custom_cq_prompt": "自訂問題分類提示詞",
"model.custom_cq_prompt_tip": "覆蓋系統預設的問題分類提示詞,預設為:\n\"\"\"\n请帮我执行一个“问题分类”任务将问题分类为以下几种类型之一\n\n\"\"\"\n{{typeList}}\n\"\"\"\n\n## 背景知识\n{{systemPrompt}}\n\n## 对话记录\n{{history}}\n\n## 开始任务\n\n现在我们开始分类我会给你一个\"问题\"请结合背景知识和对话记录将问题分类到对应的类型中并返回类型ID。\n\n问题\"{{question}}\"\n类型ID=\n\"\"\"",
"model.custom_extract_prompt": "自訂內容提取提示詞",
"model.custom_extract_prompt_tip": "覆蓋系統的提示詞,預設為:\n\"\"\"\n你可以从 <对话记录></对话记录> 中提取指定 Json 信息,你仅需返回 Json 字符串,无需回答问题。\n<提取要求>\n{{description}}\n</提取要求>\n\n<提取规则>\n- 本次需提取的 json 字符串,需符合 JsonSchema 的规则。\n- type 代表数据类型; key 代表字段名; description 代表字段的描述; enum 是枚举值,代表可选的 value。\n- 如果没有可提取的内容,忽略该字段。\n</提取规则>\n\n<JsonSchema>\n{{json}}\n</JsonSchema>\n\n<对话记录>\n{{text}}\n</对话记录>\n\n提取的 json 字符串:\n\"\"\"",
"model.dataset_process": "用於知識庫文件處理",
"model.default_config": "Body 額外字段",
"model.default_config_tip": "發起對話請求時候,合併該配置。例如:\n\"\"\"\n{\n \"temperature\": 1,\n \"max_tokens\": null\n}\n\"\"\"",
"model.default_system_chat_prompt": "預設提示詞",
"model.default_system_chat_prompt_tip": "模型對話時,都會攜帶該預設提示詞",
"model.default_token": "預設分塊長度",
"model.default_token_tip": "索引模型預設文字分塊的長度,必須小於最大上文",
"model.delete_model_confirm": "確認刪除該模型?",
"model.edit_model": "模型參數編輯",
"model.function_call": "支援函數調用",
"model.function_call_tip": "如果模型支援函數調用,則開啟該開關。\n工具呼叫優先權更高。",
"model.input_price": "模型輸入價格",
"model.input_price_tip": "語言模型輸入價格,如果配置了該項,則模型綜合價格會失效",
"model.json_config": "設定檔",
"model.json_config_confirm": "確認使用該配置進行覆蓋?",
"model.json_config_tip": "透過設定檔設定模型,點選確認後,會使用輸入的配置進行全量覆蓋,請確保設定檔輸入正確。\n建議操作前複製目前設定檔進行備份。",
"model.max_quote": "知識庫最大引用",
"model.max_temperature": "最大溫度",
"model.model_id": "模型ID",
"model.model_id_tip": "模型的唯一標識也就是實際請求到服務商model 的值,需要與 OneAPI 頻道中的模型對應。",
"model.output_price": "模型輸出價格",
"model.output_price_tip": "語言模型輸出價格,如果配置了該項,則模型綜合價格會失效",
"model.param_name": "參數名",
"model.request_auth": "自訂請求 Tokens",
"model.request_auth_tip": "向自訂請求地址發起請求時候攜帶請求頭Authorization: Bearer xxx 進行請求",
"model.request_url": "自訂請求地址",
"model.request_url_tip": "如果填入該值,則會直接向該位址發起請求,不經過 OneAPI",
"model.tool_choice": "支援工具調用",
"model.tool_choice_tip": "如果該模型支援工具調用,則開啟該開關",
"model.used_in_classify": "用於問題分類",
"model.used_in_extract_fields": "用於文字擷取",
"model.used_in_tool_call": "用於工具呼叫節點",
"model.vision": "支援圖片識別",
"model.vision_tip": "如果模型支援圖片識別,則開啟該開關。",
"model.voices": "聲音角色",
"model.voices_tip": "透過一個數組配置多個,例如:\n\n[\n {\n \"label\": \"Alloy\",\n \"value\": \"alloy\"\n },\n {\n \"label\": \"Echo\",\n \"value\": \"echo\"\n }\n]",
"model_provider": "模型提供者",
"notifications": "通知",
"personal_information": "個人資訊",

View File

@ -922,6 +922,7 @@
"model.provider": "模型提供者",
"model.search_name_placeholder": "根據模型名搜尋",
"model.type.chat": "語言模型",
"model.type.reRank": "重排模型",
"model.type.stt": "語音辨識",
"model.type.tts": "語音合成",
"model_alicloud": "阿里雲",

View File

@ -25,7 +25,7 @@ const { definePartsStyle: tablePart, defineMultiStyleConfig: tableMultiStyle } =
const { definePartsStyle: radioParts, defineMultiStyleConfig: radioStyle } =
createMultiStyleConfigHelpers(radioAnatomy.keys);
const shadowLight = '0px 0px 0px 2.4px rgba(51, 112, 255, 0.15)';
export const shadowLight = '0px 0px 0px 2.4px rgba(51, 112, 255, 0.15)';
// 按键
const Button = defineStyleConfig({

View File

@ -25,7 +25,6 @@
"usedInClassify": true, // true
"usedInExtractFields": true, // true
"usedInToolCall": true, // true
"usedInQueryExtension": true, // true
"toolChoice": true, //
"functionCall": false, // 使 toolChoicefalse使 functionCall false使
"customCQPrompt": "", //
@ -49,7 +48,6 @@
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": true,
"functionCall": false,
"customCQPrompt": "",
@ -73,7 +71,6 @@
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",
@ -100,7 +97,6 @@
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
"usedInQueryExtension": true,
"toolChoice": false,
"functionCall": false,
"customCQPrompt": "",

View File

@ -22,7 +22,7 @@ type Props = SelectProps & {
const OneRowSelector = ({ list, onchange, disableTip, ...props }: Props) => {
const { t } = useTranslation();
const { feConfigs, llmModelList, vectorModelList } = useSystemStore();
const { feConfigs, llmModelList, embeddingModelList } = useSystemStore();
const avatarSize = useMemo(() => {
const size = {
@ -35,7 +35,7 @@ const OneRowSelector = ({ list, onchange, disableTip, ...props }: Props) => {
}, [props.size]);
const avatarList = list.map((item) => {
const modelData = getModelFromList([...llmModelList, ...vectorModelList], item.value);
const modelData = getModelFromList([...llmModelList, ...embeddingModelList], item.value);
return {
value: item.value,
@ -100,7 +100,7 @@ const OneRowSelector = ({ list, onchange, disableTip, ...props }: Props) => {
};
const MultipleRowSelector = ({ list, onchange, disableTip, ...props }: Props) => {
const { t } = useTranslation();
const { llmModelList, vectorModelList } = useSystemStore();
const { llmModelList, embeddingModelList } = useSystemStore();
const [value, setValue] = useState<string[]>([]);
const avatarSize = useMemo(() => {
@ -136,7 +136,7 @@ const MultipleRowSelector = ({ list, onchange, disableTip, ...props }: Props) =>
}));
for (const item of list) {
const modelData = getModelFromList([...llmModelList, ...vectorModelList], item.value);
const modelData = getModelFromList([...llmModelList, ...embeddingModelList], item.value);
const provider =
renderList.find((item) => item.value === (modelData?.provider || 'Other')) ??
renderList[renderList.length - 1];
@ -148,7 +148,7 @@ const MultipleRowSelector = ({ list, onchange, disableTip, ...props }: Props) =>
}
return renderList.filter((item) => item.children.length > 0);
}, [avatarSize, list, llmModelList, t, vectorModelList]);
}, [avatarSize, list, llmModelList, t, embeddingModelList]);
const onSelect = useCallback(
(e: string[]) => {
@ -158,7 +158,7 @@ const MultipleRowSelector = ({ list, onchange, disableTip, ...props }: Props) =>
);
const SelectedModel = useMemo(() => {
const modelData = getModelFromList([...llmModelList, ...vectorModelList], props.value);
const modelData = getModelFromList([...llmModelList, ...embeddingModelList], props.value);
setValue([modelData.provider, props.value]);
@ -174,7 +174,7 @@ const MultipleRowSelector = ({ list, onchange, disableTip, ...props }: Props) =>
<Box>{modelData?.name}</Box>
</HStack>
);
}, [avatarSize, llmModelList, props.value, vectorModelList]);
}, [avatarSize, llmModelList, props.value, embeddingModelList]);
return (
<Box

@ -53,7 +53,8 @@ const ModelTable = () => {
const [search, setSearch] = useState('');
const { llmModelList, audioSpeechModelList, vectorModelList, whisperModel } = useSystemStore();
const { llmModelList, ttsModelList, embeddingModelList, sttModelList, reRankModelList } =
useSystemStore();
const modelList = useMemo(() => {
const formatLLMModelList = llmModelList.map((item) => ({
@ -87,7 +88,7 @@ const ModelTable = () => {
),
tagColor: 'blue'
}));
const formatVectorModelList = vectorModelList.map((item) => ({
const formatVectorModelList = embeddingModelList.map((item) => ({
...item,
typeLabel: t('common:model.type.embedding'),
priceLabel: (
@ -100,7 +101,7 @@ const ModelTable = () => {
),
tagColor: 'yellow'
}));
const formatAudioSpeechModelList = audioSpeechModelList.map((item) => ({
const formatAudioSpeechModelList = ttsModelList.map((item) => ({
...item,
typeLabel: t('common:model.type.tts'),
priceLabel: (
@ -113,31 +114,39 @@ const ModelTable = () => {
),
tagColor: 'green'
}));
const formatWhisperModel = {
...whisperModel,
const formatWhisperModelList = sttModelList.map((item) => ({
...item,
typeLabel: t('common:model.type.stt'),
priceLabel: (
<Flex color={'myGray.700'}>
<Box fontWeight={'bold'} color={'myGray.900'} mr={0.5}>
{whisperModel.charsPointsPrice}
{item.charsPointsPrice}
</Box>
{` ${t('common:support.wallet.subscription.point')} / 60${t('common:unit.seconds')}`}
</Flex>
),
tagColor: 'purple'
};
}));
const formatRerankModelList = reRankModelList.map((item) => ({
...item,
typeLabel: t('common:model.type.reRank'),
priceLabel: <Flex color={'myGray.700'}>- </Flex>,
tagColor: 'red'
}));
const list = (() => {
if (modelType === ModelTypeEnum.chat) return formatLLMModelList;
if (modelType === ModelTypeEnum.llm) return formatLLMModelList;
if (modelType === ModelTypeEnum.embedding) return formatVectorModelList;
if (modelType === ModelTypeEnum.tts) return formatAudioSpeechModelList;
if (modelType === ModelTypeEnum.stt) return [formatWhisperModel];
if (modelType === ModelTypeEnum.stt) return formatWhisperModelList;
if (modelType === ModelTypeEnum.rerank) return formatRerankModelList;
return [
...formatLLMModelList,
...formatVectorModelList,
...formatAudioSpeechModelList,
formatWhisperModel
...formatWhisperModelList,
...formatRerankModelList
];
})();
const formatList = list.map((item) => {
@ -167,9 +176,10 @@ const ModelTable = () => {
return filterList;
}, [
llmModelList,
vectorModelList,
audioSpeechModelList,
whisperModel,
embeddingModelList,
ttsModelList,
sttModelList,
reRankModelList,
t,
modelType,
provider,
@ -179,15 +189,16 @@ const ModelTable = () => {
const filterProviderList = useMemo(() => {
const allProviderIds: string[] = [
...llmModelList,
...vectorModelList,
...audioSpeechModelList,
whisperModel
...embeddingModelList,
...ttsModelList,
...sttModelList,
...reRankModelList
].map((model) => model.provider);
return providerList.current.filter(
(item) => allProviderIds.includes(item.value) || item.value === ''
);
}, [audioSpeechModelList, llmModelList, vectorModelList, whisperModel]);
}, [ttsModelList, llmModelList, embeddingModelList, sttModelList, reRankModelList]);
return (
<Flex flexDirection={'column'} h={'100%'}>

@ -70,12 +70,10 @@ const DatasetParamsModal = ({
const [currentTabType, setCurrentTabType] = useState(SearchSettingTabEnum.searchMode);
const chatModelSelectList = (() =>
llmModelList
.filter((model) => model.usedInQueryExtension)
.map((item) => ({
value: item.model,
label: item.name
})))();
llmModelList.map((item) => ({
value: item.model,
label: item.name
})))();
const { register, setValue, getValues, handleSubmit, watch } = useForm<DatasetParamsProps>({
defaultValues: {

@ -25,7 +25,7 @@ const TTSSelect = ({
onChange: (e: AppTTSConfigType) => void;
}) => {
const { t } = useTranslation();
const { audioSpeechModelList } = useSystemStore();
const { ttsModelList } = useSystemStore();
const { isOpen, onOpen, onClose } = useDisclosure();
const appId = useContextSelector(AppContext, (v) => v.appId);
@ -34,9 +34,9 @@ const TTSSelect = ({
() => [
{ label: t('common:core.app.tts.Close'), value: TTSTypeEnum.none },
{ label: t('common:core.app.tts.Web'), value: TTSTypeEnum.web },
...audioSpeechModelList.map((item) => item?.voices || []).flat()
...ttsModelList.map((item) => item?.voices || []).flat()
],
[audioSpeechModelList, t]
[ttsModelList, t]
);
const formatValue = useMemo(() => {
@ -63,7 +63,7 @@ const TTSSelect = ({
if (e === TTSTypeEnum.none || e === TTSTypeEnum.web) {
onChange({ type: e as `${TTSTypeEnum}` });
} else {
const audioModel = audioSpeechModelList.find((item) =>
const audioModel = ttsModelList.find((item) =>
item.voices?.find((voice) => voice.value === e)
);
if (!audioModel) {
@ -77,7 +77,7 @@ const TTSSelect = ({
});
}
},
[audioSpeechModelList, onChange, value]
[ttsModelList, onChange, value]
);
const onCloseTTSModal = useCallback(() => {

@ -107,7 +107,7 @@ const ChatInput = ({
);
/* whisper init */
const { whisperModel } = useSystemStore();
const { sttModelList } = useSystemStore();
const canvasRef = useRef<HTMLCanvasElement>(null);
const {
isSpeaking,
@ -293,7 +293,7 @@ const ChatInput = ({
/>
<Flex alignItems={'center'} position={'absolute'} right={[2, 4]} bottom={['10px', '12px']}>
{/* voice-input */}
{whisperConfig.open && !inputValue && !isChatting && !!whisperModel && (
{whisperConfig.open && !inputValue && !isChatting && sttModelList.length > 0 && (
<>
<canvas
ref={canvasRef}
@ -431,7 +431,7 @@ const ChatInput = ({
stopSpeak,
t,
whisperConfig.open,
whisperModel
sttModelList
]
);

@ -1,6 +1,6 @@
import type {
LLMModelItemType,
VectorModelItemType,
EmbeddingModelItemType,
AudioSpeechModels,
STTModelType,
ReRankModelItemType
@ -8,15 +8,14 @@ import type {
import type { FastGPTFeConfigsType } from '@fastgpt/global/common/system/types/index.d';
import { SubPlanType } from '@fastgpt/global/support/wallet/sub/type';
import { SystemModelItemType } from '@fastgpt/service/core/ai/type';
export type InitDateResponse = {
bufferId?: string;
llmModels: LLMModelItemType[];
vectorModels: VectorModelItemType[];
audioSpeechModels: AudioSpeechModels[];
reRankModels: ReRankModelItemType[];
whisperModel: STTModelType;
feConfigs: FastGPTFeConfigsType;
subPlans?: SubPlanType;
systemVersion: string;
activeModelList?: SystemModelItemType[];
};

@ -1,72 +1,43 @@
import React, { useMemo, useState } from 'react';
import MyModal from '@fastgpt/web/components/common/MyModal';
import { useTranslation } from 'next-i18next';
import { Box, Flex, ModalBody } from '@chakra-ui/react';
import { MultipleRowArraySelect } from '@fastgpt/web/components/common/MySelect/MultipleRowSelect';
import { ModalBody } from '@chakra-ui/react';
import { useSystemStore } from '@/web/common/system/useSystemStore';
import { ModelProviderList } from '@fastgpt/global/core/ai/provider';
import Avatar from '@fastgpt/web/components/common/Avatar';
import { HUGGING_FACE_ICON } from '@fastgpt/global/common/system/constants';
import { getModelFromList } from '@fastgpt/global/core/ai/model';
import { getSystemModelList } from '@/web/core/ai/config';
import MultipleSelect from '@fastgpt/web/components/common/MySelect/MultipleSelect';
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
const DefaultModal = ({ onClose }: { onClose: () => void }) => {
const { t } = useTranslation();
const { llmModelList, vectorModelList, whisperModel, audioSpeechModelList, reRankModelList } =
const { data: systemModelList = [] } = useRequest2(getSystemModelList, {
manual: false
});
const selectorList = useMemo(() => {
return systemModelList.map((item) => ({
icon: item.avatar,
label: item.name,
value: item.model
}));
}, [systemModelList]);
const { llmModelList, embeddingModelList, sttModelList, ttsModelList, reRankModelList } =
useSystemStore();
const [value, setValue] = useState<string[]>([]);
const modelList = useMemo(() => {
return [
...llmModelList,
...vectorModelList,
...audioSpeechModelList,
...embeddingModelList,
...ttsModelList,
...reRankModelList,
whisperModel
...sttModelList
].map((item) => ({
provider: item.provider,
name: item.name,
model: item.model
}));
}, [llmModelList, vectorModelList, whisperModel, audioSpeechModelList, reRankModelList]);
const selectorList = useMemo(() => {
const renderList = ModelProviderList.map<{
label: React.JSX.Element;
value: string;
children: { label: string | React.ReactNode; value: string }[];
}>((provider) => ({
label: (
<Flex alignItems={'center'} py={1}>
<Avatar
borderRadius={'0'}
mr={2}
src={provider?.avatar || HUGGING_FACE_ICON}
fallbackSrc={HUGGING_FACE_ICON}
w={'1rem'}
/>
<Box>{t(provider.name as any)}</Box>
</Flex>
),
value: provider.id,
children: []
}));
for (const item of modelList) {
const modelData = getModelFromList(modelList, item.model);
const provider =
renderList.find((item) => item.value === (modelData?.provider || 'Other')) ??
renderList[renderList.length - 1];
provider.children.push({
label: modelData.name,
value: modelData.model
});
}
return renderList.filter((item) => item.children.length > 0);
}, [modelList, t]);
console.log(selectorList);
}, [llmModelList, embeddingModelList, sttModelList, ttsModelList, reRankModelList]);
return (
<MyModal
@ -76,7 +47,15 @@ const DefaultModal = ({ onClose }: { onClose: () => void }) => {
iconColor="primary.600"
onClose={onClose}
>
<ModalBody>11</ModalBody>
<ModalBody>
<MultipleSelect<string>
list={selectorList}
value={value}
onSelect={(e) => {
setValue(e);
}}
/>
</ModalBody>
</MyModal>
);
};

File diff suppressed because it is too large.

@ -1,72 +1,43 @@
import { serviceSideProps } from '@fastgpt/web/common/system/nextjs';
import React, { useState } from 'react';
import React, { useMemo, useState } from 'react';
import AccountContainer from '../components/AccountContainer';
import { Box, Button, Flex, useDisclosure } from '@chakra-ui/react';
import { Box, Flex } from '@chakra-ui/react';
import ModelTable from '@/components/core/ai/ModelTable';
import { useUserStore } from '@/web/support/user/useUserStore';
import FillRowTabs from '@fastgpt/web/components/common/Tabs/FillRowTabs';
import { useTranslation } from 'next-i18next';
import MyMenu from '@fastgpt/web/components/common/MyMenu';
import dynamic from 'next/dynamic';
const DefaultModal = dynamic(() => import('./components/DefaultModal'), {
ssr: false
});
const ModelConfigTable = dynamic(() => import('./components/ModelConfigTable'));
type TabType = 'model' | 'config' | 'channel';
const ModelProvider = () => {
const { t } = useTranslation();
const { userInfo } = useUserStore();
const isRoot = userInfo?.username === 'root';
const [tab, setTab] = useState<'model' | 'channel'>('model');
const [tab, setTab] = useState<TabType>('model');
const { isOpen: isOpenDefault, onOpen: onOpenDefault, onClose: onCloseDefault } = useDisclosure();
const Tab = useMemo(() => {
return (
<FillRowTabs<TabType>
list={[
{ label: t('account:active_model'), value: 'model' },
{ label: t('account:config_model'), value: 'config' }
// { label: t('account:channel'), value: 'channel' }
]}
value={tab}
py={1}
onChange={setTab}
/>
);
}, [t, tab]);
return (
<AccountContainer>
<Flex h={'100%'} flexDirection={'column'} gap={4} py={4} px={6}>
{/* Header */}
{/* <Flex justifyContent={'space-between'}>
<FillRowTabs<'model' | 'channel'>
list={[
{ label: t('account:active_model'), value: 'model' },
{ label: t('account:channel'), value: 'channel' }
]}
value={tab}
px={8}
py={1}
onChange={setTab}
/>
{tab === 'model' && (
<MyMenu
trigger="hover"
size="mini"
Button={<Button>{t('account:create_model')}</Button>}
menuList={[
{
children: [
{
label: t('account:default_model'),
onClick: onOpenDefault
},
{
label: t('account:custom_model')
}
]
}
]}
/>
)}
{tab === 'channel' && <Button>{t('account:create_channel')}</Button>}
</Flex> */}
<Box flex={'1 0 0'}>
{tab === 'model' && <ModelTable />}
{/* {tab === 'channel' && <ChannelTable />} */}
</Box>
{tab === 'model' && <ValidModelTable Tab={Tab} />}
{tab === 'config' && <ModelConfigTable Tab={Tab} />}
</Flex>
{isOpenDefault && <DefaultModal onClose={onCloseDefault} />}
</AccountContainer>
);
};
@ -80,3 +51,16 @@ export async function getServerSideProps(content: any) {
}
export default ModelProvider;
const ValidModelTable = ({ Tab }: { Tab: React.ReactNode }) => {
const { userInfo } = useUserStore();
const isRoot = userInfo?.username === 'root';
return (
<>
{isRoot && <Flex justifyContent={'space-between'}>{Tab}</Flex>}
<Box flex={'1 0 0'}>
<ModelTable />
</Box>
</>
);
};

@ -0,0 +1,77 @@
import { readConfigData } from '@/service/common/system';
import { NextAPI } from '@/service/middleware/entry';
import {
getFastGPTConfigFromDB,
updateFastGPTConfigBuffer
} from '@fastgpt/service/common/system/config/controller';
import { authCert } from '@fastgpt/service/support/permission/auth/common';
import { NextApiRequest, NextApiResponse } from 'next';
import json5 from 'json5';
import { FastGPTConfigFileType } from '@fastgpt/global/common/system/types';
import { MongoSystemModel } from '@fastgpt/service/core/ai/config/schema';
import { loadSystemModels } from '@fastgpt/service/core/ai/config/utils';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
/*
  Init system models: merge the model lists from the DB config and config.json,
  upsert each one into MongoSystemModel as active, then reload the in-memory
  model list and refresh the config buffer.
*/
async function handler(req: NextApiRequest, res: NextApiResponse) {
await authCert({ req, authRoot: true });
// load config
const [{ config: dbConfig }, fileConfig] = await Promise.all([
getFastGPTConfigFromDB(),
readConfigData('config.json')
]);
const fileRes = json5.parse(fileConfig) as FastGPTConfigFileType;
const llmModels = dbConfig.llmModels || fileRes.llmModels || [];
const vectorModels = dbConfig.vectorModels || fileRes.vectorModels || [];
const reRankModels = dbConfig.reRankModels || fileRes.reRankModels || [];
const audioSpeechModels = dbConfig.audioSpeechModels || fileRes.audioSpeechModels || [];
const whisperModel = dbConfig.whisperModel || fileRes.whisperModel;
const list = [
...llmModels.map((item) => ({
...item,
type: ModelTypeEnum.llm
})),
...vectorModels.map((item) => ({
...item,
type: ModelTypeEnum.embedding
})),
...reRankModels.map((item) => ({
...item,
type: ModelTypeEnum.rerank
})),
...audioSpeechModels.map((item) => ({
...item,
type: ModelTypeEnum.tts
})),
{
...whisperModel,
type: ModelTypeEnum.stt
}
];
for await (const item of list) {
try {
await MongoSystemModel.updateOne(
{ model: item.model },
{ $set: { model: item.model, metadata: { ...item, isActive: true } } },
{ upsert: true }
);
} catch (error) {
console.log(error);
}
}
await loadSystemModels(true);
await updateFastGPTConfigBuffer();
return { success: true };
}
export default NextAPI(handler);
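
For orientation, a sketch of one record as the loop above writes it — the model id, provider, and display name here are illustrative, not taken from this commit:

// Hypothetical MongoSystemModel record after the sync above.
const exampleRecord = {
  model: 'gpt-4o-mini',
  metadata: {
    type: ModelTypeEnum.llm, // tag attached by the mapping above
    model: 'gpt-4o-mini',
    provider: 'OpenAI',
    name: 'GPT-4o mini',
    isActive: true // set unconditionally by the $set
  }
};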

@ -5,6 +5,20 @@ import { NextAPI } from '@/service/middleware/entry';
async function handler(req: ApiRequestProps<{}, { bufferId?: string }>, res: NextApiResponse) {
const { bufferId } = req.query;
const activeModelList = global.systemActiveModelList.map((model) => ({
...model,
customCQPrompt: undefined,
customExtractPrompt: undefined,
defaultSystemChatPrompt: undefined,
fieldMap: undefined,
defaultConfig: undefined,
weight: undefined,
dbConfig: undefined,
queryConfig: undefined,
requestUrl: undefined,
requestAuth: undefined
}));
// If bufferId is the same as the current bufferId, return directly
if (bufferId && global.systemInitBufferId && global.systemInitBufferId === bufferId) {
return {
@ -17,21 +31,7 @@ async function handler(req: ApiRequestProps<{}, { bufferId?: string }>, res: Nex
bufferId: global.systemInitBufferId,
feConfigs: global.feConfigs,
subPlans: global.subPlans,
llmModels: global.llmModels.map((model) => ({
...model,
customCQPrompt: '',
customExtractPrompt: '',
defaultSystemChatPrompt: ''
})),
vectorModels: global.vectorModels,
reRankModels:
global.reRankModels?.map((item) => ({
...item,
requestUrl: '',
requestAuth: ''
})) || [],
whisperModel: global.whisperModel,
audioSpeechModels: global.audioSpeechModels,
activeModelList,
systemVersion: global.systemVersion || '0.0.0'
};
}

@ -16,6 +16,7 @@ import { MongoTeamMember } from '@fastgpt/service/support/user/team/teamMemberSc
import { TeamMemberRoleEnum } from '@fastgpt/global/support/user/team/constant';
import { ChatErrEnum } from '@fastgpt/global/common/error/code/chat';
import { authCert } from '@fastgpt/service/support/permission/auth/common';
import { getFirstLLMModel } from '@fastgpt/service/core/ai/model';
async function handler(
req: ApiRequestProps<
@ -35,7 +36,7 @@ async function handler(
authApiKey: true
});
const qgModel = global.llmModels[0];
const qgModel = getFirstLLMModel();
const { result, inputTokens, outputTokens } = await createQuestionGuide({
messages,

@ -9,6 +9,7 @@ import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
import { getChatItems } from '@fastgpt/service/core/chat/controller';
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { getAppLatestVersion } from '@fastgpt/service/core/app/version/controller';
import { getFirstLLMModel } from '@fastgpt/service/core/ai/model';
export type CreateQuestionGuideParams = OutLinkChatAuthProps & {
appId: string;
@ -50,7 +51,7 @@ async function handler(req: ApiRequestProps<CreateQuestionGuideParams>, res: Nex
});
const messages = chats2GPTMessages({ messages: histories, reserveId: false });
const qgModel = questionGuide?.model || global.llmModels[0].model;
const qgModel = questionGuide?.model || getFirstLLMModel().model;
const { result, inputTokens, outputTokens } = await createQuestionGuide({
messages,

@ -0,0 +1,43 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { MongoSystemModel } from '@fastgpt/service/core/ai/config/schema';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
import { findModelFromAlldata } from '@fastgpt/service/core/ai/model';
import { updateFastGPTConfigBuffer } from '@fastgpt/service/common/system/config/controller';
import { loadSystemModels } from '@fastgpt/service/core/ai/config/utils';
export type deleteQuery = {
model: string;
};
export type deleteBody = {};
export type deleteResponse = {};
async function handler(
req: ApiRequestProps<deleteBody, deleteQuery>,
res: ApiResponseType<any>
): Promise<deleteResponse> {
await authSystemAdmin({ req });
const { model } = req.query;
const modelData = findModelFromAlldata(model);
if (!modelData) {
return Promise.reject('Model not found');
}
if (!modelData.isCustom) {
return Promise.reject('System model cannot be deleted');
}
await MongoSystemModel.deleteOne({ model });
await loadSystemModels(true);
await updateFastGPTConfigBuffer();
return {};
}
export default NextAPI(handler);

@ -0,0 +1,29 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { SystemModelItemType } from '@fastgpt/service/core/ai/type';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
import { findModelFromAlldata } from '@fastgpt/service/core/ai/model';
export type detailQuery = {
model: string;
};
export type detailBody = {};
export type detailResponse = SystemModelItemType;
async function handler(
req: ApiRequestProps<detailBody, detailQuery>,
res: ApiResponseType<any>
): Promise<detailResponse> {
await authSystemAdmin({ req });
const { model } = req.query;
const modelItem = findModelFromAlldata(model);
if (!modelItem) {
return Promise.reject('Model not found');
}
return modelItem;
}
export default NextAPI(handler);

@ -0,0 +1,29 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
import { MongoSystemModel } from '@fastgpt/service/core/ai/config/schema';
export type getConfigJsonQuery = {};
export type getConfigJsonBody = {};
export type getConfigJsonResponse = string;
async function handler(
req: ApiRequestProps<getConfigJsonBody, getConfigJsonQuery>,
res: ApiResponseType<any>
): Promise<getConfigJsonResponse> {
await authSystemAdmin({ req });
const data = await MongoSystemModel.find({}).lean();
return JSON.stringify(
data.map((item) => ({
model: item.model,
metadata: item.metadata
})),
null,
2
);
}
export default NextAPI(handler);
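
Since the handler serializes the documents directly, the exported string parses back into an array of { model, metadata } pairs — the same shape updateWithJson below consumes. An illustrative entry (values are examples only, not from this commit):

// Sketch of what JSON.parse(exported) yields.
const exampleExport = [
  {
    model: 'gpt-4o-mini',
    metadata: { type: 'llm', model: 'gpt-4o-mini', provider: 'OpenAI', name: 'GPT-4o mini' }
  }
];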

@ -0,0 +1,46 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { ModelProviderIdType } from '@fastgpt/global/core/ai/provider';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
export type listQuery = {};
export type listBody = {};
export type listResponse = {
type: `${ModelTypeEnum}`;
name: string;
avatar: string | undefined;
provider: ModelProviderIdType;
model: string;
charsPointsPrice?: number;
inputPrice?: number;
outputPrice?: number;
isActive: boolean;
isCustom: boolean;
}[];
async function handler(
req: ApiRequestProps<listBody, listQuery>,
res: ApiResponseType<any>
): Promise<listResponse> {
await authSystemAdmin({ req });
// Read db
return global.systemModelList.map((model) => ({
type: model.type,
provider: model.provider,
model: model.model,
name: model.name,
avatar: model.avatar,
charsPointsPrice: model.charsPointsPrice,
inputPrice: model.inputPrice,
outputPrice: model.outputPrice,
isActive: model.isActive ?? false,
isCustom: model.isCustom ?? false
}));
}
export default NextAPI(handler);

@ -0,0 +1,67 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
import { MongoSystemModel } from '@fastgpt/service/core/ai/config/schema';
import { delay } from '@fastgpt/global/common/system/utils';
import { updateFastGPTConfigBuffer } from '@fastgpt/service/common/system/config/controller';
import { findModelFromAlldata } from '@fastgpt/service/core/ai/model';
import { loadSystemModels } from '@fastgpt/service/core/ai/config/utils';
export type updateQuery = {};
export type updateBody = {
model: string;
metadata?: Record<string, any>;
};
export type updateResponse = {};
async function handler(
req: ApiRequestProps<updateBody, updateQuery>,
res: ApiResponseType<any>
): Promise<updateResponse> {
await authSystemAdmin({ req });
let { model, metadata } = req.body;
if (!model) return Promise.reject(new Error('model is required'));
model = model.trim();
const dbModel = await MongoSystemModel.findOne({ model }).lean();
const modelData = findModelFromAlldata(model);
const metadataConcat: Record<string, any> = {
...modelData, // system config
...dbModel?.metadata, // db config
...metadata // user config
};
delete metadataConcat.avatar;
delete metadataConcat.isCustom;
// Force-set model so stale metadata cannot overwrite the real model id
metadataConcat.model = model;
metadataConcat.name = metadataConcat?.name?.trim();
// Delete null value
Object.keys(metadataConcat).forEach((key) => {
if (metadataConcat[key] === null || metadataConcat[key] === undefined) {
delete metadataConcat[key];
}
});
await MongoSystemModel.updateOne(
{ model },
{
model,
metadata: metadataConcat
},
{
upsert: true
}
);
await loadSystemModels(true);
await updateFastGPTConfigBuffer();
return {};
}
export default NextAPI(handler);
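
Because of the spread order above (system config, then DB metadata, then the request's metadata), a partial update only needs to carry the keys being changed; everything else keeps its merged value. A minimal sketch, with a hypothetical model id and field:

// Sketch: only the provided metadata keys override the merged config.
const body: updateBody = {
  model: 'gpt-4o-mini',
  metadata: { isActive: false } // illustrative partial change
};
// Sent via the new web client added at the end of this diff:
// await putSystemModel(body);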

@ -0,0 +1,61 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { SystemModelSchemaType } from '@fastgpt/service/core/ai/type';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun';
import { MongoSystemModel } from '@fastgpt/service/core/ai/config/schema';
import { updateFastGPTConfigBuffer } from '@fastgpt/service/common/system/config/controller';
import { loadSystemModels } from '@fastgpt/service/core/ai/config/utils';
export type updateWithJsonQuery = {};
export type updateWithJsonBody = {
config: string;
};
export type updateWithJsonResponse = {};
async function handler(
req: ApiRequestProps<updateWithJsonBody, updateWithJsonQuery>,
res: ApiResponseType<any>
): Promise<updateWithJsonResponse> {
await authSystemAdmin({ req });
const { config } = req.body;
const data = JSON.parse(config) as SystemModelSchemaType[];
// Check
for (const item of data) {
if (!item.model || !item.metadata || typeof item.metadata !== 'object') {
return Promise.reject('Invalid model or metadata');
}
if (!item.metadata.type) {
return Promise.reject(`${item.model} metadata.type is required`);
}
if (!item.metadata.model) {
return Promise.reject(`${item.model} metadata.model is required`);
}
if (!item.metadata.provider) {
return Promise.reject(`${item.model} metadata.provider is required`);
}
item.metadata.model = item.model.trim();
}
await mongoSessionRun(async (session) => {
await MongoSystemModel.deleteMany({}, { session });
for await (const item of data) {
await MongoSystemModel.updateOne(
{ model: item.model },
{ $set: { model: item.model, metadata: item.metadata } },
{ upsert: true, session }
);
}
});
await loadSystemModels(true);
await updateFastGPTConfigBuffer();
return {};
}
export default NextAPI(handler);
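
Together with getConfigJson above, this enables an export-edit-import round trip. A sketch using the web client helpers added at the end of this diff:

// Sketch: export the full model config, then write it back verbatim.
const json = await getModelConfigJson();
await putUpdateWithJson({ config: json });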

@ -6,7 +6,7 @@ import { text2Speech } from '@fastgpt/service/core/ai/audio/speech';
import { pushAudioSpeechUsage } from '@/service/support/wallet/usage/push';
import { authChatCrud } from '@/service/support/permission/auth/chat';
import { authType2UsageSource } from '@/service/support/wallet/usage/utils';
import { getAudioSpeechModel } from '@fastgpt/service/core/ai/model';
import { getTTSModel } from '@fastgpt/service/core/ai/model';
import { MongoTTSBuffer } from '@fastgpt/service/common/buffer/tts/schema';
import { ApiRequestProps } from '@fastgpt/service/type/next';
@ -31,17 +31,19 @@ async function handler(req: ApiRequestProps<GetChatSpeechProps>, res: NextApiRes
...req.body
});
const ttsModel = getAudioSpeechModel(ttsConfig.model);
const ttsModel = getTTSModel(ttsConfig.model);
const voiceData = ttsModel.voices?.find((item) => item.value === ttsConfig.voice);
if (!voiceData) {
throw new Error('voice not found');
}
const bufferId = `${ttsModel.model}-${ttsConfig.voice}`;
/* get audio from buffer */
const ttsBuffer = await MongoTTSBuffer.findOne(
{
bufferId: voiceData.bufferId,
bufferId,
text: JSON.stringify({ text: input, speed: ttsConfig.speed })
},
'buffer'
@ -70,11 +72,21 @@ async function handler(req: ApiRequestProps<GetChatSpeechProps>, res: NextApiRes
});
/* create buffer */
await MongoTTSBuffer.create({
bufferId: voiceData.bufferId,
text: JSON.stringify({ text: input, speed: ttsConfig.speed }),
buffer
});
await MongoTTSBuffer.create(
{
bufferId,
text: JSON.stringify({ text: input, speed: ttsConfig.speed }),
buffer
},
ttsModel.requestUrl && ttsModel.requestAuth
? {
path: ttsModel.requestUrl,
headers: {
Authorization: `Bearer ${ttsModel.requestAuth}`
}
}
: {}
);
} catch (error) {}
},
onError: (err) => {

@ -1,6 +1,6 @@
import type { NextApiRequest } from 'next';
import { MongoDataset } from '@fastgpt/service/core/dataset/schema';
import { getVectorModel } from '@fastgpt/service/core/ai/model';
import { getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import type { DatasetSimpleItemType } from '@fastgpt/global/core/dataset/type.d';
import { NextAPI } from '@/service/middleware/entry';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
@ -31,7 +31,7 @@ async function handler(req: NextApiRequest): Promise<DatasetSimpleItemType[]> {
_id: item._id,
avatar: item.avatar,
name: item.name,
vectorModel: getVectorModel(item.vectorModel)
vectorModel: getEmbeddingModel(item.vectorModel)
}));
}

@ -2,7 +2,12 @@ import { MongoDataset } from '@fastgpt/service/core/dataset/schema';
import type { CreateDatasetParams } from '@/global/core/dataset/api.d';
import { authUserPer } from '@fastgpt/service/support/permission/user/auth';
import { DatasetTypeEnum } from '@fastgpt/global/core/dataset/constants';
import { getLLMModel, getVectorModel, getDatasetModel } from '@fastgpt/service/core/ai/model';
import {
getLLMModel,
getEmbeddingModel,
getDatasetModel,
getFirstEmbeddingModel
} from '@fastgpt/service/core/ai/model';
import { checkTeamDatasetLimit } from '@fastgpt/service/support/permission/teamLimit';
import { WritePermissionVal } from '@fastgpt/global/support/permission/constant';
import { NextAPI } from '@/service/middleware/entry';
@ -27,7 +32,7 @@ async function handler(
intro,
type = DatasetTypeEnum.dataset,
avatar,
vectorModel = global.vectorModels[0].model,
vectorModel = getFirstEmbeddingModel().model,
agentModel = getDatasetModel().model,
apiServer,
feishuServer,
@ -56,7 +61,7 @@ async function handler(
]);
// check model valid
const vectorModelStore = getVectorModel(vectorModel);
const vectorModelStore = getEmbeddingModel(vectorModel);
const agentModelStore = getLLMModel(agentModel);
if (!vectorModelStore || !agentModelStore) {
return Promise.reject(DatasetErrEnum.invalidVectorModelOrQAModel);

@ -4,7 +4,7 @@
*/
import type { NextApiRequest } from 'next';
import { countPromptTokens } from '@fastgpt/service/common/string/tiktoken/index';
import { getVectorModel } from '@fastgpt/service/core/ai/model';
import { getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import { hasSameValue } from '@/service/core/dataset/data/utils';
import { insertData2Dataset } from '@/service/core/dataset/data/controller';
import { authDatasetCollection } from '@fastgpt/service/support/permission/dataset/auth';
@ -59,7 +59,7 @@ async function handler(req: NextApiRequest) {
// token check
const token = await countPromptTokens(formatQ + formatA, '');
const vectorModelData = getVectorModel(vectorModel);
const vectorModelData = getEmbeddingModel(vectorModel);
if (token > vectorModelData.maxToken) {
return Promise.reject('Q Over Tokens');

@ -1,4 +1,4 @@
import { getLLMModel, getVectorModel } from '@fastgpt/service/core/ai/model';
import { getLLMModel, getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import { authDataset } from '@fastgpt/service/support/permission/dataset/auth';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { NextAPI } from '@/service/middleware/entry';
@ -50,7 +50,7 @@ async function handler(req: ApiRequestProps<Query>): Promise<DatasetItemType> {
}
: undefined,
permission,
vectorModel: getVectorModel(dataset.vectorModel),
vectorModel: getEmbeddingModel(dataset.vectorModel),
agentModel: getLLMModel(dataset.agentModel)
};
}

@ -18,7 +18,7 @@ import { getGroupsByTmbId } from '@fastgpt/service/support/permission/memberGrou
import { concatPer } from '@fastgpt/service/support/permission/controller';
import { getOrgIdSetWithParentByTmbId } from '@fastgpt/service/support/permission/org/controllers';
import { addSourceMember } from '@fastgpt/service/support/user/utils';
import { getVectorModel } from '@fastgpt/service/core/ai/model';
import { getEmbeddingModel } from '@fastgpt/service/core/ai/model';
export type GetDatasetListBody = {
parentId: ParentIdType;
@ -172,7 +172,7 @@ async function handler(req: ApiRequestProps<GetDatasetListBody>) {
name: dataset.name,
intro: dataset.intro,
type: dataset.type,
vectorModel: getVectorModel(dataset.vectorModel),
vectorModel: getEmbeddingModel(dataset.vectorModel),
inheritPermission: dataset.inheritPermission,
tmbId: dataset.tmbId,
updateTime: dataset.updateTime,

@ -6,7 +6,7 @@ import { MongoDatasetData } from '@fastgpt/service/core/dataset/data/schema';
import { MongoDatasetTraining } from '@fastgpt/service/core/dataset/training/schema';
import { createTrainingUsage } from '@fastgpt/service/support/wallet/usage/controller';
import { UsageSourceEnum } from '@fastgpt/global/support/wallet/usage/constants';
import { getLLMModel, getVectorModel } from '@fastgpt/service/core/ai/model';
import { getLLMModel, getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import { TrainingModeEnum } from '@fastgpt/global/core/dataset/constants';
import { ApiRequestProps } from '@fastgpt/service/type/next';
import { OwnerPermissionVal } from '@fastgpt/global/support/permission/constant';
@ -49,7 +49,7 @@ async function handler(req: ApiRequestProps<rebuildEmbeddingBody>): Promise<Resp
tmbId,
appName: '切换索引模型',
billSource: UsageSourceEnum.training,
vectorModel: getVectorModel(dataset.vectorModel)?.name,
vectorModel: getEmbeddingModel(dataset.vectorModel)?.name,
agentModel: getLLMModel(dataset.agentModel)?.name
});

@ -1,7 +1,7 @@
import type { NextApiRequest, NextApiResponse } from 'next';
import { UsageSourceEnum } from '@fastgpt/global/support/wallet/usage/constants';
import { CreateTrainingUsageProps } from '@fastgpt/global/support/wallet/usage/api.d';
import { getLLMModel, getVectorModel } from '@fastgpt/service/core/ai/model';
import { getLLMModel, getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import { createTrainingUsage } from '@fastgpt/service/support/wallet/usage/controller';
import { authDataset } from '@fastgpt/service/support/permission/dataset/auth';
import { WritePermissionVal } from '@fastgpt/global/support/permission/constant';
@ -23,7 +23,7 @@ async function handler(req: NextApiRequest) {
tmbId,
appName: name,
billSource: UsageSourceEnum.training,
vectorModel: getVectorModel(dataset.vectorModel).name,
vectorModel: getEmbeddingModel(dataset.vectorModel).name,
agentModel: getLLMModel(dataset.agentModel).name
});

@ -9,6 +9,7 @@ import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
import { NextAPI } from '@/service/middleware/entry';
import { aiTranscriptions } from '@fastgpt/service/core/ai/audio/transcriptions';
import { useReqFrequencyLimit } from '@fastgpt/service/common/middle/reqFrequencyLimit';
import { getFirstSTTModel } from '@fastgpt/service/core/ai/model';
const upload = getUploadModel({
maxSize: 5
@ -36,7 +37,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
filePaths = [file.path];
if (!global.whisperModel) {
if (!getFirstSTTModel()) {
throw new Error('whisper model not found');
}
@ -65,7 +66,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
// }
const result = await aiTranscriptions({
model: global.whisperModel.model,
model: getFirstSTTModel().model,
fileStream: fs.createReadStream(file.path)
});

@ -4,7 +4,7 @@ import { pushGenerateVectorUsage } from '@/service/support/wallet/usage/push';
import { getVectorsByText } from '@fastgpt/service/core/ai/embedding';
import { updateApiKeyUsage } from '@fastgpt/service/support/openapi/tools';
import { getUsageSourceByAuthType } from '@fastgpt/global/support/wallet/usage/tools';
import { getVectorModel } from '@fastgpt/service/core/ai/model';
import { getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import { checkTeamAIPoints } from '@fastgpt/service/support/permission/teamLimit';
import { EmbeddingTypeEnm } from '@fastgpt/global/core/ai/constants';
import { NextAPI } from '@/service/middleware/entry';
@ -36,7 +36,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
const { tokens, vectors } = await getVectorsByText({
input: query,
model: getVectorModel(model),
model: getEmbeddingModel(model),
type
});

@ -9,7 +9,7 @@ import { useSystemStore } from '@/web/common/system/useSystemStore';
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
import AIModelSelector from '@/components/Select/AIModelSelector';
import { postRebuildEmbedding } from '@/web/core/dataset/api';
import type { VectorModelItemType } from '@fastgpt/global/core/ai/model.d';
import type { EmbeddingModelItemType } from '@fastgpt/global/core/ai/model.d';
import { useContextSelector } from 'use-context-selector';
import { DatasetPageContext } from '@/web/core/dataset/context/datasetPageContext';
import MyDivider from '@fastgpt/web/components/common/MyDivider/index';
@ -50,7 +50,7 @@ const Info = ({ datasetId }: { datasetId: string }) => {
const vectorModel = watch('vectorModel');
const agentModel = watch('agentModel');
const { feConfigs, datasetModelList, vectorModelList } = useSystemStore();
const { feConfigs, datasetModelList, embeddingModelList } = useSystemStore();
const { ConfirmModal: ConfirmDelModal } = useConfirm({
content: t('common:core.dataset.Delete Confirm'),
type: 'delete'
@ -80,7 +80,7 @@ const Info = ({ datasetId }: { datasetId: string }) => {
);
const { runAsync: onRebuilding } = useRequest2(
(vectorModel: VectorModelItemType) => {
(vectorModel: EmbeddingModelItemType) => {
return postRebuildEmbedding({
datasetId,
vectorModel: vectorModel.model
@ -186,12 +186,12 @@ const Info = ({ datasetId }: { datasetId: string }) => {
)
: undefined
}
list={vectorModelList.map((item) => ({
list={embeddingModelList.map((item) => ({
label: item.name,
value: item.model
}))}
onchange={(e) => {
const vectorModel = vectorModelList.find((item) => item.model === e);
const vectorModel = embeddingModelList.find((item) => item.model === e);
if (!vectorModel) return;
return onOpenConfirmRebuild(async () => {
await onRebuilding(vectorModel);

@ -67,7 +67,7 @@ const InputDataModal = ({
const theme = useTheme();
const { toast } = useToast();
const [currentTab, setCurrentTab] = useState(TabEnum.content);
const { vectorModelList } = useSystemStore();
const { embeddingModelList } = useSystemStore();
const { isPc } = useSystem();
const { register, handleSubmit, reset, control } = useForm<InputDataType>();
const {
@ -158,11 +158,11 @@ const InputDataModal = ({
const maxToken = useMemo(() => {
const vectorModel =
vectorModelList.find((item) => item.model === collection.dataset.vectorModel) ||
vectorModelList[0];
embeddingModelList.find((item) => item.model === collection.dataset.vectorModel) ||
embeddingModelList[0];
return vectorModel?.maxToken || 3000;
}, [collection.dataset.vectorModel, vectorModelList]);
}, [collection.dataset.vectorModel, embeddingModelList]);
// import new data
const { mutate: sureImportData, isLoading: isImporting } = useRequest({

@ -41,7 +41,7 @@ const CreateModal = ({
const { t } = useTranslation();
const { toast } = useToast();
const router = useRouter();
const { vectorModelList, datasetModelList } = useSystemStore();
const { embeddingModelList, datasetModelList } = useSystemStore();
const { isPc } = useSystem();
const datasetTypeMap = useMemo(() => {
@ -69,7 +69,7 @@ const CreateModal = ({
};
}, [t]);
const filterNotHiddenVectorModelList = vectorModelList.filter((item) => !item.hidden);
const filterNotHiddenVectorModelList = embeddingModelList.filter((item) => !item.hidden);
const form = useForm<CreateDatasetParams>({
defaultValues: {
@ -172,73 +172,69 @@ const CreateModal = ({
/>
</Flex>
</Box>
{filterNotHiddenVectorModelList.length > 1 && (
<Flex
mt={6}
alignItems={['flex-start', 'center']}
justify={'space-between'}
flexDir={['column', 'row']}
<Flex
mt={6}
alignItems={['flex-start', 'center']}
justify={'space-between'}
flexDir={['column', 'row']}
>
<HStack
spacing={1}
alignItems={'center'}
flex={['', '0 0 110px']}
fontSize={'sm'}
color={'myGray.900'}
fontWeight={500}
pb={['12px', '0']}
>
<HStack
spacing={1}
alignItems={'center'}
flex={['', '0 0 110px']}
fontSize={'sm'}
color={'myGray.900'}
fontWeight={500}
pb={['12px', '0']}
>
<Box>{t('common:core.ai.model.Vector Model')}</Box>
<QuestionTip label={t('common:core.dataset.embedding model tip')} />
</HStack>
<Box w={['100%', '300px']}>
<AIModelSelector
w={['100%', '300px']}
value={vectorModel}
list={filterNotHiddenVectorModelList.map((item) => ({
label: item.name,
value: item.model
}))}
onchange={(e) => {
setValue('vectorModel' as const, e);
}}
/>
</Box>
</Flex>
)}
{datasetModelList.length > 1 && (
<Flex
mt={6}
alignItems={['flex-start', 'center']}
justify={'space-between'}
flexDir={['column', 'row']}
<Box>{t('common:core.ai.model.Vector Model')}</Box>
<QuestionTip label={t('common:core.dataset.embedding model tip')} />
</HStack>
<Box w={['100%', '300px']}>
<AIModelSelector
w={['100%', '300px']}
value={vectorModel}
list={filterNotHiddenVectorModelList.map((item) => ({
label: item.name,
value: item.model
}))}
onchange={(e) => {
setValue('vectorModel' as const, e);
}}
/>
</Box>
</Flex>
<Flex
mt={6}
alignItems={['flex-start', 'center']}
justify={'space-between'}
flexDir={['column', 'row']}
>
<HStack
spacing={1}
flex={['', '0 0 110px']}
fontSize={'sm'}
color={'myGray.900'}
fontWeight={500}
pb={['12px', '0']}
>
<HStack
spacing={1}
flex={['', '0 0 110px']}
fontSize={'sm'}
color={'myGray.900'}
fontWeight={500}
pb={['12px', '0']}
>
<Box>{t('common:core.ai.model.Dataset Agent Model')}</Box>
<QuestionTip label={t('dataset:file_model_function_tip')} />
</HStack>
<Box w={['100%', '300px']}>
<AIModelSelector
w={['100%', '300px']}
value={agentModel}
list={datasetModelList.map((item) => ({
label: item.name,
value: item.model
}))}
onchange={(e) => {
setValue('agentModel' as const, e);
}}
/>
</Box>
</Flex>
)}
<Box>{t('common:core.ai.model.Dataset Agent Model')}</Box>
<QuestionTip label={t('dataset:file_model_function_tip')} />
</HStack>
<Box w={['100%', '300px']}>
<AIModelSelector
w={['100%', '300px']}
value={agentModel}
list={datasetModelList.map((item) => ({
label: item.name,
value: item.model
}))}
onchange={(e) => {
setValue('agentModel' as const, e);
}}
/>
</Box>
</Flex>
{/* @ts-ignore */}
<ApiDatasetForm type={type} form={form} />
</ModalBody>

@ -31,6 +31,7 @@ type OAuthItem = {
const FormLayout = ({ children, setPageType, pageType }: Props) => {
const { t } = useTranslation();
const router = useRouter();
const rootLogin = router.query.rootLogin === '1';
const { setLoginStore, feConfigs } = useSystemStore();
const { isPc } = useSystem();
@ -147,7 +148,9 @@ const FormLayout = ({ children, setPageType, pageType }: Props) => {
[lastRoute, router, setLoginStore, setPageType]
);
// Auto login
useEffect(() => {
if (rootLogin) return;
const sso = oAuthList.find((item) => item.provider === OAuthEnum.sso);
const wecom = oAuthList.find((item) => item.provider === OAuthEnum.wecom);
if (feConfigs?.sso?.autoLogin && sso) {
@ -157,7 +160,7 @@ const FormLayout = ({ children, setPageType, pageType }: Props) => {
// Auto wecom login
onClickOauth(wecom);
}
}, [feConfigs?.sso?.autoLogin, isWecomWorkTerminal, onClickOauth]);
}, [rootLogin, feConfigs?.sso?.autoLogin, isWecomWorkTerminal, onClickOauth]);
return (
<Flex flexDirection={'column'} h={'100%'}>

@ -31,7 +31,7 @@ export default React.memo(Points);
export const AiPointsTable = () => {
const { t } = useTranslation();
const { llmModelList, audioSpeechModelList, vectorModelList, whisperModel } = useSystemStore();
const { llmModelList, ttsModelList, embeddingModelList, sttModelList } = useSystemStore();
return (
<Grid gap={6} w={'100%'} color={'myGray.900'}>
@ -85,7 +85,7 @@ export const AiPointsTable = () => {
</Box>
</Box>
<Box flex={4} textAlign={'center'}>
{vectorModelList?.map((item, i) => (
{embeddingModelList?.map((item, i) => (
<Flex key={item.model} py={4} bg={i % 2 !== 0 ? 'myGray.100' : ''}>
<Box flex={'1 0 0'}>{item.name}</Box>
<Box flex={'1 0 0'}>
@ -111,7 +111,7 @@ export const AiPointsTable = () => {
</Box>
</Box>
<Box flex={4} textAlign={'center'}>
{audioSpeechModelList?.map((item, i) => (
{ttsModelList?.map((item, i) => (
<Flex key={item.model} py={4} bg={i % 2 !== 0 ? 'myGray.50' : ''}>
<Box flex={'1 0 0'}>{item.name}</Box>
<Box flex={'1 0 0'}>
@ -138,15 +138,17 @@ export const AiPointsTable = () => {
</Box>
</Box>
<Box flex={4} textAlign={'center'} h={'100%'}>
<Flex py={4}>
<Box flex={'1 0 0'}>{whisperModel?.name}</Box>
<Box flex={'1 0 0'}>
{whisperModel?.charsPointsPrice +
t('common:support.wallet.subscription.point') +
' / 60' +
t('common:unit.seconds')}
</Box>
</Flex>
{sttModelList.map((item) => (
<Flex key={item.model} py={4}>
<Box flex={'1 0 0'}>{item.name}</Box>
<Box flex={'1 0 0'}>
{item.charsPointsPrice +
t('common:support.wallet.subscription.point') +
' / 60' +
t('common:unit.seconds')}
</Box>
</Flex>
))}
</Box>
</Box>
</Grid>

@ -12,6 +12,7 @@ import { SystemPluginTemplateItemType } from '@fastgpt/global/core/workflow/type
import { defaultGroup, defaultTemplateTypes } from '@fastgpt/web/core/workflow/constants';
import { MongoPluginGroups } from '@fastgpt/service/core/app/plugin/pluginGroupSchema';
import { MongoTemplateTypes } from '@fastgpt/service/core/app/templates/templateTypeSchema';
import { loadSystemModels } from '@fastgpt/service/core/ai/config/utils';
export const readConfigData = async (name: string) => {
const splitName = name.split('.');
@ -50,6 +51,7 @@ export async function getInitConfig() {
return Promise.all([
initSystemConfig(),
getSystemVersion(),
loadSystemModels(),
// abandon
getSystemPlugin()
@ -78,7 +80,7 @@ const defaultFeConfigs: FastGPTFeConfigsType = {
export async function initSystemConfig() {
// load config
const [{ config: dbConfig, configId }, fileConfig] = await Promise.all([
const [{ config: dbConfig }, fileConfig] = await Promise.all([
getFastGPTConfigFromDB(),
readConfigData('config.json')
]);
@ -96,27 +98,16 @@ export async function initSystemConfig() {
...fileRes.systemEnv,
...(dbConfig.systemEnv || {})
},
subPlans: dbConfig.subPlans || fileRes.subPlans,
llmModels: dbConfig.llmModels || fileRes.llmModels || [],
vectorModels: dbConfig.vectorModels || fileRes.vectorModels || [],
reRankModels: dbConfig.reRankModels || fileRes.reRankModels || [],
audioSpeechModels: dbConfig.audioSpeechModels || fileRes.audioSpeechModels || [],
whisperModel: dbConfig.whisperModel || fileRes.whisperModel
subPlans: dbConfig.subPlans || fileRes.subPlans
};
// set config
global.systemInitBufferId = configId;
initFastGPTConfig(config);
console.log({
feConfigs: global.feConfigs,
systemEnv: global.systemEnv,
subPlans: global.subPlans,
llmModels: global.llmModels,
vectorModels: global.vectorModels,
reRankModels: global.reRankModels,
audioSpeechModels: global.audioSpeechModels,
whisperModel: global.whisperModel
subPlans: global.subPlans
});
}

@ -6,12 +6,14 @@ import { MongoSystemPlugin } from '@fastgpt/service/core/app/plugin/systemPlugin
import { debounce } from 'lodash';
import { MongoAppTemplate } from '@fastgpt/service/core/app/templates/templateSchema';
import { getAppTemplatesAndLoadThem } from '@fastgpt/templates/register';
import { watchSystemModelUpdate } from '@fastgpt/service/core/ai/config/utils';
export const startMongoWatch = async () => {
reloadConfigWatch();
refetchSystemPlugins();
createDatasetTrainingMongoWatch();
refetchAppTemplates();
watchSystemModelUpdate();
};
const reloadConfigWatch = () => {

@ -1,11 +1,12 @@
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node.d';
import { getLLMModel } from '@fastgpt/service/core/ai/model';
export const getChatModelNameListByModules = (nodes: StoreNodeItemType[]): string[] => {
const modelList = nodes
.map((item) => {
const model = item.inputs.find((input) => input.key === NodeInputKeyEnum.aiModel)?.value;
return global.llmModels.find((item) => item.model === model)?.name || '';
return getLLMModel(model)?.name || '';
})
.filter(Boolean);

@ -9,7 +9,7 @@ import { getDefaultIndex } from '@fastgpt/global/core/dataset/utils';
import { jiebaSplit } from '@fastgpt/service/common/string/jieba';
import { deleteDatasetDataVector } from '@fastgpt/service/common/vectorStore/controller';
import { DatasetDataItemType } from '@fastgpt/global/core/dataset/type';
import { getVectorModel } from '@fastgpt/service/core/ai/model';
import { getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun';
import { ClientSession } from '@fastgpt/service/common/mongo';
import { MongoDatasetDataText } from '@fastgpt/service/core/dataset/data/dataTextSchema';
@ -71,7 +71,7 @@ export async function insertData2Dataset({
indexes.map((item) =>
insertDatasetDataVector({
query: item.text,
model: getVectorModel(model),
model: getEmbeddingModel(model),
teamId,
datasetId,
collectionId
@ -219,7 +219,7 @@ export async function updateData2Dataset({
if (item.type === 'create' || item.type === 'update') {
const result = await insertDatasetDataVector({
query: item.index.text,
model: getVectorModel(model),
model: getEmbeddingModel(model),
teamId: mongoData.teamId,
datasetId: mongoData.datasetId,
collectionId: mongoData.collectionId

@ -11,7 +11,7 @@ import {
deleteDatasetDataVector,
insertDatasetDataVector
} from '@fastgpt/service/common/vectorStore/controller';
import { getVectorModel } from '@fastgpt/service/core/ai/model';
import { getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun';
import { DatasetTrainingSchemaType } from '@fastgpt/global/core/dataset/type';
import { Document } from '@fastgpt/service/common/mongo';
@ -207,7 +207,7 @@ const rebuildData = async ({
mongoData.indexes.map(async (index, i) => {
const result = await insertDatasetDataVector({
query: index.text,
model: getVectorModel(trainingData.model),
model: getEmbeddingModel(trainingData.model),
teamId: mongoData.teamId,
datasetId: mongoData.datasetId,
collectionId: mongoData.collectionId

@ -1,10 +1,11 @@
import { UsageSourceEnum } from '@fastgpt/global/support/wallet/usage/constants';
import { ModelTypeEnum } from '@fastgpt/service/core/ai/model';
import { addLog } from '@fastgpt/service/common/system/log';
import { createUsage, concatUsage } from './controller';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { i18nT } from '@fastgpt/web/i18n/utils';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import { getFirstLLMModel, getFirstSTTModel } from '@fastgpt/service/core/ai/model';
export const pushChatUsage = ({
appName,
@ -108,7 +109,7 @@ export const pushGenerateVectorUsage = ({
extensionOutputTokens?: number;
}) => {
const { totalPoints: totalVector, modelName: vectorModelName } = formatModelChars2Points({
modelType: ModelTypeEnum.vector,
modelType: ModelTypeEnum.embedding,
model,
inputTokens
});
@ -185,7 +186,7 @@ export const pushQuestionGuideUsage = ({
teamId: string;
tmbId: string;
}) => {
const qgModel = global.llmModels[0];
const qgModel = getFirstLLMModel();
const { totalPoints, modelName } = formatModelChars2Points({
inputTokens,
outputTokens,
@ -229,7 +230,7 @@ export function pushAudioSpeechUsage({
const { totalPoints, modelName } = formatModelChars2Points({
model,
inputTokens: charsLength,
modelType: ModelTypeEnum.audioSpeech
modelType: ModelTypeEnum.tts
});
createUsage({
@ -258,14 +259,14 @@ export function pushWhisperUsage({
tmbId: string;
duration: number;
}) {
const whisperModel = global.whisperModel;
const whisperModel = getFirstSTTModel();
if (!whisperModel) return;
const { totalPoints, modelName } = formatModelChars2Points({
model: whisperModel.model,
inputTokens: duration,
modelType: ModelTypeEnum.whisper,
modelType: ModelTypeEnum.stt,
multiple: 60
});

@ -1,10 +1,10 @@
import {
AudioSpeechModelType,
TTSModelType,
ChatModelItemType,
FunctionModelItemType,
LLMModelItemType,
ReRankModelItemType,
VectorModelItemType,
EmbeddingModelItemType,
STTModelType
} from '@fastgpt/global/core/ai/model.d';
import { TrackEventName } from '@/web/common/system/constants';

@ -4,16 +4,16 @@ import { immer } from 'zustand/middleware/immer';
import axios from 'axios';
import { OAuthEnum } from '@fastgpt/global/support/user/constant';
import type {
AudioSpeechModelType,
TTSModelType,
LLMModelItemType,
ReRankModelItemType,
VectorModelItemType,
EmbeddingModelItemType,
STTModelType
} from '@fastgpt/global/core/ai/model.d';
import { InitDateResponse } from '@/global/common/api/systemRes';
import { FastGPTFeConfigsType } from '@fastgpt/global/common/system/types';
import { SubPlanType } from '@fastgpt/global/support/wallet/sub/type';
import { defaultWhisperModel } from '@fastgpt/global/core/ai/model';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import { TeamErrEnum } from '@fastgpt/global/common/error/code/team';
type LoginStoreType = { provider: `${OAuthEnum}`; lastRoute: string; state: string };
@ -51,10 +51,10 @@ type State = {
systemVersion: string;
llmModelList: LLMModelItemType[];
datasetModelList: LLMModelItemType[];
vectorModelList: VectorModelItemType[];
audioSpeechModelList: AudioSpeechModelType[];
embeddingModelList: EmbeddingModelItemType[];
ttsModelList: TTSModelType[];
reRankModelList: ReRankModelItemType[];
whisperModel: STTModelType;
sttModelList: STTModelType[];
initStaticData: (e: InitDateResponse) => void;
appType?: string;
setAppType: (e?: string) => void;
@ -127,10 +127,10 @@ export const useSystemStore = create<State>()(
systemVersion: '0.0.0',
llmModelList: [],
datasetModelList: [],
vectorModelList: [],
audioSpeechModelList: [],
embeddingModelList: [],
ttsModelList: [],
reRankModelList: [],
whisperModel: defaultWhisperModel,
sttModelList: [],
initStaticData(res) {
set((state) => {
state.initDataBufferId = res.bufferId;
@ -139,12 +139,22 @@ export const useSystemStore = create<State>()(
state.subPlans = res.subPlans ?? state.subPlans;
state.systemVersion = res.systemVersion ?? state.systemVersion;
state.llmModelList = res.llmModels ?? state.llmModelList;
state.llmModelList =
res.activeModelList?.filter((item) => item.type === ModelTypeEnum.llm) ??
state.llmModelList;
state.datasetModelList = state.llmModelList.filter((item) => item.datasetProcess);
state.vectorModelList = res.vectorModels ?? state.vectorModelList;
state.audioSpeechModelList = res.audioSpeechModels ?? state.audioSpeechModelList;
state.reRankModelList = res.reRankModels ?? state.reRankModelList;
state.whisperModel = res.whisperModel ?? state.whisperModel;
state.embeddingModelList =
res.activeModelList?.filter((item) => item.type === ModelTypeEnum.embedding) ??
state.embeddingModelList;
state.ttsModelList =
res.activeModelList?.filter((item) => item.type === ModelTypeEnum.tts) ??
state.ttsModelList;
state.reRankModelList =
res.activeModelList?.filter((item) => item.type === ModelTypeEnum.rerank) ??
state.reRankModelList;
state.sttModelList =
res.activeModelList?.filter((item) => item.type === ModelTypeEnum.stt) ??
state.sttModelList;
});
}
})),
@ -158,10 +168,10 @@ export const useSystemStore = create<State>()(
systemVersion: state.systemVersion,
llmModelList: state.llmModelList,
datasetModelList: state.datasetModelList,
vectorModelList: state.vectorModelList,
audioSpeechModelList: state.audioSpeechModelList,
embeddingModelList: state.embeddingModelList,
ttsModelList: state.ttsModelList,
reRankModelList: state.reRankModelList,
whisperModel: state.whisperModel
sttModelList: state.sttModelList
})
}
)

@ -0,0 +1,18 @@
import { GET, PUT, DELETE } from '@/web/common/api/request';
import type { listResponse } from '@/pages/api/core/ai/model/list';
import type { updateBody } from '@/pages/api/core/ai/model/update';
import type { deleteQuery } from '@/pages/api/core/ai/model/delete';
import type { SystemModelItemType } from '@fastgpt/service/core/ai/type';
import type { updateWithJsonBody } from '@/pages/api/core/ai/model/updateWithJson';
export const getSystemModelList = () => GET<listResponse>('/core/ai/model/list');
export const getSystemModelDetail = (model: string) =>
GET<SystemModelItemType>('/core/ai/model/detail', { model });
export const putSystemModel = (data: updateBody) => PUT('/core/ai/model/update', data);
export const deleteSystemModel = (data: deleteQuery) => DELETE('/core/ai/model/delete', data);
export const getModelConfigJson = () => GET<string>('/core/ai/model/getConfigJson');
export const putUpdateWithJson = (data: updateWithJsonBody) =>
PUT('/core/ai/model/updateWithJson', data);
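
Taken together, a typical admin flow with these helpers might look like the sketch below (error handling omitted; the model ids are illustrative):

import {
  getSystemModelList,
  getSystemModelDetail,
  putSystemModel,
  deleteSystemModel
} from '@/web/core/ai/config';

// List active models, inspect one, push a config tweak, and drop a custom model.
const models = await getSystemModelList();
const detail = await getSystemModelDetail(models[0].model);
await putSystemModel({ model: detail.model, metadata: { name: detail.name.trim() } });
await deleteSystemModel({ model: 'my-custom-model' }); // rejected unless isCustom is true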