/*
 * Merge commit log (wrapped in a comment so the file remains valid TypeScript):
 *
 * Aiproxy (#3649) * model config * feat: model config ui * perf: rename variable * feat: custom request url * perf: model buffer * perf: init model * feat: json model config * auto login * fix: ts * update packages * package * fix: dockerfile * feat: usage filter & export & dashboard (#3538) * feat: usage filter & export & dashboard * adjust ui * fix tmb scroll * fix code & select all * merge * perf: usages list; perf: move components (#3654) * perf: usages list * team sub plan load * perf: usage dashboard code * perf: dashboard ui * perf: move components * add default model config (#3653) * 4.8.20 test (#3656) * provider * perf: model config * model perf (#3657) * fix: model * dataset quote * perf: model config * model tag * doubao model config * perf: config model * feat: model test * fix: POST 500 error on dingtalk bot (#3655) * feat: default model (#3662) * move model config * feat: default model * fix: falsely triggered org selection (#3661) * export usage csv i18n (#3660) * export usage csv i18n * fix build * feat: markdown extension (#3663) * feat: markdown extension * media CORS * rerank test * default price * perf: default model * fix: cannot custom provider * fix: default model select * update bg * perf: default model selector * fix: usage export * i18n * fix: rerank * update init extension * perf: ip limit check * doubao model order * web default model * perf: tts selector * perf: tts error * qrcode package * reload buffer (#3665) * reload buffer * reload buffer * tts selector * fix: err tip (#3666) * fix: err tip * perf: training queue * doc * fix interactive edge (#3659) * fix interactive edge * fix * comment * add gemini model * fix: chat model select * perf: supplement assistant empty response (#3669) * perf: supplement assistant empty response * check array * perf: max_token count; feat: support reasoner output; fix: member scroll (#3681) * perf: supplement assistant empty response * check array * perf: max_token count * feat: support reasoner output * member scroll * update provider order * i18n * fix: stream response (#3682) * perf: supplement assistant empty response * check array * fix: stream response * fix: model config cannot set to null * fix: reasoning response (#3684) * perf: supplement assistant empty response * check array * fix: reasoning response * fix: reasoning response * doc (#3685) * perf: supplement assistant empty response * check array * doc * lock * animation * update doc * update compose * doc * doc
 *
 * Co-authored-by: heheer <heheer@sealos.io>
 * Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
 *
 * File stats: 78 lines, 2.0 KiB, TypeScript
 */
import { i18nT } from '../../../web/i18n/utils';
import type { LLMModelItemType, STTModelType, EmbeddingModelItemType } from './model.d';
import { getModelProvider, type ModelProviderIdType } from './provider';

/**
 * Categories of AI models handled by the system.
 * Used to tag model configurations and to filter model lists (see `modelTypeList`).
 */
export enum ModelTypeEnum {
  llm = 'llm', // chat / completion models
  embedding = 'embedding', // text-to-vector models
  tts = 'tts', // text-to-speech models
  stt = 'stt', // speech-to-text models
  rerank = 'rerank' // result re-ranking models
}

/**
 * Built-in fallback LLM configuration (OpenAI gpt-4o-mini).
 * NOTE(review): presumably used as the default when no LLM has been configured —
 * confirm against the config-loading callers.
 */
export const defaultQAModels: LLMModelItemType[] = [
  {
    type: ModelTypeEnum.llm,
    provider: 'OpenAI',
    model: 'gpt-4o-mini',
    name: 'gpt-4o-mini',
    maxContext: 16000, // token budget limits; exact semantics defined by LLMModelItemType
    maxResponse: 16000,
    quoteMaxToken: 13000,
    maxTemperature: 1.2,
    charsPointsPrice: 0, // 0 — no billing points charged for this default
    censor: false,
    vision: false,
    datasetProcess: true, // eligible for dataset QA processing
    toolChoice: true, // supports tool-choice style calls; functionCall disabled below
    functionCall: false,
    customCQPrompt: '', // empty string — use the built-in prompts
    customExtractPrompt: '',
    defaultSystemChatPrompt: '',
    defaultConfig: {}
  }
];

/**
 * Built-in fallback embedding (vector) model configuration.
 * NOTE(review): display name 'Embedding-2' does not match model id
 * 'text-embedding-3-small' — looks like a stale label; confirm before changing.
 */
export const defaultVectorModels: EmbeddingModelItemType[] = [
  {
    type: ModelTypeEnum.embedding,
    provider: 'OpenAI',
    model: 'text-embedding-3-small',
    name: 'Embedding-2',
    charsPointsPrice: 0, // 0 — no billing points charged for this default
    defaultToken: 500, // token sizing fields; exact semantics defined by EmbeddingModelItemType
    maxToken: 3000,
    weight: 100
  }
];

/**
 * Built-in fallback speech-to-text model configuration (OpenAI whisper-1).
 */
export const defaultSTTModels: STTModelType[] = [
  {
    type: ModelTypeEnum.stt,
    provider: 'OpenAI',
    model: 'whisper-1',
    name: 'whisper-1',
    charsPointsPrice: 0 // 0 — no billing points charged for this default
  }
];

export const getModelFromList = (
|
|
modelList: { provider: ModelProviderIdType; name: string; model: string }[],
|
|
model: string
|
|
) => {
|
|
const modelData = modelList.find((item) => item.model === model) ?? modelList[0];
|
|
const provider = getModelProvider(modelData.provider);
|
|
return {
|
|
...modelData,
|
|
avatar: provider.avatar
|
|
};
|
|
};
|
|
|
|
/**
 * Model-type options for UI filters/selectors.
 * Labels are i18n keys resolved through `i18nT`; values are `ModelTypeEnum` members.
 */
export const modelTypeList = [
  { label: i18nT('common:model.type.chat'), value: ModelTypeEnum.llm },
  { label: i18nT('common:model.type.embedding'), value: ModelTypeEnum.embedding },
  { label: i18nT('common:model.type.tts'), value: ModelTypeEnum.tts },
  { label: i18nT('common:model.type.stt'), value: ModelTypeEnum.stt },
  { label: i18nT('common:model.type.reRank'), value: ModelTypeEnum.rerank }
];