FastGPT/packages/web/i18n/en/account.json
Archer e009be51e7
Aiproxy (#3649)
* model config

* feat: model config ui

* perf: rename variable

* feat: custom request url

* perf: model buffer

* perf: init model

* feat: json model config

* auto login

* fix: ts

* update packages

* package

* fix: dockerfile
2025-01-22 22:59:28 +08:00


{
"active_model": "Available models",
"add_default_model": "Add a preset model",
"api_key": "API key",
"bills_and_invoices": "Bills",
"channel": "Channel",
"config_model": "Model configuration",
"confirm_logout": "Confirm to log out?",
"create_channel": "Add new channel",
"create_model": "Add new model",
"custom_model": "custom model",
"default_model": "Default model",
"logout": "Sign out",
"model.active": "Active",
"model.alias": "Alias",
"model.alias_tip": "The name of the model displayed in the system is convenient for users to understand.",
"model.censor": "Censor check",
"model.censor_tip": "If sensitive verification is required, turn on this switch",
"model.charsPointsPrice": "Chars Price",
"model.charsPointsPrice_tip": "Combine the model input and output for Token billing. If the language model is configured with input and output billing separately, the input and output will be calculated separately.",
"model.custom_cq_prompt": "Custom question classification prompt words",
"model.custom_cq_prompt_tip": "Override the system's default question classification prompt words, which default to:\n\"\"\"\n请帮我执行一个“问题分类”任务将问题分类为以下几种类型之一\n\n\"\"\"\n{{typeList}}\n\"\"\"\n\n## 背景知识\n{{systemPrompt}}\n\n## 对话记录\n{{history}}\n\n## 开始任务\n\n现在我们开始分类我会给你一个\"问题\"请结合背景知识和对话记录将问题分类到对应的类型中并返回类型ID。\n\n问题\"{{question}}\"\n类型ID=\n\"\"\"",
"model.custom_extract_prompt": "Custom content extraction prompt words",
"model.custom_extract_prompt_tip": "Override system prompt word, default is:\n\"\"\"\n你可以从 <对话记录></对话记录> 中提取指定 Json 信息,你仅需返回 Json 字符串,无需回答问题。\n<提取要求>\n{{description}}\n</提取要求>\n\n<提取规则>\n- 本次需提取的 json 字符串,需符合 JsonSchema 的规则。\n- type 代表数据类型; key 代表字段名; description 代表字段的描述; enum 是枚举值,代表可选的 value。\n- 如果没有可提取的内容,忽略该字段。\n</提取规则>\n\n<JsonSchema>\n{{json}}\n</JsonSchema>\n\n<对话记录>\n{{text}}\n</对话记录>\n\n提取的 json 字符串:\n\"\"\"",
"model.dataset_process": "Dataset file parse",
"model.default_config": "Body extra fields",
"model.default_config_tip": "When initiating a conversation request, merge this configuration. \nFor example:\n\"\"\"\n{\n \"temperature\": 1,\n \"max_tokens\": null\n}\n\"\"\"",
"model.default_system_chat_prompt": "Default prompt",
"model.default_system_chat_prompt_tip": "When the model talks, it will carry this default prompt word.",
"model.default_token": "Default tokens",
"model.default_token_tip": "The length of the default text block of the index model must be less than the maximum length above",
"model.delete_model_confirm": "Confirm to delete this model?",
"model.edit_model": "Model parameter editing",
"model.function_call": "Function Call",
"model.function_call_tip": "If the model supports function calling, turn on this switch. \nTool calls have higher priority.",
"model.input_price": "Input price",
"model.input_price_tip": "Language model input price. If this item is configured, the model comprehensive price will be invalid.",
"model.json_config": "File config",
"model.json_config_confirm": "Confirm to use this configuration for override?",
"model.json_config_tip": "Configure the model through the configuration file. After clicking Confirm, the entered configuration will be used for full coverage. Please ensure that the configuration file is entered correctly. \nIt is recommended to copy the current configuration file for backup before operation.",
"model.max_quote": "KB max quote",
"model.max_temperature": "Max temperature",
"model.model_id": "Model ID",
"model.model_id_tip": "The unique identifier of the model, that is, the value of the actual request to the service provider model, needs to correspond to the model in the OneAPI channel.",
"model.output_price": "Output price",
"model.output_price_tip": "The language model output price. If this item is configured, the model comprehensive price will be invalid.",
"model.param_name": "Parameter name",
"model.request_auth": "Custom token",
"model.request_auth_tip": "When making a request to a custom request address, carry the request header: Authorization: Bearer xxx to make the request.",
"model.request_url": "Custom url",
"model.request_url_tip": "If this value is filled in, a request will be made directly to this address without going through OneAPI",
"model.tool_choice": "Tool choice",
"model.tool_choice_tip": "If the model supports tool calling, turn on this switch",
"model.used_in_classify": "Used for problem classification",
"model.used_in_extract_fields": "for text extraction",
"model.used_in_tool_call": "Used for tool call nodes",
"model.vision": "Vision model",
"model.vision_tip": "If the model supports image recognition, turn on this switch.",
"model.voices": "voice role",
"model.voices_tip": "Configure multiple through an array, for example:\n\n[\n {\n \"label\": \"Alloy\",\n \"value\": \"alloy\"\n },\n {\n \"label\": \"Echo\",\n \"value\": \"echo\"\n }\n]",
"model_provider": "Model Provider",
"notifications": "Notify",
"personal_information": "Personal",
"personalization": "Personalization",
"promotion_records": "Promotions",
"team": "Team",
"third_party": "Third Party",
"usage_records": "Usage"
}
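
For context, a minimal sketch of how a key from this locale file might be read at runtime. It assumes this file is registered with i18next as an "account" namespace and that the key separator is disabled so flat keys containing "." (such as "model.alias") resolve literally; the actual wiring in packages/web is not shown in this file, so treat the setup below as illustrative only.

```ts
// Hypothetical usage sketch, not part of account.json.
import i18next from 'i18next';

await i18next.init({
  lng: 'en',
  // Keys in this file are flat strings like "model.alias", not nested objects,
  // so the default "." key separator must be turned off for lookups to work.
  keySeparator: false,
  resources: {
    en: {
      // Assumption: account.json is loaded under the "account" namespace.
      account: { 'model.alias': 'Alias', 'model.model_id': 'Model ID' }
    }
  }
});

console.log(i18next.t('account:model.alias')); // -> "Alias"
```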