diff --git a/packages/global/core/ai/provider.ts b/packages/global/core/ai/provider.ts
index 32e8c9915..05bc78407 100644
--- a/packages/global/core/ai/provider.ts
+++ b/packages/global/core/ai/provider.ts
@@ -20,6 +20,7 @@ export type ModelProviderIdType =
| 'Baichuan'
| 'StepFun'
| 'Yi'
+ | 'Siliconflow'
| 'Ollama'
| 'BAAI'
| 'FishAudio'
@@ -155,6 +156,11 @@ export const ModelProviderList: ModelProviderType[] = [
name: i18nT('common:model_moka'),
avatar: 'model/moka'
},
+ {
+ id: 'Siliconflow',
+ name: i18nT('common:model_siliconflow'),
+ avatar: 'model/siliconflow'
+ },
{
id: 'Other',
name: i18nT('common:model_other'),
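As a reading aid for the provider change above, here is a minimal sketch of how the new 'Siliconflow' entry could be looked up from the ModelProviderList exported by this file. The import path and helper name are assumptions for illustration and are not part of this diff.

```ts
// Hypothetical lookup helper; ModelProviderList and ModelProviderIdType are the
// exports touched above, the import path is assumed from the package layout.
import { ModelProviderList, type ModelProviderIdType } from '@fastgpt/global/core/ai/provider';

const getModelProvider = (id: ModelProviderIdType) =>
  ModelProviderList.find((provider) => provider.id === id);

const siliconflow = getModelProvider('Siliconflow');
// siliconflow?.avatar === 'model/siliconflow', matching the icon registered later in this patch
```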
diff --git a/packages/service/core/ai/config/embedding/Embedding-V1.json b/packages/service/core/ai/config/embedding/Embedding-V1.json
new file mode 100644
index 000000000..7c71ad428
--- /dev/null
+++ b/packages/service/core/ai/config/embedding/Embedding-V1.json
@@ -0,0 +1,10 @@
+{
+ "provider": "Ernie",
+ "model": "Embedding-V1",
+ "name": "Embedding-V1",
+
+ "defaultToken": 512,
+ "maxToken": 1000,
+
+ "charsPointsPrice": 0
+}
diff --git a/packages/service/core/ai/config/embedding/baidu-tao-8k.json b/packages/service/core/ai/config/embedding/baidu-tao-8k.json
new file mode 100644
index 000000000..606aa906d
--- /dev/null
+++ b/packages/service/core/ai/config/embedding/baidu-tao-8k.json
@@ -0,0 +1,10 @@
+{
+ "provider": "Ernie",
+ "model": "tao-8k",
+ "name": "tao-8k",
+
+ "defaultToken": 512,
+ "maxToken": 8000,
+
+ "charsPointsPrice": 0
+}
diff --git a/packages/service/core/ai/config/embedding/bge-m3.json b/packages/service/core/ai/config/embedding/bge-m3.json
new file mode 100644
index 000000000..4963c7302
--- /dev/null
+++ b/packages/service/core/ai/config/embedding/bge-m3.json
@@ -0,0 +1,10 @@
+{
+ "provider": "BAAI",
+ "model": "bge-m3",
+ "name": "bge-m3",
+
+ "defaultToken": 512,
+ "maxToken": 8000,
+
+ "charsPointsPrice": 0
+}
diff --git a/packages/service/core/ai/config/embedding/doubao-embedding-large.json b/packages/service/core/ai/config/embedding/doubao-embedding-large.json
new file mode 100644
index 000000000..ee474f990
--- /dev/null
+++ b/packages/service/core/ai/config/embedding/doubao-embedding-large.json
@@ -0,0 +1,10 @@
+{
+ "provider": "Doubao",
+ "model": "Doubao-embedding-large",
+ "name": "Doubao-embedding-large",
+
+ "defaultToken": 512,
+ "maxToken": 4096,
+
+ "charsPointsPrice": 0
+}
diff --git a/packages/service/core/ai/config/embedding/doubao-embedding.json b/packages/service/core/ai/config/embedding/doubao-embedding.json
new file mode 100644
index 000000000..acdde31eb
--- /dev/null
+++ b/packages/service/core/ai/config/embedding/doubao-embedding.json
@@ -0,0 +1,10 @@
+{
+ "provider": "Doubao",
+ "model": "Doubao-embedding",
+ "name": "Doubao-embedding",
+
+ "defaultToken": 512,
+ "maxToken": 4096,
+
+ "charsPointsPrice": 0
+}
diff --git a/packages/service/core/ai/config/embedding/hunyuan-embedding.json b/packages/service/core/ai/config/embedding/hunyuan-embedding.json
new file mode 100644
index 000000000..725e572f5
--- /dev/null
+++ b/packages/service/core/ai/config/embedding/hunyuan-embedding.json
@@ -0,0 +1,10 @@
+{
+ "provider": "Hunyuan",
+ "model": "hunyuan-embedding",
+ "name": "hunyuan-embedding",
+
+ "defaultToken": 512,
+ "maxToken": 1024,
+
+ "charsPointsPrice": 0
+}
diff --git a/packages/service/core/ai/config/embedding/zhipu-embedding-3.json b/packages/service/core/ai/config/embedding/zhipu-embedding-3.json
new file mode 100644
index 000000000..275a13d2b
--- /dev/null
+++ b/packages/service/core/ai/config/embedding/zhipu-embedding-3.json
@@ -0,0 +1,13 @@
+{
+ "provider": "ChatGLM",
+ "model": "embedding-3",
+ "name": "embedding-3",
+
+ "defaultToken": 512,
+ "maxToken": 8000,
+ "defaultConfig": {
+ "dimensions": 1024
+ },
+
+ "charsPointsPrice": 0
+}
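The embedding config files added above all follow the same shape. A sketch of that shape as a TypeScript type follows; the type name and the field comments are the editor's reading of the JSON, not a definition taken from the repo.

```ts
// Sketch only: inferred from the embedding JSON files above, not the repo's own schema.
type EmbeddingModelConfig = {
  provider: string; // e.g. 'Ernie', 'BAAI', 'Doubao', 'Hunyuan', 'ChatGLM'
  model: string; // model id sent to the embedding API
  name: string; // display name shown in the UI
  defaultToken: number; // default chunk size (tokens) when splitting dataset text
  maxToken: number; // maximum input length accepted by the model
  charsPointsPrice: number; // points price; 0 means free
  defaultConfig?: Record<string, unknown>; // extra request fields, e.g. { dimensions: 1024 }
};
```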
diff --git a/packages/service/core/ai/config/llm/gemini-exp-1206.json b/packages/service/core/ai/config/llm/gemini-exp-1206.json
new file mode 100644
index 000000000..d5ba89541
--- /dev/null
+++ b/packages/service/core/ai/config/llm/gemini-exp-1206.json
@@ -0,0 +1,29 @@
+{
+ "provider": "Gemini",
+ "model": "gemini-exp-1206",
+ "name": "gemini-exp-1206",
+
+ "censor": false,
+ "charsPointsPrice": 0,
+
+ "maxContext": 128000,
+ "maxResponse": 8000,
+ "quoteMaxToken": 120000,
+ "maxTemperature": 1,
+
+ "vision": true,
+ "toolChoice": true,
+ "functionCall": false,
+ "defaultSystemChatPrompt": "",
+
+ "datasetProcess": true,
+ "usedInClassify": true,
+ "customCQPrompt": "",
+ "usedInExtractFields": true,
+ "usedInQueryExtension": true,
+ "customExtractPrompt": "",
+ "usedInToolCall": true,
+
+ "defaultConfig": {},
+ "fieldMap": {}
+}
diff --git a/packages/service/core/ai/config/llm/hunyuan-pro-32k.json b/packages/service/core/ai/config/llm/hunyuan-pro-32k.json
new file mode 100644
index 000000000..616a41839
--- /dev/null
+++ b/packages/service/core/ai/config/llm/hunyuan-pro-32k.json
@@ -0,0 +1,29 @@
+{
+ "provider": "Hunyuan",
+ "model": "hunyuan-pro-32k(测试)",
+ "name": "hunyuan-pro-32k(测试)",
+
+ "censor": false,
+ "charsPointsPrice": 0,
+
+ "maxContext": 28000,
+ "maxResponse": 4000,
+ "quoteMaxToken": 28000,
+ "maxTemperature": 1,
+
+ "vision": false,
+ "toolChoice": false,
+ "functionCall": false,
+ "defaultSystemChatPrompt": "",
+
+ "datasetProcess": true,
+ "usedInClassify": true,
+ "customCQPrompt": "",
+ "usedInExtractFields": true,
+ "usedInQueryExtension": true,
+ "customExtractPrompt": "",
+ "usedInToolCall": true,
+
+ "defaultConfig": {},
+ "fieldMap": {}
+}
diff --git a/packages/service/core/ai/config/llm/hunyuan-turbo-vision.json b/packages/service/core/ai/config/llm/hunyuan-turbo-vision.json
new file mode 100644
index 000000000..c5f0072f8
--- /dev/null
+++ b/packages/service/core/ai/config/llm/hunyuan-turbo-vision.json
@@ -0,0 +1,29 @@
+{
+ "provider": "Hunyuan",
+ "model": "hunyuan-turbo-vision",
+ "name": "hunyuan-turbo-vision",
+
+ "censor": false,
+ "charsPointsPrice": 0,
+
+ "maxContext": 6000,
+ "maxResponse": 2000,
+ "quoteMaxToken": 6000,
+ "maxTemperature": 1,
+
+ "vision": true,
+ "toolChoice": false,
+ "functionCall": false,
+ "defaultSystemChatPrompt": "",
+
+ "datasetProcess": false,
+ "usedInClassify": false,
+ "customCQPrompt": "",
+ "usedInExtractFields": false,
+ "usedInQueryExtension": false,
+ "customExtractPrompt": "",
+ "usedInToolCall": false,
+
+ "defaultConfig": {},
+ "fieldMap": {}
+}
diff --git a/packages/service/core/ai/config/llm/qwen-coder-turbo.json b/packages/service/core/ai/config/llm/qwen-coder-turbo.json
new file mode 100644
index 000000000..48491c157
--- /dev/null
+++ b/packages/service/core/ai/config/llm/qwen-coder-turbo.json
@@ -0,0 +1,29 @@
+{
+ "provider": "Qwen",
+ "model": "qwen-coder-turbo",
+ "name": "qwen-coder-turbo",
+
+ "censor": false,
+ "charsPointsPrice": 0,
+
+ "maxContext": 128000,
+ "maxResponse": 8000,
+ "quoteMaxToken": 50000,
+ "maxTemperature": 1,
+
+ "vision": false,
+ "toolChoice": false,
+ "functionCall": false,
+ "defaultSystemChatPrompt": "",
+
+ "datasetProcess": false,
+ "usedInClassify": false,
+ "customCQPrompt": "",
+ "usedInExtractFields": false,
+ "usedInQueryExtension": false,
+ "customExtractPrompt": "",
+ "usedInToolCall": false,
+
+ "defaultConfig": {},
+ "fieldMap": {}
+}
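Likewise, the LLM config files above share one structure. A hedged sketch of that structure, with the editor's interpretation of each flag in comments (the type name and descriptions are not taken from the repo):

```ts
// Sketch only: field meanings are inferred from the LLM config files above.
type LLMModelConfig = {
  provider: string;
  model: string; // id sent to the chat completion API
  name: string; // display name in the model selector
  censor: boolean; // whether to run a content-safety check on output
  charsPointsPrice: number; // points price; 0 means free
  maxContext: number; // context window in tokens
  maxResponse: number; // maximum tokens per reply
  quoteMaxToken: number; // maximum tokens of quoted dataset content
  maxTemperature: number;
  vision: boolean; // accepts image input
  toolChoice: boolean; // supports tool choice
  functionCall: boolean; // supports legacy function calling
  defaultSystemChatPrompt: string;
  datasetProcess: boolean; // usable for dataset processing
  usedInClassify: boolean; // usable by the question classification node
  customCQPrompt: string;
  usedInExtractFields: boolean; // usable for content extraction
  usedInQueryExtension: boolean; // usable for query extension
  customExtractPrompt: string;
  usedInToolCall: boolean; // usable as a tool-calling model
  defaultConfig: Record<string, unknown>;
  fieldMap: Record<string, string>;
};
```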
diff --git a/packages/service/core/ai/config/rerank/siliconflow-bge-reranker-v2-m3.json b/packages/service/core/ai/config/rerank/siliconflow-bge-reranker-v2-m3.json
new file mode 100644
index 000000000..96219e218
--- /dev/null
+++ b/packages/service/core/ai/config/rerank/siliconflow-bge-reranker-v2-m3.json
@@ -0,0 +1,6 @@
+{
+ "provider": "Siliconflow",
+ "model": "BAAI/bge-reranker-v2-m3",
+ "name": "BAAI/bge-reranker-v2-m3",
+ "charsPointsPrice": 0
+}
diff --git a/packages/service/core/ai/config/stt/FunAudioLLM-SenseVoiceSmall.json b/packages/service/core/ai/config/stt/FunAudioLLM-SenseVoiceSmall.json
new file mode 100644
index 000000000..d923b42fa
--- /dev/null
+++ b/packages/service/core/ai/config/stt/FunAudioLLM-SenseVoiceSmall.json
@@ -0,0 +1,6 @@
+{
+ "provider": "Siliconflow",
+ "model": "FunAudioLLM/SenseVoiceSmall",
+ "name": "FunAudioLLM/SenseVoiceSmall",
+ "charsPointsPrice": 0
+}
diff --git a/packages/service/core/ai/config/tts/Minimax-speech-01-turbo.json b/packages/service/core/ai/config/tts/Minimax-speech-01-turbo.json
new file mode 100644
index 000000000..7bf8babe4
--- /dev/null
+++ b/packages/service/core/ai/config/tts/Minimax-speech-01-turbo.json
@@ -0,0 +1,192 @@
+{
+ "provider": "MiniMax",
+ "model": "speech-01-turbo",
+ "name": "Minimax-speech-01-turbo",
+ "charsPointsPrice": 0,
+ "voices": [
+ {
+ "label": "minimax-male-qn-qingse",
+ "value": "male-qn-qingse"
+ },
+ {
+ "label": "minimax-male-qn-jingying",
+ "value": "male-qn-jingying"
+ },
+ {
+ "label": "minimax-male-qn-badao",
+ "value": "male-qn-badao"
+ },
+ {
+ "label": "minimax-male-qn-daxuesheng",
+ "value": "male-qn-daxuesheng"
+ },
+ {
+ "label": "minimax-female-shaonv",
+ "value": "female-shaonv"
+ },
+ {
+ "label": "minimax-female-yujie",
+ "value": "female-yujie"
+ },
+ {
+ "label": "minimax-female-chengshu",
+ "value": "female-chengshu"
+ },
+ {
+ "label": "minimax-female-tianmei",
+ "value": "female-tianmei"
+ },
+ {
+ "label": "minimax-presenter_male",
+ "value": "presenter_male"
+ },
+ {
+ "label": "minimax-presenter_female",
+ "value": "presenter_female"
+ },
+ {
+ "label": "minimax-audiobook_male_1",
+ "value": "audiobook_male_1"
+ },
+ {
+ "label": "minimax-audiobook_male_2",
+ "value": "audiobook_male_2"
+ },
+ {
+ "label": "minimax-audiobook_female_1",
+ "value": "audiobook_female_1"
+ },
+ {
+ "label": "minimax-audiobook_female_2",
+ "value": "audiobook_female_2"
+ },
+ {
+ "label": "minimax-male-qn-qingse-jingpin",
+ "value": "male-qn-qingse-jingpin"
+ },
+ {
+ "label": "minimax-male-qn-jingying-jingpin",
+ "value": "male-qn-jingying-jingpin"
+ },
+ {
+ "label": "minimax-male-qn-badao-jingpin",
+ "value": "male-qn-badao-jingpin"
+ },
+ {
+ "label": "minimax-male-qn-daxuesheng-jingpin",
+ "value": "male-qn-daxuesheng-jingpin"
+ },
+ {
+ "label": "minimax-female-shaonv-jingpin",
+ "value": "female-shaonv-jingpin"
+ },
+ {
+ "label": "minimax-female-yujie-jingpin",
+ "value": "female-yujie-jingpin"
+ },
+ {
+ "label": "minimax-female-chengshu-jingpin",
+ "value": "female-chengshu-jingpin"
+ },
+ {
+ "label": "minimax-female-tianmei-jingpin",
+ "value": "female-tianmei-jingpin"
+ },
+ {
+ "label": "minimax-clever_boy",
+ "value": "clever_boy"
+ },
+ {
+ "label": "minimax-cute_boy",
+ "value": "cute_boy"
+ },
+ {
+ "label": "minimax-lovely_girl",
+ "value": "lovely_girl"
+ },
+ {
+ "label": "minimax-cartoon_pig",
+ "value": "cartoon_pig"
+ },
+ {
+ "label": "minimax-bingjiao_didi",
+ "value": "bingjiao_didi"
+ },
+ {
+ "label": "minimax-junlang_nanyou",
+ "value": "junlang_nanyou"
+ },
+ {
+ "label": "minimax-chunzhen_xuedi",
+ "value": "chunzhen_xuedi"
+ },
+ {
+ "label": "minimax-lengdan_xiongzhang",
+ "value": "lengdan_xiongzhang"
+ },
+ {
+ "label": "minimax-badao_shaoye",
+ "value": "badao_shaoye"
+ },
+ {
+ "label": "minimax-tianxin_xiaoling",
+ "value": "tianxin_xiaoling"
+ },
+ {
+ "label": "minimax-qiaopi_mengmei",
+ "value": "qiaopi_mengmei"
+ },
+ {
+ "label": "minimax-wumei_yujie",
+ "value": "wumei_yujie"
+ },
+ {
+ "label": "minimax-diadia_xuemei",
+ "value": "diadia_xuemei"
+ },
+ {
+ "label": "minimax-danya_xuejie",
+ "value": "danya_xuejie"
+ },
+ {
+ "label": "minimax-Santa_Claus",
+ "value": "Santa_Claus"
+ },
+ {
+ "label": "minimax-Grinch",
+ "value": "Grinch"
+ },
+ {
+ "label": "minimax-Rudolph",
+ "value": "Rudolph"
+ },
+ {
+ "label": "minimax-Arnold",
+ "value": "Arnold"
+ },
+ {
+ "label": "minimax-Charming_Santa",
+ "value": "Charming_Santa"
+ },
+ {
+ "label": "minimax-Charming_Lady",
+ "value": "Charming_Lady"
+ },
+ {
+ "label": "minimax-Sweet_Girl",
+ "value": "Sweet_Girl"
+ },
+ {
+ "label": "minimax-Cute_Elf",
+ "value": "Cute_Elf"
+ },
+ {
+ "label": "minimax-Attractive_Girl",
+ "value": "Attractive_Girl"
+ },
+ {
+ "label": "minimax-Serene_Woman",
+ "value": "Serene_Woman"
+ }
+ ]
+}
diff --git a/packages/service/core/ai/config/tts/siliconflow-CosyVoice2-0.5B.json b/packages/service/core/ai/config/tts/siliconflow-CosyVoice2-0.5B.json
new file mode 100644
index 000000000..e03e39de0
--- /dev/null
+++ b/packages/service/core/ai/config/tts/siliconflow-CosyVoice2-0.5B.json
@@ -0,0 +1,40 @@
+{
+ "provider": "Siliconflow",
+ "model": "FunAudioLLM/CosyVoice2-0.5B",
+ "name": "FunAudioLLM/CosyVoice2-0.5B",
+ "charsPointsPrice": 0,
+ "voices": [
+ {
+ "label": "alex",
+ "value": "FunAudioLLM/CosyVoice2-0.5B:alex"
+ },
+ {
+ "label": "anna",
+ "value": "FunAudioLLM/CosyVoice2-0.5B:anna"
+ },
+ {
+ "label": "bella",
+ "value": "FunAudioLLM/CosyVoice2-0.5B:bella"
+ },
+ {
+ "label": "benjamin",
+ "value": "FunAudioLLM/CosyVoice2-0.5B:benjamin"
+ },
+ {
+ "label": "charles",
+ "value": "FunAudioLLM/CosyVoice2-0.5B:charles"
+ },
+ {
+ "label": "claire",
+ "value": "FunAudioLLM/CosyVoice2-0.5B:claire"
+ },
+ {
+ "label": "david",
+ "value": "FunAudioLLM/CosyVoice2-0.5B:david"
+ },
+ {
+ "label": "diana",
+ "value": "FunAudioLLM/CosyVoice2-0.5B:diana"
+ }
+ ]
+}
diff --git a/packages/service/core/ai/config/tts/siliconflow-RVC-Boss-GPT-SoVITS.json b/packages/service/core/ai/config/tts/siliconflow-RVC-Boss-GPT-SoVITS.json
new file mode 100644
index 000000000..f1c18a5de
--- /dev/null
+++ b/packages/service/core/ai/config/tts/siliconflow-RVC-Boss-GPT-SoVITS.json
@@ -0,0 +1,40 @@
+{
+ "provider": "Siliconflow",
+ "model": "RVC-Boss/GPT-SoVITS",
+ "name": "RVC-Boss/GPT-SoVITS",
+ "charsPointsPrice": 0,
+ "voices": [
+ {
+ "label": "alex",
+ "value": "RVC-Boss/GPT-SoVITS:alex"
+ },
+ {
+ "label": "anna",
+ "value": "RVC-Boss/GPT-SoVITS:anna"
+ },
+ {
+ "label": "bella",
+ "value": "RVC-Boss/GPT-SoVITS:bella"
+ },
+ {
+ "label": "benjamin",
+ "value": "RVC-Boss/GPT-SoVITS:benjamin"
+ },
+ {
+ "label": "charles",
+ "value": "RVC-Boss/GPT-SoVITS:charles"
+ },
+ {
+ "label": "claire",
+ "value": "RVC-Boss/GPT-SoVITS:claire"
+ },
+ {
+ "label": "david",
+ "value": "RVC-Boss/GPT-SoVITS:david"
+ },
+ {
+ "label": "diana",
+ "value": "RVC-Boss/GPT-SoVITS:diana"
+ }
+ ]
+}
diff --git a/packages/service/core/ai/config/tts/siliconflow-fish-speech-1.5.json b/packages/service/core/ai/config/tts/siliconflow-fish-speech-1.5.json
new file mode 100644
index 000000000..45fa6b4c4
--- /dev/null
+++ b/packages/service/core/ai/config/tts/siliconflow-fish-speech-1.5.json
@@ -0,0 +1,40 @@
+{
+ "provider": "Siliconflow",
+ "model": "fishaudio/fish-speech-1.5",
+ "name": "fish-speech-1.5",
+ "charsPointsPrice": 0,
+ "voices": [
+ {
+ "label": "alex",
+ "value": "fishaudio/fish-speech-1.5:alex"
+ },
+ {
+ "label": "anna",
+ "value": "fishaudio/fish-speech-1.5:anna"
+ },
+ {
+ "label": "bella",
+ "value": "fishaudio/fish-speech-1.5:bella"
+ },
+ {
+ "label": "benjamin",
+ "value": "fishaudio/fish-speech-1.5:benjamin"
+ },
+ {
+ "label": "charles",
+ "value": "fishaudio/fish-speech-1.5:charles"
+ },
+ {
+ "label": "claire",
+ "value": "fishaudio/fish-speech-1.5:claire"
+ },
+ {
+ "label": "david",
+ "value": "fishaudio/fish-speech-1.5:david"
+ },
+ {
+ "label": "diana",
+ "value": "fishaudio/fish-speech-1.5:diana"
+ }
+ ]
+}
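The SiliconFlow TTS configs above encode each voice as '<model>:<voice>'. As a rough illustration of how such a value might be sent to an OpenAI-compatible speech endpoint, consider the sketch below; the endpoint URL, environment variable name, and payload shape are assumptions, not taken from this diff.

```ts
// Hedged sketch: endpoint URL, env var name and payload shape are assumptions.
const res = await fetch('https://api.siliconflow.cn/v1/audio/speech', {
  method: 'POST',
  headers: {
    Authorization: `Bearer ${process.env.SILICONFLOW_API_KEY}`,
    'Content-Type': 'application/json'
  },
  body: JSON.stringify({
    model: 'fishaudio/fish-speech-1.5',
    voice: 'fishaudio/fish-speech-1.5:alex', // '<model>:<voice>' as listed above
    input: 'Hello from FastGPT.'
  })
});
const audio = Buffer.from(await res.arrayBuffer()); // audio bytes (e.g. mp3)
```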
diff --git a/packages/web/components/common/Icon/constants.ts b/packages/web/components/common/Icon/constants.ts
index acdec5f1a..4a34e0b54 100644
--- a/packages/web/components/common/Icon/constants.ts
+++ b/packages/web/components/common/Icon/constants.ts
@@ -270,7 +270,6 @@ export const iconPaths = {
import('./icons/core/workflow/template/datasource.svg'),
'core/workflow/template/duckduckgo': () =>
import('./icons/core/workflow/template/duckduckgo.svg'),
- 'core/workflow/template/sleep': () => import('./icons/core/workflow/template/sleep.svg'),
'core/workflow/template/extractJson': () =>
import('./icons/core/workflow/template/extractJson.svg'),
'core/workflow/template/fetchUrl': () => import('./icons/core/workflow/template/fetchUrl.svg'),
@@ -296,6 +295,7 @@ export const iconPaths = {
'core/workflow/template/reply': () => import('./icons/core/workflow/template/reply.svg'),
'core/workflow/template/runApp': () => import('./icons/core/workflow/template/runApp.svg'),
'core/workflow/template/searxng': () => import('./icons/core/workflow/template/searxng.svg'),
+ 'core/workflow/template/sleep': () => import('./icons/core/workflow/template/sleep.svg'),
'core/workflow/template/stopTool': () => import('./icons/core/workflow/template/stopTool.svg'),
'core/workflow/template/systemConfig': () =>
import('./icons/core/workflow/template/systemConfig.svg'),
@@ -377,18 +377,19 @@ export const iconPaths = {
'model/groq': () => import('./icons/model/groq.svg'),
'model/huggingface': () => import('./icons/model/huggingface.svg'),
'model/hunyuan': () => import('./icons/model/hunyuan.svg'),
+ 'model/intern': () => import('./icons/model/intern.svg'),
'model/meta': () => import('./icons/model/meta.svg'),
'model/minimax': () => import('./icons/model/minimax.svg'),
'model/mistral': () => import('./icons/model/mistral.svg'),
+ 'model/moka': () => import('./icons/model/moka.svg'),
'model/moonshot': () => import('./icons/model/moonshot.svg'),
'model/ollama': () => import('./icons/model/ollama.svg'),
'model/openai': () => import('./icons/model/openai.svg'),
'model/qwen': () => import('./icons/model/qwen.svg'),
+ 'model/siliconflow': () => import('./icons/model/siliconflow.svg'),
'model/sparkDesk': () => import('./icons/model/sparkDesk.svg'),
'model/stepfun': () => import('./icons/model/stepfun.svg'),
'model/yi': () => import('./icons/model/yi.svg'),
- 'model/intern': () => import('./icons/model/intern.svg'),
- 'model/moka': () => import('./icons/model/moka.svg'),
more: () => import('./icons/more.svg'),
moreLine: () => import('./icons/moreLine.svg'),
out: () => import('./icons/out.svg'),
diff --git a/packages/web/components/common/Icon/icons/model/siliconflow.svg b/packages/web/components/common/Icon/icons/model/siliconflow.svg
new file mode 100644
index 000000000..b11c61059
--- /dev/null
+++ b/packages/web/components/common/Icon/icons/model/siliconflow.svg
@@ -0,0 +1,3 @@
+
\ No newline at end of file
diff --git a/packages/web/i18n/en/account.json b/packages/web/i18n/en/account.json
index 01b72486e..45a7bb341 100644
--- a/packages/web/i18n/en/account.json
+++ b/packages/web/i18n/en/account.json
@@ -23,6 +23,8 @@
"model.custom_extract_prompt": "Custom content extraction prompt words",
"model.custom_extract_prompt_tip": "Override system prompt word, default is:\n\"\"\"\n你可以从 <对话记录>对话记录> 中提取指定 Json 信息,你仅需返回 Json 字符串,无需回答问题。\n<提取要求>\n{{description}}\n提取要求>\n\n<提取规则>\n- 本次需提取的 json 字符串,需符合 JsonSchema 的规则。\n- type 代表数据类型; key 代表字段名; description 代表字段的描述; enum 是枚举值,代表可选的 value。\n- 如果没有可提取的内容,忽略该字段。\n提取规则>\n\n