simple model config
This commit is contained in:
parent bf29a42baa
commit 025dc85c59
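Replace the built-in model catalog with provider-scoped model configuration: `CustomLLMModel` and the hardcoded `DEFAULT_MODELS` list give way to a minimal `LLMModel` (`provider` + `modelId`), `LLMManager` is constructed directly from `InfioSettings`, request routing switches on the `ApiProvider` enum (adding OpenRouter, SiliconFlow, and Alibaba Qwen), and the embedding model is resolved from `settings.embeddingModelProvider` / `settings.embeddingModelId`.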
@@ -59,12 +59,14 @@
	"@radix-ui/react-dialog": "^1.1.2",
	"@radix-ui/react-dropdown-menu": "^2.1.2",
	"@radix-ui/react-popover": "^1.1.2",
	"@radix-ui/react-select": "^2.1.6",
	"@radix-ui/react-tooltip": "^1.1.3",
	"@tanstack/react-query": "^5.56.2",
	"clsx": "^2.1.1",
	"diff": "^7.0.0",
	"drizzle-orm": "^0.35.2",
	"exponential-backoff": "^3.1.1",
	"fuse.js": "^7.1.0",
	"fuzzysort": "^3.1.0",
	"groq-sdk": "^0.7.0",
	"handlebars": "^4.7.7",
9493 pnpm-lock.yaml (generated, new file)
File diff suppressed because it is too large
@@ -62,7 +62,7 @@ function LLMResponesInfoButton({ message }: { message: ChatAssistantMessage }) {
	<LLMResponseInfoPopover
		usage={message.metadata?.usage}
		estimatedPrice={cost}
-		model={message.metadata?.model?.name}
+		model={message.metadata?.model?.modelId}
	/>
	</div>
</Tooltip.Trigger>
@@ -279,7 +279,7 @@ const Chat = forwardRef<ChatRef, ChatProps>((props, ref) => {
const stream = await streamResponse(
	chatModel,
	{
-		model: chatModel.name,
+		model: chatModel.modelId,
		messages: requestMessages,
		stream: true,
	},
@@ -1,14 +1,18 @@
import * as DropdownMenu from '@radix-ui/react-dropdown-menu'
import { ChevronDown, ChevronUp } from 'lucide-react'
-import { useState } from 'react'
+import { useMemo, useState } from 'react'

import { useSettings } from '../../../contexts/SettingsContext'

+import { GetProviderModelIds } from "../../../utils/api"

export function ModelSelect() {
	const { settings, setSettings } = useSettings()
	const [isOpen, setIsOpen] = useState(false)

	const activeModels = settings.activeModels.filter((model) => model.enabled)
+	const [chatModelId, setChatModelId] = useState(settings.chatModelId)

+	const currProviderModels = useMemo(() => {
+		return GetProviderModelIds(settings.chatModelProvider)
+	}, [settings.chatModelProvider])

	return (
		<DropdownMenu.Root open={isOpen} onOpenChange={setIsOpen}>
@@ -17,11 +21,7 @@ export function ModelSelect() {
			{isOpen ? <ChevronUp size={12} /> : <ChevronDown size={12} />}
		</div>
		<div className="infio-chat-input-model-select__model-name">
-			{
-				activeModels.find(
-					(option) => option.name === settings.chatModelId,
-				)?.name
-			}
+			{chatModelId}
		</div>
	</DropdownMenu.Trigger>
@@ -29,18 +29,19 @@ export function ModelSelect() {
	<DropdownMenu.Content
		className="infio-popover">
		<ul>
-			{activeModels.map((model) => (
+			{currProviderModels.map((modelId) => (
				<DropdownMenu.Item
-					key={model.name}
+					key={modelId}
					onSelect={() => {
+						setChatModelId(modelId)
						setSettings({
							...settings,
-							chatModelId: model.name,
+							chatModelId: modelId,
						})
					}}
					asChild
				>
-					<li>{model.name}</li>
+					<li>{modelId}</li>
				</DropdownMenu.Item>
			))}
		</ul>
@@ -1,10 +1,10 @@
import { MarkdownView, Plugin } from 'obsidian';
-import React, { useEffect, useRef, useState } from 'react';
+import React, { useEffect, useMemo, useRef, useState } from 'react';

import { APPLY_VIEW_TYPE } from '../../constants';
import LLMManager from '../../core/llm/manager';
-import { CustomLLMModel } from '../../types/llm/model';
import { InfioSettings } from '../../types/settings';
+import { GetProviderModelIds } from "../../../utils/api"
import { manualApplyChangesToFile } from '../../utils/apply';
import { removeAITags } from '../../utils/content-filter';
import { PromptGenerator } from '../../utils/prompt-generator';
@@ -57,31 +57,35 @@ const ControlArea: React.FC<ControlAreaProps> = ({
	selectedModel,
	onModelChange,
	isSubmitting,
-}) => (
-	<div className="infio-ai-block-controls">
-		<select
-			className="infio-ai-block-model-select"
-			value={selectedModel}
-			onChange={(e) => onModelChange(e.target.value)}
-			disabled={isSubmitting}
-		>
-			{settings.activeModels
-				.filter((model) => !model.isEmbeddingModel && model.enabled)
-				.map((model) => (
-					<option key={model.name} value={model.name}>
-						{model.name}
-					</option>
-				))}
-		</select>
-		<button
-			className="infio-ai-block-submit-button"
-			onClick={onSubmit}
-			disabled={isSubmitting}
-		>
-			{isSubmitting ? "Submitting..." : "Submit"}
-		</button>
-	</div>
-);
+}) => {
+	const currProviderModels = useMemo(() => {
+		return GetProviderModelIds(settings.chatModelProvider)
+			.map((modelId) => (
+				<option key={modelId} value={modelId}>
+					{modelId}
+				</option>
+			))
+	}, [settings])
+
+	return (
+		<div className="infio-ai-block-controls">
+			<select
+				className="infio-ai-block-model-select"
+				value={selectedModel}
+				onChange={(e) => onModelChange(e.target.value)}
+				disabled={isSubmitting}
+			>
+				{currProviderModels}
+			</select>
+			<button
+				className="infio-ai-block-submit-button"
+				onClick={onSubmit}
+				disabled={isSubmitting}
+			>
+				{isSubmitting ? "Submitting..." : "Submit"}
+			</button>
+		</div>);
+};

export const InlineEdit: React.FC<InlineEditProps> = ({
	source,
@@ -94,14 +98,7 @@ export const InlineEdit: React.FC<InlineEditProps> = ({
	const [selectedModel, setSelectedModel] = useState(settings.chatModelId);
	const [isSubmitting, setIsSubmitting] = useState(false);

-	const llmManager = new LLMManager({
-		deepseek: settings.deepseekApiKey,
-		openai: settings.openAIApiKey,
-		anthropic: settings.anthropicApiKey,
-		gemini: settings.geminiApiKey,
-		groq: settings.groqApiKey,
-		infio: settings.infioApiKey,
-	});
+	const llmManager = new LLMManager(settings);

	const promptGenerator = new PromptGenerator(
		async () => {
@@ -171,9 +168,10 @@ export const InlineEdit: React.FC<InlineEditProps> = ({
		return;
	}

-	const chatModel = settings.activeModels.find(
-		(model) => model.name === selectedModel
-	) as CustomLLMModel;
+	const chatModel = {
+		provider: settings.chatModelProvider,
+		modelId: settings.chatModelId,
+	};
	if (!chatModel) {
		setIsSubmitting(false);
		throw new Error("Invalid chat model");
@@ -193,7 +191,7 @@ export const InlineEdit: React.FC<InlineEditProps> = ({
	});

	const response = await llmManager.generateResponse(chatModel, {
-		model: chatModel.name,
+		model: chatModel.modelId,
		messages: requestMessages,
		stream: false,
	});
135 src/constants.ts
@@ -1,120 +1,22 @@
-import { CustomLLMModel } from './types/llm/model'
+import { LLMModel } from './types/llm/model'
+// import { ApiProvider } from './utils/api'
export const CHAT_VIEW_TYPE = 'infio-chat-view'
export const APPLY_VIEW_TYPE = 'infio-apply-view'

-export const DEFAULT_MODELS: CustomLLMModel[] = [
-	{
-		name: 'claude-3.5-sonnet',
-		provider: 'anthropic',
-		enabled: true,
-		isEmbeddingModel: false,
-		isBuiltIn: true,
-	},
-	{
-		name: 'o1-mini',
-		provider: 'openai',
-		enabled: true,
-		isEmbeddingModel: false,
-		isBuiltIn: true,
-	},
-	{
-		name: 'o1-preview',
-		provider: 'openai',
-		enabled: false,
-		isEmbeddingModel: false,
-		isBuiltIn: true,
-	},
-	{
-		name: 'gpt-4o',
-		provider: 'openai',
-		enabled: true,
-		isEmbeddingModel: false,
-		isBuiltIn: true,
-	},
-	{
-		name: 'gpt-4o-mini',
-		provider: 'openai',
-		enabled: false,
-		isEmbeddingModel: false,
-		isBuiltIn: true,
-	},
-	{
-		name: 'deepseek-chat',
-		provider: 'deepseek',
-		enabled: true,
-		isEmbeddingModel: false,
-		isBuiltIn: true,
-	},
-	{
-		name: 'gemini-1.5-pro',
-		provider: 'google',
-		enabled: true,
-		isEmbeddingModel: false,
-		isBuiltIn: true,
-	},
-	{
-		name: 'gemini-2.0-flash-exp',
-		provider: 'google',
-		enabled: true,
-		isEmbeddingModel: false,
-		isBuiltIn: true,
-	},
-	{
-		name: 'gemini-2.0-flash-thinking-exp-1219',
-		provider: 'google',
-		enabled: false,
-		isEmbeddingModel: false,
-		isBuiltIn: true,
-	},
-	{
-		name: 'llama-3.1-70b-versatile',
-		provider: 'groq',
-		enabled: true,
-		isEmbeddingModel: false,
-		isBuiltIn: true,
-	},
-	{
-		name: 'text-embedding-3-small',
-		provider: 'openai',
-		dimension: 1536,
-		enabled: true,
-		isEmbeddingModel: true,
-		isBuiltIn: true,
-	},
-	{
-		name: 'text-embedding-004',
-		provider: 'google',
-		dimension: 768,
-		enabled: true,
-		isEmbeddingModel: true,
-		isBuiltIn: true,
-	},
-	{
-		name: 'nomic-embed-text',
-		provider: 'ollama',
-		dimension: 768,
-		enabled: true,
-		isEmbeddingModel: true,
-		isBuiltIn: true,
-	},
-	{
-		name: 'mxbai-embed-large',
-		provider: 'ollama',
-		dimension: 1024,
-		enabled: true,
-		isEmbeddingModel: true,
-		isBuiltIn: true,
-	},
-	{
-		name: 'bge-m3',
-		provider: 'ollama',
-		dimension: 1024,
-		enabled: true,
-		isEmbeddingModel: true,
-		isBuiltIn: true,
-	}
-]
+export const DEFAULT_MODELS: LLMModel[] = []
+
+// export const PROVIDERS: ApiProvider[] = [
+// 	'Infio',
+// 	'OpenRouter',
+// 	'SiliconFlow',
+// 	'Anthropic',
+// 	'Deepseek',
+// 	'OpenAI',
+// 	'Google',
+// 	'Groq',
+// 	'Ollama',
+// 	'OpenAICompatible',
+// ]

export const SUPPORT_EMBEDDING_SIMENTION: number[] = [
	384,
@@ -124,7 +26,12 @@ export const SUPPORT_EMBEDDING_SIMENTION: number[] = [
	1536
]

+export const OPENAI_BASE_URL = 'https://api.openai.com/v1'
export const DEEPSEEK_BASE_URL = 'https://api.deepseek.com'
+export const OPENROUTER_BASE_URL = 'https://openrouter.ai/api/v1'
+export const SILICONFLOW_BASE_URL = 'https://api.siliconflow.cn/v1'
+export const ALIBABA_QWEN_BASE_URL = 'https://dashscope.aliyuncs.com/compatible-mode/v1'
+export const INFIO_BASE_URL = 'https://api.infio.com/api/raw_message'

// Pricing in dollars per million tokens
type ModelPricing = {
@@ -9,7 +9,7 @@ import {
} from 'react'

import LLMManager from '../core/llm/manager'
-import { CustomLLMModel } from '../types/llm/model'
+import { LLMModel } from '../types/llm/model'
import {
	LLMOptions,
	LLMRequestNonStreaming,
@@ -24,17 +24,17 @@ import { useSettings } from './SettingsContext'

export type LLMContextType = {
	generateResponse: (
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestNonStreaming,
		options?: LLMOptions,
	) => Promise<LLMResponseNonStreaming>
	streamResponse: (
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestStreaming,
		options?: LLMOptions,
	) => Promise<AsyncIterable<LLMResponseStreaming>>
-	chatModel: CustomLLMModel
-	applyModel: CustomLLMModel
+	chatModel: LLMModel
+	applyModel: LLMModel
}

const LLMContext = createContext<LLMContextType | null>(null)
@@ -43,55 +43,28 @@ export function LLMProvider({ children }: PropsWithChildren) {
	const [llmManager, setLLMManager] = useState<LLMManager | null>(null)
	const { settings } = useSettings()

-	const chatModel = useMemo((): CustomLLMModel => {
-		const model = settings.activeModels.find(
-			(option) => option.name === settings.chatModelId,
-		)
-		if (!model) {
-			throw new Error('Invalid chat model ID')
-		}
-		return model as CustomLLMModel
+	const chatModel = useMemo((): LLMModel => {
+		return {
+			provider: settings.chatModelProvider,
+			modelId: settings.chatModelId,
+		}
	}, [settings])

-	const applyModel = useMemo((): CustomLLMModel => {
-		const model = settings.activeModels.find(
-			(option) => option.name === settings.applyModelId,
-		)
-		if (!model) {
-			throw new Error('Invalid apply model ID')
-		}
-		if (model.provider === 'ollama') {
-			return {
-				...model,
-				baseUrl: settings.ollamaApplyModel.baseUrl,
-				name: settings.ollamaApplyModel.model,
-			} as CustomLLMModel
-		}
-		return model as CustomLLMModel
+	const applyModel = useMemo((): LLMModel => {
+		return {
+			provider: settings.applyModelProvider,
+			modelId: settings.applyModelId,
+		}
	}, [settings])

	useEffect(() => {
-		const manager = new LLMManager({
-			deepseek: settings.deepseekApiKey,
-			openai: settings.openAIApiKey,
-			anthropic: settings.anthropicApiKey,
-			gemini: settings.geminiApiKey,
-			groq: settings.groqApiKey,
-			infio: settings.infioApiKey,
-		})
+		const manager = new LLMManager(settings)
		setLLMManager(manager)
-	}, [
-		settings.deepseekApiKey,
-		settings.openAIApiKey,
-		settings.anthropicApiKey,
-		settings.geminiApiKey,
-		settings.groqApiKey,
-		settings.infioApiKey,
-	])
+	}, [settings])

	const generateResponse = useCallback(
		async (
-			model: CustomLLMModel,
+			model: LLMModel,
			request: LLMRequestNonStreaming,
			options?: LLMOptions,
		) => {
@@ -105,7 +78,7 @@ export function LLMProvider({ children }: PropsWithChildren) {

	const streamResponse = useCallback(
		async (
-			model: CustomLLMModel,
+			model: LLMModel,
			request: LLMRequestStreaming,
			options?: LLMOptions,
		) => {
@@ -2,7 +2,7 @@ import * as Handlebars from "handlebars";
import { Result, err, ok } from "neverthrow";

import { FewShotExample } from "../../settings/versions";
-import { CustomLLMModel } from "../../types/llm/model";
+import { LLMModel } from "../../types/llm/model";
import { RequestMessage } from '../../types/llm/request';
import { InfioSettings } from "../../types/settings";
import LLMManager from '../llm/manager';
@@ -25,9 +25,9 @@ import {

class LLMClient {
	private llm: LLMManager;
-	private model: CustomLLMModel;
+	private model: LLMModel;

-	constructor(llm: LLMManager, model: CustomLLMModel) {
+	constructor(llm: LLMManager, model: LLMModel) {
		this.llm = llm;
		this.model = model;
	}
@@ -100,17 +100,11 @@ class AutoComplete implements AutocompleteService {
	postProcessors.push(new RemoveOverlap());
	postProcessors.push(new RemoveWhitespace());

-	const llm_manager = new LLMManager({
-		deepseek: settings.deepseekApiKey,
-		openai: settings.openAIApiKey,
-		anthropic: settings.anthropicApiKey,
-		gemini: settings.geminiApiKey,
-		groq: settings.groqApiKey,
-		infio: settings.infioApiKey,
-	})
-	const model = settings.activeModels.find(
-		(option) => option.name === settings.chatModelId,
-	) as CustomLLMModel;
+	const llm_manager = new LLMManager(settings)
+	const model = {
+		provider: settings.applyModelProvider,
+		modelId: settings.applyModelId,
+	}
	const llm = new LLMClient(llm_manager, model);

	return new AutoComplete(
@@ -6,7 +6,7 @@ import {
	TextBlockParam,
} from '@anthropic-ai/sdk/resources/messages'

-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
import {
	LLMOptions,
	LLMRequestNonStreaming,
@@ -36,21 +36,14 @@ export class AnthropicProvider implements BaseLLMProvider {
	}

	async generateResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestNonStreaming,
		options?: LLMOptions,
	): Promise<LLMResponseNonStreaming> {
-		if (!this.client.apiKey) {
-			throw new LLMAPIKeyNotSetException(
-				'Anthropic API key is missing. Please set it in settings menu.',
-			)
-		}
-		this.client = new Anthropic({
-			baseURL: model.baseUrl,
-			apiKey: model.apiKey,
-			dangerouslyAllowBrowser: true
-		})
+		if (!model.apiKey) {
+			throw new LLMAPIKeyNotSetException(
+				'Anthropic API key is missing. Please set it in settings menu.',
+			)
+		}

		const systemMessage = AnthropicProvider.validateSystemMessages(
@@ -89,21 +82,14 @@ export class AnthropicProvider implements BaseLLMProvider {
	}

	async streamResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestStreaming,
		options?: LLMOptions,
	): Promise<AsyncIterable<LLMResponseStreaming>> {
-		if (!this.client.apiKey) {
-			throw new LLMAPIKeyNotSetException(
-				'Anthropic API key is missing. Please set it in settings menu.',
-			)
-		}
-		this.client = new Anthropic({
-			baseURL: model.baseUrl,
-			apiKey: model.apiKey,
-			dangerouslyAllowBrowser: true
-		})
+		if (!model.apiKey) {
+			throw new LLMAPIKeyNotSetException(
+				'Anthropic API key is missing. Please set it in settings menu.',
+			)
+		}

		const systemMessage = AnthropicProvider.validateSystemMessages(
@@ -1,4 +1,4 @@
-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
import {
	LLMOptions,
	LLMRequestNonStreaming,
@@ -11,12 +11,12 @@ import {

export type BaseLLMProvider = {
	generateResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestNonStreaming,
		options?: LLMOptions,
	): Promise<LLMResponseNonStreaming>
	streamResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestStreaming,
		options?: LLMOptions,
	): Promise<AsyncIterable<LLMResponseStreaming>>
@@ -7,7 +7,7 @@ import {
	Part,
} from '@google/generative-ai'

-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
import {
	LLMOptions,
	LLMRequestNonStreaming,
@@ -43,18 +43,14 @@ export class GeminiProvider implements BaseLLMProvider {
	}

	async generateResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestNonStreaming,
		options?: LLMOptions,
	): Promise<LLMResponseNonStreaming> {
-		if (!this.apiKey) {
+		if (!model.apiKey) {
			throw new LLMAPIKeyNotSetException(
				`Gemini API key is missing. Please set it in settings menu.`,
			)
		}
-		this.apiKey = model.apiKey
-		this.client = new GoogleGenerativeAI(model.apiKey)

		const systemMessages = request.messages.filter((m) => m.role === 'system')
@@ -110,18 +106,14 @@ export class GeminiProvider implements BaseLLMProvider {
	}

	async streamResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestStreaming,
		options?: LLMOptions,
	): Promise<AsyncIterable<LLMResponseStreaming>> {
-		if (!this.apiKey) {
+		if (!model.apiKey) {
			throw new LLMAPIKeyNotSetException(
				`Gemini API key is missing. Please set it in settings menu.`,
			)
		}
-		this.apiKey = model.apiKey
-		this.client = new GoogleGenerativeAI(model.apiKey)

		const systemMessages = request.messages.filter((m) => m.role === 'system')
@@ -6,7 +6,7 @@ import {
	ChatCompletionMessageParam,
} from 'groq-sdk/resources/chat/completions'

-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
import {
	LLMOptions,
	LLMRequestNonStreaming,
@@ -35,20 +35,14 @@ export class GroqProvider implements BaseLLMProvider {
	}

	async generateResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestNonStreaming,
		options?: LLMOptions,
	): Promise<LLMResponseNonStreaming> {
-		if (!this.client.apiKey) {
+		if (!model.apiKey) {
			throw new LLMAPIKeyNotSetException(
				'Groq API key is missing. Please set it in settings menu.',
			)
		}
-		this.client = new Groq({
-			apiKey: model.apiKey,
-			dangerouslyAllowBrowser: true,
-		})

		try {
@@ -78,20 +72,14 @@ export class GroqProvider implements BaseLLMProvider {
	}

	async streamResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestStreaming,
		options?: LLMOptions,
	): Promise<AsyncIterable<LLMResponseStreaming>> {
-		if (!this.client.apiKey) {
+		if (!model.apiKey) {
			throw new LLMAPIKeyNotSetException(
				'Groq API key is missing. Please set it in settings menu.',
			)
		}
-		this.client = new Groq({
-			apiKey: model.apiKey,
-			dangerouslyAllowBrowser: true,
-		})

		try {
@@ -4,12 +4,12 @@ import {
	ChatCompletionChunk,
} from 'openai/resources/chat/completions'

-import { CustomLLMModel } from '../../types/llm/model'
+import { INFIO_BASE_URL } from '../../constants'
+import { LLMModel } from '../../types/llm/model'
import {
	LLMOptions,
	LLMRequestNonStreaming,
	LLMRequestStreaming,
-	RequestMessage,
+	RequestMessage
} from '../../types/llm/request'
import {
	LLMResponseNonStreaming,
@@ -85,13 +85,13 @@ export class InfioProvider implements BaseLLMProvider {
		// this.client = new OpenAI({ apiKey, dangerouslyAllowBrowser: true })
		// this.adapter = new OpenAIMessageAdapter()
		this.apiKey = apiKey
-		this.baseUrl = 'https://api.infio.com/api/raw_message'
+		this.baseUrl = INFIO_BASE_URL
	}

	async generateResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestNonStreaming,
-		options?: LLMOptions,
+		// options?: LLMOptions,
	): Promise<LLMResponseNonStreaming> {
		if (!this.apiKey) {
			throw new LLMAPIKeyNotSetException(
@@ -107,7 +107,7 @@ export class InfioProvider implements BaseLLMProvider {
			presence_penalty: request.presence_penalty,
			max_tokens: request.max_tokens,
		}
-		const options = {
+		const req_options = {
			method: 'POST',
			headers: {
				Authorization: this.apiKey,
@@ -117,7 +117,7 @@ export class InfioProvider implements BaseLLMProvider {
			body: JSON.stringify(req)
		};

-		const response = await fetch(this.baseUrl, options);
+		const response = await fetch(this.baseUrl, req_options);
		if (!response.ok) {
			throw new Error(`HTTP error! status: ${response.status}`);
		}
@@ -134,9 +134,8 @@ export class InfioProvider implements BaseLLMProvider {
	}

	async streamResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestStreaming,
-		options?: LLMOptions,
	): Promise<AsyncIterable<LLMResponseStreaming>> {
		if (!this.apiKey) {
			throw new LLMAPIKeyNotSetException(
@@ -154,7 +153,7 @@ export class InfioProvider implements BaseLLMProvider {
			presence_penalty: request.presence_penalty,
			max_tokens: request.max_tokens,
		}
-		const options = {
+		const req_options = {
			method: 'POST',
			headers: {
				Authorization: this.apiKey,
@@ -164,7 +163,7 @@ export class InfioProvider implements BaseLLMProvider {
			body: JSON.stringify(req)
		};

-		const response = await fetch(this.baseUrl, options);
+		const response = await fetch(this.baseUrl, req_options);
		if (!response.ok) {
			throw new Error(`HTTP error! status: ${response.status}`);
		}
@@ -1,14 +1,15 @@
-import { DEEPSEEK_BASE_URL } from '../../constants'
-import { CustomLLMModel } from '../../types/llm/model'
+import { ALIBABA_QWEN_BASE_URL, DEEPSEEK_BASE_URL, OPENROUTER_BASE_URL, SILICONFLOW_BASE_URL } from '../../constants'
+import { ApiProvider, LLMModel } from '../../types/llm/model'
import {
	LLMOptions,
	LLMRequestNonStreaming,
	LLMRequestStreaming,
} from '../../types/llm/request'
import {
	LLMResponseNonStreaming,
	LLMResponseStreaming,
} from '../../types/llm/response'
+import { InfioSettings } from '../../types/settings'

import { AnthropicProvider } from './anthropic'
import { GeminiProvider } from './gemini'
@@ -20,123 +21,147 @@ import { OpenAICompatibleProvider } from './openai-compatible-provider'


export type LLMManagerInterface = {
	generateResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestNonStreaming,
		options?: LLMOptions,
	): Promise<LLMResponseNonStreaming>
	streamResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestStreaming,
		options?: LLMOptions,
	): Promise<AsyncIterable<LLMResponseStreaming>>
}

class LLMManager implements LLMManagerInterface {
	private openaiProvider: OpenAIAuthenticatedProvider
	private deepseekProvider: OpenAICompatibleProvider
	private anthropicProvider: AnthropicProvider
	private googleProvider: GeminiProvider
	private groqProvider: GroqProvider
	private infioProvider: InfioProvider
+	private openrouterProvider: OpenAICompatibleProvider
+	private siliconflowProvider: OpenAICompatibleProvider
+	private alibabaQwenProvider: OpenAICompatibleProvider
	private ollamaProvider: OllamaProvider
	private isInfioEnabled: boolean

-	constructor(apiKeys: {
-		deepseek?: string
-		openai?: string
-		anthropic?: string
-		gemini?: string
-		groq?: string
-		infio?: string
-	}) {
-		this.deepseekProvider = new OpenAICompatibleProvider(apiKeys.deepseek ?? '', DEEPSEEK_BASE_URL)
-		this.openaiProvider = new OpenAIAuthenticatedProvider(apiKeys.openai ?? '')
-		this.anthropicProvider = new AnthropicProvider(apiKeys.anthropic ?? '')
-		this.googleProvider = new GeminiProvider(apiKeys.gemini ?? '')
-		this.groqProvider = new GroqProvider(apiKeys.groq ?? '')
-		this.infioProvider = new InfioProvider(apiKeys.infio ?? '')
-		this.ollamaProvider = new OllamaProvider()
-		this.isInfioEnabled = !!apiKeys.infio
-	}
+	constructor(settings: InfioSettings) {
+		this.infioProvider = new InfioProvider(settings.infioProvider.apiKey)
+		this.openrouterProvider = new OpenAICompatibleProvider(settings.openrouterProvider.apiKey, OPENROUTER_BASE_URL)
+		this.siliconflowProvider = new OpenAICompatibleProvider(settings.siliconflowProvider.apiKey, SILICONFLOW_BASE_URL)
+		this.alibabaQwenProvider = new OpenAICompatibleProvider(settings.alibabaQwenProvider.apiKey, ALIBABA_QWEN_BASE_URL)
+		this.deepseekProvider = new OpenAICompatibleProvider(settings.deepseekProvider.apiKey, DEEPSEEK_BASE_URL)
+		this.openaiProvider = new OpenAIAuthenticatedProvider(settings.openaiProvider.apiKey)
+		this.anthropicProvider = new AnthropicProvider(settings.anthropicProvider.apiKey)
+		this.googleProvider = new GeminiProvider(settings.googleProvider.apiKey)
+		this.groqProvider = new GroqProvider(settings.groqProvider.apiKey)
+		this.ollamaProvider = new OllamaProvider(settings.groqProvider.baseUrl)
+		this.isInfioEnabled = !!settings.infioProvider.apiKey
+	}

-	async generateResponse(
-		model: CustomLLMModel,
-		request: LLMRequestNonStreaming,
-		options?: LLMOptions,
-	): Promise<LLMResponseNonStreaming> {
-		if (this.isInfioEnabled) {
-			return await this.infioProvider.generateResponse(
-				model,
-				request,
-				options,
-			)
-		}
-		// use custom provider
-		switch (model.provider) {
-			case 'deepseek':
-				return await this.deepseekProvider.generateResponse(
-					model,
-					request,
-					options,
-				)
-			case 'openai':
-				return await this.openaiProvider.generateResponse(
-					model,
-					request,
-					options,
-				)
-			case 'anthropic':
-				return await this.anthropicProvider.generateResponse(
-					model,
-					request,
-					options,
-				)
-			case 'google':
-				return await this.googleProvider.generateResponse(
-					model,
-					request,
-					options,
-				)
-			case 'groq':
-				return await this.groqProvider.generateResponse(model, request, options)
-			case 'ollama':
-				return await this.ollamaProvider.generateResponse(
-					model,
-					request,
-					options,
-				)
-		}
-	}
+	async generateResponse(
+		model: LLMModel,
+		request: LLMRequestNonStreaming,
+		options?: LLMOptions,
+	): Promise<LLMResponseNonStreaming> {
+		if (this.isInfioEnabled) {
+			return await this.infioProvider.generateResponse(
+				model,
+				request,
+			)
+		}
+		// use custom provider
+		switch (model.provider) {
+			case ApiProvider.OpenRouter:
+				return await this.openrouterProvider.generateResponse(
+					model,
+					request,
+					options,
+				)
+			case ApiProvider.SiliconFlow:
+				return await this.siliconflowProvider.generateResponse(
+					model,
+					request,
+					options,
+				)
+			case ApiProvider.AlibabaQwen:
+				return await this.alibabaQwenProvider.generateResponse(
+					model,
+					request,
+					options,
+				)
+			case ApiProvider.Deepseek:
+				return await this.deepseekProvider.generateResponse(
+					model,
+					request,
+					options,
+				)
+			case ApiProvider.OpenAI:
+				return await this.openaiProvider.generateResponse(
+					model,
+					request,
+					options,
+				)
+			case ApiProvider.Anthropic:
+				return await this.anthropicProvider.generateResponse(
+					model,
+					request,
+					options,
+				)
+			case ApiProvider.Google:
+				return await this.googleProvider.generateResponse(
+					model,
+					request,
+					options,
+				)
+			case ApiProvider.Groq:
+				return await this.groqProvider.generateResponse(model, request, options)
+			case ApiProvider.Ollama:
+				return await this.ollamaProvider.generateResponse(
+					model,
+					request,
+					options,
+				)
+			default:
+				throw new Error(`Unsupported model provider: ${model.provider}`)
+		}
+	}

-	async streamResponse(
-		model: CustomLLMModel,
-		request: LLMRequestStreaming,
-		options?: LLMOptions,
-	): Promise<AsyncIterable<LLMResponseStreaming>> {
-		if (this.isInfioEnabled) {
-			return await this.infioProvider.streamResponse(model, request, options)
-		}
-		// use custom provider
-		switch (model.provider) {
-			case 'deepseek':
-				return await this.deepseekProvider.streamResponse(model, request, options)
-			case 'openai':
-				return await this.openaiProvider.streamResponse(model, request, options)
-			case 'anthropic':
-				return await this.anthropicProvider.streamResponse(
-					model,
-					request,
-					options,
-				)
-			case 'google':
-				return await this.googleProvider.streamResponse(model, request, options)
-			case 'groq':
-				return await this.groqProvider.streamResponse(model, request, options)
-			case 'ollama':
-				return await this.ollamaProvider.streamResponse(model, request, options)
-		}
-	}
+	async streamResponse(
+		model: LLMModel,
+		request: LLMRequestStreaming,
+		options?: LLMOptions,
+	): Promise<AsyncIterable<LLMResponseStreaming>> {
+		if (this.isInfioEnabled) {
+			return await this.infioProvider.streamResponse(model, request)
+		}
+		// use custom provider
+		switch (model.provider) {
+			case ApiProvider.OpenRouter:
+				return await this.openrouterProvider.streamResponse(model, request, options)
+			case ApiProvider.SiliconFlow:
+				return await this.siliconflowProvider.streamResponse(model, request, options)
+			case ApiProvider.AlibabaQwen:
+				return await this.alibabaQwenProvider.streamResponse(model, request, options)
+			case ApiProvider.Deepseek:
+				return await this.deepseekProvider.streamResponse(model, request, options)
+			case ApiProvider.OpenAI:
+				return await this.openaiProvider.streamResponse(model, request, options)
+			case ApiProvider.Anthropic:
+				return await this.anthropicProvider.streamResponse(
+					model,
+					request,
+					options,
+				)
+			case ApiProvider.Google:
+				return await this.googleProvider.streamResponse(model, request, options)
+			case ApiProvider.Groq:
+				return await this.groqProvider.streamResponse(model, request, options)
+			case ApiProvider.Ollama:
+				return await this.ollamaProvider.streamResponse(model, request, options)
+		}
+	}
}

export default LLMManager
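With this refactor, callers build the manager straight from plugin settings and pass a plain provider/model pair. A minimal usage sketch, assuming only the LLMModel and LLMManager shapes shown in this diff (requestMessages is a hypothetical placeholder):

	// Sketch only: mirrors the call sites changed in this commit.
	const manager = new LLMManager(settings)
	const chatModel = {
		provider: settings.chatModelProvider,
		modelId: settings.chatModelId,
	}
	const response = await manager.generateResponse(chatModel, {
		model: chatModel.modelId,
		messages: requestMessages,
		stream: false,
	})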
@@ -7,7 +7,7 @@
import OpenAI from 'openai'
import { FinalRequestOptions } from 'openai/core'

-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
import {
	LLMOptions,
	LLMRequestNonStreaming,
@@ -19,7 +19,7 @@ import {
} from '../../types/llm/response'

import { BaseLLMProvider } from './base'
-import { LLMBaseUrlNotSetException, LLMModelNotSetException } from './exception'
+import { LLMBaseUrlNotSetException } from './exception'
import { OpenAIMessageAdapter } from './openai-message-adapter'

export class NoStainlessOpenAI extends OpenAI {
@@ -35,7 +35,7 @@ export class NoStainlessOpenAI extends OpenAI {
	{ retryCount = 0 }: { retryCount?: number } = {},
): { req: RequestInit; url: string; timeout: number } {
	const req = super.buildRequest(options, { retryCount })
-	const headers = req.req.headers as Record<string, string>
+	const headers: Record<string, string> = req.req.headers
	Object.keys(headers).forEach((k) => {
		if (k.startsWith('x-stainless')) {
			// eslint-disable-next-line @typescript-eslint/no-dynamic-delete
@@ -48,30 +48,26 @@ export class NoStainlessOpenAI extends OpenAI {

export class OllamaProvider implements BaseLLMProvider {
	private adapter: OpenAIMessageAdapter
+	private baseUrl: string

-	constructor() {
+	constructor(baseUrl: string) {
		this.adapter = new OpenAIMessageAdapter()
+		this.baseUrl = baseUrl
	}

	async generateResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestNonStreaming,
		options?: LLMOptions,
	): Promise<LLMResponseNonStreaming> {
-		if (!model.baseUrl) {
+		if (!this.baseUrl) {
			throw new LLMBaseUrlNotSetException(
				'Ollama base URL is missing. Please set it in settings menu.',
			)
		}

-		if (!model.name) {
-			throw new LLMModelNotSetException(
-				'Ollama model is missing. Please set it in settings menu.',
-			)
-		}
-
		const client = new NoStainlessOpenAI({
-			baseURL: `${model.baseUrl}/v1`,
+			baseURL: `${this.baseUrl}/v1`,
			apiKey: '',
			dangerouslyAllowBrowser: true,
		})
@@ -79,24 +75,18 @@ export class OllamaProvider implements BaseLLMProvider {
	}

	async streamResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestStreaming,
		options?: LLMOptions,
	): Promise<AsyncIterable<LLMResponseStreaming>> {
-		if (!model.baseUrl) {
+		if (!this.baseUrl) {
			throw new LLMBaseUrlNotSetException(
				'Ollama base URL is missing. Please set it in settings menu.',
			)
		}

-		if (!model.name) {
-			throw new LLMModelNotSetException(
-				'Ollama model is missing. Please set it in settings menu.',
-			)
-		}
-
		const client = new NoStainlessOpenAI({
-			baseURL: `${model.baseUrl}/v1`,
+			baseURL: `${this.baseUrl}/v1`,
			apiKey: '',
			dangerouslyAllowBrowser: true,
		})
@@ -1,6 +1,6 @@
import OpenAI from 'openai'

-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
import {
	LLMOptions,
	LLMRequestNonStreaming,
@@ -33,7 +33,7 @@ export class OpenAICompatibleProvider implements BaseLLMProvider {
	}

	async generateResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestNonStreaming,
		options?: LLMOptions,
	): Promise<LLMResponseNonStreaming> {
@@ -47,7 +47,7 @@ export class OpenAICompatibleProvider implements BaseLLMProvider {
	}

	async streamResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestStreaming,
		options?: LLMOptions,
	): Promise<AsyncIterable<LLMResponseStreaming>> {
@@ -1,91 +1,77 @@
import OpenAI from 'openai'

-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
import {
	LLMOptions,
	LLMRequestNonStreaming,
	LLMRequestStreaming,
} from '../../types/llm/request'
import {
	LLMResponseNonStreaming,
	LLMResponseStreaming,
} from '../../types/llm/response'

import { BaseLLMProvider } from './base'
import {
	LLMAPIKeyInvalidException,
	LLMAPIKeyNotSetException,
} from './exception'
import { OpenAIMessageAdapter } from './openai-message-adapter'

export class OpenAIAuthenticatedProvider implements BaseLLMProvider {
	private adapter: OpenAIMessageAdapter
	private client: OpenAI

	constructor(apiKey: string) {
		this.client = new OpenAI({
			apiKey,
			dangerouslyAllowBrowser: true,
		})
		this.adapter = new OpenAIMessageAdapter()
	}

	async generateResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestNonStreaming,
		options?: LLMOptions,
	): Promise<LLMResponseNonStreaming> {
		if (!this.client.apiKey) {
-			if (!model.baseUrl) {
-				throw new LLMAPIKeyNotSetException(
-					'OpenAI API key is missing. Please set it in settings menu.',
-				)
-			}
-			this.client = new OpenAI({
-				apiKey: model.apiKey,
-				baseURL: model.baseUrl,
-				dangerouslyAllowBrowser: true,
-			})
+			throw new LLMAPIKeyNotSetException(
+				'OpenAI API key is missing. Please set it in settings menu.',
+			)
		}
		try {
			return this.adapter.generateResponse(this.client, request, options)
		} catch (error) {
			if (error instanceof OpenAI.AuthenticationError) {
				throw new LLMAPIKeyInvalidException(
					'OpenAI API key is invalid. Please update it in settings menu.',
				)
			}
			throw error
		}
	}

	async streamResponse(
-		model: CustomLLMModel,
+		model: LLMModel,
		request: LLMRequestStreaming,
		options?: LLMOptions,
	): Promise<AsyncIterable<LLMResponseStreaming>> {
		if (!this.client.apiKey) {
-			if (!model.baseUrl) {
-				throw new LLMAPIKeyNotSetException(
-					'OpenAI API key is missing. Please set it in settings menu.',
-				)
-			}
-			this.client = new OpenAI({
-				apiKey: model.apiKey,
-				baseURL: model.baseUrl,
-				dangerouslyAllowBrowser: true,
-			})
+			throw new LLMAPIKeyNotSetException(
+				'OpenAI API key is missing. Please set it in settings menu.',
+			)
		}

		try {
			return this.adapter.streamResponse(this.client, request, options)
		} catch (error) {
			if (error instanceof OpenAI.AuthenticationError) {
				throw new LLMAPIKeyInvalidException(
					'OpenAI API key is invalid. Please update it in settings menu.',
				)
			}
			throw error
		}
	}
}
@@ -1,7 +1,11 @@
import { GoogleGenerativeAI } from '@google/generative-ai'
import { OpenAI } from 'openai'

+import { ALIBABA_QWEN_BASE_URL, OPENAI_BASE_URL, SILICONFLOW_BASE_URL } from "../../constants"
import { EmbeddingModel } from '../../types/embedding'
+import { ApiProvider } from '../../types/llm/model'
+import { InfioSettings } from '../../types/settings'
+import { GetEmbeddingModelInfo } from '../../utils/api'
import {
	LLMAPIKeyNotSetException,
	LLMBaseUrlNotSetException,
@@ -10,22 +14,20 @@ import {
import { NoStainlessOpenAI } from '../llm/ollama'

export const getEmbeddingModel = (
-	embeddingModelId: string,
-	apiKeys: {
-		openAIApiKey: string
-		geminiApiKey: string
-	},
-	ollamaBaseUrl: string,
+	settings: InfioSettings,
): EmbeddingModel => {
-	switch (embeddingModelId) {
-		case 'text-embedding-3-small': {
+	switch (settings.embeddingModelProvider) {
+		case ApiProvider.OpenAI: {
+			const baseURL = settings.openaiProvider.useCustomUrl ? settings.openaiProvider.baseUrl : OPENAI_BASE_URL
			const openai = new OpenAI({
-				apiKey: apiKeys.openAIApiKey,
+				apiKey: settings.openaiProvider.apiKey,
+				baseURL: baseURL,
				dangerouslyAllowBrowser: true,
			})
+			const modelInfo = GetEmbeddingModelInfo(settings.embeddingModelProvider, settings.embeddingModelId)
			return {
-				id: 'text-embedding-3-small',
-				dimension: 1536,
+				id: settings.embeddingModelId,
+				dimension: modelInfo.dimensions,
				getEmbedding: async (text: string) => {
					try {
						if (!openai.apiKey) {
@@ -34,7 +36,7 @@ export const getEmbeddingModel = (
							)
						}
						const embedding = await openai.embeddings.create({
-							model: 'text-embedding-3-small',
+							model: settings.embeddingModelId,
							input: text,
						})
						return embedding.data[0].embedding
@@ -52,12 +54,87 @@ export const getEmbeddingModel = (
				},
			}
		}
-		case 'text-embedding-004': {
-			const client = new GoogleGenerativeAI(apiKeys.geminiApiKey)
-			const model = client.getGenerativeModel({ model: 'text-embedding-004' })
-			return {
-				id: 'text-embedding-004',
-				dimension: 768,
+		case ApiProvider.SiliconFlow: {
+			const baseURL = settings.siliconflowProvider.useCustomUrl ? settings.siliconflowProvider.baseUrl : SILICONFLOW_BASE_URL
+			const openai = new OpenAI({
+				apiKey: settings.siliconflowProvider.apiKey,
+				baseURL: baseURL,
+				dangerouslyAllowBrowser: true,
+			})
+			const modelInfo = GetEmbeddingModelInfo(settings.embeddingModelProvider, settings.embeddingModelId)
+			return {
+				id: settings.embeddingModelId,
+				dimension: modelInfo.dimensions,
+				getEmbedding: async (text: string) => {
+					try {
+						if (!openai.apiKey) {
+							throw new LLMAPIKeyNotSetException(
+								'SiliconFlow API key is missing. Please set it in settings menu.',
+							)
+						}
+						const embedding = await openai.embeddings.create({
+							model: settings.embeddingModelId,
+							input: text,
+						})
+						return embedding.data[0].embedding
+					} catch (error) {
+						if (
+							error.status === 429 &&
+							error.message.toLowerCase().includes('rate limit')
+						) {
+							throw new LLMRateLimitExceededException(
+								'SiliconFlow API rate limit exceeded. Please try again later.',
+							)
+						}
+						throw error
+					}
+				},
+			}
+		}
+		case ApiProvider.AlibabaQwen: {
+			const baseURL = settings.alibabaQwenProvider.useCustomUrl ? settings.alibabaQwenProvider.baseUrl : ALIBABA_QWEN_BASE_URL
+			const openai = new OpenAI({
+				apiKey: settings.alibabaQwenProvider.apiKey,
+				baseURL: baseURL,
+				dangerouslyAllowBrowser: true,
+			})
+			const modelInfo = GetEmbeddingModelInfo(settings.embeddingModelProvider, settings.embeddingModelId)
+			return {
+				id: settings.embeddingModelId,
+				dimension: modelInfo.dimensions,
+				getEmbedding: async (text: string) => {
+					try {
+						if (!openai.apiKey) {
+							throw new LLMAPIKeyNotSetException(
+								'Alibaba Qwen API key is missing. Please set it in settings menu.',
+							)
+						}
+						const embedding = await openai.embeddings.create({
+							model: settings.embeddingModelId,
+							input: text,
+						})
+						return embedding.data[0].embedding
+					} catch (error) {
+						if (
+							error.status === 429 &&
+							error.message.toLowerCase().includes('rate limit')
+						) {
+							throw new LLMRateLimitExceededException(
+								'Alibaba Qwen API rate limit exceeded. Please try again later.',
+							)
+						}
+						throw error
+					}
+				},
+			}
+		}
+		case ApiProvider.Google: {
+			const client = new GoogleGenerativeAI(settings.googleProvider.apiKey)
+			const model = client.getGenerativeModel({ model: settings.embeddingModelId })
+			const modelInfo = GetEmbeddingModelInfo(settings.embeddingModelProvider, settings.embeddingModelId)
+			return {
+				id: settings.embeddingModelId,
+				dimension: modelInfo.dimensions,
				getEmbedding: async (text: string) => {
					try {
						const response = await model.embedContent(text)
@@ -76,69 +153,24 @@ export const getEmbeddingModel = (
				},
			}
		}
-		case 'nomic-embed-text': {
+		case ApiProvider.Ollama: {
			const openai = new NoStainlessOpenAI({
-				apiKey: '',
+				apiKey: settings.ollamaProvider.apiKey,
				dangerouslyAllowBrowser: true,
-				baseURL: `${ollamaBaseUrl}/v1`,
+				baseURL: `${settings.ollamaProvider.baseUrl}/v1`,
			})
+			const modelInfo = GetEmbeddingModelInfo(settings.embeddingModelProvider, settings.embeddingModelId)
			return {
-				id: 'nomic-embed-text',
-				dimension: 768,
+				id: settings.embeddingModelId,
+				dimension: modelInfo.dimensions,
				getEmbedding: async (text: string) => {
-					if (!ollamaBaseUrl) {
+					if (!settings.ollamaProvider.baseUrl) {
						throw new LLMBaseUrlNotSetException(
							'Ollama Address is missing. Please set it in settings menu.',
						)
					}
					const embedding = await openai.embeddings.create({
-						model: 'nomic-embed-text',
+						model: settings.embeddingModelId,
						input: text,
					})
					return embedding.data[0].embedding
				},
			}
		}
-		case 'mxbai-embed-large': {
-			const openai = new NoStainlessOpenAI({
-				apiKey: '',
-				dangerouslyAllowBrowser: true,
-				baseURL: `${ollamaBaseUrl}/v1`,
-			})
-			return {
-				id: 'mxbai-embed-large',
-				dimension: 1024,
-				getEmbedding: async (text: string) => {
-					if (!ollamaBaseUrl) {
-						throw new LLMBaseUrlNotSetException(
-							'Ollama Address is missing. Please set it in settings menu.',
-						)
-					}
-					const embedding = await openai.embeddings.create({
-						model: 'mxbai-embed-large',
-						input: text,
-					})
-					return embedding.data[0].embedding
-				},
-			}
-		}
-		case 'bge-m3': {
-			const openai = new NoStainlessOpenAI({
-				apiKey: '',
-				dangerouslyAllowBrowser: true,
-				baseURL: `${ollamaBaseUrl}/v1`,
-			})
-			return {
-				id: 'bge-m3',
-				dimension: 1024,
-				getEmbedding: async (text: string) => {
-					if (!ollamaBaseUrl) {
-						throw new LLMBaseUrlNotSetException(
-							'Ollama Address is missing. Please set it in settings menu.',
-						)
-					}
-					const embedding = await openai.embeddings.create({
-						model: 'bge-m3',
-						input: text,
-					})
-					return embedding.data[0].embedding
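Embedding model selection now follows the same pattern: the provider comes from settings rather than from a hardcoded model-ID switch. A minimal sketch, assuming only the getEmbeddingModel signature shown above:

	// Sketch only: provider and model ID are read from InfioSettings.
	const embeddingModel = getEmbeddingModel(settings)
	const vector = await embeddingModel.getEmbedding('some note text')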
@@ -23,26 +23,12 @@ export class RAGEngine {
	this.app = app
	this.settings = settings
	this.vectorManager = dbManager.getVectorManager()
-	this.embeddingModel = getEmbeddingModel(
-		settings.embeddingModelId,
-		{
-			openAIApiKey: settings.openAIApiKey,
-			geminiApiKey: settings.geminiApiKey,
-		},
-		settings.ollamaEmbeddingModel.baseUrl,
-	)
+	this.embeddingModel = getEmbeddingModel(settings)
}

setSettings(settings: InfioSettings) {
	this.settings = settings
-	this.embeddingModel = getEmbeddingModel(
-		settings.embeddingModelId,
-		{
-			openAIApiKey: settings.openAIApiKey,
-			geminiApiKey: settings.geminiApiKey,
-		},
-		settings.ollamaEmbeddingModel.baseUrl,
-	)
+	this.embeddingModel = getEmbeddingModel(settings)
}

// TODO: Implement automatic vault re-indexing when settings are changed.
@@ -1,7 +1,7 @@
import { SerializedLexicalNode } from 'lexical'

import { SUPPORT_EMBEDDING_SIMENTION } from '../constants'
-import { EmbeddingModelId } from '../types/embedding'
+// import { EmbeddingModelId } from '../types/embedding'

// PostgreSQL column types
type ColumnDefinition = {
@@ -30,6 +30,7 @@ import { getMentionableBlockData } from './utils/obsidian'
// Remember to rename these classes and interfaces!
export default class InfioPlugin extends Plugin {
	settings: InfioSettings
+	settingTab: InfioSettingTab
	settingsListeners: ((newSettings: InfioSettings) => void)[] = []
	initChatProps?: ChatProps
	dbManager: DBManager | null = null
@@ -41,6 +42,10 @@ export default class InfioPlugin extends Plugin {
	async onload() {
		await this.loadSettings()

+		// Add settings tab
+		this.settingTab = new InfioSettingTab(this.app, this)
+		this.addSettingTab(this.settingTab)
+
		// This creates an icon in the left ribbon.
		this.addRibbonIcon('wand-sparkles', 'Open infio copilot', () =>
			this.openChatView(),
@@ -49,14 +54,12 @@ export default class InfioPlugin extends Plugin {
		this.registerView(CHAT_VIEW_TYPE, (leaf) => new ChatView(leaf, this))
		this.registerView(APPLY_VIEW_TYPE, (leaf) => new ApplyView(leaf))

-		// This adds a settings tab so the user can configure various aspects of the plugin
-		this.addSettingTab(new InfioSettingTab(this.app, this))
-
		// Register markdown processor for ai blocks
		this.inlineEdit = new InlineEdit(this, this.settings);
		this.registerMarkdownCodeBlockProcessor("infioedit", (source, el, ctx) => {
			this.inlineEdit?.Processor(source, el, ctx);
		});

		// Update inlineEdit when settings change
		this.addSettingsListener((newSettings) => {
			this.inlineEdit = new InlineEdit(this, newSettings);
@@ -3,7 +3,8 @@ import React from "react";
import InfioPlugin from "../main";
import { InfioSettings } from "../types/settings";

-import ModelsSettings from "./ModelsSettings";
+// import ModelsSettings from "./ModelsSettings";
import ProviderSettings from "./ProviderSettings";

type CustomSettingsProps = {
	plugin: InfioPlugin;
@@ -14,16 +15,18 @@ const CustomSettings: React.FC<CustomSettingsProps> = ({ plugin }) => {

	const handleSettingsUpdate = async (newSettings: InfioSettings) => {
		await plugin.setSettings(newSettings);
+		// Force refresh the settings page to update dropdowns
+		plugin.settingTab.display();
	};

	return (
		<div>
-			<h1 style={{ display: "flex", alignItems: "center", justifyContent: "space-between" }}>
+			<h1 className="infio-llm-setting-title">
				<div>
					Infio Settings <small>v{settings.version}</small>
				</div>
			</h1>
-			<ModelsSettings settings={settings} setSettings={handleSettingsUpdate} />
			<ProviderSettings settings={settings} setSettings={handleSettingsUpdate} />
		</div>
	);
};
@ -1,4 +1,4 @@
import React from "react";
import React, { useEffect, useState } from "react";

export type DropdownComponentProps = {
  name: string;
@ -21,7 +21,7 @@ export const DropdownComponent: React.FC<DropdownComponentProps> = ({
  <select
    value={value}
    onChange={(e) => onChange(e.target.value)}
    className="infio-llm-setting-item-control"
    className="infio-llm-setting-item-control infio-llm-setting-model-id"
  >
    {options.map((option) => (
      <option key={option} value={option}>
@ -33,7 +33,7 @@ export const DropdownComponent: React.FC<DropdownComponentProps> = ({
  );

export type TextComponentProps = {
  name: string;
  name?: string;
  description?: string;
  placeholder: string;
  value: string;
@ -48,23 +48,49 @@ export const TextComponent: React.FC<TextComponentProps> = ({
  value,
  type = "text",
  onChange,
}) => (
  <div className="infio-llm-setting-item">
    <div className="infio-llm-setting-item-name">{name}</div>
    {description && <div className="infio-llm-setting-item-description">{description}</div>}
    <input
      type={type}
      className="infio-llm-setting-item-control"
      placeholder={placeholder}
      value={value}
      onChange={(e) => onChange(e.target.value)}
    />
  </div>
);
}) => {
  const [localValue, setLocalValue] = useState(value);

  // Update local value when prop value changes (e.g., provider change)
  useEffect(() => {
    setLocalValue(value);
  }, [value]);

  const handleChange = (e: React.ChangeEvent<HTMLInputElement>) => {
    setLocalValue(e.target.value);
  };

  const handleBlur = () => {
    if (localValue !== value) {
      onChange(localValue);
    }
  };

  const handleKeyDown = (e: React.KeyboardEvent<HTMLInputElement>) => {
    if (e.key === 'Enter') {
      e.currentTarget.blur();
    }
  };

  return (
    <div className="infio-llm-setting-item">
      <div className="infio-llm-setting-item-name">{name}</div>
      {description && <div className="infio-llm-setting-item-description">{description}</div>}
      <input
        type={type}
        className="infio-llm-setting-item-control"
        placeholder={placeholder}
        value={localValue}
        onChange={handleChange}
        onBlur={handleBlur}
        onKeyDown={handleKeyDown}
      />
    </div>
  );
};
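For context, a minimal call-site sketch of the rewritten TextComponent, mirroring the ProviderSettings usage later in this diff (the inline values are illustrative). Because edits live in localValue, onChange now fires once on blur or Enter rather than on every keystroke:

<TextComponent
  name="Groq API Key:"
  placeholder="Enter your API key"
  type="password"
  value={providerSetting.apiKey || ''}
  // called once, on blur or Enter, not per keystroke
  onChange={updateProviderApiKey}
/>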

export type ToggleComponentProps = {
  name?: string;
  description?: string;
  name: string;
  value: boolean;
  onChange: (value: boolean) => void;
  disabled?: boolean;
@ -72,14 +98,11 @@ export type ToggleComponentProps = {

export const ToggleComponent: React.FC<ToggleComponentProps> = ({
  name,
  description,
  value,
  onChange,
  disabled = false,
}) => (
  <div className="infio-llm-setting-item">
    {name && <div className="infio-llm-setting-item-name">{name}</div>}
    {description && <div className="infio-llm-setting-item-description">{description}</div>}
    <label className={`switch ${disabled ? "disabled" : ""}`}>
      <input
        type="checkbox"
@ -87,7 +110,7 @@ export const ToggleComponent: React.FC<ToggleComponentProps> = ({
        onChange={(e) => onChange(e.target.checked)}
        disabled={disabled}
      />
      <span className="slider round"></span>
      <span className="infio-llm-setting-checkbox-name">{name}</span>
    </label>
  </div>
);
314
src/settings/ProviderModelsPicker.tsx
Normal file
@ -0,0 +1,314 @@
import * as Popover from "@radix-ui/react-popover";
import Fuse, { FuseResult } from "fuse.js";
import React, { useEffect, useMemo, useRef, useState } from "react";

import { ApiProvider } from "../types/llm/model";
// import { PROVIDERS } from '../constants';
import { GetAllProviders, GetEmbeddingProviderModelIds, GetEmbeddingProviders, GetProviderModelIds } from "../utils/api";

type TextSegment = {
  text: string;
  isHighlighted: boolean;
};

type SearchableItem = {
  id: string;
  html: string | TextSegment[];
};

type HighlightedItem = {
  id: string;
  html: TextSegment[];
};

// Type guard for Record<string, unknown>
function isRecord(value: unknown): value is Record<string, unknown> {
  return typeof value === 'object' && value !== null;
}

// https://gist.github.com/evenfrost/1ba123656ded32fb7a0cd4651efd4db0
export const highlight = (fuseSearchResult: FuseResult<SearchableItem>[]): HighlightedItem[] => {
  const set = (obj: Record<string, unknown>, path: string, value: TextSegment[]): void => {
    const pathValue = path.split(".")
    let i: number
    let current = obj

    for (i = 0; i < pathValue.length - 1; i++) {
      const nextValue = current[pathValue[i]]
      if (isRecord(nextValue)) {
        current = nextValue
      } else {
        throw new Error(`Invalid path: ${path}`)
      }
    }

    current[pathValue[i]] = value
  }

  // Function to merge overlapping regions
  const mergeRegions = (regions: [number, number][]): [number, number][] => {
    if (regions.length === 0) return regions

    // Sort regions by start index
    regions.sort((a, b) => a[0] - b[0])

    const merged: [number, number][] = [regions[0]]

    for (let i = 1; i < regions.length; i++) {
      const last = merged[merged.length - 1]
      const current = regions[i]

      if (current[0] <= last[1] + 1) {
        // Overlapping or adjacent regions
        last[1] = Math.max(last[1], current[1])
      } else {
        merged.push(current)
      }
    }

    return merged
  }

  const generateHighlightedSegments = (inputText: string, regions: [number, number][] = []): TextSegment[] => {
    if (regions.length === 0) {
      return [{ text: inputText, isHighlighted: false }];
    }

    // Sort and merge overlapping regions
    const mergedRegions = mergeRegions(regions);
    const segments: TextSegment[] = [];
    let nextUnhighlightedRegionStartingIndex = 0;

    mergedRegions.forEach((region) => {
      const start = region[0];
      const end = region[1];
      const lastRegionNextIndex = end + 1;

      // Add unhighlighted segment before the highlight
      if (nextUnhighlightedRegionStartingIndex < start) {
        segments.push({
          text: inputText.substring(nextUnhighlightedRegionStartingIndex, start),
          isHighlighted: false,
        });
      }

      // Add highlighted segment
      segments.push({
        text: inputText.substring(start, lastRegionNextIndex),
        isHighlighted: true,
      });

      nextUnhighlightedRegionStartingIndex = lastRegionNextIndex;
    });

    // Add remaining unhighlighted text
    if (nextUnhighlightedRegionStartingIndex < inputText.length) {
      segments.push({
        text: inputText.substring(nextUnhighlightedRegionStartingIndex),
        isHighlighted: false,
      });
    }

    return segments;
  }

  return fuseSearchResult
    .filter(({ matches }) => matches && matches.length)
    .map(({ item, matches }): HighlightedItem => {
      const highlightedItem: HighlightedItem = {
        id: item.id,
        html: typeof item.html === 'string' ? [{ text: item.html, isHighlighted: false }] : [...item.html]
      }

      matches?.forEach((match) => {
        if (match.key && typeof match.value === "string" && match.indices) {
          const mergedIndices = mergeRegions([...match.indices])
          set(highlightedItem, match.key, generateHighlightedSegments(match.value, mergedIndices))
        }
      })

      return highlightedItem
    })
}
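A worked example of the pipeline above, with hypothetical data: Fuse reports character index ranges per match, mergeRegions collapses overlapping ranges, and generateHighlightedSegments slices the string around them.

const fuse = new Fuse([{ id: "gpt-4o-mini", html: "gpt-4o-mini" }], {
  keys: ["html"],
  includeMatches: true, // highlight() reads match.indices, so this is required
})
// Searching "4o" yields indices such as [[4, 5]], which highlight() turns into:
//   [{ text: "gpt-", isHighlighted: false },
//    { text: "4o", isHighlighted: true },
//    { text: "-mini", isHighlighted: false }]
const segments = highlight(fuse.search("4o"))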

const HighlightedText: React.FC<{ segments: TextSegment[] }> = ({ segments }) => {
  return (
    <>
      {segments.map((segment, index) => (
        segment.isHighlighted ? (
          <span key={index} className="infio-llm-setting-model-item-highlight">{segment.text}</span>
        ) : (
          <span key={index}>{segment.text}</span>
        )
      ))}
    </>
  );
};

export type ComboBoxComponentProps = {
  name: string;
  provider: ApiProvider;
  modelId: string;
  isEmbedding?: boolean,
  updateModel: (provider: ApiProvider, modelId: string) => void;
};

export const ComboBoxComponent: React.FC<ComboBoxComponentProps> = ({
  name,
  provider,
  modelId,
  isEmbedding = false,
  updateModel,
}) => {
  // Provider selection state
  const [modelProvider, setModelProvider] = useState(provider);

  // Search input state
  const [searchTerm, setSearchTerm] = useState("");
  const [isOpen, setIsOpen] = useState(false);
  const [selectedIndex, setSelectedIndex] = useState(0);

  const providers = isEmbedding ? GetEmbeddingProviders() : GetAllProviders()

  const modelIds = useMemo(() => {
    return isEmbedding ? GetEmbeddingProviderModelIds(modelProvider) : GetProviderModelIds(modelProvider)
  }, [modelProvider])

  const searchableItems = useMemo(() => {
    return modelIds.map((id) => ({
      id,
      html: id,
    }))
  }, [modelIds])

  // Initialize Fuse for fuzzy search; a simple config, the threshold can be tuned as needed
  const fuse = useMemo(() => {
    return new Fuse(searchableItems, {
      keys: ["html"],
      threshold: 0.6,
      shouldSort: true,
      isCaseSensitive: false,
      ignoreLocation: false,
      includeMatches: true,
      minMatchCharLength: 1,
    })
  }, [searchableItems])

  // Derive the filtered option list from searchTerm
  const filteredOptions = useMemo(() => {
    const results: HighlightedItem[] = searchTerm
      ? highlight(fuse.search(searchTerm))
      : searchableItems.map(item => ({
        ...item,
        html: [{ text: item.html, isHighlighted: false }]
      }))
    return results
  }, [searchableItems, searchTerm, fuse])

  const listRef = useRef<HTMLDivElement>(null);
  const itemRefs = useRef<Array<HTMLDivElement | null>>([]);

  // Scroll the selected item into view whenever it changes
  useEffect(() => {
    if (itemRefs.current[selectedIndex]) {
      itemRefs.current[selectedIndex]?.scrollIntoView({
        block: "nearest",
        behavior: "smooth"
      });
    }
  }, [selectedIndex]);

  return (
    <div className="infio-llm-setting-item">
      <div className="infio-llm-setting-item-name">{name}</div>
      <Popover.Root modal={false} open={isOpen} onOpenChange={setIsOpen}>
        <Popover.Trigger asChild>
          <div className="infio-llm-setting-item-control">
            <span className="infio-llm-setting-model-id">{modelId}</span>
          </div>
        </Popover.Trigger>
        <Popover.Content
          side="bottom"
          align="start"
          sideOffset={4}
          className="infio-llm-setting-combobox-dropdown"
        >
          <div ref={listRef}>
            <div className="infio-llm-setting-search-container">
              <select
                className="infio-llm-setting-provider-switch"
                value={modelProvider}
                onChange={(e) => setModelProvider(e.target.value as ApiProvider)}
              >
                {providers.map((provider) => (
                  <option
                    key={provider}
                    value={provider}
                    className={`infio-llm-setting-provider-option ${provider === modelProvider ? 'is-active' : ''}`}
                  >
                    {provider}
                  </option>
                ))}
              </select>
              <input
                type="text"
                className="infio-llm-setting-item-search"
                placeholder="search model..."
                value={searchTerm}
                onChange={(e) => {
                  setSearchTerm(e.target.value);
                  setSelectedIndex(0);
                }}
                onKeyDown={(e) => {
                  switch (e.key) {
                    case "ArrowDown":
                      e.preventDefault();
                      setSelectedIndex((prev) =>
                        Math.min(prev + 1, filteredOptions.length - 1)
                      );
                      break;
                    case "ArrowUp":
                      e.preventDefault();
                      setSelectedIndex((prev) => Math.max(prev - 1, 0));
                      break;
                    case "Enter": {
                      e.preventDefault();
                      const selectedOption = filteredOptions[selectedIndex];
                      if (selectedOption) {
                        updateModel(modelProvider, selectedOption.id);
                        setSearchTerm("");
                        setIsOpen(false);
                      }
                      break;
                    }
                    case "Escape":
                      e.preventDefault();
                      setIsOpen(false);
                      setSearchTerm("");
                      break;
                  }
                }}
              />
            </div>
            {filteredOptions.map((option, index) => (
              <Popover.Close key={option.id} asChild>
                <div
                  ref={(el) => (itemRefs.current[index] = el)}
                  onMouseEnter={() => setSelectedIndex(index)}
                  onClick={() => {
                    updateModel(modelProvider, option.id);
                    setSearchTerm("");
                    setIsOpen(false);
                  }}
                  className={`infio-llm-setting-combobox-option ${index === selectedIndex ? 'is-selected' : ''}`}
                >
                  <HighlightedText segments={option.html} />
                </div>
              </Popover.Close>
            ))}
          </div>
        </Popover.Content>
      </Popover.Root>
    </div>
  );
};
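A call-site sketch for the picker; this mirrors the ProviderSettings usage later in this diff, with the inline updater as the hypothetical part:

<ComboBoxComponent
  name="Chat Model:"
  provider={settings.chatModelProvider}
  modelId={settings.chatModelId}
  updateModel={(provider, modelId) =>
    setSettings({ ...settings, chatModelProvider: provider, chatModelId: modelId })
  }
/>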
186
src/settings/ProviderSettings.tsx
Normal file
@ -0,0 +1,186 @@
import React, { useMemo, useState } from 'react';

// import { PROVIDERS } from '../constants';
import { ApiProvider } from '../types/llm/model';
import { InfioSettings } from '../types/settings';
import { GetAllProviders } from '../utils/api';
// import { siliconFlowDefaultModelId } from '../utils/api';

import { DropdownComponent, TextComponent, ToggleComponent } from './FormComponents';
import { ComboBoxComponent } from './ProviderModelsPicker';

type ProviderSettingKey =
  | 'infioProvider'
  | 'openrouterProvider'
  | 'openaiProvider'
  | 'siliconflowProvider'
  | 'alibabaQwenProvider'
  | 'anthropicProvider'
  | 'deepseekProvider'
  | 'googleProvider'
  | 'groqProvider'
  | 'ollamaProvider'
  | 'openaicompatibleProvider';

interface ProviderSettingsProps {
  settings: InfioSettings;
  setSettings: (settings: InfioSettings) => Promise<void>;
}

const keyMap: Record<ApiProvider, ProviderSettingKey> = {
  'Infio': 'infioProvider',
  'OpenRouter': 'openrouterProvider',
  'OpenAI': 'openaiProvider',
  'SiliconFlow': 'siliconflowProvider',
  'AlibabaQwen': 'alibabaQwenProvider',
  'Anthropic': 'anthropicProvider',
  'Deepseek': 'deepseekProvider',
  'Google': 'googleProvider',
  'Groq': 'groqProvider',
  'Ollama': 'ollamaProvider',
  'OpenAICompatible': 'openaicompatibleProvider'
};

const getProviderSettingKey = (provider: ApiProvider): ProviderSettingKey => {
  return keyMap[provider];
};

const PROVIDERS = GetAllProviders();

const ProviderSettings: React.FC<ProviderSettingsProps> = ({ settings, setSettings }) => {
  const [currProvider, setCurrProvider] = useState(settings.defaultProvider);

  const providerSetting = useMemo(() => {
    const providerKey = getProviderSettingKey(currProvider);
    return settings[providerKey] || {};
  }, [currProvider, settings]);

  const updateProvider = (provider: ApiProvider) => {
    setCurrProvider(provider);
    setSettings({
      ...settings,
      defaultProvider: provider
    });
  };

  const updateProviderApiKey = (value: string) => {
    const providerKey = getProviderSettingKey(currProvider);
    const providerSettings = settings[providerKey];

    setSettings({
      ...settings,
      [providerKey]: {
        ...providerSettings,
        apiKey: value
      }
    });
  };

  const updateProviderUseCustomUrl = (value: boolean) => {
    const providerKey = getProviderSettingKey(currProvider);
    const providerSettings = settings[providerKey];

    setSettings({
      ...settings,
      [providerKey]: {
        ...providerSettings,
        useCustomUrl: value
      }
    });
  };

  const updateProviderBaseUrl = (value: string) => {
    const providerKey = getProviderSettingKey(currProvider);
    const providerSettings = settings[providerKey];

    setSettings({
      ...settings,
      [providerKey]: {
        ...providerSettings,
        baseUrl: value
      }
    });
  };
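The three field setters above share one shape; a possible consolidation (not part of this commit, purely a sketch) is a single generic updater keyed by field name:

  // Hypothetical refactor: one setter for apiKey / baseUrl / useCustomUrl.
  const updateProviderField = <K extends 'apiKey' | 'baseUrl' | 'useCustomUrl'>(
    field: K,
    value: K extends 'useCustomUrl' ? boolean : string,
  ) => {
    const providerKey = getProviderSettingKey(currProvider);
    setSettings({
      ...settings,
      [providerKey]: { ...settings[providerKey], [field]: value },
    });
  };
  // e.g. updateProviderField('apiKey', value) would replace updateProviderApiKey(value)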

  const updateChatModelId = (provider: ApiProvider, modelId: string) => {
    setSettings({
      ...settings,
      chatModelProvider: provider,
      chatModelId: modelId
    });
  };

  const updateApplyModelId = (provider: ApiProvider, modelId: string) => {
    setSettings({
      ...settings,
      applyModelProvider: provider,
      applyModelId: modelId
    });
  };

  const updateEmbeddingModelId = (provider: ApiProvider, modelId: string) => {
    setSettings({
      ...settings,
      embeddingModelProvider: provider,
      embeddingModelId: modelId
    });
  };

  return (
    <div className="infio-provider">
      <DropdownComponent
        name="API Provider:"
        value={currProvider}
        options={PROVIDERS}
        onChange={updateProvider}
      />
      <div className="iinfio-llm-setting-divider"></div>
      <TextComponent
        name={currProvider + " API Key:"}
        placeholder="Enter your API key"
        value={providerSetting.apiKey || ''}
        onChange={updateProviderApiKey}
        type="password"
      />
      <div className="iinfio-llm-setting-divider"></div>
      <ToggleComponent
        name="Use custom base URL"
        value={providerSetting.useCustomUrl || false}
        onChange={updateProviderUseCustomUrl}
      />
      {providerSetting.useCustomUrl && (
        <TextComponent
          placeholder="Enter your custom API endpoint URL"
          value={providerSetting.baseUrl || ''}
          onChange={updateProviderBaseUrl}
        />
      )}

      <div className="iinfio-llm-setting-divider"></div>
      <div className="iinfio-llm-setting-divider"></div>
      <ComboBoxComponent
        name="Chat Model:"
        provider={settings.chatModelProvider || currProvider}
        modelId={settings.chatModelId}
        updateModel={updateChatModelId}
      />
      <div className="iinfio-llm-setting-divider"></div>
      <ComboBoxComponent
        name="Apply Model:"
        provider={settings.applyModelProvider || currProvider}
        modelId={settings.applyModelId}
        updateModel={updateApplyModelId}
      />
      <div className="iinfio-llm-setting-divider"></div>
      <ComboBoxComponent
        name="Embedding Model:"
        provider={settings.embeddingModelProvider || ApiProvider.Google}
        modelId={settings.embeddingModelId}
        isEmbedding={true}
        updateModel={updateEmbeddingModelId}
      />
    </div>
  );
};

export default ProviderSettings;
@ -31,8 +31,6 @@ export class InfioSettingTab extends PluginSettingTab {
    const { containerEl } = this
    containerEl.empty()
    this.renderModelsSection(containerEl)
    this.renderAPIKeysSection(containerEl)
    this.renderDefaultModelSection(containerEl)
    this.renderRAGSection(containerEl)
    this.renderAutoCompleteSection(containerEl)
  }
@ -108,7 +106,7 @@ export class InfioSettingTab extends PluginSettingTab {
    )

    new Setting(containerEl)
      .setName('Gemini API key')
      .setName('Google API key')
      .setClass("infio-chat-setting-item-container-append")
      .addText((text) =>
        text
@ -725,6 +723,8 @@ export class InfioSettingTab extends PluginSettingTab {
      <AutoCompleteSettings
        onSettingsChanged={async (settings) => {
          this.plugin.setSettings(settings);
          // Force refresh the settings page to update dropdowns
          this.plugin.settingTab.display();
        }}
        settings={this.plugin.settings}
      />
@ -2,7 +2,7 @@ import { SerializedEditorState } from 'lexical'

import { SelectVector } from '../database/schema'

import { CustomLLMModel } from './llm/model'
import { LLMModel } from './llm/model'
import { ContentPart } from './llm/request'
import { ResponseUsage } from './llm/response'
import { Mentionable, SerializedMentionable } from './mentionable'
@ -17,15 +17,17 @@ export type ChatUserMessage = {
    similarity: number
  })[]
}

export type ChatAssistantMessage = {
  role: 'assistant'
  content: string
  id: string
  metadata?: {
    usage?: ResponseUsage
    model?: CustomLLMModel
    model?: LLMModel
  }
}

export type ChatMessage = ChatUserMessage | ChatAssistantMessage

export type SerializedChatUserMessage = {
@ -38,15 +40,17 @@ export type SerializedChatUserMessage = {
    similarity: number
  })[]
}

export type SerializedChatAssistantMessage = {
  role: 'assistant'
  content: string
  id: string
  metadata?: {
    usage?: ResponseUsage
    model?: CustomLLMModel
    model?: LLMModel
  }
}

export type SerializedChatMessage =
  | SerializedChatUserMessage
  | SerializedChatAssistantMessage
@ -1,4 +1,6 @@
import { CustomLLMModel } from './llm/model'
import { LLMModel } from './llm/model'

import { EmbeddingModelInfo } from '../utils/api'

export type EmbeddingModelId =
  | 'text-embedding-3-small'
@ -10,12 +12,12 @@ export type EmbeddingModelId =
export type EmbeddingModelOption = {
  id: EmbeddingModelId
  name: string
  model: CustomLLMModel
  model: LLMModel
  dimension: number
}

export type EmbeddingModel = {
  id: EmbeddingModelId
  id: string
  dimension: number
  getEmbedding: (text: string) => Promise<number[]>
}
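An illustrative sketch of satisfying the EmbeddingModel interface above. The plugin's real implementation lives in the (suppressed) src/utils/api.ts; the endpoint shape here is the standard OpenAI embeddings API, and apiKey is assumed to come from settings:

declare const apiKey: string // assumed to be loaded from plugin settings

const embeddingModel: EmbeddingModel = {
  id: 'text-embedding-3-small',
  dimension: 1536, // published dimension for this OpenAI model
  getEmbedding: async (text: string) => {
    const res = await fetch('https://api.openai.com/v1/embeddings', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${apiKey}`,
      },
      body: JSON.stringify({ model: 'text-embedding-3-small', input: text }),
    })
    const json = await res.json()
    return json.data[0].embedding as number[]
  },
}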
@ -1,3 +1,22 @@
export enum ApiProvider {
  Infio = "Infio",
  OpenRouter = "OpenRouter",
  SiliconFlow = "SiliconFlow",
  AlibabaQwen = "AlibabaQwen",
  Anthropic = "Anthropic",
  Deepseek = "Deepseek",
  OpenAI = "OpenAI",
  Google = "Google",
  Groq = "Groq",
  Ollama = "Ollama",
  OpenAICompatible = "OpenAICompatible",
}

export type LLMModel = {
  provider: ApiProvider;
  modelId: string;
}
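A model is now addressed by its (provider, modelId) pair rather than a bare name, for example (the modelId string is illustrative, not taken from this diff):

const chatModel: LLMModel = {
  provider: ApiProvider.OpenRouter,
  modelId: 'anthropic/claude-3.5-sonnet', // illustrative id
}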

// Model Providers
export enum ModelProviders {
  OPENAI = "openai",
@ -1,6 +1,5 @@
import { z } from 'zod';

import { DEFAULT_MODELS } from '../constants';
import {
  MAX_DELAY,
@ -11,10 +10,143 @@ import {
  modelOptionsSchema
} from '../settings/versions/shared';
import { DEFAULT_AUTOCOMPLETE_SETTINGS } from "../settings/versions/v1/v1";
import { ApiProvider } from '../types/llm/model';
import { isRegexValid, isValidIgnorePattern } from '../utils/auto-complete';

export const SETTINGS_SCHEMA_VERSION = 0.1

const InfioProviderSchema = z.object({
  name: z.literal('Infio'),
  apiKey: z.string().catch(''),
  baseUrl: z.string().catch(''),
  useCustomUrl: z.boolean().catch(false)
}).catch({
  name: 'Infio',
  apiKey: '',
  baseUrl: '',
  useCustomUrl: false
})

const OpenRouterProviderSchema = z.object({
  name: z.literal('OpenRouter'),
  apiKey: z.string().catch(''),
  baseUrl: z.string().catch(''),
  useCustomUrl: z.boolean().catch(false)
}).catch({
  name: 'OpenRouter',
  apiKey: '',
  baseUrl: '',
  useCustomUrl: false
})

const SiliconFlowProviderSchema = z.object({
  name: z.literal('SiliconFlow'),
  apiKey: z.string().catch(''),
  baseUrl: z.string().catch(''),
  useCustomUrl: z.boolean().catch(false)
}).catch({
  name: 'SiliconFlow',
  apiKey: '',
  baseUrl: '',
  useCustomUrl: false
})

const AlibabaQwenProviderSchema = z.object({
  name: z.literal('AlibabaQwen'),
  apiKey: z.string().catch(''),
  baseUrl: z.string().catch(''),
  useCustomUrl: z.boolean().catch(false)
}).catch({
  name: 'AlibabaQwen',
  apiKey: '',
  baseUrl: '',
  useCustomUrl: false
})

const AnthropicProviderSchema = z.object({
  name: z.literal('Anthropic'),
  apiKey: z.string().catch(''),
  baseUrl: z.string().optional(),
  useCustomUrl: z.boolean().catch(false)
}).catch({
  name: 'Anthropic',
  apiKey: '',
  baseUrl: '',
  useCustomUrl: false
})

const DeepSeekProviderSchema = z.object({
  name: z.literal('DeepSeek'),
  apiKey: z.string().catch(''),
  baseUrl: z.string().catch(''),
  useCustomUrl: z.boolean().catch(false)
}).catch({
  name: 'DeepSeek',
  apiKey: '',
  baseUrl: '',
  useCustomUrl: false
})

const GoogleProviderSchema = z.object({
  name: z.literal('Google'),
  apiKey: z.string().catch(''),
  baseUrl: z.string().catch(''),
  useCustomUrl: z.boolean().catch(false)
}).catch({
  name: 'Google',
  apiKey: '',
  baseUrl: '',
  useCustomUrl: false
})

const OpenAIProviderSchema = z.object({
  name: z.literal('OpenAI'),
  apiKey: z.string().catch(''),
  baseUrl: z.string().optional(),
  useCustomUrl: z.boolean().catch(false)
}).catch({
  name: 'OpenAI',
  apiKey: '',
  baseUrl: '',
  useCustomUrl: false
})

const OpenAICompatibleProviderSchema = z.object({
  name: z.literal('OpenAICompatible'),
  apiKey: z.string().catch(''),
  baseUrl: z.string().optional(),
  useCustomUrl: z.boolean().catch(true)
}).catch({
  name: 'OpenAICompatible',
  apiKey: '',
  baseUrl: '',
  useCustomUrl: true
})

const OllamaProviderSchema = z.object({
  name: z.literal('Ollama'),
  apiKey: z.string().catch(''),
  baseUrl: z.string().catch(''),
  useCustomUrl: z.boolean().catch(false)
}).catch({
  name: 'Ollama',
  apiKey: '',
  baseUrl: '',
  useCustomUrl: false
})

const GroqProviderSchema = z.object({
  name: z.literal('Groq'),
  apiKey: z.string().catch(''),
  baseUrl: z.string().catch(''),
  useCustomUrl: z.boolean().catch(false)
}).catch({
  name: 'Groq',
  apiKey: '',
  baseUrl: '',
  useCustomUrl: false
})
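The eleven provider schemas above differ only in the literal name, the useCustomUrl default, and whether baseUrl is optional. A possible factory (not in this commit; it ignores the optional-baseUrl variants) would collapse the repetition. Note how zod's .catch() works here: when parsing fails, the fallback is substituted instead of throwing, so stale or missing saved settings degrade to usable defaults on load.

const makeProviderSchema = <N extends string>(name: N, useCustomUrl = false) =>
  z.object({
    name: z.literal(name),
    apiKey: z.string().catch(''),      // invalid or missing -> ''
    baseUrl: z.string().catch(''),
    useCustomUrl: z.boolean().catch(useCustomUrl),
  }).catch({ name, apiKey: '', baseUrl: '', useCustomUrl })

// e.g. makeProviderSchema('Groq') or makeProviderSchema('OpenAICompatible', true)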

const ollamaModelSchema = z.object({
  baseUrl: z.string().catch(''),
  model: z.string().catch(''),
@ -61,7 +193,34 @@ export const InfioSettingsSchema = z.object({
  // Version
  version: z.literal(SETTINGS_SCHEMA_VERSION).catch(SETTINGS_SCHEMA_VERSION),

  // activeModels
  // Provider
  defaultProvider: z.nativeEnum(ApiProvider).catch(ApiProvider.OpenRouter),
  infioProvider: InfioProviderSchema,
  openrouterProvider: OpenRouterProviderSchema,
  siliconflowProvider: SiliconFlowProviderSchema,
  alibabaQwenProvider: AlibabaQwenProviderSchema,
  anthropicProvider: AnthropicProviderSchema,
  deepseekProvider: DeepSeekProviderSchema,
  openaiProvider: OpenAIProviderSchema,
  googleProvider: GoogleProviderSchema,
  ollamaProvider: OllamaProviderSchema,
  groqProvider: GroqProviderSchema,
  openaicompatibleProvider: OpenAICompatibleProviderSchema,

  // Chat Model
  chatModelProvider: z.nativeEnum(ApiProvider).catch(ApiProvider.OpenRouter),
  chatModelId: z.string().catch(''),

  // Apply Model
  applyModelProvider: z.nativeEnum(ApiProvider).catch(ApiProvider.OpenRouter),
  applyModelId: z.string().catch(''),

  // Embedding Model
  embeddingModelProvider: z.nativeEnum(ApiProvider).catch(ApiProvider.Google),
  embeddingModelId: z.string().catch(''),

  /// [compatible]
  // activeModels [compatible]
  activeModels: z.array(
    z.object({
      name: z.string(),
@ -74,17 +233,17 @@
      dimension: z.number().optional(),
    })
  ).catch(DEFAULT_MODELS),

  // API Keys
  // API Keys [compatible]
  infioApiKey: z.string().catch(''),
  openAIApiKey: z.string().catch(''),
  anthropicApiKey: z.string().catch(''),
  geminiApiKey: z.string().catch(''),
  groqApiKey: z.string().catch(''),
  deepseekApiKey: z.string().catch(''),

  // DEFAULT Chat Model
  chatModelId: z.string().catch('deepseek-chat'),
  ollamaEmbeddingModel: ollamaModelSchema.catch({
    baseUrl: '',
    model: '',
  }),
  ollamaChatModel: ollamaModelSchema.catch({
    baseUrl: '',
    model: '',
@ -94,9 +253,6 @@
    apiKey: '',
    model: '',
  }),

  // DEFAULT Apply Model
  applyModelId: z.string().catch('deepseek-chat'),
  ollamaApplyModel: ollamaModelSchema.catch({
    baseUrl: '',
    model: '',
@ -107,15 +263,6 @@
    model: '',
  }),

  // DEFAULT Embedding Model
  embeddingModelId: z.string().catch(
    'text-embedding-004',
  ),
  ollamaEmbeddingModel: ollamaModelSchema.catch({
    baseUrl: '',
    model: '',
  }),

  // System Prompt
  systemPrompt: z.string().catch(''),

@ -132,10 +279,13 @@
  // autocomplete options
  autocompleteEnabled: z.boolean(),
  advancedMode: z.boolean(),

  // [compatible]
  apiProvider: z.enum(['azure', 'openai', "ollama"]),
  azureOAIApiSettings: z.string().catch(''),
  openAIApiSettings: z.string().catch(''),
  ollamaApiSettings: z.string().catch(''),

  triggers: z.array(triggerSchema),
  delay: z.number().int().min(MIN_DELAY, { message: "Delay must be between 0ms and 2000ms" }).max(MAX_DELAY, { message: "Delay must be between 0ms and 2000ms" }),
  modelOptions: modelOptionsSchema,
1202
src/utils/api.ts
Normal file
File diff suppressed because it is too large
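The new 1,202-line src/utils/api.ts is suppressed above. Inferring only from its call sites in this diff (the imports in ProviderModelsPicker.tsx, the EmbeddingModelInfo import in the embedding types, and price-calculator.ts below), its surface presumably includes something like the following; the function names are real, the parameter and return shapes are assumptions:

// Inferred surface only; shapes are guesses based on usage elsewhere in this commit.
type ModelInfo = {
  inputPrice: number   // assumed price per prompt token (see price-calculator.ts)
  outputPrice: number  // assumed price per completion token
}

declare function GetAllProviders(): ApiProvider[]
declare function GetEmbeddingProviders(): ApiProvider[]
declare function GetProviderModelIds(provider: ApiProvider): string[]
declare function GetEmbeddingProviderModelIds(provider: ApiProvider): string[]
// price-calculator.ts indexes the result by modelId and reads inputPrice/outputPrice:
declare function GetProviderModels(provider: ApiProvider): Record<string, ModelInfo> | undefined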
@ -1,58 +1,24 @@
import {
  ANTHROPIC_PRICES,
  GEMINI_PRICES,
  GROQ_PRICES,
  OPENAI_PRICES,
} from '../constants'
import { CustomLLMModel } from '../types/llm/model'
import { LLMModel } from '../types/llm/model'
import { ResponseUsage } from '../types/llm/response'

import { GetProviderModels } from './api'

// Returns the cost in dollars. Returns null if the model is not supported.
export const calculateLLMCost = ({
  model,
  usage,
}: {
  model: CustomLLMModel
  model: LLMModel
  usage: ResponseUsage
}): number | null => {
  switch (model.provider) {
    case 'openai': {
      const modelPricing = OPENAI_PRICES[model.name]
      if (!modelPricing) return null
      return (
        (usage.prompt_tokens * modelPricing.input +
          usage.completion_tokens * modelPricing.output) /
        1_000_000
      )
    }
    case 'anthropic': {
      const modelPricing = ANTHROPIC_PRICES[model.name]
      if (!modelPricing) return null
      return (
        (usage.prompt_tokens * modelPricing.input +
          usage.completion_tokens * modelPricing.output) /
        1_000_000
      )
    }
    case 'gemini': {
      const modelPricing = GEMINI_PRICES[model.name]
      if (!modelPricing) return null
      return (
        (usage.prompt_tokens * modelPricing.input +
          usage.completion_tokens * modelPricing.output) /
        1_000_000
      )
    }
    case 'groq': {
      const modelPricing = GROQ_PRICES[model.name]
      if (!modelPricing) return null
      return (
        (usage.prompt_tokens * modelPricing.input +
          usage.completion_tokens * modelPricing.output) /
        1_000_000
      )
    }
    default:
      return null
  const providerModels = GetProviderModels(model.provider)
  if (!providerModels) {
    return null
  }
  const modelInfo = providerModels[model.modelId]
  if (!modelInfo) {
    return null
  }
  const cost = modelInfo.inputPrice * usage.prompt_tokens + modelInfo.outputPrice * usage.completion_tokens
  return cost
}
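Note the unit change: the old branches divided by 1,000,000 (prices stored per million tokens), while the new code multiplies token counts by inputPrice/outputPrice directly, which implies GetProviderModels returns per-token prices. A worked example under that assumption, with hypothetical pricing of $3 per 1M prompt tokens and $15 per 1M completion tokens:

const modelInfo = { inputPrice: 3 / 1_000_000, outputPrice: 15 / 1_000_000 }
const usage = { prompt_tokens: 2_000, completion_tokens: 500 }
// 2000 * 0.000003 + 500 * 0.000015 = 0.006 + 0.0075 = 0.0135 dollars
const cost = modelInfo.inputPrice * usage.prompt_tokens + modelInfo.outputPrice * usage.completion_tokens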
168
styles.css
@ -1092,7 +1092,7 @@ input[type='text'].infio-chat-list-dropdown-item-title-input {
  justify-content: center;
  align-items: center;
  height: 100%;
  margin: 0;
  margin-top: 5px !important; /* use !important to force the override */
}

.infio-llm-model-settings-table .switch {
@ -1107,6 +1107,10 @@ input[type='text'].infio-chat-list-dropdown-item-title-input {
  margin-top: 20px;
}

.infio-provider {
  margin-top: 20px;
}

.infio-llm-chat-setting-title {
  border-top: 1px solid var(--background-modifier-border);
}
@ -1119,9 +1123,171 @@ input[type='text'].infio-chat-list-dropdown-item-title-input {
  margin-bottom: 3px;
}

.infio-llm-setting-checkbox-name {
  font-weight: bold;
  color: var(--inline-title-color);
}

.infio-llm-setting-select-trigger {
  display: inline-flex;
  align-items: center;
  justify-content: space-between;
  padding: var(--size-2-1) var(--size-4-1);
  font-size: var(--font-ui-smaller);
  color: var(--text-normal);
  background-color: var(--background-primary);
  border: 1px solid var(--background-modifier-border);
  border-radius: var(--radius-s);
  cursor: pointer;
  width: 100%;
  margin-bottom: var(--size-4-1);
}

.infio-llm-setting-select-content {
  overflow: hidden;
  background-color: var(--background-primary);
  border: 1px solid var(--background-modifier-border);
  border-radius: var(--radius-s);
  box-shadow: var(--shadow-s);
}

.infio-llm-setting-select-item {
  font-size: var(--font-ui-smaller);
  color: var(--text-normal);
  padding: var(--size-2-1) var(--size-4-2);
  display: flex;
  align-items: center;
  justify-content: space-between;
  cursor: pointer;
  outline: none;
}

.infio-llm-setting-select-item:hover {
  background-color: var(--background-modifier-hover);
}

.infio-llm-setting-select-item[data-highlighted] {
  background-color: var(--background-modifier-hover);
  color: var(--text-normal);
}

.infio-llm-setting-select-indicator {
  color: var(--text-accent);
  padding-left: var(--size-4-1);
}

.iinfio-llm-setting-divider {
  margin-top: 14px;
}

.infio-llm-setting-slider-round {
  font-weight: bold;
  color: var(--inline-title-color);
}

.infio-llm-setting-item-control {
  width: 50%; /* Adjust the width as needed */
  max-width: 100%; /* Ensures it doesn't exceed the parent width */
  background-color: var(--background-primary);
  color: var(--text-normal);
  border: 1px solid var(--background-modifier-border);
  border-radius: var(--radius-s);
  padding: var(--size-2-1);
}

.infio-llm-setting-model-id {
  color: var(--text-accent);
}

/* Add hover and focus states for better interactivity */
.infio-llm-setting-item-control:hover {
  border-color: var(--background-modifier-border-hover);
}

.infio-llm-setting-item-control:focus {
  border-color: var(--background-modifier-border-focus);
  outline: none;
}

.infio-llm-setting-combobox-dropdown {
  margin-top: 4px;
  max-height: 200px;
  overflow-y: auto;
  background-color: var(--background-primary);
  color: var(--text-normal);
  border: 1px solid var(--background-modifier-border);
  border-radius: 0;
  z-index: 1000;
  padding: 2px 0;
  box-shadow: var(--shadow-s);
  width: var(--radix-popover-trigger-width);
  min-width: var(--radix-popover-trigger-width);
}

/* Container styles to keep the select and input on the same line */
.infio-llm-setting-search-container {
  display: flex;
  gap: 2px;
  align-items: center;
}

.infio-llm-setting-provider-switch {
  width: 26%;
  border-radius: 0;
  margin: 0;
  margin-left: 1px;
  padding: 0;
  background-color: var(--background-secondary);
  /* outline: none; */
  text-align: center;
  text-align-last: center;
  color: var(--text-accent);
}

.infio-llm-setting-provider-switch:focus {
  /* border: none; */
  outline: none;
  box-shadow: none;
}

.infio-llm-setting-item-search {
  width: 74%;
  border: none;
  margin: 0;
  padding: 0;
  /* background-color: var(--background-secondary); */
  outline: none;
  border-radius: 0 !important;
  -webkit-border-radius: 0 !important;
  -moz-border-radius: 0 !important;
  -ms-border-radius: 0 !important;
}

.infio-llm-setting-item-search:focus {
  border: none;
  outline: none;
  box-shadow: none;
}

.infio-llm-setting-combobox-option {
  padding: 8px 12px;
  cursor: pointer;
  background-color: transparent;
}

.infio-llm-setting-combobox-option:hover {
  background-color: var(--background-modifier-hover);
}

/*
 * Highlight styles
 */
.infio-llm-setting-model-item-highlight {
  background-color: var(--text-highlight-bg);
  color: var(--text-normal);
  border-radius: var(--radius-s);
  padding: 0 2px;
}

.infio-llm-setting-item-control::placeholder {