diff --git a/app/main.ts b/app/main.ts
index 833cd81..9378fc9 100644
--- a/app/main.ts
+++ b/app/main.ts
@@ -1,5 +1,5 @@
 /* eslint-disable @typescript-eslint/no-unused-vars */
-import {app, BrowserWindow, ipcMain, shell, dialog} from 'electron';
+import {app, BrowserWindow, ipcMain, shell, dialog, Tray, Menu, nativeImage} from 'electron';
 import * as path from 'path';
 import * as fs from 'fs';
 import * as os from 'os';
@@ -8,11 +8,151 @@ import { execSync } from 'child_process';
 // Main window reference
 let win: BrowserWindow | null = null;
+// Tray reference
+let tray: Tray | null = null;
+
+// Settings
+let closeToTray = true;
+let forceQuit = false; // Flag to indicate we're trying to actually quit
 
 // Check if app is running in development mode
 const args = process.argv.slice(1);
 const serve = args.some(val => val === '--serve');
 
+/**
+ * Create the system tray
+ */
+function createTray() {
+  if (tray) {
+    return;
+  }
+
+  // Get appropriate icon based on platform
+  const iconPath = path.join(__dirname, '..', 'dist', 'logos', 'favicon.256x256.png');
+  const icon = nativeImage.createFromPath(iconPath);
+
+  tray = new Tray(icon);
+  tray.setToolTip('TensorBlock Desktop');
+
+  const contextMenu = Menu.buildFromTemplate([
+    { label: 'Open TensorBlock', click: () => {
+      win?.show();
+      win?.setSkipTaskbar(false); // Show in taskbar
+    }},
+    { type: 'separator' },
+    { label: 'Quit', click: () => {
+      forceQuit = true; // Set flag to bypass close-to-tray
+      app.quit();
+    }}
+  ]);
+
+  tray.setContextMenu(contextMenu);
+
+  tray.on('click', () => {
+    if (win) {
+      if (win.isVisible()) {
+        win.hide();
+        win.setSkipTaskbar(true); // Hide from taskbar
+      } else {
+        win.show();
+        win.setSkipTaskbar(false); // Show in taskbar
+      }
+    }
+  });
+}
+
+/**
+ * Set or remove auto launch on system startup
+ */
+function setAutoLaunch(enable: boolean): boolean {
+  try {
+    if (process.platform === 'win32') {
+      const appPath = app.getPath('exe');
+      const regKey = 'HKCU\\Software\\Microsoft\\Windows\\CurrentVersion\\Run';
+      const appName = app.getName();
+
+      if (enable) {
+        // Add to registry to enable auto launch
+        execSync(`reg add ${regKey} /v ${appName} /t REG_SZ /d "${appPath}" /f`);
+      } else {
+        // Remove from registry to disable auto launch
+        execSync(`reg delete ${regKey} /v ${appName} /f`);
+      }
+      return true;
+    } else if (process.platform === 'darwin') {
+      const appPath = app.getPath('exe');
+      const loginItemSettings = app.getLoginItemSettings();
+
+      // Set login item settings for macOS
+      app.setLoginItemSettings({
+        openAtLogin: enable,
+        path: appPath
+      });
+
+      return app.getLoginItemSettings().openAtLogin === enable;
+    } else if (process.platform === 'linux') {
+      // For Linux, create or remove a .desktop file in autostart directory
+      const desktopFilePath = path.join(os.homedir(), '.config', 'autostart', `${app.getName()}.desktop`);
+
+      if (enable) {
+        // Create directory if it doesn't exist
+        const autoStartDir = path.dirname(desktopFilePath);
+        if (!fs.existsSync(autoStartDir)) {
+          fs.mkdirSync(autoStartDir, { recursive: true });
+        }
+
+        // Create .desktop file
+        const desktopFileContent = `
+[Desktop Entry]
+Type=Application
+Exec=${app.getPath('exe')}
+Hidden=false
+NoDisplay=false
+X-GNOME-Autostart-enabled=true
+Name=${app.getName()}
+Comment=${app.getName()} startup script
+`;
+        fs.writeFileSync(desktopFilePath, desktopFileContent);
+      } else if (fs.existsSync(desktopFilePath)) {
+        // Remove .desktop file
+        fs.unlinkSync(desktopFilePath);
+      }
+
+      return true;
+    }
+
+    return false;
+  } catch
(error) {
+    console.error('Error setting auto launch:', error);
+    return false;
+  }
+}
+
+/**
+ * Check if app is set to auto launch on system startup
+ */
+function getAutoLaunchEnabled(): boolean {
+  try {
+    if (process.platform === 'win32') {
+      const regKey = 'HKCU\\Software\\Microsoft\\Windows\\CurrentVersion\\Run';
+      const appName = app.getName();
+
+      const output = execSync(`reg query ${regKey} /v ${appName} 2>nul`).toString();
+      return output.includes(appName);
+    } else if (process.platform === 'darwin') {
+      return app.getLoginItemSettings().openAtLogin;
+    } else if (process.platform === 'linux') {
+      const desktopFilePath = path.join(os.homedir(), '.config', 'autostart', `${app.getName()}.desktop`);
+      return fs.existsSync(desktopFilePath);
+    }
+
+    return false;
+  } catch (error) {
+    // If command fails (e.g., key doesn't exist), auto launch is not enabled
+    return false;
+  }
+}
+
 /**
  * Creates the main application window
  */
@@ -29,8 +169,8 @@ function createWindow(): BrowserWindow {
     frame: false,
     fullscreenable: false,
     autoHideMenuBar: true,
-    minWidth: 600,
-    minHeight: 600,
+    minWidth: 800,
+    minHeight: 700,
     webPreferences: {
       preload: path.join(__dirname, 'preload.js'),
       contextIsolation: true,
@@ -180,7 +320,13 @@
   // Close application
   ipcMain.on('close-app', () => {
-    app.quit();
+    if (closeToTray && !forceQuit) {
+      win?.hide();
+      win?.setSkipTaskbar(true); // Hide from taskbar
+    } else {
+      forceQuit = true; // Ensure we're really quitting
+      app.quit();
+    }
   });
 
   // Open URL in default browser
@@ -188,6 +334,53 @@
     shell.openExternal(url);
   });
 
+  // Auto-startup handlers
+  ipcMain.handle('get-auto-launch', () => {
+    return getAutoLaunchEnabled();
+  });
+
+  ipcMain.handle('set-auto-launch', (event, enable) => {
+    return setAutoLaunch(enable);
+  });
+
+  // Tray handlers
+  ipcMain.handle('set-close-to-tray', (event, enable) => {
+    closeToTray = enable;
+    return true;
+  });
+
+  ipcMain.handle('get-close-to-tray', () => {
+    return closeToTray;
+  });
+
+  ipcMain.handle('set-startup-to-tray', (event, enable) => {
+    // Store this preference for the next app start
+    try {
+      const configPath = path.join(app.getPath('userData'), 'config.json');
+      let config = {};
+
+      if (fs.existsSync(configPath)) {
+        config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
+      }
+
+      config = { ...config, startupToTray: enable };
+      fs.writeFileSync(configPath, JSON.stringify(config));
+      return true;
+    } catch (error) {
+      console.error('Error saving startup to tray setting:', error);
+      return false;
+    }
+  });
+
+  // Listen for window close event
+  win.on('close', (e) => {
+    if (closeToTray && !forceQuit) {
+      e.preventDefault();
+      win?.hide();
+      win?.setSkipTaskbar(true); // Hide from taskbar
+    }
+  });
+
   // Disable page refresh in production
   if (process.env.NODE_ENV === 'production') {
     win.webContents.on('before-input-event', (event, input) => {
@@ -290,10 +483,36 @@ function getCPUName() {
 try {
   app.commandLine.appendSwitch('class', 'tensorblock-desktop');
+  // Set force quit flag when app is about to quit
+  app.on('before-quit', () => {
+    forceQuit = true;
+  });
+
   // Initialize app when Electron is ready
   // Added delay to fix black background issue with transparent windows
   // See: https://github.com/electron/electron/issues/15947
-  app.on('ready', () => setTimeout(createWindow, 400));
+  app.on('ready', () => {
+    setTimeout(() => {
+      // Create window
+      win = createWindow();
+
+      // Create tray
+      createTray();
+
+      // Check if we should
start minimized to tray + try { + const configPath = path.join(app.getPath('userData'), 'config.json'); + if (fs.existsSync(configPath)) { + const config = JSON.parse(fs.readFileSync(configPath, 'utf8')); + if (config.startupToTray) { + win.hide(); + } + } + } catch (error) { + console.error('Error reading config file:', error); + } + }, 400); + }); // Quit when all windows are closed app.on('window-all-closed', () => { @@ -303,7 +522,10 @@ try { // Re-create window if activated and no windows exist app.on('activate', () => { if (win === null) { - createWindow(); + win = createWindow(); + } else { + win.show(); + win.setSkipTaskbar(false); // Show in taskbar } }); } catch (_e) { diff --git a/app/preload.ts b/app/preload.ts index 2f19fba..2f59608 100644 --- a/app/preload.ts +++ b/app/preload.ts @@ -29,4 +29,11 @@ contextBridge.exposeInMainWorld('electron', { saveFile: (fileBuffer: ArrayBuffer | string, fileName: string, fileType: string) => ipcRenderer.invoke('save-file', { fileBuffer, fileName, fileType }), openFile: (filePath: string) => ipcRenderer.invoke('open-file', filePath), + + // Auto-startup and tray functions + getAutoLaunch: () => ipcRenderer.invoke('get-auto-launch'), + setAutoLaunch: (enable: boolean) => ipcRenderer.invoke('set-auto-launch', enable), + setCloseToTray: (enable: boolean) => ipcRenderer.invoke('set-close-to-tray', enable), + getCloseToTray: () => ipcRenderer.invoke('get-close-to-tray'), + setStartupToTray: (enable: boolean) => ipcRenderer.invoke('set-startup-to-tray', enable), }); \ No newline at end of file diff --git a/src/App.tsx b/src/App.tsx index 9f552d6..9fb46a6 100644 --- a/src/App.tsx +++ b/src/App.tsx @@ -3,6 +3,7 @@ import { ChatPage } from './components/pages/ChatPage'; import { ImageGenerationPage } from './components/pages/ImageGenerationPage'; import { TranslationPage } from './components/pages/TranslationPage'; import { FileManagementPage } from './components/pages/FileManagementPage'; +import { MCPServerPage } from './components/pages/MCPServerPage'; import MainLayout from './components/layout/MainLayout'; import DatabaseInitializer from './components/core/DatabaseInitializer'; @@ -38,6 +39,7 @@ function App() { {activePage === 'image' && } {activePage === 'translation' && } {activePage === 'files' && } + {activePage === 'mcpserver' && } ); diff --git a/src/components/chat/ChatMessageArea.tsx b/src/components/chat/ChatMessageArea.tsx index 6bb96c1..0e84431 100644 --- a/src/components/chat/ChatMessageArea.tsx +++ b/src/components/chat/ChatMessageArea.tsx @@ -1,6 +1,9 @@ import React, { useState, FormEvent, useRef, useEffect } from 'react'; import { Conversation, Message } from '../../types/chat'; -import { Send, Square, Copy, Pencil, Loader2, Globe, RefreshCw, Check, X } from 'lucide-react'; +import { MCPServerSettings } from '../../types/settings'; +import { Send, Square, Copy, Pencil, Loader2, Globe, RefreshCw, Check, X, + // ServerCog +} from 'lucide-react'; import MarkdownContent from './MarkdownContent'; import MessageToolboxMenu, { ToolboxAction } from '../ui/MessageToolboxMenu'; import { MessageHelper } from '../../services/message-helper'; @@ -12,13 +15,13 @@ import ProviderIcon from '../ui/ProviderIcon'; import { useTranslation } from '../../hooks/useTranslation'; import FileUploadButton from './FileUploadButton'; import FileAttachmentDisplay from './FileAttachmentDisplay'; -import ImageGenerationButton from './ImageGenerationButton'; +// import ImageGenerationButton from './ImageGenerationButton'; interface ChatMessageAreaProps { 
activeConversation: Conversation | null; isLoading: boolean; error: string | null; - onSendMessage: (content: string) => void; + onSendMessage: (content: string, files?: File[]) => void; onSendMessageWithFiles?: (content: string, files: File[]) => void; onStopStreaming?: () => void; onRegenerateResponse?: (messageId: string) => void; @@ -26,6 +29,9 @@ interface ChatMessageAreaProps { isCurrentlyStreaming?: boolean; selectedProvider: string; selectedModel: string; + mcpServers?: Record; + selectedMcpServers?: string[]; + onToggleMcpServer?: (serverId: string) => void; } export const ChatMessageArea: React.FC = ({ @@ -40,6 +46,9 @@ export const ChatMessageArea: React.FC = ({ isCurrentlyStreaming = false, selectedProvider, selectedModel, + // mcpServers, + // selectedMcpServers, + // onToggleMcpServer, }) => { const { t } = useTranslation(); const [inputValue, setInput] = useState(''); @@ -54,6 +63,9 @@ export const ChatMessageArea: React.FC = ({ const [webSearchActive, setWebSearchActive] = useState(false); const [isWebSearchPreviewEnabled, setIsWebSearchPreviewEnabled] = useState(false); const [selectedFiles, setSelectedFiles] = useState([]); + // const [mcpPopupOpen, setMcpPopupOpen] = useState(false); + // const mcpButtonRef = useRef(null); + // const mcpPopupRef = useRef(null); // Scroll to bottom when messages change useEffect(() => { @@ -116,10 +128,12 @@ export const ChatMessageArea: React.FC = ({ e.preventDefault(); if (isLoading || isCurrentlyStreaming || !inputValue.trim()) return; - if (selectedFiles.length > 0 && onSendMessageWithFiles) { - onSendMessageWithFiles(inputValue, selectedFiles); + if (selectedFiles.length > 0) { + // Use the combined method with files parameter + onSendMessage(inputValue, selectedFiles); setSelectedFiles([]); } else { + // Use the combined method without files onSendMessage(inputValue); } @@ -278,6 +292,25 @@ export const ChatMessageArea: React.FC = ({ textarea.style.height = `${newHeight}px`; } + // Add useEffect to handle click outside for MCP popup + // useEffect(() => { + // const handleClickOutside = (event: MouseEvent) => { + // if ( + // mcpPopupRef.current && + // !mcpPopupRef.current.contains(event.target as Node) && + // mcpButtonRef.current && + // !mcpButtonRef.current.contains(event.target as Node) + // ) { + // setMcpPopupOpen(false); + // } + // }; + + // document.addEventListener('mousedown', handleClickOutside); + // return () => { + // document.removeEventListener('mousedown', handleClickOutside); + // }; + // }, []); + // If no active conversation is selected if (!activeConversation) { return ( @@ -535,7 +568,7 @@ export const ChatMessageArea: React.FC = ({ )} {/* Image generation button */} - { // Set special message for image generation @@ -544,7 +577,7 @@ export const ChatMessageArea: React.FC = ({ // Focus the input inputRef.current?.focus(); }} - /> + /> */} {/* Web search element */} @@ -552,6 +585,70 @@ export const ChatMessageArea: React.FC = ({ webSearchElement } + {/* MCP Servers dropdown */} + {/* {mcpServers && Object.keys(mcpServers).length > 0 && ( +
+ + + {mcpPopupOpen && ( +
+
+
+ {t('chat.availableMcpServers')} +
+
+ {Object.values(mcpServers) + // Filter out image generation servers - they're handled by the ImageGenerationButton + .filter(server => !server.isImageGeneration) + .map((server) => ( +
onToggleMcpServer?.(server.id)} + > + +
+ {server.name} + {server.isDefault && ( + {t('mcpServer.default')} + )} +
+
+ ))} + + {Object.values(mcpServers).filter(server => !server.isImageGeneration).length === 0 && ( +
+ {t('chat.noMcpServersAvailable')} +
+ )} +
+
+
+ )} +
+ )} */} + diff --git a/src/components/chat/ImageGenerationButton.tsx b/src/components/chat/ImageGenerationButton.tsx index aa5080c..04568a3 100644 --- a/src/components/chat/ImageGenerationButton.tsx +++ b/src/components/chat/ImageGenerationButton.tsx @@ -1,5 +1,5 @@ import React, { useState, useRef, useEffect } from 'react'; -import { Image } from 'lucide-react'; +import { Image, ToggleLeft, ToggleRight } from 'lucide-react'; import { SettingsService, SETTINGS_CHANGE_EVENT } from '../../services/settings-service'; import { AIServiceCapability } from '../../types/capabilities'; import ProviderIcon from '../ui/ProviderIcon'; @@ -25,10 +25,11 @@ const ImageGenerationButton: React.FC = ({ const [providers, setProviders] = useState([]); const [selectedProvider, setSelectedProvider] = useState(null); const [selectedModel, setSelectedModel] = useState(null); + const [isEnabled, setIsEnabled] = useState(true); const popupRef = useRef(null); const buttonRef = useRef(null); - // Load available image generation providers and models + // Load available image generation providers and models and settings useEffect(() => { const loadProviders = () => { const settingsService = SettingsService.getInstance(); @@ -38,6 +39,13 @@ const ImageGenerationButton: React.FC = ({ const settings = settingsService.getSettings(); const providerIds = Object.keys(settings.providers); + // Load current image generation enabled status + setIsEnabled(settings.imageGenerationEnabled !== false); + + // Load saved selected provider and model + const savedProvider = settings.imageGenerationProvider; + const savedModel = settings.imageGenerationModel; + for (const providerId of providerIds) { // Get the provider's settings const providerSettings = settingsService.getProviderSettings(providerId); @@ -59,8 +67,11 @@ const ImageGenerationButton: React.FC = ({ setProviders(availableProviders); - // Set default selected provider and model if available - if (availableProviders.length > 0) { + // Set saved or default selected provider and model + if (savedProvider && savedModel && availableProviders.some(p => p.providerName === savedProvider && p.modelId === savedModel)) { + setSelectedProvider(savedProvider); + setSelectedModel(savedModel); + } else if (availableProviders.length > 0) { setSelectedProvider(availableProviders[0].providerName); setSelectedModel(availableProviders[0].modelId); } @@ -99,19 +110,40 @@ const ImageGenerationButton: React.FC = ({ setIsPopupOpen(!isPopupOpen); }; - const handleProviderModelSelect = (providerName: string, modelId: string) => { + const handleProviderModelSelect = async (providerName: string, modelId: string) => { setSelectedProvider(providerName); setSelectedModel(modelId); - setIsPopupOpen(false); + + // Save selected provider and model in settings + const settingsService = SettingsService.getInstance(); + await settingsService.updateSettings({ + imageGenerationProvider: providerName, + imageGenerationModel: modelId + }); // If onImageGenerate is provided, call it with an empty prompt // The actual prompt will be filled in by the chat message - if (onImageGenerate) { + if (onImageGenerate && isEnabled) { onImageGenerate("", providerName, modelId); } + + setIsPopupOpen(false); + }; + + const toggleImageGeneration = async () => { + const newEnabledState = !isEnabled; + setIsEnabled(newEnabledState); + + // Save the enabled state in settings + const settingsService = SettingsService.getInstance(); + await settingsService.updateSettings({ + imageGenerationEnabled: newEnabledState + }); }; - const 
isButtonEnabled = !disabled && providers.length > 0; + const buttonClass = `flex items-center justify-center w-8 h-8 rounded-full focus:outline-none ${ + isEnabled ? 'image-generation-button' : 'text-gray-400 bg-gray-100' + }`; return (
@@ -119,9 +151,15 @@ const ImageGenerationButton: React.FC = ({ ref={buttonRef} type="button" onClick={togglePopup} - disabled={!isButtonEnabled} - className="flex items-center justify-center w-8 h-8 rounded-full image-generation-button focus:outline-none" - title={isButtonEnabled ? t('chat.generateImage') : t('chat.imageGenerationNotAvailable')} + disabled={providers.length === 0 || disabled} + className={buttonClass} + title={ + providers.length === 0 + ? t('chat.imageGenerationNotAvailable') + : isEnabled + ? t('chat.generateImage') + : t('chat.imageGenerationDisabled') + } > @@ -133,15 +171,28 @@ const ImageGenerationButton: React.FC = ({ style={{ bottom: '100%', left: 0, minWidth: '220px' }} >
-
- {t('chat.selectImageProvider')} +
+
+ {t('chat.selectImageProvider')} +
+
{providers.map((provider) => (
& }; export const MarkdownContent: React.FC = ({ content, isUserMessage = false }) => { + const { t } = useTranslation(); const [processedContent, setProcessedContent] = useState(''); const [thinkContent, setThinkContent] = useState(null); const [isThinkExpanded, setIsThinkExpanded] = useState(true); const [fileContents, setFileContents] = useState([]); + const [imageContents, setImageContents] = useState([]); + const [isProcessingImage, setIsProcessingImage] = useState(false); + const [imageGenerationError, setImageGenerationError] = useState(null); + const [customToolCalls, setCustomToolCalls] = useState>([]); - // Process content and check for thinking blocks and files + // Process content and check for thinking blocks, files, and image generation useEffect(() => { - // Extract text and file contents + // Extract text, file, and image contents const textContents: MessageContent[] = []; const files: MessageContent[] = []; + const images: MessageContent[] = []; content.forEach(item => { if (item.type === MessageContentType.Text) { textContents.push(item); } else if (item.type === MessageContentType.File) { files.push(item); + } else if (item.type === MessageContentType.Image) { + images.push(item); } }); // Save file contents for rendering setFileContents(files); + // Save image contents for rendering + setImageContents(images); + // Create a function for a safer replacement function safeReplace(str: string, search: string, replace: string): string { // Split the string by the search term @@ -55,6 +68,97 @@ export const MarkdownContent: React.FC = ({ content, isUse let processed = MessageHelper.MessageContentToText(textContents); + // Track custom tool calls + const toolCalls: Array<{name: string, status: string}> = []; + + // Detect tool calls in progress + const customToolCallRegex = /\bExecuting tool call:\s+([a-zA-Z0-9_-]+)/g; + let match: RegExpExecArray | null; + while ((match = customToolCallRegex.exec(processed)) !== null) { + const toolName = match[1]; + if (toolName !== 'generate_image') { + toolCalls.push({ name: toolName, status: 'in_progress' }); + } + } + + // Detect tool results + const toolResultRegex = /\bTool result:\s+({.+})/g; + while ((match = toolResultRegex.exec(processed)) !== null) { + // Mark the last tool as completed if it exists + if (toolCalls.length > 0) { + const lastTool = toolCalls[toolCalls.length - 1]; + lastTool.status = 'completed'; + } + } + + // Detect tool errors + const toolErrorRegex = /\bError in tool call\s+([a-zA-Z0-9_-]+):\s+(.+)/g; + while ((match = toolErrorRegex.exec(processed)) !== null) { + const toolName = match[1]; + // Check if we already have this tool + const existingTool = toolCalls.find(tool => tool.name === toolName); + if (existingTool) { + existingTool.status = 'error'; + } else { + toolCalls.push({ name: toolName, status: 'error' }); + } + } + + setCustomToolCalls(toolCalls); + + // Detect image generation in progress + const imageGenInProgressMatch = processed.match( + /(?:generating|creating|processing)\s+(?:an\s+)?image(?:s)?\s+(?:with|using|for|from)?(?:\s+prompt)?(?::|;)?\s*["']?([^"']+)["']?/i + ) || processed.match(/\bimage\s+generation\s+in\s+progress\b/i) || processed.match(/\bGenerating image\b/i); + + if ((imageGenInProgressMatch && images.length === 0) || + (processed.includes('generate_image') && processed.includes('tool call') && images.length === 0)) { + setIsProcessingImage(true); + setImageGenerationError(null); + } else { + setIsProcessingImage(false); + } + + // Detect image generation errors + const 
imageGenErrorMatch = processed.match( + /(?:error|failed|couldn't|unable)\s+(?:in\s+)?(?:generating|creating|processing)\s+(?:an\s+)?image(?:s)?(?::|;)?\s*["']?([^"']+)["']?/i + ) || processed.match(/\bimage\s+generation\s+(?:error|failed)\b:?\s*["']?([^"']+)["']?/i) || processed.match(/\bError generating image\b:?\s*([^"\n]+)/i); + + if (imageGenErrorMatch || (processed.includes('error') && processed.includes('generate_image'))) { + const errorMessage = imageGenErrorMatch ? (imageGenErrorMatch[1] || "Unknown error occurred") : "Failed to generate image"; + setImageGenerationError(errorMessage); + setIsProcessingImage(false); + } else if (images.length > 0) { + setImageGenerationError(null); + } + + // Process the content - remove the tool call info to make the response cleaner + if (processed.includes('Executing tool call:') || processed.includes('Tool result:') || + processed.includes('Generating image') || processed.includes('Error generating image') || + processed.includes('Error in tool call')) { + + // Split by the first occurrence of a tool-related message + const toolMarkers = [ + 'Executing tool call:', + 'Tool result:', + 'Generating image', + 'Error generating image', + 'Error in tool call' + ]; + + let firstToolIndex = processed.length; + for (const marker of toolMarkers) { + const index = processed.indexOf(marker); + if (index > -1 && index < firstToolIndex) { + firstToolIndex = index; + } + } + + if (firstToolIndex < processed.length) { + processed = processed.substring(0, firstToolIndex); + } + } + // Check if content contains thinking block const thinkMatch = processed.match(/([\s\S]*?)<\/think>([\s\S]*)/); @@ -134,6 +238,84 @@ export const MarkdownContent: React.FC = ({ content, isUse
)} + {/* Custom Tool Calls */} + {customToolCalls.length > 0 && ( +
+ {customToolCalls.map((toolCall, index) => ( +
+ {toolCall.status === 'in_progress' && ( + + )} + {toolCall.status === 'completed' && ( + + )} + {toolCall.status === 'error' && ( + + )} +
+
+ {toolCall.status === 'in_progress' && t('tools.executing')} + {toolCall.status === 'completed' && t('tools.executedSuccessfully')} + {toolCall.status === 'error' && t('tools.executionFailed')} +
+
+ {t('tools.toolName')}: {toolCall.name} +
+
+
+ ))} +
+ )} + + {/* Image Generation In Progress */} + {isProcessingImage && ( +
+
+ + {t('imageGeneration.generating')} +
+
+ {t('imageGeneration.creatingImage')} +
+
+ )} + + {/* Image Generation Error */} + {imageGenerationError && ( +
+
+ {t('imageGeneration.generationFailed')} +
+
+ {imageGenerationError} +
+
+ )} + + {/* Generated Images */} + {imageContents.length > 0 && ( +
+ {imageContents.map((image, index) => ( +
+ {t('imageGeneration.generatedImage')} +
+ ))} +
+ )} + {thinkContent && (
= ({ + imageResult, +}) => { + const { t } = useTranslation(); + const isGenerating = imageResult.status === 'generating'; + const isFailed = imageResult.status === 'failed'; + + // Function to render image grid based on number of images and aspect ratio + const renderImageGrid = (images: MessageContent[]) => { + if (images.length === 0 && !isGenerating) return null; + + // Convert aspect ratio string (e.g. "16:9") to calculate best layout + const [widthRatio, heightRatio] = imageResult.aspectRatio.split(':').map(Number); + const isWide = widthRatio > heightRatio; + + // Grid layout classes based on number of images + let gridClass = "grid-cols-1"; + + if (images.length === 2) { + gridClass = isWide ? "grid-cols-2" : "grid-cols-1 grid-rows-2"; + } else if (images.length === 3) { + gridClass = "grid-cols-3"; + } else if (images.length === 4) { + gridClass = "grid-cols-2 grid-rows-2"; + } + + // If still generating, show a loading indicator + if (isGenerating) { + return ( +
+
+ +

{t('imageGeneration.creatingImage')}

+
+
+ ); + } + + // Show error message if generation failed + if (isFailed) { + return ( +
+
+

{t('imageGeneration.generationFailed')}

+
+
+ ); + } + + return ( +
+ {images.map((image, index) => ( +
+ {`${imageResult.prompt} +
+ ))} +
+ ); + }; + + const getProviderName = (providerId: string, providerName: string) => { + const providerService = AIService.getInstance().getProvider(providerId); + return providerService?.name || providerName || providerId; + } + + return ( +
+
+ {renderImageGrid(imageResult.images)} +
+ +
+
+
+
+

{imageResult.prompt}

+ +
+ + {imageResult.model} + + + {getProviderName(imageResult.provider, imageResult.providerName)} + + + {imageResult.aspectRatio} + + + Seed: {imageResult.seed} + + {isGenerating && ( + + {t('imageGeneration.generating')} + + )} +
+
+
+
+
+
+ ); +}; + +export default ImageGenerateHistoryItem; \ No newline at end of file diff --git a/src/components/layout/Sidebar.tsx b/src/components/layout/Sidebar.tsx index ae492e7..1ee2fdd 100644 --- a/src/components/layout/Sidebar.tsx +++ b/src/components/layout/Sidebar.tsx @@ -1,5 +1,7 @@ import React from 'react'; -import { MessageSquare, Settings, Image, Languages, FolderClosed } from 'lucide-react'; +import { MessageSquare, Settings, Image, Languages, FolderClosed, + // ServerCog +} from 'lucide-react'; interface SidebarProps { activePage: string; @@ -31,6 +33,9 @@ export const Sidebar: React.FC = ({ else if(activePage === 'files'){ return 'files'; } + else if(activePage === 'mcpserver'){ + return 'mcpserver'; + } return ''; } @@ -87,6 +92,18 @@ export const Sidebar: React.FC = ({ > + + {/* */}
{/* Settings button at bottom */} diff --git a/src/components/pages/ChatPage.tsx b/src/components/pages/ChatPage.tsx index 15a4151..a5b3e18 100644 --- a/src/components/pages/ChatPage.tsx +++ b/src/components/pages/ChatPage.tsx @@ -5,6 +5,7 @@ import { Conversation, ConversationFolder } from '../../types/chat'; import { SETTINGS_CHANGE_EVENT, SettingsService } from '../../services/settings-service'; import { ChatService } from '../../services/chat-service'; import { AIService } from '../../services/ai-service'; +import { MCPServerSettings } from '../../types/settings'; export const ChatPage = () => { const [conversations, setConversations] = useState([]); @@ -17,6 +18,8 @@ export const ChatPage = () => { const [selectedModel, setSelectedModel] = useState(''); const [selectedProvider, setSelectedProvider] = useState(''); const [isApiKeyMissing, setIsApiKeyMissing] = useState(true); + const [mcpServers, setMcpServers] = useState>({}); + const [selectedMcpServers, setSelectedMcpServers] = useState([]); // Initialize the services useEffect(() => { @@ -43,6 +46,10 @@ export const ChatPage = () => { const foldersList = chatService.getFolders(); setFolders(foldersList); + // Load MCP servers + const mcpServersList = chatService.getAvailableMCPServers(); + setMcpServers(mcpServersList); + // Set active conversation from chat service const activeId = chatService.getActiveConversationId(); if (activeId) { @@ -65,6 +72,22 @@ export const ChatPage = () => { }, [isServiceInitialized]); + // Load MCP servers when settings change + useEffect(() => { + const handleSettingsChange = () => { + if (chatServiceRef.current) { + const mcpServersList = chatServiceRef.current.getAvailableMCPServers(); + setMcpServers(mcpServersList); + } + }; + + window.addEventListener(SETTINGS_CHANGE_EVENT, handleSettingsChange); + + return () => { + window.removeEventListener(SETTINGS_CHANGE_EVENT, handleSettingsChange); + }; + }, []); + // Load active conversation details when selected useEffect(() => { if (activeConversationId && isServiceInitialized && chatServiceRef.current) { @@ -201,30 +224,24 @@ export const ChatPage = () => { }; // Handle sending a message with streaming - const handleSendMessage = async (content: string) => { + const handleSendMessage = async (content: string, files?: File[]) => { if (!activeConversationId || !isServiceInitialized || !chatServiceRef.current) return; try { const chatService = chatServiceRef.current; - - // Send user message with streaming + + // Send message with optional files await chatService.sendMessage( content, activeConversationId, true, (updatedConversation) => { setConversations(updatedConversation); - } + }, + files ); - } catch (err) { console.error('Error sending streaming message:', err); - - // // If streaming fails, we'll try to fall back to regular mode - // const error = err as Error; - // if (error.message && error.message.includes('does not support streaming')) { - // await handleSendMessage(content); - // } } }; @@ -345,27 +362,21 @@ export const ChatPage = () => { } }; - // Handle sending a message with files + // Handle sending a message with files (deprecated - now handled by handleSendMessage) + // This method is kept for backward compatibility but delegates to handleSendMessage const handleSendMessageWithFiles = async (content: string, files: File[]) => { - if (!activeConversationId || !isServiceInitialized || !chatServiceRef.current) return; - - try { - const chatService = chatServiceRef.current; + await handleSendMessage(content, files); + }; - // Send user 
message with files
-      await chatService.sendMessageWithFiles(
-        content,
-        files,
-        activeConversationId,
-        true,
-        (updatedConversation) => {
-          setConversations(updatedConversation);
-        }
-      );
-
-    } catch (err) {
-      console.error('Error sending message with files:', err);
-    }
+  // Toggle selection of an MCP server
+  const handleToggleMcpServer = (serverId: string) => {
+    setSelectedMcpServers(prev => {
+      if (prev.includes(serverId)) {
+        return prev.filter(id => id !== serverId);
+      } else {
+        return [...prev, serverId];
+      }
+    });
   };
 
   return (
@@ -406,6 +417,9 @@ export const ChatPage = () => {
           isCurrentlyStreaming={chatServiceRef.current?.isCurrentlyStreaming(activeConversationId) || false}
           selectedProvider={selectedProvider}
           selectedModel={selectedModel}
+          mcpServers={mcpServers}
+          selectedMcpServers={selectedMcpServers}
+          onToggleMcpServer={handleToggleMcpServer}
         />
diff --git a/src/components/pages/ImageGenerationPage.tsx b/src/components/pages/ImageGenerationPage.tsx index 91058dd..459b91a 100644 --- a/src/components/pages/ImageGenerationPage.tsx +++ b/src/components/pages/ImageGenerationPage.tsx @@ -1,32 +1,162 @@ -import { useState, useEffect } from "react"; +import { useState, useEffect, useCallback, useRef } from "react"; import { SettingsService, SETTINGS_CHANGE_EVENT, } from "../../services/settings-service"; -import { ChevronDown, RefreshCw } from "lucide-react"; +import { ChevronDown, RefreshCw, Settings, Zap } from "lucide-react"; import { useTranslation } from "react-i18next"; import { AIService } from "../../services/ai-service"; import { OPENAI_PROVIDER_NAME } from "../../services/providers/openai-service"; +import { ImageGenerationManager, ImageGenerationStatus, ImageGenerationHandler } from "../../services/image-generation-handler"; +import { DatabaseIntegrationService } from "../../services/database-integration"; +import { ImageGenerationResult } from "../../types/image"; +import ImageGenerateHistoryItem from "../image/ImageGenerateHistoryItem"; + export const ImageGenerationPage = () => { const { t } = useTranslation(); const [prompt, setPrompt] = useState(""); - const [isGenerating, setIsGenerating] = useState(false); - const [imageResult, setImageResult] = useState(null); const [error, setError] = useState(null); const [isApiKeyMissing, setIsApiKeyMissing] = useState(true); const [aspectRatio, setAspectRatio] = useState("1:1"); + // eslint-disable-next-line @typescript-eslint/no-unused-vars const [imageCount, setImageCount] = useState(1); const [randomSeed, setRandomSeed] = useState( Math.floor(Math.random() * 1000000).toString() ); + const [generationResults, setGenerationResults] = useState>( + new Map() + ); + const [historyResults, setHistoryResults] = useState([]); + const [isLoadingHistory, setIsLoadingHistory] = useState(true); + const [isSettingsOpen, setIsSettingsOpen] = useState(false); + const [selectedProvider, setSelectedProvider] = useState(OPENAI_PROVIDER_NAME); + const [selectedModel, setSelectedModel] = useState("dall-e-3"); + const [availableProviders, setAvailableProviders] = useState<{id: string, name: string}[]>([]); + + const settingsButtonRef = useRef(null); + const settingsPopupRef = useRef(null); + const providerDropdownRef = useRef(null); + const [isProviderDropdownOpen, setIsProviderDropdownOpen] = useState(false); + + // Load image generation history from database + const refreshImageHistory = useCallback(async () => { + try { + const dbService = DatabaseIntegrationService.getInstance(); + const results = await dbService.getImageGenerationResults(); + if (results && results.length > 0) { + // Sort by most recent first using updatedAt timestamp + const sortedResults = results.sort((a, b) => { + // Convert string dates to Date objects if necessary + const dateA = a.updatedAt instanceof Date ? a.updatedAt : new Date(a.updatedAt); + const dateB = b.updatedAt instanceof Date ? 
b.updatedAt : new Date(b.updatedAt); + // Sort newest first + return dateB.getTime() - dateA.getTime(); + }); + setHistoryResults(sortedResults); + } else { + setHistoryResults([]); + } + } catch (error) { + console.error('Error refreshing image history:', error); + } + }, []); + + // Load available image generation providers + const loadImageGenerationProviders = useCallback(async () => { + const aiService = AIService.getInstance(); + const providers = aiService.getImageGenerationProviders(); + + const providerOptions = providers.map(provider => ({ + id: provider.id, + name: provider.name || provider.id + })); + + setAvailableProviders(providerOptions); + + // Set default provider if none is selected or current one isn't available + if (!selectedProvider || !providerOptions.some(p => p.id === selectedProvider)) { + if (providerOptions.length > 0) { + setSelectedProvider(providerOptions[0].id); + } + } + }, [selectedProvider]); + + const handleGetProviderNameById = (id: string) => { + const provider = availableProviders.find(p => p.id === id); + return provider ? provider.name : id; + } + + // Initialize image generation manager and load settings + useEffect(() => { + const initialize = async () => { + // Initialize image generation manager + const imageManager = ImageGenerationManager.getInstance(); + + // Register for updates on generation status changes + imageManager.setUpdateCallback((handlers) => { + setGenerationResults(new Map(handlers)); + }); + + // Initialize database and load history + try { + setIsLoadingHistory(true); + const dbService = DatabaseIntegrationService.getInstance(); + await dbService.initialize(); + + // Load settings + const settingsService = SettingsService.getInstance(); + await settingsService.initialize(); + + // Load saved provider preference + const settings = settingsService.getSettings(); + if (settings.imageGenerationProvider) { + setSelectedProvider(settings.imageGenerationProvider); + } + if (settings.imageGenerationModel) { + setSelectedModel(settings.imageGenerationModel); + } + + // Load available providers + await loadImageGenerationProviders(); + + // Load image generation history from database + await refreshImageHistory(); + + setIsLoadingHistory(false); + } catch (error) { + console.error('Error initializing database or loading image history:', error); + setIsLoadingHistory(false); + } + }; + + initialize(); + }, [setSelectedProvider, setSelectedModel, refreshImageHistory, loadImageGenerationProviders]); + + // Listen for settings changes + useEffect(() => { + const handleSettingsChange = () => { + loadImageGenerationProviders(); + }; + + window.addEventListener(SETTINGS_CHANGE_EVENT, handleSettingsChange); + return () => { + window.removeEventListener(SETTINGS_CHANGE_EVENT, handleSettingsChange); + }; + }, [loadImageGenerationProviders]); // Check if API key is available useEffect(() => { - setIsApiKeyMissing(!SettingsService.getInstance().getApiKey()); + const checkApiKey = () => { + // Check if the selected provider has an API key + const hasApiKey = !!SettingsService.getInstance().getApiKey(selectedProvider); + setIsApiKeyMissing(!hasApiKey); + }; + + checkApiKey(); const handleSettingsChange = () => { - setIsApiKeyMissing(!SettingsService.getInstance().getApiKey()); + checkApiKey(); }; window.addEventListener(SETTINGS_CHANGE_EVENT, handleSettingsChange); @@ -34,23 +164,91 @@ export const ImageGenerationPage = () => { return () => { window.removeEventListener(SETTINGS_CHANGE_EVENT, handleSettingsChange); }; + }, [selectedProvider]); + + // 
Handle clicks outside the settings popup + useEffect(() => { + const handleClickOutside = (event: MouseEvent) => { + if ( + settingsPopupRef.current && + !settingsPopupRef.current.contains(event.target as Node) && + settingsButtonRef.current && + !settingsButtonRef.current.contains(event.target as Node) + ) { + setIsSettingsOpen(false); + setIsProviderDropdownOpen(false); + } else if ( + providerDropdownRef.current && + !providerDropdownRef.current.contains(event.target as Node) && + event.target instanceof Element && + !event.target.closest('.provider-dropdown-toggle') + ) { + setIsProviderDropdownOpen(false); + } + }; + + document.addEventListener('mousedown', handleClickOutside); + return () => { + document.removeEventListener('mousedown', handleClickOutside); + }; }, []); - // Handle generating an image using OpenAI's DALL-E 3 + // Process and update the result after generation + const processGenerationResult = async (handler: ImageGenerationHandler, images: string[]) => { + // Convert image data to full URLs if needed + const processedImages = images.map(img => { + const base64Data = img as string; + if (base64Data.startsWith('data:image')) { + return base64Data; + } else { + return `data:image/png;base64,${base64Data}`; + } + }); + + // Update the handler with successful results + await handler.setSuccess(processedImages); + + // Refresh history to include the new generation + await refreshImageHistory(); + + // Remove the handler to prevent duplication with database results + const imageManager = ImageGenerationManager.getInstance(); + imageManager.removeHandler(handler.getId()); + + // Generate new seed for next generation + generateNewSeed(); + }; + + // Handle generating an image using selected provider const handleGenerateImage = async () => { if (!prompt.trim()) return; - setIsGenerating(true); setError(null); try { - // Get the OpenAI service from AIService - const openaiService = AIService.getInstance().getProvider(OPENAI_PROVIDER_NAME); + // Get the appropriate service based on selected provider + const aiService = AIService.getInstance(); + const providerService = aiService.getProvider(selectedProvider); - if (!openaiService) { - throw new Error("OpenAI service not available"); + if (!providerService) { + throw new Error(`${selectedProvider} service not available`); } + // Create a new generation handler + const imageManager = ImageGenerationManager.getInstance(); + const handler = imageManager.createHandler({ + prompt: prompt, + seed: randomSeed, + number: 1, + aspectRatio: aspectRatio, + provider: selectedProvider, + providerName: handleGetProviderNameById(selectedProvider), + model: selectedModel, + }); + + // Set status to generating + handler.setGenerating(); + // Map aspect ratio to size dimensions const sizeMap: Record = { "1:1": "1024x1024", @@ -62,29 +260,20 @@ export const ImageGenerationPage = () => { }; // Generate the image - const images = await openaiService.getImageGeneration(prompt, { + const images = await providerService.getImageGeneration(prompt, { size: sizeMap[aspectRatio] || "1024x1024", aspectRatio: aspectRatio as `${number}:${number}`, style: "vivid" }); - // Set the result image + // Process the result if (images && images.length > 0) { - // Check if the image is already a full data URL - const base64Data = images[0] as string; - if (base64Data.startsWith('data:image')) { - setImageResult(base64Data); - } else { - // If it's just a base64 string without the data URI prefix, add it - setImageResult(`data:image/png;base64,${base64Data}`); - } + await 
processGenerationResult(handler, images as string[]); } else { throw new Error("No images generated"); } } catch (err) { setError(err as Error); - } finally { - setIsGenerating(false); } }; @@ -93,6 +282,59 @@ export const ImageGenerationPage = () => { setRandomSeed(Math.floor(Math.random() * 1000000).toString()); }; + // Get all results to display in order (active generations first, then history) + const getAllResults = () => { + // Get results from active generation handlers + const handlerResults = Array.from(generationResults.values()) + .map(handler => handler.getResult()) + .sort((a, b) => { + // Prioritize active generations first + if (a.status === ImageGenerationStatus.GENERATING && b.status !== ImageGenerationStatus.GENERATING) { + return -1; + } + if (b.status === ImageGenerationStatus.GENERATING && a.status !== ImageGenerationStatus.GENERATING) { + return 1; + } + // Then sort by most recent first using updatedAt timestamp + const dateA = a.updatedAt instanceof Date ? a.updatedAt : new Date(a.updatedAt); + const dateB = b.updatedAt instanceof Date ? b.updatedAt : new Date(b.updatedAt); + return dateB.getTime() - dateA.getTime(); + }); + + // Combine with historical results and ensure newest is first + return [...handlerResults, ...historyResults]; + }; + + // Check if any images are currently generating + const isAnyImageGenerating = () => { + return Array.from(generationResults.values()).some( + h => h.getStatus() === ImageGenerationStatus.GENERATING + ); + }; + + // Toggle settings popup + const toggleSettings = () => { + setIsSettingsOpen(!isSettingsOpen); + setIsProviderDropdownOpen(false); + }; + + // Toggle provider dropdown + const toggleProviderDropdown = () => { + setIsProviderDropdownOpen(!isProviderDropdownOpen); + }; + + // Handle provider selection + const handleProviderSelect = async (provider: string) => { + setSelectedProvider(provider); + setIsProviderDropdownOpen(false); + + // Save provider preference to settings + const settingsService = SettingsService.getInstance(); + await settingsService.updateSettings({ + imageGenerationProvider: provider + }); + }; + return (
{isApiKeyMissing && ( @@ -101,38 +343,44 @@ export const ImageGenerationPage = () => {
)} -
+
{/* Left side - Controls */} -
-

- {t("imageGeneration.title")} -

- +
{/* Prompt input */}
-
*/} - {/*
+
{[ {capability: AIServiceCapability.TextCompletion, label: 'Text Completion'}, - {capability: AIServiceCapability.Reasoning, label: 'Reasoning'}, - {capability: AIServiceCapability.VisionAnalysis, label: 'Vision'}, - {capability: AIServiceCapability.ToolUsage, label: 'Tool Usage'}, - {capability: AIServiceCapability.Embedding, label: 'Embedding'} + {capability: AIServiceCapability.ImageGeneration, label: 'Image Generation'}, ].map(({capability, label}) => (
= ({
))}
-
*/} +
diff --git a/src/components/settings/GeneralSettings.tsx b/src/components/settings/GeneralSettings.tsx new file mode 100644 index 0000000..9271215 --- /dev/null +++ b/src/components/settings/GeneralSettings.tsx @@ -0,0 +1,155 @@ +import React, { useState, useEffect } from 'react'; +import { useTranslation } from 'react-i18next'; + +interface GeneralSettingsProps { + startWithSystem: boolean; + startupToTray: boolean; + closeToTray: boolean; + proxyMode: 'system' | 'custom' | 'none'; + sendErrorReports: boolean; + onSettingChange: (key: string, value: unknown) => void; + onSaveSettings: () => void; +} + +export const GeneralSettings: React.FC = ({ + startWithSystem, + startupToTray, + closeToTray, + onSettingChange, + onSaveSettings +}) => { + const { t, i18n } = useTranslation(); + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const [isWindows, setIsWindows] = useState(false); + const [currentLanguage, setCurrentLanguage] = useState(i18n.language); + + // Check if running on Windows platform + useEffect(() => { + const checkPlatform = async () => { + if (window.electron && window.electron.getPlatform) { + const platform = await window.electron.getPlatform(); + setIsWindows(platform === 'win32'); + } + }; + + checkPlatform(); + }, []); + + // Update currentLanguage when i18n.language changes + useEffect(() => { + setCurrentLanguage(i18n.language); + }, [i18n.language]); + + // const handleProxyModeChange = (mode: 'system' | 'custom' | 'none') => { + // onSettingChange('proxyMode', mode); + // onSaveSettings(); + // }; + + const handleToggleChange = (key: string) => (e: React.ChangeEvent) => { + onSettingChange(key, e.target.checked); + onSaveSettings(); + }; + + const handleLanguageChange = (langCode: string) => { + i18n.changeLanguage(langCode); + setCurrentLanguage(langCode); + }; + + const languages = [ + { code: 'en', name: 'English' }, + { code: 'zh_CN', name: '简体中文' }, + { code: 'zh_TW', name: '繁體中文' }, + { code: 'ja', name: '日本語' }, + { code: 'ko', name: '한국어' }, + { code: 'es', name: 'Español' } + ]; + + return ( +
+
+

{t('settings.general')}

+ + {/* Startup Settings */} +
+

{t('settings.startup')}

+ +
+
+ + +
+ +
+ + +
+
+
+ + {/* Tray Settings */} +
+

{t('settings.trayOptions')}

+ +
+
+ + +
+
+
+ + {/* Language Settings */} +
+

{t('settings.language')}

+ +
+ {languages.map((lang) => ( +
+ handleLanguageChange(lang.code)} + className="w-4 h-4 text-blue-600 form-radio" + /> + +
+ ))} +
+
+ + {/* Network Proxy and Privacy sections are hidden as requested */} +
+
+ ); +}; + +export default GeneralSettings; \ No newline at end of file diff --git a/src/components/settings/LanguageSettings.tsx b/src/components/settings/LanguageSettings.tsx deleted file mode 100644 index 5bdfa28..0000000 --- a/src/components/settings/LanguageSettings.tsx +++ /dev/null @@ -1,46 +0,0 @@ -import { useTranslation } from 'react-i18next'; -import { useState, useEffect } from 'react'; - -export const LanguageSettings = () => { - const { t, i18n } = useTranslation(); - const [currentLanguage, setCurrentLanguage] = useState(i18n.language); - - const languages = [ - { code: 'en', name: 'English' }, - { code: 'zh_CN', name: '简体中文' }, - { code: 'zh_TW', name: '繁體中文' }, - { code: 'ja', name: '日本語' }, - { code: 'ko', name: '한국어' }, - { code: 'es', name: 'Español' } - ]; - - const handleLanguageChange = (langCode: string) => { - i18n.changeLanguage(langCode); - setCurrentLanguage(langCode); - }; - - useEffect(() => { - setCurrentLanguage(i18n.language); - }, [i18n.language]); - - return ( -
-

{t('settings.language')}

-
- {languages.map((lang) => ( - - ))} -
-
- ); -}; \ No newline at end of file diff --git a/src/components/settings/index.ts b/src/components/settings/index.ts index 1d27d9e..bd6824a 100644 --- a/src/components/settings/index.ts +++ b/src/components/settings/index.ts @@ -1,3 +1,3 @@ export * from './ApiManagement'; export * from './ChatSettings'; -export * from './LanguageSettings'; \ No newline at end of file +export * from './GeneralSettings'; \ No newline at end of file diff --git a/src/locales/en/translation.json b/src/locales/en/translation.json index dd8e203..4c39ac2 100644 --- a/src/locales/en/translation.json +++ b/src/locales/en/translation.json @@ -9,7 +9,8 @@ "submit": "Submit", "loading": "Loading...", "error": "Error", - "success": "Success" + "success": "Success", + "general": "General" }, "chat": { "sendMessage": "Send Message", @@ -30,7 +31,22 @@ "edit": "Edit", "regenerate": "Regenerate", "stopResponse": "Stop Response", - "pressShiftEnterToChangeLines": "Press Shift+Enter to change lines" + "pressShiftEnterToChangeLines": "Press Shift+Enter to change lines", + "generateImage": "Generate Image", + "imageGenerationNotAvailable": "Image generation not available", + "imageGenerationDisabled": "Image generation is disabled", + "enableImageGeneration": "Enable image generation", + "disableImageGeneration": "Disable image generation", + "selectImageProvider": "Select Image Provider", + "noImageProvidersAvailable": "No image providers available" + }, + "tools": { + "executing": "Executing tool...", + "executedSuccessfully": "Tool execution completed", + "executionFailed": "Tool execution failed", + "toolName": "Tool name", + "result": "Result", + "error": "Error" }, "translation": { "title": "Translation", @@ -96,13 +112,53 @@ "generationCount": "Generation Count", "randomSeed": "Random Seed", "generateButton": "Generate", - "generating": "Generating...", + "generating": "Generating image...", + "creatingImage": "AI is creating your image", + "generationFailed": "Image generation failed", + "generatedImage": "Generated image", "prompt": "Prompt", "promptPlaceholder": "Describe the image you want to create, e.g.: a peaceful lake at sunset with mountains in the background", "results": "Generated Results", "placeholderText": "Enter a prompt and click generate to create images", "apiKeyMissing": "Please set your API key for the selected provider in the settings.", - "seedHelp": "Seed for reproducible results" + "seedHelp": "Seed for reproducible results", + "loading": "Loading image history..." 
+ }, + "mcpServer": { + "title": "MCP Servers", + "addServer": "Add Server", + "editServer": "Edit Server", + "exploreMore": "Explore More", + "serverName": "Server Name", + "serverNamePlaceholder": "Enter server name", + "description": "Description", + "descriptionPlaceholder": "Enter server description (optional)", + "transportType": "Transport Type", + "serverURL": "Server URL", + "serverURLPlaceholder": "Enter server URL", + "headers": "Headers (JSON)", + "default": "Default", + "imageGeneration": "Image Generation", + "noServers": "No MCP servers configured", + "addServerPrompt": "Click the 'Add Server' button to create one", + "cannotEditDefault": "Cannot edit default server", + "cannotDeleteDefault": "Cannot delete default server", + "sseOption": "Server-Sent Events (SSE)", + "stdioOption": "Standard IO (stdio)", + "streamableHttpOption": "Streamable HTTP", + "sseDisplay": "SSE", + "stdioDisplay": "STDIO", + "streamableHttpDisplay": "Streamable HTTP", + "connectionSettings": "Connection Settings", + "command": "Command", + "commandPlaceholder": "Enter command (e.g., python, node)", + "args": "Arguments", + "argsPlaceholder": "One argument per line", + "argsHelp": "Enter each argument on a separate line", + "env": "Environment Variables (JSON)", + "advancedSettings": "Advanced Settings", + "timeout": "Timeout", + "seconds": "seconds" }, "settings": { "language": "Language", @@ -148,6 +204,20 @@ "webSearch_title": "Web Search (Preview)", "webSearch_toggle_label": "Enable Web Search Function", - "webSearch_description": "When enabled, the AI can search the web to provide more up-to-date information. Please note that web search is currently only supported with OpenAI and Gemini models. Also, when web search is enabled, streaming responses (where text appears incrementally) will not be available." + "webSearch_description": "When enabled, the AI can search the web to provide more up-to-date information. Please note that web search is currently only supported with OpenAI and Gemini models. Also, when web search is enabled, streaming responses (where text appears incrementally) will not be available.", + + "general": "General", + "startup": "Startup", + "startWithSystem": "Start with system", + "startupToTray": "Start minimized to tray", + "trayOptions": "Tray Options", + "closeToTray": "Close to tray instead of quitting", + "networkProxy": "Network Proxy", + "systemProxy": "Use system proxy", + "customProxy": "Custom proxy", + "noProxy": "No proxy", + "privacy": "Privacy", + "sendErrorReports": "Send anonymous error reports and usage statistics", + "sendErrorReports_description": "Help improve the application by sending anonymous crash reports and usage data." 
} } \ No newline at end of file diff --git a/src/locales/es/translation.json b/src/locales/es/translation.json index 0bf7a0c..224821f 100644 --- a/src/locales/es/translation.json +++ b/src/locales/es/translation.json @@ -9,7 +9,8 @@ "submit": "Enviar", "loading": "Cargando...", "error": "Error", - "success": "Éxito" + "success": "Éxito", + "general": "General" }, "chat": { "sendMessage": "Enviar Mensaje", @@ -30,7 +31,22 @@ "edit": "Editar", "regenerate": "Regenerar", "stopResponse": "Detener Respuesta", - "pressShiftEnterToChangeLines": "Presiona Shift+Enter para cambiar de línea" + "pressShiftEnterToChangeLines": "Presiona Shift+Enter para cambiar de línea", + "generateImage": "Generar Imagen", + "imageGenerationNotAvailable": "Generación de imágenes no disponible", + "imageGenerationDisabled": "La generación de imágenes está desactivada", + "enableImageGeneration": "Activar generación de imágenes", + "disableImageGeneration": "Desactivar generación de imágenes", + "selectImageProvider": "Seleccionar Proveedor de Imágenes", + "noImageProvidersAvailable": "No hay proveedores de imágenes disponibles" + }, + "tools": { + "executing": "Ejecutando herramienta...", + "executedSuccessfully": "Ejecución de herramienta completada", + "executionFailed": "Fallo en la ejecución de la herramienta", + "toolName": "Nombre de la herramienta", + "result": "Resultado", + "error": "Error" }, "translation": { "title": "Traducción", @@ -89,21 +105,60 @@ "selectModel_search_placeholder": "Buscar modelos..." }, "imageGeneration": { - "title": "Imagen Generación", + "title": "Generación de Imágenes", "provider": "Proveedor", "model": "Modelo", "imageSize": "Tamaño de Imagen", "generationCount": "Cantidad de Generaciones", "randomSeed": "Semilla Aleatoria", "generateButton": "Generar", - "generating": "Generando...", + "generating": "Generando imagen...", + "creatingImage": "La IA está creando tu imagen", + "generationFailed": "Falló la generación de imagen", + "generatedImage": "Imagen generada", "prompt": "Prompt", - "promptPlaceholder": "Describe la imagen que quieres crear, ej.: un lago tranquilo al atardecer con montañas en el fondo", + "promptPlaceholder": "Describe la imagen que quieres crear, p.ej.: un lago tranquilo al atardecer con montañas en el fondo", "results": "Resultados Generados", "placeholderText": "Ingresa un prompt y haz clic en generar para crear imágenes", "apiKeyMissing": "Por favor, configura tu clave API para el proveedor seleccionado en la configuración.", "seedHelp": "Semilla para resultados reproducibles" }, + "mcpServer": { + "title": "Servidores MCP", + "addServer": "Agregar Servidor", + "editServer": "Editar Servidor", + "exploreMore": "Explorar Más", + "serverName": "Nombre del Servidor", + "serverNamePlaceholder": "Ingrese el nombre del servidor", + "description": "Descripción", + "descriptionPlaceholder": "Ingrese descripción del servidor (opcional)", + "transportType": "Tipo de Transporte", + "serverURL": "URL del Servidor", + "serverURLPlaceholder": "Ingrese URL del servidor", + "headers": "Encabezados (JSON)", + "default": "Predeterminado", + "imageGeneration": "Generación de Imágenes", + "noServers": "No hay servidores MCP configurados", + "addServerPrompt": "Haga clic en el botón 'Agregar Servidor' para crear uno", + "cannotEditDefault": "No se puede editar el servidor predeterminado", + "cannotDeleteDefault": "No se puede eliminar el servidor predeterminado", + "sseOption": "Eventos enviados por el servidor (SSE)", + "stdioOption": "Entrada/Salida estándar (stdio)", + 
"streamableHttpOption": "HTTP transmisible", + "sseDisplay": "SSE", + "stdioDisplay": "STDIO", + "streamableHttpDisplay": "HTTP Transmisible", + "connectionSettings": "Configuración de Conexión", + "command": "Comando", + "commandPlaceholder": "Ingrese el comando (ej., python, node)", + "args": "Argumentos", + "argsPlaceholder": "Un argumento por línea", + "argsHelp": "Ingrese cada argumento en una línea separada", + "env": "Variables de Entorno (JSON)", + "advancedSettings": "Configuración Avanzada", + "timeout": "Tiempo de espera", + "seconds": "segundos" + }, "settings": { "language": "Idioma", "apiKey": "Clave API", @@ -145,6 +200,20 @@ "models_modelCapabilities_chat": "Chat", "webSearch_title": "Búsqueda Web (Vista Previa)", "webSearch_toggle_label": "Habilitar Función de Búsqueda Web", - "webSearch_description": "Cuando está habilitado, la IA puede buscar en la web para proporcionar información más actualizada. Ten en cuenta que la búsqueda web actualmente solo es compatible con los modelos de OpenAI y Gemini. Además, cuando la búsqueda web está habilitada, las respuestas en streaming (donde el texto aparece de forma incremental) no estarán disponibles." + "webSearch_description": "Cuando está habilitado, la IA puede buscar en la web para proporcionar información más actualizada. Ten en cuenta que la búsqueda web actualmente solo es compatible con los modelos de OpenAI y Gemini. Además, cuando la búsqueda web está habilitada, las respuestas en streaming (donde el texto aparece de forma incremental) no estarán disponibles.", + + "general": "Configuración General", + "startup": "Inicio", + "startWithSystem": "Iniciar con el sistema", + "startupToTray": "Iniciar minimizado en la bandeja", + "trayOptions": "Opciones de Bandeja", + "closeToTray": "Minimizar a la bandeja en lugar de cerrar", + "networkProxy": "Proxy de Red", + "systemProxy": "Usar proxy del sistema", + "customProxy": "Proxy personalizado", + "noProxy": "Sin proxy", + "privacy": "Privacidad", + "sendErrorReports": "Enviar informes de errores anónimos y estadísticas de uso", + "sendErrorReports_description": "Ayuda a mejorar la aplicación enviando informes de fallos anónimos y datos de uso." 
} } \ No newline at end of file diff --git a/src/locales/ja/translation.json b/src/locales/ja/translation.json index 49c01c6..3aca9f0 100644 --- a/src/locales/ja/translation.json +++ b/src/locales/ja/translation.json @@ -9,7 +9,8 @@ "submit": "送信", "loading": "読み込み中...", "error": "エラー", - "success": "成功" + "success": "成功", + "general": "一般" }, "chat": { "sendMessage": "メッセージを送信", @@ -30,7 +31,22 @@ "edit": "編集", "regenerate": "再生成", "stopResponse": "応答を停止", - "pressShiftEnterToChangeLines": "Shift+Enterを押して行を変更" + "pressShiftEnterToChangeLines": "Shift+Enterを押して行を変更", + "generateImage": "画像を生成", + "imageGenerationNotAvailable": "画像生成は利用できません", + "imageGenerationDisabled": "画像生成は無効になっています", + "enableImageGeneration": "画像生成を有効にする", + "disableImageGeneration": "画像生成を無効にする", + "selectImageProvider": "画像プロバイダーを選択", + "noImageProvidersAvailable": "利用可能な画像プロバイダーがありません" + }, + "tools": { + "executing": "ツールを実行中...", + "executedSuccessfully": "ツール実行が完了しました", + "executionFailed": "ツール実行に失敗しました", + "toolName": "ツール名", + "result": "結果", + "error": "エラー" }, "translation": { "title": "翻訳", @@ -96,13 +112,52 @@ "generationCount": "生成数", "randomSeed": "ランダムシード", "generateButton": "生成", - "generating": "生成中...", + "generating": "画像を生成中...", + "creatingImage": "AIが画像を作成しています", + "generationFailed": "画像生成に失敗しました", + "generatedImage": "生成された画像", "prompt": "プロンプト", - "promptPlaceholder": "作成したい画像を説明してください。例:夕日が沈む静かな湖、背景には山々", + "promptPlaceholder": "作成したい画像を説明してください。例:夕暮れの穏やかな湖と背景の山々", "results": "生成結果", - "placeholderText": "プロンプトを入力して生成ボタンをクリックして画像を作成", - "apiKeyMissing": "選択したプロバイダーのAPIキーを設定で設定してください。", - "seedHelp": "再現可能な結果のためのシード値" + "placeholderText": "プロンプトを入力して生成ボタンをクリックすると画像が作成されます", + "apiKeyMissing": "設定で選択したプロバイダーのAPIキーを設定してください。", + "seedHelp": "再現可能な結果のためのシード" + }, + "mcpServer": { + "title": "MCPサーバー", + "addServer": "サーバーを追加", + "editServer": "サーバーを編集", + "exploreMore": "もっと探す", + "serverName": "サーバー名", + "serverNamePlaceholder": "サーバー名を入力", + "description": "説明", + "descriptionPlaceholder": "サーバーの説明を入力(任意)", + "transportType": "トランスポートタイプ", + "serverURL": "サーバーURL", + "serverURLPlaceholder": "サーバーURLを入力", + "headers": "ヘッダー(JSON)", + "default": "デフォルト", + "imageGeneration": "画像生成", + "noServers": "MCPサーバーが設定されていません", + "addServerPrompt": "「サーバーを追加」ボタンをクリックして作成してください", + "cannotEditDefault": "デフォルトサーバーは編集できません", + "cannotDeleteDefault": "デフォルトサーバーは削除できません", + "sseOption": "サーバー送信イベント(SSE)", + "stdioOption": "標準入出力(stdio)", + "streamableHttpOption": "ストリーミング可能なHTTP", + "sseDisplay": "SSE", + "stdioDisplay": "STDIO", + "streamableHttpDisplay": "ストリーミングHTTP", + "connectionSettings": "接続設定", + "command": "コマンド", + "commandPlaceholder": "コマンドを入力(例:python、node)", + "args": "引数", + "argsPlaceholder": "引数を1行に1つずつ入力", + "argsHelp": "各引数を別々の行に入力してください", + "env": "環境変数(JSON)", + "advancedSettings": "詳細設定", + "timeout": "タイムアウト", + "seconds": "秒" }, "settings": { "language": "言語", @@ -145,6 +200,20 @@ "models_modelCapabilities_chat": "チャット", "webSearch_title": "ウェブ検索(プレビュー)", "webSearch_toggle_label": "ウェブ検索機能を有効にする", - "webSearch_description": "有効にすると、AIはウェブを検索してより最新の情報を提供できます。注意: ウェブ検索は現在、OpenAIとGeminiモデルでのみサポートされています。また、ウェブ検索が有効な場合、ストリーミング応答(テキストが段階的に表示される)は利用できません。" + "webSearch_description": "有効にすると、AIはウェブを検索してより最新の情報を提供できます。注意: ウェブ検索は現在、OpenAIとGeminiモデルでのみサポートされています。また、ウェブ検索が有効な場合、ストリーミング応答(テキストが段階的に表示される)は利用できません。", + + "general": "一般", + "startup": "起動設定", + "startWithSystem": "システム起動時に起動する", + "startupToTray": "起動時にトレイに最小化する", + "trayOptions": "トレイオプション", + "closeToTray": "閉じる時に終了せずトレイに最小化する", + 
"networkProxy": "ネットワークプロキシ", + "systemProxy": "システムプロキシを使用", + "customProxy": "カスタムプロキシ", + "noProxy": "プロキシを使用しない", + "privacy": "プライバシー設定", + "sendErrorReports": "匿名のエラーレポートと使用統計を送信する", + "sendErrorReports_description": "匿名のクラッシュレポートと使用データを送信してアプリケーションの改善に協力する。" } } \ No newline at end of file diff --git a/src/locales/ko/translation.json b/src/locales/ko/translation.json index 952eb2e..e011ef2 100644 --- a/src/locales/ko/translation.json +++ b/src/locales/ko/translation.json @@ -9,7 +9,8 @@ "submit": "제출", "loading": "로딩 중...", "error": "오류", - "success": "성공" + "success": "성공", + "general": "일반" }, "chat": { "sendMessage": "메시지 보내기", @@ -30,7 +31,22 @@ "edit": "편집", "regenerate": "재생성", "stopResponse": "응답 중지", - "pressShiftEnterToChangeLines": "줄을 바꾸려면 Shift+Enter를 누르세요" + "pressShiftEnterToChangeLines": "줄을 바꾸려면 Shift+Enter를 누르세요", + "generateImage": "이미지 생성", + "imageGenerationNotAvailable": "이미지 생성을 사용할 수 없습니다", + "imageGenerationDisabled": "이미지 생성이 비활성화되었습니다", + "enableImageGeneration": "이미지 생성 활성화", + "disableImageGeneration": "이미지 생성 비활성화", + "selectImageProvider": "이미지 제공자 선택", + "noImageProvidersAvailable": "사용 가능한 이미지 제공자가 없습니다" + }, + "tools": { + "executing": "도구 실행 중...", + "executedSuccessfully": "도구 실행 완료", + "executionFailed": "도구 실행 실패", + "toolName": "도구 이름", + "result": "결과", + "error": "오류" }, "translation": { "title": "번역", @@ -93,17 +109,55 @@ "provider": "제공자", "model": "모델", "imageSize": "이미지 크기", - "generationCount": "생성 수량", + "generationCount": "생성 수", "randomSeed": "랜덤 시드", "generateButton": "생성", - "generating": "생성 중...", + "generating": "이미지 생성 중...", + "creatingImage": "AI가 이미지를 만들고 있습니다", + "generationFailed": "이미지 생성 실패", + "generatedImage": "생성된 이미지", "prompt": "프롬프트", - "promptPlaceholder": "만들고 싶은 이미지를 설명하세요. 예: 석양이 지는 고요한 호수, 배경에는 산맥", - "results": "생성 결과", - "placeholderText": "프롬프트를 입력하고 생성 버튼을 클릭하여 이미지 생성", - "apiKeyMissing": "선택한 제공자의 API 키를 설정에서 설정하세요.", + "promptPlaceholder": "원하는 이미지를 설명하세요. 예: 일몰 시 평화로운 호수와 배경에 산이 있는 풍경", + "results": "생성된 결과", + "placeholderText": "프롬프트를 입력하고 생성 버튼을 클릭하여 이미지를 만드세요", + "apiKeyMissing": "설정에서 선택한 제공자의 API 키를 설정해 주세요.", "seedHelp": "재현 가능한 결과를 위한 시드" }, + "mcpServer": { + "title": "MCP 서버", + "addServer": "서버 추가", + "editServer": "서버 편집", + "serverName": "서버 이름", + "serverNamePlaceholder": "서버 이름 입력", + "description": "설명", + "descriptionPlaceholder": "서버 설명 입력 (선택사항)", + "transportType": "전송 유형", + "serverURL": "서버 URL", + "serverURLPlaceholder": "서버 URL 입력", + "headers": "헤더 (JSON)", + "default": "기본값", + "imageGeneration": "이미지 생성", + "noServers": "구성된 MCP 서버가 없습니다", + "addServerPrompt": "'서버 추가' 버튼을 클릭하여 하나 생성하세요", + "cannotEditDefault": "기본 서버는 편집할 수 없습니다", + "cannotDeleteDefault": "기본 서버는 삭제할 수 없습니다", + "sseOption": "서버 전송 이벤트 (SSE)", + "stdioOption": "표준 입출력 (stdio)", + "streamableHttpOption": "스트리밍 가능한 HTTP", + "sseDisplay": "SSE", + "stdioDisplay": "STDIO", + "streamableHttpDisplay": "스트리밍 HTTP", + "connectionSettings": "연결 설정", + "command": "명령어", + "commandPlaceholder": "명령어 입력 (예: python, node)", + "args": "인수", + "argsPlaceholder": "한 줄에 하나의 인수", + "argsHelp": "각 인수를 별도의 줄에 입력하세요", + "env": "환경 변수 (JSON)", + "advancedSettings": "고급 설정", + "timeout": "타임아웃", + "seconds": "초" + }, "settings": { "language": "언어", "apiKey": "API 키", @@ -145,6 +199,20 @@ "models_modelCapabilities_chat": "채팅", "webSearch_title": "웹 검색 (미리보기)", "webSearch_toggle_label": "웹 검색 기능 활성화", - "webSearch_description": "활성화되면 AI가 웹을 검색하여 더 최신 정보를 제공합니다. 웹 검색은 현재 OpenAI 및 Gemini 모델에서만 지원됩니다. 
또한 웹 검색이 활성화되면 스트리밍 응답(텍스트가 점진적으로 나타나는)이 제공되지 않습니다." + "webSearch_description": "활성화되면 AI가 웹을 검색하여 더 최신 정보를 제공합니다. 웹 검색은 현재 OpenAI 및 Gemini 모델에서만 지원됩니다. 또한 웹 검색이 활성화되면 스트리밍 응답(텍스트가 점진적으로 나타나는)이 제공되지 않습니다.", + + "general": "일반 설정", + "startup": "시작 설정", + "startWithSystem": "시스템과 함께 시작", + "startupToTray": "트레이로 최소화하여 시작", + "trayOptions": "트레이 옵션", + "closeToTray": "종료 대신 트레이로 최소화", + "networkProxy": "네트워크 프록시", + "systemProxy": "시스템 프록시 사용", + "customProxy": "사용자 정의 프록시", + "noProxy": "프록시 사용 안 함", + "privacy": "개인정보 설정", + "sendErrorReports": "익명의 오류 보고서 및 사용 통계 보내기", + "sendErrorReports_description": "익명의 충돌 보고서와 사용 데이터를 보내 애플리케이션 개선에 도움을 줍니다." } } \ No newline at end of file diff --git a/src/locales/zh-CN/translation.json b/src/locales/zh-CN/translation.json index 5069f0c..675dbd9 100644 --- a/src/locales/zh-CN/translation.json +++ b/src/locales/zh-CN/translation.json @@ -9,7 +9,8 @@ "submit": "提交", "loading": "加载中...", "error": "错误", - "success": "成功" + "success": "成功", + "general": "常规" }, "chat": { "sendMessage": "发送消息", @@ -30,7 +31,22 @@ "edit": "编辑", "regenerate": "重新生成", "stopResponse": "停止响应", - "pressShiftEnterToChangeLines": "按 Shift+Enter 换行" + "pressShiftEnterToChangeLines": "按 Shift+Enter 换行", + "generateImage": "生成图片", + "imageGenerationNotAvailable": "图片生成不可用", + "imageGenerationDisabled": "图片生成已禁用", + "enableImageGeneration": "启用图片生成", + "disableImageGeneration": "禁用图片生成", + "selectImageProvider": "选择图片提供商", + "noImageProvidersAvailable": "没有可用的图片提供商" + }, + "tools": { + "executing": "正在执行工具...", + "executedSuccessfully": "工具执行完成", + "executionFailed": "工具执行失败", + "toolName": "工具名称", + "result": "结果", + "error": "错误" }, "translation": { "title": "翻译", @@ -89,21 +105,61 @@ "selectModel_search_placeholder": "搜索模型..." }, "imageGeneration": { - "title": "图片生成", + "title": "图像生成", "provider": "提供商", "model": "模型", - "imageSize": "图片尺寸", + "imageSize": "图像尺寸", "generationCount": "生成数量", "randomSeed": "随机种子", "generateButton": "生成", - "generating": "生成中...", + "generating": "正在生成图像...", + "settingsButton": "设置", + "creatingImage": "AI正在创建您的图像", + "generationFailed": "图像生成失败", + "generatedImage": "生成的图像", "prompt": "提示词", - "promptPlaceholder": "描述你想创建的图片,例如:一个宁静的湖泊,夕阳西下,远处是群山", + "promptPlaceholder": "描述您想要创建的图像,例如:日落时分的平静湖泊,背景是山脉", "results": "生成结果", - "placeholderText": "输入提示词并点击生成按钮来创建图片", - "apiKeyMissing": "请在设置中为所选提供商设置您的 API 密钥。", + "placeholderText": "输入提示词并点击生成按钮创建图像", + "apiKeyMissing": "请在设置中为所选提供商设置API密钥。", "seedHelp": "用于可重现结果的种子" }, + "mcpServer": { + "title": "MCP 服务器", + "addServer": "添加服务器", + "editServer": "编辑服务器", + "exploreMore": "探索更多", + "serverName": "服务器名称", + "serverNamePlaceholder": "输入服务器名称", + "description": "描述", + "descriptionPlaceholder": "输入服务器描述(可选)", + "transportType": "传输类型", + "serverURL": "服务器 URL", + "serverURLPlaceholder": "输入服务器 URL", + "headers": "请求头 (JSON)", + "default": "默认", + "imageGeneration": "图像生成", + "noServers": "未配置 MCP 服务器", + "addServerPrompt": "点击\"添加服务器\"按钮创建一个", + "cannotEditDefault": "无法编辑默认服务器", + "cannotDeleteDefault": "无法删除默认服务器", + "sseOption": "服务器发送事件 (SSE)", + "stdioOption": "标准输入/输出 (stdio)", + "streamableHttpOption": "可流式传输的 HTTP", + "sseDisplay": "SSE", + "stdioDisplay": "STDIO", + "streamableHttpDisplay": "流式 HTTP", + "connectionSettings": "连接设置", + "command": "命令", + "commandPlaceholder": "输入命令(例如:python, node)", + "args": "参数", + "argsPlaceholder": "每行一个参数", + "argsHelp": "每个参数单独一行输入", + "env": "环境变量 (JSON)", + "advancedSettings": "高级设置", + "timeout": "超时", + "seconds": "秒" + }, "settings": { "language": 
"语言", "apiKey": "API 密钥", @@ -145,6 +201,20 @@ "models_modelCapabilities_chat": "聊天", "webSearch_title": "网页搜索(预览)", "webSearch_toggle_label": "启用网页搜索功能", - "webSearch_description": "启用后,AI 可以搜索网页以提供更及时的信息。请注意,网页搜索目前仅支持 OpenAI 和 Gemini 模型。此外,启用网页搜索时,流式响应(文本逐步出现)将不可用。" + "webSearch_description": "启用后,AI 可以搜索网页以提供更及时的信息。请注意,网页搜索目前仅支持 OpenAI 和 Gemini 模型。此外,启用网页搜索时,流式响应(文本逐步出现)将不可用。", + + "general": "常规", + "startup": "启动设置", + "startWithSystem": "开机自启动", + "startupToTray": "启动时最小化到托盘", + "trayOptions": "托盘选项", + "closeToTray": "关闭时最小化到托盘而不是退出", + "networkProxy": "网络代理", + "systemProxy": "使用系统代理", + "customProxy": "自定义代理", + "noProxy": "不使用代理", + "privacy": "隐私设置", + "sendErrorReports": "发送匿名错误报告和数据统计", + "sendErrorReports_description": "通过发送匿名崩溃报告和使用数据来帮助改进应用程序。" } } \ No newline at end of file diff --git a/src/locales/zh-TW/translation.json b/src/locales/zh-TW/translation.json index 5a6389f..ba3a2c0 100644 --- a/src/locales/zh-TW/translation.json +++ b/src/locales/zh-TW/translation.json @@ -9,7 +9,8 @@ "submit": "提交", "loading": "載入中...", "error": "錯誤", - "success": "成功" + "success": "成功", + "general": "一般" }, "chat": { "sendMessage": "發送訊息", @@ -30,7 +31,22 @@ "edit": "編輯", "regenerate": "重新生成", "stopResponse": "停止回應", - "pressShiftEnterToChangeLines": "按 Shift+Enter 換行" + "pressShiftEnterToChangeLines": "按 Shift+Enter 換行", + "generateImage": "生成圖片", + "imageGenerationNotAvailable": "圖片生成不可用", + "imageGenerationDisabled": "圖片生成已禁用", + "enableImageGeneration": "啟用圖片生成", + "disableImageGeneration": "禁用圖片生成", + "selectImageProvider": "選擇圖片提供商", + "noImageProvidersAvailable": "沒有可用的圖片提供商" + }, + "tools": { + "executing": "正在執行工具...", + "executedSuccessfully": "工具執行完成", + "executionFailed": "工具執行失敗", + "toolName": "工具名稱", + "result": "結果", + "error": "錯誤" }, "translation": { "title": "翻譯", @@ -89,21 +105,60 @@ "selectModel_search_placeholder": "搜尋模型..." 
}, "imageGeneration": { - "title": "圖片生成", - "provider": "提供商", + "title": "圖像生成", + "provider": "提供者", "model": "模型", - "imageSize": "圖片尺寸", + "imageSize": "圖像大小", "generationCount": "生成數量", "randomSeed": "隨機種子", "generateButton": "生成", - "generating": "生成中...", + "generating": "正在生成圖像...", + "creatingImage": "AI正在創建您的圖像", + "generationFailed": "圖像生成失敗", + "generatedImage": "生成的圖像", "prompt": "提示詞", - "promptPlaceholder": "描述你想創建的圖片,例如:一個寧靜的湖泊,夕陽西下,遠處是群山", + "promptPlaceholder": "描述您想要創建的圖像,例如:日落時分的平靜湖泊,背景是山脈", "results": "生成結果", - "placeholderText": "輸入提示詞並點擊生成按鈕來創建圖片", - "apiKeyMissing": "請在設定中為所選提供商設置您的 API 金鑰。", + "placeholderText": "輸入提示詞並點擊生成按鈕創建圖像", + "apiKeyMissing": "請在設定中為所選提供者設置API金鑰。", "seedHelp": "用於可重現結果的種子" }, + "mcpServer": { + "title": "MCP 伺服器", + "addServer": "新增伺服器", + "editServer": "編輯伺服器", + "exploreMore": "探索更多", + "serverName": "伺服器名稱", + "serverNamePlaceholder": "輸入伺服器名稱", + "description": "描述", + "descriptionPlaceholder": "輸入伺服器描述(選填)", + "transportType": "傳輸類型", + "serverURL": "伺服器 URL", + "serverURLPlaceholder": "輸入伺服器 URL", + "headers": "標頭 (JSON)", + "default": "預設", + "imageGeneration": "圖像生成", + "noServers": "尚未設定 MCP 伺服器", + "addServerPrompt": "點擊「新增伺服器」按鈕來建立一個", + "cannotEditDefault": "無法編輯預設伺服器", + "cannotDeleteDefault": "無法刪除預設伺服器", + "sseOption": "伺服器傳送事件 (SSE)", + "stdioOption": "標準輸入/輸出 (stdio)", + "streamableHttpOption": "可串流的 HTTP", + "sseDisplay": "SSE", + "stdioDisplay": "STDIO", + "streamableHttpDisplay": "串流 HTTP", + "connectionSettings": "連線設定", + "command": "指令", + "commandPlaceholder": "輸入指令(例如:python, node)", + "args": "參數", + "argsPlaceholder": "每行一個參數", + "argsHelp": "每個參數請單獨一行輸入", + "env": "環境變數 (JSON)", + "advancedSettings": "進階設定", + "timeout": "逾時", + "seconds": "秒" + }, "settings": { "language": "語言", "apiKey": "API 金鑰", @@ -145,6 +200,20 @@ "models_modelCapabilities_chat": "聊天", "webSearch_title": "網頁搜尋(預覽)", "webSearch_toggle_label": "啟用網頁搜尋功能", - "webSearch_description": "啟用後,AI 可以搜尋網頁以提供更即時的信息。請注意,網頁搜尋目前僅支援 OpenAI 和 Gemini 模型。此外,啟用網頁搜尋時,將無法使用流式回應(文字逐步顯示)。" + "webSearch_description": "啟用後,AI 可以搜尋網頁以提供更即時的信息。請注意,網頁搜尋目前僅支援 OpenAI 和 Gemini 模型。此外,啟用網頁搜尋時,將無法使用流式回應(文字逐步顯示)。", + + "general": "一般設定", + "startup": "啟動設定", + "startWithSystem": "隨系統啟動", + "startupToTray": "啟動時最小化到系統匣", + "trayOptions": "系統匣選項", + "closeToTray": "關閉時最小化到系統匣而非退出", + "networkProxy": "網路代理", + "systemProxy": "使用系統代理", + "customProxy": "自定義代理", + "noProxy": "不使用代理", + "privacy": "隱私設定", + "sendErrorReports": "發送匿名錯誤報告和使用統計", + "sendErrorReports_description": "通過發送匿名當機報告和使用數據來幫助改進應用程式。" } } \ No newline at end of file diff --git a/src/services/ai-service.ts b/src/services/ai-service.ts index 4538458..246f44d 100644 --- a/src/services/ai-service.ts +++ b/src/services/ai-service.ts @@ -3,6 +3,8 @@ import { ProviderFactory } from './providers/provider-factory'; import { Message } from '../types/chat'; import { StreamControlHandler } from './streaming-control'; import { SETTINGS_CHANGE_EVENT, SettingsService } from './settings-service'; +import { MCPService } from './mcp-service'; +import { AIServiceCapability } from '../types/capabilities'; export interface ModelOption { id: string; @@ -34,7 +36,7 @@ export class AIService { private state: AIState = { status: 'idle', error: null, - isCachingModels: false + isCachingModels: false, }; private listeners: Set<() => void> = new Set(); private modelCache: Map = new Map(); @@ -67,11 +69,17 @@ export class AIService { for (const providerID of Object.keys(settings.providers)) { const providerSettings = 
settings.providers[providerID]; - if(this.providers.has(providerID)) { + if (this.providers.has(providerID)) { this.providers.delete(providerID); - this.providers.set(providerID, ProviderFactory.getNewProvider(providerID)); - } - else if (providerSettings && providerSettings.apiKey && providerSettings.apiKey.length > 0) { + this.providers.set( + providerID, + ProviderFactory.getNewProvider(providerID) + ); + } else if ( + providerSettings && + providerSettings.apiKey && + providerSettings.apiKey.length > 0 + ) { const providerInstance = ProviderFactory.getNewProvider(providerID); if (providerInstance) { this.providers.set(providerID, providerInstance); @@ -89,7 +97,7 @@ export class AIService { // Refresh models when settings change this.refreshModels(); }; - + window.addEventListener(SETTINGS_CHANGE_EVENT, handleSettingsChange); } @@ -107,7 +115,7 @@ export class AIService { * Notify all listeners of state changes */ private notifyListeners(): void { - this.listeners.forEach(listener => listener()); + this.listeners.forEach((listener) => listener()); } /** @@ -124,7 +132,7 @@ export class AIService { private handleSuccess(): void { this.setState({ status: 'success', - error: null + error: null, }); } @@ -135,7 +143,7 @@ export class AIService { console.error('AI request error:', error); this.setState({ status: 'error', - error + error, }); } @@ -153,14 +161,14 @@ export class AIService { if (this.providers.has(name)) { return this.providers.get(name); } - + // If provider not in cache, try to create it const provider = ProviderFactory.getNewProvider(name); if (provider) { this.providers.set(name, provider); return provider; } - + return undefined; } @@ -171,11 +179,26 @@ export class AIService { return Array.from(this.providers.values()); } + /** + * Get all providers that support image generation + */ + public getImageGenerationProviders(): AiServiceProvider[] { + const providers = this.getAllProviders(); + return providers.filter((provider) => { + // Check if the provider has any models with image generation capability + const models = provider.availableModels || []; + return models.some((model) => { + const capabilities = provider.getModelCapabilities(model.modelId); + return capabilities.includes(AIServiceCapability.ImageGeneration); + }); + }); + } + /** * Get a streaming chat completion from the AI */ public async getChatCompletion( - messages: Message[], + messages: Message[], options: CompletionOptions, streamController: StreamControlHandler ): Promise { @@ -184,18 +207,25 @@ export class AIService { const providerName = options.provider; const modelName = options.model; const useStreaming = options.stream; - + // Get provider instance const provider = this.getProvider(providerName); - console.log('Provider: ', providerName, ' Model: ', modelName, ' Use streaming: ', useStreaming); - + console.log( + 'Provider: ', + providerName, + ' Model: ', + modelName, + ' Use streaming: ', + useStreaming + ); + if (!provider) { throw new Error(`Provider ${providerName} not available`); } - + const result = await provider.getChatCompletion( - messages, + messages, { model: modelName, provider: providerName, @@ -207,10 +237,10 @@ export class AIService { user: options?.user, stream: useStreaming, signal: streamController.getAbortSignal(), + tools: options?.tools, }, streamController ); - return result; } catch (e) { @@ -219,8 +249,11 @@ export class AIService { this.handleSuccess(); return null; } - - const error = e instanceof Error ? 
e : new Error('Unknown error during streaming chat completion'); + + const error = + e instanceof Error + ? e + : new Error('Unknown error during streaming chat completion'); this.handleError(error); return null; } @@ -242,20 +275,20 @@ export class AIService { throw new Error('Not implemented'); // this.startRequest(); - + // try { // const provider = this.getImageGenerationProvider(); - + // if (!provider) { // throw new Error('No image generation provider available'); // } - + // if (!provider.generateImage) { // throw new Error(`Provider ${provider.name} does not support image generation`); // } - + // const result = await provider.generateImage(prompt, options); - + // this.handleSuccess(); // return result; // } catch (e) { @@ -315,62 +348,67 @@ export class AIService { const cacheKey = 'all_providers'; const cachedTime = this.lastFetchTime.get(cacheKey) || 0; const now = Date.now(); - + // Return cached models if they're still valid if (this.modelCache.has(cacheKey) && now - cachedTime < this.CACHE_TTL) { return this.modelCache.get(cacheKey) || []; } - + // Otherwise, collect models from all providers const allModels: ModelOption[] = []; const providerPromises = []; - + for (const provider of this.getAllProviders()) { providerPromises.push(this.getModelsForProvider(provider.id)); } - + const results = await Promise.all(providerPromises); - + // Flatten results and filter out duplicates - results.forEach(models => { + results.forEach((models) => { allModels.push(...models); }); - + // Cache and return results this.modelCache.set(cacheKey, allModels); this.lastFetchTime.set(cacheKey, now); - + return allModels; } /** * Get models for a specific provider */ - public async getModelsForProvider(providerName: string): Promise { + public async getModelsForProvider( + providerName: string + ): Promise { // Check if we already have a cached result const cachedTime = this.lastFetchTime.get(providerName) || 0; const now = Date.now(); - + // Return cached models if they're still valid - if (this.modelCache.has(providerName) && now - cachedTime < this.CACHE_TTL) { + if ( + this.modelCache.has(providerName) && + now - cachedTime < this.CACHE_TTL + ) { return this.modelCache.get(providerName) || []; } - + // Get provider instance const provider = this.getProvider(providerName); if (!provider) { console.warn(`Provider ${providerName} not available`); return []; } - + this.setState({ isCachingModels: true }); - + try { // Fetch models from provider const models = await provider.fetchAvailableModels(); - + // Convert to ModelOption format - const modelOptions: ModelOption[] = models.map(model => ({ + const modelOptions: ModelOption[] = models.map((model) => ({ id: model.modelId, name: model.modelName, provider: providerName, @@ -379,7 +417,7 @@ export class AIService { // Cache results this.modelCache.set(providerName, modelOptions); this.lastFetchTime.set(providerName, now); - + this.setState({ isCachingModels: false }); return modelOptions; } catch (error) { @@ -396,10 +434,17 @@ export class AIService { // Clear cache this.modelCache.clear(); this.lastFetchTime.clear(); - + this.refreshProviders(); // Re-fetch all models await this.getCachedAllModels(); } + + /** + * Get all available MCP servers + */ + public getMCPServers(): Record { + return MCPService.getInstance().getMCPServers(); + } } \ No newline at end of file diff --git a/src/services/chat-service.ts b/src/services/chat-service.ts index 92a1627..fcb24bc 100644 --- a/src/services/chat-service.ts +++ b/src/services/chat-service.ts @@ -6,6 
+6,8 @@ import { StreamControlHandler } from './streaming-control'; import { MessageHelper } from './message-helper'; import { AIServiceCapability } from '../types/capabilities'; import { FileUploadService } from './file-upload-service'; +import { MCPService } from './mcp-service'; +import { MCPServerSettings } from '../types/settings'; /** * Service for managing chat conversations @@ -131,12 +133,14 @@ export class ChatService { /** * Send a message in the active conversation with streaming support + * Optional files parameter allows sending messages with file attachments */ public async sendMessage( content: string, conversationId: string, isStreaming: boolean, - conversationUpdate: (conversations: Conversation[]) => void + conversationUpdate: (conversations: Conversation[]) => void, + files?: File[] ): Promise { if (!this.dbService) { throw new Error('Database service not initialized'); @@ -159,7 +163,9 @@ export class ChatService { //#region Save user message to database and update title // eslint-disable-next-line prefer-const - let {conversation: updatedConversation, message: userMessage} = await MessageHelper.addUserMessageToConversation(content, currentConversation); + let {conversation: updatedConversation, message: userMessage} = files && files.length > 0 + ? await MessageHelper.addUserMessageWithFilesToConversation(content, await FileUploadService.getInstance().processUploadedFiles(files), currentConversation) + : await MessageHelper.addUserMessageToConversation(content, currentConversation); // Update in memory this.conversations = this.conversations.map(c => @@ -261,134 +267,6 @@ export class ChatService { } } - /** - * Send a message with files in the active conversation - */ - public async sendMessageWithFiles( - content: string, - files: File[], - conversationId: string, - isStreaming: boolean, - conversationUpdate: (conversations: Conversation[]) => void - ): Promise { - if (!this.dbService) { - throw new Error('Database service not initialized'); - } - - const currentConversation = this.conversations.find(c => c.conversationId === conversationId); - if (currentConversation === undefined) { - throw new Error('Active conversation not found'); - } - - try { - const settingsService = SettingsService.getInstance(); - const provider = settingsService.getSelectedProvider(); - const model = settingsService.getSelectedModel(); - - // Process uploaded files - const fileUploadService = FileUploadService.getInstance(); - const fileContents = await fileUploadService.processUploadedFiles(files); - - //#region Save user message with files to database and update title - // eslint-disable-next-line prefer-const - let {conversation: updatedConversation, message: userMessage} = await MessageHelper.addUserMessageWithFilesToConversation( - content, - fileContents, - currentConversation - ); - - // Update in memory - this.conversations = this.conversations.map(c => - c.conversationId === conversationId ? 
updatedConversation : c - ); - - conversationUpdate(this.conversations); - //#endregion - - // Map messages to messages array - const messages = MessageHelper.mapMessagesTreeToList(updatedConversation, false); - - //#region Streaming Special Message Handling - // Create a placeholder for the streaming message - const placeholderMessage: Message = MessageHelper.getPlaceholderMessage(model, provider, conversationId); - - userMessage.childrenMessageIds.push(placeholderMessage.messageId); - userMessage.preferIndex = userMessage.childrenMessageIds.length - 1; - - // Add placeholder to conversation and update UI - const messagesWithPlaceholder = new Map(updatedConversation.messages); - messagesWithPlaceholder.set(placeholderMessage.messageId, placeholderMessage); - - updatedConversation = { - ...updatedConversation, - messages: messagesWithPlaceholder, - updatedAt: new Date() - }; - - this.conversations = this.conversations.map(c => - c.conversationId === conversationId ? updatedConversation : c - ); - - conversationUpdate(this.conversations); - //#endregion - - //#region Send Chat Message to AI with streaming - - // Create a new abort controller for this request - const streamController = new StreamControlHandler( - updatedConversation, - placeholderMessage, - // ---- On chunk callback ---- - (updated: Conversation) => { - this.conversations = this.conversations.map(c => - c.conversationId === conversationId ? updated : c - ); - conversationUpdate(this.conversations); - }, - // ---- On finish callback ---- - async (aiResponse: Message | null) => { - - console.log(aiResponse); - - if (aiResponse === null) return; - - const finalConversation = await MessageHelper.insertAssistantMessageToConversation(userMessage, aiResponse, updatedConversation); - - // Update in memory - this.conversations = this.conversations.map(c => - c.conversationId === conversationId ? 
finalConversation : c - ); - - conversationUpdate(this.conversations); - - this.streamControllerMap.delete(conversationId); - } - ); - - this.streamControllerMap.set(conversationId, streamController); - - console.log('Messages:', messages); - - // Send Chat Message to AI with streaming - await this.aiService.getChatCompletion( - messages, - { - model: model, - provider: provider, - stream: isStreaming - }, - streamController - ); - - conversationUpdate(this.conversations); - - //#endregion - } catch (error) { - console.error('Failed to send message with files:', error); - throw error; - } - } - /** * Edit a message in a conversation */ @@ -895,4 +773,27 @@ export class ChatService { public getFolders(): ConversationFolder[] { return [...this.folders]; } + + /** + * Get available MCP servers + */ + public getAvailableMCPServers(): Record { + return MCPService.getInstance().getMCPServers(); + } + + /** + * Check if a specific model supports MCP tools + */ + public doesModelSupportMCPTools(provider: string, model: string): boolean { + const providerService = this.aiService.getProvider(provider); + + if (!providerService) { + return false; + } + + const capabilities = providerService.getModelCapabilities(model); + return capabilities.includes(AIServiceCapability.ToolUsage) || + capabilities.includes(AIServiceCapability.FunctionCalling) || + capabilities.includes(AIServiceCapability.MCPServer); + } } diff --git a/src/services/core/ai-service-provider.ts b/src/services/core/ai-service-provider.ts index b0b0e3f..d8685ed 100644 --- a/src/services/core/ai-service-provider.ts +++ b/src/services/core/ai-service-provider.ts @@ -25,6 +25,8 @@ export interface CompletionOptions { user?: string; stream?: boolean; // Whether to stream the response signal?: AbortSignal; // AbortSignal for cancellation + tools?: Record; // Pre-configured tools for the AI + toolChoice?: Record; // Pre-configured tool Choice for the AI } /** @@ -49,7 +51,7 @@ export interface AiServiceProvider { /** * Get the capabilities of a model with this provider */ - getModelCapabilities(model: string): AIServiceCapability[]; + getModelCapabilities(modelId: string): AIServiceCapability[]; /** * Fetch available models from the provider API @@ -77,7 +79,7 @@ export interface AiServiceProvider { getChatCompletion( messages: Message[], options: CompletionOptions, - streamController: StreamControlHandler + streamController: StreamControlHandler, ): Promise; /** diff --git a/src/services/database-integration.ts b/src/services/database-integration.ts index e358f3b..92263db 100644 --- a/src/services/database-integration.ts +++ b/src/services/database-integration.ts @@ -3,6 +3,7 @@ import { SettingsService } from './settings-service'; import { Conversation, Message, ConversationFolder, MessageContent, MessageContentType, FileJsonData } from '../types/chat'; import { v4 as uuidv4 } from 'uuid'; import { FileData } from '../types/file'; +import { ImageGenerationResult } from '../types/image'; const SYSTEM_MESSAGE_CONTENT: MessageContent[] = [ { @@ -350,6 +351,28 @@ export class DatabaseIntegrationService { }; } + public async saveImageGenerationResult(imageResult: ImageGenerationResult){ + try { + const resultID = await this.dbService.saveImageGenerationResult(imageResult); + return resultID; + } catch (error) { + console.error('Error getting files:', error); + return null; + } + } + + /** + * Get all image generation results from the database + */ + public async getImageGenerationResults(): Promise { + try { + return await 
this.dbService.getImageGenerationResults(); + } catch (error) { + console.error('Error getting image generation results:', error); + return []; + } + } + public async saveFile(fileData: FileJsonData, arrayBuffer: ArrayBuffer): Promise<string> { const fileId = uuidv4(); const file: FileData = { diff --git a/src/services/database.ts b/src/services/database.ts index 9e43ea9..92207d9 100644 --- a/src/services/database.ts +++ b/src/services/database.ts @@ -3,12 +3,13 @@ import { Conversation, Message, ConversationFolder } from '../types/chat'; import { v4 as uuidv4 } from 'uuid'; import { UserSettings } from '../types/settings'; import { FileData } from '../types/file'; +import { ImageGenerationResult } from '../types/image'; // database.ts export class DatabaseService { private db: IDBDatabase | null = null; private readonly DB_NAME = 'tensorblock_db'; - private readonly DB_VERSION = 3; // Increase version to trigger upgrade + private readonly DB_VERSION = 4; // Increase version to trigger upgrade private readonly ENCRYPTION_KEY = 'your-secure-encryption-key'; // In production, use a secure key management system private isInitialized: boolean = false; @@ -70,6 +71,12 @@ export class DatabaseService { chatStore.createIndex('messageId', 'messageId'); } + // Create image generation results store + if (!db.objectStoreNames.contains('imageGenerationResults')) { + db.createObjectStore('imageGenerationResults', { + keyPath: 'imageResultId' + }); + } // Create API settings store if (!db.objectStoreNames.contains('apiSettings')) { db.createObjectStore('apiSettings', { @@ -547,6 +554,35 @@ export class DatabaseService { }); } + public async saveImageGenerationResult(imageGenerationResult: ImageGenerationResult): Promise<string> { + return new Promise((resolve, reject) => { + if (!this.db) throw new Error('Database not initialized'); + + const transaction = this.db.transaction('imageGenerationResults', 'readwrite'); + const store = transaction.objectStore('imageGenerationResults'); + const request = store.add(imageGenerationResult); + + request.onsuccess = () => resolve(request.result as string); + request.onerror = () => reject(request.error); + }); + } + + /** + * Get all image generation results from the database + */ + public async getImageGenerationResults(): Promise<ImageGenerationResult[]> { + return new Promise((resolve, reject) => { + if (!this.db) throw new Error('Database not initialized'); + + const transaction = this.db.transaction('imageGenerationResults', 'readonly'); + const store = transaction.objectStore('imageGenerationResults'); + const request = store.getAll(); + + request.onsuccess = () => resolve(request.result); + request.onerror = () => reject(request.error); + }); + } + /** * Get all files from the database * @returns List of files diff --git a/src/services/image-generation-handler.ts b/src/services/image-generation-handler.ts new file mode 100644 index 0000000..397c443 --- /dev/null +++ b/src/services/image-generation-handler.ts @@ -0,0 +1,182 @@ +import { v4 as uuidv4 } from 'uuid'; +import { ImageGenerationResult } from '../types/image'; +import { MessageContent, MessageContentType } from '../types/chat'; +import { DatabaseIntegrationService } from './database-integration'; + +export enum ImageGenerationStatus { + PENDING = 'pending', + GENERATING = 'generating', + SUCCESS = 'success', + FAILED = 'failed' +} + +export interface ImageGenerationOptions { + prompt: string; + negativePrompt?: string; + seed: string; + aspectRatio: string; + provider: string; + providerName: string; + model: string; + number: number; +}
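// A minimal usage sketch (illustrative only): how a caller might assemble ImageGenerationOptions
// and walk a handler through the status lifecycle (PENDING -> GENERATING -> SUCCESS/FAILED) using
// the ImageGenerationManager defined further down in this file. The prompt, provider id and model
// name below are hypothetical, and `callProvider` stands in for whatever provider call actually
// returns the generated image URLs.
async function sketchGenerateImage(
  callProvider: (options: ImageGenerationOptions) => Promise<string[]>
): Promise<void> {
  const options: ImageGenerationOptions = {
    prompt: 'A quiet lake at sunset',   // hypothetical prompt
    seed: '42',
    aspectRatio: '1:1',
    provider: 'openai',                 // hypothetical provider id
    providerName: 'OpenAI',
    model: 'dall-e-3',                  // hypothetical model name
    number: 1
  };

  const handler = ImageGenerationManager.getInstance().createHandler(options);
  handler.setGenerating();                                 // PENDING -> GENERATING
  try {
    const urls = await callProvider(handler.getOptions()); // the actual provider call is out of scope here
    await handler.setSuccess(urls);                        // persists the result via DatabaseIntegrationService
  } catch (e) {
    handler.setFailed(e instanceof Error ? e : new Error(String(e)));
  }
}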
+ +export class ImageGenerationHandler { + private imageResultId: string; + private status: ImageGenerationStatus; + private options: ImageGenerationOptions; + private images: MessageContent[] = []; + private errorMessage: string | null = null; + private onStatusChangeCallback: (handler: ImageGenerationHandler) => void; + private dbService: DatabaseIntegrationService; + private updatedAt: Date; + + constructor( + options: ImageGenerationOptions, + onStatusChangeCallback: (handler: ImageGenerationHandler) => void + ) { + this.imageResultId = uuidv4(); + this.options = options; + this.status = ImageGenerationStatus.PENDING; + this.onStatusChangeCallback = onStatusChangeCallback; + this.dbService = DatabaseIntegrationService.getInstance(); + this.updatedAt = new Date(); + } + + public getId(): string { + return this.imageResultId; + } + + public getStatus(): ImageGenerationStatus { + return this.status; + } + + public getOptions(): ImageGenerationOptions { + return this.options; + } + + public getImages(): MessageContent[] { + return this.images; + } + + public getError(): string | null { + return this.errorMessage; + } + + public getResult(): ImageGenerationResult { + return { + imageResultId: this.imageResultId, + prompt: this.options.prompt, + negativePrompt: this.options.negativePrompt || '', + seed: this.options.seed, + number: this.options.number, + status: this.status, + aspectRatio: this.options.aspectRatio, + provider: this.options.provider, + providerName: this.options.providerName, + model: this.options.model, + images: this.images, + updatedAt: this.updatedAt + }; + } + + public setGenerating(): void { + this.status = ImageGenerationStatus.GENERATING; + this.updatedAt = new Date(); + this.notifyStatusChange(); + } + + public async setSuccess(imageUrls: string[]): Promise<void> { + this.status = ImageGenerationStatus.SUCCESS; + this.updatedAt = new Date(); + this.images = imageUrls.map(url => ({ + type: MessageContentType.Image, + content: url, + dataJson: '' + })); + + // Save to database + await this.saveToDatabase(); + this.notifyStatusChange(); + } + + public setFailed(error: Error): void { + this.status = ImageGenerationStatus.FAILED; + this.updatedAt = new Date(); + this.errorMessage = error.message; + this.notifyStatusChange(); + } + + private notifyStatusChange(): void { + if (this.onStatusChangeCallback) { + this.onStatusChangeCallback(this); + } + } + + private async saveToDatabase(): Promise<void> { + try { + await this.dbService.saveImageGenerationResult(this.getResult()); + } catch (error) { + console.error('Failed to save image generation result to database:', error); + } + } +} + +export class ImageGenerationManager { + private static instance: ImageGenerationManager; + private handlers: Map<string, ImageGenerationHandler> = new Map(); + private onUpdateCallback: (handlers: Map<string, ImageGenerationHandler>) => void = () => {}; + + private constructor() {} + + public static getInstance(): ImageGenerationManager { + if (!ImageGenerationManager.instance) { + ImageGenerationManager.instance = new ImageGenerationManager(); + } + return ImageGenerationManager.instance; + } + + public createHandler( + options: ImageGenerationOptions + ): ImageGenerationHandler { + const handler = new ImageGenerationHandler( + options, + (updatedHandler) => this.onHandlerUpdate(updatedHandler) + ); + + this.handlers.set(handler.getId(), handler); + this.notifyUpdate(); + + return handler; + } + + public getHandler(id: string): ImageGenerationHandler | undefined { + return this.handlers.get(id); + } + + public removeHandler(id: string): void { + if
(this.handlers.has(id)) { + this.handlers.delete(id); + this.notifyUpdate(); + } + } + + public getAllHandlers(): Map { + return this.handlers; + } + + public setUpdateCallback(callback: (handlers: Map) => void): void { + this.onUpdateCallback = callback; + } + + private onHandlerUpdate(handler: ImageGenerationHandler): void { + this.handlers.set(handler.getId(), handler); + this.notifyUpdate(); + } + + private notifyUpdate(): void { + if (this.onUpdateCallback) { + this.onUpdateCallback(this.handlers); + } + } +} \ No newline at end of file diff --git a/src/services/mcp-service.ts b/src/services/mcp-service.ts new file mode 100644 index 0000000..9e96b2e --- /dev/null +++ b/src/services/mcp-service.ts @@ -0,0 +1,131 @@ +import { MCPServerSettings } from "../types/settings"; +import { SettingsService } from "./settings-service"; +import { v4 as uuidv4 } from 'uuid'; +import { AIService } from "./ai-service"; +import { OPENAI_PROVIDER_NAME } from "./providers/openai-service"; + +/** + * Service for managing MCP servers + */ +export class MCPService { + private static instance: MCPService; + private settingsService: SettingsService; + private aiService: AIService; + + private constructor() { + this.settingsService = SettingsService.getInstance(); + this.aiService = AIService.getInstance(); + } + + /** + * Get the singleton instance + */ + public static getInstance(): MCPService { + if (!MCPService.instance) { + MCPService.instance = new MCPService(); + } + return MCPService.instance; + } + + /** + * Get all MCP servers + */ + public getMCPServers(): Record { + return this.settingsService.getMCPServers(); + } + + /** + * Get a specific MCP server by ID + */ + public getMCPServer(id: string): MCPServerSettings | undefined { + return this.settingsService.getMCPServer(id); + } + + /** + * Create a new MCP server + * @param params - Server parameters depending on the type + */ + public async createMCPServer(params: { + name: string; + type: 'sse' | 'stdio' | 'streamableHttp'; + description?: string; + url?: string; + headers?: Record; + command?: string; + args?: string[]; + env?: Record; + timeout?: number; + }): Promise { + const id = uuidv4(); + + const server: MCPServerSettings = { + id, + name: params.name, + type: params.type, + description: params.description, + isDefault: false + }; + + // Add type-specific parameters + if (params.type === 'sse' || params.type === 'streamableHttp') { + server.url = params.url; + server.headers = params.headers; + } else if (params.type === 'stdio') { + server.command = params.command; + server.args = params.args; + server.env = params.env; + } + + // Add timeout parameter (common to all types) + if (params.timeout) { + server.timeout = params.timeout; + } + + await this.settingsService.addOrUpdateMCPServer(server); + return server; + } + + /** + * Update an MCP server + */ + public async updateMCPServer(server: MCPServerSettings): Promise { + await this.settingsService.addOrUpdateMCPServer(server); + } + + /** + * Delete an MCP server + */ + public async deleteMCPServer(id: string): Promise { + await this.settingsService.deleteMCPServer(id); + } + + /** + * Handle image generation with the DALL-E model + * This is a special handler for the default image generation MCP server + */ + public async handleImageGeneration(prompt: string, size?: string, style?: 'vivid' | 'natural'): Promise[]> { + const openaiService = this.aiService.getProvider(OPENAI_PROVIDER_NAME); + + if (!openaiService) { + throw new Error("OpenAI service not available"); + } + + // Map size 
to OpenAI dimensions + const sizeMap: Record = { + "1:1": "1024x1024", + "1:2": "512x1024", + "3:2": "1024x768", + "3:4": "768x1024", + "16:9": "1792x1024", + "9:16": "1024x1792" + }; + + // Generate the image + const images = await openaiService.getImageGeneration(prompt, { + size: sizeMap[size || '1:1'] || "1024x1024", + style: style || "vivid" + }); + + return images; + } +} \ No newline at end of file diff --git a/src/services/mcp-tool-adapter.ts b/src/services/mcp-tool-adapter.ts new file mode 100644 index 0000000..a661777 --- /dev/null +++ b/src/services/mcp-tool-adapter.ts @@ -0,0 +1,198 @@ +import { MCPServerSettings } from "../types/settings"; +import { MCPService } from "./mcp-service"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { experimental_createMCPClient as createMCPClient, MCPTransport } from 'ai'; +import { StreamControlHandler } from "./streaming-control"; + +/** + * Interface for the MCP client + */ +interface MCPClient { + tools: () => Promise>; + close: () => Promise; +} + +/** + * Interface for tool parameters + */ +interface ToolParameter { + type: string; + description?: string; + properties?: Record; + items?: ToolParameter; + enum?: string[]; + required?: string[]; +} + +/** + * Interface for tool definition + */ +interface ToolDefinition { + description: string; + parameters: ToolParameter; + execute: (params: Record) => Promise; +} + +/** + * Service for adapting MCP servers to AI SDK tools + */ +export class MCPToolAdapter { + private static instance: MCPToolAdapter; + private mcpService: MCPService; + private activeClients: Map = new Map(); + + private constructor() { + this.mcpService = MCPService.getInstance(); + } + + /** + * Get the singleton instance + */ + public static getInstance(): MCPToolAdapter { + if (!MCPToolAdapter.instance) { + MCPToolAdapter.instance = new MCPToolAdapter(); + } + return MCPToolAdapter.instance; + } + + /** + * Get tools for a specific MCP server + */ + public async getToolsForServer(serverId: string, streamController: StreamControlHandler): Promise> { + const server = this.mcpService.getMCPServer(serverId); + + if (!server) { + throw new Error(`MCP server with ID ${serverId} not found`); + } + + // Check if it's the special image generation MCP server + if (server.isImageGeneration) { + return this.getImageGenerationTools(); + } + + // Create MCP client for the server + const client = await this.createMCPClient(server); + + // Keep track of the client for later closing + this.activeClients.set(serverId, client); + + // Set up clean up when streaming finishes + streamController.getAbortSignal().addEventListener('abort', () => { + this.closeClient(serverId); + }); + + // Get the tools from the client + const tools = await client.tools(); + + return tools; + } + + /** + * Create an MCP client for a server + */ + // eslint-disable-next-line @typescript-eslint/no-unused-vars + private async createMCPClient(server: MCPServerSettings): Promise { + // let transport: MCPTransport; + + // if (server.type === 'sse') { + // transport = { + // type: 'sse' as const, + // url: server.url, + // headers: server.headers + // }; + // } else if (server.type === 'streamableHttp') { + // // This would typically use the StreamableHTTPClientTransport + // // But we'll use the basic transport for now + // transport = { + // type: 'sse' as const, + // url: server.url, + // headers: server.headers + // }; + // } else { + // throw new Error(`MCP transport type ${server.type} not yet supported`); + // } + + // const client = 
await createMCPClient({ + // transport: transport + // }); + + return null as unknown as MCPClient; + } + + /** + * Close an MCP client + */ + public async closeClient(serverId: string): Promise { + const client = this.activeClients.get(serverId); + + if (client) { + await client.close(); + this.activeClients.delete(serverId); + } + } + + /** + * Get tools for the image generation service + */ + private getImageGenerationTools(): Record { + // Create a simple tool for image generation + return { + generate_image: { + description: 'Generate an image from a text prompt', + parameters: { + type: 'object', + properties: { + prompt: { + type: 'string', + description: 'The text prompt to generate an image from' + }, + size: { + type: 'string', + enum: ['1:1', '1:2', '3:2', '3:4', '16:9', '9:16'], + description: 'The aspect ratio of the image to generate, defaults to 1:1' + }, + style: { + type: 'string', + enum: ['vivid', 'natural'], + description: 'The style of the image, either "vivid" or "natural", defaults to "vivid"' + } + }, + required: ['prompt'] + }, + execute: async (params: Record) => { + try { + const images = await this.mcpService.handleImageGeneration(params.prompt as string, params.size as string, params.style as 'vivid' | 'natural'); + + // Return the first image + if (images && images.length > 0) { + return { + images: images + }; + } + + return { + error: 'No images generated' + }; + } catch (error) { + return { + error: error instanceof Error ? error.message : 'Unknown error' + }; + } + } + }, + help: { + description: 'Get help about the image generation service', + parameters: { + type: 'object', + properties: {}, + required: [] + }, + execute: async () => { + return { + message: 'This is an image generation service that uses OpenAI\'s DALL-E 3 model. To generate an image, call the generate_image tool with a prompt.' 
+ }; + } + } + }; + } +} \ No newline at end of file diff --git a/src/services/providers/anthropic-service.ts b/src/services/providers/anthropic-service.ts index 911dabb..cee15ea 100644 --- a/src/services/providers/anthropic-service.ts +++ b/src/services/providers/anthropic-service.ts @@ -29,6 +29,8 @@ export class AnthropicService implements AiServiceProvider { this.settingsService = SettingsService.getInstance(); const providerSettings = this.settingsService.getProviderSettings(ANTHROPIC_PROVIDER_NAME); + this.apiModels = this.settingsService.getModels(ANTHROPIC_PROVIDER_NAME); + this._apiKey = providerSettings.apiKey || ''; this.anthropic = new Anthropic({ @@ -76,10 +78,18 @@ export class AnthropicService implements AiServiceProvider { /** * Get the capabilities of a model with this provider */ - // eslint-disable-next-line @typescript-eslint/no-unused-vars - getModelCapabilities(model: string): AIServiceCapability[] { + getModelCapabilities(modelId: string): AIServiceCapability[] { + // Get model data by modelId + const models = this.settingsService.getModels(this.name); + const modelData = models.find(x => x.modelId === modelId); + let hasImageGeneration = false; + + if(modelData?.modelCapabilities.findIndex(x => x === AIServiceCapability.ImageGeneration) !== -1){ + hasImageGeneration = true; + } + return mapModelCapabilities( - false, + hasImageGeneration, false, false, false, diff --git a/src/services/providers/common-provider-service.ts b/src/services/providers/common-provider-service.ts index 940f416..57903d0 100644 --- a/src/services/providers/common-provider-service.ts +++ b/src/services/providers/common-provider-service.ts @@ -1,13 +1,32 @@ -import { generateText, LanguageModelV1, LanguageModelUsage, Provider, streamText, ToolSet, ToolChoice } from 'ai'; -import { v4 as uuidv4 } from 'uuid'; +import { generateText, streamText, Provider, type LanguageModelUsage, type ToolSet, tool } from 'ai'; import { Message, MessageRole } from '../../types/chat'; import { AiServiceProvider, CompletionOptions } from '../core/ai-service-provider'; import { SettingsService } from '../settings-service'; import { StreamControlHandler } from '../streaming-control'; -import { AIServiceCapability } from '../../types/capabilities'; -import { mapModelCapabilities } from '../../types/capabilities'; -import { ModelSettings } from '../../types/settings'; +import { v4 as uuidv4 } from 'uuid'; import { MessageHelper } from '../message-helper'; +import { AIServiceCapability, mapModelCapabilities } from '../../types/capabilities'; +import { ModelSettings } from '../../types/settings'; +import { LanguageModelV1 } from 'ai'; +import { z } from 'zod'; + +// Define an interface for tool results to fix the 'never' type errors +interface ToolResult { + id: string; + name: string; + args: Record; + result?: unknown; +} + +// Define type for the execute function for tools +type ToolExecuteFunction = (args: Record) => Promise; + +// Define interface for tools with execute function +interface ToolWithExecute { + execute: ToolExecuteFunction; + [key: string]: unknown; +} + /** * Implementation of OpenAI service provider using the AI SDK */ @@ -36,6 +55,8 @@ export class CommonProviderHelper implements AiServiceProvider { this._apiKey = providerSettings.apiKey || ''; + this.apiModels = this.settingsService.getModels(providerName); + this.ProviderInstance = this.createClient(); } @@ -85,10 +106,18 @@ export class CommonProviderHelper implements AiServiceProvider { /** * Get the capabilities of a model with this provider 
*/ - // eslint-disable-next-line @typescript-eslint/no-unused-vars - getModelCapabilities(model: string): AIServiceCapability[] { + getModelCapabilities(modelId: string): AIServiceCapability[] { + // Get model data by modelId + const models = this.settingsService.getModels(this.providerName); + const modelData = models.find(x => x.modelId === modelId); + let hasImageGeneration = false; + + if(modelData?.modelCapabilities.findIndex(x => x === AIServiceCapability.ImageGeneration) !== -1){ + hasImageGeneration = true; + } + return mapModelCapabilities( - false, + hasImageGeneration, false, false, false, @@ -141,19 +170,70 @@ export class CommonProviderHelper implements AiServiceProvider { modelInstance: LanguageModelV1, messages: Message[], options: CompletionOptions, - streamController: StreamControlHandler, - tools: ToolSet | undefined = undefined, - toolChoice: ToolChoice | undefined = undefined + streamController: StreamControlHandler ): Promise { try { const formattedMessages = await MessageHelper.MessagesContentToOpenAIFormat(messages); console.log('formattedMessages: ', formattedMessages); + // Build ToolSet & ToolChoice for getChatCompletionByModel API + const rawTools = options.tools; + + const err = new Error('test'); + console.log('rawTools: ', rawTools); + console.log('err track trace: ', err); + + // Convert raw tools to AI SDK format + const formattedTools: ToolSet = {}; + + if (rawTools && typeof rawTools === 'object') { + for (const [toolName, toolConfig] of Object.entries(rawTools)) { + if (toolConfig && typeof toolConfig === 'object') { + // Special case for image generation + if (toolName === 'generate_image') { + console.log('toolName: ', toolName); + console.log('toolConfig: ', toolConfig); + formattedTools[toolName] = tool({ + description: 'Generate an image from a text prompt', + parameters: z.object({ + prompt: z.string().describe('The text prompt to generate an image from'), + size: z.string().optional().describe('The size of the image to generate'), + style: z.enum(['vivid', 'natural']).optional().describe('The style of the image to generate') + }), + execute: async (args) => { + // Execute is handled later in the tool call handler + return (toolConfig as ToolWithExecute).execute(args); + } + }); + } else { + // For other tools, try to extract description and parameters + const toolWithExecute = toolConfig as ToolWithExecute; + const description = (toolConfig as {description?: string}).description || `Execute ${toolName} tool`; + + // Create a fallback schema if not provided + const parameters = z.object({}).catchall(z.unknown()); + + formattedTools[toolName] = tool({ + description, + parameters, + execute: async (args) => { + if (typeof toolWithExecute.execute === 'function') { + return toolWithExecute.execute(args); + } + throw new Error(`Tool ${toolName} does not have an execute function`); + } + }); + } + } + } + } + let fullText = ''; if (options.stream) { console.log(`Streaming ${options.provider}/${options.model} response`); + const result = streamText({ model: modelInstance, abortSignal: streamController.getAbortSignal(), @@ -163,8 +243,8 @@ export class CommonProviderHelper implements AiServiceProvider { topP: options.top_p, frequencyPenalty: options.frequency_penalty, presencePenalty: options.presence_penalty, - tools: tools, - toolChoice: toolChoice, + tools: Object.keys(formattedTools).length > 0 ? 
formattedTools : undefined, + toolCallStreaming: true, onFinish: (result: { usage: LanguageModelUsage }) => { console.log('OpenAI streaming chat completion finished'); streamController.onFinish(result.usage); @@ -175,13 +255,108 @@ export class CommonProviderHelper implements AiServiceProvider { } }); - for await (const textPart of result.textStream) { - fullText += textPart; - streamController.onChunk(fullText); + // Track tool calls that are in progress for building arguments + const toolCallsInProgress = new Map(); + + for await (const streamPart of result.fullStream) { + const type = streamPart.type; + switch(type) { + case 'tool-call-streaming-start': { + // Initialize a new tool call + const toolCallId = streamPart.toolCallId; + const toolName = streamPart.toolName; + + toolCallsInProgress.set(toolCallId, { + name: toolName, + argsText: '' + }); + + // Notify about tool call start with empty args for now + streamController.onToolCall(toolName, toolCallId, {}); + break; + } + + case 'tool-call-delta': { + // Add to the arguments being built + const toolCallId = streamPart.toolCallId; + const argsTextDelta = streamPart.argsTextDelta; + + const toolCall = toolCallsInProgress.get(toolCallId); + if (toolCall) { + toolCall.argsText += argsTextDelta; + toolCallsInProgress.set(toolCallId, toolCall); + } + break; + } + + case 'tool-call': { + // Complete tool call with full arguments + const toolCallId = streamPart.toolCallId; + const toolName = streamPart.toolName; + const args = streamPart.args; + + // Mark as in progress + streamController.onToolCallInProgress(toolCallId); + + // Check if this is an image generation tool + if (toolName === 'generate_image' && args.prompt) { + try { + // Handle image generation + const imageGenService = options.provider === 'openai' + ? modelInstance + : null; + + if (imageGenService) { + // Execute the tool call - this would typically happen through the tools execute function + // but for demonstration we're handling it here + const result = { images: ['generated_image_placeholder'] }; + streamController.onToolCallResult(toolCallId, result); + } else { + streamController.onToolCallError( + toolCallId, + new Error(`Image generation not supported for provider ${options.provider}`) + ); + } + } catch (error) { + streamController.onToolCallError( + toolCallId, + error instanceof Error ? error : new Error('Unknown error in image generation') + ); + } + } else if (rawTools) { + // Use a safer way to check for and execute tools + const toolsMap = rawTools as Record; + const tool = toolsMap[toolName] as ToolWithExecute | undefined; + + if (tool && typeof tool.execute === 'function') { + // Execute other tool calls if they have an execute function + try { + const result = await tool.execute(args); + streamController.onToolCallResult(toolCallId, result); + } catch (error) { + streamController.onToolCallError( + toolCallId, + error instanceof Error ? 
error : new Error(`Error executing tool ${toolName}`) + ); + } + } + } + + break; + } + + case 'text-delta': { + const textDelta = streamPart.textDelta; + fullText += textDelta; + streamController.onChunk(fullText); + break; + } + } } } - else{ + else { console.log(`Generating ${options.provider}/${options.model} response`); + const { text, usage, toolResults } = await generateText({ model: modelInstance, messages: formattedMessages, @@ -190,11 +365,38 @@ export class CommonProviderHelper implements AiServiceProvider { topP: options.top_p, frequencyPenalty: options.frequency_penalty, presencePenalty: options.presence_penalty, - tools: tools, - toolChoice: toolChoice, + tools: Object.keys(formattedTools).length > 0 ? formattedTools : undefined, + maxSteps: 3, // Allow multiple steps for tool calls }); console.log('toolResults: ', toolResults); + + // Process tool results + if (toolResults && toolResults.length > 0) { + const typedToolResults = toolResults as unknown as ToolResult[]; + + for (const toolResult of typedToolResults) { + // First notify about the tool call + streamController.onToolCall(toolResult.name, toolResult.id, toolResult.args); + + // Then mark as in progress + streamController.onToolCallInProgress(toolResult.id); + + // Then provide the result + if (toolResult.name === 'generate_image' && + typeof toolResult.result === 'object' && + toolResult.result !== null && + 'images' in toolResult.result) { + const resultWithImages = toolResult.result as {images: string[]}; + const images = resultWithImages.images; + if (Array.isArray(images)) { + streamController.onToolCallResult(toolResult.id, { images }); + } + } else { + streamController.onToolCallResult(toolResult.id, toolResult.result); + } + } + } fullText = text; streamController.onChunk(fullText); @@ -240,29 +442,6 @@ export class CommonProviderHelper implements AiServiceProvider { ): Promise { throw new Error('Not implemented'); - // if (!this.hasValidApiKey()) { - // throw new Error('No API key provided for OpenAI'); - // } - - // try { - // const response = await generateImage({ - // model: 'dall-e-3', - // prompt, - // n: 1, - // size: options.size || '1024x1024', - // style: options.style || 'vivid', - // }); - - // if (!response.ok) { - // const errorData = await response.json(); - // throw new Error(`Image generation failed: ${errorData.error?.message || response.statusText}`); - // } - - // const data = await response.json(); - // return data.data.map((item: any) => item.url); - // } catch (error) { - // console.error('OpenAI image generation error:', error); - // throw new Error(`OpenAI image generation failed: ${error instanceof Error ? 
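
For the non-streaming branch, the change passes the converted tool set plus `maxSteps: 3` to `generateText` and then walks `toolResults`. A hedged sketch of that flow, assuming AI SDK v4-style `generateText`/`tool` signatures; the caller supplies an already-configured `LanguageModelV1`, and the `generate_image` execute body is only a placeholder:

```ts
import { generateText, tool } from 'ai';
import type { LanguageModelV1 } from 'ai';
import { z } from 'zod';

/**
 * One-shot (non-streaming) completion that lets the model call a tool and
 * then produce a final answer within a bounded number of steps.
 */
export async function generateWithTools(model: LanguageModelV1, prompt: string) {
  const { text, usage, toolResults } = await generateText({
    model,
    prompt,
    maxSteps: 3, // allow tool call + follow-up generation (AI SDK v4-style option)
    tools: {
      generate_image: tool({
        description: 'Generate an image from a text prompt',
        parameters: z.object({
          prompt: z.string().describe('The text prompt to generate an image from'),
          size: z.string().optional(),
        }),
        // Placeholder execute; the real app forwards this to its image pipeline.
        execute: async ({ prompt: imagePrompt }) => ({ images: [`placeholder for: ${imagePrompt}`] }),
      }),
    },
  });

  // The reported tool results (exact shape and per-step grouping depend on the SDK version).
  for (const result of toolResults ?? []) {
    console.log('tool result:', result);
  }

  return { text, usage };
}
```
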
error.message : String(error)}`); - // } + // If you want to implement this later, the code would go here } } \ No newline at end of file diff --git a/src/services/providers/custom-service.ts b/src/services/providers/custom-service.ts index a7569b7..484f57b 100644 --- a/src/services/providers/custom-service.ts +++ b/src/services/providers/custom-service.ts @@ -13,6 +13,7 @@ import { CommonProviderHelper } from './common-provider-service'; */ export class CustomService implements AiServiceProvider { + private settingsService: SettingsService; private openAIProvider: OpenAIProvider; private apiModels: ModelSettings[] = []; @@ -29,14 +30,16 @@ export class CustomService implements AiServiceProvider { constructor(providerID: string) { this.providerID = providerID; - const settingsService = SettingsService.getInstance(); - const providerSettings = settingsService.getProviderSettings(this.providerID); + this.settingsService = SettingsService.getInstance(); + const providerSettings = this.settingsService.getProviderSettings(this.providerID); const apiKey = providerSettings.apiKey; const baseURL = providerSettings.baseUrl; this.baseURL = `${baseURL}/${this.apiVersion}`; this.apiKey = apiKey; + this.apiModels = this.settingsService.getModels(this.providerID); + this.openAIProvider = createOpenAI({ apiKey: this.apiKey, compatibility: 'compatible', @@ -49,11 +52,7 @@ export class CustomService implements AiServiceProvider { * Get the name of the service provider */ get name(): string { - const settingsService = SettingsService.getInstance(); - const providerSettings = settingsService.getProviderSettings(this.providerID); - const error = new Error('Custom provider settings: ' + JSON.stringify(providerSettings)); - console.log(error); - console.log('Provider Name: ', providerSettings.providerName); + const providerSettings = this.settingsService.getProviderSettings(this.providerID); return providerSettings.providerName; } @@ -75,8 +74,7 @@ export class CustomService implements AiServiceProvider { * Fetch the list of available models from Forge */ public async fetchAvailableModels(): Promise { - const settingsService = SettingsService.getInstance(); - const models = settingsService.getModels(this.providerID); + const models = this.settingsService.getModels(this.providerID); this.apiModels = models; @@ -87,9 +85,17 @@ export class CustomService implements AiServiceProvider { * Get the capabilities of a model with this provider */ // eslint-disable-next-line @typescript-eslint/no-unused-vars - getModelCapabilities(model: string): AIServiceCapability[] { + getModelCapabilities(modelId: string): AIServiceCapability[] { + // Get model data by modelId + const modelData = this.apiModels.find(x => x.modelId === modelId); + let hasImageGeneration = false; + + if(modelData?.modelCapabilities.findIndex(x => x === AIServiceCapability.ImageGeneration) !== -1){ + hasImageGeneration = true; + } + return mapModelCapabilities( - false, + hasImageGeneration, false, false, false, @@ -143,13 +149,29 @@ export class CustomService implements AiServiceProvider { */ public async getImageGeneration( prompt: string, - // eslint-disable-next-line @typescript-eslint/no-unused-vars options: { size?: `${number}x${number}`; + aspectRatio?: `${number}:${number}`; style?: string; quality?: string; - } = {} - ): Promise { - throw new Error('Not implemented'); + } + ): Promise[]> { + + const imageModel = this.openAIProvider.imageModel('dall-e-3'); + + const result = await imageModel.doGenerate({ + prompt: prompt, + n: 1, + size: 
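
`getImageGeneration` now calls `imageModel('dall-e-3').doGenerate(...)` directly. The sketch below shows the same call through the higher-level helper, assuming an AI SDK release that ships `experimental_generateImage`; note that OpenAI's image API accepts `size` but has no `seed` or aspect-ratio parameter, so those are left out here:

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';

/** Generates one DALL-E 3 image and returns it as base64 strings. */
export async function generateDallE3Image(apiKey: string, prompt: string): Promise<string[]> {
  const openai = createOpenAI({ apiKey });

  const { images } = await generateImage({
    model: openai.imageModel('dall-e-3'),
    prompt,
    n: 1,
    size: '1024x1024',
    providerOptions: {
      openai: { style: 'vivid' }, // provider-specific knob passed through untyped
    },
  });

  // Each generated image exposes base64 / uint8Array representations.
  return images.map(image => image.base64);
}
```

Returning base64 strings keeps the result easy to embed as message content later in the pipeline.
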
options.size || '1024x1024', + aspectRatio: options.aspectRatio || '1:1', + seed: 42, + providerOptions: { + "openai": { + "style": options.style || 'vivid' + } + } + }); + + return result.images; } } \ No newline at end of file diff --git a/src/services/providers/fireworks-service.ts b/src/services/providers/fireworks-service.ts index 46740f6..28d53d5 100644 --- a/src/services/providers/fireworks-service.ts +++ b/src/services/providers/fireworks-service.ts @@ -15,6 +15,7 @@ export const FIREWORKS_PROVIDER_NAME = 'Fireworks.ai'; */ export class FireworksService implements AiServiceProvider { + private settingsService: SettingsService; private commonProviderHelper: CommonProviderHelper; private apiModels: ModelSettings[] = []; @@ -22,6 +23,8 @@ export class FireworksService implements AiServiceProvider { * Create a new OpenAI service provider */ constructor() { + this.settingsService = SettingsService.getInstance(); + this.apiModels = this.settingsService.getModels(FIREWORKS_PROVIDER_NAME); this.commonProviderHelper = new CommonProviderHelper(FIREWORKS_PROVIDER_NAME, this.createClient); } @@ -55,8 +58,7 @@ export class FireworksService implements AiServiceProvider { * Fetch the list of available models from OpenAI */ public async fetchAvailableModels(): Promise { - const settingsService = SettingsService.getInstance(); - const models = settingsService.getModels(FIREWORKS_PROVIDER_NAME); + const models = this.settingsService.getModels(FIREWORKS_PROVIDER_NAME); this.apiModels = models; @@ -66,10 +68,18 @@ export class FireworksService implements AiServiceProvider { /** * Get the capabilities of a model with this provider */ - // eslint-disable-next-line @typescript-eslint/no-unused-vars - getModelCapabilities(model: string): AIServiceCapability[] { + getModelCapabilities(modelId: string): AIServiceCapability[] { + // Get model data by modelId + const models = this.settingsService.getModels(this.name); + const modelData = models.find(x => x.modelId === modelId); + let hasImageGeneration = false; + + if(modelData?.modelCapabilities.findIndex(x => x === AIServiceCapability.ImageGeneration) !== -1){ + hasImageGeneration = true; + } + return mapModelCapabilities( - false, + hasImageGeneration, false, false, false, diff --git a/src/services/providers/forge-service.ts b/src/services/providers/forge-service.ts index bb2cbe4..48bbfad 100644 --- a/src/services/providers/forge-service.ts +++ b/src/services/providers/forge-service.ts @@ -17,11 +17,14 @@ export class ForgeService implements AiServiceProvider { private commonProviderHelper: CommonProviderHelper; private apiModels: ModelSettings[] = []; + private settingsService: SettingsService; /** * Create a new Forge service provider */ constructor() { + this.settingsService = SettingsService.getInstance(); + this.apiModels = this.settingsService.getModels(FORGE_PROVIDER_NAME); this.commonProviderHelper = new CommonProviderHelper(FORGE_PROVIDER_NAME, this.createClient); } @@ -59,8 +62,7 @@ export class ForgeService implements AiServiceProvider { * Fetch the list of available models from Forge */ public async fetchAvailableModels(): Promise { - const settingsService = SettingsService.getInstance(); - const models = settingsService.getModels(FORGE_PROVIDER_NAME); + const models = this.settingsService.getModels(FORGE_PROVIDER_NAME); this.apiModels = models; @@ -70,10 +72,18 @@ export class ForgeService implements AiServiceProvider { /** * Get the capabilities of a model with this provider */ - // eslint-disable-next-line @typescript-eslint/no-unused-vars - 
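
Several providers now snapshot `this.apiModels` in the constructor while `getModelCapabilities` re-reads from `SettingsService`, so the two views can drift once the user edits models. A small sketch, with local stand-in types, of reading the model list lazily through a getter instead:

```ts
// Minimal local stand-ins so the sketch compiles on its own; the real app
// would use its SettingsService / ModelSettings types instead.
interface ModelSettings { modelId: string; modelName: string }
interface SettingsReader { getModels(providerName: string): ModelSettings[] }

/**
 * Reads models lazily instead of snapshotting them in the constructor,
 * so settings changes made after startup are always reflected.
 */
class ProviderModels {
  constructor(
    private readonly settings: SettingsReader,
    private readonly providerName: string,
  ) {}

  get models(): ModelSettings[] {
    return this.settings.getModels(this.providerName);
  }

  findById(modelId: string): ModelSettings | undefined {
    return this.models.find(m => m.modelId === modelId);
  }
}

// Usage:
const reader: SettingsReader = { getModels: () => [{ modelId: 'gpt-4o', modelName: 'GPT-4o' }] };
const providerModels = new ProviderModels(reader, 'OpenAI');
console.log(providerModels.findById('gpt-4o')?.modelName); // "GPT-4o"
```
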
getModelCapabilities(model: string): AIServiceCapability[] { + getModelCapabilities(modelId: string): AIServiceCapability[] { + // Get model data by modelId + const models = this.settingsService.getModels(this.name); + const modelData = models.find(x => x.modelId === modelId); + let hasImageGeneration = false; + + if(modelData?.modelCapabilities.findIndex(x => x === AIServiceCapability.ImageGeneration) !== -1){ + hasImageGeneration = true; + } + return mapModelCapabilities( - false, + hasImageGeneration, false, false, false, @@ -118,13 +128,29 @@ export class ForgeService implements AiServiceProvider { */ public async getImageGeneration( prompt: string, - // eslint-disable-next-line @typescript-eslint/no-unused-vars options: { size?: `${number}x${number}`; + aspectRatio?: `${number}:${number}`; style?: string; quality?: string; - } = {} - ): Promise { - throw new Error('Not implemented'); + } + ): Promise[]> { + + const imageModel = this.commonProviderHelper.ProviderInstance.imageModel('dall-e-3'); + + const result = await imageModel.doGenerate({ + prompt: prompt, + n: 1, + size: options.size || '1024x1024', + aspectRatio: options.aspectRatio || '1:1', + seed: 42, + providerOptions: { + "openai": { + "style": options.style || 'vivid' + } + } + }); + + return result.images; } } \ No newline at end of file diff --git a/src/services/providers/gemini-service.ts b/src/services/providers/gemini-service.ts index 7fe2050..9144076 100644 --- a/src/services/providers/gemini-service.ts +++ b/src/services/providers/gemini-service.ts @@ -26,6 +26,7 @@ export class GeminiService implements AiServiceProvider { */ constructor() { this.settingsService = SettingsService.getInstance(); + this.apiModels = this.settingsService.getModels(GEMINI_PROVIDER_NAME); const providerSettings = this.settingsService.getProviderSettings(GEMINI_PROVIDER_NAME); this._apiKey = providerSettings.apiKey || ''; @@ -60,8 +61,7 @@ export class GeminiService implements AiServiceProvider { * Fetch the list of available models from OpenAI */ public async fetchAvailableModels(): Promise { - const settingsService = SettingsService.getInstance(); - const models = settingsService.getModels(GEMINI_PROVIDER_NAME); + const models = this.settingsService.getModels(GEMINI_PROVIDER_NAME); this.apiModels = models; @@ -71,10 +71,18 @@ export class GeminiService implements AiServiceProvider { /** * Get the capabilities of a model with this provider */ - // eslint-disable-next-line @typescript-eslint/no-unused-vars - getModelCapabilities(model: string): AIServiceCapability[] { + getModelCapabilities(modelId: string): AIServiceCapability[] { + // Get model data by modelId + const models = this.settingsService.getModels(this.name); + const modelData = models.find(x => x.modelId === modelId); + let hasImageGeneration = false; + + if(modelData?.modelCapabilities.findIndex(x => x === AIServiceCapability.ImageGeneration) !== -1){ + hasImageGeneration = true; + } + return mapModelCapabilities( - false, + hasImageGeneration, false, false, false, diff --git a/src/services/providers/openai-service.ts b/src/services/providers/openai-service.ts index b5205d5..7a12b62 100644 --- a/src/services/providers/openai-service.ts +++ b/src/services/providers/openai-service.ts @@ -17,11 +17,14 @@ export class OpenAIService implements AiServiceProvider { private commonProviderHelper: CommonProviderHelper; private apiModels: ModelSettings[] = []; + private settingsService: SettingsService; /** * Create a new OpenAI service provider */ constructor() { + this.settingsService 
= SettingsService.getInstance(); + this.apiModels = this.settingsService.getModels(OPENAI_PROVIDER_NAME); this.commonProviderHelper = new CommonProviderHelper(OPENAI_PROVIDER_NAME, this.createClient); } @@ -58,8 +61,7 @@ export class OpenAIService implements AiServiceProvider { * Fetch the list of available models from OpenAI */ public async fetchAvailableModels(): Promise { - const settingsService = SettingsService.getInstance(); - const models = settingsService.getModels(OPENAI_PROVIDER_NAME); + const models = this.settingsService.getModels(OPENAI_PROVIDER_NAME); this.apiModels = models; @@ -70,15 +72,19 @@ export class OpenAIService implements AiServiceProvider { * Get the capabilities of a model with this provider */ // eslint-disable-next-line @typescript-eslint/no-unused-vars - getModelCapabilities(model: string): AIServiceCapability[] { - // Add image generation capability for DALL-E 3 - if (model === 'dall-e-3') { - return [AIServiceCapability.ImageGeneration]; + getModelCapabilities(modelId: string): AIServiceCapability[] { + // Get model data by modelId + const models = this.settingsService.getModels(this.name); + const modelData = models.find(x => x.modelId === modelId); + let hasImageGeneration = false; + + if(modelData?.modelCapabilities.findIndex(x => x === AIServiceCapability.ImageGeneration) !== -1){ + hasImageGeneration = true; } // Default capabilities for chat models return mapModelCapabilities( - false, + hasImageGeneration, false, false, false, @@ -152,7 +158,17 @@ export class OpenAIService implements AiServiceProvider { options.stream = false; - return CommonProviderHelper.getChatCompletionByModel(modelInstance, messages, options, streamController, tools, toolChoice); + options.tools = { + ...options.tools, + tools + } + + options.toolChoice = { + ...options.toolChoice, + toolChoice + } + + return CommonProviderHelper.getChatCompletionByModel(modelInstance, messages, options, streamController); } /** diff --git a/src/services/providers/openrouter-service.ts b/src/services/providers/openrouter-service.ts index eba39ca..f637654 100644 --- a/src/services/providers/openrouter-service.ts +++ b/src/services/providers/openrouter-service.ts @@ -25,6 +25,7 @@ export class OpenRouterService implements AiServiceProvider { */ constructor() { this.settingsService = SettingsService.getInstance(); + this.apiModels = this.settingsService.getModels(OPENROUTER_PROVIDER_NAME); const providerSettings = this.settingsService.getProviderSettings(OPENROUTER_PROVIDER_NAME); this._apiKey = providerSettings.apiKey || ''; @@ -58,8 +59,7 @@ export class OpenRouterService implements AiServiceProvider { * Fetch the list of available models from OpenAI */ public async fetchAvailableModels(): Promise { - const settingsService = SettingsService.getInstance(); - const models = settingsService.getModels(OPENROUTER_PROVIDER_NAME); + const models = this.settingsService.getModels(OPENROUTER_PROVIDER_NAME); this.apiModels = models; @@ -69,10 +69,18 @@ export class OpenRouterService implements AiServiceProvider { /** * Get the capabilities of a model with this provider */ - // eslint-disable-next-line @typescript-eslint/no-unused-vars - getModelCapabilities(model: string): AIServiceCapability[] { + getModelCapabilities(modelId: string): AIServiceCapability[] { + // Get model data by modelId + const models = this.settingsService.getModels(this.name); + const modelData = models.find(x => x.modelId === modelId); + let hasImageGeneration = false; + + if(modelData?.modelCapabilities.findIndex(x => x === 
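
In the `OpenAIService.getChatCompletion` change above, `options.tools = { ...options.tools, tools }` nests the incoming tool map under a literal `tools` key (and likewise for `toolChoice`) rather than merging its entries, which the downstream conversion loop would not recognize as tool definitions. A hedged sketch of the likely intent:

```ts
// Likely-intended merge: combine tool maps entry-by-entry, and pass the
// tool choice through as a single value rather than wrapping it.
type ToolMap = Record<string, unknown>;

interface CompletionLikeOptions {
  tools?: ToolMap;
  toolChoice?: unknown;
}

function withTools(
  options: CompletionLikeOptions,
  tools: ToolMap | undefined,
  toolChoice?: unknown,
): CompletionLikeOptions {
  return {
    ...options,
    tools: { ...options.tools, ...tools }, // merge entries, not nest under "tools"
    toolChoice: toolChoice ?? options.toolChoice,
  };
}

// Example: both tool entries remain addressable by name.
const merged = withTools(
  { tools: { web_search: {} }, toolChoice: 'auto' },
  { generate_image: {} },
  'auto',
);
console.log(Object.keys(merged.tools ?? {})); // ["web_search", "generate_image"]
```
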
AIServiceCapability.ImageGeneration) !== -1){ + hasImageGeneration = true; + } + return mapModelCapabilities( - false, + hasImageGeneration, false, false, false, diff --git a/src/services/providers/together-service.ts b/src/services/providers/together-service.ts index 84a00b8..578b102 100644 --- a/src/services/providers/together-service.ts +++ b/src/services/providers/together-service.ts @@ -17,11 +17,14 @@ export class TogetherService implements AiServiceProvider { private commonProviderHelper: CommonProviderHelper; private apiModels: ModelSettings[] = []; + private settingsService: SettingsService; /** * Create a new OpenAI service provider */ constructor() { + this.settingsService = SettingsService.getInstance(); + this.apiModels = this.settingsService.getModels(TOGETHER_PROVIDER_NAME); this.commonProviderHelper = new CommonProviderHelper(TOGETHER_PROVIDER_NAME, this.createClient); } @@ -56,8 +59,7 @@ export class TogetherService implements AiServiceProvider { * Fetch the list of available models from OpenAI */ public async fetchAvailableModels(): Promise { - const settingsService = SettingsService.getInstance(); - const models = settingsService.getModels(TOGETHER_PROVIDER_NAME); + const models = this.settingsService.getModels(TOGETHER_PROVIDER_NAME); this.apiModels = models; @@ -67,10 +69,18 @@ export class TogetherService implements AiServiceProvider { /** * Get the capabilities of a model with this provider */ - // eslint-disable-next-line @typescript-eslint/no-unused-vars - getModelCapabilities(model: string): AIServiceCapability[] { + getModelCapabilities(modelId: string): AIServiceCapability[] { + // Get model data by modelId + const models = this.settingsService.getModels(this.name); + const modelData = models.find(x => x.modelId === modelId); + let hasImageGeneration = false; + + if(modelData?.modelCapabilities.findIndex(x => x === AIServiceCapability.ImageGeneration) !== -1){ + hasImageGeneration = true; + } + return mapModelCapabilities( - false, + hasImageGeneration, false, false, false, diff --git a/src/services/settings-service.ts b/src/services/settings-service.ts index d8c8a43..e3495b6 100644 --- a/src/services/settings-service.ts +++ b/src/services/settings-service.ts @@ -1,5 +1,5 @@ import { AIServiceCapability } from "../types/capabilities"; -import { UserSettings, ProviderSettings, ModelSettings } from "../types/settings"; +import { UserSettings, ProviderSettings, ModelSettings, MCPServerSettings } from "../types/settings"; import { DatabaseService } from "./database"; import { v4 as uuidv4 } from 'uuid'; @@ -24,6 +24,14 @@ const DEFAULT_SETTINGS: UserSettings = { // modelCapabilities: [AIServiceCapability.TextCompletion, AIServiceCapability.WebSearch], // modelRefUUID: uuidv4(), // }, + // { + // modelName: 'DALL-E 3', + // modelId: 'dall-e-3', + // modelCategory: 'Image Generation', + // modelDescription: 'DALL-E 3 is OpenAI\'s advanced image generation model.', + // modelCapabilities: [AIServiceCapability.ImageGeneration], + // modelRefUUID: uuidv4(), + // }, // ] // }, ['OpenAI']: { @@ -239,6 +247,23 @@ const DEFAULT_SETTINGS: UserSettings = { useStreaming: true, webSearchEnabled: false, enableWebSearch_Preview: false, + // General settings + startWithSystem: false, + startupToTray: false, + closeToTray: true, + proxyMode: 'system', + customProxyUrl: '', + sendErrorReports: true, + mcpServers: { + 'image-generation': { + id: 'image-generation', + name: 'Image Generation', + type: 'sse', + url: 'internal://image-generation', + isDefault: true, + isImageGeneration: 
true + } + } }; /** @@ -323,6 +348,10 @@ export class SettingsService { }); } } + else if(provider === 'TensorBlock'){ + // Remove Default TensorBlock provider + delete this.settings.providers[provider]; + } } } @@ -536,4 +565,63 @@ export class SettingsService { this.settings.useStreaming = useStreaming; await this.saveSettings(); } + + /** + * Get all MCP servers + */ + public getMCPServers(): Record { + return this.settings.mcpServers || {}; + } + + /** + * Get a specific MCP server by ID + */ + public getMCPServer(id: string): MCPServerSettings | undefined { + return this.settings.mcpServers?.[id]; + } + + /** + * Add or update an MCP server + */ + public async addOrUpdateMCPServer(server: MCPServerSettings): Promise { + if (!this.settings.mcpServers) { + this.settings.mcpServers = {}; + } + + this.settings.mcpServers[server.id] = server; + await this.saveSettings(); + this.notifySettingsChanged(); + } + + /** + * Delete an MCP server + */ + public async deleteMCPServer(id: string): Promise { + if (!this.settings.mcpServers || !this.settings.mcpServers[id]) { + return; + } + + // Don't delete default servers + if (this.settings.mcpServers[id].isDefault) { + throw new Error('Cannot delete default MCP server'); + } + + delete this.settings.mcpServers[id]; + await this.saveSettings(); + this.notifySettingsChanged(); + } + + /** + * Check if an MCP server is the image generation server + */ + public isImageGenerationMCPServer(id: string): boolean { + return !!this.settings.mcpServers?.[id]?.isImageGeneration; + } + + /** + * Notify listeners that settings have changed + */ + private notifySettingsChanged(): void { + window.dispatchEvent(new CustomEvent(SETTINGS_CHANGE_EVENT)); + } } \ No newline at end of file diff --git a/src/services/streaming-control.ts b/src/services/streaming-control.ts index dc3db89..f4abeea 100644 --- a/src/services/streaming-control.ts +++ b/src/services/streaming-control.ts @@ -1,7 +1,8 @@ import { LanguageModelUsage } from "ai"; -import { Conversation, Message } from "../types/chat"; +import { Conversation, Message, MessageContent, MessageContentType } from "../types/chat"; import { v4 as uuidv4 } from 'uuid'; import { MessageHelper } from "./message-helper"; +import { CustomToolCall, ToolCallStatus } from "../types/tool-call"; export class StreamControlHandler { public targetConverstation: Conversation; @@ -11,6 +12,8 @@ export class StreamControlHandler { private placeholderMessage: Message; private fullText: string = ''; + private toolCallsInProgress: Map = new Map(); + private imageContents: MessageContent[] = []; constructor( targetConversation: Conversation, @@ -56,10 +59,148 @@ export class StreamControlHandler { this.onChunkCallback(updatedConversation); } + public onToolCall(toolName: string, toolId: string, args: Record) { + // Create and store the tool call + const toolCall: CustomToolCall = { + id: toolId, + name: toolName, + args: args, + status: ToolCallStatus.CALLED + }; + + this.toolCallsInProgress.set(toolId, toolCall); + + // Update the message to include information about the tool call + let toolCallText = this.fullText; + + // Append a message about the tool call in progress + if (toolName === 'generate_image') { + const imagePrompt = args?.prompt as string; + if (imagePrompt) { + toolCallText += `\n\nGenerating image with prompt: "${imagePrompt}"`; + } else { + toolCallText += `\n\nGenerating image...`; + } + } else { + toolCallText += `\n\nExecuting tool call: ${toolName}`; + } + + const updatedMessages = new 
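
The new MCP-server accessors guard default entries from deletion and flag the internal image-generation server. Here is a self-contained sketch of that bookkeeping with trimmed-down types, seeded with the built-in `image-generation` entry from the defaults:

```ts
// Trimmed stand-in for the project's MCPServerSettings.
interface MCPServerSettings {
  id: string;
  name: string;
  type: 'sse' | 'stdio' | 'streamableHttp';
  url?: string;
  isDefault?: boolean;
  isImageGeneration?: boolean;
}

class MCPServerRegistry {
  private servers: Record<string, MCPServerSettings> = {};

  addOrUpdate(server: MCPServerSettings): void {
    this.servers[server.id] = server;
  }

  delete(id: string): void {
    const server = this.servers[id];
    if (!server) return;
    if (server.isDefault) {
      throw new Error('Cannot delete default MCP server'); // same guard as above
    }
    delete this.servers[id];
  }

  isImageGenerationServer(id: string): boolean {
    return !!this.servers[id]?.isImageGeneration;
  }
}

// Usage: the built-in image-generation entry survives delete attempts.
const registry = new MCPServerRegistry();
registry.addOrUpdate({
  id: 'image-generation',
  name: 'Image Generation',
  type: 'sse',
  url: 'internal://image-generation',
  isDefault: true,
  isImageGeneration: true,
});
try {
  registry.delete('image-generation');
} catch (error) {
  console.log((error as Error).message); // "Cannot delete default MCP server"
}
```
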
Map(this.targetConverstation.messages); + updatedMessages.set(this.placeholderMessage.messageId, { + ...this.placeholderMessage, + content: MessageHelper.pureTextMessage(toolCallText) + }); + + const updatedConversation = { + ...this.targetConverstation, + messages: updatedMessages + }; + + this.targetConverstation = updatedConversation; + this.onChunkCallback(updatedConversation); + } + + public onToolCallInProgress(toolCallId: string) { + const toolCall = this.toolCallsInProgress.get(toolCallId); + if (!toolCall) return; + + toolCall.status = ToolCallStatus.IN_PROGRESS; + this.toolCallsInProgress.set(toolCallId, toolCall); + } + + public onToolCallResult(toolCallId: string, result: unknown) { + const toolCall = this.toolCallsInProgress.get(toolCallId); + if (!toolCall) return; + + // Update tool call status + toolCall.status = ToolCallStatus.COMPLETED; + toolCall.result = result; + this.toolCallsInProgress.set(toolCallId, toolCall); + + // Handle image generation results + if (toolCall.name === 'generate_image' && (result as {images?: string[]})?.images) { + const images = (result as {images: string[]}).images; + if (Array.isArray(images)) { + // Store the images for later inclusion in the final message + for (const imageData of images) { + if (typeof imageData === 'string') { + this.imageContents.push({ + type: MessageContentType.Image, + content: imageData, + dataJson: JSON.stringify({ format: 'base64' }) + }); + } + } + } + } + + // Update message with tool call result info + let updatedText = this.fullText; + + // Don't append result text for image generation, as we'll show the images directly + if (toolCall.name !== 'generate_image') { + updatedText += `\n\nTool result: ${JSON.stringify(result)}`; + } + + const updatedMessages = new Map(this.targetConverstation.messages); + updatedMessages.set(this.placeholderMessage.messageId, { + ...this.placeholderMessage, + content: MessageHelper.pureTextMessage(updatedText) + }); + + const updatedConversation = { + ...this.targetConverstation, + messages: updatedMessages + }; + + this.targetConverstation = updatedConversation; + this.onChunkCallback(updatedConversation); + } + + public onToolCallError(toolCallId: string, error: Error) { + const toolCall = this.toolCallsInProgress.get(toolCallId); + if (!toolCall) return; + + // Update tool call status + toolCall.status = ToolCallStatus.ERROR; + toolCall.error = error; + this.toolCallsInProgress.set(toolCallId, toolCall); + + // Update message with error info + let errorText = this.fullText; + + if (toolCall.name === 'generate_image') { + errorText += `\n\nError generating image: ${error.message}`; + } else { + errorText += `\n\nError in tool call ${toolCall.name}: ${error.message}`; + } + + const updatedMessages = new Map(this.targetConverstation.messages); + updatedMessages.set(this.placeholderMessage.messageId, { + ...this.placeholderMessage, + content: MessageHelper.pureTextMessage(errorText) + }); + + const updatedConversation = { + ...this.targetConverstation, + messages: updatedMessages + }; + + this.targetConverstation = updatedConversation; + this.onChunkCallback(updatedConversation); + } + public onFinish(usage: LanguageModelUsage | null) { + const finalMessageContent = [...MessageHelper.pureTextMessage(this.fullText)]; + + // Add any image contents to the final message + if (this.imageContents.length > 0) { + finalMessageContent.push(...this.imageContents); + } + const finalMessage: Message = { messageId: uuidv4(), - content: MessageHelper.pureTextMessage(this.fullText), + content: 
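
`onToolCallResult` turns `generate_image` results into image `MessageContent` entries, which `onFinish` then appends after the text content. A small sketch of that mapping with stand-in content types:

```ts
// Trimmed stand-ins for the app's message-content types, enough to show how
// generate_image results become image content items on the final message.
enum MessageContentType { Text = 'text', Image = 'image' }

interface MessageContent {
  type: MessageContentType;
  content: string;
  dataJson: string;
}

/** Converts a generate_image tool result into image content entries. */
function toImageContents(result: unknown): MessageContent[] {
  const images = (result as { images?: unknown })?.images;
  if (!Array.isArray(images)) return [];

  return images
    .filter((image): image is string => typeof image === 'string')
    .map(image => ({
      type: MessageContentType.Image,
      content: image,
      dataJson: JSON.stringify({ format: 'base64' }),
    }));
}

// Usage: text first, then any images appended, mirroring onFinish above.
const finalContent: MessageContent[] = [
  { type: MessageContentType.Text, content: 'Here is your image.', dataJson: '{}' },
  ...toImageContents({ images: ['iVBORw0KGgo...'] }),
];
console.log(finalContent.length); // 2
```
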
finalMessageContent, conversationId: this.targetConverstation.conversationId, role: 'assistant', timestamp: new Date(), diff --git a/src/styles/tensorblock-light.css b/src/styles/tensorblock-light.css index 0dcf580..da01e7f 100644 --- a/src/styles/tensorblock-light.css +++ b/src/styles/tensorblock-light.css @@ -418,6 +418,39 @@ color: var(--primary-700); } + /* General Settings */ + .settings-section { + background-color: var(--surface-0); + border: 1px solid var(--primary-100); + border-radius: 0.5rem; + } + + .settings-section-title { + color: var(--surface-700); + } + + .settings-toggle-label { + color: var(--primary-700); + } + + .settings-toggle-description { + color: var(--surface-500); + } + + .settings-radio-group { + border-radius: 0.5rem; + background-color: var(--primary-50); + } + + .settings-radio-item { + color: var(--surface-500); + } + + .settings-radio-item-active { + background-color: var(--primary-200); + color: var(--primary-700); + } + /* Message */ .message-model-tag { background-color: transparent; diff --git a/src/types/capabilities.ts b/src/types/capabilities.ts index 39cb9ee..8bfe60c 100644 --- a/src/types/capabilities.ts +++ b/src/types/capabilities.ts @@ -18,6 +18,7 @@ export enum AIServiceCapability { FineTuning = 'fineTuning', StreamingCompletion = 'streamingCompletion', WebSearch = 'webSearch', + MCPServer = 'mcpServer', } /** @@ -36,7 +37,7 @@ export const mapModelCapabilities = ( ]; if (supportsImages) { - capabilities.push(AIServiceCapability.VisionAnalysis); + capabilities.push(AIServiceCapability.ImageGeneration); } if (supportsAudio) { diff --git a/src/types/chat.ts b/src/types/chat.ts index ca592d7..1978575 100644 --- a/src/types/chat.ts +++ b/src/types/chat.ts @@ -53,16 +53,3 @@ export interface ConversationFolder{ updatedAt: Date; colorFlag: string; } - -export interface ImageGenerationResult { - id: string; - prompt: string; - negativePrompt: string; - seed: string; - number: number; - status: string; - aspectRatio: string; - provider: string; - model: string; - images: MessageContent[]; -} diff --git a/src/types/image.ts b/src/types/image.ts new file mode 100644 index 0000000..ca65eb3 --- /dev/null +++ b/src/types/image.ts @@ -0,0 +1,16 @@ +import { MessageContent } from "./chat"; + +export interface ImageGenerationResult { + imageResultId: string; + prompt: string; + negativePrompt: string; + seed: string; + number: number; + status: string; + aspectRatio: string; + provider: string; // provider id + providerName: string; // provider name + model: string; + images: MessageContent[]; + updatedAt: Date; +} \ No newline at end of file diff --git a/src/types/settings.ts b/src/types/settings.ts index 74f1d18..6a58980 100644 --- a/src/types/settings.ts +++ b/src/types/settings.ts @@ -10,6 +10,18 @@ export interface UserSettings { useStreaming: boolean; webSearchEnabled: boolean; enableWebSearch_Preview: boolean; + // Image generation settings + imageGenerationEnabled?: boolean; + imageGenerationProvider?: string; + imageGenerationModel?: string; + // General settings + startWithSystem?: boolean; + startupToTray?: boolean; + closeToTray?: boolean; + proxyMode?: 'system' | 'custom' | 'none'; + customProxyUrl?: string; + sendErrorReports?: boolean; + mcpServers?: Record; } /** @@ -39,3 +51,27 @@ export interface ModelSettings { modelDescription: string; modelCapabilities: AIServiceCapability[]; } + +/** + * Base MCP Server settings interface + */ +export interface MCPServerSettings { + id: string; + name: string; + type: 'sse' | 'stdio' | 'streamableHttp'; 
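
The `MCPServerSettings` interface introduced here keeps every transport-specific field optional on one type. As an alternative, sketched for comparison only, a union discriminated on `type` lets the compiler enforce which fields belong to URL-based versus stdio servers:

```ts
// Alternative shape: discriminate on `type` so that URL-based and
// stdio-based servers cannot mix each other's fields.
interface MCPServerBase {
  id: string;
  name: string;
  description?: string;
  isDefault?: boolean;
  isImageGeneration?: boolean;
  timeout?: number; // seconds
}

interface MCPUrlServer extends MCPServerBase {
  type: 'sse' | 'streamableHttp';
  url: string;
  headers?: Record<string, string>;
}

interface MCPStdioServer extends MCPServerBase {
  type: 'stdio';
  command: string;
  args?: string[];
  env?: Record<string, string>;
}

type MCPServerSettings = MCPUrlServer | MCPStdioServer;

// The compiler now requires a url for sse/streamableHttp entries:
const sseServer: MCPServerSettings = {
  id: 'image-generation',
  name: 'Image Generation',
  type: 'sse',
  url: 'internal://image-generation',
  isDefault: true,
  isImageGeneration: true,
};
console.log(sseServer.type); // "sse"
```
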
+ description?: string; + isDefault?: boolean; + isImageGeneration?: boolean; + + // Common fields + url?: string; // Used by sse and streamableHttp + headers?: Record; // Used by sse and streamableHttp + + // Stdio specific fields + command?: string; // Used by stdio + args?: string[]; // Used by stdio + env?: Record; // Used by stdio + + // Timeout (in seconds) + timeout?: number; // Used by all types +} diff --git a/src/types/tool-call.ts b/src/types/tool-call.ts new file mode 100644 index 0000000..5bde068 --- /dev/null +++ b/src/types/tool-call.ts @@ -0,0 +1,33 @@ +// Custom tool call types for the application + +export interface CustomToolCall { + id: string; + name: string; + args: Record; + status: ToolCallStatus; + result?: unknown; + error?: Error; +} + +export enum ToolCallStatus { + CALLED = 'CALLED', + IN_PROGRESS = 'IN_PROGRESS', + COMPLETED = 'COMPLETED', + ERROR = 'ERROR' +} + +export interface ImageGenerationArgs { + prompt: string; + size?: string; + style?: 'vivid' | 'natural'; +} + +export interface ToolCallResult { + id: string; + result: unknown; +} + +export interface ToolCallError { + id: string; + error: Error; +} \ No newline at end of file diff --git a/src/types/window.d.ts b/src/types/window.d.ts index 09b725b..934971d 100644 --- a/src/types/window.d.ts +++ b/src/types/window.d.ts @@ -24,5 +24,11 @@ interface Window { success: boolean; error?: string; }>; + // Auto-startup and tray functions + getAutoLaunch: () => Promise; + setAutoLaunch: (enable: boolean) => Promise; + setCloseToTray: (enable: boolean) => Promise; + getCloseToTray: () => Promise; + setStartupToTray: (enable: boolean) => Promise; }; } \ No newline at end of file
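
The `window.d.ts` additions declare renderer-side wrappers for the new auto-launch and tray IPC. Below is a hedged preload sketch that would satisfy those declarations; the bridge key (`electron`) and the channel names are illustrative assumptions for this sketch, not values taken from this diff:

```ts
// Preload sketch: expose the auto-launch / tray controls to the renderer.
// Bridge key and channel names are assumptions; align them with the real
// preload.ts and the ipcMain handlers registered in the main process.
import { contextBridge, ipcRenderer } from 'electron';

contextBridge.exposeInMainWorld('electron', {
  getAutoLaunch: (): Promise<boolean> => ipcRenderer.invoke('get-auto-launch'),
  setAutoLaunch: (enable: boolean): Promise<boolean> => ipcRenderer.invoke('set-auto-launch', enable),
  setCloseToTray: (enable: boolean): Promise<boolean> => ipcRenderer.invoke('set-close-to-tray', enable),
  getCloseToTray: (): Promise<boolean> => ipcRenderer.invoke('get-close-to-tray'),
  setStartupToTray: (enable: boolean): Promise<boolean> => ipcRenderer.invoke('set-startup-to-tray', enable),
});
```

With `contextIsolation` enabled, each wrapper resolves through `ipcRenderer.invoke`, matching the `Promise`-returning signatures declared in `window.d.ts`.
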