-
🤖
+
{activeAgentIcon}
-
NexusChat
+
+ {isJustChat ? 'Just Chat' : activeAgentName || 'NexusChat'}
+
= ({
{selectedModel
- ? 'Activate the brain cluster and spark creative thinking. Your virtual assistant is here to communicate with you about everything.'
+ ? isJustChat
+ ? 'Chat sin herramientas ni MCP activos. Conversación simple con IA.'
+ : 'Activate the brain cluster and spark creative thinking. Your virtual agent is here to communicate with you about everything.'
: 'Selecciona un modelo para comenzar'}
@@ -309,9 +319,9 @@ export const LobeChatArea: React.FC
= ({
{messages.length === 0 ? (
{
- console.log('Selected assistant:', assistant);
- // TODO: Handle assistant selection
+ onAgentSelect={(agent) => {
+ console.log('Selected agent:', agent);
+ // TODO: Handle agent selection
}}
onQuestionSelect={(question) => {
console.log('Selected question:', question);
@@ -342,7 +352,7 @@ export const LobeChatArea: React.FC = ({
{message.content}
- {message.role === 'assistant' && (
+ {message.role === 'assistant' && (
diff --git a/client/src/components/LobeChatSidebar.tsx b/client/src/components/LobeChatSidebar.tsx
index 5f2b03d..b9b8618 100644
--- a/client/src/components/LobeChatSidebar.tsx
+++ b/client/src/components/LobeChatSidebar.tsx
@@ -1,7 +1,8 @@
import { Search, Plus, MessageSquare, ChevronDown } from 'lucide-react';
import { createStyles } from 'antd-style';
-import type { Conversation } from '../types';
+import type { Agent } from '../hooks/useAgents';
import { lobeChatColors, lobeChatSpacing } from '../styles/lobeChatTheme';
+import { AgentList } from './AgentList';
const useStyles = createStyles(({ css }) => ({
sidebar: css`
@@ -175,10 +176,6 @@ const useStyles = createStyles(({ css }) => ({
&:hover {
background: ${lobeChatColors.sidebar.hover};
}
-
- &.active {
- background: ${lobeChatColors.sidebar.active};
- }
`,
conversationIcon: css`
@@ -231,17 +228,27 @@ const useStyles = createStyles(({ css }) => ({
}));
interface LobeChatSidebarProps {
- conversations: Conversation[];
- activeConversationId: string;
- onNewChat: () => void;
- onSelectConversation: (id: string) => void;
+ agents: Agent[];
+ activeAgentId?: string | null;
+ isJustChatActive?: boolean;
+ onJustChatSelect: () => void;
+ onAgentSelect: (id: string) => void;
+ onAgentCreate: () => void;
+ onAgentRename: (id: string, newName: string) => void;
+ onAgentIconChange: (id: string, newIcon: string) => void;
+ onAgentDelete: (id: string) => void;
}
export const LobeChatSidebar: React.FC = ({
- conversations,
- activeConversationId,
- onNewChat,
- onSelectConversation,
+ agents,
+ activeAgentId,
+ isJustChatActive,
+ onJustChatSelect,
+ onAgentSelect,
+ onAgentCreate,
+ onAgentRename,
+ onAgentIconChange,
+ onAgentDelete,
}) => {
const { styles } = useStyles();
@@ -266,63 +273,26 @@ export const LobeChatSidebar: React.FC = ({
⌘ K
+ {/* Agent List */}
-
-
-
- {/* Active conversation */}
-
-
🤖
-
-
NexusChat
-
- gpt-4o-mini
-
-
-
-
- {/* Example conversations from screenshot */}
- {[
- { emoji: '💻', title: 'Full-stack Developer', tag: 'gpt-4o-mini', date: '08-29' },
- { emoji: '🦀', title: 'Rust Programming Assi...', tag: 'gpt-4o-mini', date: '08-29' },
- { emoji: '⚛️', title: 'React Native Coding G...', tag: 'gpt-4o-mini', date: '08-29' },
- { emoji: '📘', title: 'JS to TS Expert', tag: 'gpt-4o-mini', date: '08-29' },
- { emoji: '🌊', title: 'TailwindHelper', tag: 'gpt-4o-mini', date: '08-29' },
- { emoji: '🍳', title: 'Healthy Recipe Recom...', tag: 'gpt-4o-mini', date: '08-29' },
- { emoji: '👻', title: 'GhostWriter Pro', tag: 'gpt-4o-mini', date: '08-29' },
- { emoji: '😊', title: 'Emotional Companion', tag: 'gpt-4o-mini', date: '08-29' },
- ].map((conv, index) => (
-
onSelectConversation(conv.title)}
- >
-
{conv.emoji}
-
-
{conv.title}
-
- {conv.tag}
-
-
-
{conv.date}
-
- ))}
-
+
);
diff --git a/client/src/components/SettingsModal.tsx b/client/src/components/SettingsModal.tsx
index 06688c6..14ae695 100644
--- a/client/src/components/SettingsModal.tsx
+++ b/client/src/components/SettingsModal.tsx
@@ -1,7 +1,7 @@
import React, { useState } from 'react';
import { X, Palette, Zap, Globe, User, Shield, Sparkles } from 'lucide-react';
import { createStyles } from 'antd-style';
-import { SettingsAIProviders } from './SettingsView';
+import { AIProviderSettings } from './AIProviderSettings';
import { SettingsBranding } from './SettingsBranding';
import { lobeChatColors, lobeChatSpacing } from '../styles/lobeChatTheme';
@@ -279,7 +279,7 @@ export const SettingsModal: React.FC
= ({ isOpen, onClose })
const renderContent = () => {
switch (activeTab) {
case 'ai':
- return ;
+ return ;
case 'branding':
return ;
diff --git a/client/src/components/Sidebar.tsx b/client/src/components/Sidebar.tsx
index 5a717b7..acfaf6e 100644
--- a/client/src/components/Sidebar.tsx
+++ b/client/src/components/Sidebar.tsx
@@ -3,6 +3,8 @@ import { MessageSquare, Plus, User, Settings, LogOut, X } from 'lucide-react';
import { createStyles } from 'antd-style';
import type { Conversation } from '../types';
import { lobeUIColors, spacing } from '../styles/theme';
+import { ConversationList } from './ConversationList';
+import { useConversations } from '../hooks/useConversations';
const useStyles = createStyles(({ css, token }) => ({
sidebar: css`
@@ -162,6 +164,8 @@ interface SidebarProps {
activeConversationId: string;
onNewChat: () => void;
onSelectConversation: (id: string) => void;
+ onRenameConversation: (id: string, newTitle: string) => void;
+ onDeleteConversation: (id: string) => void;
}
export const Sidebar: React.FC = ({
@@ -169,6 +173,8 @@ export const Sidebar: React.FC = ({
activeConversationId,
onNewChat,
onSelectConversation,
+ onRenameConversation,
+ onDeleteConversation,
}) => {
const { styles } = useStyles();
@@ -182,28 +188,13 @@ export const Sidebar: React.FC = ({
diff --git a/client/src/config/aiProviders.ts b/client/src/config/aiProviders.ts
index 50dc86d..0f9a321 100644
--- a/client/src/config/aiProviders.ts
+++ b/client/src/config/aiProviders.ts
@@ -93,19 +93,72 @@ export const AI_PROVIDERS: AIProvider[] = [
icon: '🔷',
enabled: false,
models: [
+ // Serie Gemini 2.0 (Generación Actual)
{
- id: 'gemini-pro',
- name: 'Gemini Pro',
+ id: 'gemini-2.0-flash',
+ name: 'Gemini 2.0 Flash',
providerId: 'google',
- contextWindow: 32000,
- pricing: { input: 0.5, output: 1.5 },
+ contextWindow: 1048576, // 1M tokens
+ pricing: { input: 0.075, output: 0.30 },
},
{
- id: 'gemini-pro-vision',
- name: 'Gemini Pro Vision',
+ id: 'gemini-2.0-flash-lite',
+ name: 'Gemini 2.0 Flash-Lite (Preview)',
providerId: 'google',
- contextWindow: 16000,
- pricing: { input: 0.5, output: 1.5 },
+ contextWindow: 1048576,
+ pricing: { input: 0.04, output: 0.16 },
+ },
+ {
+ id: 'gemini-2.0-pro',
+ name: 'Gemini 2.0 Pro (Preview)',
+ providerId: 'google',
+ contextWindow: 2097152, // 2M tokens
+ pricing: { input: 2.50, output: 10.00 },
+ },
+ // Serie Gemini 2.5
+ {
+ id: 'gemini-2.5-flash',
+ name: 'Gemini 2.5 Flash',
+ providerId: 'google',
+ contextWindow: 1048576,
+ pricing: { input: 0.075, output: 0.30 },
+ },
+ // Serie Gemini 1.5
+ {
+ id: 'gemini-1.5-pro',
+ name: 'Gemini 1.5 Pro',
+ providerId: 'google',
+ contextWindow: 2097152, // 2M tokens
+ pricing: { input: 1.25, output: 5.00 },
+ },
+ {
+ id: 'gemini-1.5-flash',
+ name: 'Gemini 1.5 Flash',
+ providerId: 'google',
+ contextWindow: 1048576, // 1M tokens
+ pricing: { input: 0.075, output: 0.30 },
+ },
+ {
+ id: 'gemini-1.5-flash-8b',
+ name: 'Gemini 1.5 Flash-8B',
+ providerId: 'google',
+ contextWindow: 1048576,
+ pricing: { input: 0.0375, output: 0.15 },
+ },
+ // Serie Gemini 1.0 (Legacy)
+ {
+ id: 'gemini-1.0-pro',
+ name: 'Gemini 1.0 Pro',
+ providerId: 'google',
+ contextWindow: 32768,
+ pricing: { input: 0.50, output: 1.50 },
+ },
+ {
+ id: 'gemini-1.0-ultra',
+ name: 'Gemini 1.0 Ultra',
+ providerId: 'google',
+ contextWindow: 32768,
+ pricing: { input: 1.00, output: 3.00 },
},
],
},
diff --git a/client/src/hooks/useChat.ts b/client/src/hooks/useChat.ts
index a2b8439..5f966e8 100644
--- a/client/src/hooks/useChat.ts
+++ b/client/src/hooks/useChat.ts
@@ -1,32 +1,61 @@
import { useState, useEffect, useCallback } from 'react';
import { io, Socket } from 'socket.io-client';
-import type { Message, Conversation } from '../types';
+import type { Message } from '../types';
+import { useAgents } from './useAgents';
+import type { AIModel } from '../config/aiProviders';
-export const useChat = () => {
+interface UseChatProps {
+ selectedModel?: AIModel | null;
+}
+
+export const useChat = (props?: UseChatProps) => {
+ const { selectedModel } = props || {};
const [socket, setSocket] = useState
(null);
const [messages, setMessages] = useState([]);
- const [conversations, setConversations] = useState([]);
- const [activeConversationId, setActiveConversationId] = useState('default');
+ const [activeAgentId, setActiveAgentId] = useState(null);
+ const [isJustChat, setIsJustChat] = useState(false);
const [isTyping, setIsTyping] = useState(false);
+ const {
+ agents,
+ createAgent,
+ updateAgent,
+ deleteAgent,
+ } = useAgents();
+
// Inicializar Socket.IO
useEffect(() => {
const newSocket = io('http://localhost:3000');
newSocket.on('connect', () => {
console.log('Connected to server');
+
+ // Enviar configuraciones de providers al servidor
+ const savedConfigs = localStorage.getItem('aiProviderConfigs');
+ if (savedConfigs) {
+ const configs = JSON.parse(savedConfigs);
+ newSocket.emit('provider_configs', configs);
+ console.log('Provider configs sent to server');
+ }
});
newSocket.on('ai_response', (data: { content: string; timestamp: string }) => {
- setMessages((prev) => [
- ...prev,
- {
- id: Date.now().toString(),
- role: 'assistant',
- content: data.content,
- timestamp: new Date(data.timestamp),
- },
- ]);
+ const agentMessage: Message = {
+ id: Date.now().toString(),
+ role: 'assistant',
+ content: data.content,
+ timestamp: new Date(data.timestamp),
+ };
+
+ setMessages((prev) => {
+ const updated = [...prev, agentMessage];
+ // Save messages in localStorage by agent
+ const storageKey = isJustChat
+ ? 'messages_just_chat'
+ : `messages_${activeAgentId}`;
+ localStorage.setItem(storageKey, JSON.stringify(updated));
+ return updated;
+ });
setIsTyping(false);
});
@@ -40,31 +69,72 @@ export const useChat = () => {
return () => {
newSocket.close();
};
- }, []);
+ }, [activeAgentId, isJustChat]);
- // Crear nueva conversación
- const createNewConversation = useCallback(() => {
- const newConv: Conversation = {
- id: Date.now().toString(),
- title: 'Nueva conversación',
- messages: [],
- createdAt: new Date(),
- };
- setConversations((prev) => [newConv, ...prev]);
- setActiveConversationId(newConv.id);
- setMessages([]);
- }, []);
+ // Load messages when active agent or Just Chat changes
+ useEffect(() => {
+ const storageKey = isJustChat
+ ? 'messages_just_chat'
+ : activeAgentId
+ ? `messages_${activeAgentId}`
+ : null;
- // Seleccionar conversación
- const selectConversation = useCallback((id: string) => {
- setActiveConversationId(id);
- const conv = conversations.find((c) => c.id === id);
- if (conv) {
- setMessages(conv.messages);
+ if (storageKey) {
+ const stored = localStorage.getItem(storageKey);
+ if (stored) {
+ const parsed = JSON.parse(stored);
+ setMessages(parsed.map((m: any) => ({
+ ...m,
+ timestamp: new Date(m.timestamp),
+ })));
+ } else {
+ setMessages([]);
+ }
+ } else {
+ setMessages([]);
}
- }, [conversations]);
+ }, [activeAgentId, isJustChat]);
- // Enviar mensaje
+ // Select Just Chat (chat without tools)
+ const selectJustChat = useCallback(() => {
+ setIsJustChat(true);
+ setActiveAgentId(null);
+ }, []);
+
+ // Select agent
+ const selectAgent = useCallback((id: string) => {
+ setIsJustChat(false);
+ setActiveAgentId(id);
+ }, []);
+
+ // Create new agent
+ const handleCreateAgent = useCallback((name: string, icon: string, description?: string) => {
+ const newAgent = createAgent(name, icon);
+ if (description) {
+ updateAgent(newAgent.id, { description });
+ }
+ selectAgent(newAgent.id);
+ }, [createAgent, updateAgent]);
+
+ // Rename agent
+ const renameAgent = useCallback((id: string, newName: string) => {
+ updateAgent(id, { name: newName });
+ }, [updateAgent]);
+
+ // Change agent icon
+ const changeAgentIcon = useCallback((id: string, newIcon: string) => {
+ updateAgent(id, { icon: newIcon });
+ }, [updateAgent]);
+
+ // Delete agent
+ const handleDeleteAgent = useCallback((id: string) => {
+ deleteAgent(id);
+ if (id === activeAgentId) {
+ selectJustChat();
+ }
+ }, [deleteAgent, activeAgentId, selectJustChat]);
+
+ // Send message
const sendMessage = useCallback((content: string) => {
if (!socket || !content.trim()) return;
@@ -75,23 +145,49 @@ export const useChat = () => {
timestamp: new Date(),
};
- setMessages((prev) => [...prev, userMessage]);
+ setMessages((prev) => {
+ const updated = [...prev, userMessage];
+ // Save messages in localStorage
+ const storageKey = isJustChat
+ ? 'messages_just_chat'
+ : activeAgentId
+ ? `messages_${activeAgentId}`
+ : null;
+
+ if (storageKey) {
+ localStorage.setItem(storageKey, JSON.stringify(updated));
+ }
+ return updated;
+ });
+
setIsTyping(true);
+ console.log('🚀 Sending message with model:', selectedModel);
+ console.log('📝 Message content:', content);
+ console.log('🤖 Agent ID:', activeAgentId);
+ console.log('💬 Is Just Chat:', isJustChat);
+
socket.emit('user_message', {
message: content,
- conversationId: activeConversationId,
+ agentId: activeAgentId,
+ isJustChat: isJustChat,
+ selectedModel: selectedModel,
});
- }, [socket, activeConversationId]);
+ }, [socket, activeAgentId, isJustChat, selectedModel]);
return {
messages,
- conversations,
- activeConversationId,
+ agents,
+ activeAgentId,
+ isJustChat,
isTyping,
sendMessage,
- createNewConversation,
- selectConversation,
+ selectJustChat,
+ selectAgent,
+ createAgent: handleCreateAgent,
+ renameAgent,
+ changeAgentIcon,
+ deleteAgent: handleDeleteAgent,
};
};
diff --git a/client/src/hooks/useConversations.ts b/client/src/hooks/useConversations.ts
new file mode 100644
index 0000000..a754642
--- /dev/null
+++ b/client/src/hooks/useConversations.ts
@@ -0,0 +1,84 @@
+import { useState, useEffect } from 'react';
+import type { Conversation } from '../types';
+
+export const useConversations = () => {
+ const [conversations, setConversations] = useState([]);
+ const [loading, setLoading] = useState(true);
+
+ // Cargar conversaciones del localStorage (temporal hasta tener backend)
+ useEffect(() => {
+ loadConversations();
+ }, []);
+
+ const loadConversations = () => {
+ try {
+ const stored = localStorage.getItem('conversations');
+ if (stored) {
+ const parsed = JSON.parse(stored);
+ setConversations(parsed.map((c: any) => ({
+ ...c,
+ createdAt: new Date(c.createdAt),
+ updatedAt: new Date(c.updatedAt),
+ })));
+ }
+ } catch (error) {
+ console.error('Error loading conversations:', error);
+ } finally {
+ setLoading(false);
+ }
+ };
+
+ const createConversation = (firstMessage: string): Conversation => {
+ // Generar título del chat basado en el primer mensaje (máximo 50 caracteres)
+ const title = firstMessage.length > 50
+ ? firstMessage.substring(0, 47) + '...'
+ : firstMessage;
+
+ const newConversation: Conversation = {
+ id: `conv_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`,
+ title,
+ createdAt: new Date(),
+ updatedAt: new Date(),
+ messageCount: 1,
+ };
+
+ const updated = [newConversation, ...conversations];
+ setConversations(updated);
+ saveConversations(updated);
+
+ return newConversation;
+ };
+
+ const updateConversationTitle = (id: string, newTitle: string) => {
+ const updated = conversations.map(conv =>
+ conv.id === id
+ ? { ...conv, title: newTitle, updatedAt: new Date() }
+ : conv
+ );
+ setConversations(updated);
+ saveConversations(updated);
+ };
+
+ const deleteConversation = (id: string) => {
+ const updated = conversations.filter(conv => conv.id !== id);
+ setConversations(updated);
+ saveConversations(updated);
+
+ // También eliminar mensajes asociados
+ localStorage.removeItem(`messages_${id}`);
+ };
+
+ const saveConversations = (convs: Conversation[]) => {
+ localStorage.setItem('conversations', JSON.stringify(convs));
+ };
+
+ return {
+ conversations,
+ loading,
+ createConversation,
+ updateConversationTitle,
+ deleteConversation,
+ refreshConversations: loadConversations,
+ };
+};
+
diff --git a/client/src/types/index.ts b/client/src/types/index.ts
index ae620e2..b8b155e 100644
--- a/client/src/types/index.ts
+++ b/client/src/types/index.ts
@@ -8,8 +8,10 @@ export interface Message {
export interface Conversation {
id: string;
title: string;
- messages: Message[];
+ messages?: Message[];
createdAt: Date;
+ updatedAt: Date;
+ messageCount?: number;
}
export interface ChatState {
diff --git a/package.json b/package.json
index 5dee0d9..630b432 100644
--- a/package.json
+++ b/package.json
@@ -21,6 +21,7 @@
"@types/react-dom": "^19.2.3",
"antd": "^6.3.0",
"antd-style": "^4.1.0",
+ "axios": "^1.13.5",
"cors": "^2.8.6",
"dotenv": "^16.4.5",
"express": "^5.2.1",
diff --git a/src/server/WebServer.ts b/src/server/WebServer.ts
index d57170f..46008ec 100644
--- a/src/server/WebServer.ts
+++ b/src/server/WebServer.ts
@@ -5,6 +5,8 @@ import path from 'path';
import cors from 'cors';
import logger from '../utils/logger';
import { config } from '../config';
+import providerRouter from './routes/provider';
+import { AIServiceFactory, AIMessage } from '../services/AIService';
export class WebServer {
private app: Express;
@@ -31,88 +33,191 @@ export class WebServer {
}
private setupRoutes(): void {
- this.app.get('/', (req: Request, res: Response) => {
- res.sendFile(path.join(__dirname, '../../public/index.html'));
- });
+ // API Routes (deben ir primero)
+ this.app.use('/api', providerRouter);
+ logger.info('API routes mounted at /api');
+ // Health check
this.app.get('/health', (req: Request, res: Response) => {
res.json({ status: 'ok', timestamp: new Date() });
});
+
+ // Serve static files and handle SPA routing
+ this.app.use(express.static(path.join(__dirname, '../../public'), {
+ index: 'index.html',
+ fallthrough: true
+ }));
+
+ // Fallback for any non-API routes - serve index.html for client-side routing
+ this.app.use((req: Request, res: Response, next) => {
+ if (!req.path.startsWith('/api')) {
+ res.sendFile(path.join(__dirname, '../../public/index.html'));
+ } else {
+ next();
+ }
+ });
}
private setupSocketIO(): void {
this.io.on('connection', (socket) => {
- logger.info(`Cliente conectado: ${socket.id}`);
+ logger.info(`Client connected: ${socket.id}`);
- // Mensaje de bienvenida inicial (opcional)
- // socket.emit('ai_response', {
- // content: '¡Hola! Soy tu asistente AI. ¿En qué puedo ayudarte?',
- // timestamp: new Date(),
- // conversationId: socket.id
- // });
+ // Store conversation history and configurations per socket
+ const conversationHistory = new Map();
+ let providerConfigs: Record = {};
+
+ // Receive provider configurations from client
+ socket.on('provider_configs', (configs) => {
+ providerConfigs = configs;
+ logger.info(`Provider configurations received for ${socket.id}`);
+ });
socket.on('user_message', async (data) => {
- const { message, conversationId } = data;
+ const { message, agentId, isJustChat, selectedModel } = data;
- logger.info(`Mensaje recibido de ${socket.id}: ${message}`);
+ logger.info(`📨 Message received from ${socket.id}`);
+ logger.info(`📝 Message: ${message}`);
+ logger.info(`🤖 Agent ID: ${agentId || 'none'}`);
+ logger.info(`💬 Is Just Chat: ${isJustChat}`);
+ logger.info(`🎯 Selected Model: ${JSON.stringify(selectedModel)}`);
+ logger.info(`🔧 Provider Configs available: ${Object.keys(providerConfigs).length > 0 ? 'YES' : 'NO'}`);
try {
- // Simular procesamiento de AI (reemplazar con tu lógica real)
- setTimeout(() => {
- // Generar respuesta de AI
- const aiResponse = this.generateAIResponse(message);
-
- socket.emit('ai_response', {
- content: aiResponse,
+ // Validate that a model is selected
+ if (!selectedModel || !selectedModel.id) {
+ logger.error('❌ No model selected');
+ socket.emit('error', {
+ message: 'Please select an AI model in settings.',
timestamp: new Date(),
- conversationId: conversationId || socket.id,
});
+ return;
+ }
- logger.info(`Respuesta enviada a ${socket.id}`);
- }, 1000 + Math.random() * 1000); // Simular latencia variable
+ logger.info(`✅ Model validation passed: ${selectedModel.id}`);
+
+ // Get provider configuration for the model
+ const provider = providerConfigs[selectedModel.providerId];
+
+ logger.info(`🔍 Looking for provider: ${selectedModel.providerId}`);
+ logger.info(`📦 Provider found: ${provider ? 'YES' : 'NO'}`);
+
+ if (provider) {
+ logger.info(`🔑 Provider enabled: ${provider.enabled}`);
+ logger.info(`🔐 Provider has API Key: ${provider.apiKey ? 'YES' : 'NO'}`);
+ }
+
+ if (!provider || !provider.enabled || !provider.apiKey) {
+ logger.error(`❌ Provider ${selectedModel.providerId} not configured properly`);
+ socket.emit('error', {
+ message: `Provider ${selectedModel.providerId} is not configured. Go to Settings → AI Providers.`,
+ timestamp: new Date(),
+ });
+ return;
+ }
+
+ logger.info(`✅ Provider validation passed`);
+
+ // Create AI service
+ const aiService = AIServiceFactory.create(selectedModel.providerId, {
+ apiKey: provider.apiKey,
+ apiUrl: provider.useCustomUrl ? provider.apiUrl : this.getDefaultApiUrl(selectedModel.providerId),
+ model: selectedModel.id,
+ });
+
+ logger.info(`✅ AIService created successfully`);
+
+ // Get or create conversation history
+ const conversationKey = agentId || 'just_chat';
+ let messages = conversationHistory.get(conversationKey) || [];
+
+ // Add system message if it's an agent with description
+ if (agentId && !isJustChat && messages.length === 0) {
+ // TODO: Get agent description from configuration
+ messages.push({
+ role: 'system',
+ content: 'You are a helpful and friendly assistant.',
+ });
+ }
+
+ // Add user message
+ messages.push({
+ role: 'user',
+ content: message,
+ });
+
+ // Generate response
+ const response = await aiService.generateResponse(messages);
+
+ // Add response to history
+ messages.push({
+ role: 'assistant',
+ content: response.content,
+ });
+
+ // Save updated history
+ conversationHistory.set(conversationKey, messages);
+
+ // Send response to client
+ socket.emit('ai_response', {
+ content: response.content,
+ timestamp: new Date(),
+ conversationId: conversationKey,
+ usage: response.usage,
+ });
+
+ logger.info(`Response sent to ${socket.id} (${response.usage?.totalTokens || 0} tokens)`);
+
+ } catch (error: any) {
+ logger.error(`Error processing message: ${error.message}`);
+
+ let errorMessage = 'An error occurred while processing your message.';
+
+ if (error.response) {
+ // API error from provider
+ const status = error.response.status;
+ if (status === 401 || status === 403) {
+ errorMessage = 'Invalid API Key or insufficient permissions. Check your configuration.';
+ } else if (status === 429) {
+ errorMessage = 'Rate limit exceeded. Please wait a moment.';
+ } else if (error.response.data?.error?.message) {
+ errorMessage = error.response.data.error.message;
+ }
+ } else if (error.code === 'ECONNABORTED') {
+ errorMessage = 'Timeout: The response took too long.';
+ } else if (error.message) {
+ errorMessage = error.message;
+ }
- } catch (error) {
- logger.error(`Error procesando mensaje: ${error}`);
socket.emit('error', {
- message: 'Ocurrió un error al procesar tu mensaje. Por favor, intenta de nuevo.',
+ message: errorMessage,
timestamp: new Date(),
});
}
});
socket.on('disconnect', () => {
- logger.info(`Cliente desconectado: ${socket.id}`);
+ logger.info(`Client disconnected: ${socket.id}`);
+ // Clear conversation history on disconnect
+ conversationHistory.clear();
});
});
}
- // Método temporal para generar respuestas de AI
- // TODO: Reemplazar con integración de modelo de AI real
- private generateAIResponse(userMessage: string): string {
- const responses = [
- `Entiendo tu pregunta sobre "${userMessage}". Déjame ayudarte con eso.`,
- `Interesante punto sobre "${userMessage}". Aquí está mi análisis...`,
- `Gracias por tu mensaje. Respecto a "${userMessage}", puedo decirte que...`,
- `¡Excelente pregunta! Sobre "${userMessage}", considera lo siguiente...`,
- ];
-
- // Respuestas específicas para palabras clave
- if (userMessage.toLowerCase().includes('código') || userMessage.toLowerCase().includes('programar')) {
- return `Claro, puedo ayudarte con programación. Para "${userMessage}", te recomiendo:\n\n1. Analizar el problema\n2. Diseñar la solución\n3. Implementar paso a paso\n4. Probar y depurar\n\n¿Necesitas ayuda con algún paso específico?`;
- }
-
- if (userMessage.toLowerCase().includes('idea') || userMessage.toLowerCase().includes('creativ')) {
- return `¡Me encanta ayudar con ideas creativas! Para "${userMessage}", aquí hay algunas sugerencias innovadoras:\n\n• Pensar fuera de lo convencional\n• Combinar conceptos diferentes\n• Buscar inspiración en otras áreas\n• Iterar y mejorar\n\n¿Quieres que explore alguna dirección específica?`;
- }
-
- if (userMessage.toLowerCase().includes('aprender') || userMessage.toLowerCase().includes('enseñ')) {
- return `Perfecto, enseñar es mi pasión. Sobre "${userMessage}":\n\n📚 **Conceptos clave:**\n- Empezar con lo básico\n- Práctica constante\n- Aplicar lo aprendido\n\n¿Te gustaría que profundice en algún aspecto?`;
- }
-
- // Respuesta aleatoria por defecto
- return responses[Math.floor(Math.random() * responses.length)];
+ /**
+ * Obtener URL API por defecto para cada provider
+ */
+ private getDefaultApiUrl(providerId: string): string {
+ const defaultUrls: Record = {
+ openai: 'https://api.openai.com/v1',
+ anthropic: 'https://api.anthropic.com/v1',
+ google: 'https://generativelanguage.googleapis.com/v1',
+ mistral: 'https://api.mistral.ai/v1',
+ cohere: 'https://api.cohere.ai/v1',
+ };
+ return defaultUrls[providerId] || '';
}
+
async start(): Promise {
return new Promise((resolve) => {
this.httpServer.listen(this.port, () => {
diff --git a/src/server/routes/provider.ts b/src/server/routes/provider.ts
new file mode 100644
index 0000000..9a5cbd8
--- /dev/null
+++ b/src/server/routes/provider.ts
@@ -0,0 +1,160 @@
+import { Router, Request, Response } from 'express';
+import axios from 'axios';
+
+const router = Router();
+
+// Endpoints de verificación para cada provider
+const PROVIDER_TEST_ENDPOINTS: Record Record }> = {
+ openai: {
+ method: 'GET',
+ path: '/models',
+ headers: (apiKey: string) => ({
+ 'Authorization': `Bearer ${apiKey}`,
+ 'Content-Type': 'application/json',
+ }),
+ },
+ anthropic: {
+ method: 'GET',
+ path: '/models',
+ headers: (apiKey: string) => ({
+ 'x-api-key': apiKey,
+ 'anthropic-version': '2023-06-01',
+ 'Content-Type': 'application/json',
+ }),
+ },
+ google: {
+ method: 'GET',
+ path: '/models',
+ headers: (apiKey: string) => ({
+ 'Content-Type': 'application/json',
+ }),
+ },
+ mistral: {
+ method: 'GET',
+ path: '/models',
+ headers: (apiKey: string) => ({
+ 'Authorization': `Bearer ${apiKey}`,
+ 'Content-Type': 'application/json',
+ }),
+ },
+ cohere: {
+ method: 'GET',
+ path: '/models',
+ headers: (apiKey: string) => ({
+ 'Authorization': `Bearer ${apiKey}`,
+ 'Content-Type': 'application/json',
+ }),
+ },
+};
+
+/**
+ * POST /api/test-provider
+ * Prueba la conexión a un provider de IA
+ */
+router.post('/test-provider', async (req: Request, res: Response) => {
+ console.log('📡 Test provider endpoint hit');
+ console.log('Request body:', req.body);
+
+ try {
+ const { providerId, apiKey, apiUrl } = req.body;
+
+ // Validación
+ if (!providerId || !apiKey || !apiUrl) {
+ console.log('❌ Missing required fields');
+ return res.status(400).json({
+ success: false,
+ error: 'providerId, apiKey y apiUrl son requeridos',
+ });
+ }
+
+ const testConfig = PROVIDER_TEST_ENDPOINTS[providerId];
+ if (!testConfig) {
+ return res.status(400).json({
+ success: false,
+ error: `Provider ${providerId} no soportado`,
+ });
+ }
+
+ // Construir URL completa
+ let fullUrl = `${apiUrl}${testConfig.path}`;
+
+ // Para Google, agregar API key como query param
+ if (providerId === 'google') {
+ fullUrl += `?key=${apiKey}`;
+ }
+
+ console.log(`Testing ${providerId} connection to ${fullUrl}`);
+
+ // Hacer request de prueba
+ const response = await axios({
+ method: testConfig.method.toLowerCase() as any,
+ url: fullUrl,
+ headers: testConfig.headers(apiKey),
+ timeout: 10000, // 10 segundos timeout
+ validateStatus: (status) => status < 500, // No lanzar error en 4xx
+ });
+
+ // Verificar respuesta
+ if (response.status === 200 || response.status === 201) {
+ // Contar modelos si existen
+ let modelsCount = 0;
+ if (response.data) {
+ if (Array.isArray(response.data)) {
+ modelsCount = response.data.length;
+ } else if (response.data.data && Array.isArray(response.data.data)) {
+ modelsCount = response.data.data.length;
+ } else if (response.data.models && Array.isArray(response.data.models)) {
+ modelsCount = response.data.models.length;
+ }
+ }
+
+ return res.json({
+ success: true,
+ message: 'Conexión exitosa',
+ modelsCount,
+ provider: providerId,
+ });
+ } else if (response.status === 401 || response.status === 403) {
+ return res.json({
+ success: false,
+ error: 'API Key inválida o sin permisos',
+ statusCode: response.status,
+ });
+ } else {
+ return res.json({
+ success: false,
+ error: `Error ${response.status}: ${response.statusText}`,
+ statusCode: response.status,
+ });
+ }
+ } catch (error: any) {
+ console.error('Error testing provider:', error);
+
+ // Manejar errores específicos
+ if (error.code === 'ECONNABORTED') {
+ return res.json({
+ success: false,
+ error: 'Timeout: No se pudo conectar al servidor (10s)',
+ });
+ } else if (error.code === 'ENOTFOUND' || error.code === 'ECONNREFUSED') {
+ return res.json({
+ success: false,
+ error: 'No se pudo conectar al servidor. Verifique la URL.',
+ });
+ } else if (error.response) {
+ return res.json({
+ success: false,
+ error: `Error ${error.response.status}: ${error.response.statusText}`,
+ details: error.response.data?.error?.message || error.response.data?.message,
+ });
+ } else {
+ return res.json({
+ success: false,
+ error: error.message || 'Error desconocido',
+ });
+ }
+ }
+});
+
+export default router;
+
diff --git a/src/services/AIService.ts b/src/services/AIService.ts
new file mode 100644
index 0000000..7f53696
--- /dev/null
+++ b/src/services/AIService.ts
@@ -0,0 +1,281 @@
+import axios, { AxiosInstance } from 'axios';
+import logger from '../utils/logger';
+
+export interface AIMessage {
+  role: 'system' | 'user' | 'assistant'; // OpenAI-style role; re-mapped per provider (e.g. 'assistant' -> 'model' for Gemini)
+  content: string;
+}
+
+export interface AIProviderConfig {
+  apiKey: string; // provider credential; sent as a header (or query param for Google)
+  apiUrl: string; // base URL of the provider's REST API
+  model: string; // provider-specific model identifier
+}
+
+export interface AIResponse {
+  content: string; // the model's reply text
+  finishReason?: string; // provider-specific stop reason, when reported
+  usage?: { // token accounting, only when the provider returns it
+    promptTokens: number;
+    completionTokens: number;
+    totalTokens: number;
+  };
+}
+
+/**
+ * Servicio unificado para interactuar con diferentes providers de IA
+ */
+export class AIService {
+ private client: AxiosInstance;
+ private config: AIProviderConfig;
+ private provider: string;
+
+ constructor(provider: string, config: AIProviderConfig) {
+ this.provider = provider;
+ this.config = config;
+
+ this.client = axios.create({
+ baseURL: config.apiUrl,
+ timeout: 60000, // 60 segundos
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ });
+ }
+
+ /**
+ * Generar respuesta del modelo de IA
+ */
+ async generateResponse(messages: AIMessage[]): Promise {
+ try {
+ switch (this.provider) {
+ case 'openai':
+ return await this.openAIRequest(messages);
+
+ case 'anthropic':
+ return await this.anthropicRequest(messages);
+
+ case 'google':
+ return await this.googleRequest(messages);
+
+ case 'mistral':
+ return await this.mistralRequest(messages);
+
+ case 'cohere':
+ return await this.cohereRequest(messages);
+
+ default:
+ throw new Error(`Provider ${this.provider} no soportado`);
+ }
+ } catch (error: any) {
+ logger.error(`Error en ${this.provider}:`, error.message);
+ throw error;
+ }
+ }
+
+ /**
+ * Request a OpenAI API
+ */
+ private async openAIRequest(messages: AIMessage[]): Promise {
+ const response = await this.client.post(
+ '/chat/completions',
+ {
+ model: this.config.model,
+ messages: messages,
+ temperature: 0.7,
+ max_tokens: 2000,
+ },
+ {
+ headers: {
+ 'Authorization': `Bearer ${this.config.apiKey}`,
+ },
+ }
+ );
+
+ const choice = response.data.choices[0];
+ return {
+ content: choice.message.content,
+ finishReason: choice.finish_reason,
+ usage: {
+ promptTokens: response.data.usage.prompt_tokens,
+ completionTokens: response.data.usage.completion_tokens,
+ totalTokens: response.data.usage.total_tokens,
+ },
+ };
+ }
+
+ /**
+ * Request a Anthropic API (Claude)
+ */
+ private async anthropicRequest(messages: AIMessage[]): Promise {
+ // Separar system message de los demás
+ const systemMessage = messages.find(m => m.role === 'system')?.content || '';
+ const userMessages = messages.filter(m => m.role !== 'system');
+
+ const response = await this.client.post(
+ '/messages',
+ {
+ model: this.config.model,
+ max_tokens: 2000,
+ system: systemMessage,
+ messages: userMessages.map(m => ({
+ role: m.role === 'assistant' ? 'assistant' : 'user',
+ content: m.content,
+ })),
+ },
+ {
+ headers: {
+ 'x-api-key': this.config.apiKey,
+ 'anthropic-version': '2023-06-01',
+ },
+ }
+ );
+
+ return {
+ content: response.data.content[0].text,
+ finishReason: response.data.stop_reason,
+ usage: {
+ promptTokens: response.data.usage.input_tokens,
+ completionTokens: response.data.usage.output_tokens,
+ totalTokens: response.data.usage.input_tokens + response.data.usage.output_tokens,
+ },
+ };
+ }
+
+ /**
+ * Request a Google Gemini API
+ */
+ private async googleRequest(messages: AIMessage[]): Promise {
+ // Convertir mensajes al formato de Gemini
+ const contents = messages
+ .filter(m => m.role !== 'system')
+ .map(m => ({
+ role: m.role === 'assistant' ? 'model' : 'user',
+ parts: [{ text: m.content }],
+ }));
+
+ // System instruction
+ const systemInstruction = messages.find(m => m.role === 'system')?.content;
+
+ const requestBody: any = {
+ contents,
+ generationConfig: {
+ temperature: 0.7,
+ maxOutputTokens: 2000,
+ },
+ };
+
+ if (systemInstruction) {
+ requestBody.systemInstruction = {
+ parts: [{ text: systemInstruction }],
+ };
+ }
+
+ const response = await this.client.post(
+ `/models/${this.config.model}:generateContent?key=${this.config.apiKey}`,
+ requestBody
+ );
+
+ const candidate = response.data.candidates[0];
+ return {
+ content: candidate.content.parts[0].text,
+ finishReason: candidate.finishReason,
+ usage: response.data.usageMetadata ? {
+ promptTokens: response.data.usageMetadata.promptTokenCount,
+ completionTokens: response.data.usageMetadata.candidatesTokenCount,
+ totalTokens: response.data.usageMetadata.totalTokenCount,
+ } : undefined,
+ };
+ }
+
+ /**
+ * Request a Mistral API
+ */
+ private async mistralRequest(messages: AIMessage[]): Promise {
+ const response = await this.client.post(
+ '/chat/completions',
+ {
+ model: this.config.model,
+ messages: messages,
+ temperature: 0.7,
+ max_tokens: 2000,
+ },
+ {
+ headers: {
+ 'Authorization': `Bearer ${this.config.apiKey}`,
+ },
+ }
+ );
+
+ const choice = response.data.choices[0];
+ return {
+ content: choice.message.content,
+ finishReason: choice.finish_reason,
+ usage: {
+ promptTokens: response.data.usage.prompt_tokens,
+ completionTokens: response.data.usage.completion_tokens,
+ totalTokens: response.data.usage.total_tokens,
+ },
+ };
+ }
+
+ /**
+ * Request a Cohere API
+ */
+ private async cohereRequest(messages: AIMessage[]): Promise {
+ // Convertir mensajes al formato de Cohere
+ const chatHistory = messages.slice(0, -1).map(m => ({
+ role: m.role === 'assistant' ? 'CHATBOT' : 'USER',
+ message: m.content,
+ }));
+
+ const lastMessage = messages[messages.length - 1];
+
+ const response = await this.client.post(
+ '/chat',
+ {
+ model: this.config.model,
+ message: lastMessage.content,
+ chat_history: chatHistory,
+ temperature: 0.7,
+ max_tokens: 2000,
+ },
+ {
+ headers: {
+ 'Authorization': `Bearer ${this.config.apiKey}`,
+ },
+ }
+ );
+
+ return {
+ content: response.data.text,
+ finishReason: response.data.finish_reason,
+ usage: response.data.meta?.tokens ? {
+ promptTokens: response.data.meta.tokens.input_tokens,
+ completionTokens: response.data.meta.tokens.output_tokens,
+ totalTokens: response.data.meta.tokens.input_tokens + response.data.meta.tokens.output_tokens,
+ } : undefined,
+ };
+ }
+
+ /**
+ * Generar respuesta streaming (para implementación futura)
+ */
+ async generateStreamingResponse(
+ messages: AIMessage[],
+ onChunk: (chunk: string) => void
+ ): Promise {
+ // TODO: Implementar streaming para cada provider
+ throw new Error('Streaming no implementado aún');
+ }
+}
+
+/**
+ * Factory for creating AIService instances bound to a provider + config pair.
+ */
+export class AIServiceFactory {
+  static create(provider: string, config: AIProviderConfig): AIService {
+    return new AIService(provider, config);
+  }
+}
+