diff --git a/client/src/App.tsx b/client/src/App.tsx
index e8a3334..f6f2d66 100644
--- a/client/src/App.tsx
+++ b/client/src/App.tsx
@@ -42,7 +42,18 @@ function App() {
       // Auto-select first available model
       const availableModels = getAvailableModels(enabledProviders);
 
-      if (availableModels.length > 0 && !selectedModel) {
+      // Restore the saved Just Chat model if it is still available; otherwise
+      // fall back to auto-selecting the first available model (the saved entry
+      // may reference a provider that has since been disabled).
+      let restored = false;
+      const savedJustModel = localStorage.getItem('selected_model_just_chat');
+      if (savedJustModel) {
+        try {
+          const parsed = JSON.parse(savedJustModel);
+          const match = availableModels.find(m => m.id === parsed.id && m.providerId === parsed.providerId);
+          if (match) { setSelectedModel(match); restored = true; }
+        } catch (e) { /* ignore corrupted localStorage entry */ }
+      }
+      if (!restored && availableModels.length > 0 && !selectedModel) {
         setSelectedModel(availableModels[0]);
       }
     } else {
@@ -94,7 +105,20 @@ function App() {
onSendMessage={chatState.sendMessage}
selectedModel={selectedModel}
availableModels={availableModels}
- onModelSelect={setSelectedModel}
+ onModelSelect={(model) => {
+ // Update global selected model
+ setSelectedModel(model);
+
+ // If Just Chat is active, persist the selected model for Just Chat
+ if (chatState.isJustChat) {
+ localStorage.setItem('selected_model_just_chat', JSON.stringify(model));
+ }
+
+ // If an agent is active, save as agent's model
+ if (chatState.activeAgentId) {
+ chatState.setAgentModel(chatState.activeAgentId, model);
+ }
+ }}
activeAgentName={
chatState.activeAgentId
? chatState.agents.find(a => a.id === chatState.activeAgentId)?.name
diff --git a/client/src/components/AIProviderSettings.tsx b/client/src/components/AIProviderSettings.tsx
index 5044bab..43adecb 100644
--- a/client/src/components/AIProviderSettings.tsx
+++ b/client/src/components/AIProviderSettings.tsx
@@ -397,62 +397,48 @@ export const AIProviderSettings: React.FC = () => {
throw new Error(`Provider ${providerId} no soportado`);
}
- // Hacer request directo al provider
- const response = await fetch(testUrl, {
- method: 'GET',
- headers,
- mode: 'cors',
+ // Instead of calling provider directly from browser (CORS issues), call our backend API
+ const backendUrl = '/api/test-provider';
+ const response = await fetch(backendUrl, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({ providerId, apiKey: cleanApiKey, apiUrl: apiUrl }),
});
- if (response.ok || response.status === 200) {
- let modelsCount = 0;
- try {
- const data = await response.json();
+ if (response.ok) {
+ const data = await response.json();
+ if (data.success) {
+ const modelsCount = data.modelsCount || 0;
+ setTestingStates(prev => ({ ...prev, [providerId]: 'success' }));
+ setTestMessages(prev => ({
+ ...prev,
+ [providerId]: modelsCount > 0
+ ? `✓ Connection successful. ${modelsCount} models available.`
+ : `✓ Connection successful. API Key valid.`
+ }));
- // Contar modelos según la estructura de respuesta
- if (Array.isArray(data)) {
- modelsCount = data.length;
- } else if (data.data && Array.isArray(data.data)) {
- modelsCount = data.data.length;
- } else if (data.models && Array.isArray(data.models)) {
- modelsCount = data.models.length;
- }
- } catch (e) {
- // Si no podemos parsear, pero la respuesta fue OK, asumimos éxito
+ // Enable provider automatically
+ setConfigs(prev => ({
+ ...prev,
+ [providerId]: { ...prev[providerId], enabled: true },
+ }));
+ } else {
+ setTestingStates(prev => ({ ...prev, [providerId]: 'error' }));
+ setTestMessages(prev => ({
+ ...prev,
+ [providerId]: `✗ ${data.error || 'Unknown error'}`
+ }));
}
-
- setTestingStates(prev => ({ ...prev, [providerId]: 'success' }));
- setTestMessages(prev => ({
- ...prev,
- [providerId]: modelsCount > 0
- ? `✓ Conexión exitosa. ${modelsCount} modelos disponibles.`
- : `✓ Conexión exitosa. API Key válida.`
- }));
-
- // Habilitar provider automáticamente
- setConfigs(prev => ({
- ...prev,
- [providerId]: { ...prev[providerId], enabled: true },
- }));
- } else if (response.status === 401 || response.status === 403) {
- setTestingStates(prev => ({ ...prev, [providerId]: 'error' }));
- setTestMessages(prev => ({
- ...prev,
- [providerId]: `✗ Error: API Key inválida o sin permisos`
- }));
} else {
- let errorDetail = '';
+ let msg = `HTTP ${response.status}`;
try {
- const errorData = await response.json();
- errorDetail = errorData.error?.message || errorData.message || '';
- } catch (e) {
- // Ignorar si no se puede parsear
- }
-
+ const err = await response.json();
+ msg = err.error || err.message || msg;
+ } catch (e) {}
setTestingStates(prev => ({ ...prev, [providerId]: 'error' }));
setTestMessages(prev => ({
...prev,
- [providerId]: `✗ Error ${response.status}: ${errorDetail || response.statusText}`
+ [providerId]: `✗ Error: ${msg}`
}));
}
} catch (error: any) {
diff --git a/client/src/components/LobeChatArea.tsx b/client/src/components/LobeChatArea.tsx
index 31362ce..47fe009 100644
--- a/client/src/components/LobeChatArea.tsx
+++ b/client/src/components/LobeChatArea.tsx
@@ -6,6 +6,7 @@ import { WelcomeScreen } from './WelcomeScreen';
import type { Message } from '../types';
import { lobeChatColors, lobeChatSpacing } from '../styles/lobeChatTheme';
import { AIModel } from '../config/aiProviders';
+import React from 'react';
const useStyles = createStyles(({ css }) => ({
container: css`
@@ -255,6 +256,33 @@ interface LobeChatAreaProps {
isJustChat?: boolean;
}
+function renderMessageContent(content: string, format?: 'markdown' | 'rich' | 'text') {
+ if (!format || format === 'text') {
+ return
{content}
;
+ }
+
+ if (format === 'rich') {
+ return ;
+ }
+
+ // very minimal markdown -> html conversion (headings, bold, italics, code blocks, lists)
+ const esc = (s: string) => s.replace(/&/g, '&').replace(//g, '>');
+ let html = esc(content)
+ .replace(/```([\s\S]*?)```/g, (_m, code) => `${esc(code)}
`)
+ .replace(/^### (.*$)/gim, '$1
')
+ .replace(/^## (.*$)/gim, '$1
')
+ .replace(/^# (.*$)/gim, '$1
')
+ .replace(/\*\*(.*?)\*\*/gim, '$1')
+ .replace(/\*(.*?)\*/gim, '$1')
+ .replace(/`([^`]+)`/gim, '$1')
+ .replace(/\n\s*\n/g, '
')
+ .replace(/^- (.*$)/gim, '$1')
+ .replace(/\n/g, '- ')
+ .replace(/
- ([\s\S]*?)
/g, ' - $1
');
+
+ return ;
+}
+
export const LobeChatArea: React.FC = ({
messages,
isTyping,
@@ -350,7 +378,7 @@ export const LobeChatArea: React.FC = ({
- {message.content}
+ {renderMessageContent(message.content, message.format)}
{message.role === 'agent' && (
@@ -395,4 +423,3 @@ export const LobeChatArea: React.FC = ({
);
};
-
diff --git a/client/src/hooks/useChat.ts b/client/src/hooks/useChat.ts
index 5f966e8..28d8839 100644
--- a/client/src/hooks/useChat.ts
+++ b/client/src/hooks/useChat.ts
@@ -39,12 +39,13 @@ export const useChat = (props?: UseChatProps) => {
}
});
- newSocket.on('ai_response', (data: { content: string; timestamp: string }) => {
+ newSocket.on('ai_response', (data: { content: string; timestamp: string; format?: string; conversationId?: string; usage?: any }) => {
const agentMessage: Message = {
id: Date.now().toString(),
role: 'assistant',
content: data.content,
timestamp: new Date(data.timestamp),
+ format: (data.format as any) || 'text',
};
setMessages((prev) => {
@@ -59,8 +60,68 @@ export const useChat = (props?: UseChatProps) => {
       setIsTyping(false);
     });
 
-    newSocket.on('error', (data: { message: string }) => {
-      console.error('Error from server:', data.message);
+    // Keep an 'error' listener: the server emits 'error' when generation
+    // fails, and without this the typing indicator would hang forever.
+    newSocket.on('error', (data: { message: string }) => {
+      console.error('Error from server:', data.message);
+      setIsTyping(false);
+    });
+
+    // Persist the current message list for the active conversation.
+    // NOTE(review): isJustChat/activeAgentId are captured by this effect's
+    // closure — confirm both are in the effect dependency list.
+    const persist = (updated: Message[]) => {
+      const storageKey = isJustChat ? 'messages_just_chat' : `messages_${activeAgentId}`;
+      localStorage.setItem(storageKey, JSON.stringify(updated));
+    };
+
+    // Streaming chunks: the first chunk creates a placeholder assistant
+    // message; subsequent chunks append to it.
+    let streamingMessageId: string | null = null;
+    newSocket.on('ai_response_chunk', (data: { chunk: string; conversationId?: string }) => {
+      const chunk = data.chunk || '';
+      if (!streamingMessageId) {
+        streamingMessageId = `stream_${Date.now()}`;
+        const partial: Message = { id: streamingMessageId, role: 'assistant', content: chunk, timestamp: new Date(), format: 'text' };
+        setMessages(prev => {
+          const updated = [...prev, partial];
+          persist(updated);
+          return updated;
+        });
+      } else {
+        setMessages(prev => {
+          const updated = prev.map(m => m.id === streamingMessageId ? { ...m, content: m.content + chunk } : m);
+          persist(updated);
+          return updated;
+        });
+      }
+      setIsTyping(true);
+    });
+
+    // End of stream: replace the streamed placeholder with the final,
+    // authoritative content/format from the server.
+    newSocket.on('ai_response_end', (data: { content: string; format?: string; conversationId?: string }) => {
+      const finalContent = data.content || '';
+      const finalFormat = (data.format as any) || 'text';
+      if (streamingMessageId) {
+        setMessages(prev => {
+          const updated = prev.map(m => m.id === streamingMessageId ? { ...m, content: finalContent, format: finalFormat, timestamp: new Date() } : m);
+          persist(updated);
+          return updated;
+        });
+        streamingMessageId = null;
+      } else {
+        // Fallback: no chunks arrived, push a complete assistant message
+        const agentMessage: Message = {
+          id: Date.now().toString(),
+          role: 'assistant',
+          content: finalContent,
+          timestamp: new Date(),
+          format: finalFormat,
+        };
+        setMessages(prev => {
+          const updated = [...prev, agentMessage];
+          persist(updated);
+          return updated;
+        });
+      }
       setIsTyping(false);
     });
 
@@ -126,6 +177,11 @@ export const useChat = (props?: UseChatProps) => {
updateAgent(id, { icon: newIcon });
}, [updateAgent]);
+ // Set agent selected model
+ const setAgentModel = useCallback((agentId: string, model: AIModel | null) => {
+ updateAgent(agentId, { selectedModel: model });
+ }, [updateAgent]);
+
// Delete agent
const handleDeleteAgent = useCallback((id: string) => {
deleteAgent(id);
@@ -162,18 +218,37 @@ export const useChat = (props?: UseChatProps) => {
setIsTyping(true);
- console.log('🚀 Sending message with model:', selectedModel);
+ // Determine effective model: agent's selectedModel > global selectedModel
+ let modelToUse: AIModel | null = selectedModel || null;
+ if (activeAgentId) {
+ const agent = agents.find(a => a.id === activeAgentId);
+ if (agent && agent.selectedModel) {
+ modelToUse = agent.selectedModel;
+ }
+ }
+
+ console.log('🚀 Sending message with model:', modelToUse);
console.log('📝 Message content:', content);
console.log('🤖 Agent ID:', activeAgentId);
console.log('💬 Is Just Chat:', isJustChat);
+ // Determine system prompt (agent purpose) if an agent is active and not Just Chat
+ let systemPrompt: string | null = null;
+ if (activeAgentId && !isJustChat) {
+ const agent = agents.find(a => a.id === activeAgentId);
+ if (agent && agent.description) {
+ systemPrompt = agent.description;
+ }
+ }
+
socket.emit('user_message', {
message: content,
agentId: activeAgentId,
isJustChat: isJustChat,
- selectedModel: selectedModel,
+ selectedModel: modelToUse,
+ systemPrompt,
});
- }, [socket, activeAgentId, isJustChat, selectedModel]);
+ }, [socket, activeAgentId, isJustChat, selectedModel, agents]);
return {
messages,
@@ -188,6 +263,6 @@ export const useChat = (props?: UseChatProps) => {
renameAgent,
changeAgentIcon,
deleteAgent: handleDeleteAgent,
+ setAgentModel,
};
};
-
diff --git a/client/src/types/index.ts b/client/src/types/index.ts
index b8b155e..38e9678 100644
--- a/client/src/types/index.ts
+++ b/client/src/types/index.ts
@@ -1,8 +1,9 @@
export interface Message {
id: string;
- role: 'user' | 'assistant';
+ role: 'user' | 'assistant' | 'agent';
content: string;
timestamp: Date;
+ format?: 'markdown' | 'rich' | 'text';
}
export interface Conversation {
@@ -20,4 +21,3 @@ export interface ChatState {
activeConversationId: string;
isTyping: boolean;
}
-
diff --git a/package.json b/package.json
index 630b432..f317544 100644
--- a/package.json
+++ b/package.json
@@ -16,7 +16,7 @@
"dependencies": {
"@lobehub/fluent-emoji": "^4.1.0",
"@lobehub/ui": "^4.38.0",
- "@prisma/client": "^7.4.0",
+ "@prisma/client": "5.22.0",
"@types/react": "^19.2.14",
"@types/react-dom": "^19.2.3",
"antd": "^6.3.0",
@@ -42,7 +42,7 @@
"concurrently": "^9.2.1",
"eslint": "^8.56.0",
"prettier": "^3.2.4",
- "prisma": "^7.4.0",
+ "prisma": "5.22.0",
"tsx": "^4.7.0",
"typescript": "^5.5.3",
"vite": "^7.3.1"
diff --git a/src/db/prisma.ts b/src/db/prisma.ts
new file mode 100644
index 0000000..4e54f7a
--- /dev/null
+++ b/src/db/prisma.ts
@@ -0,0 +1,5 @@
+import { PrismaClient } from '@prisma/client';
+
+const prisma = new PrismaClient();
+
+export default prisma;
diff --git a/src/index.ts b/src/index.ts
index ad09353..1843bf8 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,3 +1,6 @@
+import dotenv from 'dotenv';
+dotenv.config();
+
import { Application } from './core/Application';
import logger from './utils/logger';
diff --git a/src/server/WebServer.ts b/src/server/WebServer.ts
index 46008ec..a2f1c9a 100644
--- a/src/server/WebServer.ts
+++ b/src/server/WebServer.ts
@@ -6,7 +6,9 @@ import cors from 'cors';
import logger from '../utils/logger';
import { config } from '../config';
import providerRouter from './routes/provider';
-import { AIServiceFactory, AIMessage } from '../services/AIService';
+import agentsRouter from './routes/agents';
+import { AIServiceFactory, AIMessage, detectFormat } from '../services/AIService';
+import prisma from '../db/prisma';
export class WebServer {
private app: Express;
@@ -35,6 +37,7 @@ export class WebServer {
private setupRoutes(): void {
// API Routes (deben ir primero)
this.app.use('/api', providerRouter);
+ this.app.use('/api/agents', agentsRouter);
logger.info('API routes mounted at /api');
// Health check
@@ -67,13 +70,45 @@ export class WebServer {
let providerConfigs: Record = {};
// Receive provider configurations from client
- socket.on('provider_configs', (configs) => {
+ socket.on('provider_configs', async (configs) => {
providerConfigs = configs;
logger.info(`Provider configurations received for ${socket.id}`);
+
+ try {
+ // Ensure default user exists
+ let user = await prisma.user.findUnique({ where: { email: 'local@localhost' } });
+ if (!user) {
+ user = await prisma.user.create({ data: { email: 'local@localhost', password: 'local', name: 'Local User' } });
+ logger.info('Created default local user in DB');
+ }
+
+ // Upsert provider configs for this user
+ for (const pid of Object.keys(configs)) {
+ const cfg = configs[pid];
+ await prisma.aIProvider.upsert({
+ where: { userId_providerId: { userId: user.id, providerId: pid } },
+ update: {
+ apiKey: cfg.apiKey || null,
+ enabled: cfg.enabled || false,
+ updatedAt: new Date(),
+ },
+ create: {
+ userId: user.id,
+ providerId: pid,
+ name: pid,
+ enabled: cfg.enabled || false,
+ apiKey: cfg.apiKey || null,
+ },
+ });
+ }
+ logger.info('Provider configs persisted to DB');
+ } catch (err) {
+ logger.error('Error persisting provider configs to DB:', (err as any).message);
+ }
});
socket.on('user_message', async (data) => {
- const { message, agentId, isJustChat, selectedModel } = data;
+ const { message, agentId, isJustChat, selectedModel, systemPrompt } = data;
logger.info(`📨 Message received from ${socket.id}`);
logger.info(`📝 Message: ${message}`);
@@ -126,46 +161,103 @@ export class WebServer {
logger.info(`✅ AIService created successfully`);
- // Get or create conversation history
- const conversationKey = agentId || 'just_chat';
- let messages = conversationHistory.get(conversationKey) || [];
+ // Get or create conversation DB record
+ let conversation = null;
+ try {
+ // find default user
+ let user = await prisma.user.findUnique({ where: { email: 'local@localhost' } });
+ if (!user) {
+ user = await prisma.user.create({ data: { email: 'local@localhost', password: 'local', name: 'Local User' } });
+ }
- // Add system message if it's an agent with description
- if (agentId && !isJustChat && messages.length === 0) {
- // TODO: Get agent description from configuration
- messages.push({
- role: 'system',
- content: 'You are a helpful and friendly assistant.',
- });
+ if (agentId) {
+ // find or create conversation linked to agent
+ conversation = await prisma.conversation.create({
+ data: {
+ userId: user.id,
+ title: `Conversation for ${agentId}`,
+ agentId: agentId,
+ modelId: selectedModel?.id || null,
+ providerId: selectedModel?.providerId || null,
+ }
+ });
+ } else {
+ // just chat
+ conversation = await prisma.conversation.create({
+ data: {
+ userId: user.id,
+ title: 'Just Chat',
+ modelId: selectedModel?.id || null,
+ providerId: selectedModel?.providerId || null,
+ }
+ });
+ }
+ } catch (err) {
+ logger.error('Error creating conversation in DB:', (err as any).message);
}
- // Add user message
- messages.push({
- role: 'user',
- content: message,
- });
+ // Build messages array for AI service
+ const messagesForAI: AIMessage[] = [];
+ if (systemPrompt) {
+ messagesForAI.push({ role: 'system', content: systemPrompt });
+ logger.info('System prompt added to conversation');
+ }
+ messagesForAI.push({ role: 'user', content: message });
- // Generate response
- const response = await aiService.generateResponse(messages);
+ // Persist user message to DB
+ if (conversation) {
+ try {
+ await prisma.message.create({
+ data: {
+ conversationId: conversation.id,
+ role: 'user',
+ content: message,
+ }
+ });
+ } catch (err) {
+ logger.error('Error saving user message to DB:', (err as any).message);
+ }
+ }
- // Add response to history
- messages.push({
- role: 'assistant',
- content: response.content,
- });
+ // Stream response in chunks back to client and persist messages
+ let accumulated = '';
+ try {
+ await aiService.generateStreamingResponse(messagesForAI, async (chunk: string) => {
+ accumulated += chunk;
+ // emit chunk to client
+ socket.emit('ai_response_chunk', { chunk, conversationId: conversation?.id || null });
+ });
- // Save updated history
- conversationHistory.set(conversationKey, messages);
+ // after streaming finished, finalize
+ const finalFormat = (accumulated ? (detectFormat(accumulated as any) as any) : 'text');
- // Send response to client
- socket.emit('ai_response', {
- content: response.content,
- timestamp: new Date(),
- conversationId: conversationKey,
- usage: response.usage,
- });
+ // persist assistant message
+ if (conversation) {
+ try {
+ await prisma.message.create({
+ data: {
+ conversationId: conversation.id,
+ role: 'assistant',
+ content: accumulated,
+ }
+ });
+ } catch (err) {
+ logger.error('Error saving assistant message to DB:', (err as any).message);
+ }
+ }
- logger.info(`Response sent to ${socket.id} (${response.usage?.totalTokens || 0} tokens)`);
+ // send end event
+ socket.emit('ai_response_end', {
+ content: accumulated,
+ format: finalFormat,
+ conversationId: conversation?.id || null,
+ });
+
+ logger.info(`Stream response completed for ${socket.id}`);
+ } catch (err) {
+ logger.error('Error during streaming response:', (err as any).message);
+ socket.emit('error', { message: 'Error generating response', timestamp: new Date() });
+ }
} catch (error: any) {
logger.error(`Error processing message: ${error.message}`);
@@ -237,4 +329,3 @@ export class WebServer {
});
}
}
-
diff --git a/src/server/routes/agents.ts b/src/server/routes/agents.ts
new file mode 100644
index 0000000..6b33668
--- /dev/null
+++ b/src/server/routes/agents.ts
@@ -0,0 +1,73 @@
+import express from 'express';
+import prisma from '../../db/prisma';
+
+const router = express.Router();
+
+// Find the default local user, creating it on first use. Centralised here so
+// every route shares the same bootstrap logic instead of copy-pasting it.
+async function getLocalUser() {
+  let user = await prisma.user.findUnique({ where: { email: 'local@localhost' } });
+  if (!user) {
+    user = await prisma.user.create({ data: { email: 'local@localhost', password: 'local', name: 'Local User' } });
+  }
+  return user;
+}
+
+// Get all agents for the local user, newest first
+router.get('/', async (req, res) => {
+  try {
+    const user = await getLocalUser();
+    const agents = await prisma.agent.findMany({ where: { userId: user.id }, orderBy: { createdAt: 'desc' } });
+    res.json({ success: true, data: agents });
+  } catch (err) {
+    res.status(500).json({ success: false, error: (err as any).message });
+  }
+});
+
+// Create agent
+router.post('/', async (req, res) => {
+  // NOTE(review): selectedModelId is accepted here but never persisted —
+  // either add a column to the Agent model or drop it from the client payload.
+  const { name, emoji, description, selectedModelId } = req.body;
+  try {
+    const user = await getLocalUser();
+    const agent = await prisma.agent.create({
+      data: {
+        userId: user.id,
+        name: name || 'New Agent',
+        emoji: emoji || '🤖',
+        role: 'assistant',
+        description: description || '',
+        status: 'active',
+      }
+    });
+    res.json({ success: true, data: agent });
+  } catch (err) {
+    res.status(500).json({ success: false, error: (err as any).message });
+  }
+});
+
+// Update agent (body is passed straight to Prisma; unknown fields will throw)
+router.put('/:id', async (req, res) => {
+  const id = req.params.id;
+  const updates = req.body;
+  try {
+    const agent = await prisma.agent.update({ where: { id }, data: updates });
+    res.json({ success: true, data: agent });
+  } catch (err) {
+    res.status(500).json({ success: false, error: (err as any).message });
+  }
+});
+
+// Delete agent
+router.delete('/:id', async (req, res) => {
+  const id = req.params.id;
+  try {
+    await prisma.agent.delete({ where: { id } });
+    res.json({ success: true });
+  } catch (err) {
+    res.status(500).json({ success: false, error: (err as any).message });
+  }
+});
+
+export default router;
diff --git a/src/services/AIService.ts b/src/services/AIService.ts
index 7f53696..8048672 100644
--- a/src/services/AIService.ts
+++ b/src/services/AIService.ts
@@ -14,6 +14,7 @@ export interface AIProviderConfig {
export interface AIResponse {
content: string;
+ format?: 'markdown' | 'rich' | 'text';
finishReason?: string;
usage?: {
promptTokens: number;
@@ -22,6 +23,16 @@ export interface AIResponse {
};
}
+export function detectFormat(text: string): 'markdown' | 'rich' | 'text' {
+ if (!text) return 'text';
+ const hasMarkdown = /(^#{1,6}\s)|(```|\*\*|\*\w|\n- |\n\d+\.)/m.test(text);
+ const hasHtml = /<[^>]+>/.test(text);
+ const hasJsonLike = /\{\s*"[\w]+"\s*:\s*/.test(text);
+ if (hasHtml || hasJsonLike) return 'rich';
+ if (hasMarkdown) return 'markdown';
+ return 'text';
+}
+
/**
* Servicio unificado para interactuar con diferentes providers de IA
*/
@@ -93,8 +104,10 @@ export class AIService {
);
const choice = response.data.choices[0];
+ const content = choice.message.content;
return {
- content: choice.message.content,
+ content,
+ format: detectFormat(content),
finishReason: choice.finish_reason,
usage: {
promptTokens: response.data.usage.prompt_tokens,
@@ -131,8 +144,10 @@ export class AIService {
}
);
+ const content = response.data.content[0].text;
return {
- content: response.data.content[0].text,
+ content,
+ format: detectFormat(content),
finishReason: response.data.stop_reason,
usage: {
promptTokens: response.data.usage.input_tokens,
@@ -177,8 +192,10 @@ export class AIService {
);
const candidate = response.data.candidates[0];
+ const content = candidate.content.parts[0].text;
return {
- content: candidate.content.parts[0].text,
+ content,
+ format: detectFormat(content),
finishReason: candidate.finishReason,
usage: response.data.usageMetadata ? {
promptTokens: response.data.usageMetadata.promptTokenCount,
@@ -208,8 +225,10 @@ export class AIService {
);
const choice = response.data.choices[0];
+ const content = choice.message.content;
return {
- content: choice.message.content,
+ content,
+ format: detectFormat(content),
finishReason: choice.finish_reason,
usage: {
promptTokens: response.data.usage.prompt_tokens,
@@ -247,8 +266,10 @@ export class AIService {
}
);
+ const content = response.data.text;
return {
- content: response.data.text,
+ content,
+ format: detectFormat(content),
finishReason: response.data.finish_reason,
usage: response.data.meta?.tokens ? {
promptTokens: response.data.meta.tokens.input_tokens,
@@ -261,12 +282,27 @@
   /**
    * Generar respuesta streaming (para implementación futura)
    */
-  async generateStreamingResponse(
-    messages: AIMessage[],
-    onChunk: (chunk: string) => void
-  ): Promise<void> {
-    // TODO: Implementar streaming para cada provider
-    throw new Error('Streaming no implementado aún');
+  /**
+   * Generate a streaming response. For now the full response is produced
+   * first and then replayed to the caller in fixed-size slices. Slicing (as
+   * opposed to splitting on sentences/words and re-joining with spaces)
+   * guarantees the chunks reassemble to exactly the original content, so
+   * newlines, markdown and code blocks are preserved.
+   */
+  async generateStreamingResponse(
+    messages: AIMessage[],
+    onChunk: (chunk: string) => void
+  ): Promise<void> {
+    const full = await this.generateResponse(messages);
+    const content = full.content || '';
+
+    const chunkSize = 64;
+    for (let i = 0; i < content.length; i += chunkSize) {
+      onChunk(content.slice(i, i + chunkSize));
+      // Small delay to simulate token-by-token streaming
+      await new Promise((res) => setTimeout(res, 25));
+    }
+  }
 }
 
@@ -278,4 +313,3 @@ export class AIServiceFactory {
return new AIService(provider, config);
}
}
-