Implement agent management API with CRUD operations and integrate with Prisma for database interactions

This commit is contained in:
cesarmendivil 2026-02-17 10:54:40 -07:00
parent 37701bc5b8
commit a83ea7b078
11 changed files with 427 additions and 110 deletions

View File

@ -42,7 +42,18 @@ function App() {
// Auto-select first available model
const availableModels = getAvailableModels(enabledProviders);
if (availableModels.length > 0 && !selectedModel) {
// If there's a saved model for Just Chat, load it first
const savedJustModel = localStorage.getItem('selected_model_just_chat');
if (savedJustModel) {
try {
const parsed = JSON.parse(savedJustModel);
// ensure the model exists in available models
const match = availableModels.find(m => m.id === parsed.id && m.providerId === parsed.providerId);
if (match) setSelectedModel(match);
} catch (e) {
// ignore parse errors
}
} else if (availableModels.length > 0 && !selectedModel) {
setSelectedModel(availableModels[0]);
}
} else {
@ -94,7 +105,20 @@ function App() {
onSendMessage={chatState.sendMessage}
selectedModel={selectedModel}
availableModels={availableModels}
onModelSelect={setSelectedModel}
onModelSelect={(model) => {
// Update global selected model
setSelectedModel(model);
// If Just Chat is active, persist the selected model for Just Chat
if (chatState.isJustChat) {
localStorage.setItem('selected_model_just_chat', JSON.stringify(model));
}
// If an agent is active, save as agent's model
if (chatState.activeAgentId) {
chatState.setAgentModel(chatState.activeAgentId, model);
}
}}
activeAgentName={
chatState.activeAgentId
? chatState.agents.find(a => a.id === chatState.activeAgentId)?.name

View File

@ -397,62 +397,48 @@ export const AIProviderSettings: React.FC = () => {
throw new Error(`Provider ${providerId} no soportado`);
}
// Hacer request directo al provider
const response = await fetch(testUrl, {
method: 'GET',
headers,
mode: 'cors',
// Instead of calling provider directly from browser (CORS issues), call our backend API
const backendUrl = '/api/test-provider';
const response = await fetch(backendUrl, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ providerId, apiKey: cleanApiKey, apiUrl: apiUrl }),
});
if (response.ok || response.status === 200) {
let modelsCount = 0;
try {
const data = await response.json();
if (response.ok) {
const data = await response.json();
if (data.success) {
const modelsCount = data.modelsCount || 0;
setTestingStates(prev => ({ ...prev, [providerId]: 'success' }));
setTestMessages(prev => ({
...prev,
[providerId]: modelsCount > 0
? `✓ Connection successful. ${modelsCount} models available.`
: `✓ Connection successful. API Key valid.`
}));
// Contar modelos según la estructura de respuesta
if (Array.isArray(data)) {
modelsCount = data.length;
} else if (data.data && Array.isArray(data.data)) {
modelsCount = data.data.length;
} else if (data.models && Array.isArray(data.models)) {
modelsCount = data.models.length;
}
} catch (e) {
// Si no podemos parsear, pero la respuesta fue OK, asumimos éxito
// Enable provider automatically
setConfigs(prev => ({
...prev,
[providerId]: { ...prev[providerId], enabled: true },
}));
} else {
setTestingStates(prev => ({ ...prev, [providerId]: 'error' }));
setTestMessages(prev => ({
...prev,
[providerId]: `${data.error || 'Unknown error'}`
}));
}
setTestingStates(prev => ({ ...prev, [providerId]: 'success' }));
setTestMessages(prev => ({
...prev,
[providerId]: modelsCount > 0
? `✓ Conexión exitosa. ${modelsCount} modelos disponibles.`
: `✓ Conexión exitosa. API Key válida.`
}));
// Habilitar provider automáticamente
setConfigs(prev => ({
...prev,
[providerId]: { ...prev[providerId], enabled: true },
}));
} else if (response.status === 401 || response.status === 403) {
setTestingStates(prev => ({ ...prev, [providerId]: 'error' }));
setTestMessages(prev => ({
...prev,
[providerId]: `✗ Error: API Key inválida o sin permisos`
}));
} else {
let errorDetail = '';
let msg = `HTTP ${response.status}`;
try {
const errorData = await response.json();
errorDetail = errorData.error?.message || errorData.message || '';
} catch (e) {
// Ignorar si no se puede parsear
}
const err = await response.json();
msg = err.error || err.message || msg;
} catch (e) {}
setTestingStates(prev => ({ ...prev, [providerId]: 'error' }));
setTestMessages(prev => ({
...prev,
[providerId]: `✗ Error ${response.status}: ${errorDetail || response.statusText}`
[providerId]: `✗ Error: ${msg}`
}));
}
} catch (error: any) {

View File

@ -6,6 +6,7 @@ import { WelcomeScreen } from './WelcomeScreen';
import type { Message } from '../types';
import { lobeChatColors, lobeChatSpacing } from '../styles/lobeChatTheme';
import { AIModel } from '../config/aiProviders';
import React from 'react';
const useStyles = createStyles(({ css }) => ({
container: css`
@ -255,6 +256,33 @@ interface LobeChatAreaProps {
isJustChat?: boolean;
}
// Render a chat message body according to its declared format.
// - 'text' (default): plain text, safely escaped by React.
// - 'rich': content is injected as raw HTML.
//   SECURITY NOTE(review): this trusts server/model output as-is; if content can
//   be attacker-influenced it must be sanitized (e.g. DOMPurify) before injection.
// - 'markdown': minimal markdown -> HTML conversion (code fences, headings,
//   bold, italics, inline code, paragraph breaks, simple lists).
function renderMessageContent(content: string, format?: 'markdown' | 'rich' | 'text') {
  if (!format || format === 'text') {
    return <div>{content}</div>;
  }
  if (format === 'rich') {
    return <div dangerouslySetInnerHTML={{ __html: content }} />;
  }
  // Escape HTML-special characters once, up front.
  const esc = (s: string) => s.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;');
  let html = esc(content)
    // The captured code text is already escaped above — do NOT escape it again
    // (the previous double esc() rendered '&' inside code fences as '&amp;amp;').
    .replace(/```([\s\S]*?)```/g, (_m, code) => `<pre><code>${code}</code></pre>`)
    .replace(/^### (.*$)/gim, '<h3>$1</h3>')
    .replace(/^## (.*$)/gim, '<h2>$1</h2>')
    .replace(/^# (.*$)/gim, '<h1>$1</h1>')
    .replace(/\*\*(.*?)\*\*/gim, '<strong>$1</strong>')
    .replace(/\*(.*?)\*/gim, '<em>$1</em>')
    .replace(/`([^`]+)`/gim, '<code>$1</code>')
    // Blank line => paragraph break.
    .replace(/\n\s*\n/g, '<br/><br/>')
    // Dash bullets => list items, then wrap the first item in <ul>.
    .replace(/^- (.*$)/gim, '<li>$1</li>')
    .replace(/\n<li>/g, '<ul><li>')
    .replace(/<li>([\s\S]*?)<br\/><br\/>/g, '<li>$1</li></ul><br/><br/>');
  return <div dangerouslySetInnerHTML={{ __html: html }} />;
}
export const LobeChatArea: React.FC<LobeChatAreaProps> = ({
messages,
isTyping,
@ -350,7 +378,7 @@ export const LobeChatArea: React.FC<LobeChatAreaProps> = ({
</span>
</div>
<div className={styles.messageText}>
{message.content}
{renderMessageContent(message.content, message.format)}
</div>
{message.role === 'agent' && (
<div className={styles.messageActions}>
@ -395,4 +423,3 @@ export const LobeChatArea: React.FC<LobeChatAreaProps> = ({
</div>
);
};

View File

@ -39,12 +39,13 @@ export const useChat = (props?: UseChatProps) => {
}
});
newSocket.on('ai_response', (data: { content: string; timestamp: string }) => {
newSocket.on('ai_response', (data: { content: string; timestamp: string; format?: string; conversationId?: string; usage?: any }) => {
const agentMessage: Message = {
id: Date.now().toString(),
role: 'assistant',
content: data.content,
timestamp: new Date(data.timestamp),
format: (data.format as any) || 'text',
};
setMessages((prev) => {
@ -59,8 +60,58 @@ export const useChat = (props?: UseChatProps) => {
setIsTyping(false);
});
newSocket.on('error', (data: { message: string }) => {
console.error('Error from server:', data.message);
// Streaming chunks
let streamingMessageId: string | null = null;
newSocket.on('ai_response_chunk', (data: { chunk: string; conversationId?: string }) => {
// Append chunk to a streaming assistant message
const chunk = data.chunk || '';
if (!streamingMessageId) {
streamingMessageId = `stream_${Date.now()}`;
const partial: Message = { id: streamingMessageId, role: 'assistant', content: chunk, timestamp: new Date(), format: 'text' };
setMessages(prev => {
const updated = [...prev, partial];
const storageKey = isJustChat ? 'messages_just_chat' : `messages_${activeAgentId}`;
localStorage.setItem(storageKey, JSON.stringify(updated));
return updated;
});
} else {
setMessages(prev => {
const updated = prev.map(m => m.id === streamingMessageId ? { ...m, content: m.content + chunk } : m);
const storageKey = isJustChat ? 'messages_just_chat' : `messages_${activeAgentId}`;
localStorage.setItem(storageKey, JSON.stringify(updated));
return updated;
});
}
setIsTyping(true);
});
newSocket.on('ai_response_end', (data: { content: string; format?: string; conversationId?: string }) => {
const finalContent = data.content || '';
const finalFormat = data.format || 'text';
if (streamingMessageId) {
setMessages(prev => {
const updated = prev.map(m => m.id === streamingMessageId ? { ...m, content: finalContent, format: finalFormat, timestamp: new Date() } : m);
const storageKey = isJustChat ? 'messages_just_chat' : `messages_${activeAgentId}`;
localStorage.setItem(storageKey, JSON.stringify(updated));
return updated;
});
streamingMessageId = null;
} else {
// fallback: push a new assistant message
const agentMessage: Message = {
id: Date.now().toString(),
role: 'assistant',
content: finalContent,
timestamp: new Date(),
format: finalFormat,
};
setMessages(prev => {
const updated = [...prev, agentMessage];
const storageKey = isJustChat ? 'messages_just_chat' : `messages_${activeAgentId}`;
localStorage.setItem(storageKey, JSON.stringify(updated));
return updated;
});
}
setIsTyping(false);
});
@ -126,6 +177,11 @@ export const useChat = (props?: UseChatProps) => {
updateAgent(id, { icon: newIcon });
}, [updateAgent]);
// Set agent selected model
const setAgentModel = useCallback((agentId: string, model: AIModel | null) => {
updateAgent(agentId, { selectedModel: model });
}, [updateAgent]);
// Delete agent
const handleDeleteAgent = useCallback((id: string) => {
deleteAgent(id);
@ -162,18 +218,37 @@ export const useChat = (props?: UseChatProps) => {
setIsTyping(true);
console.log('🚀 Sending message with model:', selectedModel);
// Determine effective model: agent's selectedModel > global selectedModel
let modelToUse: AIModel | null = selectedModel || null;
if (activeAgentId) {
const agent = agents.find(a => a.id === activeAgentId);
if (agent && agent.selectedModel) {
modelToUse = agent.selectedModel;
}
}
console.log('🚀 Sending message with model:', modelToUse);
console.log('📝 Message content:', content);
console.log('🤖 Agent ID:', activeAgentId);
console.log('💬 Is Just Chat:', isJustChat);
// Determine system prompt (agent purpose) if an agent is active and not Just Chat
let systemPrompt: string | null = null;
if (activeAgentId && !isJustChat) {
const agent = agents.find(a => a.id === activeAgentId);
if (agent && agent.description) {
systemPrompt = agent.description;
}
}
socket.emit('user_message', {
message: content,
agentId: activeAgentId,
isJustChat: isJustChat,
selectedModel: selectedModel,
selectedModel: modelToUse,
systemPrompt,
});
}, [socket, activeAgentId, isJustChat, selectedModel]);
}, [socket, activeAgentId, isJustChat, selectedModel, agents]);
return {
messages,
@ -188,6 +263,6 @@ export const useChat = (props?: UseChatProps) => {
renameAgent,
changeAgentIcon,
deleteAgent: handleDeleteAgent,
setAgentModel,
};
};

View File

@ -1,8 +1,9 @@
export interface Message {
id: string;
role: 'user' | 'assistant';
role: 'user' | 'assistant' | 'agent';
content: string;
timestamp: Date;
format?: 'markdown' | 'rich' | 'text';
}
export interface Conversation {
@ -20,4 +21,3 @@ export interface ChatState {
activeConversationId: string;
isTyping: boolean;
}

View File

@ -16,7 +16,7 @@
"dependencies": {
"@lobehub/fluent-emoji": "^4.1.0",
"@lobehub/ui": "^4.38.0",
"@prisma/client": "^7.4.0",
"@prisma/client": "5.22.0",
"@types/react": "^19.2.14",
"@types/react-dom": "^19.2.3",
"antd": "^6.3.0",
@ -42,7 +42,7 @@
"concurrently": "^9.2.1",
"eslint": "^8.56.0",
"prettier": "^3.2.4",
"prisma": "^7.4.0",
"prisma": "5.22.0",
"tsx": "^4.7.0",
"typescript": "^5.5.3",
"vite": "^7.3.1"

5
src/db/prisma.ts Normal file
View File

@ -0,0 +1,5 @@
import { PrismaClient } from '@prisma/client';

// Reuse a single PrismaClient across module re-evaluations (e.g. tsx/vite
// watch-mode reloads). Instantiating a new client on every reload leaks
// database connection pools; caching on globalThis survives module reloads
// within the same process.
const globalForPrisma = globalThis as unknown as { __prisma?: PrismaClient };

const prisma = globalForPrisma.__prisma ?? (globalForPrisma.__prisma = new PrismaClient());

export default prisma;

View File

@ -1,3 +1,6 @@
import dotenv from 'dotenv';
dotenv.config();
import { Application } from './core/Application';
import logger from './utils/logger';

View File

@ -6,7 +6,9 @@ import cors from 'cors';
import logger from '../utils/logger';
import { config } from '../config';
import providerRouter from './routes/provider';
import { AIServiceFactory, AIMessage } from '../services/AIService';
import agentsRouter from './routes/agents';
import { AIServiceFactory, AIMessage, detectFormat } from '../services/AIService';
import prisma from '../db/prisma';
export class WebServer {
private app: Express;
@ -35,6 +37,7 @@ export class WebServer {
private setupRoutes(): void {
// API Routes (deben ir primero)
this.app.use('/api', providerRouter);
this.app.use('/api/agents', agentsRouter);
logger.info('API routes mounted at /api');
// Health check
@ -67,13 +70,45 @@ export class WebServer {
let providerConfigs: Record<string, any> = {};
// Receive provider configurations from client
socket.on('provider_configs', (configs) => {
socket.on('provider_configs', async (configs) => {
providerConfigs = configs;
logger.info(`Provider configurations received for ${socket.id}`);
try {
// Ensure default user exists
let user = await prisma.user.findUnique({ where: { email: 'local@localhost' } });
if (!user) {
user = await prisma.user.create({ data: { email: 'local@localhost', password: 'local', name: 'Local User' } });
logger.info('Created default local user in DB');
}
// Upsert provider configs for this user
for (const pid of Object.keys(configs)) {
const cfg = configs[pid];
await prisma.aIProvider.upsert({
where: { userId_providerId: { userId: user.id, providerId: pid } },
update: {
apiKey: cfg.apiKey || null,
enabled: cfg.enabled || false,
updatedAt: new Date(),
},
create: {
userId: user.id,
providerId: pid,
name: pid,
enabled: cfg.enabled || false,
apiKey: cfg.apiKey || null,
},
});
}
logger.info('Provider configs persisted to DB');
} catch (err) {
logger.error('Error persisting provider configs to DB:', (err as any).message);
}
});
socket.on('user_message', async (data) => {
const { message, agentId, isJustChat, selectedModel } = data;
const { message, agentId, isJustChat, selectedModel, systemPrompt } = data;
logger.info(`📨 Message received from ${socket.id}`);
logger.info(`📝 Message: ${message}`);
@ -126,46 +161,103 @@ export class WebServer {
logger.info(`✅ AIService created successfully`);
// Get or create conversation history
const conversationKey = agentId || 'just_chat';
let messages = conversationHistory.get(conversationKey) || [];
// Get or create conversation DB record
let conversation = null;
try {
// find default user
let user = await prisma.user.findUnique({ where: { email: 'local@localhost' } });
if (!user) {
user = await prisma.user.create({ data: { email: 'local@localhost', password: 'local', name: 'Local User' } });
}
// Add system message if it's an agent with description
if (agentId && !isJustChat && messages.length === 0) {
// TODO: Get agent description from configuration
messages.push({
role: 'system',
content: 'You are a helpful and friendly assistant.',
});
if (agentId) {
// find or create conversation linked to agent
conversation = await prisma.conversation.create({
data: {
userId: user.id,
title: `Conversation for ${agentId}`,
agentId: agentId,
modelId: selectedModel?.id || null,
providerId: selectedModel?.providerId || null,
}
});
} else {
// just chat
conversation = await prisma.conversation.create({
data: {
userId: user.id,
title: 'Just Chat',
modelId: selectedModel?.id || null,
providerId: selectedModel?.providerId || null,
}
});
}
} catch (err) {
logger.error('Error creating conversation in DB:', (err as any).message);
}
// Add user message
messages.push({
role: 'user',
content: message,
});
// Build messages array for AI service
const messagesForAI: AIMessage[] = [];
if (systemPrompt) {
messagesForAI.push({ role: 'system', content: systemPrompt });
logger.info('System prompt added to conversation');
}
messagesForAI.push({ role: 'user', content: message });
// Generate response
const response = await aiService.generateResponse(messages);
// Persist user message to DB
if (conversation) {
try {
await prisma.message.create({
data: {
conversationId: conversation.id,
role: 'user',
content: message,
}
});
} catch (err) {
logger.error('Error saving user message to DB:', (err as any).message);
}
}
// Add response to history
messages.push({
role: 'assistant',
content: response.content,
});
// Stream response in chunks back to client and persist messages
let accumulated = '';
try {
await aiService.generateStreamingResponse(messagesForAI, async (chunk: string) => {
accumulated += chunk;
// emit chunk to client
socket.emit('ai_response_chunk', { chunk, conversationId: conversation?.id || null });
});
// Save updated history
conversationHistory.set(conversationKey, messages);
// after streaming finished, finalize
const finalFormat = (accumulated ? (detectFormat(accumulated as any) as any) : 'text');
// Send response to client
socket.emit('ai_response', {
content: response.content,
timestamp: new Date(),
conversationId: conversationKey,
usage: response.usage,
});
// persist assistant message
if (conversation) {
try {
await prisma.message.create({
data: {
conversationId: conversation.id,
role: 'assistant',
content: accumulated,
}
});
} catch (err) {
logger.error('Error saving assistant message to DB:', (err as any).message);
}
}
logger.info(`Response sent to ${socket.id} (${response.usage?.totalTokens || 0} tokens)`);
// send end event
socket.emit('ai_response_end', {
content: accumulated,
format: finalFormat,
conversationId: conversation?.id || null,
});
logger.info(`Stream response completed for ${socket.id}`);
} catch (err) {
logger.error('Error during streaming response:', (err as any).message);
socket.emit('error', { message: 'Error generating response', timestamp: new Date() });
}
} catch (error: any) {
logger.error(`Error processing message: ${error.message}`);
@ -237,4 +329,3 @@ export class WebServer {
});
}
}

View File

@ -0,0 +1,72 @@
import express from 'express';
import prisma from '../../db/prisma';

const router = express.Router();

// Single-user local mode: every agent belongs to a default local account.
// Looks it up by its fixed email, creating it on first use.
async function getOrCreateLocalUser() {
  const email = 'local@localhost';
  let user = await prisma.user.findUnique({ where: { email } });
  if (!user) {
    user = await prisma.user.create({ data: { email, password: 'local', name: 'Local User' } });
  }
  return user;
}

// GET /api/agents — list the local user's agents, newest first.
router.get('/', async (req, res) => {
  try {
    const user = await getOrCreateLocalUser();
    const agents = await prisma.agent.findMany({ where: { userId: user.id }, orderBy: { createdAt: 'desc' } });
    res.json({ success: true, data: agents });
  } catch (err) {
    res.status(500).json({ success: false, error: (err as any).message });
  }
});

// POST /api/agents — create an agent for the local user.
// Missing fields fall back to sensible defaults.
router.post('/', async (req, res) => {
  const { name, emoji, description } = req.body;
  try {
    const user = await getOrCreateLocalUser();
    const agent = await prisma.agent.create({
      data: {
        userId: user.id,
        name: name || 'New Agent',
        emoji: emoji || '🤖',
        role: 'assistant',
        description: description || '',
        status: 'active',
      },
    });
    res.json({ success: true, data: agent });
  } catch (err) {
    res.status(500).json({ success: false, error: (err as any).message });
  }
});

// PUT /api/agents/:id — update an agent.
// Only whitelisted fields are applied: forwarding req.body verbatim to Prisma
// would let a client rewrite protected columns such as userId (mass assignment).
router.put('/:id', async (req, res) => {
  const id = req.params.id;
  const allowed = ['name', 'emoji', 'role', 'description', 'status'] as const;
  const data: Record<string, unknown> = {};
  for (const key of allowed) {
    if (key in req.body) data[key] = req.body[key];
  }
  try {
    const agent = await prisma.agent.update({ where: { id }, data });
    res.json({ success: true, data: agent });
  } catch (err) {
    // Prisma signals "record not found" with error code P2025.
    if ((err as any).code === 'P2025') {
      res.status(404).json({ success: false, error: 'Agent not found' });
      return;
    }
    res.status(500).json({ success: false, error: (err as any).message });
  }
});

// DELETE /api/agents/:id — remove an agent.
router.delete('/:id', async (req, res) => {
  const id = req.params.id;
  try {
    await prisma.agent.delete({ where: { id } });
    res.json({ success: true });
  } catch (err) {
    if ((err as any).code === 'P2025') {
      res.status(404).json({ success: false, error: 'Agent not found' });
      return;
    }
    res.status(500).json({ success: false, error: (err as any).message });
  }
});

export default router;

View File

@ -14,6 +14,7 @@ export interface AIProviderConfig {
export interface AIResponse {
content: string;
format?: 'markdown' | 'rich' | 'text';
finishReason?: string;
usage?: {
promptTokens: number;
@ -22,6 +23,16 @@ export interface AIResponse {
};
}
// Heuristic markers, hoisted to module scope so each regex is compiled once.
const MARKDOWN_MARKERS = /(^#{1,6}\s)|(```|\*\*|\*\w|\n- |\n\d+\.)/m;
const HTML_TAG = /<[^>]+>/;
const JSON_LIKE = /\{\s*"[\w]+"\s*:\s*/;

/**
 * Heuristically classify a model response so the client can choose a renderer.
 * HTML-ish or JSON-ish content is treated as 'rich' and takes precedence over
 * markdown markers; anything else falls back to plain 'text'.
 */
export function detectFormat(text: string): 'markdown' | 'rich' | 'text' {
  if (!text) return 'text';
  if (HTML_TAG.test(text) || JSON_LIKE.test(text)) return 'rich';
  if (MARKDOWN_MARKERS.test(text)) return 'markdown';
  return 'text';
}
/**
* Servicio unificado para interactuar con diferentes providers de IA
*/
@ -93,8 +104,10 @@ export class AIService {
);
const choice = response.data.choices[0];
const content = choice.message.content;
return {
content: choice.message.content,
content,
format: detectFormat(content),
finishReason: choice.finish_reason,
usage: {
promptTokens: response.data.usage.prompt_tokens,
@ -131,8 +144,10 @@ export class AIService {
}
);
const content = response.data.content[0].text;
return {
content: response.data.content[0].text,
content,
format: detectFormat(content),
finishReason: response.data.stop_reason,
usage: {
promptTokens: response.data.usage.input_tokens,
@ -177,8 +192,10 @@ export class AIService {
);
const candidate = response.data.candidates[0];
const content = candidate.content.parts[0].text;
return {
content: candidate.content.parts[0].text,
content,
format: detectFormat(content),
finishReason: candidate.finishReason,
usage: response.data.usageMetadata ? {
promptTokens: response.data.usageMetadata.promptTokenCount,
@ -208,8 +225,10 @@ export class AIService {
);
const choice = response.data.choices[0];
const content = choice.message.content;
return {
content: choice.message.content,
content,
format: detectFormat(content),
finishReason: choice.finish_reason,
usage: {
promptTokens: response.data.usage.prompt_tokens,
@ -247,8 +266,10 @@ export class AIService {
}
);
const content = response.data.text;
return {
content: response.data.text,
content,
format: detectFormat(content),
finishReason: response.data.finish_reason,
usage: response.data.meta?.tokens ? {
promptTokens: response.data.meta.tokens.input_tokens,
@ -261,12 +282,26 @@ export class AIService {
/**
* Generar respuesta streaming (para implementación futura)
*/
async generateStreamingResponse(
messages: AIMessage[],
onChunk: (chunk: string) => void
): Promise<void> {
// TODO: Implementar streaming para cada provider
throw new Error('Streaming no implementado aún');
async generateStreamingResponse(
messages: AIMessage[],
onChunk: (chunk: string) => void
): Promise<void> {
// For now, call the full response generator and then stream it in chunks
const full = await this.generateResponse(messages);
const content = full.content || '';
// Simple chunking: split by sentences, fallback to words
const sentenceChunks = content.split(/(?<=\.|\?|!|\n\n)\s+/).filter(Boolean);
const chunks = sentenceChunks.length > 1 ? sentenceChunks : content.split(/\s+/);
for (let i = 0; i < chunks.length; i++) {
const chunk = (i === 0) ? chunks[i] : ` ${chunks[i]}`;
onChunk(chunk);
// add small delay to simulate streaming
await new Promise((res) => setTimeout(res, 120));
}
return;
}
}
@ -278,4 +313,3 @@ export class AIServiceFactory {
return new AIService(provider, config);
}
}