diff --git a/backend/app/main.py b/backend/app/main.py
index 363559c..230f509 100644
--- a/backend/app/main.py
+++ b/backend/app/main.py
@@ -131,7 +131,8 @@ def send_completed_inspection_to_n8n(inspection, db):
                 "id": answer.question.id,
                 "texto": answer.question.text,
                 "seccion": answer.question.section,
-                "orden": answer.question.order
+                "orden": answer.question.order,
+                "tipo": answer.question.type
             },
             "respuesta": answer.answer_value,
             "estado": answer.status,
@@ -139,7 +140,8 @@ def send_completed_inspection_to_n8n(inspection, db):
             "puntos_obtenidos": answer.points_earned,
             "es_critico": answer.is_flagged,
             "imagenes": imagenes,
-            "ai_analysis": answer.ai_analysis
+            "ai_analysis": answer.ai_analysis,
+            "chat_history": answer.chat_history  # Incluir historial de chat si existe
         })
 
     # Preparar datos completos de la inspección
@@ -207,7 +209,7 @@ def send_completed_inspection_to_n8n(inspection, db):
         # No lanzamos excepción para no interrumpir el flujo normal
 
-BACKEND_VERSION = "1.0.90"
+BACKEND_VERSION = "1.0.91"
 
 app = FastAPI(title="Checklist Inteligente API", version=BACKEND_VERSION)
 
 # S3/MinIO configuration
@@ -2959,33 +2961,80 @@ Responde en formato JSON:
 
 @app.post("/api/ai/chat-assistant")
 async def chat_with_ai_assistant(
-    request: dict,
+    question_id: int = Form(...),
+    inspection_id: int = Form(...),
+    user_message: str = Form(""),
+    chat_history: str = Form("[]"),
+    context_photos: str = Form("[]"),
+    vehicle_info: str = Form("{}"),
+    assistant_prompt: str = Form(""),
+    assistant_instructions: str = Form(""),
+    response_length: str = Form("medium"),
+    files: List[UploadFile] = File(default=[]),
     db: Session = Depends(get_db),
     current_user: models.User = Depends(get_current_user)
 ):
     """
     Chat conversacional con IA usando contexto de fotos anteriores
     El asistente tiene acceso a fotos de preguntas previas para dar mejor contexto
+    Ahora soporta archivos adjuntos (imágenes y PDFs)
     """
     print("\n" + "="*80)
     print("🤖 AI CHAT ASSISTANT")
     print("="*80)
 
-    question_id = request.get('question_id')
-    inspection_id = request.get('inspection_id')
-    user_message = request.get('user_message')
-    chat_history = request.get('chat_history', [])
-    context_photos = request.get('context_photos', [])
-    assistant_prompt = request.get('assistant_prompt', '')
-    assistant_instructions = request.get('assistant_instructions', '')
-    response_length = request.get('response_length', 'medium')
-    vehicle_info = request.get('vehicle_info', {})
+    # Parsear JSON strings
+    import json
+    chat_history_list = json.loads(chat_history)
+    context_photos_list = json.loads(context_photos)
+    vehicle_info_dict = json.loads(vehicle_info)
 
     print(f"📋 Question ID: {question_id}")
     print(f"🚗 Inspection ID: {inspection_id}")
     print(f"💬 User message: {user_message}")
-    print(f"📸 Context photos: {len(context_photos)} fotos")
-    print(f"💭 Chat history: {len(chat_history)} mensajes previos")
+    print(f"📎 Attached files: {len(files)}")
+    print(f"📸 Context photos: {len(context_photos_list)} fotos")
+    print(f"💭 Chat history: {len(chat_history_list)} mensajes previos")
+
+    # Procesar archivos adjuntos
+    attached_files_data = []
+    if files:
+        import base64
+        from pypdf import PdfReader
+        from io import BytesIO
+
+        for file in files:
+            file_content = await file.read()
+            file_type = file.content_type
+
+            file_info = {
+                'filename': file.filename,
+                'type': file_type,
+                'size': len(file_content)
+            }
+
+            # Si es PDF, extraer texto
+            if file_type == 'application/pdf' or file.filename.lower().endswith('.pdf'):
+                try:
+                    pdf_file = BytesIO(file_content)
+                    pdf_reader = PdfReader(pdf_file)
+                    pdf_text = ""
+                    for page in pdf_reader.pages:
+                        pdf_text += page.extract_text()
+                    file_info['content_type'] = 'pdf'
+                    file_info['text'] = pdf_text[:2000]  # Limitar texto
+                    print(f"📄 PDF procesado: {file.filename} - {len(pdf_text)} caracteres")
+                except Exception as e:
+                    print(f"❌ Error procesando PDF {file.filename}: {str(e)}")
+                    file_info['error'] = str(e)
+
+            # Si es imagen, convertir a base64
+            elif file_type.startswith('image/'):
+                file_info['content_type'] = 'image'
+                file_info['base64'] = base64.b64encode(file_content).decode('utf-8')
+                print(f"🖼️ Imagen procesada: {file.filename}")
+
+            attached_files_data.append(file_info)
 
     # Obtener configuración de IA
     ai_config = db.query(models.AIConfiguration).filter(
@@ -3003,17 +3052,17 @@ async def chat_with_ai_assistant(
     # Construir el contexto del vehículo
     vehicle_context = f"""
 INFORMACIÓN DEL VEHÍCULO:
-- Marca: {vehicle_info.get('brand', 'N/A')}
-- Modelo: {vehicle_info.get('model', 'N/A')}
-- Placa: {vehicle_info.get('plate', 'N/A')}
-- Kilometraje: {vehicle_info.get('km', 'N/A')} km
+- Marca: {vehicle_info_dict.get('brand', 'N/A')}
+- Modelo: {vehicle_info_dict.get('model', 'N/A')}
+- Placa: {vehicle_info_dict.get('plate', 'N/A')}
+- Kilometraje: {vehicle_info_dict.get('km', 'N/A')} km
 """
 
     # Construir el contexto de las fotos anteriores
     photos_context = ""
-    if context_photos:
-        photos_context = f"\n\nFOTOS ANALIZADAS PREVIAMENTE ({len(context_photos)} imágenes):\n"
-        for idx, photo in enumerate(context_photos[:10], 1):  # Limitar a 10 fotos
+    if context_photos_list:
+        photos_context = f"\n\nFOTOS ANALIZADAS PREVIAMENTE ({len(context_photos_list)} imágenes):\n"
+        for idx, photo in enumerate(context_photos_list[:10], 1):  # Limitar a 10 fotos
             ai_analysis = photo.get('aiAnalysis', [])
             if ai_analysis and len(ai_analysis) > 0:
                 analysis_text = ai_analysis[0].get('analysis', {})
@@ -3029,6 +3078,18 @@ INFORMACIÓN DEL VEHÍCULO:
     }
     max_tokens = max_tokens_map.get(response_length, 400)
 
+    # Construir contexto de archivos adjuntos
+    attached_context = ""
+    if attached_files_data:
+        attached_context = f"\n\nARCHIVOS ADJUNTOS EN ESTE MENSAJE ({len(attached_files_data)} archivos):\n"
+        for idx, file_info in enumerate(attached_files_data, 1):
+            if file_info.get('content_type') == 'pdf':
+                attached_context += f"\n{idx}. PDF: {file_info['filename']}\n"
+                if 'text' in file_info:
+                    attached_context += f"   Contenido: {file_info['text'][:500]}...\n"
+            elif file_info.get('content_type') == 'image':
+                attached_context += f"\n{idx}. Imagen: {file_info['filename']}\n"
+
     # Construir el system prompt
     base_prompt = assistant_prompt or "Eres un experto mecánico automotriz que ayuda a diagnosticar problemas."
@@ -3038,6 +3099,8 @@ INFORMACIÓN DEL VEHÍCULO:
 
 {photos_context}
 
+{attached_context}
+
 INSTRUCCIONES ADICIONALES:
 {assistant_instructions if assistant_instructions else "Sé técnico, claro y directo en tus respuestas."}
 
@@ -3053,17 +3116,39 @@ FORMATO DE RESPUESTA:
     messages = [{"role": "system", "content": system_prompt}]
 
     # Agregar historial previo (últimos 10 mensajes para no saturar)
-    for msg in chat_history[-10:]:
+    for msg in chat_history_list[-10:]:
         messages.append({
             "role": msg.get('role'),
             "content": msg.get('content')
         })
 
-    # Agregar el mensaje actual del usuario
-    messages.append({
-        "role": "user",
-        "content": user_message
-    })
+    # Agregar el mensaje actual del usuario con imágenes si hay
+    has_images = any(f.get('content_type') == 'image' for f in attached_files_data)
+
+    if has_images:
+        # Formato multimodal para OpenAI/Gemini
+        user_content = []
+        if user_message:
+            user_content.append({"type": "text", "text": user_message})
+
+        # Agregar imágenes
+        for file_info in attached_files_data:
+            if file_info.get('content_type') == 'image':
+                user_content.append({
+                    "type": "image_url",
+                    "image_url": {"url": f"data:image/jpeg;base64,{file_info['base64']}"}
+                })
+
+        messages.append({
+            "role": "user",
+            "content": user_content
+        })
+    else:
+        # Solo texto
+        messages.append({
+            "role": "user",
+            "content": user_message
+        })
 
     print(f"🔧 Enviando a {ai_config.provider} con {len(messages)} mensajes")
 
@@ -3111,7 +3196,8 @@ FORMATO DE RESPUESTA:
             "response": ai_response,
             "confidence": confidence,
             "provider": ai_config.provider,
-            "model": ai_config.model_name
+            "model": ai_config.model_name,
+            "attached_files": [{'filename': f['filename'], 'type': f['type']} for f in attached_files_data]
         }
 
     except Exception as e:
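Note on the endpoint change above: `/api/ai/chat-assistant` no longer reads a JSON body; it now expects `multipart/form-data`, with the structured fields serialized as JSON strings and any attachments sent under `files`. The following is a minimal client sketch, assuming the `httpx` library and placeholder token, field values and file paths (none of this is part of the diff):

```python
# Hedged sketch: how a client could call the reworked endpoint.
# Assumes `httpx` is installed; values and file paths are placeholders.
import json
import httpx

def send_chat_message(api_url: str, token: str) -> dict:
    form_data = {
        "question_id": "42",                 # form fields travel as strings; FastAPI coerces to int
        "inspection_id": "7",
        "user_message": "¿Qué opinas del desgaste de esta pastilla?",
        "chat_history": json.dumps([]),      # JSON-encoded, matching the Form("[]") defaults
        "context_photos": json.dumps([]),
        "vehicle_info": json.dumps({"brand": "Toyota", "model": "Hilux"}),
        "response_length": "medium",
    }
    files = [
        ("files", ("pastilla.jpg", open("pastilla.jpg", "rb"), "image/jpeg")),
        ("files", ("manual.pdf", open("manual.pdf", "rb"), "application/pdf")),
    ]
    resp = httpx.post(
        f"{api_url}/api/ai/chat-assistant",
        data=form_data,
        files=files,                         # httpx builds the multipart body and Content-Type
        headers={"Authorization": f"Bearer {token}"},
        timeout=60.0,
    )
    resp.raise_for_status()
    return resp.json()
```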
diff --git a/backend/app/models.py b/backend/app/models.py
index c4baf27..7d07411 100644
--- a/backend/app/models.py
+++ b/backend/app/models.py
@@ -157,6 +157,7 @@ class Answer(Base):
     comment = Column(Text)  # Comentarios adicionales
     ai_analysis = Column(JSON)  # Análisis de IA si aplica
+    chat_history = Column(JSON)  # Historial de chat con AI Assistant (para tipo ai_assistant)
     is_flagged = Column(Boolean, default=False)  # Si requiere atención
     created_at = Column(DateTime(timezone=True), server_default=func.now())
 
diff --git a/backend/app/schemas.py b/backend/app/schemas.py
index d6448d1..6b1bdce 100644
--- a/backend/app/schemas.py
+++ b/backend/app/schemas.py
@@ -215,6 +215,7 @@ class AnswerCreate(AnswerBase):
     inspection_id: int
     question_id: int
     ai_analysis: Optional[list] = None  # Lista de análisis de IA (soporta múltiples imágenes)
+    chat_history: Optional[list] = None  # Historial de chat con AI Assistant
 
 class AnswerUpdate(AnswerBase):
     pass
@@ -225,6 +226,7 @@ class Answer(AnswerBase):
     question_id: int
     points_earned: int
     ai_analysis: Optional[list] = None  # Lista de análisis de IA
+    chat_history: Optional[list] = None  # Historial de chat con AI Assistant
     created_at: datetime
 
     class Config:
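The `chat_history` JSON column added to `models.Answer` above has no accompanying migration in this diff. If the project uses Alembic, a minimal revision could look like the sketch below; the table name `answers` and the revision identifiers are assumptions, not taken from the repo:

```python
# Hedged sketch: an Alembic revision that would add the new column.
# Assumes Alembic is in use and the table is named "answers".
from alembic import op
import sqlalchemy as sa

revision = "add_chat_history_to_answers"   # placeholder identifier
down_revision = None                       # placeholder identifier

def upgrade():
    # Nullable JSON column, so existing rows need no backfill
    op.add_column("answers", sa.Column("chat_history", sa.JSON(), nullable=True))

def downgrade():
    op.drop_column("answers", "chat_history")
```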
diff --git a/frontend/package.json b/frontend/package.json
index 18a88fd..167b53b 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -1,7 +1,7 @@
 {
   "name": "checklist-frontend",
   "private": true,
-  "version": "1.0.92",
+  "version": "1.0.93",
   "type": "module",
   "scripts": {
     "dev": "vite",
diff --git a/frontend/public/service-worker.js b/frontend/public/service-worker.js
index 5d8182c..3970457 100644
--- a/frontend/public/service-worker.js
+++ b/frontend/public/service-worker.js
@@ -1,6 +1,6 @@
 // Service Worker para PWA con detección de actualizaciones
 // IMPORTANTE: Actualizar esta versión cada vez que se despliegue una nueva versión
-const CACHE_NAME = 'ayutec-v1.0.92';
+const CACHE_NAME = 'ayutec-v1.0.93';
 const urlsToCache = [
   '/',
   '/index.html'
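In `App.jsx` below, the `/api/answers` payload now carries `chat_history`, and `schemas.AnswerCreate` accepts it, but the create-answer handler itself is not shown in this diff. Assuming a conventional FastAPI create endpoint that copies schema fields onto the model, the new field would flow through roughly as in this sketch (not the project's actual handler):

```python
# Hedged sketch: persistence of the new chat_history field in /api/answers.
# Reuses main.py's existing names (app, models, schemas, Session, Depends, get_db);
# the real handler likely also handles auth, scoring and updates.
@app.post("/api/answers", response_model=schemas.Answer)
def create_answer(answer: schemas.AnswerCreate, db: Session = Depends(get_db)):
    db_answer = models.Answer(
        inspection_id=answer.inspection_id,
        question_id=answer.question_id,
        status=answer.status,
        comment=answer.comment,
        ai_analysis=answer.ai_analysis,
        chat_history=answer.chat_history,  # new field introduced by this diff
        is_flagged=answer.is_flagged,
    )
    db.add(db_answer)
    db.commit()
    db.refresh(db_answer)
    return db_answer
```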
diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx
index 0ebdcb9..3d58052 100644
--- a/frontend/src/App.jsx
+++ b/frontend/src/App.jsx
@@ -4270,7 +4270,8 @@ function InspectionModal({ checklist, existingInspection, user, onClose, onCompl
           status: status,
           comment: answer.observations || null,
           ai_analysis: answer.aiAnalysis || null,
-          is_flagged: status === 'critical'
+          is_flagged: status === 'critical',
+          chat_history: answer.chatHistory || null  // Agregar historial de chat
         }
 
         const response = await fetch(`${API_URL}/api/answers`, {
@@ -5265,7 +5266,9 @@ function InspectionModal({ checklist, existingInspection, user, onClose, onCompl
 // Componente Modal de Chat IA Asistente
 function AIAssistantChatModal({ question, inspection, allAnswers, messages, setMessages, loading, setLoading, onClose }) {
   const [inputMessage, setInputMessage] = useState('')
+  const [attachedFiles, setAttachedFiles] = useState([])
   const chatEndRef = useRef(null)
+  const fileInputRef = useRef(null)
   const config = question.options || {}
 
   // Auto-scroll al final
@@ -5273,19 +5276,66 @@ function AIAssistantChatModal({ question, inspection, allAnswers, messages, setM
     chatEndRef.current?.scrollIntoView({ behavior: 'smooth' })
   }, [messages])
 
+  // Manejar adjuntos de archivos
+  const handleFileAttach = (e) => {
+    const files = Array.from(e.target.files)
+    const validFiles = files.filter(file => {
+      const isImage = file.type.startsWith('image/')
+      const isPDF = file.type === 'application/pdf'
+      const isValid = isImage || isPDF
+      if (!isValid) {
+        alert(`⚠️ ${file.name}: Solo se permiten imágenes y PDFs`)
+      }
+      return isValid
+    })
+    setAttachedFiles(prev => [...prev, ...validFiles])
+  }
+
+  const removeAttachedFile = (index) => {
+    setAttachedFiles(prev => prev.filter((_, i) => i !== index))
+  }
+
   // Enviar mensaje al asistente
   const sendMessage = async () => {
-    if (!inputMessage.trim() || loading) return
+    if ((!inputMessage.trim() && attachedFiles.length === 0) || loading) return
 
-    const userMessage = { role: 'user', content: inputMessage, timestamp: new Date().toISOString() }
+    const userMessage = {
+      role: 'user',
+      content: inputMessage || '📎 Archivos adjuntos',
+      timestamp: new Date().toISOString(),
+      files: attachedFiles.map(f => ({ name: f.name, type: f.type, size: f.size }))
+    }
     setMessages(prev => [...prev, userMessage])
+
+    const currentFiles = attachedFiles
     setInputMessage('')
+    setAttachedFiles([])
     setLoading(true)
 
     try {
       const token = localStorage.getItem('token')
       const API_URL = import.meta.env.VITE_API_URL || ''
 
+      // Preparar FormData para enviar archivos
+      const formData = new FormData()
+      formData.append('question_id', question.id)
+      formData.append('inspection_id', inspection.id)
+      formData.append('user_message', inputMessage)
+      formData.append('chat_history', JSON.stringify(messages))
+      formData.append('assistant_prompt', config.assistant_prompt || '')
+      formData.append('assistant_instructions', config.assistant_instructions || '')
+      formData.append('response_length', config.response_length || 'medium')
+
+      // Adjuntar archivos
+      currentFiles.forEach((file, index) => {
+        formData.append('files', file)
+      })
+
       // Recopilar fotos de preguntas anteriores según configuración
       const contextPhotos = []
       const contextQuestionIds = config.context_questions
@@ -5308,33 +5358,23 @@ function AIAssistantChatModal({ question, inspection, allAnswers, messages, setM
         }
       })
 
-      // Preparar el payload
-      const payload = {
-        question_id: question.id,
-        inspection_id: inspection.id,
-        user_message: inputMessage,
-        chat_history: messages,
-        context_photos: contextPhotos,
-        assistant_prompt: config.assistant_prompt || '',
-        assistant_instructions: config.assistant_instructions || '',
-        response_length: config.response_length || 'medium',
-        vehicle_info: {
-          brand: inspection.vehicle_brand,
-          model: inspection.vehicle_model,
-          plate: inspection.vehicle_plate,
-          km: inspection.vehicle_km
-        }
-      }
+      formData.append('context_photos', JSON.stringify(contextPhotos))
+      formData.append('vehicle_info', JSON.stringify({
+        brand: inspection.vehicle_brand,
+        model: inspection.vehicle_model,
+        plate: inspection.vehicle_plate,
+        km: inspection.vehicle_km
+      }))
 
-      console.log('📤 Enviando a chat IA:', payload)
+      console.log('📤 Enviando a chat IA con archivos:', currentFiles.length)
 
       const response = await fetch(`${API_URL}/api/ai/chat-assistant`, {
         method: 'POST',
         headers: {
-          'Authorization': `Bearer ${token}`,
-          'Content-Type': 'application/json'
+          'Authorization': `Bearer ${token}`
+          // No incluir Content-Type, fetch lo establece automáticamente con FormData
         },
-        body: JSON.stringify(payload)
+        body: formData
       })
 
       if (!response.ok) {
@@ -5430,6 +5470,18 @@
                   <div>{msg.content}</div>
+                  {/* Mostrar archivos adjuntos si existen */}
+                  {msg.files && msg.files.length > 0 && (
+                    <div>
+                      {msg.files.map((file, fIdx) => (
+                        <div key={fIdx}>
+                          <span>{file.type.startsWith('image/') ? '🖼️' : '📄'}</span>
+                          <span>{file.name}</span>
+                          <span>({(file.size / 1024).toFixed(1)} KB)</span>
+                        </div>
+                      ))}
+                    </div>
+                  )}
@@ ... @@
+            {/* Preview de archivos adjuntos */}
+            {attachedFiles.length > 0 && (
+              <div>
+                {attachedFiles.map((file, idx) => (
+                  <div key={idx}>
+                    <span>{file.type.startsWith('image/') ? '🖼️' : '📄'}</span>
+                    <span>{file.name}</span>
+                    <button onClick={() => removeAttachedFile(idx)}>✕</button>
+                  </div>
+                ))}
+              </div>
+            )}
+            <input
+              ref={fileInputRef}
+              type="file"
+              accept="image/*,application/pdf"
+              multiple
+              onChange={handleFileAttach}
+              style={{ display: 'none' }}
+            />
+            <button onClick={() => fileInputRef.current?.click()}>📎</button>
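One final hardening note, not part of this diff: the endpoint calls `json.loads` directly on the `chat_history`, `context_photos` and `vehicle_info` form fields, so a malformed value from the client surfaces as an unhandled 500. A small defensive-parsing sketch, with illustrative names only:

```python
# Hedged sketch: tolerant decoding for the JSON-encoded form fields used by
# /api/ai/chat-assistant. Falls back to a default instead of raising.
import json
from typing import Any

def parse_json_field(raw: str, default: Any) -> Any:
    """Return the decoded value, or the default if the field is not valid JSON
    or decodes to an unexpected type."""
    try:
        value = json.loads(raw)
    except (json.JSONDecodeError, TypeError):
        return default
    return value if isinstance(value, type(default)) else default

# Usage inside the endpoint, mirroring the current json.loads calls:
# chat_history_list = parse_json_field(chat_history, [])
# context_photos_list = parse_json_field(context_photos, [])
# vehicle_info_dict = parse_json_field(vehicle_info, {})
```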