From 24eb03930214a8aa691846f33764162e4612a1cc Mon Sep 17 00:00:00 2001 From: ronalds Date: Thu, 4 Dec 2025 11:38:48 -0300 Subject: [PATCH] =?UTF-8?q?=E2=9C=85=20IMPLEMENTADO=20-=20Soporte=20para?= =?UTF-8?q?=20Anthropic=20Claude=20y=20Correcci=C3=B3n=20de=20Configuraci?= =?UTF-8?q?=C3=B3n=20=F0=9F=94=A7=20Backend=20v1.2.0=20|=20Frontend=20v1.2?= =?UTF-8?q?.5=20He=20implementado=20completamente=20el=20soporte=20para=20?= =?UTF-8?q?Anthropic=20Claude=20y=20corregido=20el=20problema=20de=20visua?= =?UTF-8?q?lizaci=C3=B3n=20en=20el=20m=C3=B3dulo=20de=20configuraci=C3=B3n?= =?UTF-8?q?:?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🎯 Cambios Implementados: 1. Backend - Soporte para Anthropic Claude Nuevos modelos agregados: Claude Sonnet 4.5 (Recomendado) - Equilibrio perfecto Claude Opus 4.5 - Máxima capacidad Claude Haiku 4.5 - Ultra rápido y económico Integración completa: Chat Assistant con Anthropic Generación de resúmenes PDF con Anthropic Manejo correcto de formato de mensajes (system separado) Configuración por defecto: claude-sonnet-4.5 --- backend/app/main.py | 71 +++++++++++++++++++++++++++---- backend/app/schemas.py | 2 +- backend/requirements.txt | 1 + frontend/package.json | 2 +- frontend/public/service-worker.js | 2 +- frontend/src/App.jsx | 15 ++++++- frontend/src/Sidebar.jsx | 2 +- 7 files changed, 81 insertions(+), 14 deletions(-) diff --git a/backend/app/main.py b/backend/app/main.py index 4e96b7b..b675f03 100644 --- a/backend/app/main.py +++ b/backend/app/main.py @@ -276,7 +276,7 @@ def extract_pdf_text_smart(pdf_content: bytes, max_chars: int = None) -> dict: } -BACKEND_VERSION = "1.1.2" +BACKEND_VERSION = "1.2.0" app = FastAPI(title="Checklist Inteligente API", version=BACKEND_VERSION) # S3/MinIO configuration @@ -1512,12 +1512,12 @@ REGLAS: """ try: - # Usar OpenAI o Gemini según configuración - if config.provider == "openai" and config.openai_api_key: - client = 
openai.OpenAI(api_key=config.openai_api_key) + # Usar OpenAI, Anthropic o Gemini según configuración + if config.provider == "openai": + client = openai.OpenAI(api_key=config.api_key) response = await asyncio.to_thread( client.chat.completions.create, - model=config.openai_model or "gpt-4o", + model=config.model_name or "gpt-4o", messages=[{"role": "user", "content": summary_prompt}], temperature=0.3, max_tokens=800, @@ -1525,10 +1525,22 @@ REGLAS: ) summary_json = response.choices[0].message.content - elif config.provider == "gemini" and config.gemini_api_key: - genai.configure(api_key=config.gemini_api_key) + elif config.provider == "anthropic": + import anthropic as anthropic_lib + client = anthropic_lib.Anthropic(api_key=config.api_key) + response = await asyncio.to_thread( + client.messages.create, + model=config.model_name or "claude-sonnet-4-5", + max_tokens=800, + temperature=0.3, + messages=[{"role": "user", "content": summary_prompt + "\n\nRespuesta en formato JSON:"}] + ) + summary_json = response.content[0].text + + elif config.provider == "gemini": + genai.configure(api_key=config.api_key) model = genai.GenerativeModel( - model_name=config.gemini_model or "gemini-2.0-flash-exp", + model_name=config.model_name or "gemini-2.5-pro", generation_config={ "temperature": 0.3, "max_output_tokens": 800, @@ -2726,6 +2738,25 @@ def get_available_ai_models(current_user: models.User = Depends(get_current_user "name": "Gemini 1.5 Flash Latest", "provider": "gemini", "description": "Modelo 1.5 rápido para análisis básicos" + }, + # Anthropic Claude Models + { + "id": "claude-sonnet-4-5", + "name": "Claude Sonnet 4.5 (Recomendado)", + "provider": "anthropic", + "description": "Equilibrio perfecto entre velocidad e inteligencia, ideal para diagnósticos automotrices" + }, + { + "id": "claude-opus-4-5", + "name": "Claude Opus 4.5", + "provider": "anthropic", + "description": "Máxima capacidad para análisis complejos y razonamiento profundo" + }, + { + "id": 
"claude-haiku-4-5", + "name": "Claude Haiku 4.5", + "provider": "anthropic", + "description": "Ultra r谩pido y econ贸mico, perfecto para an谩lisis en tiempo real" } ] @@ -2771,6 +2802,8 @@ def create_ai_configuration( model_name = "gpt-4o" elif config.provider == "gemini": model_name = "gemini-2.5-pro" + elif config.provider == "anthropic": + model_name = "claude-sonnet-4-5" else: model_name = "default" @@ -3510,6 +3543,28 @@ Longitud: {response_length} ai_response = response.choices[0].message.content confidence = 0.85 # OpenAI no devuelve confidence directo + elif ai_config.provider == 'anthropic': + import anthropic + + # Crear cliente de Anthropic + client = anthropic.Anthropic(api_key=ai_config.api_key) + + # Antropic usa un formato diferente: system separado de messages + # El primer mensaje es el system prompt + system_content = messages[0]['content'] if messages[0]['role'] == 'system' else "" + user_messages = [msg for msg in messages if msg['role'] != 'system'] + + response = client.messages.create( + model=ai_config.model_name or "claude-sonnet-4-5", + max_tokens=max_tokens, + system=system_content, + messages=user_messages, + temperature=0.7 + ) + + ai_response = response.content[0].text + confidence = 0.85 + elif ai_config.provider == 'gemini': import google.generativeai as genai genai.configure(api_key=ai_config.api_key) diff --git a/backend/app/schemas.py b/backend/app/schemas.py index 5ac40ed..56066a8 100644 --- a/backend/app/schemas.py +++ b/backend/app/schemas.py @@ -269,7 +269,7 @@ class InspectionDetail(Inspection): # AI Configuration Schemas class AIConfigurationBase(BaseModel): - provider: str # openai, gemini + provider: str # openai, gemini, anthropic api_key: str model_name: Optional[str] = None logo_url: Optional[str] = None diff --git a/backend/requirements.txt b/backend/requirements.txt index 94118a6..c1a227e 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -11,6 +11,7 @@ passlib==1.7.4 bcrypt==4.0.1 
python-multipart==0.0.6 openai==1.10.0 +anthropic==0.40.0 google-generativeai==0.3.2 Pillow==10.2.0 reportlab==4.0.9 diff --git a/frontend/package.json b/frontend/package.json index 9a195c5..5186aaf 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,7 +1,7 @@ { "name": "checklist-frontend", "private": true, - "version": "1.2.4", + "version": "1.2.5", "type": "module", "scripts": { "dev": "vite", diff --git a/frontend/public/service-worker.js b/frontend/public/service-worker.js index c6f001a..c0c6293 100644 --- a/frontend/public/service-worker.js +++ b/frontend/public/service-worker.js @@ -1,6 +1,6 @@ // Service Worker para PWA con detección de actualizaciones // IMPORTANTE: Actualizar esta versión cada vez que se despliegue una nueva versión -const CACHE_NAME = 'ayutec-v1.2.4'; +const CACHE_NAME = 'ayutec-v1.2.5'; const urlsToCache = [ '/', '/index.html' diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index 44ac903..2fcecb0 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -654,6 +654,15 @@ function SettingsTab({ user }) {
OpenAI
GPT-4, GPT-4 Vision
+