diff --git a/backend/app/main.py b/backend/app/main.py index 4e96b7b..b675f03 100644 --- a/backend/app/main.py +++ b/backend/app/main.py @@ -276,7 +276,7 @@ def extract_pdf_text_smart(pdf_content: bytes, max_chars: int = None) -> dict: } -BACKEND_VERSION = "1.1.2" +BACKEND_VERSION = "1.2.0" app = FastAPI(title="Checklist Inteligente API", version=BACKEND_VERSION) # S3/MinIO configuration @@ -1512,12 +1512,12 @@ REGLAS: """ try: - # Usar OpenAI o Gemini según configuración - if config.provider == "openai" and config.openai_api_key: - client = openai.OpenAI(api_key=config.openai_api_key) + # Usar OpenAI, Anthropic o Gemini según configuración + if config.provider == "openai": + client = openai.OpenAI(api_key=config.api_key) response = await asyncio.to_thread( client.chat.completions.create, - model=config.openai_model or "gpt-4o", + model=config.model_name or "gpt-4o", messages=[{"role": "user", "content": summary_prompt}], temperature=0.3, max_tokens=800, @@ -1525,10 +1525,22 @@ REGLAS: ) summary_json = response.choices[0].message.content - elif config.provider == "gemini" and config.gemini_api_key: - genai.configure(api_key=config.gemini_api_key) + elif config.provider == "anthropic": + import anthropic as anthropic_lib + client = anthropic_lib.Anthropic(api_key=config.api_key) + response = await asyncio.to_thread( + client.messages.create, + model=config.model_name or "claude-sonnet-4-5", + max_tokens=800, + temperature=0.3, + messages=[{"role": "user", "content": summary_prompt + "\n\nRespuesta en formato JSON:"}] + ) + summary_json = response.content[0].text + + elif config.provider == "gemini": + genai.configure(api_key=config.api_key) model = genai.GenerativeModel( - model_name=config.gemini_model or "gemini-2.0-flash-exp", + model_name=config.model_name or "gemini-2.5-pro", generation_config={ "temperature": 0.3, "max_output_tokens": 800, @@ -2726,6 +2738,25 @@ def get_available_ai_models(current_user: models.User = Depends(get_current_user "name": "Gemini 
1.5 Flash Latest", "provider": "gemini", "description": "Modelo 1.5 rápido para análisis básicos" + }, + # Anthropic Claude Models + { + "id": "claude-sonnet-4-5", + "name": "Claude Sonnet 4.5 (Recomendado)", + "provider": "anthropic", + "description": "Equilibrio perfecto entre velocidad e inteligencia, ideal para diagnósticos automotrices" + }, + { + "id": "claude-opus-4-5", + "name": "Claude Opus 4.5", + "provider": "anthropic", + "description": "Máxima capacidad para análisis complejos y razonamiento profundo" + }, + { + "id": "claude-haiku-4-5", + "name": "Claude Haiku 4.5", + "provider": "anthropic", + "description": "Ultra rápido y económico, perfecto para análisis en tiempo real" + } ] @@ -2771,6 +2802,8 @@ def create_ai_configuration( model_name = "gpt-4o" elif config.provider == "gemini": model_name = "gemini-2.5-pro" + elif config.provider == "anthropic": + model_name = "claude-sonnet-4-5" else: model_name = "default" @@ -3510,6 +3543,28 @@ Longitud: {response_length} ai_response = response.choices[0].message.content confidence = 0.85 # OpenAI no devuelve confidence directo + elif ai_config.provider == 'anthropic': + import anthropic + + # Crear cliente de Anthropic + client = anthropic.Anthropic(api_key=ai_config.api_key) + + # Anthropic usa un formato diferente: system separado de messages + # El primer mensaje es el system prompt + system_content = messages[0]['content'] if messages[0]['role'] == 'system' else "" + user_messages = [msg for msg in messages if msg['role'] != 'system'] + + response = client.messages.create( + model=ai_config.model_name or "claude-sonnet-4-5", + max_tokens=max_tokens, + system=system_content, + messages=user_messages, + temperature=0.7 + ) + + ai_response = response.content[0].text + confidence = 0.85 + elif ai_config.provider == 'gemini': import google.generativeai as genai genai.configure(api_key=ai_config.api_key) diff --git a/backend/app/schemas.py b/backend/app/schemas.py index 5ac40ed..56066a8 100644 ---
a/backend/app/schemas.py +++ b/backend/app/schemas.py @@ -269,7 +269,7 @@ class InspectionDetail(Inspection): # AI Configuration Schemas class AIConfigurationBase(BaseModel): - provider: str # openai, gemini + provider: str # openai, gemini, anthropic api_key: str model_name: Optional[str] = None logo_url: Optional[str] = None diff --git a/backend/requirements.txt b/backend/requirements.txt index 94118a6..c1a227e 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -11,6 +11,7 @@ passlib==1.7.4 bcrypt==4.0.1 python-multipart==0.0.6 openai==1.10.0 +anthropic==0.40.0 google-generativeai==0.3.2 Pillow==10.2.0 reportlab==4.0.9 diff --git a/frontend/package.json b/frontend/package.json index 9a195c5..5186aaf 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,7 +1,7 @@ { "name": "checklist-frontend", "private": true, - "version": "1.2.4", + "version": "1.2.5", "type": "module", "scripts": { "dev": "vite", diff --git a/frontend/public/service-worker.js b/frontend/public/service-worker.js index c6f001a..c0c6293 100644 --- a/frontend/public/service-worker.js +++ b/frontend/public/service-worker.js @@ -1,6 +1,6 @@ // Service Worker para PWA con detección de actualizaciones // IMPORTANTE: Actualizar esta versión cada vez que se despliegue una nueva versión -const CACHE_NAME = 'ayutec-v1.2.4'; +const CACHE_NAME = 'ayutec-v1.2.5'; const urlsToCache = [ '/', '/index.html' diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index 44ac903..2fcecb0 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -654,6 +654,15 @@ function SettingsTab({ user }) {
OpenAI
GPT-4, GPT-4 Vision
+