IMPLEMENTADO - Soporte para Anthropic Claude y Corrección de Configuración

🔧 Backend v1.2.0 | Frontend v1.2.5
He implementado completamente el soporte para Anthropic Claude y corregido el problema de visualización en el módulo de configuración:

🎯 Cambios Implementados:
1. Backend - Soporte para Anthropic Claude
Nuevos modelos agregados:

Claude Sonnet 4.5 (Recomendado) - Equilibrio perfecto
Claude Opus 4.5 - Máxima capacidad
Claude Haiku 4.5 - Ultra rápido y económico
Integración completa:

Chat Assistant con Anthropic
Generación de resúmenes PDF con Anthropic
Manejo correcto de formato de mensajes (system separado)
Configuración por defecto: claude-sonnet-4-5
This commit is contained in:
2025-12-04 11:38:48 -03:00
parent 9de059e9ca
commit 24eb039302
7 changed files with 81 additions and 14 deletions

View File

@@ -276,7 +276,7 @@ def extract_pdf_text_smart(pdf_content: bytes, max_chars: int = None) -> dict:
}
BACKEND_VERSION = "1.1.2"
BACKEND_VERSION = "1.2.0"
app = FastAPI(title="Checklist Inteligente API", version=BACKEND_VERSION)
# S3/MinIO configuration
@@ -1512,12 +1512,12 @@ REGLAS:
"""
try:
# Usar OpenAI o Gemini según configuración
if config.provider == "openai" and config.openai_api_key:
client = openai.OpenAI(api_key=config.openai_api_key)
# Usar OpenAI, Anthropic o Gemini según configuración
if config.provider == "openai":
client = openai.OpenAI(api_key=config.api_key)
response = await asyncio.to_thread(
client.chat.completions.create,
model=config.openai_model or "gpt-4o",
model=config.model_name or "gpt-4o",
messages=[{"role": "user", "content": summary_prompt}],
temperature=0.3,
max_tokens=800,
@@ -1525,10 +1525,22 @@ REGLAS:
)
summary_json = response.choices[0].message.content
elif config.provider == "gemini" and config.gemini_api_key:
genai.configure(api_key=config.gemini_api_key)
elif config.provider == "anthropic":
import anthropic as anthropic_lib
client = anthropic_lib.Anthropic(api_key=config.api_key)
response = await asyncio.to_thread(
client.messages.create,
model=config.model_name or "claude-sonnet-4-5",
max_tokens=800,
temperature=0.3,
messages=[{"role": "user", "content": summary_prompt + "\n\nRespuesta en formato JSON:"}]
)
summary_json = response.content[0].text
elif config.provider == "gemini":
genai.configure(api_key=config.api_key)
model = genai.GenerativeModel(
model_name=config.gemini_model or "gemini-2.0-flash-exp",
model_name=config.model_name or "gemini-2.5-pro",
generation_config={
"temperature": 0.3,
"max_output_tokens": 800,
@@ -2726,6 +2738,25 @@ def get_available_ai_models(current_user: models.User = Depends(get_current_user
"name": "Gemini 1.5 Flash Latest",
"provider": "gemini",
"description": "Modelo 1.5 rápido para análisis básicos"
},
# Anthropic Claude Models
{
"id": "claude-sonnet-4-5",
"name": "Claude Sonnet 4.5 (Recomendado)",
"provider": "anthropic",
"description": "Equilibrio perfecto entre velocidad e inteligencia, ideal para diagnósticos automotrices"
},
{
"id": "claude-opus-4-5",
"name": "Claude Opus 4.5",
"provider": "anthropic",
"description": "Máxima capacidad para análisis complejos y razonamiento profundo"
},
{
"id": "claude-haiku-4-5",
"name": "Claude Haiku 4.5",
"provider": "anthropic",
"description": "Ultra rápido y económico, perfecto para análisis en tiempo real"
}
]
@@ -2771,6 +2802,8 @@ def create_ai_configuration(
model_name = "gpt-4o"
elif config.provider == "gemini":
model_name = "gemini-2.5-pro"
elif config.provider == "anthropic":
model_name = "claude-sonnet-4-5"
else:
model_name = "default"
@@ -3510,6 +3543,28 @@ Longitud: {response_length}
ai_response = response.choices[0].message.content
confidence = 0.85 # OpenAI no devuelve confidence directo
elif ai_config.provider == 'anthropic':
import anthropic
# Crear cliente de Anthropic
client = anthropic.Anthropic(api_key=ai_config.api_key)
    # Anthropic uses a different format: the system prompt is passed separately from messages
# El primer mensaje es el system prompt
system_content = messages[0]['content'] if messages[0]['role'] == 'system' else ""
user_messages = [msg for msg in messages if msg['role'] != 'system']
response = client.messages.create(
model=ai_config.model_name or "claude-sonnet-4-5",
max_tokens=max_tokens,
system=system_content,
messages=user_messages,
temperature=0.7
)
ai_response = response.content[0].text
confidence = 0.85
elif ai_config.provider == 'gemini':
import google.generativeai as genai
genai.configure(api_key=ai_config.api_key)

View File

@@ -269,7 +269,7 @@ class InspectionDetail(Inspection):
# AI Configuration Schemas
class AIConfigurationBase(BaseModel):
provider: str # openai, gemini
provider: str # openai, gemini, anthropic
api_key: str
model_name: Optional[str] = None
logo_url: Optional[str] = None

View File

@@ -11,6 +11,7 @@ passlib==1.7.4
bcrypt==4.0.1
python-multipart==0.0.6
openai==1.10.0
anthropic==0.40.0
google-generativeai==0.3.2
Pillow==10.2.0
reportlab==4.0.9