gateway/connectors/connectorAiAnthropic.py
2025-04-26 02:13:22 +02:00

236 lines
No EOL
9.2 KiB
Python

import logging
import httpx
from typing import Dict, Any, List, Union
from fastapi import HTTPException
from modules.configuration import APP_CONFIG
# Configure logger
logger = logging.getLogger(__name__)
def loadConfigData():
    """Load configuration data for the Anthropic connector from APP_CONFIG.

    Returns:
        dict with keys apiKey, apiUrl, modelName, temperature and maxTokens.
        temperature/maxTokens fall back to 0.2 / 2000 (the same defaults
        callApi uses) when the config keys are missing, instead of crashing
        with TypeError on float(None) / int(None).
    """
    return {
        "apiKey": APP_CONFIG.get('Connector_AiAnthropic_API_SECRET'),
        "apiUrl": APP_CONFIG.get('Connector_AiAnthropic_API_URL'),
        "modelName": APP_CONFIG.get('Connector_AiAnthropic_MODEL_NAME'),
        # Defaults prevent a hard crash when the keys are absent from config.
        "temperature": float(APP_CONFIG.get('Connector_AiAnthropic_TEMPERATURE', 0.2)),
        "maxTokens": int(APP_CONFIG.get('Connector_AiAnthropic_MAX_TOKENS', 2000))
    }
class ChatService:
    """Connector for communication with the Anthropic Messages API.

    Accepts and returns messages in OpenAI chat format, translating to and
    from Anthropic's wire format internally.
    """

    def __init__(self):
        # Load configuration once at construction time.
        self.config = loadConfigData()
        self.apiKey = self.config["apiKey"]
        self.apiUrl = self.config["apiUrl"]
        self.modelName = self.config["modelName"]
        # Shared async HTTP client. Anthropic requires the x-api-key and
        # anthropic-version headers on every request.
        self.httpClient = httpx.AsyncClient(
            timeout=120.0,  # Longer timeout for complex requests
            headers={
                "x-api-key": self.apiKey,
                "anthropic-version": "2023-06-01",  # Anthropic API Version
                "Content-Type": "application/json"
            }
        )
        logger.info(f"Anthropic Connector initialized with model: {self.modelName}")

    async def callApi(self, messages: List[Dict[str, Any]], temperature: float = None, maxTokens: int = None) -> Dict[str, Any]:
        """
        Calls the Anthropic API with the given messages.

        Args:
            messages: List of messages in OpenAI format (role, content)
            temperature: Temperature for response generation (0.0-1.0);
                falls back to the configured value, then 0.2
            maxTokens: Maximum number of tokens in the response;
                falls back to the configured value, then 2000

        Returns:
            The response converted to OpenAI chat-completion format

        Raises:
            HTTPException: For errors in API communication
        """
        try:
            # Convert OpenAI format to Anthropic format
            formattedMessages = self._convertToAnthropicFormat(messages)
            # Use parameters from configuration if none were overridden
            if temperature is None:
                temperature = self.config.get("temperature", 0.2)
            if maxTokens is None:
                maxTokens = self.config.get("maxTokens", 2000)
            # Create Anthropic API payload
            payload = {
                "model": self.modelName,
                "messages": formattedMessages,
                "temperature": temperature,
                "max_tokens": maxTokens
            }
            response = await self.httpClient.post(
                self.apiUrl,
                json=payload
            )
            if response.status_code != 200:
                logger.error(f"Anthropic API error: {response.status_code} - {response.text}")
                raise HTTPException(status_code=500, detail="Error communicating with Anthropic API")
            # Convert response from Anthropic format to OpenAI format
            return self._convertToOpenaiFormat(response.json())
        except HTTPException:
            # Already logged above with the real status code; re-raise as-is
            # instead of wrapping it in a second, less specific HTTPException.
            raise
        except Exception as e:
            logger.error(f"Error calling Anthropic API: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Error calling Anthropic API: {str(e)}")

    def _convertToAnthropicFormat(self, openaiMessages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Converts messages from OpenAI format to Anthropic format.

        OpenAI uses:
        [{"role": "system", "content": "..."},
         {"role": "user", "content": "..."},
         {"role": "assistant", "content": "..."}]

        Anthropic messages only accept the "user" and "assistant" roles, so
        this connector folds all system messages into the first user message.
        NOTE(review): the Messages API also accepts a top-level "system"
        request parameter, which would be the native way to send this.

        The input messages are never mutated.
        """
        anthropicMessages = []
        # First collect the text of all system messages
        systemContent = ""
        for msg in openaiMessages:
            if msg.get("role") == "system":
                systemContent += msg.get("content", "") + "\n\n"
        # Convert the remaining messages
        for msg in openaiMessages:
            role = msg.get("role")
            content = msg.get("content", "")
            # Skip system messages (already collected above)
            if role == "system":
                continue
            # For the first user message: prepend system content if available
            if role == "user" and systemContent and not any(m.get("role") == "user" for m in anthropicMessages):
                if isinstance(content, str):
                    content = systemContent + content
                elif isinstance(content, list):
                    # Multimodal content: copy the parts so the caller's
                    # message objects are not mutated, then prepend the
                    # system text to the first text part.
                    newParts = [dict(part) for part in content]
                    for part in newParts:
                        if part.get("type") == "text":
                            part["text"] = systemContent + part.get("text", "")
                            break
                    content = newParts
            # Anthropic only supports "user" and "assistant" roles
            if role not in ["user", "assistant"]:
                role = "user"
            anthropicMessages.append({"role": role, "content": content})
        return anthropicMessages

    def _convertToOpenaiFormat(self, anthropicResponse: Dict[str, Any]) -> Dict[str, Any]:
        """
        Converts a response from Anthropic format to OpenAI chat-completion format.
        """
        # Extract the text content from the Anthropic response
        content = ""
        if "content" in anthropicResponse:
            if isinstance(anthropicResponse["content"], list):
                # Content is a list of typed parts (in newer API versions)
                for part in anthropicResponse["content"]:
                    if part.get("type") == "text":
                        content += part.get("text", "")
            else:
                # Direct content as string (in older API versions)
                content = anthropicResponse["content"]
        # Build the OpenAI-shaped response envelope
        return {
            "id": anthropicResponse.get("id", ""),
            "object": "chat.completion",
            # Anthropic responses carry no "created" timestamp; 0 is a
            # deliberate placeholder for OpenAI-format consumers.
            "created": anthropicResponse.get("created", 0),
            "model": anthropicResponse.get("model", self.modelName),
            "choices": [
                {
                    "message": {
                        "role": "assistant",
                        "content": content
                    },
                    "index": 0,
                    "finish_reason": "stop"
                }
            ]
        }

    async def analyzeImage(self, imageData: Union[str, bytes], mimeType: str = None, prompt: str = "Describe this image") -> str:
        """
        Analyzes an image using Anthropic's vision capabilities.

        Args:
            imageData: Either a file path (str) or raw image data (bytes)
            mimeType: The MIME type of the image (optional; used for binary
                data, defaults to image/png when omitted)
            prompt: The prompt for analysis

        Returns:
            The analysis response as text, or an error marker string on failure
        """
        try:
            # Distinguish between file path and binary data
            if isinstance(imageData, str):
                # It's a file path - import filehandling only when needed
                from modules import agentserviceFilemanager as fileHandler
                base64Data, autoMimeType = fileHandler.encodeFileToBase64(imageData)
                mimeType = mimeType or autoMimeType
            else:
                # It's binary data
                import base64
                base64Data = base64.b64encode(imageData).decode('utf-8')
                # MIME type must be specified for binary data
                if not mimeType:
                    # Fallback to generic image type
                    mimeType = "image/png"
            # The Anthropic Messages API expects a base64 "image" content
            # block ({"type": "image", "source": {...}}); the OpenAI-style
            # "image_url" shape used previously is rejected by the API.
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": prompt},
                        {
                            "type": "image",
                            "source": {
                                "type": "base64",
                                "media_type": mimeType,
                                "data": base64Data
                            }
                        }
                    ]
                }
            ]
            # Use the existing callApi function with the Vision model
            response = await self.callApi(messages)
            # Extract and return the assistant's text
            return response["choices"][0]["message"]["content"]
        except Exception as e:
            logger.error(f"Error during image analysis: {str(e)}", exc_info=True)
            return f"[Error during image analysis: {str(e)}]"