import logging
from typing import Dict, Any, List, Optional, Union

import httpx
from fastapi import HTTPException

from modules.configuration import APP_CONFIG

# Configure logger
logger = logging.getLogger(__name__)


# Load configuration data
def load_config_data():
    return {
        "api_key": APP_CONFIG.get('Connector_AiAnthropic_API_SECRET'),
        "api_url": APP_CONFIG.get('Connector_AiAnthropic_API_URL'),
        "model_name": APP_CONFIG.get('Connector_AiAnthropic_MODEL_NAME'),
        "temperature": float(APP_CONFIG.get('Connector_AiAnthropic_TEMPERATURE')),
        "max_tokens": int(APP_CONFIG.get('Connector_AiAnthropic_MAX_TOKENS'))
    }


class ChatService:
    """
    Connector for communication with the Anthropic API.
    """

    def __init__(self):
        # Load configuration
        self.config = load_config_data()
        self.api_key = self.config["api_key"]
        self.api_url = self.config["api_url"]
        self.model_name = self.config["model_name"]

        # HTTP client for API calls
        self.http_client = httpx.AsyncClient(
            timeout=120.0,  # Longer timeout for complex requests
            headers={
                "x-api-key": self.api_key,
                "anthropic-version": "2023-06-01",  # Anthropic API version
                "Content-Type": "application/json"
            }
        )

        logger.info(f"Anthropic connector initialized with model: {self.model_name}")

    async def call_api(self, messages: List[Dict[str, Any]],
                       temperature: Optional[float] = None,
                       max_tokens: Optional[int] = None) -> Dict[str, Any]:
        """
        Calls the Anthropic API with the given messages.

        Args:
            messages: List of messages in OpenAI format (role, content)
            temperature: Temperature for response generation (0.0-1.0)
            max_tokens: Maximum number of tokens in the response

        Returns:
            The response converted to OpenAI format

        Raises:
            HTTPException: For errors in API communication
        """
        try:
            # Convert OpenAI format to Anthropic format
            formatted_messages = self._convert_to_anthropic_format(messages)

            # Use parameters from the configuration if none were passed in
            if temperature is None:
                temperature = self.config.get("temperature", 0.2)
            if max_tokens is None:
                max_tokens = self.config.get("max_tokens", 2000)

            # Create the Anthropic API payload
            payload = {
                "model": self.model_name,
                "messages": formatted_messages,
                "temperature": temperature,
                "max_tokens": max_tokens
            }

            response = await self.http_client.post(self.api_url, json=payload)

            if response.status_code != 200:
                logger.error(f"Anthropic API error: {response.status_code} - {response.text}")
                raise HTTPException(status_code=500, detail="Error communicating with Anthropic API")

            # Convert the response from Anthropic format to OpenAI format
            anthropic_response = response.json()
            return self._convert_to_openai_format(anthropic_response)

        except HTTPException:
            # Re-raise HTTP errors unchanged instead of wrapping them again
            raise
        except Exception as e:
            logger.error(f"Error calling Anthropic API: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Error calling Anthropic API: {str(e)}")
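    # Illustrative sketch of the conversion performed by _convert_to_anthropic_format
    # below. The message texts are made up for the example; only the structure matters.
    #
    #   OpenAI-style input:
    #       [{"role": "system", "content": "You are a helpful assistant."},
    #        {"role": "user", "content": "Hello!"}]
    #
    #   Anthropic-style output (system text prepended to the first user message):
    #       [{"role": "user", "content": "You are a helpful assistant.\n\nHello!"}]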
""" anthropic_messages = [] system_content = "" # First extract all system messages for msg in openai_messages: if msg.get("role") == "system": system_content += msg.get("content", "") + "\n\n" # Convert the remaining messages for i, msg in enumerate(openai_messages): role = msg.get("role") content = msg.get("content", "") # Skip system messages (already extracted) if role == "system": continue # For the first user message: prepend system content if available if role == "user" and system_content and not any(m.get("role") == "user" for m in anthropic_messages): if isinstance(content, str): content = system_content + content elif isinstance(content, list): # If content is an array (for multimodal messages) text_parts = [] for part in content: if part.get("type") == "text": text_parts.append(part) if text_parts: text_parts[0]["text"] = system_content + text_parts[0].get("text", "") # Anthropic only supports "user" and "assistant" roles if role not in ["user", "assistant"]: role = "user" anthropic_messages.append({"role": role, "content": content}) return anthropic_messages def _convert_to_openai_format(self, anthropic_response: Dict[str, Any]) -> Dict[str, Any]: """ Converts a response from Anthropic format to OpenAI format. """ # Extract content from Anthropic response content = "" if "content" in anthropic_response: if isinstance(anthropic_response["content"], list): # Content is a list of parts (in newer API versions) for part in anthropic_response["content"]: if part.get("type") == "text": content += part.get("text", "") else: # Direct content as string (in older API versions) content = anthropic_response["content"] # Create OpenAI-formatted response return { "id": anthropic_response.get("id", ""), "object": "chat.completion", "created": anthropic_response.get("created", 0), "model": anthropic_response.get("model", self.model_name), "choices": [ { "message": { "role": "assistant", "content": content }, "index": 0, "finish_reason": "stop" } ] } async def analyze_image(self, image_data: Union[str, bytes], mime_type: str = None, prompt: str = "Describe this image") -> str: """ Analyzes an image with the OpenAI Vision API. Args: image_data: Either a file path (str) or image data (bytes) mime_type: The MIME type of the image (optional, only for binary data) prompt: The prompt for analysis Returns: The response from the OpenAI Vision API as text """ try: # Distinguish between file path and binary data if isinstance(image_data, str): # It's a file path - import filehandling only when needed from modules import agentservice_filemanager as file_handler base64_data, auto_mime_type = file_handler.encode_file_to_base64(image_data) mime_type = mime_type or auto_mime_type else: # It's binary data import base64 base64_data = base64.b64encode(image_data).decode('utf-8') # MIME type must be specified for binary data if not mime_type: # Fallback to generic image type mime_type = "image/png" # Prepare the payload for the Vision API messages = [ { "role": "user", "content": [ {"type": "text", "text": prompt}, { "type": "image_url", "image_url": { "url": f"data:{mime_type};base64,{base64_data}" } } ] } ] # Use the existing call_api function with the Vision model response = await self.call_api(messages) # Extract and return content return response["choices"][0]["message"]["content"] except Exception as e: logger.error(f"Error during image analysis: {str(e)}", exc_info=True) return f"[Error during image analysis: {str(e)}]"