# Anthropic AI connector: wraps the Anthropic Messages API behind an
# async HTTP client and normalizes responses to the OpenAI chat format.
import logging
|
|
import httpx
|
|
from typing import Dict, Any, List, Union
|
|
from fastapi import HTTPException
|
|
from modules.shared.configuration import APP_CONFIG
|
|
|
|
# Configure logger
|
|
logger = logging.getLogger(__name__)
|
|
|
|
def loadConfigData():
    """Load configuration data for the Anthropic connector.

    Returns:
        dict with keys ``apiKey``, ``apiUrl``, ``modelName`` (str or None
        when unset), ``temperature`` (float) and ``maxTokens`` (int).

    Note:
        ``temperature`` and ``maxTokens`` fall back to 0.2 / 2000 when the
        config entry is missing. Previously ``float(None)`` / ``int(None)``
        raised a TypeError on a missing key; the defaults mirror the
        fallbacks already used by ``AiAnthropic.callAiBasic``.
    """
    rawTemperature = APP_CONFIG.get('Connector_AiAnthropic_TEMPERATURE')
    rawMaxTokens = APP_CONFIG.get('Connector_AiAnthropic_MAX_TOKENS')
    return {
        "apiKey": APP_CONFIG.get('Connector_AiAnthropic_API_SECRET'),
        "apiUrl": APP_CONFIG.get('Connector_AiAnthropic_API_URL'),
        "modelName": APP_CONFIG.get('Connector_AiAnthropic_MODEL_NAME'),
        "temperature": float(rawTemperature) if rawTemperature is not None else 0.2,
        "maxTokens": int(rawMaxTokens) if rawMaxTokens is not None else 2000
    }
|
|
|
|
class AiAnthropic:
    """Connector for communication with the Anthropic API.

    Holds a long-lived ``httpx.AsyncClient`` pre-configured with the API
    key and Anthropic version headers, and exposes:

    * :meth:`callAiBasic` -- chat completion, response normalized to the
      OpenAI ``chat.completion`` shape.
    * :meth:`callAiImage` -- image analysis via the same endpoint.
    """

    def __init__(self):
        # Load configuration once; frequently used fields are cached as
        # attributes for direct access.
        self.config = loadConfigData()
        self.apiKey = self.config["apiKey"]
        self.apiUrl = self.config["apiUrl"]
        self.modelName = self.config["modelName"]

        # HttpClient for API calls. Headers are set per-client so every
        # request carries authentication and the pinned API version.
        self.httpClient = httpx.AsyncClient(
            timeout=120.0,  # Longer timeout for complex requests
            headers={
                "x-api-key": self.apiKey,
                "anthropic-version": "2023-06-01",  # Anthropic API Version
                "Content-Type": "application/json"
            }
        )

        logger.info(f"Anthropic Connector initialized with model: {self.modelName}")

    async def callAiBasic(self, messages: List[Dict[str, Any]], temperature: float = None, maxTokens: int = None) -> Dict[str, Any]:
        """
        Calls the Anthropic API with the given messages.

        Args:
            messages: List of messages in OpenAI format (role, content)
            temperature: Temperature for response generation (0.0-1.0);
                falls back to the configured value when None.
            maxTokens: Maximum number of tokens in the response; falls
                back to the configured value when None.

        Returns:
            The response in OpenAI format (``chat.completion`` dict with a
            single choice).

        Raises:
            HTTPException: For errors in API communication
        """
        try:
            # Use parameters from configuration if none were overridden.
            if temperature is None:
                temperature = self.config.get("temperature", 0.2)

            if maxTokens is None:
                maxTokens = self.config.get("maxTokens", 2000)

            # Create Anthropic API payload. NOTE(review): messages are
            # forwarded as-is; the native Anthropic Messages API rejects a
            # "system" role inside `messages` -- confirm callers strip it.
            payload = {
                "model": self.modelName,
                "messages": messages,
                "temperature": temperature,
                "max_tokens": maxTokens
            }

            response = await self.httpClient.post(
                self.apiUrl,
                json=payload
            )

            if response.status_code != 200:
                logger.error(f"Anthropic API error: {response.status_code} - {response.text}")
                raise HTTPException(status_code=500, detail="Error communicating with Anthropic API")

            # Parse response
            anthropicResponse = response.json()

            # Extract content from response
            content = ""
            if "content" in anthropicResponse:
                if isinstance(anthropicResponse["content"], list):
                    # Content is a list of parts (in newer API versions);
                    # concatenate only the text parts.
                    for part in anthropicResponse["content"]:
                        if part.get("type") == "text":
                            content += part.get("text", "")
                else:
                    # Direct content as string (in older API versions)
                    content = anthropicResponse["content"]

            # Return in OpenAI format
            return {
                "id": anthropicResponse.get("id", ""),
                "object": "chat.completion",
                "created": anthropicResponse.get("created", 0),
                "model": anthropicResponse.get("model", self.modelName),
                "choices": [
                    {
                        "message": {
                            "role": "assistant",
                            "content": content
                        },
                        "index": 0,
                        "finish_reason": "stop"
                    }
                ]
            }

        except HTTPException:
            # Bug fix: previously the broad handler below re-wrapped our own
            # HTTPException, mangling the detail message. Propagate it as-is.
            raise
        except Exception as e:
            logger.error(f"Error calling Anthropic API: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Error calling Anthropic API: {str(e)}") from e

    async def callAiImage(self, prompt: str, imageData: Union[str, bytes], mimeType: str = None) -> str:
        """
        Analyzes an image using Anthropic's vision capabilities.

        Args:
            prompt: The prompt for analysis
            imageData: Either a file path (str) or image data (bytes)
            mimeType: The MIME type of the image (optional, only for
                binary data; defaults to ``image/png`` when omitted)

        Returns:
            The analysis response as text, or an ``[Error ...]`` string on
            failure (best-effort: this method never raises).
        """
        try:
            # Distinguish between file path and binary data
            if isinstance(imageData, str):
                # It's a file path - import filehandling only when needed
                from modules import agentserviceFilemanager as fileHandler
                base64Data, autoMimeType = fileHandler.encodeFileToBase64(imageData)
                mimeType = mimeType or autoMimeType
            else:
                # It's binary data
                import base64
                base64Data = base64.b64encode(imageData).decode('utf-8')
                # MIME type must be specified for binary data
                if not mimeType:
                    # Fallback to generic image type
                    mimeType = "image/png"

            # Prepare the payload for the Vision API.
            # NOTE(review): this is the OpenAI-style "image_url" part; the
            # native Anthropic Messages API expects
            # {"type": "image", "source": {"type": "base64", ...}} --
            # confirm the configured endpoint accepts this shape.
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": prompt},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:{mimeType};base64,{base64Data}"
                            }
                        }
                    ]
                }
            ]

            # Bug fix: the original called self.callApi(), which does not
            # exist (AttributeError on every image call); the chat method
            # is named callAiBasic.
            response = await self.callAiBasic(messages)

            # Extract and return content
            return response["choices"][0]["message"]["content"]

        except Exception as e:
            logger.error(f"Error during image analysis: {str(e)}", exc_info=True)
            return f"[Error during image analysis: {str(e)}]"