import logging
import json
from typing import List

import httpx
from fastapi import HTTPException

from modules.shared.configuration import APP_CONFIG
from modules.aicore.aicoreBase import BaseConnectorAi
from modules.datamodels.datamodelAi import (
    AiModel,
    AiCallPromptImage,
    AiModelCall,
    AiModelResponse,
    OperationTypeEnum,
    PriorityEnum,
    ProcessingModeEnum,
    createOperationTypeRatings,
)

# Configure logger
logger = logging.getLogger(__name__)


class ContextLengthExceededException(Exception):
    """Raised when the prompt exceeds the model's context length limit."""
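

# Example (sketch): an upstream caller can catch this exception to shrink the
# prompt and retry; truncateMessages is a hypothetical helper, not part of
# this module:
#   try:
#       response = await connector.callAiBasic(call)
#   except ContextLengthExceededException:
#       call.messages = truncateMessages(call.messages)
#       response = await connector.callAiBasic(call)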


def loadConfigData():
    """Load configuration data for the OpenAI connector."""
    return {
        "apiKey": APP_CONFIG.get('Connector_AiOpenai_API_SECRET'),
    }
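
# Note (assumption): APP_CONFIG is expected to expose the key under the
# 'Connector_AiOpenai_API_SECRET' entry, e.g. supplied via an environment
# variable or config file:
#   Connector_AiOpenai_API_SECRET=sk-...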


class AiOpenai(BaseConnectorAi):
    """Connector for communication with the OpenAI API."""

    def __init__(self):
        super().__init__()
        # Load configuration
        self.config = loadConfigData()
        self.apiKey = self.config["apiKey"]

        # Shared HTTP client for API calls
        self.httpClient = httpx.AsyncClient(
            timeout=120.0,  # Longer timeout for complex requests
            headers={
                "Authorization": f"Bearer {self.apiKey}",
                "Content-Type": "application/json"
            }
        )
        logger.info("OpenAI Connector initialized")

    def getConnectorType(self) -> str:
        """Get the connector type identifier."""
        return "openai"

    def getModels(self) -> List[AiModel]:
        """Get all available OpenAI models."""
        return [
            AiModel(
                name="gpt-4o",
                displayName="OpenAI GPT-4o",
                connectorType="openai",
                apiUrl="https://api.openai.com/v1/chat/completions",
                temperature=0.2,
                maxTokens=16384,
                contextLength=128000,
                costPer1kTokensInput=0.03,
                costPer1kTokensOutput=0.06,
                speedRating=7,    # Good speed for complex tasks
                qualityRating=9,  # High quality
                functionCall=self.callAiBasic,
                priority=PriorityEnum.BALANCED,
                processingMode=ProcessingModeEnum.ADVANCED,
                operationTypes=createOperationTypeRatings(
                    (OperationTypeEnum.PLAN, 8),
                    (OperationTypeEnum.DATA_ANALYSE, 9),
                    (OperationTypeEnum.DATA_GENERATE, 9),
                    (OperationTypeEnum.DATA_EXTRACT, 7)
                ),
                version="gpt-4o",
                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.03 + (bytesReceived / 4 / 1000) * 0.06
            ),
            AiModel(
                name="gpt-3.5-turbo",
                displayName="OpenAI GPT-3.5 Turbo",
                connectorType="openai",
                apiUrl="https://api.openai.com/v1/chat/completions",
                temperature=0.2,
                maxTokens=4096,
                contextLength=16000,
                costPer1kTokensInput=0.0015,
                costPer1kTokensOutput=0.002,
                speedRating=9,    # Very fast
                qualityRating=7,  # Good but not premium
                functionCall=self.callAiBasic,
                priority=PriorityEnum.SPEED,
                processingMode=ProcessingModeEnum.BASIC,
                operationTypes=createOperationTypeRatings(
                    (OperationTypeEnum.PLAN, 7),
                    (OperationTypeEnum.DATA_ANALYSE, 8),
                    (OperationTypeEnum.DATA_GENERATE, 8)
                    # Note: gpt-3.5-turbo does NOT support vision/image operations
                ),
                version="gpt-3.5-turbo",
                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0015 + (bytesReceived / 4 / 1000) * 0.002
            ),
            AiModel(
                name="gpt-4o",
                displayName="OpenAI GPT-4o Instance Vision",
                connectorType="openai",
                apiUrl="https://api.openai.com/v1/chat/completions",
                temperature=0.2,
                maxTokens=16384,
                contextLength=128000,
                costPer1kTokensInput=0.03,
                costPer1kTokensOutput=0.06,
                speedRating=6,    # Slower for vision tasks
                qualityRating=9,  # High-quality vision
                functionCall=self.callAiImage,
                priority=PriorityEnum.QUALITY,
                processingMode=ProcessingModeEnum.DETAILED,
                operationTypes=createOperationTypeRatings(
                    (OperationTypeEnum.IMAGE_ANALYSE, 9)
                ),
                version="gpt-4o",
                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.03 + (bytesReceived / 4 / 1000) * 0.06
            ),
            AiModel(
                name="dall-e-3",
                displayName="OpenAI DALL-E 3",
                connectorType="openai",
                apiUrl="https://api.openai.com/v1/images/generations",
                temperature=0.0,  # Image generation doesn't use temperature
                maxTokens=0,      # Image generation doesn't use tokens
                contextLength=0,
                costPer1kTokensInput=0.04,
                costPer1kTokensOutput=0.0,
                speedRating=5,    # Slow for image generation
                qualityRating=9,  # High-quality art generation
                functionCall=self.generateImage,
                priority=PriorityEnum.QUALITY,
                processingMode=ProcessingModeEnum.DETAILED,
                operationTypes=createOperationTypeRatings(
                    (OperationTypeEnum.IMAGE_GENERATE, 10)
                ),
                version="dall-e-3",
                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.04
            )
        ]
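
    # Pricing sketch: the calculatePriceUsd lambdas approximate token counts
    # from payload size, assuming roughly 4 bytes per token. For the gpt-4o
    # entry above:
    #   8,000 bytes sent     -> ~2,000 input tokens -> 2.0 * $0.03 = $0.06
    #   2,000 bytes received -> ~500 output tokens  -> 0.5 * $0.06 = $0.03
    #   estimated cost for a call of that size: $0.09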

    async def callAiBasic(self, modelCall: AiModelCall) -> AiModelResponse:
        """
        Call the OpenAI API with the given messages using the standardized pattern.

        Args:
            modelCall: AiModelCall with messages and options

        Returns:
            AiModelResponse with content and metadata

        Raises:
            ContextLengthExceededException: If the prompt exceeds the model's context length
            HTTPException: For other errors in the API communication
        """
        try:
            # Extract parameters from modelCall
            messages = modelCall.messages
            model = modelCall.model
            options = modelCall.options

            # An explicit temperature in the options overrides the model default
            temperature = getattr(options, "temperature", None)
            if temperature is None:
                temperature = model.temperature
            maxTokens = model.maxTokens

            payload = {
                "model": model.name,
                "messages": messages,
                "temperature": temperature,
                "max_tokens": maxTokens
            }

            response = await self.httpClient.post(
                model.apiUrl,
                json=payload
            )

            if response.status_code != 200:
                error_message = f"OpenAI API error: {response.status_code} - {response.text}"
                logger.error(error_message)

                # Check for a context-length-exceeded error
                if response.status_code == 400:
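                    # A context overflow typically arrives as a 400 with a body
                    # like (shape per OpenAI's documented error format):
                    #   {"error": {"message": "This model's maximum context length is ...",
                    #              "type": "invalid_request_error",
                    #              "param": "messages",
                    #              "code": "context_length_exceeded"}}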
                    try:
                        error_data = response.json()
                        if (error_data.get("error", {}).get("code") == "context_length_exceeded" or
                                "context length" in error_data.get("error", {}).get("message", "").lower()):
                            # Raise a specific exception for context length issues
                            raise ContextLengthExceededException(
                                f"Context length exceeded: {error_data.get('error', {}).get('message', 'Unknown error')}"
                            )
                    except (ValueError, KeyError):
                        pass  # If we can't parse the error, fall through to the generic error

                # Include the actual error details in the exception
                raise HTTPException(status_code=500, detail=error_message)

            responseJson = response.json()
            content = responseJson["choices"][0]["message"]["content"]

            return AiModelResponse(
                content=content,
                success=True,
                modelId=model.name,
                metadata={"response_id": responseJson.get("id", "")}
            )

        except ContextLengthExceededException:
            # Re-raise context length exceptions without wrapping
            raise
        except HTTPException:
            # Re-raise HTTP errors as-is instead of re-wrapping them below
            raise
        except Exception as e:
            logger.error(f"Error calling OpenAI API: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Error calling OpenAI API: {str(e)}") from e
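
    # Usage sketch: an explicit temperature in the options overrides the model
    # default (field names assumed; SimpleNamespace stands in for the real
    # options type):
    #   from types import SimpleNamespace
    #   call = AiModelCall(model=model, messages=msgs,
    #                      options=SimpleNamespace(temperature=0.0))
    #   response = await connector.callAiBasic(call)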

    async def callAiImage(self, modelCall: AiModelCall) -> AiModelResponse:
        """
        Analyze an image with the OpenAI Vision API using the standardized pattern.

        Args:
            modelCall: AiModelCall whose messages already embed the image data

        Returns:
            AiModelResponse with the analysis content
        """
        try:
            # Extract parameters from modelCall
            messages = modelCall.messages
            model = modelCall.model

            # Messages should already be in the correct format with the image
            # data embedded; just verify that they contain content
            if not messages or not messages[0].get("content"):
                raise ValueError("No messages provided for image analysis")

            logger.debug(f"Starting image analysis with {len(messages)} message(s)...")

            # Use the messages directly - they should already contain the image
            # data in the format:
            #   {"type": "image_url", "image_url": {"url": "data:...;base64,..."}}

            # Use the temperature from the model; max_tokens is deliberately
            # omitted so the model can use its full output budget
            temperature = model.temperature

            payload = {
                "model": model.name,
                "messages": messages,
                "temperature": temperature
            }

            response = await self.httpClient.post(
                model.apiUrl,
                json=payload
            )

            if response.status_code != 200:
                logger.error(f"OpenAI API error: {response.status_code} - {response.text}")
                raise HTTPException(status_code=500, detail="Error communicating with OpenAI API")

            responseJson = response.json()
            content = responseJson["choices"][0]["message"]["content"]

            return AiModelResponse(
                content=content,
                success=True,
                modelId=model.name,
                metadata={"response_id": responseJson.get("id", "")}
            )

        except Exception as e:
            logger.error(f"Error during image analysis: {str(e)}", exc_info=True)
            return AiModelResponse(
                content="",
                success=False,
                error=f"Error during image analysis: {str(e)}"
            )
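
    # Vision message sketch (OpenAI chat format; base64 payload elided):
    #   [{"role": "user",
    #     "content": [
    #         {"type": "text", "text": "Describe this image."},
    #         {"type": "image_url",
    #          "image_url": {"url": "data:image/png;base64,<...>"}}]}]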

    async def generateImage(self, modelCall: AiModelCall) -> AiModelResponse:
        """
        Generate an image with DALL-E 3 using the standardized pattern.

        Args:
            modelCall: AiModelCall with the prompt in messages and generation options

        Returns:
            AiModelResponse with the generated image data (base64)
        """
        try:
            # Extract parameters from modelCall
            messages = modelCall.messages
            model = modelCall.model
            options = modelCall.options

            # Get the prompt from the first message
            promptContent = messages[0]["content"] if messages else ""

            # Parse the prompt with the AiCallPromptImage model
            try:
                # Try to parse it as JSON
                promptData = json.loads(promptContent)
                promptModel = AiCallPromptImage(**promptData)
            except (ValueError, TypeError):
                # Not valid JSON - treat it as a plain text prompt
                promptModel = AiCallPromptImage(
                    prompt=promptContent,
                    size=getattr(options, 'size', None) or "1024x1024",
                    quality=getattr(options, 'quality', None) or "standard",
                    style=getattr(options, 'style', None) or "vivid"
                )

            # Extract parameters from the Pydantic model
            prompt = promptModel.prompt
            size = promptModel.size or "1024x1024"
            quality = promptModel.quality or "standard"
            style = promptModel.style or "vivid"

            logger.debug(f"Starting image generation with prompt: '{prompt[:100]}...'")

            payload = {
                "model": "dall-e-3",
                "prompt": prompt,
                "size": size,
                "quality": quality,
                "style": style,
                "n": 1,
                "response_format": "b64_json"  # Get base64 data directly instead of URLs
            }

            # model.apiUrl points at the images/generations endpoint (see
            # getModels); the shared client already carries the auth headers
            response = await self.httpClient.post(
                model.apiUrl,
                json=payload
            )

            if response.status_code != 200:
                logger.error(f"DALL-E API error: {response.status_code} - {response.text}")
                return AiModelResponse(
                    content="",
                    success=False,
                    error=f"DALL-E API error: {response.status_code} - {response.text}"
                )

            responseJson = response.json()
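
            # Expected success shape (abbreviated, per OpenAI's images API):
            #   {"created": 1700000000,
            #    "data": [{"b64_json": "<base64...>", "revised_prompt": "..."}]}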
            if "data" in responseJson and len(responseJson["data"]) > 0:
                image_data = responseJson["data"][0]["b64_json"]

                logger.info(f"Successfully generated image: {len(image_data)} characters")
                return AiModelResponse(
                    content=image_data,
                    success=True,
                    modelId="dall-e-3",
                    metadata={
                        "size": size,
                        "quality": quality,
                        "style": style,
                        "response_id": responseJson.get("id", "")
                    }
                )
            else:
                logger.error("No image data in DALL-E response")
                return AiModelResponse(
                    content="",
                    success=False,
                    error="No image data in DALL-E response"
                )

        except Exception as e:
            logger.error(f"Error during image generation: {str(e)}", exc_info=True)
            return AiModelResponse(
                content="",
                success=False,
                error=f"Error during image generation: {str(e)}"
            )
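

# --- Usage sketch ---
# Assumptions: AiModelCall accepts these field names (they match the
# attributes read in the methods above) and can be constructed directly;
# adjust to the real model definition.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        connector = AiOpenai()
        # Dispatch through the functionCall bound to the first chat model
        chatModel = connector.getModels()[0]
        call = AiModelCall(
            model=chatModel,
            messages=[{"role": "user", "content": "Say hello in one sentence."}],
            options=None,
        )
        response = await chatModel.functionCall(call)
        print(response.content)
        await connector.httpClient.aclose()

    asyncio.run(_demo())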