import logging
from typing import Any, Dict, List, Optional
# Logger konfigurieren
|
|
logger = logging.getLogger(__name__)
|
|
|
|
def convert_to_anthropic_format(openai_messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
|
"""
|
|
Konvertiert Nachrichten vom OpenAI-Format ins Anthropic-Format.
|
|
|
|
OpenAI verwendet:
|
|
[{"role": "system", "content": "..."},
|
|
{"role": "user", "content": "..."},
|
|
{"role": "assistant", "content": "..."}]
|
|
|
|
Anthropic verwendet:
|
|
[{"role": "user", "content": [{"type": "text", "text": "..."}]},
|
|
{"role": "assistant", "content": [{"type": "text", "text": "..."}]}]
|
|
|
|
Anmerkung: Anthropic hat kein direktes System-Message-Äquivalent,
|
|
daher fügen wir System-Nachrichten in die erste User-Nachricht ein.
|
|
"""
|
|
anthropic_messages = []
|
|
system_content = ""
|
|
|
|
# Extrahiere zuerst alle System-Nachrichten
|
|
for msg in openai_messages:
|
|
if msg.get("role") == "system":
|
|
if isinstance(msg.get("content"), str):
|
|
system_content += msg.get("content", "") + "\n\n"
|
|
# Falls content bereits ein Array ist (selten bei system messages)
|
|
elif isinstance(msg.get("content"), list):
|
|
for part in msg.get("content", []):
|
|
if part.get("type") == "text":
|
|
system_content += part.get("text", "") + "\n\n"
|
|
|
|
# Konvertiere die restlichen Nachrichten
|
|
for i, msg in enumerate(openai_messages):
|
|
role = msg.get("role")
|
|
content = msg.get("content", "")
|
|
|
|
# System-Nachrichten überspringen (bereits extrahiert)
|
|
if role == "system":
|
|
continue
|
|
|
|
# Anthropic unterstützt nur "user" und "assistant" als Rollen
|
|
if role not in ["user", "assistant"]:
|
|
role = "user"
|
|
|
|
# Standardisiertes Nachrichtenformat erstellen
|
|
anthropic_msg = {"role": role}
|
|
|
|
# Content-Formatierung
|
|
if isinstance(content, str):
|
|
# String in ein Anthropic-kompatibles Array umwandeln
|
|
# Für die erste User-Nachricht: System-Inhalte voranstellen, falls vorhanden
|
|
if role == "user" and system_content and not any(m.get("role") == "user" for m in anthropic_messages):
|
|
text_content = system_content + content
|
|
else:
|
|
text_content = content
|
|
|
|
anthropic_msg["content"] = [{"type": "text", "text": text_content}]
|
|
|
|
elif isinstance(content, list):
|
|
# Bei Array-Content (multimodal)
|
|
transformed_content = []
|
|
|
|
# Für die erste User-Nachricht: System-Inhalte dem ersten Text-Element voranstellen
|
|
if role == "user" and system_content and not any(m.get("role") == "user" for m in anthropic_messages):
|
|
first_text_added = False
|
|
|
|
for part in content:
|
|
if part.get("type") == "text" and not first_text_added:
|
|
transformed_content.append({
|
|
"type": "text",
|
|
"text": system_content + part.get("text", "")
|
|
})
|
|
first_text_added = True
|
|
elif part.get("type") == "image_url":
|
|
# OpenAI image_url in Anthropic image umwandeln
|
|
image_url = part.get("image_url", {}).get("url", "")
|
|
if image_url.startswith("data:"):
|
|
# Base64-kodiertes Bild
|
|
parts = image_url.split(",", 1)
|
|
if len(parts) == 2:
|
|
media_type = parts[0].split(":")[1].split(";")[0]
|
|
base64_data = parts[1]
|
|
transformed_content.append({
|
|
"type": "image",
|
|
"source": {
|
|
"type": "base64",
|
|
"media_type": media_type,
|
|
"data": base64_data
|
|
}
|
|
})
|
|
else:
|
|
# URL-Bild - nicht direkt unterstützt von Anthropic in dieser Form
|
|
logger.warning("Externe Bild-URLs werden von Anthropic nicht direkt unterstützt")
|
|
else:
|
|
transformed_content.append(part)
|
|
|
|
# Falls kein Text-Element gefunden wurde, füge System-Content als separates Element hinzu
|
|
if system_content and not first_text_added:
|
|
transformed_content.insert(0, {"type": "text", "text": system_content})
|
|
else:
|
|
# Wenn es nicht die erste User-Nachricht ist oder kein System-Content vorhanden ist
|
|
for part in content:
|
|
if part.get("type") == "image_url":
|
|
# OpenAI image_url in Anthropic image umwandeln
|
|
image_url = part.get("image_url", {}).get("url", "")
|
|
if image_url.startswith("data:"):
|
|
# Base64-kodiertes Bild
|
|
parts = image_url.split(",", 1)
|
|
if len(parts) == 2:
|
|
media_type = parts[0].split(":")[1].split(";")[0]
|
|
base64_data = parts[1]
|
|
transformed_content.append({
|
|
"type": "image",
|
|
"source": {
|
|
"type": "base64",
|
|
"media_type": media_type,
|
|
"data": base64_data
|
|
}
|
|
})
|
|
else:
|
|
# URL-Bild - nicht direkt unterstützt von Anthropic in dieser Form
|
|
logger.warning("Externe Bild-URLs werden von Anthropic nicht direkt unterstützt")
|
|
else:
|
|
transformed_content.append(part)
|
|
|
|
anthropic_msg["content"] = transformed_content
|
|
|
|
anthropic_messages.append(anthropic_msg)
|
|
|
|
return anthropic_messages
|
|
|
|
def convert_to_openai_format(anthropic_messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
|
"""
|
|
Konvertiert Nachrichten vom Anthropic-Format ins OpenAI-Format.
|
|
|
|
Anthropic:
|
|
[{"role": "user", "content": [{"type": "text", "text": "..."}]},
|
|
{"role": "assistant", "content": [{"type": "text", "text": "..."}]}]
|
|
|
|
OpenAI:
|
|
[{"role": "system", "content": "..."},
|
|
{"role": "user", "content": "..."},
|
|
{"role": "assistant", "content": "..."}]
|
|
"""
|
|
openai_messages = []
|
|
|
|
for msg in anthropic_messages:
|
|
role = msg.get("role", "user")
|
|
content = msg.get("content", [])
|
|
|
|
# Erstelle OpenAI-Message
|
|
openai_msg = {"role": role}
|
|
|
|
# Content-Behandlung
|
|
if isinstance(content, list):
|
|
# Multimodaler Inhalt von Anthropic
|
|
if all(isinstance(part, dict) and part.get("type") == "text" for part in content):
|
|
# Wenn alle Elemente Text sind, vereinfache zu einem einzelnen String
|
|
openai_msg["content"] = "\n".join(part.get("text", "") for part in content if part.get("type") == "text")
|
|
else:
|
|
# Mischung aus Text und Bildern/Dokumenten
|
|
openai_content = []
|
|
|
|
for part in content:
|
|
part_type = part.get("type", "")
|
|
|
|
if part_type == "text":
|
|
openai_content.append({
|
|
"type": "text",
|
|
"text": part.get("text", "")
|
|
})
|
|
elif part_type == "image":
|
|
# Anthropic image in OpenAI image_url umwandeln
|
|
source = part.get("source", {})
|
|
if source.get("type") == "base64":
|
|
media_type = source.get("media_type", "image/jpeg")
|
|
base64_data = source.get("data", "")
|
|
openai_content.append({
|
|
"type": "image_url",
|
|
"image_url": {
|
|
"url": f"data:{media_type};base64,{base64_data}"
|
|
}
|
|
})
|
|
# Anthropic Dokumente können nicht direkt in OpenAI-Format übersetzt werden
|
|
elif part_type == "document":
|
|
# Versuche, Dokumente als Text zu behandeln
|
|
openai_content.append({
|
|
"type": "text",
|
|
"text": f"[Dokument wurde übermittelt, kann aber nicht direkt in OpenAI-Format konvertiert werden]"
|
|
})
|
|
|
|
openai_msg["content"] = openai_content
|
|
else:
|
|
# Einfacher String-Inhalt
|
|
openai_msg["content"] = content
|
|
|
|
openai_messages.append(openai_msg)
|
|
|
|
return openai_messages
|
|
|
|
def convert_anthropic_response_to_openai_format(anthropic_response: Dict[str, Any]) -> Dict[str, Any]:
|
|
"""
|
|
Konvertiert eine Antwort vom Anthropic-Format ins OpenAI-Format.
|
|
|
|
Args:
|
|
anthropic_response: Antwort im Anthropic-Format
|
|
|
|
Returns:
|
|
Die Antwort im OpenAI-Format
|
|
"""
|
|
# Extrahiere Inhalt aus Anthropic-Antwort
|
|
content = ""
|
|
if "content" in anthropic_response:
|
|
if isinstance(anthropic_response["content"], list):
|
|
# Inhalt ist eine Liste von Teilen (bei neueren API-Versionen)
|
|
for part in anthropic_response["content"]:
|
|
if part.get("type") == "text":
|
|
content += part.get("text", "")
|
|
else:
|
|
# Direkter Inhalt als String (bei älteren API-Versionen)
|
|
content = anthropic_response["content"]
|
|
|
|
# Erstelle OpenAI-formatierte Antwort
|
|
return {
|
|
"id": anthropic_response.get("id", ""),
|
|
"object": "chat.completion",
|
|
"created": anthropic_response.get("created", 0),
|
|
"model": anthropic_response.get("model", ""),
|
|
"choices": [
|
|
{
|
|
"message": {
|
|
"role": "assistant",
|
|
"content": content
|
|
},
|
|
"index": 0,
|
|
"finish_reason": "stop"
|
|
}
|
|
]
|
|
}
|
|
|
|
def convert_openai_response_to_anthropic_format(openai_response: Dict[str, Any]) -> Dict[str, Any]:
|
|
"""
|
|
Konvertiert eine Antwort vom OpenAI-Format ins Anthropic-Format.
|
|
|
|
Args:
|
|
openai_response: Antwort im OpenAI-Format
|
|
|
|
Returns:
|
|
Die Antwort im Anthropic-Format (nur die relevanten Felder)
|
|
"""
|
|
# Extrahiere Inhalt aus OpenAI-Antwort
|
|
content = []
|
|
if "choices" in openai_response and openai_response["choices"]:
|
|
choice = openai_response["choices"][0]
|
|
message = choice.get("message", {})
|
|
message_content = message.get("content", "")
|
|
|
|
if isinstance(message_content, str):
|
|
content.append({
|
|
"type": "text",
|
|
"text": message_content
|
|
})
|
|
elif isinstance(message_content, list):
|
|
# Multimodaler Inhalt (selten in Antworten)
|
|
for part in message_content:
|
|
if part.get("type") == "text":
|
|
content.append({
|
|
"type": "text",
|
|
"text": part.get("text", "")
|
|
})
|
|
# Bilder in Antworten würden hier auch verarbeitet werden
|
|
|
|
# Erstelle Anthropic-formatierte Antwort
|
|
return {
|
|
"id": openai_response.get("id", ""),
|
|
"model": openai_response.get("model", ""),
|
|
"content": content,
|
|
"type": "message",
|
|
"role": "assistant"
|
|
} |