Merge branch 'main' of https://github.com/valueonag/gateway
This commit is contained in:
commit af68f6a8c8
749 changed files with 207432 additions and 32029 deletions
74  .dockerignore  Normal file
@@ -0,0 +1,74 @@
# Docker ignore file for Gateway
# Excludes unnecessary files from Docker build context

# Git
.git
.gitignore
.github

# Python
__pycache__
*.pyc
*.pyo
*.pyd
.Python
*.so
*.egg
*.egg-info
dist
build
venv
env
ENV
.venv

# IDE
.vscode
.idea
*.swp
*.swo
*~

# Environment files (env_gcp.env will be copied as .env by workflow)
env_*.env
.env.local
# Note: .env is NOT ignored - it will be created from env_gcp.env by the workflow

# Logs
*.log
logs/
local/logs/

# Tests
tests/
.pytest_cache
.coverage
htmlcov/

# Documentation
docs/
*.md
README.txt
LICENSE.txt

# Local development files
local/
*.txt
!requirements.txt

# Debug files
debug/
test-chat/

# OS files
.DS_Store
Thumbs.db

# Temporary files
tmp/
temp/
*.tmp

# Build artifacts
*.zip
release.zip
27  .forgejo/workflows/deploy.yml  Normal file
@@ -0,0 +1,27 @@
name: Deploy Gateway

on:
  push:
    branches:
      - main

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Deploy to Infomaniak VM
        env:
          SSH_PRIVATE_KEY: ${{ secrets.SSH_PRIVATE_KEY }}
        run: |
          mkdir -p ~/.ssh
          echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
          chmod 600 ~/.ssh/deploy_key
          echo "StrictHostKeyChecking=no" >> ~/.ssh/config
          echo "UserKnownHostsFile=/dev/null" >> ~/.ssh/config
          ssh -i ~/.ssh/deploy_key ubuntu@api.poweron.swiss "
            cd /srv/gateway/current &&
            git pull &&
            source .venv/bin/activate &&
            pip install -r requirements.txt --no-cache-dir &&
            sudo systemctl restart gateway
          "
73  .gcloudignore  Normal file
@@ -0,0 +1,73 @@
# Google Cloud Build ignore file
# Similar to .dockerignore but for Cloud Build

# Git
.git
.gitignore
.github

# Python
__pycache__
*.pyc
*.pyo
*.pyd
.Python
*.so
*.egg
*.egg-info
dist
build
venv
env
ENV
.venv

# IDE
.vscode
.idea
*.swp
*.swo
*~

# Environment files (will be handled separately)
env_*.env
.env.local

# Logs
*.log
logs/
local/logs/

# Tests
tests/
.pytest_cache
.coverage
htmlcov/

# Documentation
docs/
*.md
README.txt
LICENSE.txt

# Local development files
local/
*.txt
!requirements.txt

# Debug files
debug/
test-chat/

# OS files
.DS_Store
Thumbs.db

# Temporary files
tmp/
temp/
*.tmp

# Build artifacts
*.zip
release.zip
151  .github/workflows/deploy-gcp.yml  vendored  Normal file
@@ -0,0 +1,151 @@
# GitHub Actions workflow for deploying Gateway to Google Cloud Run
# Documentation: https://cloud.google.com/run/docs/deploying
#
# Required GitHub Secrets:
# - GCP_PROJECT_ID: Your Google Cloud Project ID
# - GCP_SA_KEY: Service Account JSON key with Cloud Run Admin and Cloud Build Editor roles
# - GCP_SERVICE_ACCOUNT_EMAIL: Email of the service account to run the Cloud Run service as
#
# Required Google Cloud Setup:
# 1. Create a service account with Cloud Run Admin and Cloud Build Editor roles
# 2. Create secret "CONFIG_KEY" in Secret Manager with your master key
# 3. Grant the service account access to Secret Manager secrets
# 4. Create a Cloud SQL instance (if it does not exist yet)
# 5. Create env_prod.env and env_int.env files with your configuration
#
# Environment Selection:
# - Push to 'main' branch → uses env_prod.env (production)
# - Push to 'int' branch → uses env_int.env (integration)
# - Manual dispatch → select environment (prod/int) to use the corresponding env file

name: Deploy Gateway to Google Cloud Run

on:
  push:
    branches:
      - main
      - int
    paths:
      - 'gateway/**'
  workflow_dispatch:
    inputs:
      environment:
        description: 'Environment to deploy to'
        required: true
        default: 'prod'
        type: choice
        options:
          - prod
          - int

# Cancel in-progress runs when a new run is triggered (saves logs/storage)
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }}
  REGION: europe-west6  # Zurich region

jobs:
  deploy:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      id-token: write  # Required for Workload Identity Federation

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Determine environment
        id: env
        run: |
          if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
            ENV_TYPE="${{ github.event.inputs.environment }}"
          elif [ "${{ github.ref }}" == "refs/heads/int" ]; then
            ENV_TYPE="int"
          else
            ENV_TYPE="prod"
          fi
          echo "env_type=$ENV_TYPE" >> $GITHUB_OUTPUT
          echo "service_name=gateway-$ENV_TYPE" >> $GITHUB_OUTPUT
          echo "env_file=env_${ENV_TYPE}.env" >> $GITHUB_OUTPUT
          echo "Determined environment: $ENV_TYPE"
          echo "Service name: gateway-$ENV_TYPE"
          echo "Env file: env_${ENV_TYPE}.env"

      - name: Authenticate to Google Cloud
        uses: google-github-actions/auth@v2
        with:
          credentials_json: ${{ secrets.GCP_SA_KEY }}
          # Alternative: Use Workload Identity Federation (more secure)
          # workload_identity_provider: ${{ secrets.WIF_PROVIDER }}
          # service_account: ${{ secrets.WIF_SERVICE_ACCOUNT }}

      - name: Set up Cloud SDK
        uses: google-github-actions/setup-gcloud@v2

      - name: Configure Docker for GCR
        run: |
          gcloud auth configure-docker

      - name: Set environment file
        run: |
          cd gateway
          ENV_FILE="${{ steps.env.outputs.env_file }}"
          if [ -f "$ENV_FILE" ]; then
            echo "Using $ENV_FILE"
            cp "$ENV_FILE" .env
          else
            echo "Warning: $ENV_FILE not found, using env_prod.env as fallback"
            cp env_prod.env .env
          fi
          # Clean up other env files (optional, for security)
          rm -f env_*.env

      - name: Build and push container image
        working-directory: ./gateway
        run: |
          # Build container image using Cloud Build
          # If a Dockerfile exists, it will be used; otherwise Cloud Buildpacks will be used
          SERVICE_NAME="${{ steps.env.outputs.service_name }}"
          gcloud builds submit \
            --tag gcr.io/${{ env.PROJECT_ID }}/$SERVICE_NAME:${{ github.sha }} \
            --tag gcr.io/${{ env.PROJECT_ID }}/$SERVICE_NAME:latest \
            --project ${{ env.PROJECT_ID }}

      - name: Deploy to Cloud Run
        run: |
          SERVICE_NAME="${{ steps.env.outputs.service_name }}"
          ENV_TYPE="${{ steps.env.outputs.env_type }}"
          gcloud run deploy $SERVICE_NAME \
            --image gcr.io/${{ env.PROJECT_ID }}/$SERVICE_NAME:${{ github.sha }} \
            --region ${{ env.REGION }} \
            --platform managed \
            --allow-unauthenticated \
            --project ${{ env.PROJECT_ID }} \
            --set-env-vars "APP_ENV_TYPE=$ENV_TYPE" \
            --set-secrets "CONFIG_KEY=CONFIG_KEY:latest" \
            --memory 2Gi \
            --cpu 2 \
            --timeout 300 \
            --max-instances 10 \
            --min-instances 1 \
            --port 8000 \
            --service-account ${{ secrets.GCP_SERVICE_ACCOUNT_EMAIL }}

      - name: Get service URL
        id: service-url
        run: |
          SERVICE_NAME="${{ steps.env.outputs.service_name }}"
          SERVICE_URL=$(gcloud run services describe $SERVICE_NAME \
            --region ${{ env.REGION }} \
            --project ${{ env.PROJECT_ID }} \
            --format 'value(status.url)')
          echo "url=$SERVICE_URL" >> $GITHUB_OUTPUT

      - name: Output deployment URL
        run: |
          echo "🚀 Deployment successful!"
          echo "Service URL: ${{ steps.service-url.outputs.url }}"
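The deploy step above mounts the Secret Manager secret as the CONFIG_KEY environment variable, and the Dockerfile below notes that encrypted values in .env are decrypted with it at runtime. The gateway's actual decryption scheme is not shown in this diff; as a minimal sketch of the pattern, assuming symmetric Fernet encryption (an assumption, not the confirmed mechanism), decryption could look like this:

```python
# Hypothetical sketch: decrypt an encrypted .env value with the master key
# that Cloud Run injects as CONFIG_KEY. Fernet is an assumption here; the
# gateway's real scheme is not part of this diff.
import os

from cryptography.fernet import Fernet


def decryptConfigValue(encryptedValue: str) -> str:
    masterKey = os.environ["CONFIG_KEY"]  # injected via --set-secrets above
    return Fernet(masterKey.encode()).decrypt(encryptedValue.encode()).decode()
```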
10  .github/workflows/int_gateway-int.yml  vendored
@@ -10,6 +10,11 @@ on:
       - int
   workflow_dispatch:
 
+# Cancel in-progress runs when a new run is triggered (saves logs/storage)
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   build:
     runs-on: ubuntu-latest
@@ -32,7 +37,11 @@ jobs:
       - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
+         if [ -f requirements.lock ]; then
+           pip install -r requirements.lock --no-cache-dir
+         else
           pip install -r requirements.txt --no-cache-dir
+         fi
 
      # Optional: Add step to run tests here (PyTest, Django test suites, etc.)
@@ -46,6 +55,7 @@ jobs:
          path: |
            release.zip
            !venv/
+         retention-days: 5
 
   deploy:
     runs-on: ubuntu-latest
10  .github/workflows/main_gateway-prod.yml  vendored
@@ -10,6 +10,11 @@ on:
       - main
   workflow_dispatch:
 
+# Cancel in-progress runs when a new run is triggered (saves logs/storage)
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   build:
     runs-on: ubuntu-latest
@@ -32,7 +37,11 @@ jobs:
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
+         if [ -f requirements.lock ]; then
+           pip install -r requirements.lock --no-cache-dir
+         else
           pip install -r requirements.txt --no-cache-dir
+         fi
 
      # Optional: Add step to run tests here (PyTest, Django test suites, etc.)
@@ -46,6 +55,7 @@ jobs:
          path: |
            release.zip
            !venv/
+         retention-days: 5
 
   deploy:
     runs-on: ubuntu-latest
51  .github/workflows/update-requirements-lock.yml  vendored  Normal file
@@ -0,0 +1,51 @@
# Generates requirements.lock from requirements.txt using Python 3.11 (same as build).
# Run manually (workflow_dispatch) or on changes to requirements.txt.
# After running, commit the generated requirements.lock so builds use it for fast installs.

name: Update requirements.lock

on:
  workflow_dispatch:
  push:
    branches:
      - main
      - int
    paths:
      - 'requirements.txt'

# Cancel in-progress runs when a new run is triggered (saves logs/storage)
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  update-lock:
    runs-on: ubuntu-latest
    permissions:
      contents: write  # push requirements.lock

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install pip-tools
        run: python -m pip install --upgrade "pip>=24,<26" pip-tools

      - name: Generate requirements.lock
        run: pip-compile requirements.txt -o requirements.lock

      - name: Commit and push requirements.lock
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git add requirements.lock
          if git diff --staged --quiet; then
            echo "No changes to requirements.lock"
          else
            git commit -m "chore: update requirements.lock"
            git push
          fi
50  Dockerfile  Normal file
@@ -0,0 +1,50 @@
# Dockerfile for PowerOn Gateway - Google Cloud Run
# Python 3.11 base image optimized for Cloud Run

FROM python:3.11-slim

# Set working directory
WORKDIR /app

# Set environment variables
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    NUMEXPR_MAX_THREADS=12 \
    PORT=8000

# Install system dependencies
RUN apt-get update && apt-get install -y \
    gcc \
    g++ \
    postgresql-client \
    libpq-dev \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first for better caching (requirements.lock from "Update requirements.lock" workflow)
COPY requirements.txt .
COPY requirements.lock .

# Install Python dependencies (lock file avoids slow pip backtracking)
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.lock

# Copy application code (includes .env file created by workflow from env_gcp.env)
COPY . .

# Create directories for logs (Cloud Run uses /tmp for writable storage)
RUN mkdir -p /tmp/logs /tmp/debug

# Note: .env file (created from env_gcp.env by workflow) contains encrypted secrets
# These are decrypted at runtime using the master key from Secret Manager
# (mounted as the CONFIG_KEY environment variable in Cloud Run)

# Expose port (Cloud Run sets the PORT env var, but we default to 8000)
EXPOSE 8000

# Health check for Cloud Run
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
    CMD python -c "import requests; requests.get('http://localhost:8000/api/admin/health', timeout=5)" || exit 1

# Run the application
# Cloud Run sets the PORT env var; the shell expansion below passes it to uvicorn
CMD exec uvicorn app:app --host 0.0.0.0 --port ${PORT:-8000} --workers 1
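The HEALTHCHECK above probes /api/admin/health, an endpoint that is not itself part of this diff. As a minimal sketch, assuming FastAPI (which app.py uses) and the /api/admin prefix implied by the probe URL, such an endpoint might look like the following; the router name and payload are illustrative, not the gateway's actual implementation:

```python
# Hypothetical sketch of the health endpoint the HEALTHCHECK probes.
# Only the HTTP 200 status matters to Docker; the JSON body is illustrative.
from datetime import datetime, timezone

from fastapi import APIRouter

router = APIRouter(prefix="/api/admin")


@router.get("/health")
async def health():
    # Keep this handler dependency-free so the probe stays cheap and cannot
    # fail because of a slow downstream service.
    return {"status": "ok", "time": datetime.now(timezone.utc).isoformat()}
```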
30  LICENSE.txt  Normal file
@@ -0,0 +1,30 @@
PROPRIETARY LICENSE – ALL RIGHTS RESERVED
========================================

Copyright (c) 2025 Patrick Motsch

This software repository contains proprietary and confidential
software.

ALL RIGHTS RESERVED.

Without the prior express written consent of the rights holder,
it is prohibited to do any of the following with this software or parts of it:

• use it
• copy it
• modify it
• distribute it
• make it publicly available
• license or pass it on
• decompile, disassemble, or reverse engineer it

Use of this software is permitted solely under a separate
commercial license or usage agreement with the
rights holder.

This license notice does not replace an individual license agreement
and grants no rights of use whatsoever.

© 2025 Patrick Motsch. All rights reserved.
46  README.txt  Normal file
@@ -0,0 +1,46 @@
AI TOOLKIT – PROPRIETARY SOFTWARE
================================

This repository contains proprietary software (AI Toolkit)
developed by the rights holder.

STATUS
------
• Proprietary, closed-source software
• Not an open-source project
• Use only on the basis of a separate written license agreement

LICENSE
------
This repository is not covered by any open-source license.
All rights are reserved.

Without an explicit written license agreement with the rights holder,
it is prohibited to do any of the following with the code or parts of it:

• use it
• copy it
• modify it
• distribute it
• resell it
• decompile or reverse engineer it

CUSTOMER PRODUCTS
--------------
Products built on the basis of this toolkit
are subject to separate license agreements.

Customer-specific product configurations (e.g., prompts,
parameters, workflows, domain logic) are, where contractually
agreed, the property of the respective customer.

The underlying toolkit code remains entirely
with the rights holder.

CONTACT
-------
For commercial licenses or inquiries:
Patrick Motsch

© 2025 Patrick Motsch. All rights reserved.
315  app.py
@@ -1,10 +1,15 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 import os
 import sys
+import unicodedata
+
 from urllib.parse import quote_plus
 
 os.environ["NUMEXPR_MAX_THREADS"] = "12"
 
-from fastapi import FastAPI
+from fastapi import FastAPI, Request
+from fastapi.responses import JSONResponse
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.security import HTTPBearer
 from contextlib import asynccontextmanager
@@ -15,7 +20,8 @@ from datetime import datetime
 
 from modules.shared.configuration import APP_CONFIG
 from modules.shared.eventManagement import eventManager
-from modules.features import featuresLifecycle as featuresLifecycle
+from modules.interfaces.interfaceDbApp import getRootInterface
+from modules.system.registry import loadFeatureMainModules
 
 class DailyRotatingFileHandler(RotatingFileHandler):
     """
@@ -41,6 +47,9 @@ class DailyRotatingFileHandler(RotatingFileHandler):
 
     def _updateFileIfNeeded(self):
         """Update the log file if the date has changed"""
+        # Guard against interpreter shutdown when datetime may be None
+        if datetime is None:
+            return False
         today = datetime.now().strftime("%Y%m%d")
 
         if self.currentDate != today:
@@ -140,23 +149,24 @@ def initLogging():
         def filter(self, record):
             if isinstance(record.msg, str):
                 # Remove only emojis, preserve other Unicode characters like quotes
-                import re
-                import unicodedata
-
-                # Remove emoji characters specifically
-                record.msg = "".join(
-                    char
-                    for char in record.msg
-                    if unicodedata.category(char) != "So"
-                    or not (
-                        0x1F600 <= ord(char) <= 0x1F64F
-                        or 0x1F300 <= ord(char) <= 0x1F5FF
-                        or 0x1F680 <= ord(char) <= 0x1F6FF
-                        or 0x1F1E0 <= ord(char) <= 0x1F1FF
-                        or 0x2600 <= ord(char) <= 0x26FF
-                        or 0x2700 <= ord(char) <= 0x27BF
-                    )
-                )
+                # Guard against None characters during shutdown
+                try:
+                    record.msg = "".join(
+                        char
+                        for char in record.msg
+                        if char is not None and unicodedata.category(char) != "So"
+                        or (char is not None and not (
+                            0x1F600 <= ord(char) <= 0x1F64F
+                            or 0x1F300 <= ord(char) <= 0x1F5FF
+                            or 0x1F680 <= ord(char) <= 0x1F6FF
+                            or 0x1F1E0 <= ord(char) <= 0x1F1FF
+                            or 0x2600 <= ord(char) <= 0x26FF
+                            or 0x2700 <= ord(char) <= 0x27BF
+                        ))
+                    )
+                except (TypeError, AttributeError):
+                    # Handle edge cases during shutdown
+                    pass
             return True
 
 # Add filter to normalize problematic unicode (e.g., arrows) to ASCII for terminals like cp1252
@@ -231,6 +241,9 @@ def initLogging():
         "asyncio",
         "fastapi.security.oauth2",
         "msal",
+        "azure.core.pipeline.policies.http_logging_policy",
+        "stripe",
+        "apscheduler",
     ]
     for loggerName in noisyLoggers:
         logging.getLogger(loggerName).setLevel(logging.WARNING)
@@ -269,32 +282,160 @@ initLogging()
 logger = logging.getLogger(__name__)
 instanceLabel = APP_CONFIG.get("APP_ENV_LABEL")
 
+# Pre-warm AI connectors on process load (before lifespan). Critical for chatbot latency.
+try:
+    import modules.aicore.aicoreModelRegistry  # noqa: F401
+    logger.info("AI connectors pre-warm (app load) triggered")
+except Exception as e:
+    logging.getLogger(__name__).warning(f"AI pre-warm at app load failed: {e}")
+
 # Define lifespan context manager for application startup/shutdown events
 @asynccontextmanager
 async def lifespan(app: FastAPI):
     logger.info("Application is starting up")
 
+    # AI connectors already pre-warmed at module-load via _eager_prewarm() in aicoreModelRegistry.
+
+    # Bootstrap database if needed (creates initial users, mandates, roles, etc.)
+    # This must happen before getting root interface
+    from modules.security.rootAccess import getRootDbAppConnector
+    from modules.interfaces.interfaceBootstrap import initBootstrap
+    rootDb = getRootDbAppConnector()
+    try:
+        initBootstrap(rootDb)
+        logger.info("Bootstrap check completed")
+    except Exception as e:
+        logger.warning(f"Bootstrap check failed (may already be initialized): {str(e)}")
+
+    # Register all feature definitions in RBAC catalog (for /api/features/ endpoint)
+    try:
+        from modules.security.rbacCatalog import getCatalogService
+        from modules.system.registry import registerAllFeaturesInCatalog, syncCatalogFeaturesToDb
+        catalogService = getCatalogService()
+        registerAllFeaturesInCatalog(catalogService)
+        logger.info("Feature catalog registration completed")
+        # Persist the in-memory feature registry into the Feature DB-table so
+        # the FeatureInstance.featureCode FK has real targets. Without this
+        # every FeatureInstance row would be flagged as orphan by the
+        # SysAdmin DB-health scan (cf. interfaceFeatures.upsertFeature).
+        try:
+            syncCatalogFeaturesToDb(catalogService)
+        except Exception as e:
+            logger.error(f"Feature DB sync failed: {e}")
+    except Exception as e:
+        logger.error(f"Feature catalog registration failed: {e}")
+
+    # Sync gateway i18n registry to DB and load translation cache
+    try:
+        from modules.shared.i18nRegistry import _syncRegistryToDb, _loadCache
+        await _syncRegistryToDb()
+        await _loadCache()
+        logger.info("i18n registry sync + cache load completed")
+    except Exception as e:
+        logger.warning(f"i18n registry sync failed (non-critical): {e}")
+
+    # Pre-warm service center modules (avoids first-request import latency)
+    try:
+        from modules.serviceCenter import preWarm
+        preWarm()
+        logger.info("Service center pre-warm completed")
+    except Exception as e:
+        logger.warning(f"Service center pre-warm failed (non-critical): {e}")
+
+    # Get event user for feature lifecycle (system-level user for background operations)
+    rootInterface = getRootInterface()
+    eventUser = rootInterface.getUserByUsername("event")
+    if not eventUser:
+        logger.error("Could not get event user - some features may not start properly")
+
+    # --- Init Feature Containers (Plug&Play) ---
+    try:
+        mainModules = loadFeatureMainModules()
+        for featureName, module in mainModules.items():
+            if hasattr(module, "onStart"):
+                try:
+                    await module.onStart(eventUser)
+                    logger.info(f"Feature '{featureName}' started")
+                except Exception as e:
+                    logger.error(f"Feature '{featureName}' failed to start: {e}")
+    except Exception as e:
+        logger.warning(f"Could not initialize feature containers: {e}")
+
     # --- Init Managers ---
-    await featuresLifecycle.start()
+    import asyncio
+    try:
+        main_loop = asyncio.get_running_loop()
+        eventManager.set_event_loop(main_loop)
+        from modules.workflows.scheduler.mainScheduler import setMainLoop as setSchedulerMainLoop
+        setSchedulerMainLoop(main_loop)
+
+        # Suppress noisy ConnectionResetError from ProactorEventLoop on Windows
+        # when clients (browsers) close connections abruptly. This is a known
+        # asyncio issue on Windows: https://bugs.python.org/issue39010
+        def _suppressClientDisconnect(loop, ctx):
+            exc = ctx.get("exception")
+            if isinstance(exc, ConnectionResetError):
+                return
+            if isinstance(exc, ConnectionAbortedError):
+                return
+            loop.default_exception_handler(ctx)
+        main_loop.set_exception_handler(_suppressClientDisconnect)
+    except RuntimeError:
+        pass
     eventManager.start()
 
+    # Register audit log cleanup scheduler
+    from modules.shared.auditLogger import registerAuditLogCleanupScheduler
+    registerAuditLogCleanupScheduler()
+
+    # Recover background jobs that were RUNNING when the previous worker died
+    try:
+        from modules.serviceCenter.services.serviceBackgroundJobs.mainBackgroundJobService import (
+            recoverInterruptedJobs,
+        )
+        recoverInterruptedJobs()
+    except Exception as e:
+        logger.warning(f"BackgroundJob recovery failed (non-critical): {e}")
+
     yield
 
     # --- Stop Managers ---
     eventManager.stop()
-    await featuresLifecycle.stop()
+
+    # --- Stop Feature Containers (Plug&Play) ---
+    try:
+        mainModules = loadFeatureMainModules()
+        for featureName, module in mainModules.items():
+            if hasattr(module, "onStop"):
+                try:
+                    await module.onStop(eventUser)
+                    logger.info(f"Feature '{featureName}' stopped")
+                except Exception as e:
+                    logger.error(f"Feature '{featureName}' failed to stop: {e}")
+    except Exception as e:
+        logger.warning(f"Could not shutdown feature containers: {e}")
 
     logger.info("Application has been shut down")
 
+
+# Custom function to generate readable operation IDs for Swagger UI
+# Uses snake_case function names directly instead of auto-generated IDs
+def _generateOperationId(route) -> str:
+    """Generate operation ID from route function name (snake_case)."""
+    if hasattr(route, "endpoint") and hasattr(route.endpoint, "__name__"):
+        return route.endpoint.__name__
+    return route.name if route.name else "unknown"
+
+
 # START APP
 app = FastAPI(
-    title="PowerOn | Data Platform API",
-    description=f"Backend API for the Multi-Agent Platform by ValueOn AG ({instanceLabel})",
+    title="PowerOn AG | Workflow Engine",
+    description=f"API for dynamic SaaS platforms ({instanceLabel})",
     lifespan=lifespan,
     swagger_ui_init_oauth={
         "usePkceWithAuthorizationCodeGrant": True,
     },
+    generate_unique_id_function=_generateOperationId,
 )
 
 # Configure OpenAPI security scheme for Swagger UI
@@ -346,24 +487,52 @@ def getAllowedOrigins():
     return origins
 
 
-# CORS configuration using environment variables
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=getAllowedOrigins(),
-    allow_credentials=True,
-    allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
-    allow_headers=["*"],
-    expose_headers=["*"],
-    max_age=86400,  # Increased caching for preflight requests
-)
+# CORS origin regex pattern for wildcard subdomain support
+# Matches all subdomains of poweron.swiss and poweron-center.net
+CORS_ORIGIN_REGEX = r"https://.*\.(poweron\.swiss|poweron-center\.net)"
+
+# SlowAPI rate limiter initialization
+from modules.auth import limiter
+from slowapi.errors import RateLimitExceeded
+from slowapi import _rate_limit_exceeded_handler
+app.state.limiter = limiter
+app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
+
+
+async def _insufficientBalanceHandler(request: Request, exc: Exception):
+    """HTTP 402 with structured billing hint."""
+    payload = exc.toClientDict() if hasattr(exc, "toClientDict") else {"error": "INSUFFICIENT_BALANCE", "message": str(exc)}
+    return JSONResponse(status_code=402, content={"detail": payload})
+
+
+from modules.serviceCenter.services.serviceBilling.mainServiceBilling import (
+    InsufficientBalanceException,
+)
+
+app.add_exception_handler(InsufficientBalanceException, _insufficientBalanceHandler)
+
 # CSRF protection middleware
-from modules.security.csrf import CSRFMiddleware
-from modules.security.tokenRefreshMiddleware import (
+from modules.auth import CSRFMiddleware
+from modules.auth import (
     TokenRefreshMiddleware,
     ProactiveTokenRefreshMiddleware,
 )
+
+# Per-request context middleware: language (Accept-Language) + user timezone (X-User-Timezone).
+# Both are written into ContextVars and consumed by t() / resolveText() and getRequestNow()
+# without having to thread them through every call site.
+from modules.shared.i18nRegistry import _setLanguage, normalizePrimaryLanguageTag
+from modules.shared.timeUtils import _setRequestTimezone
+
+@app.middleware("http")
+async def _requestContextMiddleware(request: Request, call_next):
+    acceptLang = request.headers.get("Accept-Language", "")
+    lang = normalizePrimaryLanguageTag(acceptLang, "de")
+    _setLanguage(lang)
+    _setRequestTimezone(request.headers.get("X-User-Timezone", ""))
+    return await call_next(request)
+
 app.add_middleware(CSRFMiddleware)
 
 # Token refresh middleware (silent refresh for expired OAuth tokens)
@@ -374,6 +543,19 @@ app.add_middleware(
     ProactiveTokenRefreshMiddleware, enabled=True, check_interval_minutes=5
 )
 
+# CORS must be registered LAST so it wraps the whole stack: every response (errors, CSRF 403,
+# rate limits) still gets Access-Control-Allow-Origin for browser cross-origin calls.
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=getAllowedOrigins(),
+    allow_origin_regex=CORS_ORIGIN_REGEX,
+    allow_credentials=True,
+    allow_methods=["GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"],
+    allow_headers=["*"],
+    expose_headers=["*"],
+    max_age=86400,
+)
+
 # Include all routers
 
 from modules.routes.routeAdmin import router as generalRouter
@@ -391,8 +573,8 @@ app.include_router(userRouter)
 from modules.routes.routeDataFiles import router as fileRouter
 app.include_router(fileRouter)
 
-from modules.routes.routeDataNeutralization import router as neutralizationRouter
-app.include_router(neutralizationRouter)
+from modules.routes.routeDataSources import router as dataSourceRouter
+app.include_router(dataSourceRouter)
 
 from modules.routes.routeDataPrompts import router as promptRouter
 app.include_router(promptRouter)
@@ -400,12 +582,6 @@ app.include_router(promptRouter)
 from modules.routes.routeDataConnections import router as connectionsRouter
 app.include_router(connectionsRouter)
 
-from modules.routes.routeWorkflows import router as workflowRouter
-app.include_router(workflowRouter)
-
-from modules.routes.routeChatPlayground import router as chatPlaygroundRouter
-app.include_router(chatPlaygroundRouter)
-
 from modules.routes.routeSecurityLocal import router as localRouter
 app.include_router(localRouter)
 
@@ -415,18 +591,81 @@ app.include_router(msftRouter)
 from modules.routes.routeSecurityGoogle import router as googleRouter
 app.include_router(googleRouter)
 
+from modules.routes.routeSecurityClickup import router as clickupRouter
+app.include_router(clickupRouter)
+
+from modules.routes.routeClickup import router as clickupApiRouter
+app.include_router(clickupApiRouter)
+
 from modules.routes.routeVoiceGoogle import router as voiceGoogleRouter
 app.include_router(voiceGoogleRouter)
 
-from modules.routes.routeSecurityAdmin import router as adminSecurityRouter
-app.include_router(adminSecurityRouter)
+from modules.routes.routeVoiceUser import router as voiceUserRouter
+app.include_router(voiceUserRouter)
 
 from modules.routes.routeSharepoint import router as sharepointRouter
 app.include_router(sharepointRouter)
 
-from modules.routes.routeDataAutomation import router as automationRouter
-app.include_router(automationRouter)
+from modules.routes.routeAudit import router as auditRouter
+app.include_router(auditRouter)
 
-from modules.routes.routeAdminAutomationEvents import router as adminAutomationEventsRouter
-app.include_router(adminAutomationEventsRouter)
+from modules.routes.routeAdminLogs import router as adminLogsRouter
+app.include_router(adminLogsRouter)
+
+from modules.routes.routeAdminRbacRules import router as rbacAdminRulesRouter
+app.include_router(rbacAdminRulesRouter)
+
+from modules.routes.routeAdminFeatures import router as featuresAdminRouter
+app.include_router(featuresAdminRouter)
+
+from modules.routes.routeStore import router as storeRouter
+app.include_router(storeRouter)
+
+from modules.routes.routeInvitations import router as invitationsRouter
+app.include_router(invitationsRouter)
+
+from modules.routes.routeNotifications import router as notificationsRouter
+app.include_router(notificationsRouter)
+
+from modules.routes.routeI18n import router as i18nRouter
+app.include_router(i18nRouter)
+
+from modules.routes.routeAdminUserAccessOverview import router as userAccessOverviewRouter
+app.include_router(userAccessOverviewRouter)
+
+from modules.routes.routeAdminDemoConfig import router as demoConfigRouter
+app.include_router(demoConfigRouter)
+
+from modules.routes.routeAdminDatabaseHealth import router as adminDatabaseHealthRouter
+app.include_router(adminDatabaseHealthRouter)
+
+from modules.routes.routeGdpr import router as gdprRouter
+app.include_router(gdprRouter)
+
+from modules.routes.routeBilling import router as billingRouter
+app.include_router(billingRouter)
+
+from modules.routes.routeSubscription import router as subscriptionRouter
+app.include_router(subscriptionRouter)
+
+from modules.routes.routeJobs import router as jobsRouter
+app.include_router(jobsRouter)
+
+# ============================================================================
+# SYSTEM ROUTES (Navigation, etc.)
+# ============================================================================
+from modules.routes.routeSystem import router as systemRouter, navigationRouter
+app.include_router(systemRouter)
+app.include_router(navigationRouter)
+
+from modules.routes.routeWorkflowDashboard import router as workflowDashboardRouter
+app.include_router(workflowDashboardRouter)
+
+# ============================================================================
+# PLUG&PLAY FEATURE ROUTERS
+# Dynamically load routers from feature containers in modules/features/
+# ============================================================================
+from modules.system.registry import loadFeatureRouters
+
+featureLoadResults = loadFeatureRouters(app)
+logger.info(f"Feature router load results: {featureLoadResults}")
19  config.ini
@@ -4,6 +4,8 @@
 # Auth configuration
 Auth_ALGORITHM = HS256
 Auth_TOKEN_TYPE = bearer
+Auth_RESET_TOKEN_EXPIRY_HOURS = 24
 
 # File management configuration
 File_Management_MAX_UPLOAD_SIZE_MB = 50
@@ -35,3 +37,20 @@ Web_Crawl_RETRY_DELAY = 2
 Web_Research_MAX_DEPTH = 2
 Web_Research_MAX_LINKS_PER_DOMAIN = 4
 Web_Research_CRAWL_TIMEOUT_MINUTES = 10
+
+# STAC API Connector configuration (Swiss Topo)
+Connector_StacSwisstopo_BASE_URL = https://data.geo.admin.ch/api/stac/v1
+Connector_StacSwisstopo_TIMEOUT = 30
+Connector_StacSwisstopo_MAX_RETRIES = 3
+Connector_StacSwisstopo_RETRY_DELAY = 1.0
+Connector_StacSwisstopo_ENABLE_CACHE = True
+
+# Demo RMA credentials (same for all demo trustee instances)
+Demo_RMA_ApiBaseUrl = https://service.int.runmyaccounts.com/api/latest/clients/
+Demo_RMA_ClientName = poweronag
+Demo_RMA_ApiKey = pat_tipTbnHU26CrMzAnLSjCR_uzHJv4CDNa7obaQGHIA-4
+
+# Operator company information (shown on invoice emails)
+Operator_CompanyName = PowerOn AG
+Operator_Address = Birmensdorferstrasse 94, 8003 Zürich
+Operator_VatNumber = CHE491.960.195
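The Connector_StacSwisstopo_* keys above define a timeout/retry policy for the STAC API. A minimal sketch of how a connector might honor them, assuming the requests library; the helper name and the /collections endpoint choice are illustrative, not the gateway's actual connector code:

```python
# Hypothetical sketch: a retrying GET that mirrors the Connector_StacSwisstopo_*
# settings above. Constants duplicate the config values for self-containment.
import time

import requests

BASE_URL = "https://data.geo.admin.ch/api/stac/v1"  # Connector_StacSwisstopo_BASE_URL
TIMEOUT = 30         # Connector_StacSwisstopo_TIMEOUT (seconds)
MAX_RETRIES = 3      # Connector_StacSwisstopo_MAX_RETRIES
RETRY_DELAY = 1.0    # Connector_StacSwisstopo_RETRY_DELAY (seconds)


def fetchCollections() -> dict:
    # /collections is a standard STAC API endpoint; the function name is illustrative.
    lastError = None
    for attempt in range(1, MAX_RETRIES + 1):
        try:
            resp = requests.get(f"{BASE_URL}/collections", timeout=TIMEOUT)
            resp.raise_for_status()
            return resp.json()
        except requests.RequestException as e:
            lastError = e
            if attempt < MAX_RETRIES:
                time.sleep(RETRY_DELAY)  # fixed delay between attempts
    raise lastError
```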
BIN  demoData/expenses/B2025-01a.pdf  Normal file  Binary file not shown.
BIN  demoData/expenses/B2025-02c.pdf  Normal file  Binary file not shown.
BIN  demoData/expenses/B2025-03a.pdf  Normal file  Binary file not shown.
BIN  demoData/expenses/B2025-05a.pdf  Normal file  Binary file not shown.
BIN  demoData/expenses/B2025-05c.pdf  Normal file  Binary file not shown.
BIN  demoData/expenses/B2025-08a.pdf  Normal file  Binary file not shown.
BIN  demoData/invoices/Digitec_Rechnung_63650751.pdf  Normal file  Binary file not shown.
BIN  demoData/invoices/ELKIGescannt_20250503-2030.pdf  Normal file  Binary file not shown.
256  demoData/knowledge-base/2025-10-investor-detail.md  Normal file
@@ -0,0 +1,256 @@
# PowerOn AI Platform - Investor Documentation
## As of: October 14, 2025

---

## Executive Summary

PowerOn is software that helps companies automate recurring tasks. Instead of employees manually gathering data, working through documents, and writing reports, PowerOn takes over this work.

### The problem PowerOn solves
Employees spend 30% of their working time searching for information. Companies struggle to analyze large documents, gather current market data, and produce recurring reports. PowerOn automates these tasks.

### How PowerOn works
A user enters a task, for example "Read my mails in the internal mailbox from the last 2 weeks, summarize them by topic in the SharePoint marketing folder, and draft a reply for the most important customers". PowerOn then automatically connects to Outlook, SharePoint, and other systems, gathers the data, analyzes it, and produces the requested summaries and replies.

### Measured improvements
Tests with pilot customers show:
- Market analyses: from 3-4 weeks down to 3-5 days
- Report creation: 62% time savings
- Prototype development: 70% faster
- Document analysis: 80% less time required

---

## 1. Core Functions of PowerOn

### 1.1 What PowerOn actually does

PowerOn is an AI-powered workflow engine that supports three different modes of operation:

**Dynamic workflows**: PowerOn adapts automatically to new tasks. A user can submit any request, and the system finds the best way to solve it.

**Action-plan workflows**: PowerOn plans complex tasks on its own. The system splits large projects into smaller steps and executes them automatically.

**Fixed business processes**: Companies can define standardized procedures that PowerOn always executes the same way, for example monthly reports or recurring market analyses.

### 1.2 Core functions

**Document analysis**: The system reads large documents (PDF, Word, Excel) and extracts the most important information. A 200-page contract is summarized automatically.

**Web research**: PowerOn searches the internet for current information on a topic and collects relevant data from various websites.

**Report creation**: Based on the collected data and documents, the system produces finished reports in various formats (PDF, Word, Excel).

**Code generation**: PowerOn can create simple programs and scripts to automate recurring tasks.

### 1.3 How the workflow operates

A user enters a task, for example "Analyze the competition in the e-mobility sector". PowerOn then automatically performs the following steps:

1. Searches the internet for current information about e-mobility companies
2. Analyzes the company's existing internal documents
3. Produces a structured report with the key findings
4. Makes the report available in various formats

### 1.4 Technical distinctions

**No size limits**: PowerOn can process documents of any size and produce an unlimited number of reports. The system works around the usual limits of AI systems through intelligent chunking (see the sketch below).
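As a minimal sketch of the chunking idea, assuming a plain word budget per chunk and a generic `summarize` callable standing in for any AI model call (the actual splitting logic is proprietary and not described in this document):

```python
# Minimal sketch of size-limit chunking: split a long document into
# overlapping word-window chunks, summarize each within the model's limit,
# then summarize the summaries. summarize() is a hypothetical stand-in.
def chunkText(text: str, maxWords: int = 2000, overlap: int = 100) -> list[str]:
    words = text.split()
    step = maxWords - overlap  # overlap preserves context across boundaries
    return [" ".join(words[i:i + maxWords]) for i in range(0, len(words), step)]


def summarizeLargeDocument(text: str, summarize) -> str:
    partials = [summarize(chunk) for chunk in chunkText(text)]
    # Reduce step: condense the partial summaries into one final summary.
    return summarize("\n\n".join(partials))
```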
**Automatic data-protection function**: Sensitive data is detected automatically and removed before processing. After the analysis, the data is reinserted, so the report is complete without disclosing any confidential information.

**Multiple AI providers**: PowerOn works with several AI systems simultaneously (OpenAI, Anthropic, Perplexity). If one system fails or is overloaded, another takes over automatically. This ensures stable operation and keeps the system independent of any single provider.

**Security**: Each company has its own isolated area. All activities are logged.

---

## 2. Why PowerOn Is Different

### 2.1 No technical limits

Other AI systems impose strict restrictions: at most 50 pages per document, no more than 10 reports per month. PowerOn does not have these limits. The system can analyze 1000-page contracts and produce hundreds of reports at no extra cost.

### 2.2 Automatic data protection

PowerOn automatically detects sensitive data such as names, addresses, or account numbers and removes it before processing. After the analysis, the data is reinserted. This yields complete reports without data-protection violations.

### 2.3 Stable and independent technology

PowerOn works with several AI providers simultaneously. If one system fails, another takes over automatically (a sketch of this failover pattern follows below). This reduces downtime and keeps the company independent of any single provider.
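A minimal sketch of this failover pattern, with the provider callables as hypothetical stand-ins for the real OpenAI/Anthropic/Perplexity connectors:

```python
# Minimal sketch of multi-provider failover: try providers in order and
# fall through to the next on any error. The callables are hypothetical
# stand-ins for real API clients.
from typing import Callable


def completeWithFailover(prompt: str, providers: list[Callable[[str], str]]) -> str:
    lastError = None
    for callProvider in providers:
        try:
            return callProvider(prompt)
        except Exception as e:  # e.g., rate limit, outage, timeout
            lastError = e
    raise RuntimeError("All AI providers failed") from lastError
```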
### 2.4 Direct integration into company systems

PowerOn connects directly to the systems companies use every day:
- **E-mail systems**: Outlook, Gmail for automatic e-mail analysis
- **Document management**: SharePoint, Google Drive for file access
- **Project management**: Jira, ClickUp for task management
- **Cloud storage**: OneDrive, Dropbox for file integration

Instead of employees manually copying data between different systems, PowerOn works with all of them directly.

### 2.5 Three different modes of operation

**Dynamic**: PowerOn adapts to every new task. A user can submit any request.

**Action-plan**: PowerOn plans complex projects on its own and splits them into manageable steps.

**Standardized**: Companies can define fixed procedures that PowerOn always executes the same way.

### 2.6 Easy to use

Employees do not need to know how to program. They simply state what they need, and PowerOn does the rest. A marketing manager can order a competitive analysis without any IT skills.

---

## 3. Market and Business Model

### 3.1 Target customers

PowerOn is aimed primarily at mid-sized companies with 50-500 employees. These companies often have complex data-processing requirements but lack the resources to develop their own AI systems.

Typical customers are consulting firms, banks, insurers, clinics, and other service providers that regularly have to produce analyses and reports.

### 3.2 Benefits for customers

#### Measured improvements
Based on tests with pilot customers:
- Market analyses are completed 73% faster (from 3-4 weeks down to 3-5 days)
- Report creation saves 62% of the time
- Prototype development is 70% faster
- Document analysis cuts the time required by 80%
- Cost savings of 5,000-8,000 euros per market analysis

#### Practical advantages
Employees need no programming skills to use PowerOn. The system works with existing data and systems without requiring major changes.

### 3.3 Revenue models

PowerOn plans several revenue streams:
1. Monthly per-user subscriptions
2. Usage-based billing for processing workloads
3. Individual licenses for large enterprises
4. Consulting and implementation services

Exact prices will be set based on market analyses. The goal is a gross margin of 75-85% after scaling.

---

## 4. Risks and Future-Proofing

### 4.1 Risks from better AI systems

#### Short-term risks (6-12 months)
As AI systems improve, simple tasks such as text generation could become commodities. This could reduce the value of individual AI functions. PowerOn, however, is designed to coordinate multiple AI systems, which remains valuable even as those systems improve.

#### Medium-term risks (1-3 years)
Individual AI systems could become capable of handling more tasks at once. This could reduce the need for coordination. PowerOn, however, focuses on specific enterprise requirements and on integration into existing systems, which remains valuable.

#### Long-term risks (3+ years)
Very advanced AI systems could become capable of solving complex tasks without coordination. PowerOn, however, focuses on the specific requirements of companies, including security, compliance, and integration, which remain important even with advanced AI systems.

### 4.2 What could become obsolete

Simple tasks such as basic text generation or web search could become standard features. Simple data analyses could also be automated.

### 4.3 What remains valuable

Coordinating different systems, integrating into company processes, and complying with security and data-protection regulations remain important even as AI systems improve. PowerOn is built so that it can adapt to new technologies without having to redevelop the entire system.

---

## 5. Financial Valuation

### 5.1 Current valuation of the components

PowerOn consists of several valuable components that can be valued individually:

**Frontend system**: €150,000-250,000
- Modular user interface that can be extended easily
- Works in all common browsers
- Adaptable to different company requirements

**Backend infrastructure**: €200,000-300,000
- Stable foundation for all functions
- Fast processing even with large data volumes
- Easy integration of new functions

**Workflow system**: €250,000-350,000
- Core function for coordinating different tasks
- Three different modes of operation (dynamic, action-plan, standardized)
- Automatic adaptation to new requirements

**Security and data-protection system**: €100,000-150,000
- Automatic detection and protection of sensitive data
- Various sign-in methods for companies
- Complete logging of all activities

**Data-processing engine**: €150,000-200,000
- Processing of documents of any size
- Intelligent chunking to work around AI limits
- Support for all common file formats

**Multi-agent coordination system**: €300,000-400,000
- Distinctive technology for coordinating different AI systems
- Automatic selection of the best AI provider for each task
- Stable execution even when individual systems fail

**Enterprise integration**: €200,000-300,000
- Adaptation to different industries and requirements
- Easy integration into existing company systems
- Scalable architecture for growing requirements

**Integration framework**: €150,000-200,000
- Connections to various AI providers (OpenAI, Anthropic, Perplexity)
- Direct integration into company systems (Outlook, SharePoint, Google Drive, Jira)
- Easy integration of new systems and providers
- Independence from individual providers

**Workflow management system**: €100,000-150,000
- Plan-act-observe-refine cycle for continuous improvement
- Real-time monitoring of work progress
- Automatic error handling and resumption

**Total valuation**: €1.6-2.4 million

### 5.2 Investment need

PowerOn needs investment funds to complete development and open up the market. The funds will mainly be used for product development, building a sales team, and infrastructure.

### 5.3 Growth potential

The system is designed to scale with growing requirements. The modular architecture makes it possible to add new functions and adapt the platform to different customer requirements.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6. Marktpotenzial und Ausstiegsmöglichkeiten
|
||||||
|
|
||||||
|
### 6.1 Marktpotenzial
|
||||||
|
|
||||||
|
Der Markt für KI-basierte Geschäftsanwendungen wächst schnell. Unternehmen suchen nach Lösungen, die komplexe Aufgaben automatisieren und die Effizienz steigern können. PowerOn positioniert sich in diesem wachsenden Markt.
|
||||||
|
|
||||||
|
### 6.2 Ausstiegsmöglichkeiten
|
||||||
|
|
||||||
|
Langfristig gibt es verschiedene Möglichkeiten für einen Ausstieg, darunter den Verkauf an größere Softwareunternehmen oder den Börsengang. Diese Optionen hängen von der Entwicklung des Unternehmens und des Marktes ab.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7. Fazit
|
||||||
|
|
||||||
|
### 7.1 Stärken von PowerOn
|
||||||
|
|
||||||
|
PowerOn bietet eine einzigartige Lösung für die Koordination verschiedener KI-Systeme. Das System ist darauf ausgelegt, sich an neue Technologien anzupassen, und bietet nachgewiesene Verbesserungen bei Geschäftsprozessen.
|
||||||
|
|
||||||
|
### 7.2 Risikofaktoren
|
||||||
|
|
||||||
|
Die schnelle Entwicklung der KI-Technologie stellt ein Risiko dar, da einfache Aufgaben möglicherweise obsolet werden. Der Wettbewerb durch größere Unternehmen und die Marktakzeptanz sind weitere Faktoren, die berücksichtigt werden müssen.
|
||||||
|
|
||||||
|
### 7.3 Investitionsbewertung
|
||||||
|
|
||||||
|
PowerOn befindet sich in einer frühen Entwicklungsphase mit einem funktionsfähigen Grundsystem. Das Potenzial für Wachstum ist vorhanden, aber es gibt auch erhebliche Risiken, die mit der Entwicklung neuer Technologien verbunden sind.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Dokument erstellt am 14. Oktober 2025*
|
||||||
|
*Version: 1.0*
|
||||||
|
*Autor: PowerOn Development Team*
|
||||||
175
demoData/knowledge-base/investor-summary.md
Normal file
@@ -0,0 +1,175 @@
# PowerOn AI Platform

## Investor Summary

### Market Positioning

The PowerOn AI Platform is an innovative enterprise solution for automating and optimizing complex business processes through a multi-agent AI approach. We position ourselves at the intersection of the fast-growing markets for:
- Artificial intelligence (2025 market volume: $190 billion)
- Business process automation (2025 market volume: $19.6 billion)
- Enterprise knowledge management (2025 market volume: $43 billion)

### Competitive Advantages

1. **Proprietary multi-agent technology**: Our platform orchestrates task-specific AI agents, producing clearly superior results compared to single-agent approaches.

2. **Model independence**: Integration with leading AI providers (OpenAI, Anthropic) without vendor lock-in, so we can always use the best models for specific tasks.

3. **Enterprise-ready**: Built with a multi-tenant architecture, comprehensive security features, and scalability for enterprise requirements.

4. **Customizable and extensible**: Modular architecture that enables continuous feature extensions and customer-specific adaptations.

5. **Advanced workflow orchestration**:
   - Intelligent coordination of multiple specialized agents
   - Real-time status monitoring and progress display
   - Robust error handling and resumption mechanisms
   - Seamless integration of file processing and document management

6. **Comprehensive enterprise features**:
   - Multi-tenant architecture with tenant management
   - Advanced user and permission management
   - Enterprise-grade security features
   - Scalable infrastructure

### Financial Highlights

- **Go-to-market strategy**: Initial focus on mid-sized companies in professional services, financial services, and healthcare.

- **Revenue model**: Combined SaaS subscription (per user/month) and usage-based billing (per processing unit).

- **Expected gross margin**: 75-85% after reaching scale.

- **Expected ARR in year 3**: €4.5 million with 150 enterprise customers.

- **Cost structure**:
  - 40% product development
  - 30% sales and marketing
  - 20% operations and support
  - 10% administration

### Growth Path

#### Short Term (12 Months)
- Launch of the core platform
- Building 3-5 key reference customers
- Development of industry-specific templates

#### Medium Term (24 Months)
- Expansion to an agent marketplace
- Integration of proprietary enterprise models
- International expansion

#### Long Term (36+ Months)
- Development of specialized industry solutions
- AI middleware for enterprises
- Strategic partnerships with enterprise software vendors

### Investment Requirements

The current funding target of CHF 2.5 million enables:
- Completing product development and reaching market readiness
- Building a sales and marketing team
- Securing strategic partnerships
- An 18-month runway to profitability

### Exit Potential

The team sees the following exit options:
1. Strategic acquisition by an enterprise software company (5-7 years)
2. Acquisition by a larger AI platform (3-5 years)
3. IPO upon reaching CHF 50+ million ARR (7-10 years)

### Extraction from ValueOn AG

Before an exit, the PowerOn AI Platform is to be extracted from ValueOn AG into a standalone organization:

1. **Compensation for expenses**:
   - Full reimbursement of all development costs borne
   - Assumption of infrastructure and operating costs
   - Indemnification for all investments made to date
   - Marketing & sales assets remain with ValueOn AG without compensation

2. **Key people**:
   - Crediting of the value created by each key person
   - Option of a payout or a share takeover
   - Individual agreements based on contribution and responsibility
   - Long-term retention through equity programs

3. **Investment capital**:
   - Raising the necessary capital at current market value
   - Accounting for extraction costs
   - Ensuring operating liquidity
   - Financing further growth

The extraction will be carried out once:
- The technical foundation is stable
- The first reference customers have been won
- The market positioning is clear
- The growth strategy is defined

### Market Value and Valuation

#### Current Value (June 2025)
Based on the current state of development and the technological foundation:

1. **Technological value**:
   - Base frontend architecture (modular, but still in development): CHF 0.15-0.25 million
   - Backend foundation (FastAPI, base interfaces): CHF 0.2-0.3 million
   - Workflow system (basic functionality): CHF 0.25-0.35 million

2. **Functional value**:
   - Basic workflow orchestration: CHF 0.1-0.15 million
   - Simple document processing: CHF 0.05-0.1 million
   - Basic user management: CHF 0.05-0.1 million

3. **Development potential**:
   - Extensible architecture: CHF 0.15-0.2 million
   - Modular structure: CHF 0.1-0.15 million
   - Foundation for future extensions: CHF 0.15-0.2 million

**Current total valuation**: CHF 1.2-1.8 million

This valuation is based on:
- The current state of development (frontend and backend)
- The existing basic functionality
- The modular base architecture
- The development potential

#### Value at End of 2025
Projected valuation based on:
- Completion of the core functionality
- First reference customers
- Extended workflow functions
- Improved user interface

**Projected valuation, end of 2025**: CHF 2-3 million

#### Value at End of 2026
Projected valuation based on:
- Complete multi-agent implementation
- Extended integrations
- A growing customer base
- Expected ARR of CHF 4.5 million

**Projected valuation, end of 2026**: CHF 4-6 million

The increase in value is driven by:

1. **Technological development**:
   - Completion of the agent implementation
   - Extension of the workflow functionality
   - Improvement of the integrations

2. **Market development**:
   - Building the customer base
   - Development of industry solutions
   - First international expansion

3. **Business development**:
   - Growing revenues
   - Improved margins
   - New business models

4. **Strategic positioning**:
   - Establishment in niche markets
   - Building partnerships
   - Development of proprietary technologies
799
demoData/knowledge-base/platform-overview.html
Normal file
@@ -0,0 +1,799 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <link rel="icon" href="/poweron-favicon.png" type="image/png">
  <title>PowerOn Platform - Big Picture | PowerON</title>
  <meta name="description" content="PowerON Platform Architecture - Big Picture for External Developers">
  <meta name="author" content="PowerON">

  <!-- Fonts -->
  <link rel="preconnect" href="https://fonts.googleapis.com">
  <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
  <link href="https://fonts.googleapis.com/css2?family=DM+Sans:ital,wght@0,400;0,500;0,700;1,400&display=swap" rel="stylesheet">

  <!-- Styles -->
  <link rel="stylesheet" href="doc_platform_big_picture.css">
</head>
<body>
  <div class="header">
    <div class="navbar">
      <a href="/" class="logo">
        <img src="logo2.png" alt="PowerON" class="logo-img" onerror="this.style.display='none'; this.nextElementSibling.style.display='inline';">
      </a>
      <nav>
        <span class="nav-title">Platform Architecture</span>
      </nav>
    </div>
  </div>

  <div class="container">
    <div class="hero">
      <h1>PowerOn Platform - Big Picture</h1>
      <p class="subtitle">Enterprise AI Workflow Platform with Integrated Data Privacy Neutralizer</p>
      <p class="intro">This document provides an overview of the PowerOn platform architecture, building blocks, and capabilities for external software developers who want to contribute to or integrate with the platform.</p>
    </div>

    <!-- Tabs Navigation -->
    <div class="tabs">
      <button class="tab-button active" onclick="openTab(event, 'overview')">Overview</button>
      <button class="tab-button" onclick="openTab(event, 'customer-story')">Customer Story</button>
      <button class="tab-button" onclick="openTab(event, 'workflows')">Workflows</button>
      <button class="tab-button" onclick="openTab(event, 'services')">Microservices</button>
      <button class="tab-button" onclick="openTab(event, 'rbac')">RBAC System</button>
      <button class="tab-button" onclick="openTab(event, 'ui')">UI Architecture</button>
      <button class="tab-button" onclick="openTab(event, 'big-picture')">Big Picture</button>
      <button class="tab-button" onclick="openTab(event, 'integration')">Integration</button>
    </div>

    <!-- Tab Content -->
    <div id="overview" class="tab-content active">
      <h2>Platform Overview</h2>

      <div class="section">
        <h3>Core Concept</h3>
        <p>PowerOn is a <strong>Multi-Agent AI Platform for Enterprise Workflows</strong> with an integrated data privacy neutralizer. The platform enables companies to accelerate their AI transformation without data privacy risks.</p>

        <div class="highlight-box">
          <h4>Key Value Propositions</h4>
          <ul>
            <li><strong>Data Privacy First:</strong> Integrated privacy neutralizer enables safe use of ChatGPT/Copilot without privacy risks</li>
            <li><strong>Unlimited Processing:</strong> No token limits - process documents of any size through intelligent chunking</li>
            <li><strong>Universal Integration:</strong> Seamless integration of all enterprise data sources</li>
            <li><strong>Workflow Automation:</strong> Configure workflows per customer journey with standard automation elements and AI components</li>
            <li><strong>Future-Proof Architecture:</strong> Automatically improves with better AI models and larger token limits</li>
            <li><strong>Plug & Play Architecture:</strong> Renderers and dynamic AI selection per intention (analyze, generate, web, plan, etc.)</li>
          </ul>
        </div>
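        <p>To give a flavor of the privacy neutralizer idea, here is a deliberately tiny sketch of pattern-based PII masking. It is illustrative only - the platform's actual neutralization service is far more involved (see the Microservices tab):</p>
        <pre><code># Toy pattern-based neutralization sketch (illustrative, not the real service)
import re

EMAIL = re.compile(r"[\w.+-]+@[\w-]+\.[\w.]+")
IBAN = re.compile(r"\b[A-Z]{2}\d{2}[A-Z0-9]{11,30}\b")

def neutralize(text: str) -> str:
    text = EMAIL.sub("[EMAIL]", text)
    return IBAN.sub("[IBAN]", text)

print(neutralize("Contact max@example.ch, IBAN CH9300762011623852957"))
# -> "Contact [EMAIL], IBAN [IBAN]"</code></pre>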
      </div>

      <div class="section">
        <h3>Architecture Layers</h3>
        <div class="architecture-diagram">
          <div class="layer">
            <h4>UI Layer (Playground)</h4>
            <p>React-based playground UI as entry point. Additional UIs (chatbots, customer UIs) can be easily integrated via REST API in React, JavaScript, or other languages.</p>
          </div>
          <div class="layer">
            <h4>API Layer</h4>
            <p>RESTful API providing full access to platform capabilities. Open API design allows external UIs and integrations.</p>
          </div>
          <div class="layer">
            <h4>Workflow Engine</h4>
            <p>Core orchestration engine managing tasks, actions, and state. Supports multiple execution modes (Learning, Actionplan, Automation).</p>
          </div>
          <div class="layer">
            <h4>Microservices Layer</h4>
            <p>Modular service architecture with specialized services for AI, data processing, security, and integrations.</p>
          </div>
          <div class="layer">
            <h4>Data Layer</h4>
            <p>Multi-tenant database with RBAC-based access control. Mandate isolation ensures secure data separation.</p>
          </div>
        </div>
      </div>

      <div class="section">
        <h3>Customer Journey → Workflow</h3>
        <p>For each customer journey, a workflow can be configured in the workflow editor where:</p>
        <ul>
          <li>Customers integrate their data sources</li>
          <li>Standard automation elements are available</li>
          <li>AI components can be used</li>
          <li>Workflows can be executed manually or automated (hourly/daily/weekly)</li>
        </ul>
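        <p>As an illustration, a configured workflow for one such journey might boil down to a small declarative object. This is a hedged sketch - the field and action names below are assumptions, not the platform's actual schema:</p>
        <pre><code># Hypothetical workflow configuration (field/action names are illustrative)
weekly_report_workflow = {
    "name": "weekly-market-report",
    "trigger": {"mode": "automation", "schedule": "weekly"},   # or manual execution
    "tasks": [
        {"name": "collect", "actions": ["sharepoint.searchFiles", "outlook.readEmails"]},
        {"name": "analyze", "actions": ["ai.analyze"]},
        {"name": "publish", "actions": ["generation.createDocument", "outlook.sendEmail"]},
    ],
}</code></pre>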
      </div>

      <div class="section">
        <h3>Plug & Play Architecture</h3>
        <div class="feature-grid">
          <div class="feature-card">
            <h4>Dynamic Renderers</h4>
            <p>Plug & play architecture for document renderers. Support for multiple formats (PDF, DOCX, XLSX, PPTX, HTML, Markdown, JSON, CSV, etc.) with easy extension capabilities.</p>
          </div>
          <div class="feature-card">
            <h4>Dynamic AI Selection</h4>
            <p>Intelligent AI model selection per intention type. The system automatically selects the best AI model based on the task: analysis, generation, web research, planning, etc.</p>
          </div>
        </div>
      </div>

      <div class="section">
        <h3>System Architecture Diagram</h3>
        <div class="diagram-image-container">
          <img src="doc_platform_01_platform_overview.jpg" alt="PowerON Platform Architecture Diagram" class="diagram-image">
        </div>
      </div>
    </div>

    <div id="customer-story" class="tab-content">
      <h2>Customer Story</h2>

      <div class="section">
        <h3>The Journey from Application-Centric to Data-Centric Work</h3>
        <p class="lead">PowerOn enables customers to transition from <strong>application-centric</strong> to <strong>data-centric</strong> work. This is a <strong>key differentiator</strong> that transforms how businesses operate.</p>
      </div>

      <div class="section">
        <h3>Step 1: Customer Journey Identification</h3>
        <div class="step-card">
          <div class="step-number">1</div>
          <div class="step-content">
            <h4>Identify Business Processes</h4>
            <p>Work with customers to identify their key customer journeys and business processes that can benefit from automation and AI.</p>
            <ul>
              <li>Document analysis workflows</li>
              <li>Email processing and routing</li>
              <li>Data extraction and transformation</li>
              <li>Report generation</li>
              <li>Customer communication workflows</li>
            </ul>
          </div>
        </div>
      </div>

      <div class="section">
        <h3>Step 2: MVP Integration with Focus on Data Privacy & Compliance</h3>
        <div class="step-card">
          <div class="step-number">2</div>
          <div class="step-content">
            <h4>Simple MVP Integration</h4>
            <p>Start with a simple MVP that integrates customer data sources with <strong>strong focus on data privacy and compliance</strong>:</p>
            <ul>
              <li><strong>Data Privacy Neutralizer:</strong> Automatic anonymization of sensitive data before AI processing</li>
              <li><strong>Compliance First:</strong> DSGVO/GDPR compliant processing from day one</li>
              <li><strong>Secure Connections:</strong> Encrypted connections to customer data sources (SharePoint, Google Drive, Outlook, etc.)</li>
              <li><strong>Mandate Isolation:</strong> Complete data separation between tenants</li>
              <li><strong>Audit Logging:</strong> Full traceability of all data access and processing</li>
            </ul>
            <p class="highlight-text">This step builds trust and demonstrates the platform's commitment to data security.</p>
          </div>
        </div>
      </div>

      <div class="section">
        <h3>Step 3: Pre-Processing Engine Deployment</h3>
        <div class="step-card">
          <div class="step-number">3</div>
          <div class="step-content">
            <h4>Standard API Pre-Processing</h4>
            <p>Deploy a pre-processing engine at the customer's location using a <strong>standard API</strong>:</p>
            <ul>
              <li><strong>On-Premise/Edge Processing:</strong> Data processing happens at the customer's location</li>
              <li><strong>Standard API:</strong> Consistent interface for all customers</li>
              <li><strong>Data Minimization:</strong> Only necessary data is sent to the platform</li>
              <li><strong>Local Neutralization:</strong> Privacy neutralization can happen before data leaves customer premises</li>
              <li><strong>Reduced Latency:</strong> Faster processing for large documents</li>
            </ul>
            <p class="highlight-text">This step further enhances data privacy and gives customers full control over their data processing.</p>
          </div>
        </div>
      </div>

      <div class="section">
        <h3>Step 4: Gradual Component Integration - The Transformation</h3>
        <div class="step-card">
          <div class="step-number">4</div>
          <div class="step-content">
            <h4>From Application-Centric to Data-Centric</h4>
            <p>Gradually integrate additional components until the customer works <strong>data-centrically</strong> instead of <strong>application-centrically</strong>:</p>

            <div class="transformation-comparison">
              <div class="comparison-box old">
                <h5>❌ Application-Centric (Old Way)</h5>
                <ul>
                  <li>Work within individual applications (Word, Excel, SharePoint, Outlook)</li>
                  <li>Manual data transfer between applications</li>
                  <li>Data silos in different systems</li>
                  <li>Workflows are application-bound</li>
                  <li>Difficult to automate across applications</li>
                </ul>
              </div>
              <div class="comparison-box new">
                <h5>✅ Data-Centric (PowerOn Way)</h5>
                <ul>
                  <li>Work with data directly, regardless of source application</li>
                  <li>Automatic data integration across all sources</li>
                  <li>Unified data view across all systems</li>
                  <li>Workflows span multiple applications seamlessly</li>
                  <li>Easy automation across entire data ecosystem</li>
                </ul>
              </div>
            </div>

            <p class="highlight-text"><strong>This transformation is a KEY DIFFERENTIATOR!</strong> Customers no longer think in terms of applications, but in terms of their data and business processes.</p>
          </div>
        </div>
      </div>

      <div class="section">
        <h3>Customer Journey Diagram</h3>
        <div class="diagram-image-container">
          <img src="doc_platform_02_customer_story.jpg" alt="Customer Story - Journey from Application-Centric to Data-Centric" class="diagram-image">
        </div>
      </div>
    </div>

    <div id="workflows" class="tab-content">
      <h2>Workflow System</h2>

      <div class="section">
        <h3>Core Concept: Tasks with Actions</h3>
        <p class="lead">The core building block is <strong>workflow elements: tasks with actions</strong>. Each workflow consists of tasks, and each task contains one or more actions that execute specific operations.</p>

        <div class="workflow-structure">
          <div class="workflow-item">
            <h4>Workflow</h4>
            <p><strong>Definition:</strong> Top-level container representing a complete customer journey or business process.</p>
            <p><strong>Purpose:</strong> Orchestrates multiple tasks to achieve a business goal.</p>
          </div>
          <div class="workflow-item">
            <h4>Task</h4>
            <p><strong>Definition:</strong> A logical step in the workflow.</p>
            <p><strong>Purpose:</strong> Groups related actions that work together to complete a sub-goal.</p>
          </div>
          <div class="workflow-item">
            <h4>Action</h4>
            <p><strong>Definition:</strong> Executable unit that performs a specific operation.</p>
            <p><strong>Purpose:</strong> Actions belong to methods (microservices) and are the atomic units of work.</p>
          </div>
        </div>
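        <p>To make the hierarchy concrete, the sketch below models it with plain Python dataclasses. This is illustrative only - the class and field names are assumptions, not the platform's real types:</p>
        <pre><code># Hypothetical model of the Workflow -> Task -> Action hierarchy
# (names are illustrative, not the platform's actual classes)
from dataclasses import dataclass, field

@dataclass
class Action:
    method: str        # owning method (microservice), e.g. "sharepoint"
    name: str          # action within the method, e.g. "searchFiles"
    params: dict = field(default_factory=dict)

@dataclass
class Task:
    name: str
    actions: list[Action] = field(default_factory=list)

@dataclass
class Workflow:
    name: str
    tasks: list[Task] = field(default_factory=list)</code></pre>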
      </div>

      <div class="section">
        <h3>Execution Modes</h3>
        <p class="lead">PowerOn supports three execution modes, each optimized for different use cases:</p>
        <div class="mode-grid">
          <div class="mode-card">
            <h4>Learning Mode</h4>
            <p><strong>Best for:</strong> Exploratory tasks with up to 5 steps</p>
            <p><strong>Approach:</strong> Iterative Plan-Act-Observe-Refine loop</p>
            <p><strong>Use Case:</strong> When the solution path is not fully known in advance</p>
          </div>
          <div class="mode-card">
            <h4>Actionplan Mode</h4>
            <p><strong>Best for:</strong> Structured, sequential processes</p>
            <p><strong>Approach:</strong> Batch planning with sequential execution</p>
            <p><strong>Use Case:</strong> When the workflow steps are well-defined</p>
          </div>
          <div class="mode-card">
            <h4>Automation Mode</h4>
            <p><strong>Best for:</strong> Repetitive, predefined workflows</p>
            <p><strong>Approach:</strong> Automated execution (scheduled or event-triggered)</p>
            <p><strong>Use Case:</strong> Production workflows that run automatically</p>
          </div>
        </div>
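        <p>Learning Mode's Plan-Act-Observe-Refine loop is compact enough to sketch. A minimal sketch, assuming a caller-supplied action runner - every name here is hypothetical; only the loop shape and the 5-step limit come from the description above:</p>
        <pre><code># Minimal Plan-Act-Observe-Refine loop (Learning Mode sketch, names hypothetical)
def run_learning_mode(goal: str, execute, max_steps: int = 5):
    """execute(step) -> (result, done) is a caller-supplied action runner."""
    step = f"first step towards: {goal}"                     # Plan
    result = None
    for _ in range(max_steps):                               # documented ~5-step limit
        result, done = execute(step)                         # Act
        if done:                                             # Observe
            break
        step = f"refined step after observing: {result}"     # Refine
    return result</code></pre>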
      </div>

      <div class="section">
        <h3>Available Workflow Methods</h3>
        <p class="lead">Workflow methods provide actions that can be executed within workflows. Each method exposes multiple actions accessible via <code>self.services.&lt;method&gt;.&lt;action&gt;</code>:</p>
        <ul>
          <li><strong>ai.*</strong> - AI operations (process, analyze, generate)</li>
          <li><strong>sharepoint.*</strong> - SharePoint integration (search, read, upload)</li>
          <li><strong>outlook.*</strong> - Outlook integration (read emails, send emails)</li>
          <li><strong>context.*</strong> - Context management (get context, set context)</li>
        </ul>
      </div>

      <div class="section">
        <h3>Workflow System Diagram</h3>
        <div class="diagram-image-container">
          <img src="doc_platform_03_workflow_system.jpg" alt="Workflow System - Structure, Execution Modes, and Available Methods" class="diagram-image">
        </div>
      </div>
    </div>

    <div id="services" class="tab-content">
      <h2>Microservices Architecture</h2>

      <div class="section">
        <h3>Service Access Pattern</h3>
        <p class="lead">All microservices are accessible via <code>self.services.&lt;serviceName&gt;</code>. Services follow a consistent access pattern and are organized into logical categories.</p>
      </div>

      <div class="section">
        <h3>Services Structure Tree</h3>
        <p>Complete overview of all available microservices:</p>

        <div class="services-tree">
          <div class="service-category">
            <h4>Core Services</h4>
            <ul>
              <li><code>self.services.chat</code> - Chat and conversation management
                <ul>
                  <li>Progress logging</li>
                  <li>Document management</li>
                  <li>Connection handling</li>
                </ul>
              </li>
              <li><code>self.services.workflow</code> - Workflow state and management</li>
              <li><code>self.services.utils</code> - Utility functions (timestamps, formatting, etc.)</li>
            </ul>
          </div>

          <div class="service-category">
            <h4>AI & Processing Services</h4>
            <ul>
              <li><code>self.services.ai</code> - AI model management and operations
                <ul>
                  <li>Model selection</li>
                  <li>Prompt processing</li>
                  <li>Response handling</li>
                </ul>
              </li>
              <li><code>self.services.generation</code> - Document generation
                <ul>
                  <li>Multiple formats (PDF, DOCX, XLSX, PPTX, HTML, Markdown, etc.)</li>
                  <li>Template-based rendering</li>
                  <li>JSON schema support</li>
                </ul>
              </li>
              <li><code>self.services.extraction</code> - Document extraction and processing
                <ul>
                  <li>Multiple extractors (PDF, DOCX, XLSX, PPTX, CSV, HTML, XML, JSON, Images, etc.)</li>
                  <li>Intelligent chunking</li>
                  <li>Merging strategies</li>
                </ul>
              </li>
              <li><code>self.services.neutralization</code> - Data privacy neutralization
                <ul>
                  <li>PII detection and anonymization</li>
                  <li>Pattern-based neutralization</li>
                  <li>Binary and text processing</li>
                </ul>
              </li>
            </ul>
          </div>

          <div class="service-category">
            <h4>Integration Services</h4>
            <ul>
              <li><code>self.services.sharepoint</code> - SharePoint integration
                <ul>
                  <li>Site discovery</li>
                  <li>File operations (read, upload, search)</li>
                  <li>Path resolution</li>
                </ul>
              </li>
              <li><code>self.services.web</code> - Web operations
                <ul>
                  <li>HTTP requests</li>
                  <li>Web scraping</li>
                  <li>API integration</li>
                </ul>
              </li>
              <li><code>self.services.ticket</code> - Ticket system integration
                <ul>
                  <li>Jira integration</li>
                  <li>ClickUp integration</li>
                  <li>Generic ticket operations</li>
                </ul>
              </li>
            </ul>
          </div>

          <div class="service-category">
            <h4>Security & Infrastructure</h4>
            <ul>
              <li><code>self.services.security</code> - Security operations
                <ul>
                  <li>Authentication</li>
                  <li>Authorization</li>
                  <li>Token management</li>
                </ul>
              </li>
            </ul>
          </div>
        </div>
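        <p>Of the processing services above, the extraction service's "intelligent chunking" is what lifts token limits. A minimal sketch of the underlying idea, assuming a rough 4-characters-per-token heuristic (the real service is more sophisticated and respects document structure):</p>
        <pre><code># Toy chunking sketch: split text so each chunk stays under a model's limit.
def chunk_text(text: str, max_tokens: int = 2000) -> list[str]:
    max_chars = max_tokens * 4                   # heuristic: ~4 chars per token
    chunks, current, size = [], [], 0
    for paragraph in text.split("\n\n"):         # prefer paragraph boundaries
        if size + len(paragraph) > max_chars and current:
            chunks.append("\n\n".join(current))
            current, size = [], 0
        current.append(paragraph)
        size += len(paragraph) + 2
    if current:
        chunks.append("\n\n".join(current))
    return chunks</code></pre>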
      </div>

      <div class="section">
        <h3>Code Examples</h3>
        <p>Examples of how to use services in workflow actions or methods:</p>
        <pre><code># In workflow actions or methods
result = await self.services.&lt;service&gt;.&lt;method&gt;(parameters)

# Example: Using AI service
response = await self.services.ai.process(prompt="Analyze this document", documents=[...])

# Example: Using SharePoint service
files = await self.services.sharepoint.searchFiles(pathQuery="sites/my-site/documents")

# Example: Using generation service
document = self.services.generation.createDocument(format="pdf", content={...})</code></pre>
      </div>

      <div class="section">
        <h3>Microservices Architecture Diagram</h3>
        <div class="diagram-image-container">
          <img src="doc_platform_04_microservice_architecture.jpg" alt="Microservices Architecture - Core Services, AI & Processing, Integration Services, and Security" class="diagram-image">
        </div>
      </div>
    </div>

    <div id="rbac" class="tab-content">
      <h2>RBAC System</h2>

      <div class="section">
        <h3>Overview</h3>
        <p class="lead">The Role-Based Access Control (RBAC) system provides <strong>complete UI configuration per tenant and user</strong>. It enables fine-grained control over data access, UI visibility, and resource availability.</p>

        <div class="feature-grid">
          <div class="feature-card">
            <h4>Data Access</h4>
            <p>Table and field-level permissions for database operations. Control who can read, create, update, or delete specific data.</p>
          </div>
          <div class="feature-card">
            <h4>UI Access</h4>
            <p>Component and feature visibility management. Configure exactly which UI elements each user or role can see.</p>
          </div>
          <div class="feature-card">
            <h4>Resource Access</h4>
            <p>System resource availability control. Manage access to AI models, actions, and other platform resources.</p>
          </div>
        </div>
      </div>

      <div class="section">
        <h3>Access Levels: Opening Logic</h3>
        <p class="lead">For DATA context, the system uses <strong>opening rights</strong> with four access levels. These levels determine what data a user can access:</p>
        <div class="access-levels">
          <div class="access-level">
            <h4>none (n)</h4>
            <p>No access - item is completely hidden/disabled</p>
          </div>
          <div class="access-level">
            <h4>my (m)</h4>
            <p>My records - only records created by the current user</p>
          </div>
          <div class="access-level">
            <h4>group (g)</h4>
            <p>Group records - records within the same mandate (group context)</p>
          </div>
          <div class="access-level">
            <h4>all (a)</h4>
            <p>All records - full access to all records in the mandate</p>
          </div>
        </div>
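        <p>A hedged sketch of what such rules could look like as plain data - the exact schema is an assumption; only the context/item/view ideas and the n/m/g/a levels come from this page:</p>
        <pre><code># Hypothetical rule objects (schema is illustrative; levels n/m/g/a are documented)
rules = [
    # Generic DATA rule: group-wide read, own-record updates
    {"context": "DATA", "item": None,             "view": True, "read": "g", "update": "m"},
    # Specific rule overrides the generic one for a single field
    {"context": "DATA", "item": "UserInDB.email", "view": True, "read": "m", "update": "n"},
    # UI rule: hide the voice settings feature entirely
    {"context": "UI",   "item": "playground.voice.settings", "view": False},
]</code></pre>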
      </div>

      <div class="section">
        <h3>View Logic: Open + Close</h3>
        <p class="lead">The <code>view</code> attribute controls visibility and enablement. This is the fundamental on/off switch for all RBAC contexts:</p>
        <ul>
          <li><strong>view: true</strong> - Item is visible/enabled</li>
          <li><strong>view: false</strong> - Item is hidden/disabled (regardless of other permissions)</li>
        </ul>
        <p><strong>Key Rule:</strong> Only objects with <code>view: true</code> are shown. This applies to:</p>
        <ul>
          <li><strong>DATA Context:</strong> Controls whether tables/fields are accessible</li>
          <li><strong>UI Context:</strong> Controls whether UI elements are visible</li>
          <li><strong>RESOURCE Context:</strong> Controls whether resources are available</li>
        </ul>
      </div>

      <div class="section">
        <h3>Rule Specificity & Hierarchy</h3>
        <p class="lead">The RBAC system uses a cascading hierarchy where more specific rules override generic ones:</p>
        <ol>
          <li><strong>Generic Rules</strong> (<code>item = null</code>) - Apply to all items in context</li>
          <li><strong>Specific Rules</strong> (<code>item = "table.field"</code> or <code>item = "ui.component.feature"</code>) - Override generic rules</li>
        </ol>
        <p><strong>Resolution Logic:</strong> Within a single role, the most specific rule wins. Across multiple roles, opening (union) logic applies - if ANY role enables something, it is enabled.</p>
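        <p>The resolution rule is small enough to sketch directly. Assuming rule objects shaped like the hypothetical ones in the Access Levels section, a minimal (illustrative, not production) resolver could look like this:</p>
        <pre><code># Illustrative resolver: specific beats generic within a role,
# union ("opening") logic across roles. The rule schema is hypothetical.
def item_visible(roles, context, item):
    for role_rules in roles:                       # check each role
        specific = [r for r in role_rules
                    if r["context"] == context and r.get("item") == item]
        generic = [r for r in role_rules
                   if r["context"] == context and r.get("item") is None]
        winner = specific[0] if specific else (generic[0] if generic else None)
        if winner and winner.get("view"):          # ANY role enabling wins
            return True
    return False</code></pre>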
      </div>

      <div class="section">
        <h3>Opening Rights Principle</h3>
        <p class="lead">For DATA context, read permission (R) is a prerequisite for create/update/delete operations (CUD). This ensures data integrity and proper access control:</p>
        <ul>
          <li>If Read = "n": No CUD operations allowed</li>
          <li>If Read = "m": CUD operations limited to "m" or "n"</li>
          <li>If Read = "g": CUD operations limited to "g", "m", or "n"</li>
          <li>If Read = "a": CUD operations can be "a", "g", "m", or "n"</li>
        </ul>
        <p><strong>Key Rule:</strong> You can ONLY create/update/delete if you have read right.</p>
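        <p>This prerequisite reduces to a simple ordering of the four levels. A minimal sketch, assuming the n &lt; m &lt; g &lt; a ordering implied by the list above:</p>
        <pre><code># Illustrative check: a CUD level is valid only if it does not exceed read.
LEVEL_ORDER = {"n": 0, "m": 1, "g": 2, "a": 3}

def cud_allowed(read_level: str, cud_level: str) -> bool:
    return LEVEL_ORDER[cud_level] &lt;= LEVEL_ORDER[read_level]

assert cud_allowed("g", "m")       # read "g" permits CUD up to "g"
assert not cud_allowed("m", "g")   # CUD may never exceed the read level</code></pre>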
      </div>

      <div class="section">
        <h3>Context Types</h3>
        <p class="lead">RBAC rules apply to three different context types, each serving a specific purpose:</p>
        <div class="context-grid">
          <div class="context-card">
            <h4>DATA</h4>
            <p>Database tables and fields. Controls read/create/update/delete permissions.</p>
            <p><strong>Example:</strong> <code>item: "UserInDB.email"</code></p>
          </div>
          <div class="context-card">
            <h4>UI</h4>
            <p>UI elements and features. Controls component visibility.</p>
            <p><strong>Example:</strong> <code>item: "playground.voice.settings"</code></p>
          </div>
          <div class="context-card">
            <h4>RESOURCE</h4>
            <p>System resources (AI models, actions, etc.). Controls resource availability.</p>
            <p><strong>Example:</strong> <code>item: "ai.model.anthropic"</code></p>
          </div>
        </div>
      </div>

      <div class="section">
        <h3>RBAC System Diagram</h3>
        <div class="diagram-image-container">
          <img src="doc_platform_05_rbac_system.jpg" alt="RBAC System - Contexts, Access Levels, View Logic, and Rule Hierarchy" class="diagram-image">
        </div>
      </div>
    </div>

    <div id="ui" class="tab-content">
      <h2>UI Architecture</h2>

      <div class="section">
        <h3>Playground UI</h3>
        <p class="lead">The <strong>Playground</strong> serves as the main entry point and demonstration UI. It's built with React and provides a comprehensive interface for workflow interaction:</p>
        <ul>
          <li>Chat interface for workflow interaction</li>
          <li>Workflow editor for configuration</li>
          <li>Document management</li>
          <li>Connection management</li>
          <li>Voice input/output capabilities</li>
        </ul>
      </div>

      <div class="section">
        <h3>RBAC-Driven UI Configuration</h3>
        <p class="lead">The UI is <strong>completely configurable via RBAC rules</strong>:</p>
        <ul>
          <li>Per tenant configuration</li>
          <li>Per user configuration</li>
          <li>Component-level visibility control</li>
          <li>Feature-level access control</li>
        </ul>
        <p>This allows customers to configure exactly the UI they need for their use case.</p>
      </div>

      <div class="section">
        <h3>External UI Integration</h3>
        <p class="lead">Additional UIs can be easily integrated via the REST API:</p>
        <ul>
          <li><strong>Chatbots:</strong> Build custom chatbots using the workflow API</li>
          <li><strong>Customer UIs:</strong> Create customer-specific interfaces in React, JavaScript, or other languages</li>
          <li><strong>Mobile Apps:</strong> Integrate via REST API from mobile applications</li>
          <li><strong>Third-Party Tools:</strong> Connect existing tools via webhooks and API</li>
        </ul>
        <p>All UI components communicate with the platform through the standardized REST API, ensuring consistent behavior and security.</p>
      </div>

      <div class="section">
        <h3>Available UI Components</h3>
        <p class="lead">The platform provides reusable UI components that can be configured via RBAC:</p>
        <ul>
          <li>Chat interface</li>
          <li>Document viewer/editor</li>
          <li>Workflow editor</li>
          <li>Connection manager</li>
          <li>Settings panels</li>
          <li>Dashboard widgets</li>
        </ul>
      </div>

      <div class="section">
        <h3>UI Architecture Diagram</h3>
        <div class="diagram-image-container">
          <img src="doc_platform_06_ui_architecture.jpg" alt="UI Architecture - RBAC-Driven Configuration, UI Components, UI Layer, and REST API" class="diagram-image">
        </div>
      </div>
    </div>

    <div id="big-picture" class="tab-content">
      <h2>Big Picture & Future Vision</h2>

      <div class="section">
        <h3>Vendor-Independent Platform</h3>
        <div class="vision-card">
          <h4>AI Model Independence</h4>
          <p>PowerOn is designed as a <strong>vendor-independent platform</strong> regarding AI models:</p>
          <ul>
            <li>Support for multiple AI providers (OpenAI, Anthropic, Google, Azure, etc.)</li>
            <li>Dynamic model selection based on task requirements</li>
            <li>Easy addition of new AI providers</li>
            <li>No vendor lock-in - customers can switch providers seamlessly</li>
          </ul>
        </div>

        <div class="vision-card">
          <h4>Connector Independence</h4>
          <p>Universal connector architecture supporting all major platforms:</p>
          <ul>
            <li><strong>Microsoft:</strong> SharePoint, Outlook, Teams, OneDrive, Azure</li>
            <li><strong>Google:</strong> Drive, Gmail, Workspace, Cloud</li>
            <li><strong>Amazon:</strong> AWS services, S3, etc.</li>
            <li><strong>Other:</strong> Jira, Slack, Salesforce, and many more</li>
          </ul>
          <p>Customers are not locked into a single vendor ecosystem.</p>
        </div>
      </div>

      <div class="section">
        <h3>Graphical Workflow Modeling</h3>
        <div class="vision-card">
          <h4>Visual Customer Journey Design</h4>
          <p>Future capability to <strong>graphically model workflows</strong> for customer journeys:</p>
          <ul>
            <li>Drag-and-drop workflow editor</li>
            <li>Visual representation of customer journeys</li>
            <li>Easy workflow modification without coding</li>
            <li>Template library for common workflows</li>
            <li>Workflow versioning and testing</li>
          </ul>
          <p>This makes workflow creation accessible to business users, not just developers.</p>
        </div>
      </div>

      <div class="section">
        <h3>MCP Integration in Customer Copilot</h3>
        <div class="vision-card">
          <h4>Microsoft Copilot Plugin Architecture</h4>
          <p>Integration of PowerOn actions as <strong>MCP (Model Context Protocol) plugins</strong> in the customer's Copilot:</p>
          <ul>
            <li><strong>Native Copilot Integration:</strong> PowerOn workflows accessible directly from Microsoft Copilot</li>
            <li><strong>Action Library:</strong> All PowerOn actions available as Copilot plugins</li>
            <li><strong>Seamless Experience:</strong> Customers use PowerOn capabilities without leaving Copilot</li>
            <li><strong>Enterprise Workflows:</strong> Complex workflows triggered from simple Copilot conversations</li>
            <li><strong>Data Privacy:</strong> All PowerOn privacy features work seamlessly in Copilot context</li>
          </ul>
          <p class="highlight-text">This enables customers to leverage PowerOn's powerful workflow capabilities directly from their familiar Copilot interface.</p>
        </div>
      </div>

      <div class="section">
        <h3>Platform Evolution</h3>
        <div class="vision-grid">
          <div class="vision-item">
            <h4>Today</h4>
            <ul>
              <li>REST API-based workflows</li>
              <li>Playground UI</li>
              <li>Multiple AI providers</li>
              <li>Standard connectors</li>
            </ul>
          </div>
          <div class="vision-item">
            <h4>Near Future</h4>
            <ul>
              <li>Graphical workflow editor</li>
              <li>MCP Copilot integration</li>
              <li>Enhanced pre-processing</li>
              <li>Advanced AI selection</li>
            </ul>
          </div>
          <div class="vision-item">
            <h4>Future</h4>
            <ul>
              <li>AI-powered workflow generation</li>
              <li>Multi-platform Copilot support</li>
              <li>Edge computing expansion</li>
              <li>Federated learning</li>
            </ul>
          </div>
        </div>
      </div>

      <div class="section">
        <h3>Big Picture & Future Vision Diagram</h3>
        <div class="diagram-image-container">
          <img src="doc_platform_07_big_picture_and_future_vision.jpg" alt="Big Picture & Future Vision - Platform Evolution from Today to Future" class="diagram-image">
        </div>
      </div>
    </div>

    <div id="integration" class="tab-content">
      <h2>Integration Guide</h2>

      <div class="section">
        <h3>REST API</h3>
        <p class="lead">The platform exposes a comprehensive REST API for all operations. This API serves as the primary integration point for external developers:</p>
        <ul>
          <li><strong>Workflow API:</strong> Create, execute, and manage workflows</li>
          <li><strong>Document API:</strong> Upload, download, and process documents</li>
          <li><strong>Connection API:</strong> Manage external connections (SharePoint, Outlook, etc.)</li>
          <li><strong>RBAC API:</strong> Manage roles and permissions</li>
          <li><strong>Options API:</strong> Dynamic options for UI components</li>
        </ul>
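        <p>A hedged example of driving the Workflow API from an external Python client. The endpoint path, payload fields, and auth scheme below are assumptions for illustration - consult the actual API reference for the real contract:</p>
        <pre><code># Illustrative external client call (endpoint and fields are hypothetical)
import requests

BASE = "https://poweron.example.com/api"            # placeholder base URL
headers = {"Authorization": "Bearer &lt;token&gt;"}  # placeholder credential

resp = requests.post(
    f"{BASE}/workflows/weekly-market-report/execute",
    headers=headers,
    json={"mode": "actionplan", "inputs": {"quarter": "Q3"}},
    timeout=30,
)
resp.raise_for_status()
print(resp.json())                                  # e.g. workflow run status</code></pre>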
      </div>

      <div class="section">
        <h3>Building Blocks for Developers</h3>
        <p class="lead">Developers can extend the platform by creating custom components in these areas:</p>
        <div class="building-blocks">
          <div class="block">
            <h4>Workflow Methods</h4>
            <p>Create custom workflow methods by extending <code>MethodBase</code> and registering actions (see the sketch after this overview).</p>
          </div>
          <div class="block">
            <h4>Services</h4>
            <p>Extend the services layer by creating new service modules following the existing pattern.</p>
          </div>
          <div class="block">
            <h4>Connectors</h4>
            <p>Build connectors for external systems (databases, APIs, services) using the connector interface.</p>
          </div>
          <div class="block">
            <h4>UI Components</h4>
            <p>Create React components that integrate with the REST API and respect RBAC rules.</p>
          </div>
        </div>
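        <p>A minimal sketch of the Workflow Methods building block, assuming a hypothetical registration mechanism - <code>MethodBase</code> is named above, but the import path, attribute names, and registration details shown here are assumptions:</p>
        <pre><code># Illustrative custom workflow method (details are assumptions, not the real API)
from gateway.modules.workflows.methods import MethodBase   # hypothetical import path

class GreetingMethod(MethodBase):
    """Would expose actions under self.services.greeting.* once registered."""
    name = "greeting"

    async def say_hello(self, user: str) -> str:
        # Actions may call other services, e.g. await self.services.ai.process(...)
        return f"Hello, {user}!"</code></pre>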
      </div>

      <div class="section">
        <h3>Development Workflow</h3>
        <p class="lead">Follow these steps to get started with platform development:</p>
        <ol>
          <li><strong>Understand the Architecture:</strong> Review this document and codebase structure</li>
          <li><strong>Set Up Development Environment:</strong> Clone repository and configure local environment</li>
          <li><strong>Choose Integration Point:</strong> Decide whether to extend workflows, services, or UI</li>
          <li><strong>Follow Patterns:</strong> Use existing code as reference for consistent implementation</li>
          <li><strong>Test with RBAC:</strong> Ensure your changes respect RBAC rules</li>
          <li><strong>Document:</strong> Update documentation for your changes</li>
        </ol>
      </div>

      <div class="section">
        <h3>Key Integration Points</h3>
        <p class="lead">Main directories where developers can add new functionality:</p>
        <ul>
          <li><code>gateway/modules/workflows/methods/</code> - Add new workflow methods</li>
          <li><code>gateway/modules/services/</code> - Add new microservices</li>
          <li><code>gateway/modules/connectors/</code> - Add new connectors</li>
          <li><code>gateway/modules/routes/</code> - Add new API endpoints</li>
          <li><code>gateway/modules/features/</code> - Add new features</li>
        </ul>
      </div>
    </div>
  </div>

  <div class="footer">
    <div class="container">
      <p>© 2025 PowerON. All rights reserved.</p>
      <p>Platform Architecture Documentation v1.0</p>
    </div>
  </div>

  <script>
    function openTab(evt, tabName) {
      var i, tabcontent, tablinks;
      tabcontent = document.getElementsByClassName("tab-content");
      for (i = 0; i < tabcontent.length; i++) {
        tabcontent[i].classList.remove("active");
      }
      tablinks = document.getElementsByClassName("tab-button");
      for (i = 0; i < tablinks.length; i++) {
        tablinks[i].classList.remove("active");
      }
      document.getElementById(tabName).classList.add("active");
      evt.currentTarget.classList.add("active");
    }
  </script>
</body>
</html>
880
demoData/knowledge-base/referenzen.html
Normal file
@@ -0,0 +1,880 @@
<!DOCTYPE html>
<html lang="de">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>PowerOn Kunden und Nutzereferenzen</title>
<style>
/* PowerOn.swiss Stylesheet */
/* Tailwind CSS Custom Properties & Design Tokens */

:root {
/* Locale */
-webkit-locale: "de";

/* Tailwind Transform Properties */
--tw-border-spacing-x: 0;
--tw-border-spacing-y: 0;
--tw-translate-x: 0;
--tw-translate-y: 0;
--tw-rotate: 0;
--tw-skew-x: 0;
--tw-skew-y: 0;
--tw-scale-x: 1;
--tw-scale-y: 1;
--tw-pan-x: ;
--tw-pan-y: ;
--tw-pinch-zoom: ;
--tw-scroll-snap-strictness: proximity;

/* Tailwind Gradient Properties */
--tw-gradient-from-position: ;
--tw-gradient-via-position: ;
--tw-gradient-to-position: ;

/* Tailwind Typography Properties */
--tw-ordinal: ;
--tw-slashed-zero: ;
--tw-numeric-figure: ;
--tw-numeric-spacing: ;
--tw-numeric-fraction: ;

/* Tailwind Ring/Shadow Properties */
--tw-ring-inset: ;
--tw-ring-offset-width: 0px;
--tw-ring-offset-color: #fff;
--tw-ring-color: rgb(59 130 246 / .5);
--tw-ring-offset-shadow: 0 0 #0000;
--tw-ring-shadow: 0 0 #0000;
--tw-shadow: 0 0 #0000;
--tw-shadow-colored: 0 0 #0000;

/* Tailwind Filter Properties */
--tw-blur: ;
--tw-brightness: ;
--tw-contrast: ;
--tw-grayscale: ;
--tw-hue-rotate: ;
--tw-invert: ;
--tw-saturate: ;
--tw-sepia: ;
--tw-drop-shadow: ;

/* Tailwind Backdrop Filter Properties */
--tw-backdrop-blur: ;
--tw-backdrop-brightness: ;
--tw-backdrop-contrast: ;
--tw-backdrop-grayscale: ;
--tw-backdrop-hue-rotate: ;
--tw-backdrop-invert: ;
--tw-backdrop-opacity: ;
--tw-backdrop-saturate: ;
--tw-backdrop-sepia: ;

/* Tailwind Container Properties */
--tw-contain-size: ;
--tw-contain-layout: ;
--tw-contain-paint: ;
--tw-contain-style: ;

/* Design System Colors (HSL Format) */
/* Base Colors */
--background: 0 0% 100%;
--foreground: 222.2 84% 4.9%;

/* Card Colors */
--card: 0 0% 100%;
--card-foreground: 222.2 84% 4.9%;

/* Popover Colors */
--popover: 0 0% 100%;
--popover-foreground: 222.2 84% 4.9%;

/* Primary Colors (Red Brand Color) */
--primary: 0 84% 42%;
--primary-foreground: 0 0% 100%;

/* Secondary Colors */
--secondary: 210 40% 96.1%;
--secondary-foreground: 222.2 47.4% 11.2%;

/* Muted Colors */
--muted: 210 40% 96.1%;
--muted-foreground: 215.4 16.3% 46.9%;

/* Accent Colors */
--accent: 210 40% 96.1%;
--accent-foreground: 222.2 47.4% 11.2%;

/* Destructive Colors */
--destructive: 0 84% 42%;
--destructive-foreground: 210 40% 98%;

/* Custom Red Colors */
--red-primary: 0 84% 42%;
--red-primary-hover: 0 53% 23%;
--red-primary-light: 0 84% 60%;
--red-background-light: 0 84% 97%;

/* Border & Input Colors */
--border: 214.3 31.8% 91.4%;
--input: 214.3 31.8% 91.4%;
--ring: 222.2 84% 4.9%;

/* Tool/Brand Specific Colors */
--tool-dark: 0 0% 9.4%;
--tool-dark-light: 0 0% 16.5%;
--tool-dark-medium: 0 0% 12.2%;
--tool-beige: 43 12% 73.7%;
--tool-beige-light: 43 20% 80%;
--tool-beige-dark: 43 8% 67%;
--tool-orange: 9 90% 60.6%;
--tool-orange-light: 9 85% 65%;
--tool-orange-dark: 9 94% 53%;

/* Border Radius */
--radius: 0.5rem;

/* Sidebar Colors */
--sidebar-background: 0 0% 98%;
--sidebar-foreground: 240 5.3% 26.1%;
--sidebar-primary: 240 5.9% 10%;
--sidebar-primary-foreground: 0 0% 98%;
--sidebar-accent: 240 4.8% 95.9%;
--sidebar-accent-foreground: 240 5.9% 10%;
--sidebar-border: 220 13% 91%;
--sidebar-ring: 217.2 91.2% 59.8%;
}

/* Base Reset */
*,
*::before,
*::after {
box-sizing: border-box;
border-width: 0;
border-style: solid;
border-color: hsl(var(--border));
}

/* Body Base Styles */
body {
line-height: 1.5;
-webkit-text-size-adjust: 100%;
tab-size: 4;
font-family: 'DM Sans', sans-serif;
font-feature-settings: normal;
font-variation-settings: normal;
-webkit-tap-highlight-color: transparent;
background-color: hsl(var(--background));
color: hsl(var(--foreground));
}

/* Utility Classes für die Farben */
.bg-background { background-color: hsl(var(--background)); }
.bg-primary { background-color: hsl(var(--primary)); }
.bg-secondary { background-color: hsl(var(--secondary)); }
.bg-muted { background-color: hsl(var(--muted)); }
.bg-card { background-color: hsl(var(--card)); }

.text-foreground { color: hsl(var(--foreground)); }
.text-primary { color: hsl(var(--primary)); }
.text-primary-foreground { color: hsl(var(--primary-foreground)); }
.text-muted-foreground { color: hsl(var(--muted-foreground)); }

.border-border { border-color: hsl(var(--border)); }

/* Custom Red Button */
.btn-red-primary {
background-color: hsl(var(--red-primary));
color: hsl(var(--primary-foreground));
border-radius: var(--radius);
}

.btn-red-primary:hover {
background-color: hsl(var(--red-primary-hover));
}

/* Tool Colors */
.bg-tool-dark { background-color: hsl(var(--tool-dark)); }
.bg-tool-beige { background-color: hsl(var(--tool-beige)); }
.bg-tool-orange { background-color: hsl(var(--tool-orange)); }

/* Custom Layout Styles */
.container {
max-width: 1200px;
margin: 0 auto;
padding: 0 20px;
}

.header {
background: hsl(var(--red-primary));
color: hsl(var(--primary-foreground));
padding: 60px 0;
text-align: center;
}

.logo {
font-size: 2.5rem;
font-weight: 700;
margin-bottom: 20px;
}

.logo a {
color: hsl(var(--primary-foreground));
text-decoration: none;
}

.subtitle {
font-size: 1.2rem;
opacity: 0.9;
max-width: 600px;
margin: 0 auto;
}

.main-content {
padding: 60px 0;
}

.section {
margin-bottom: 50px;
background: hsl(var(--card));
border-radius: var(--radius);
padding: 40px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05);
border: 1px solid hsl(var(--border));
}

.section h2 {
color: hsl(var(--foreground));
font-size: 2rem;
margin-bottom: 30px;
border-bottom: 3px solid hsl(var(--red-primary));
padding-bottom: 10px;
}

.section h3 {
color: hsl(var(--foreground));
font-size: 1.4rem;
margin: 30px 0 15px 0;
display: flex;
align-items: center;
}

.section h3::before {
content: "▶";
color: hsl(var(--red-primary));
margin-right: 10px;
font-size: 0.8rem;
}

.feature-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
gap: 30px;
margin-top: 30px;
}

.feature-card {
background: hsl(var(--muted));
border: 1px solid hsl(var(--border));
border-radius: var(--radius);
padding: 25px;
transition: transform 0.2s ease, box-shadow 0.2s ease;
}

.feature-card:hover {
transform: translateY(-2px);
box-shadow: 0 8px 25px rgba(0, 0, 0, 0.1);
}

.feature-card h3 {
color: hsl(var(--foreground));
margin-bottom: 15px;
font-size: 1.2rem;
}

.feature-card ul {
list-style: none;
padding: 0;
}

.feature-card li {
padding: 8px 0;
position: relative;
padding-left: 20px;
}

.feature-card li::before {
content: "✓";
color: hsl(var(--red-primary));
font-weight: bold;
position: absolute;
left: 0;
}

.use-case-card {
background: hsl(var(--card));
border: 2px solid hsl(var(--red-primary));
border-radius: var(--radius);
padding: 30px;
margin-bottom: 30px;
transition: transform 0.2s ease, box-shadow 0.2s ease;
}

.use-case-card:hover {
transform: translateY(-2px);
box-shadow: 0 8px 25px rgba(0, 0, 0, 0.1);
}

.use-case-title {
color: hsl(var(--red-primary));
font-size: 1.3rem;
font-weight: 700;
margin-bottom: 20px;
display: flex;
align-items: center;
}

.use-case-title::before {
content: attr(data-icon);
font-size: 1.5rem;
margin-right: 10px;
}

.use-case-content {
margin-bottom: 20px;
}

.use-case-content h4 {
color: hsl(var(--foreground));
font-size: 1.1rem;
font-weight: 600;
margin: 15px 0 8px 0;
}

.use-case-content p {
color: hsl(var(--muted-foreground));
margin-bottom: 10px;
line-height: 1.6;
}

.process-flow {
background: hsl(var(--red-background-light));
border: 1px solid hsl(var(--red-primary-light));
border-radius: var(--radius);
padding: 20px;
margin: 20px 0;
overflow-x: auto;
}

.process-flow-title {
color: hsl(var(--red-primary));
font-weight: 600;
margin-bottom: 15px;
font-size: 1rem;
}

.process-steps {
display: flex;
align-items: center;
gap: 10px;
flex-wrap: wrap;
}

.process-step {
background: hsl(var(--red-primary));
color: hsl(var(--primary-foreground));
padding: 8px 16px;
border-radius: 20px;
font-size: 0.9rem;
font-weight: 500;
white-space: nowrap;
transition: transform 0.2s ease;
}

.process-step:hover {
transform: scale(1.05);
}

.process-arrow {
color: hsl(var(--red-primary));
font-size: 1.2rem;
font-weight: bold;
}

.results {
background: hsl(var(--muted));
color: hsl(var(--foreground));
}

.results h2 {
color: hsl(var(--foreground));
border-bottom-color: hsl(var(--red-primary));
}

.results h3 {
color: hsl(var(--foreground));
}

.results .feature-card {
background: hsl(var(--card));
border-color: hsl(var(--border));
color: hsl(var(--foreground));
}

.results .feature-card h3 {
color: hsl(var(--foreground));
}

.results .feature-card li::before {
color: hsl(var(--red-primary));
}

.approach {
background: hsl(var(--red-background-light));
color: hsl(var(--foreground));
}

.approach h2 {
color: hsl(var(--foreground));
border-bottom-color: hsl(var(--red-primary));
}

.approach h3 {
color: hsl(var(--foreground));
}

.approach .feature-card {
background: hsl(var(--card));
border-color: hsl(var(--red-primary));
color: hsl(var(--foreground));
}

.approach .feature-card h3 {
color: hsl(var(--foreground));
}

.approach .feature-card li::before {
color: hsl(var(--red-primary));
}

.cta {
background: hsl(var(--muted));
color: hsl(var(--foreground));
}

.cta h2 {
color: hsl(var(--foreground));
border-bottom-color: hsl(var(--red-primary));
}

.cta h3 {
color: hsl(var(--foreground));
}

.cta .feature-card {
background: hsl(var(--card));
border-color: hsl(var(--border));
color: hsl(var(--foreground));
}

.cta .feature-card h3 {
color: hsl(var(--foreground));
}

.cta .feature-card li::before {
color: hsl(var(--red-primary));
}

.note {
background: hsl(var(--red-background-light));
border: 1px solid hsl(var(--red-primary-light));
border-radius: var(--radius);
padding: 20px;
margin-top: 30px;
color: hsl(var(--red-primary-hover));
}

.note::before {
content: "ℹ️ ";
font-weight: bold;
}

.footer {
background: hsl(var(--tool-dark));
color: hsl(var(--primary-foreground));
text-align: center;
padding: 40px 0;
}

.footer a {
color: hsl(var(--red-primary-light));
text-decoration: none;
}

.footer a:hover {
text-decoration: underline;
}

@media (max-width: 768px) {
.header {
padding: 40px 0;
}

.logo {
font-size: 2rem;
}

.section {
padding: 25px;
margin-bottom: 30px;
}

.feature-grid {
grid-template-columns: 1fr;
gap: 20px;
}

.process-steps {
flex-direction: column;
align-items: stretch;
}

.process-arrow {
transform: rotate(90deg);
}
}
</style>
</head>
<body>
<div class="header">
<div class="container">
<div class="logo">
<a href="https://poweron.swiss/">PowerOn</a>
</div>
<div class="subtitle">
Kunden und Nutzereferenzen (neutralisiert)
</div>
<div style="font-size: 0.9rem; margin-top: 10px; opacity: 0.8;">
Kurzüberblick über realisierte PowerOn-Leistungen ohne Kundennennungen
</div>
</div>
</div>

<div class="main-content">
<div class="container">
<div class="section">
<h2>Leistungsbausteine</h2>
<div class="feature-grid">
<div class="feature-card">
<h3>Impact Sessions</h3>
<ul>
<li>Orientierung für Entscheiderinnen und Entscheider</li>
<li>Klärung Nutzen, Risiken, nächste Schritte</li>
</ul>
</div>

<div class="feature-card">
<h3>Deep Dives & Academy-Module</h3>
<ul>
<li>Hands-on Training mit echten Business Cases</li>
<li>Transfer in konkrete Arbeitsabläufe</li>
</ul>
</div>

<div class="feature-card">
<h3>Workshops / Prototyping</h3>
<ul>
<li>Definition von Use Cases und KPI</li>
<li>Rapid Prototyping bis funktionsfähiges MVP</li>
</ul>
</div>

<div class="feature-card">
<h3>Transformation Labs</h3>
<ul>
<li>Begleitung bis Umsetzung und Go-Live</li>
<li>Skalierung und Betrieb</li>
</ul>
</div>
</div>
</div>

<div class="section">
<h2>Referenz-Use-Cases (ohne Kundendaten)</h2>

<div class="use-case-card">
<div class="use-case-title" data-icon="🔄">Prozessautomatisierung und KPI-Produkt</div>
<div class="use-case-content">
<h4>Kontext</h4>
<p>Hoher manueller Aufwand und intransparente Kosten in Spesen und Controlling bremsen das Tagesgeschäft</p>

<h4>Ziel</h4>
<p>Operative Kosten senken und Steuerungsfähigkeit erhöhen durch standardisierte, schnellere Freigaben</p>

<h4>Lösung</h4>
<p>End‑to‑End Workflow in PowerOn mit automatischer Belegerfassung, Prüfung und KPI‑Auswertung</p>

<h4>Ergebnis</h4>
<p>Kürzere Durchlaufzeiten und jederzeit transparente Kennzahlen</p>
</div>
<div class="process-flow">
<div class="process-flow-title">Prozessablauf:</div>
<div class="process-steps">
<div class="process-step">Beleg</div>
<div class="process-arrow">→</div>
<div class="process-step">Erfassen</div>
<div class="process-arrow">→</div>
<div class="process-step">Validieren</div>
<div class="process-arrow">→</div>
<div class="process-step">Genehmigen</div>
<div class="process-arrow">→</div>
<div class="process-step">Buchen</div>
<div class="process-arrow">→</div>
<div class="process-step">KPI‑Dashboard</div>
</div>
</div>
</div>

<div class="use-case-card">
<div class="use-case-title" data-icon="🧱">Enterprise-Features skalieren für bestehende Lösung</div>
<div class="use-case-content">
<h4>Kontext</h4>
<p>Wachsende Nutzerzahlen und steigende Anforderungen gefährden die wahrgenommene Servicequalität</p>

<h4>Ziel</h4>
<p>Verlässliche Skalierbarkeit sicherstellen und Kundenzufriedenheit schützen</p>

<h4>Lösung</h4>
<p>Rollen- und Berechtigungskonzept erweitern, Performance optimieren und Betriebsprozesse festigen</p>

<h4>Ergebnis</h4>
<p>Hohe Stabilität, schnellere Antwortzeiten und sicherer Betrieb</p>
</div>
<div class="process-flow">
<div class="process-flow-title">Prozessablauf:</div>
<div class="process-steps">
<div class="process-step">Users</div>
<div class="process-arrow">→</div>
<div class="process-step">Auth/Rollen</div>
<div class="process-arrow">→</div>
<div class="process-step">Services</div>
<div class="process-arrow">→</div>
<div class="process-step">Queue/Jobs</div>
<div class="process-arrow">→</div>
<div class="process-step">Monitoring</div>
<div class="process-arrow">→</div>
<div class="process-step">SLO/SLA</div>
</div>
</div>
</div>

<div class="use-case-card">
<div class="use-case-title" data-icon="🧭">Management-Alignment und Entscheidvorbereitung</div>
<div class="use-case-content">
<h4>Kontext</h4>
<p>Strategische Weichenstellung für KI erfordert breite Abstützung und klare Investitionssicht</p>

<h4>Ziel</h4>
<p>Entscheidungssicherheit auf GL‑Ebene schaffen und Investitionen fokussieren</p>

<h4>Lösung</h4>
<p>Kompakte Impact‑Session mit Variantenvergleich und klarer Roadmap</p>

<h4>Ergebnis</h4>
<p>Verbindliche Entscheide zu Scope, Budget und Zeitplan</p>
</div>
<div class="process-flow">
<div class="process-flow-title">Prozessablauf:</div>
<div class="process-steps">
<div class="process-step">Ausgangslage</div>
<div class="process-arrow">→</div>
<div class="process-step">Optionen</div>
<div class="process-arrow">→</div>
<div class="process-step">Kosten/Nutzen</div>
<div class="process-arrow">→</div>
<div class="process-step">Roadmap</div>
<div class="process-arrow">→</div>
<div class="process-step">Entscheid (GL)</div>
</div>
</div>
</div>

<div class="use-case-card">
<div class="use-case-title" data-icon="🧩">Tech‑Workshops zu Multi‑Agent‑Architektur</div>
<div class="use-case-content">
<h4>Kontext</h4>
<p>Unterschiedliche Vorgehensweisen und Standards verlangsamen Delivery und erschweren Skalierung</p>

<h4>Ziel</h4>
<p>Gemeinsame Spielregeln schaffen, um Time‑to‑Value zu verkürzen und konsistente Qualität sicherzustellen</p>

<h4>Lösung</h4>
<p>Klare Architekturprinzipien, verbindliche Standards und kollaborative Working Agreements</p>

<h4>Ergebnis</h4>
<p>Einheitliche Regeln, eindeutige Verantwortlichkeiten und eine belastbare Sprint‑Roadmap</p>
</div>
<div class="process-flow">
<div class="process-flow-title">Prozessablauf:</div>
<div class="process-steps">
<div class="process-step">Pain Points</div>
<div class="process-arrow">→</div>
<div class="process-step">Prinzipien</div>
<div class="process-arrow">→</div>
<div class="process-step">Standards</div>
<div class="process-arrow">→</div>
<div class="process-step">Working Agreements</div>
<div class="process-arrow">→</div>
<div class="process-step">Sprint‑Roadmap</div>
</div>
</div>
</div>

<div class="use-case-card">
<div class="use-case-title" data-icon="📊">Data & Analytics Demo / Reporting</div>
<div class="use-case-content">
<h4>Kontext</h4>
<p>Entscheidungen werden mit Bauchgefühl statt mit einheitlichen Zahlen getroffen</p>

<h4>Ziel</h4>
<p>Entscheidungen im Fachbereich konsequent datenbasiert treffen</p>

<h4>Lösung</h4>
<p>Schlanke Datenaufbereitung mit PowerOn‑Pipelines und Visualisierung im BI‑Tool</p>

<h4>Ergebnis</h4>
<p>Entscheidungsreife KPIs auf einen Blick</p>
</div>
<div class="process-flow">
<div class="process-flow-title">Prozessablauf:</div>
<div class="process-steps">
<div class="process-step">Datenquellen</div>
<div class="process-arrow">→</div>
<div class="process-step">Bereinigen/Joinen</div>
<div class="process-arrow">→</div>
<div class="process-step">KPIs berechnen</div>
<div class="process-arrow">→</div>
<div class="process-step">Dashboard (BI)</div>
</div>
</div>
</div>

<div class="use-case-card">
<div class="use-case-title" data-icon="🛠️">Code‑Modernisierung und Analyse</div>
<div class="use-case-content">
<h4>Kontext</h4>
<p>Veraltete Codebasis bremst Releases, erhöht Betriebsrisiken und erschwert neue Features</p>

<h4>Ziel</h4>
<p>Risiken in Legacy‑Code reduzieren und Zukunftsfähigkeit herstellen</p>

<h4>Lösung</h4>
<p>Systematische Code‑Analyse mit klaren Migrationspfaden und schnellen Verbesserungen</p>

<h4>Ergebnis</h4>
<p>Priorisierte Massnahmen mit messbarem Risikoabbau</p>
</div>
<div class="process-flow">
<div class="process-flow-title">Prozessablauf:</div>
<div class="process-steps">
<div class="process-step">Systeme</div>
<div class="process-arrow">→</div>
<div class="process-step">Code‑Analyse</div>
<div class="process-arrow">→</div>
<div class="process-step">Risiken bewerten</div>
<div class="process-arrow">→</div>
<div class="process-step">Migrationspfade</div>
<div class="process-arrow">→</div>
<div class="process-step">Quick Wins</div>
<div class="process-arrow">→</div>
<div class="process-step">Stabiler Release</div>
</div>
</div>
</div>
</div>

<div class="section results">
<h2>Typische Resultate</h2>
<div class="feature-grid">
<div class="feature-card">
<h3>Effizienzsteigerung</h3>
<ul>
<li>30–70% Zeiteinsparung in Zielprozessen (je nach Ausgangslage)</li>
<li>Schnellere Entscheide dank standardisierten Artefakten und Dashboards</li>
</ul>
</div>

<div class="feature-card">
<h3>Risikoreduktion</h3>
<ul>
<li>Reduzierte Betriebsrisiken durch klare Architektur- und Qualitätsstandards</li>
<li>Höhere Akzeptanz durch Einbindung von Stakeholdern früh im Prozess</li>
</ul>
</div>
</div>
</div>

<div class="section approach">
<h2>Vorgehen (Kurz)</h2>
<div class="feature-grid">
<div class="feature-card">
<h3>1. Discovery</h3>
<ul>
<li>Ziele, Ist‑Prozess, Datenlage</li>
</ul>
</div>

<div class="feature-card">
<h3>2. Prototyp</h3>
<ul>
<li>Schlanker End‑to‑End‑Flow mit messbarem Nutzen</li>
</ul>
</div>

<div class="feature-card">
<h3>3. Skalierung</h3>
<ul>
<li>Security, Performance, Betrieb</li>
</ul>
</div>

<div class="feature-card">
<h3>4. Transition</h3>
<ul>
<li>Übergabe oder Betrieb durch PowerOn‑Team</li>
</ul>
</div>
</div>
</div>

<div class="section cta">
<h2>Gemeinsamer Start</h2>
<div class="feature-grid">
<div class="feature-card">
<h3>Vorbereitung</h3>
<ul>
<li>Use‑Case shortlist definieren</li>
<li>2‑h Impact‑Session terminieren</li>
</ul>
</div>

<div class="feature-card">
<h3>Umsetzung</h3>
<ul>
<li>MVP‑Scope und Erfolgskriterien festlegen</li>
<li>Sprint‑Planung starten</li>
</ul>
</div>
</div>
</div>
</div>
</div>

<div class="footer">
<div class="container">
<p>© 2025 <a href="https://poweron.swiss/">PowerOn</a> – Intelligente Workflow-Plattform</p>
</div>
</div>
</body>
</html>
57
demoData/neutralizer/_generateTenantDossierPdf.py
Normal file
@@ -0,0 +1,57 @@
"""Generate tenant-dossier.pdf for neutralization demo. Run: python _generateTenantDossierPdf.py
|
||||||
|
|
||||||
|
Uses ReportLab so the PDF opens reliably in all viewers (stdlib-only PDFs are fragile).
|
||||||
|
"""
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from reportlab.lib.pagesizes import A4
|
||||||
|
from reportlab.pdfgen import canvas
|
||||||
|
|
||||||
|
|
||||||
|
def _main():
|
||||||
|
outPath = Path(__file__).resolve().parent / "tenant-dossier.pdf"
|
||||||
|
c = canvas.Canvas(str(outPath), pagesize=A4)
|
||||||
|
_, h = A4
|
||||||
|
margin = 72
|
||||||
|
y = h - margin
|
||||||
|
c.setFont("Helvetica-Bold", 13)
|
||||||
|
c.drawString(margin, y, "Tenant dossier (demo) - confidential")
|
||||||
|
y -= 22
|
||||||
|
c.setFont("Helvetica", 11)
|
||||||
|
lines = [
|
||||||
|
"Fictional demo data for neutralization testing.",
|
||||||
|
"",
|
||||||
|
"Tenant name: Hans Muster",
|
||||||
|
"Date of birth: 14.03.1982",
|
||||||
|
"Nationality: Swiss",
|
||||||
|
"",
|
||||||
|
"Residential address:",
|
||||||
|
"Bahnhofstrasse 1",
|
||||||
|
"8001 Zurich",
|
||||||
|
"Switzerland",
|
||||||
|
"",
|
||||||
|
"Email: hans.muster@example-mail.demo",
|
||||||
|
"Phone: +41 79 123 45 67",
|
||||||
|
"",
|
||||||
|
"Lease reference: LE-2024-88421",
|
||||||
|
"Monthly rent: CHF 2450.00",
|
||||||
|
"Deposit held: CHF 7350.00",
|
||||||
|
"",
|
||||||
|
"Employer: Demo Consulting AG, Limmatquai 78, 8001 Zurich",
|
||||||
|
"",
|
||||||
|
"Notes: Tenant requested balcony repair (ticket REQ-992).",
|
||||||
|
]
|
||||||
|
lineHeight = 14
|
||||||
|
for line in lines:
|
||||||
|
if y < margin and line:
|
||||||
|
c.showPage()
|
||||||
|
c.setFont("Helvetica", 11)
|
||||||
|
y = h - margin
|
||||||
|
c.drawString(margin, y, line)
|
||||||
|
y -= lineHeight
|
||||||
|
c.save()
|
||||||
|
print(f"Wrote {outPath}")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
_main()
|
||||||
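A quick way to sanity-check the generated dossier is to re-open it with a PDF reader library; a minimal sketch, assuming pypdf is installed (it is not a dependency of this commit):

# Smoke test for the generated dossier (illustration only; requires pypdf).
from pathlib import Path

from pypdf import PdfReader

reader = PdfReader(Path("demoData/neutralizer/tenant-dossier.pdf"))
text = reader.pages[0].extract_text()
assert "Hans Muster" in text  # fictional demo tenant written by the generator above
print(f"{len(reader.pages)} page(s), {len(text)} characters extracted")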
74
demoData/neutralizer/tenant-dossier.pdf
Normal file
Binary file not shown.
125
demoData/pwg/_generateScans.py
Normal file
@@ -0,0 +1,125 @@
"""Generate the 3 fictitious PWG scan PDFs used by the pilot demo.
|
||||||
|
|
||||||
|
Run: python _generateScans.py
|
||||||
|
|
||||||
|
Produces:
|
||||||
|
scans/mieter01-bestaetigt.pdf -> all fields ok, signed
|
||||||
|
scans/mieter02-abweichung-betrag.pdf -> rent on scan != journal lines
|
||||||
|
scans/mieter03-keine-unterschrift.pdf -> hasSignature=false
|
||||||
|
"""
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from reportlab.lib.pagesizes import A4
|
||||||
|
from reportlab.pdfgen import canvas
|
||||||
|
|
||||||
|
|
||||||
|
def _renderForm(outPath: Path, *, tenantName: str, tenantAddress: str,
|
||||||
|
objectAddress: str, period: str, rentChf: float,
|
||||||
|
tenantNotes: str, hasSignature: bool) -> None:
|
||||||
|
c = canvas.Canvas(str(outPath), pagesize=A4)
|
||||||
|
w, h = A4
|
||||||
|
margin = 60
|
||||||
|
y = h - margin
|
||||||
|
|
||||||
|
c.setFont("Helvetica-Bold", 16)
|
||||||
|
c.drawString(margin, y, "Stiftung PWG")
|
||||||
|
y -= 18
|
||||||
|
c.setFont("Helvetica", 10)
|
||||||
|
c.drawString(margin, y, "Postfach 1234 · 8000 Zürich")
|
||||||
|
y -= 30
|
||||||
|
|
||||||
|
c.setFont("Helvetica-Bold", 14)
|
||||||
|
c.drawString(margin, y, f"Jahresmietzinsbestätigung {period}")
|
||||||
|
y -= 28
|
||||||
|
|
||||||
|
c.setFont("Helvetica", 11)
|
||||||
|
c.drawString(margin, y, "Sehr geehrte Damen und Herren,")
|
||||||
|
y -= 18
|
||||||
|
c.drawString(margin, y, "hiermit bestätige ich die nachstehenden Angaben für die o.g. Periode:")
|
||||||
|
y -= 28
|
||||||
|
|
||||||
|
rows = [
|
||||||
|
("Mieter / in:", tenantName),
|
||||||
|
("Wohnadresse:", tenantAddress),
|
||||||
|
("Mietobjekt:", objectAddress),
|
||||||
|
("Periode:", period),
|
||||||
|
("Bestätigter Mietzins (CHF, monatlich):", f"{rentChf:.2f}"),
|
||||||
|
("Anmerkungen:", tenantNotes or "(keine)"),
|
||||||
|
]
|
||||||
|
c.setFont("Helvetica", 11)
|
||||||
|
for lab, val in rows:
|
||||||
|
c.drawString(margin, y, lab)
|
||||||
|
c.drawString(margin + 220, y, str(val))
|
||||||
|
y -= 18
|
||||||
|
y -= 28
|
||||||
|
|
||||||
|
c.drawString(margin, y, "Ort, Datum: Zürich, 12.04.2026")
|
||||||
|
y -= 28
|
||||||
|
c.drawString(margin, y, "Unterschrift Mieter / in:")
|
||||||
|
y -= 36
|
||||||
|
|
||||||
|
if hasSignature:
|
||||||
|
c.setFont("Helvetica-Oblique", 14)
|
||||||
|
c.drawString(margin + 220, y + 24, _signatureFor(tenantName))
|
||||||
|
else:
|
||||||
|
c.setFont("Helvetica", 9)
|
||||||
|
c.drawString(margin + 220, y + 24, "(handschriftlich)")
|
||||||
|
c.line(margin + 215, y + 22, margin + 415, y + 22)
|
||||||
|
|
||||||
|
c.showPage()
|
||||||
|
c.save()
|
||||||
|
|
||||||
|
|
||||||
|
def _signatureFor(name: str) -> str:
|
||||||
|
parts = name.split()
|
||||||
|
if not parts:
|
||||||
|
return "____"
|
||||||
|
return parts[0][0] + ". " + parts[-1]
|
||||||
|
|
||||||
|
|
||||||
|
def _main() -> None:
|
||||||
|
here = Path(__file__).resolve().parent
|
||||||
|
outDir = here / "scans"
|
||||||
|
outDir.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
# 1) bestätigt — exakt passend zu seed (Anna Müller, 1850.00)
|
||||||
|
_renderForm(
|
||||||
|
outDir / "mieter01-bestaetigt.pdf",
|
||||||
|
tenantName="Anna Müller",
|
||||||
|
tenantAddress="Bahnhofstrasse 12, 8001 Zürich",
|
||||||
|
objectAddress="Bahnhofstrasse 12, 3.OG, 8001 Zürich",
|
||||||
|
period="2026",
|
||||||
|
rentChf=1850.00,
|
||||||
|
tenantNotes="",
|
||||||
|
hasSignature=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# 2) abweichung_betrag — Mieter trägt 2300 ein, Buchhaltung sagt 2200
|
||||||
|
_renderForm(
|
||||||
|
outDir / "mieter02-abweichung-betrag.pdf",
|
||||||
|
tenantName="Beat Schneider",
|
||||||
|
tenantAddress="Limmatquai 45, 8001 Zürich",
|
||||||
|
objectAddress="Limmatquai 45, 1.OG, 8001 Zürich",
|
||||||
|
period="2026",
|
||||||
|
rentChf=2300.00,
|
||||||
|
tenantNotes="Mietzins gemäss letzter Indexanpassung — bitte prüfen.",
|
||||||
|
hasSignature=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# 3) keine_unterschrift — Carla Weber, 1650 stimmt, aber nicht unterschrieben
|
||||||
|
_renderForm(
|
||||||
|
outDir / "mieter03-keine-unterschrift.pdf",
|
||||||
|
tenantName="Carla Weber",
|
||||||
|
tenantAddress="Seestrasse 88, 8002 Zürich",
|
||||||
|
objectAddress="Seestrasse 88, EG, 8002 Zürich",
|
||||||
|
period="2026",
|
||||||
|
rentChf=1650.00,
|
||||||
|
tenantNotes="",
|
||||||
|
hasSignature=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
print(f"Generated 3 scans in {outDir}")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
_main()
|
||||||
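The signature helper above abbreviates the first name to an initial; a quick illustrative check of that behaviour (not part of the commit):

# Expected output of _signatureFor for the demo tenants.
assert _signatureFor("Anna Müller") == "A. Müller"
assert _signatureFor("Beat Schneider") == "B. Schneider"
assert _signatureFor("") == "____"  # fallback for empty names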
68
demoData/pwg/_seedTrusteeData.json
Normal file
@@ -0,0 +1,68 @@
{
  "_comment": "PWG-Demo Seed-Daten — fiktive Mieter (Debitoren) und Mietzins-Buchungen 2026 für Trustee-Feature. Wird von pwgDemo2026.py idempotent geladen.",
  "rentAccount": "6000",
  "rentAccountLabel": "Mietzinsertrag Wohnen",
  "year": 2026,
  "tenants": [
    {
      "contactNumber": "10001",
      "name": "Anna Müller",
      "address": "Bahnhofstrasse 12",
      "zip": "8001",
      "city": "Zürich",
      "country": "CH",
      "email": "anna.mueller@example.ch",
      "monthlyRentChf": 1850.00,
      "scenario": "bestaetigt",
      "_note": "Stimmt exakt — erwarteter Pilot-Status 'bestaetigt'"
    },
    {
      "contactNumber": "10002",
      "name": "Beat Schneider",
      "address": "Limmatquai 45",
      "zip": "8001",
      "city": "Zürich",
      "country": "CH",
      "email": "beat.schneider@example.ch",
      "monthlyRentChf": 2200.00,
      "scenario": "abweichung_betrag",
      "_note": "Scan zeigt 2300 CHF/Monat (Mieter nicht über Erhöhung informiert) — erwarteter Status 'abweichung_betrag'"
    },
    {
      "contactNumber": "10003",
      "name": "Carla Weber",
      "address": "Seestrasse 88",
      "zip": "8002",
      "city": "Zürich",
      "country": "CH",
      "email": "carla.weber@example.ch",
      "monthlyRentChf": 1650.00,
      "scenario": "keine_unterschrift",
      "_note": "Scan ist ohne Unterschrift — erwarteter Status 'keine_unterschrift'"
    },
    {
      "contactNumber": "10004",
      "name": "Daniel Keller",
      "address": "Hardturmstrasse 200",
      "zip": "8005",
      "city": "Zürich",
      "country": "CH",
      "email": "daniel.keller@example.ch",
      "monthlyRentChf": 2450.00,
      "scenario": "kein_scan",
      "_note": "Hat noch nicht zurückgesendet — taucht nicht im Pilot-Run auf"
    },
    {
      "contactNumber": "10005",
      "name": "Elena Fischer",
      "address": "Rämistrasse 71",
      "zip": "8001",
      "city": "Zürich",
      "country": "CH",
      "email": "elena.fischer@example.ch",
      "monthlyRentChf": 1990.00,
      "scenario": "kein_scan",
      "_note": "Reserve-Mieter für spätere Demo-Erweiterungen"
    }
  ]
}
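The seed file and the scan generator are meant to stay in sync (tenant names, amounts, scenarios); a small consistency sketch, assuming both files sit under demoData/pwg/ as committed:

# Group seed tenants by scenario and confirm each scanned scenario is covered
# (illustration only; 'kein_scan' tenants deliberately have no PDF).
import json
from pathlib import Path

seed = json.loads(Path("demoData/pwg/_seedTrusteeData.json").read_text(encoding="utf-8"))
byScenario = {}
for tenant in seed["tenants"]:
    byScenario.setdefault(tenant["scenario"], []).append(tenant["name"])
for scenario in ("bestaetigt", "abweichung_betrag", "keine_unterschrift"):
    assert byScenario.get(scenario), f"no seed tenant for scenario {scenario}"
print(byScenario)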
80
demoData/pwg/scans/mieter01-bestaetigt.pdf
Normal file
Binary file not shown.
80
demoData/pwg/scans/mieter02-abweichung-betrag.pdf
Normal file
Binary file not shown.
74
demoData/pwg/scans/mieter03-keine-unterschrift.pdf
Normal file
Binary file not shown.
BIN
demoData/trustee/Budget2026a.xlsx
Normal file
Binary file not shown.
152
demoData/workflows/pwg-mietzinsbestaetigung-pilot.workflow.json
Normal file
@@ -0,0 +1,152 @@
{
  "$schemaVersion": "1.0",
  "$kind": "poweron.workflow",
  "$exportedAt": "2026-04-16T10:00:00Z",
  "$gatewayVersion": "demo-2026-04",
  "label": "PWG Pilot: Jahresmietzinsbestätigung",
  "description": "Verarbeitet gescannte Rückantworten der Jahresmietzinsbestätigungen: OCR, Abgleich gegen Trustee-DB (Mieter + Mietzins-Buchungen), AI-Klassifikation pro Scan und Zustellung als CSV-Anhang im Outlook-Draft an die Sachbearbeitung. Pilot-Lieferung Sommer 2026.",
  "tags": ["pwg", "pilot", "mietzins", "trustee", "ocr"],
  "templateScope": "instance",
  "sharedReadOnly": false,
  "notifyOnFailure": true,
  "graph": {
    "nodes": [
      {
        "id": "n1",
        "type": "trigger.manual",
        "x": 50,
        "y": 200,
        "title": "Manueller Start",
        "parameters": {}
      },
      {
        "id": "n2",
        "type": "sharepoint.listFiles",
        "x": 320,
        "y": 200,
        "title": "Scan-Ordner auflisten",
        "parameters": {
          "connectionReference": "",
          "pathQuery": "PWG/Mietzinsbestaetigungen/Scans-Eingang"
        }
      },
      {
        "id": "n3",
        "type": "flow.loop",
        "x": 590,
        "y": 200,
        "title": "Pro Scan-Dokument",
        "parameters": {
          "level": 1,
          "concurrency": 1
        }
      },
      {
        "id": "n4",
        "type": "sharepoint.downloadFile",
        "x": 860,
        "y": 200,
        "title": "PDF/Bild laden",
        "parameters": {
          "connectionReference": "",
          "pathQuery": "{{loop.item.path}}"
        }
      },
      {
        "id": "n5",
        "type": "trustee.extractFromFiles",
        "x": 1130,
        "y": 200,
        "title": "OCR & Felder extrahieren",
        "parameters": {
          "featureInstanceId": "",
          "prompt": "Extrahiere die folgenden Felder aus dieser Jahresmietzinsbestätigung und antworte als JSON: tenantName (string), tenantAddress (string), objectAddress (string), confirmedRentAmount (number|null in CHF), currency ('CHF'), period (string z.B. '2026'), tenantNotes (string|null - alle handschriftlichen Anmerkungen oder Korrekturen), hasSignature (boolean - ist eine Unterschrift vorhanden?), documentDate (ISO date|null), ocrConfidence (number 0-1)."
        }
      },
      {
        "id": "n6",
        "type": "trustee.queryData",
        "x": 1400,
        "y": 200,
        "title": "Referenzdaten Trustee-DB",
        "parameters": {
          "featureInstanceId": "",
          "mode": "lookup",
          "entity": "tenantWithRent",
          "tenantNameRef": "{{n5.output.tenantName}}",
          "tenantAddressRef": "{{n5.output.tenantAddress}}",
          "period": "{{n5.output.period}}",
          "rentAccountPattern": "6000-6099"
        }
      },
      {
        "id": "n7",
        "type": "ai.prompt",
        "x": 1670,
        "y": 200,
        "title": "Prüfung & Klassifikation",
        "parameters": {
          "outputFormat": "json",
          "simpleMode": false,
          "documentList": "{{n5.output}}",
          "context": "{{n6.output}}",
          "aiPrompt": "Du bist ein Sachbearbeitungs-Assistent der Stiftung PWG. Deine Aufgabe ist es, eine eingescannte und OCR-extrahierte Jahresmietzinsbestätigung gegen die Stammdaten der Buchhaltung (Trustee-Feature) abzugleichen.\n\nEingaben:\n1. SCAN_DATEN (extrahiert per OCR aus dem Rückantwort-Dokument):\n{{scan}}\n\n2. REFERENZ_DATEN (aus Trustee-DB für diesen Mieter; ggf. leer wenn nicht eindeutig zuordenbar):\n{{reference}}\n\nVorgehen:\n1. Prüfe Identität: Stimmt SCAN_DATEN.tenantName + SCAN_DATEN.tenantAddress mit einem Datensatz in REFERENZ_DATEN.contacts überein? (Toleranz: kleine Tippfehler, Umlaute, Abkürzungen).\n2. Prüfe Mietzinsbetrag: Stimmt SCAN_DATEN.confirmedRentAmount mit dem aus REFERENZ_DATEN.expectedRentAmount erwarteten Mietzins überein? (Toleranz: ±1 CHF Rundung).\n3. Prüfe Unterschrift: hasSignature muss true sein.\n4. Prüfe OCR-Qualität: ocrConfidence < 0.6 -> 'unleserlich'.\n\nKlassifiziere in EXAKT EINEN Status:\n- 'bestaetigt': Identität stimmt, Betrag stimmt, Unterschrift vorhanden.\n- 'abweichung_betrag': Identität ok, Unterschrift ok, Betrag weicht ab.\n- 'abweichung_anmerkung': tenantNotes enthält substantielle Anmerkung (nicht leer, nicht reine Bestätigung).\n- 'keine_unterschrift': hasSignature == false.\n- 'unleserlich': OCR-Qualität ungenügend ODER Pflichtfelder fehlen.\n- 'kein_match': Mieter nicht in REFERENZ_DATEN auffindbar.\n\nBei Status != 'bestaetigt': Generiere einen kurzen, höflichen Antwortvorschlag (deutsch, Sie-Form, max. 5 Sätze, PWG-Stil) für die Sachbearbeitung. Bei 'bestaetigt': antwortVorschlag = null.\n\nAntworte AUSSCHLIESSLICH als JSON nach folgendem Schema:\n{\n \"tenantName\": string,\n \"objectAddress\": string,\n \"status\": \"bestaetigt\" | \"abweichung_betrag\" | \"abweichung_anmerkung\" | \"keine_unterschrift\" | \"unleserlich\" | \"kein_match\",\n \"scanRentAmount\": number | null,\n \"expectedRentAmount\": number | null,\n \"delta\": number | null,\n \"tenantNotes\": string | null,\n \"antwortVorschlag\": string | null,\n \"matchConfidence\": number,\n \"auditEvidence\": string\n}"
        }
      },
      {
        "id": "n8",
        "type": "data.aggregate",
        "x": 1940,
        "y": 200,
        "title": "Ergebnisse sammeln (im Loop)",
        "parameters": {
          "mode": "collect"
        }
      },
      {
        "id": "n9",
        "type": "data.consolidate",
        "x": 2210,
        "y": 200,
        "title": "CSV bauen (nach Loop)",
        "parameters": {
          "mode": "csvJoin",
          "separator": "\n"
        }
      },
      {
        "id": "n10",
        "type": "email.draftEmail",
        "x": 2480,
        "y": 200,
        "title": "Draft an Sachbearbeitung",
        "parameters": {
          "connectionReference": "",
          "to": "sachbearbeiter@pwg.ch",
          "subject": "Mietzinsbestätigungen Auswertung {{currentDate}}",
          "body": "Hallo,\n\nim Anhang die Auswertung der eingegangenen Jahresmietzinsbestätigungen.\nPro Scan eine Zeile mit Status, Betragsabgleich und (bei Abweichung) Antwortvorschlag.\n\nBitte die Zeilen mit Status != 'bestaetigt' manuell sichten.\n\nFreundliche Grüße,\nPWG Automation",
          "emailStyle": "business",
          "attachments": [
            {
              "name": "mietzinsbestaetigungen-auswertung",
              "mimeType": "text/csv",
              "csvFromVariable": "n9.output"
            }
          ]
        }
      }
    ],
    "connections": [
      {"source": "n1", "target": "n2", "sourceOutput": 0, "targetInput": 0},
      {"source": "n2", "target": "n3", "sourceOutput": 0, "targetInput": 0},
      {"source": "n3", "target": "n4", "sourceOutput": 0, "targetInput": 0},
      {"source": "n4", "target": "n5", "sourceOutput": 0, "targetInput": 0},
      {"source": "n5", "target": "n6", "sourceOutput": 0, "targetInput": 0},
      {"source": "n6", "target": "n7", "sourceOutput": 0, "targetInput": 0},
      {"source": "n7", "target": "n8", "sourceOutput": 0, "targetInput": 0},
      {"source": "n8", "target": "n9", "sourceOutput": 0, "targetInput": 0},
      {"source": "n9", "target": "n10", "sourceOutput": 0, "targetInput": 0}
    ]
  },
  "invocations": []
}
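The ai.prompt node (n7) instructs the model to answer with exactly the JSON shape spelled out in aiPrompt; a minimal validation sketch for that contract (field and status names are taken verbatim from the prompt, the helper itself is illustrative):

# Validate one classification result against the schema required by node n7.
import json

ALLOWED_STATUS = {"bestaetigt", "abweichung_betrag", "abweichung_anmerkung",
                  "keine_unterschrift", "unleserlich", "kein_match"}
REQUIRED_FIELDS = {"tenantName", "objectAddress", "status", "scanRentAmount",
                   "expectedRentAmount", "delta", "tenantNotes",
                   "antwortVorschlag", "matchConfidence", "auditEvidence"}

def validateClassification(raw: str) -> dict:
    result = json.loads(raw)
    missing = REQUIRED_FIELDS - result.keys()
    if missing:
        raise ValueError(f"missing fields: {sorted(missing)}")
    if result["status"] not in ALLOWED_STATUS:
        raise ValueError(f"unknown status: {result['status']}")
    if result["status"] == "bestaetigt" and result["antwortVorschlag"] is not None:
        raise ValueError("antwortVorschlag must be null for 'bestaetigt'")
    return result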
93
env_dev.env
93
env_dev.env
|
|
@ -4,37 +4,22 @@
APP_ENV_TYPE = dev
APP_ENV_LABEL = Development Instance Patrick
APP_API_URL = http://localhost:8000
-APP_KEY_SYSVAR = D:/Athi/Local/Web/poweron/local/key.txt
+APP_KEY_SYSVAR = D:/Athi/Local/Web/poweron/local/notes/key.txt
APP_INIT_PASS_ADMIN_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEeFFtRGtQeVUtcjlrU3dab1ZxUm9WSks0MlJVYUtERFlqUElHemZrOGNENk1tcmJNX3Vxc01UMDhlNU40VzZZRVBpUGNmT3podzZrOGhOeEJIUEt4eVlSWG5UYXA3d09DVXlLT21Kb1JYSUU9
APP_INIT_PASS_EVENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpERzZjNm56WGVBdjJTeG5Udjd6OGQwUVotYXUzQjJ1YVNyVXVBa3NZVml3ODU0MVNkZjhWWmJwNUFkc19BcHlHMTU1Q3BRcHU0cDBoZkFlR2l6UEZQU3d2U3MtMDh5UDZteGFoQ0EyMUE1ckE9

-# PostgreSQL Storage (new)
-DB_APP_HOST=localhost
-DB_APP_DATABASE=poweron_app
-DB_APP_USER=poweron_dev
-DB_APP_PASSWORD_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEcUIxNEFfQ2xnS0RrSC1KNnUxTlVvTGZoMHgzaEI4Z3NlVzVROTVLak5Ubi1vaEZubFZaMTFKMGd6MXAxekN2d2NvMy1hRjg2UVhybktlcFA5anZ1WjFlQmZhcXdwaGhWdzRDc3ExeUhzWTg9
-DB_APP_PORT=5432
-
-# PostgreSQL Storage (new)
-DB_CHAT_HOST=localhost
-DB_CHAT_DATABASE=poweron_chat
-DB_CHAT_USER=poweron_dev
-DB_CHAT_PASSWORD_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpERFNzNVhoalpCR0QxYXAwdEpXWXVVOTdZdWtqWW5FNXFGcFl2amNYLWYwYl9STXltRlFxLWNzVWlMVnNYdXk0RklnRExFT0FaQjg2aGswNnhhSGhCN29KN2VEb2FlUV9NTlV3b0tLelplSVU9
-DB_CHAT_PORT=5432
-
-# PostgreSQL Storage (new)
-DB_MANAGEMENT_HOST=localhost
-DB_MANAGEMENT_DATABASE=poweron_management
-DB_MANAGEMENT_USER=poweron_dev
-DB_MANAGEMENT_PASSWORD_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEUldqSTVpUnFqdGhITDYzT3RScGlMYVdTMmZhOXdudDRCc3dhdllOd3l6MS1vWHY2MjVsTUF1Sk9saEJOSk9ONUlBZjQwb2c2T1gtWWJhcXFzVVVXd01xc0U0b0lJX0JyVDRxaDhNS01JcWs9
-DB_MANAGEMENT_PORT=5432
+# PostgreSQL DB Host
+DB_HOST=localhost
+DB_USER=poweron_dev
+DB_PASSWORD_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEcUIxNEFfQ2xnS0RrSC1KNnUxTlVvTGZoMHgzaEI4Z3NlVzVROTVLak5Ubi1vaEZubFZaMTFKMGd6MXAxekN2d2NvMy1hRjg2UVhybktlcFA5anZ1WjFlQmZhcXdwaGhWdzRDc3ExeUhzWTg9
+DB_PORT=5432

# Security Configuration
APP_JWT_KEY_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpERjlrSktmZHVuQnJ1VVJDdndLaUcxZGJsT2ZlUFRlcFdOZ001RnlzM2FhLWhRV2tjWWFhaWQwQ3hkcUFvbThMcndxSjFpYTdfRV9OZGhTcksxbXFTZWg5MDZvOHpCVXBHcDJYaHlJM0tyNWRZckZsVHpQcmxTZHJoZUs1M3lfU2ljRnJaTmNSQ0w0X085OXI0QW80M2xfQnJqZmZ6VEh3TUltX0xzeE42SGtZPQ==
APP_TOKEN_EXPIRY=300

# CORS Configuration
-APP_ALLOWED_ORIGINS=http://localhost:8080,https://playground.poweron-center.net
+APP_ALLOWED_ORIGINS=http://localhost:8080,http://localhost:5176,https://playground.poweron-center.net

# Logging configuration
APP_LOGGING_LOG_LEVEL = DEBUG
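The hunk above collapses the three per-domain blocks (DB_APP_*, DB_CHAT_*, DB_MANAGEMENT_*) into a single DB_* set, which means the database names (poweron_app, poweron_chat, poweron_management) no longer live in the env file and must come from the caller. A minimal sketch of building a PostgreSQL DSN from the new variable names; the function itself is hypothetical, and it assumes the password has already been decrypted from DB_PASSWORD_SECRET:

import os

def postgres_dsn(database: str, password: str) -> str:
    # Consolidated DB_* variables introduced by the hunk above.
    host = os.environ.get("DB_HOST", "localhost")
    port = os.environ.get("DB_PORT", "5432")
    user = os.environ["DB_USER"]
    return f"postgresql://{user}:{password}@{host}:{port}/{database}"

# Example: dsn = postgres_dsn("poweron_app", decrypted_password)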
@ -46,34 +31,72 @@ APP_LOGGING_FILE_ENABLED = True
APP_LOGGING_ROTATION_SIZE = 10485760
APP_LOGGING_BACKUP_COUNT = 5

-# Service Redirects
-Service_MSFT_REDIRECT_URI = http://localhost:8000/api/msft/auth/callback
-Service_GOOGLE_REDIRECT_URI = http://localhost:8000/api/google/auth/callback
+# OAuth: Auth app (login/JWT) vs Data app (Microsoft Graph / Google APIs). Same IDs until you split apps in Azure / GCP.
+Service_MSFT_AUTH_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
+Service_MSFT_AUTH_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQm83T29rV1pQelMtc1p1MXR4NTFpa19CTEhHQ0xfNmdPUmZqcWp5UHBMS0hYTGl4c1pPdmhTNTJVWUl5WnlnUUZhV0VTRzVCb0d5YjR1NnZPZk5CZ0dGazNGdUJVbjkxeVdrYlNiVjJUYzF2aVFtQnVxTHFqTTJqZlF0RTFGNmE1OGN1TEk=
+Service_MSFT_AUTH_REDIRECT_URI = http://localhost:8000/api/msft/auth/login/callback
+Service_MSFT_DATA_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
+Service_MSFT_DATA_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQm83T29rV1pQelMtc1p1MXR4NTFpa19CTEhHQ0xfNmdPUmZqcWp5UHBMS0hYTGl4c1pPdmhTNTJVWUl5WnlnUUZhV0VTRzVCb0d5YjR1NnZPZk5CZ0dGazNGdUJVbjkxeVdrYlNiVjJUYzF2aVFtQnVxTHFqTTJqZlF0RTFGNmE1OGN1TEk=
+Service_MSFT_DATA_REDIRECT_URI = http://localhost:8000/api/msft/auth/connect/callback
+
+Service_GOOGLE_AUTH_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
+Service_GOOGLE_AUTH_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpETDJhbGVQMHlFQzNPVFI1ZzBMa3pNMGlQUHhaQm10eVl1bFlSeTBybzlTOWE2MURXQ0hkRlo0NlNGbHQxWEl1OVkxQnVKYlhhOXR1cUF4T3k0WDdscktkY1oyYllRTmdDTWpfbUdwWGtSd1JvNlYxeTBJdEtaaS1vYnItcW0yaFM=
+Service_GOOGLE_AUTH_REDIRECT_URI = http://localhost:8000/api/google/auth/login/callback
+Service_GOOGLE_DATA_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
+Service_GOOGLE_DATA_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpETDJhbGVQMHlFQzNPVFI1ZzBMa3pNMGlQUHhaQm10eVl1bFlSeTBybzlTOWE2MURXQ0hkRlo0NlNGbHQxWEl1OVkxQnVKYlhhOXR1cUF4T3k0WDdscktkY1oyYllRTmdDTWpfbUdwWGtSd1JvNlYxeTBJdEtaaS1vYnItcW0yaFM=
+Service_GOOGLE_DATA_REDIRECT_URI = http://localhost:8000/api/google/auth/connect/callback
+
+# ClickUp OAuth (Verbindungen / automation). Create an app in ClickUp: Settings → Apps → API; set redirect URL to Service_CLICKUP_OAUTH_REDIRECT_URI exactly.
+Service_CLICKUP_CLIENT_ID = O3FX3H602A30MQN4I4SBNGJLIDBD5SL4
+Service_CLICKUP_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd4ZWVBeHVtRnpIT0VBN0tSZDhLRmFmN05DOVBOelJtLWhkVnJDRVBqUkh3bDFTZFRWaWQ1cWowdGNLUk5IQzlGN1J6RFVCaW8zRnBwLVBnclJfdWgxV3pVRzFEV2lwcW5Rc19Xa1ROWXNJcUF0ajZaYUxOUXk0WHRsRmJLM25FaHV5T2IxdV92ZW1nRjhzaGpwU0l2Wm9FTkRnY2lJVjhuNHUwT29salAxYV8wPQ==
+Service_CLICKUP_OAUTH_REDIRECT_URI = http://localhost:8000/api/clickup/auth/connect/callback
+
+# Stripe Billing (both end with _SECRET for encryption script)
+STRIPE_SECRET_KEY_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd5aHNGejgzQmpTdmprdzQxR19KZkh3MlhYUTNseFN3WnlaWjh2SDZyalN6aU9xSktkbUQwUnZrVnlvbGVRQm4yZFdiRU5aSEk5WVJuUnR4VUwtTm9OVk1WWmJQeU5QaDdib0hfVWV5U1BfYTFXRmdoOWdnOWxkb3JFQmF3bm45UjFUVUxmWGtGRkFKUGd6bmhpQlFnaVI3Q2lLdDlsY1VESk1vOEM0ZFBJNW1qcVZ0N2tPYmRLNmVKajZ2M3o3S05lWnRRVG5LdkRseW4wQ3VjNHNQZTZUdz09
+STRIPE_WEBHOOK_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd5dDJMSHBrVk8wTzJhU2xzTTZCZWdvWmU2NGI2WklfRXRJZVUzaVYyOU9GLUZsalUwa2lPdEgtUHo0dVVvRDU1cy1saHJyU0Rxa2xQZjBuakExQzk3bmxBcU9WbEIxUEtpR1JoUFMxZG9ISGRZUXFhdFpSMGxvQUV3a0VLQllfUUtCOHZwTGdteV9rYTFOazBfSlN3ekNWblFpakJlZVlCTmNkWWQ4Sm01a1RCWTlnTlFHWVA0MkZYMlprUExrWFN2V0NVU1BTd1NKczFJbVo3VHpLdlc4UT09
+STRIPE_API_VERSION = 2026-01-28.clover
+STRIPE_AUTOMATIC_TAX_ENABLED = false
+STRIPE_TAX_RATE_ID_CH_VAT = txr_1TOQd14OUoIL0Osj7A0ZQlr0
+
# AI configuration
-Connector_AiOpenai_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEajBuZmtYTVdqLTBpQm9KZ2pCXzRCV3VhZzlYTEhKb1FqWXNrV3lyb25uZUN1WVVQUEY3dGYtejludV9MNGlKeVREanZGOGloV09mY2ttQ3k5SjBFOGFac2ZQTkNKNUZWVnRINVQyeWhsR2wyYnVrRDNzV2NqSHB0ajQ4UWtGeGZtbmR0Q3VvS0hDZlphVmpSc2Z6RG5nPT0=
+Connector_AiOpenai_API_SECRET = DEV_ENC:Z0FBQUFBQnBaSnM4TWFRRmxVQmNQblVIYmc1Y0Q3aW9zZUtDWlNWdGZjbFpncGp2NHN2QjkxMWxibUJnZDBId252MWk5TXN3Yk14ajFIdi1CTkx2ZWx2QzF5OFR6LUx5azQ3dnNLaXJBOHNxc0tlWmtZcTFVelF4eXBSM2JkbHd2eTM0VHNXdHNtVUprZWtPVzctNlJsZHNmM20tU1N6Q1Q2cHFYSi1tNlhZNDNabTVuaEVGWmIydEhadTcyMlBURmw2aUJxOF9GTzR0dTZiNGZfOFlHaVpPZ1A1LXhhOEFtN1J5TEVNNWtMcGpyNkMzSl8xRnZsaTF1WTZrOUZmb0cxVURjSGFLS2dIYTQyZEJtTm90bEYxVWxNNXVPdTVjaVhYbXhxT3JsVDM5VjZMVFZKSE1tZnM9
Connector_AiAnthropic_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpENmFBWG16STFQUVZxNzZZRzRLYTA4X3lRanF1VkF4cU45OExNMzlsQmdISGFxTUxud1dXODBKcFhMVG9KNjdWVnlTTFFROVc3NDlsdlNHLUJXeG41NDBHaXhHR0VHVWl5UW9RNkVWbmlhakRKVW5pM0R4VHk0LUw0TV9LdkljNHdBLXJua21NQkl2b3l4UkVkMGN1YjBrMmJEeWtMay1jbmxrYWJNbUV0aktCXzU1djR2d2RSQXZORTNwcG92ZUVvVGMtQzQzTTVncEZTRGRtZUFIZWQ0dz09
-Connector_AiPerplexity_API_SECRET = DEV_ENC:Z0FBQUFBQm82Mzk2Q1MwZ0dNcUVBcUtuRDJIcTZkMXVvYnpjM3JEMzJiT1NKSHljX282ZDIyZTJYc09VSTdVNXAtOWU2UXp5S193NTk5dHJsWlFjRjhWektFOG1DVGY4ZUhHTXMzS0RPN1lNcF9nSlVWbW5BZ1hkZDVTejl6bVZNRFVvX29xamJidWRFMmtjQmkyRUQ2RUh6UTN1aWNPSUJBPT0=
+Connector_AiPerplexity_API_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd5ZmdDZ3hrSElrMnQzNFAtel9wX191VjVzN2g1LWZoa0V1YklubEdmMEJDdEZiR1RWeVZrM3V3enBHX3p6WUtTS0kwYkFyVEF0Nm8zX05CelVQcFJUc0lwVW5iNFczc1p1WWJ2WFBmd0lpLUxxWndEeUh0b2hGUHVpN19vb19nMTBnV1A1VmNpWERVX05lQ29VS20wTjZ3PT0=
Connector_AiTavily_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEQTdnUHMwd2pIaXNtMmtCTFREd0pyQXRKb1F5eGtHSnkyOGZiUnlBOFc0b3Vzcndrc3ViRm1nMDJIOEZKYWxqdWNkZGh5N0Z4R0JlQmxXSG5pVnJUR2VYckZhMWNMZ1FNeXJ3enJLVlpiblhOZTNleUg3ZzZyUzRZanFSeDlVMkI=
+Connector_AiPrivateLlm_API_SECRET = DEV_ENC:Z0FBQUFBQnBudkpGRHM5eFdUVmVZU1R1cHBwN1RlMUx4T0NlLTJLUFFVX3J2OElDWFpuZmJHVmp4Z3BNNWMwZUVVZUd2TFhRSjVmVkVlcFlVRWtybXh0ZHloZ01ZcnVvX195YjdlWVdEcjZSWFFTTlNBWUlaTlNoLWhqVFBIb0thVlBiaWhjYjFQOFY=
+Connector_AiMistral_API_SECRET = DEV_ENC:Z0FBQUFBQnBudkpGeEQxYUIxOHhia0JlQWpWQ2dWQWZzY3l6SWwyUnJoR1hRQWloX2lxb2lGNkc4UnA4U2tWNjJaYzB1d1hvNG9fWUp1N3V4OW9FMGhaWVhjSlVwWEc1X2loVDBSZDEtdHdfcTA5QkcxQTR4OHc4RkRzclJrU2d1RFZpNDJkRDRURlE=

-# Microsoft Service Configuration
-Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
-Service_MSFT_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQm83T29rV1pQelMtc1p1MXR4NTFpa19CTEhHQ0xfNmdPUmZqcWp5UHBMS0hYTGl4c1pPdmhTNTJVWUl5WnlnUUZhV0VTRzVCb0d5YjR1NnZPZk5CZ0dGazNGdUJVbjkxeVdrYlNiVjJUYzF2aVFtQnVxTHFqTTJqZlF0RTFGNmE1OGN1TEk=
Service_MSFT_TENANT_ID = common

-# Google Service configuration
-Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
-Service_GOOGLE_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpETDJhbGVQMHlFQzNPVFI1ZzBMa3pNMGlQUHhaQm10eVl1bFlSeTBybzlTOWE2MURXQ0hkRlo0NlNGbHQxWEl1OVkxQnVKYlhhOXR1cUF4T3k0WDdscktkY1oyYllRTmdDTWpfbUdwWGtSd1JvNlYxeTBJdEtaaS1vYnItcW0yaFM=

# Google Cloud Speech Services configuration
Connector_GoogleSpeech_API_KEY_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpETk5FWWM3Q0JKMzhIYTlyMkhuNjA4NlF4dk82U2NScHhTVGY3UG83NkhfX3RrcWVtWWcyLXRjU1dTT21zWEl6YWRMMUFndXpsUnJOeHh3QThsNDZKRXROTzdXRUdsT0JZajZJNVlfb0gtMXkwWm9DOERPVnpjU0pyUEZfOGJsUnprT3ltMVVhalUyUm9hMUFtZEtHUnJqOGZ4dEZjZm5SWVVTckVCWnY1UkdVSHVmUlgwbnAyc0xDQW84R3ViSko5OHVCVWZRUVNiaG1pVFB6X3EwS0FPd2dUYjhiSmRjcXh2WEZiXzI4SFZqT21tbDduUWRyVWdFZXpmcVM5ZDR0VWtzZnF5UER6cGwwS2JlLV9CSTZ0Z0IyQ1h0YW9TcmhRTXZEckp4bWhmTkt6UTNYMk4zVkpnbUJmaDIxZnoyR2dWTEYwTUFEV0w2eUdUUGpoZk9XRkt4RVF1Z1NPdUpBeTcyWV9PY1Ffd2s0ZEdVekxGekhoeEl4TmNqaXYtbUJuSVdycFducERWdWtZajZnX011Q2w4eE9VMTBqQ1ZxRmdScWhXY1E3WWhzX1JZcHhxam9FbDVPN3Q1MWtrMUZuTUg3LVFQVHp1T1hpQWNDMzEzekVJWk9ybl91YUVjSkFob1VaMi1ONEtuMnRSOEg1S3QybUMwbVZDejItajBLTjM2Zy1hNzZQMW5LLVVDVGdFWm5BZUxNeEFnUkZzU3dxV0lCUlc0LWo4b05GczVpOGZSV2ZxbFBwUml6OU5tYjdnTks3Y3hrVEZVTHlmc1NPdFh4WE5pWldEZklOQUxBbjBpMTlkX3FFQVJ6c2NSZGdzTThycE92VW82enZKamhiRGFnU25aZGlHZHhZd2lUUmhuTVptNjhoWVlJQkxIOEkzbzJNMjZCZFJyM25tdXBnQ2ZWaHV3b2p6UWJpdk9xUEhBc1dyTlNmeF9wbm5yYUhHV01UZnVXWDFlNzBkdXlWUWhvcmJpSmljbmE3LUpUZEg4VzRwZ2JVSjdYUm1sODViQXVxUzdGTmZFbVpiN2V1YW5XV3U4b2VRWmxldGVGVHZsSldoekhVLU9wZ2V0cGZIYkNqM2pXVGctQVAyUm4xTHhpd1VVLXFhcnVEV21Rby1hbTlqTl84TjVveHdYTExUVkhHQ0ltaTB2WXJnY1NQVE5PbWg3ejgySElYc1JSTlQ3NDlFUWR6STZVUjVqaXFRN200NF9LY1ljQ0R2UldlWUtKY1NQVnJ4QXRyYTBGSWVuenhyM0Z0cWtndTd1eG8xRzY5a2dNZ1hkQm5MV3BHVzA2N1QwUkd6WlRGYTZQOUhnVWQ2S0Y5U0s1dXFNVXh5Q2pLWVUxSUQ2MlR1ak52NmRIZ2hlYTk1SGZGWS1RV3hWVU9rR3d1Rk9MLS11REZXbzhqMHpsSm1HYW1jMUNLT29YOHZsRWNaLTVvOFpmT3l3MHVwaERTT0dNLWFjcGRYZ25qT2szTkVFUnRFR3JWYS1aNXFIRnMyalozTlQzNFF2NXJLVHVPVF9zdTF6ZjlkbzJ4RFc2ZENmNFFxZDZzTzhfMUl0bW96V0lPZkh1dXFYZlEteFBlSG84Si1FNS1TTi1OMkFnX2pOYW8xY3MxMVJnVC02MDUyaXZfMEVHWDQtVlRpcENmV0h3V0dCWEFRS2prQXdNRlQ5dnRFVHU0Q1dNTmh0SlBCaU55bFMydWM1TTFFLW96ODBnV3dNZHFZTWZhRURYSHlrdzF3RlRuWDBoQUhSOUJWemtRM3pxcDJFbGJoaTJ3ZktRTlJxbXltaHBoZXVJVDlxS3cxNWo2c0ZBV0NzaUstRWdsMW1xLXFkanZGYUFiU0tSLXFQa0tkcDFoMV9kak41ZjQ0R214UmtOR1ZBanRuemY3Mmw1SkZ5aDZodGIzT3N2aV85MW9kcld6c0g0ZDgtTWo3b3Y3VjJCRnR2U2tMVm9rUXNVRnVHbzZXVTZ6RmI2RkNmajBfMWVnODVFbnpkT0oyci15czJHU0p1cUowTGZJMzVnd3hIRjQyTVhKOGRkcFRKdVpyQ3Yzd01Jb1lSajFmV0paeEV0cjk1SmpmdWpDVFJMUmMtUFctOGhaTmlKQXNRVlVUNlhJemxudHZCR056SVlBb3NOTEYxRTRLaFlVd2d3TWtxVlB6ZEtQLTkxOGMyY3N0a2pYRFUweDBNaGhja2xSSklPOUZla1dKTWRNbG8tUGdSNEV5cW90OWlOZFlIUExBd3U2b2hyS1owbXVMM3p0Qm41cUtzWUxYNzB1N3JpUTNBSGdsT0NuamNTb1lIbXR4MG1sakNPVkxBUXRLVE1xX0YxWDhOcERIY1lTQVFqS01CaXZKNllFaXlIR0JsM1pKMmV1OUo3TGI1WkRaVnYxUTl1LTM0SU1qN1V1b0RCT0x0VHNLTmNLZnk1S0MxYnBBcm03WnVua0xqaEhGUzhOU253ZkppRzdudXBSVlMxeFVOSWxtZ1o2RVBSQUhEUEFuQ1hxSVZMME4yWUtaU3VyRGo3RkUyRUNjT0pNcE1BdE1ZRzdXVl8ydUtXZjdMdHdEVW4teHUtTi1HSGliLUxud21TX0NtcGVkRFBHNkZ1WTlNczR4OUJfUVluc1BoV09oWS1scUdsNnB5d1U5M1huX3k4QzAyNldtb2hybktYN2xKZ1NTNWFsaWwzV3pCRVhkaGR5eTNlV1d6ZzFfaFZTT0E4UjRpQ3pKdEZxUlJ6UFZXM3laUndyWEk2NlBXLUpoajVhZzVwQXpWVzUtVjVNZFBwdWdQa3AxZC1KdGdqNnhibjN4dmFYb2cxcEVwc1g5R09zRUdINUZtOE5QRjVUU0dpZy1QVl9odnFtVDNuWFZLSURtMXlSMlhRNTBWSVFJbEdOOWpfVWV0SmdRWDdlUXZZWE8xRUxDN1I0aEN6MHYwNzM1cmpJS0ZpMnBYWkxfb3FsbEV1VnlqWGxqdVJ6SHlwSjAzRlMycTBaQ295NXNnZERpUnJQcjhrUUd3bkI4bDVzRmxQblhkaFJPTTdISnVUQmhET3BOMTM4bjVvUEc2VmZhb2lrR1FyTUl2RWNEeGg0U0dsNnV6eU5zOUxiNDY5SXBxR0hBS00wOTgyWTFnWkQyaEtLVUloT3ZxZGh0RWVGRmJzenFsaUtfZENQM0JzdkVVeTdXR3hUSmJST1NBMUI1NkVFWncwNW5JZVVLX1p1RXdqVnFfQWpvQ08yQjZhN1NkTkpTSnUxOVRXZXE0WFEtZWxhZW1NNXYtQ2sya0VGLURmS01lMkctNVY3c2ZhN0ZGRFgwWHlabTFkeS1hcUZ1dDZ3cnpPQ3hha2IzVE11M0pqbklmU0diczBqTFBNZC1QZGp6VzNTSnJVSjJoWkJUQjVORG4tYUJmMEJtSUNUdVpEaGt6OTM3TjFOdVhXUHItZjRtZ25nU3NhZC1sVTVXNTRDTmxZbnlfeHNsdkpuMXhUYnE1MnpVQ0ZOclRWM1M4eHdXTzR
XbFRZZVQtTS1iRVdXVWZMSGotcWg3MUxUYTFnSEEtanBCRHlZRUNIdGdpUFhsYjdYUndCZnRITzhMZVJ1dHFoVlVNb0duVjlxd0U4OGRuQVV3MG90R0hiYW5MWkxWVklzbWFRNzBfSUNrdzc5bVdtTXg0dExEYnRCaDI3c1I4TWFwLXZKR0wxSjRZYjZIV3ZqZjNqTWhFT0RGSDVMc1A1UzY2bDBiMGFSUy1fNVRQRzRJWDVydUpqb1ZfSHNVbldVeUN2YlAxSW5WVDdxVzJ1WHpLeUdmb0xWMDNHN05oQzY3YnhvUUdhS2xaOHNidkVvbTZtSHFlblhOYmwyR3NQdVJDRUdxREhWdF9ZcXhwUWxHc2hyLW5vUGhIUVhJNUNhY0hFU0ptVnI0TFVhZDE1TFBBUEstSkRoZWJ5MHJhUmZrR1ZrRlFtRGpxS1pOMmFMQjBsdjluY3FiYUU4eGJVVXlZVEpuNWdHVVhJMGtwaTdZR2NDbXd2eHpOQ09SeTV6N1BaVUpsR1pQVDBZcElJUUt6VnVpQmxSYnE4Y1BCWV9IRWdVV0p3enBGVHItdnBGN3NyNWFBWmkySnByWThsbDliSlExQmp3LVlBaDIyZXp6UnR6cU9rTzJmTDBlSVpON0tiWllMdm1oME1zTFl2S2ZYYllhQlY2VHNZRGtHUDY4U1lIVExLZTU4VzZxSTZrZHl1ZTBDc0g4SjI4WGYyZHV1bm9wQ3R2Z09ld1ZmUkN5alJGeHZKSHl1bWhQVXpNMzdjblpLcUhfSm02Qlh5S1FVN3lIcHl0NnlRPT0=

# Feature SyncDelta JIRA configuration
Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEbm0yRUJ6VUJKbUwyRW5kMnRaNW4wM2YxMkJUTXVXZUdmdVRCaUZIVHU2TTV2RWZLRmUtZkcwZE4yRUNlNDQ0aUJWYjNfdVg5YjV5c2JwMHhoUUYxZWdkeS11bXR0eGxRLWRVaVU3cUVQZWJlNDRtY1lWUDdqeDVFSlpXS0VFX21WajlRS3lHQjc0bS11akkybWV3QUFlR2hNWUNYLUdiRjZuN2dQODdDSExXWG1Dd2ZGclI2aUhlSWhETVZuY3hYdnhkb2c2LU1JTFBvWFpTNmZtMkNVOTZTejJwbDI2eGE0OS1xUlIwQnlCSmFxRFNCeVJNVzlOMDhTR1VUamx4RDRyV3p6Tk9qVHBrWWdySUM3TVRaYjd3N0JHMFhpdzFhZTNDLTFkRVQ2RVE4U19COXRhRWtNc0NVOHRqUS1CRDFpZ19xQmtFLU9YSDU3TXBZQXpVcld3PT0=

+# Teamsbot Browser Bot Service
+# For local testing: run the bot locally with `npm run dev` in service-teams-browser-bot
+# The bot will connect back to localhost:8000 via WebSocket
+TEAMSBOT_BROWSER_BOT_URL = https://cae-poweron-shared.redwater-53d21339.switzerlandnorth.azurecontainerapps.io
+
# Debug Configuration
APP_DEBUG_CHAT_WORKFLOW_ENABLED = True
APP_DEBUG_CHAT_WORKFLOW_DIR = D:/Athi/Local/Web/poweron/local/debug
+APP_DEBUG_ACCOUNTING_SYNC_ENABLED = True
+APP_DEBUG_ACCOUNTING_SYNC_DIR = D:/Athi/Local/Web/poweron/local/debug/sync

# Manadate Pre-Processing Servers
-PREPROCESS_ALTHAUS_CHAT_SECRET = (empty)
+PREPROCESS_ALTHAUS_CHAT_SECRET = DEV_ENC:Z0FBQUFBQnBudkpGbEphQ3ZUMlFMQ2EwSGpoSE9NNzRJNTJtaGk1N0RGakdIYnVVeVFHZmF5OXB3QTVWLVNaZk9wNkhfQkZWRnVwRGRxem9iRzJIWXdpX1NIN2FwSExfT3c9PQ==
+
+# Preprocessor API Configuration
+PP_QUERY_API_KEY=ouho02j0rj2oijroi3rj2oijro23jr0990
+PP_QUERY_BASE_URL=https://poweron-althaus-preprocess-prod-e3fegaatc7faency.switzerlandnorth-01.azurewebsites.net/api/v1/dataquery/query
+
+# Azure Communication Services Email Configuration
+MESSAGING_ACS_CONNECTION_STRING = endpoint=https://mailing-poweron-prod.switzerland.communication.azure.com/;accesskey=4UizRfBKBgMhDgQ92IYINM6dJsO1HIeL6W1DvIX9S0GtaS1PjIXqJQQJ99CAACULyCpHwxUcAAAAAZCSuSCt
+MESSAGING_ACS_SENDER_EMAIL = DoNotReply@poweron.swiss
+
+# Zurich WFS Parcels (dynamic map layer). Default: Stadt Zürich OGD. Override for full canton if wfs.zh.ch resolves.
+# Connector_ZhWfsParcels_WFS_URL = https://wfs.zh.ch/av
+# Connector_ZhWfsParcels_TYPENAMES = av_li_liegenschaften_a
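Every value suffixed _SECRET in these files carries a DEV_ENC:/INT_ENC:/PROD_ENC: prefix; the base64 payload behind each prefix decodes to a token beginning with "gAAAA", which matches the Fernet format, and APP_KEY_SYSVAR points at a key file. The repository's actual encryption script is not shown in this diff, so the following decryption sketch is an assumption on all counts (prefix handling, key-file layout, Fernet):

import base64
import os
from cryptography.fernet import Fernet

def decrypt_env_secret(name: str, prefix: str = "DEV_ENC:") -> str:
    raw = os.environ[name]
    if not raw.startswith(prefix):
        return raw  # plaintext values (e.g. whsec_... in env_int.env) pass through
    token = base64.b64decode(raw[len(prefix):])  # unwrap to the assumed Fernet token
    with open(os.environ["APP_KEY_SYSVAR"], "rb") as fh:
        key = fh.read().strip()
    return Fernet(key).decrypt(token).decode("utf-8")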
82
env_int.env

@ -8,26 +8,11 @@ APP_KEY_SYSVAR = CONFIG_KEY
APP_INIT_PASS_ADMIN_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjWm41MWZ4TUZGaVlrX3pWZWNwakJsY3Facm0wLVZDd1VKeTFoZEVZQnItcEdUUnVJS1NXeDBpM2xKbGRsYmxOSmRhc29PZjJSU2txQjdLbUVrTTE1NEJjUXBHbV9NOVJWZUR3QlJkQnJvTEU9
APP_INIT_PASS_EVENT_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjdmtrakgxa0djekZVNGtTZV8wM2I5UUpCZllveVBMWXROYk5yS3BiV3JEelJSM09VYTRONHpnY3VtMGxDRk5JTEZSRFhtcDZ0RVRmZ1RicTFhb3c5dVZRQ1o4SmlkLVpPTW5MMTU2eTQ0Vkk9

-# PostgreSQL Storage (new)
-DB_APP_HOST=gateway-int-server.postgres.database.azure.com
-DB_APP_DATABASE=poweron_app
-DB_APP_USER=heeshkdlby
-DB_APP_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjb2dka2pnN0tUbW1EU0w1Rk1jNERKQ0Z1U3JkVDhuZWZDM0g5M0kwVDE5VHdubkZna3gtZVAxTnl4MDdrR1c1ZXJ3ejJHYkZvcGUwbHJaajBGOWJob0EzRXVHc0JnZkJyNGhHZTZHOXBxd2c9
-DB_APP_PORT=5432
-
-# PostgreSQL Storage (new)
-DB_CHAT_HOST=gateway-int-server.postgres.database.azure.com
-DB_CHAT_DATABASE=poweron_chat
-DB_CHAT_USER=heeshkdlby
-DB_CHAT_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjczYzOUtTa21MMGJVTUQ5UmFfdWc3YlhCbWZOeXFaNEE1QzdJV3BLVjhnalBkLVVCMm5BZzdxdlFXQXc2RHYzLWtPSFZkZE1iWG9rQ1NkVWlpRnF5TURVbnl1cm9iYXlSMGYxd1BGYVc0VDA9
-DB_CHAT_PORT=5432
-
-# PostgreSQL Storage (new)
-DB_MANAGEMENT_HOST=gateway-int-server.postgres.database.azure.com
-DB_MANAGEMENT_DATABASE=poweron_management
-DB_MANAGEMENT_USER=heeshkdlby
-DB_MANAGEMENT_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjTnJKNlJMNmEwQ0Y5dVNrR3pkZk9SQXVvLTRTNW9lQ1g3TTE5cFhBNTd5UENqWW9qdWd3NWNseWhnUHJveDJyd1Z3X1czS3VuZnAwZHBXYVNQWlZsRy12ME42NndEVlR5X3ZPdFBNNmhLYm89
-DB_MANAGEMENT_PORT=5432
+# PostgreSQL DB Host
+DB_HOST=gateway-int-server.postgres.database.azure.com
+DB_USER=heeshkdlby
+DB_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjczYzOUtTa21MMGJVTUQ5UmFfdWc3YlhCbWZOeXFaNEE1QzdJV3BLVjhnalBkLVVCMm5BZzdxdlFXQXc2RHYzLWtPSFZkZE1iWG9rQ1NkVWlpRnF5TURVbnl1cm9iYXlSMGYxd1BGYVc0VDA9
+DB_PORT=5432

# Security Configuration
APP_JWT_KEY_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjNUctb2RwU25iR3ZnanBOdHZhWUtIajZ1RnZzTEp4aDR0MktWRjNoeVBrY1Npd1R0VE9YVHp3M2w1cXRzbUxNaU82QUJvaDNFeVQyN05KblRWblBvbWtoT0VXbkNBbDQ5OHhwSUFnaDZGRG10Vmgtdm1YUkRsYUhFMzRVZURmSFlDTFIzVWg4MXNueDZyMGc5aVpFdWRxY3dkTExGM093ZTVUZVl5LUhGWnlRPQ==
@ -46,34 +31,65 @@ APP_LOGGING_FILE_ENABLED = True
APP_LOGGING_ROTATION_SIZE = 10485760
APP_LOGGING_BACKUP_COUNT = 5

-# Service Redirects
-Service_MSFT_REDIRECT_URI = https://gateway-int.poweron-center.net/api/msft/auth/callback
-Service_GOOGLE_REDIRECT_URI = https://gateway-int.poweron-center.net/api/google/auth/callback
+# OAuth: Auth app (login/JWT) vs Data app (Graph / Google APIs)
+Service_MSFT_AUTH_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
+Service_MSFT_AUTH_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm83T29rMDZvcV9qTG5xb1FzUkdqS1llbzRxSEJXbmpONFFtcUtfZXdtZjQybmJSMjBjMEpnRVhiOGRuczZvVFBFdVVTQV80SG9PSnRQTEpLdVViNm5wc2E5aGRLWjZ4TGF1QjVkNmdRSzBpNWNkYXVublFYclVEdEM5TVBBZWVVMW5RVWk=
+Service_MSFT_AUTH_REDIRECT_URI = https://gateway-int.poweron-center.net/api/msft/auth/login/callback
+Service_MSFT_DATA_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
+Service_MSFT_DATA_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm83T29rMDZvcV9qTG5xb1FzUkdqS1llbzRxSEJXbmpONFFtcUtfZXdtZjQybmJSMjBjMEpnRVhiOGRuczZvVFBFdVVTQV80SG9PSnRQTEpLdVViNm5wc2E5aGRLWjZ4TGF1QjVkNmdRSzBpNWNkYXVublFYclVEdEM5TVBBZWVVMW5RVWk=
+Service_MSFT_DATA_REDIRECT_URI = https://gateway-int.poweron-center.net/api/msft/auth/connect/callback
+
+Service_GOOGLE_AUTH_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
+Service_GOOGLE_AUTH_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjNThGeVRNd3hacThtRnE0bzlDa0JPUWQyaEd6QjlFckdsMGZjRlRfUks2bXV3aDdVRTF3LVRlZVY5WjVzSXV4ZGNnX002RDl3dkNYdGFzZkxVUW01My1wTHRCanVCLUozZEx4TlduQlB5MnpvNTR2SGlvbFl1YkhzTEtsSi1SOEo=
+Service_GOOGLE_AUTH_REDIRECT_URI = https://gateway-int.poweron-center.net/api/google/auth/login/callback
+Service_GOOGLE_DATA_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
+Service_GOOGLE_DATA_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjNThGeVRNd3hacThtRnE0bzlDa0JPUWQyaEd6QjlFckdsMGZjRlRfUks2bXV3aDdVRTF3LVRlZVY5WjVzSXV4ZGNnX002RDl3dkNYdGFzZkxVUW01My1wTHRCanVCLUozZEx4TlduQlB5MnpvNTR2SGlvbFl1YkhzTEtsSi1SOEo=
+Service_GOOGLE_DATA_REDIRECT_URI = https://gateway-int.poweron-center.net/api/google/auth/connect/callback
+
+# ClickUp OAuth (Verbindungen / automation). Create an app in ClickUp: Settings → Apps → API; set redirect URL to Service_CLICKUP_OAUTH_REDIRECT_URI exactly.
+Service_CLICKUP_CLIENT_ID = O3FX3H602A30MQN4I4SBNGJLIDBD5SL4
+Service_CLICKUP_CLIENT_SECRET = INT_ENC:Z0FBQUFBQnB5dkd5SE1uVURMNVE3NkM4cHBKa2R2TjBnLWdpSXI5dHpKWGExZVFiUF95TFNnZ1NwLWFLdmh6eWFZTHVHYTBzU2FGRUpLYkVyM1NvZjZkWDZHN21qUER5ZVNOaGpCc3NrUGd3VnFTclF3OW1nUlVuWXQ1UVhDLVpyb1BwRExOeFpDeVhtbEhDVnd4TVdpbzNBNk5QQWFPdjdza0xBWGxFY1E3WFpCSUlNa1l4RDlBPQ==
+Service_CLICKUP_OAUTH_REDIRECT_URI = http://localhost:8000/api/clickup/auth/connect/callback
+
+# Stripe Billing (both end with _SECRET for encryption script)
+STRIPE_SECRET_KEY_SECRET = INT_ENC:Z0FBQUFBQnB5dkd5ekdBaGNGVUlOQUpncTlzLWlTV0V5OWZzQkpDczhCUGw4U1JpTHZ0d3pfYlFNWElLRlNiNlNsaDRYTGZUTkg2OUFrTW1GZXpOUjBVbmRQWjN6ekhHd2ZSQ195OHlaeWh1TmxrUm10V2R3YmdncmFLbFMzVjdqcWJMSUJPR2xuSEozclNoZG1rZVBTaWg3OFQ1Qzdxb0wyQ2RKazc2dG1aZXBUTXlvbDZqLS1KOVI5M3BGc3NQZkZRbnFpRjIwWmh2ZHlVNlpxZVo2dWNmMjQ5eW02QmtzUT09
+STRIPE_WEBHOOK_SECRET = whsec_2agCQEbDPSOn2C40EJcwoPCqlvaPLF7M
+STRIPE_API_VERSION = 2026-01-28.clover
+STRIPE_AUTOMATIC_TAX_ENABLED = false
+STRIPE_TAX_RATE_ID_CH_VAT = txr_1TOQd14OUoIL0Osj7A0ZQlr0
+
# AI configuration
-Connector_AiOpenai_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjSDBNYkptSkQxTUotYVVpZVNZc0dxNGNwSEtkOEE0T3RZWjROTEhSRlRXdlZmQUxxZ0w3Y0xOV2JNV19LNF9yTUZiU1pUNG15U2VDUDdSVlI4VlpnR3JXVFFtcXBaTEZiaUtSclVFd0lCZG1rWVhra1dfWTVQOTBEYUU0MjByYVNEMTFmeXNOcmpUT216MmJKdlVPeW5nPT0=
+Connector_AiOpenai_API_SECRET = INT_ENC:Z0FBQUFBQnBaSnM4MENkQ2xJVmE5WFZKUkh2SHJFby1YVXN3ZmVxRkptS3ZWRmlwdU93ZEJjSjlMV2NGbU5mS3NCdmFfcmFYTEJNZXFIQ3ozTWE4ZC1pemlQNk9wbjU1d3BPS0ZCTTZfOF8yWmVXMWx0TU1DamlJLVFhSTJXclZsY3hMVWlPcXVqQWtMdER4T252NHZUWEhUOTdIN1VGR3ltazEweXFqQ0lvb0hYWmxQQnpxb0JwcFNhRDNGWXdoRTVJWm9FalZpTUF5b1RqZlRaYnVKYkp0NWR5Vko1WWJ0Wmg2VWJzYXZ0Z3Q4UkpsTldDX2dsekhKMmM4YjRoa2RwemMwYVQwM2cyMFlvaU5mOTVTWGlROU8xY2ZVRXlxZzJqWkxURWlGZGI2STZNb0NpdEtWUnM9
Connector_AiAnthropic_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjT1ZlRWVJdVZMT3ljSFJDcFdxRFBRVkZhS204NnN5RDBlQ0tpenhTM0FFVktuWW9mWHNwRWx2dHB0eDBSZ0JFQnZKWlp6c01pVGREWHd1eGpERnU0Q2xhaks1clQ1ZXVsdnd2ZzhpNXNQS1BhY3FjSkdkVEhHalNaRGR4emhpakZncnpDQUVxOHVXQzVUWmtQc0FsYmFwTF9TSG5FOUFtWk5Ick1NcHFvY2s1T1c2WXlRUFFJZnh6TWhuaVpMYmppcDR0QUx0a0R6RXlwbGRYb1R4dzJkUT09
-Connector_AiPerplexity_API_SECRET = INT_ENC:Z0FBQUFBQm82Mzk2UWZJdUFhSW8yc3RKc0tKRXphd0xWMkZOVlFpSGZ4SGhFWnk0cTF5VjlKQVZjdS1QSWdkS0pUSWw4OFU5MjUxdTVQel9aeWVIZTZ5TXRuVmFkZG0zWEdTOGdHMHpsTzI0TGlWYURKU1Q0VVpKTlhxUk5FTmN6SUJScDZ3ZldIaUJZcWpaQVRiSEpyQm9tRTNDWk9KTnZBPT0=
+Connector_AiPerplexity_API_SECRET = INT_ENC:Z0FBQUFBQnB5dkd6UkhtU3lhYmZMSlo0bklQZ2s3UTFBSkprZTNwWkg5Q2lVa0wtenhxWXpva21xVDVMRjdKSmhpTmxWS05IUTRoRHdCbktSRVVjcVFnY1RfV0N2S2dyV0dTMlhxQlRFVm41RkFTWVQzQThuVkZwdlNuVC05QlVRVXB6Qjk3akNpYmY1MFR6R1ByMzlIMllRZlRRYVVRN2ZBPT0=
Connector_AiTavily_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkdkJMTDY0akhXNzZDWHVYSEt1cDZoOWEzSktneHZEV2JndTNmWlNSMV9KbFNIZmQzeVlrNE5qUEIwcUlBSGM1a0hOZ3J6djIyOVhnZzI3M1dIUkdicl9FVXF3RGktMmlEYmhnaHJfWTdGUkktSXVUSGdQMC1vSEV6VE8zR2F1SVk=
+Connector_AiPrivateLlm_API_SECRET = INT_ENC:Z0FBQUFBQnBudkpGSjZ1NWh0aWc1R3Z4MHNaeS1HamtUbndhcUZFZDlqUDhjSmg5eHFfdlVkU0RsVkJ2UVRaMWs3aWhraG5jSlc0YkxNWHVmR2JoSW5ENFFCdkJBM0VienlKSnhzNnBKbTJOUTFKczRfWlQ3bWpmUkRTT1I1OGNUSTlQdExacGRpeXg=
+Connector_AiMistral_API_SECRET = INT_ENC:Z0FBQUFBQnBudkpGZTNtZ1E4TWIxSEU1OUlreUpxZkJIR0Vxcm9xRHRUbnBxbTQ1cXlkbnltWkJVdTdMYWZ4c3Fsam42TERWUTVhNzZFMU9xVjdyRGFCYml6bmZsZFd2YmJzemlrSWN6Q3o3X0NXX2xXNUQteTNONHdKYzJ5YVpLLWdhU2JhSTJQZnI=

-# Microsoft Service Configuration
-Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
-Service_MSFT_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm83T29rMDZvcV9qTG5xb1FzUkdqS1llbzRxSEJXbmpONFFtcUtfZXdtZjQybmJSMjBjMEpnRVhiOGRuczZvVFBFdVVTQV80SG9PSnRQTEpLdVViNm5wc2E5aGRLWjZ4TGF1QjVkNmdRSzBpNWNkYXVublFYclVEdEM5TVBBZWVVMW5RVWk=
Service_MSFT_TENANT_ID = common

-# Google Service configuration
-Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
-Service_GOOGLE_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjNThGeVRNd3hacThtRnE0bzlDa0JPUWQyaEd6QjlFckdsMGZjRlRfUks2bXV3aDdVRTF3LVRlZVY5WjVzSXV4ZGNnX002RDl3dkNYdGFzZkxVUW01My1wTHRCanVCLUozZEx4TlduQlB5MnpvNTR2SGlvbFl1YkhzTEtsSi1SOEo=

# Google Cloud Speech Services configuration
Connector_GoogleSpeech_API_KEY_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkNmVXZ1pWcHcydTF2MXF0ZGJoWHBydF85bTczTktiaEJ3Wk1vMW1mZVhDSG1yd0ZxR2ZuSGJTX0N3MWptWXFJTkNTWjh1SUVVTXI4UDVzcGdLMkU5SHJ2TUpkRlRoRWdnSldtYjNTQkh4UDJHY2xmdTdZQ1ZiMTZZcGZxS3RzaHdjV3dtVkZUcEpJcWx0b2xuQVR6ZmpoVFZPY1hNMTV2SnhDaC1IZEh4UUpLTy1ILXA4RG1zamJTbUJ4X0t2M2NkdzJPbEJxSmFpRzV3WC0wZThoVzlxcmpHZ3ZkLVlVY3REZk1vV19WQ05BOWN6cnJ4MWNYYnNiQ0FQSUVnUlpfM3BhMnlsVlZUOG5wM3pzM1lSN1UzWlZKUXRLczlHbjI1LTFvSUJ4SlVXMy1BNk43bE5Hb0RfTTVlWk9oZnFIaVg0SW5pbm9EcXRTTzU1RFlYY3dTcnpKWWNyNjN5T1BGZ0FmX253cEFncmhvZVRuM05KYzhkOEhFMFJsc2NBSEwzZVZ1R0JMOGxsekVwUE55alZaRXFrdzNWWVNGWXNmbnhKeWhQSFo2VXBTUlRPeHdvdVdncEFuOWgydEtsSUFneUN6cGVaTnBSdjNCdVJseGJFdmlMc203UFhLVlYyTENkaGg2dVN6Z2xwT1ZmTmN5bVZGUkM3ZWcyVkt2ckFUVVd3WFFwYnJjNVRobEh2SkVJbXRwUUpEOFJKQ1NUc0Q4NHNqUFhPSDh5cTV6MEcwSDEwRUJCQ2JiTTJlOE5nd3pMMkJaQ1dVYjMwZVVWWnlETmp2dkZ3aXEtQ29WNkxZTFkzYUkxdTlQUU1OTnhWWU12YU9MVnJQa1d2ZjRtUlhneTNubEMxTmp1eUNPOThSMlB3Y1F0T2tCdFNsNFlKalZPV25yR2QycVBUb096RmZ1V0FTaGsxLV9FWDBmenBIOXpMdGpLcUc0TWRoY2hlMFhYTzlET1ZRekw0ZHNwUVBQdVJBX2h6Q2ZzWVZJWTNybTJiekp3WmhmWF9SUFBXQzlqUjctcVlHWWVMZWVQallzR0JGTVF0WmtnWlg1aTM1bFprNVExZXY5dnNvWF93UjhwbkJ3RzNXaVJ2d2RRU3JJVlBvaVh4eTlBRUtqWkJia3dJQVVBV2Nqdm9FUTRUVW1TaHp2ZUwxT0N2ZndxQ2Nka1RYWXF0LWxIWFE0dTFQcVhncFFPM0hFdUUtYlFnemx3WkF4bjA1aDFULUdrZlVZbEJtRGRCdjJyVkdJSXozd0I0dF9zbWhOeHFqRDA4T1NVaWR5cjBwSVgwbllPU294NjZGTnM1bFhIdGpNQUxFOENWd3FCbGpSRFRmRXotQnU0N2lCVEU5RGF6Qi10S2U2NGdadDlrRjZtVE5oZkw5ZWFjXzhCTmxXQzNFTFgxRXVYY3J3YkxnbnlBSm9PY3h4MlM1NVFQbVNDRW5Ld1dvNWMxSmdoTXJuaE1pT2VFeXYwWXBHZ29MZDVlN2lwUUNIeGNCVVdQVi1rRXdJMWFncUlPTXR0MmZVQ1l0d09mZTdzWGFBWUJMUFd3b0RSOU8zeER2UWpNdzAxS0ZJWnB5S3FJdU9wUDJnTTNwMWw3VFVqVXQ3ZGZnU1RkUktkc0NhUHJ0SGFxZ0lVWDEzYjNtU2JfMGNWM1Y0dHlCTzNESEdENC1jUWF5MVppRzR1QlBNSUJySjFfRi1ENHEwcmJ4S3hQUFpXVHA0TG9DZWdoUlo5WnNSM1lCZm1KbEs2ak1yUUU4Wk9JcVJGUkJwc0NvUkMyTjhoTWxtZmVQeDREZVRKZkhYN2duLVNTeGZzdFdBVnhEandJSXB5QjM0azF0ckI3Tk1wSzFhNGVOUVRrNjU0cG9JQ29pN09xOFkwR1lMTlktaGp4TktxdTVtTnNEcldsV2pEZm5nQWpJc2hxY0hjQnVSWUR5VVdaUXBHWUloTzFZUC1oNzJ4UjZ1dnpLcDJxWEZtQlNIMWkzZ0hXWXdKeC1iLXdZWVJhcU04VFlpMU5pd2ZIdTdCdkVWVFVBdmJuRk16bEFFQTh4alBrcTV2RzliT2hGdTVPOXlRMjFuZktiRTZIamQ1VFVqS0hRTXhxcU1mdkgyQ1NjQmZfcjl4c3NJd0RIeDVMZUFBbHJqdEJxWWl3aWdGUEQxR3ZnMkNGdVB4RUxkZi1xOVlFQXh1NjRfbkFEaEJ5TVZlUGFrWVhSTVRPeGxqNlJDTHNsRWRrei1pYjhnUmZrb3BvWkQ2QXBzYjFHNXZoWU1LSExhLWtlYlJTZlJmYUM5Y1Rhb1pkMVYyWTByM3NTS0VXMG1ybm1BTVN2QXRYaXZqX2dKSkZrajZSS2cyVlNOQnd5Y29zMlVyaWlNbTJEb3FuUFFtbWNTNVpZTktUenFZSl91cVFXZjRkQUZyYmtPczU2S1RKQ19ONGFOTHlwX2hOOEE1UHZEVjhnT0xxRjMxTEE4SHhRbmlmTkZwVXJBdlJDbU5oZS05SzI4QVhEWDZaN2ZiSlFwUGRXSnB5TE9MZV9ia3pYcmZVa1dicG5FMHRXUFZXMWJQVDAwOEdDQzJmZEl0ZDhUOEFpZXZWWXl5Q2xwSmFienNCMldlb2NKb2ZRYV9KbUdHRzNUcjU1VUFhMzk1a2J6dDVuNTl6NTdpM0hGa3k0UWVtbF9pdDVsQVp2cndDLUU5dnNYOF9CLS0ySXhBSFdCSnpqV010bllBb3U0cEZZYVF5R2tSNFM5NlRhdS1fb1NqbDBKMkw0V2N0VEZhNExtQlR3ckZ3cVlCeHVXdXJ6X0s4cEtsaG5rVUxCN2RRbHQxTmcyVFBqYUxyOHJzeFBXVUJaRHpXbUoxdHZzMFBzQk1UTUFvX1pGNFNMNDFvZWdTdEUtMUNKMXNIeVlvQk1CeEdpZVdmN0tsSDVZZHJXSGt5c2o2MHdwSTZIMVBhRzM1eU43Q2FtcVNidExxczNJeUx5U2RuUG5EeHpCTlg2SV9WNk1ET3BRNXFuc0pNWlVvZUYtY21oRGtJSmwxQ09QbHBUV3BuS3B5NE9RVkhfellqZjJUQ0diSV94QlhQWmdaaC1TRWxsMUVWSXB0aE1McFZDZDNwQUVKZ2t5cXRTXzlRZVJwN0pZSnJSV21XMlh0TzFRVEl0c2I4QjBxOGRCYkNxek04a011X1lrb2poQ3h2LUhKTGJiUlhneHp5QWFBcE5nMElkNTVzM3JGOWtUQ19wNVBTaVVHUHFDNFJnNXJaWDNBSkMwbi1WbTdtSnFySkhNQl9ZQjZrR2xDcXhTRExhMmNHcGlyWjR3ZU9SSjRZd1l4ZjVPeHNiYk53SW5SYnZPTzNkd1lnZmFseV9tQ3BxM3lNYVBHT0J0elJnMTByZ3VHemxta0tVQzZZRllmQ2VLZ1ZCNDhUUTc3LWNCZXBMekFwWW1fQkQ1NktzNGFMYUdYTU0xbXprY1FONUNlUHNMY3h2NFJMMmhNa3VNdzF4TVFWQk9odnJUMjFJMVd3Z2N6Sms
5aEM2SWlWZFViZ0JWTEpUWWM5NmIzOS1oQmRqdkt1NUUycFlVcUxERUZGbnZqTUxIYnJmMDBHZDEzbnJsWEEzSUo3UmNPUDg1dnRUU1FzcWtjTWZwUG9zM0JTY3RqMDdST2UxcXFTM0d0bGkwdFhnMk5LaUlxNWx3V1pLaVlLUFJXZzBzVl9Ia1V1OHdYUEFWOU50UndycGtCdzM0Q0NQamp2VTNqbFBLaGhsbUk5dUI5MjU5OHVySk1oY0drUWtXUloyVVRvOWJmbUVYRzFVeWNQczh2NXJCeVppRlZiWDNJaDhOSmRmX2lURTNVS3NXQXFZT1QtUmdvMWJoVWYxU3lqUUJhbzEyX3I3TXhwbm9wc1FoQ1ZUTlNBRjMyQTBTY2tzbHZ3RFUtTjVxQ0o1QXRTVks2WENwMGZCRGstNU1jN3FhUFJCQThyaFhhMVRsbnlSRXNGRmt3Yk01X21ldmV3bTItWm1JaGpZQWZROEFtT1d1UUtPQlhYVVFqT2NxLUxQenJHX3JfMEdscDRiMXcyZ1ZmU3NFMzVoelZJaDlvT0ZoRGQ2bmtlM0M5ZHlCd2ZMbnRZRkZUWHVBUEx4czNfTmtMckh5eXZrZFBzOEItOGRYOEhsMzBhZ0xlOWFjZzgteVBsdnpPT1pYdUxnbFNXYnhKaVB6QUxVdUJCOFpvU2x2c1FHZV94MDBOVWJhYkxISkswc0U5UmdPWFJLXzZNYklHTjN1QzRKaldKdEVHb0pOU284N3c2LXZGMGVleEZ5NGZ6OGV1dm1tM0J0aTQ3VFlNOEJrdEh3PT0=

# Feature SyncDelta JIRA configuration
Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkTUNsWm4wX0p6eXFDZmJ4dFdHNEs1MV9MUzdrb3RzeC1jVWVYZ0REWHRyZkFiaGZLcUQtTXFBZzZkNzRmQ0gxbEhGbUNlVVFfR1JEQTc0aldkZkgyWnBOcjdlUlZxR0tDTEdKRExULXAyUEtsVmNTMkRKU1BJNnFiM0hlMXo4YndMcHlRMExtZDQ3Zm9vNFhMcEZCcHpBPT0=

+# Teamsbot Browser Bot Service
+TEAMSBOT_BROWSER_BOT_URL = https://cae-poweron-shared.redwater-53d21339.switzerlandnorth.azurecontainerapps.io
+
# Debug Configuration
APP_DEBUG_CHAT_WORKFLOW_ENABLED = FALSE
APP_DEBUG_CHAT_WORKFLOW_DIR = ./test-chat
+APP_DEBUG_ACCOUNTING_SYNC_ENABLED = FALSE
+APP_DEBUG_ACCOUNTING_SYNC_DIR = ./debug/sync

# Manadate Pre-Processing Servers
-PREPROCESS_ALTHAUS_CHAT_SECRET = (empty)
+PREPROCESS_ALTHAUS_CHAT_SECRET = INT_ENC:Z0FBQUFBQnBaSnM4UkNBelhvckxCQUVjZm94N3BZUDcxaEMyckE2dm1lRVhqODhrWU1SUjNXZ3dQZlVJOWhveXFkZXpobW5xT0NneGZ2SkNUblFmYXd0WTBYNTl3UmRnSWc9PQ==
+
+# Preprocessor API Configuration
+PP_QUERY_API_KEY=ouho02j0rj2oijroi3rj2oijro23jr0990
+PP_QUERY_BASE_URL=https://poweron-althaus-preprocess-prod-e3fegaatc7faency.switzerlandnorth-01.azurewebsites.net/api/v1/dataquery/query
+
+# Azure Communication Services Email Configuration
+MESSAGING_ACS_CONNECTION_STRING = endpoint=https://mailing-poweron-prod.switzerland.communication.azure.com/;accesskey=4UizRfBKBgMhDgQ92IYINM6dJsO1HIeL6W1DvIX9S0GtaS1PjIXqJQQJ99CAACULyCpHwxUcAAAAAZCSuSCt
+MESSAGING_ACS_SENDER_EMAIL = DoNotReply@poweron.swiss
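env_int.env gets the same Auth-app/Data-app split as env_dev.env above: one Service_MSFT_AUTH_* / Service_GOOGLE_AUTH_* set for login/JWT and one *_DATA_* set for Graph and Google API access, with identical client IDs until the apps are actually split. A sketch of keeping the two flows apart on the consuming side; the variable names come from the files, but the helper itself is hypothetical:

import os
from dataclasses import dataclass

@dataclass(frozen=True)
class OAuthApp:
    client_id: str
    client_secret: str  # still in its *_ENC form; see the decryption sketch above
    redirect_uri: str

def load_oauth_app(provider: str, flow: str) -> OAuthApp:
    # provider: "MSFT" or "GOOGLE"; flow: "AUTH" (login/JWT) or "DATA" (Graph / Google APIs).
    prefix = f"Service_{provider}_{flow}_"
    return OAuthApp(
        client_id=os.environ[prefix + "CLIENT_ID"],
        client_secret=os.environ[prefix + "CLIENT_SECRET"],
        redirect_uri=os.environ[prefix + "REDIRECT_URI"],
    )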
83
env_prod.env

@ -8,26 +8,11 @@ APP_INIT_PASS_ADMIN_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3UnJRV0sySFlDblpXUlREclREaW
APP_INIT_PASS_EVENT_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3QVpIY19DQVZSSzJmc2F0VEZvQlU1cHBhTEgxdHdnR3g4eW01aTEzYTUxc1gxTDR1RVVpSHRXYjV6N1BLZUdCUGlfOW1qdy0xSHFVRkNBcGZvaGlSSkZycXRuUllaWnpyVGRoeFg1dGEyNUk9
APP_API_URL = https://gateway-prod.poweron-center.net

-# PostgreSQL Storage (new)
-DB_APP_HOST=gateway-prod-server.postgres.database.azure.com
-DB_APP_DATABASE=poweron_app
-DB_APP_USER=gzxxmcrdhn
-DB_APP_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3cm5LQWV1OURQanVyTklVaVhJbDI2Y1Itb29pTWFmR2RYM0pyYUhhRUpWZ29tWWwzSmdQeVhScHlHQWVyY0xUTElIdVBJUjh5Zm9ZMzg1ZERNQXZ6TXlGb2tYOGpDX1gzXzB3UUlCM1ZaYWM9
-DB_APP_PORT=5432
-
-# PostgreSQL Storage (new)
-DB_CHAT_HOST=gateway-prod-server.postgres.database.azure.com
-DB_CHAT_DATABASE=poweron_chat
-DB_CHAT_USER=gzxxmcrdhn
-DB_CHAT_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3Y1JScGxjZG9TdUkwaHRzSHZhRHpNcDV3N1U2TnIwZ21PRG5TWFFfR1k0N3BiRk5WelVadjlnXzVSTDZ6NXFQNFpqbnJ1R3dNVkJocm1zVEgtSk0xaDRiR19zNDBEbVIzSk51ekNlQ0Z3b0U9
-DB_CHAT_PORT=5432
-
-# PostgreSQL Storage (new)
-DB_MANAGEMENT_HOST=gateway-prod-server.postgres.database.azure.com
-DB_MANAGEMENT_DATABASE=poweron_management
-DB_MANAGEMENT_USER=gzxxmcrdhn
-DB_MANAGEMENT_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3ZWpySThqdlVmWWd5dGxmWE91RVBsenZrQmNhSzVxbktmYzZ1RlM3cXhTMUdXRV9wX1lfLTJXLTFzeUo0R3pWLXlmUWdrZ2x6QkFlZVRXaEF6aUdRbDlzb1FfcWtub0dxSGp3OVVQWGg3enM9
-DB_MANAGEMENT_PORT=5432
+# PostgreSQL DB Host
+DB_HOST=gateway-prod-server.postgres.database.azure.com
+DB_USER=gzxxmcrdhn
+DB_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3Y1JScGxjZG9TdUkwaHRzSHZhRHpNcDV3N1U2TnIwZ21PRG5TWFFfR1k0N3BiRk5WelVadjlnXzVSTDZ6NXFQNFpqbnJ1R3dNVkJocm1zVEgtSk0xaDRiR19zNDBEbVIzSk51ekNlQ0Z3b0U9
+DB_PORT=5432

# Security Configuration
APP_JWT_KEY_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3elhfV0Rnd2pQRjlMdkVwX1FnSmRhSzNZUlV5SVpaWXBNX1hpa2xPZGdMSWpnN2ZINHQxeGZnNHJweU5pZjlyYlY5Qm9zOUZEbl9wUEgtZHZXd1NhR19JSG9kbFU4MnFGQnllbFhRQVphRGQyNHlFVWR5VHQyUUpqN0stUmRuY2QyTi1oalczRHpLTEJqWURjZWs4YjZvT2U5YnFqcXEwdEpxV05fX05QMmtrPQ==
@ -46,34 +31,66 @@ APP_LOGGING_FILE_ENABLED = True
APP_LOGGING_ROTATION_SIZE = 10485760
APP_LOGGING_BACKUP_COUNT = 5

-# Service Redirects
-Service_MSFT_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/msft/auth/callback
-Service_GOOGLE_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/google/auth/callback
+# OAuth: Auth app (login/JWT) vs Data app (Graph / Google APIs)
+Service_MSFT_AUTH_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
+Service_MSFT_AUTH_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQnBESkk2T25scFU1T1pNd2FENTFRM3kzcEpSXy1HT0trQkR2Wnl3U3RYbExzRy1YUTkxd3lPZE84U2lhX3FZanp5TjhYRGluLXVjU3hjaWRBUnZLbVhtRDItZ3FxNXJ3MUxicUZTXzJWZVNrR0VKN3ZlNEtET1ppOFk0MzNmbkwyRmROUk4=
+Service_MSFT_AUTH_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/msft/auth/login/callback
+Service_MSFT_DATA_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
+Service_MSFT_DATA_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQnBESkk2T25scFU1T1pNd2FENTFRM3kzcEpSXy1HT0trQkR2Wnl3U3RYbExzRy1YUTkxd3lPZE84U2lhX3FZanp5TjhYRGluLXVjU3hjaWRBUnZLbVhtRDItZ3FxNXJ3MUxicUZTXzJWZVNrR0VKN3ZlNEtET1ppOFk0MzNmbkwyRmROUk4=
+Service_MSFT_DATA_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/msft/auth/connect/callback
+
+Service_GOOGLE_AUTH_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
+Service_GOOGLE_AUTH_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3eWFwSEZ4YnRJcjU1OW5kcXZKdkt1Z3gzWDFhVW5Eelh3VnpnNlppcWxweHY5UUQzeDIyVk83cW1XNVE4bllVWnR2MjlSQzFrV1UyUVV6OUt5b3Vqa3QzMUIwNFBqc2FVSXRxTlQ1OHVJZVFibnhBQ2puXzBwSXp5NUZhZjM1d1o=
+Service_GOOGLE_AUTH_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/google/auth/login/callback
+Service_GOOGLE_DATA_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
+Service_GOOGLE_DATA_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3eWFwSEZ4YnRJcjU1OW5kcXZKdkt1Z3gzWDFhVW5Eelh3VnpnNlppcWxweHY5UUQzeDIyVk83cW1XNVE4bllVWnR2MjlSQzFrV1UyUVV6OUt5b3Vqa3QzMUIwNFBqc2FVSXRxTlQ1OHVJZVFibnhBQ2puXzBwSXp5NUZhZjM1d1o=
+Service_GOOGLE_DATA_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/google/auth/connect/callback
+
+# ClickUp OAuth (Verbindungen / automation). Create an app in ClickUp: Settings → Apps → API; set redirect URL to Service_CLICKUP_OAUTH_REDIRECT_URI exactly.
+Service_CLICKUP_CLIENT_ID = O3FX3H602A30MQN4I4SBNGJLIDBD5SL4
+Service_CLICKUP_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQnB5dkd6VGw5WDdhdDRsVENSalhSSUV0OFFxbEx0V1l6aktNV0E5Y18xU3JHLUlqMWVJdmxyajAydVZRaDJkZzJOVXhxRV9ROFRZbWxlRjh4c3NtQnRFMmRtZWpzTWVsdngtWldlNXRKTURHQjJCOEt6alMwQlkwOFYyVVJWNURJUGJIZDIxYVlfNnBrMU54M0Q3TVdVbFZqRkJKTUtqa05wUkV4eGZvbXNsVi1nNVdBPQ==
+Service_CLICKUP_OAUTH_REDIRECT_URI = http://localhost:8000/api/clickup/auth/connect/callback
+
+# Stripe Billing (both end with _SECRET for encryption script)
+STRIPE_SECRET_KEY_SECRET = PROD_ENC:Z0FBQUFBQnB5dkd6aVA3R3VRS3VHMUgzUEVjYkR4eUZKWFhPUzFTTVlHNnBvT3FienNQaUlBWVpPLXJyVGpGMWk4LXktMXphX0J6ZTVESkJxdjNNa3ZJbF9wX2ppYzdjYlF0cmdVamlEWWJDSmJYYkJseHctTlh4dnNoQWs4SG5haVl2TTNDdXpuaFpqeDBtNkFCbUxMa0RaWG14dmxyOEdILTNrZ2licmNpbXVkN2lFSWoxZW1BODNpV0ZTQ0VaeXRmR1d4RjExMlVFS3MtQU9zZXZlZE1mTmY3OWctUXJHdz09
+STRIPE_WEBHOOK_SECRET = PROD_ENC:Z0FBQUFBQnBudkpGNUpTWldsakYydFhFelBrR1lSaWxYT3kyMENOMUljZTJUZHBWcEhhdWVCMzYxZXQ5b3VlTFVRalFiTVdsbGxrdUx0RDFwSEpsOC1sTDJRTEJNQlA3S3ZaQzBtV1h6bWp5VnlMZUgwUlF3cXYxcnljZVE5SWdzLVg3V0syOWRYS08=
+STRIPE_API_VERSION = 2026-01-28.clover
+STRIPE_AUTOMATIC_TAX_ENABLED = false
+STRIPE_TAX_RATE_ID_CH_VAT = txr_1TOQZG8WqlVsabrfFEu49pah
+
+
# AI configuration
-Connector_AiOpenai_API_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3cUI4bHgyUGlJVzVxc196MDNDdkxRQXFNNWYyM2hvU05mNFdscjdCT0p0cGNsdm9zR3pSb3JIUU1uaHhSclFESldUWjVMNmpjWTYwR1laZDBOVHA0YXBVZkNrbldPTlh5TjBCR0hRd1A1aEdhUnZZS05feUFNM2M5dzFWUjhid1Z6dXRKcTNTTU84c0FQU2loelRHU0pRPT0=
+Connector_AiOpenai_API_SECRET = PROD_ENC:Z0FBQUFBQnBaSnM4TWJOVm4xVkx6azRlNDdxN3UxLUdwY2hhdGYxRGp4VFJqYXZIcmkxM1ZyOWV2M0Z4MHdFNkVYQ0ROb1d6LUZFUEdvMHhLMEtXYVBCRzM5TlYyY3ROYWtJRk41cDZxd0tYYi00MjVqMTh4QVcyTXl0bmVocEFHbXQwREpwNi1vODdBNmwzazE5bkpNelE2WXpvblIzWlQwbGdEelI2WXFqT1RibXVHcjNWbVhwYzBOM25XTzNmTDAwUjRvYk4yNjIyZHc5c2RSZzREQUFCdUwyb0ZuOXN1dzI2c2FKdXI4NGxEbk92czZWamJXU3ZSbUlLejZjRklRRk4tLV9aVUFZekI2bTU4OHYxNTUybDg3RVo0ZTh6dXNKRW5GNXVackZvcm9laGI0X3R6V3M9
Connector_AiAnthropic_API_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3TnhYdlhSLW5RbXJyMHFXX0V0bHhuTDlTaFJsRDl2dTdIUTFtVFAwTE8tY3hLbzNSMnVTLXd3RUZualN3MGNzc1kwOTIxVUN2WW1rYi1TendFRVVBSVNqRFVjckEzNExyTGNaUkJLMmozazUwemI1cnhrcEtZVXJrWkdaVFFramp3MWZ6RmY2aGlRMXVEYjM2M3ZlbmxMdnNCRDM1QWR0Wmd6MWVnS1I1c01nV3hRLXg3d2NTZXVfTi1Wdm16UnRyNGsyRTZ0bG9TQ1g1OFB5Z002bmQ3QT09
-Connector_AiPerplexity_API_SECRET = PROD_ENC:Z0FBQUFBQm82Mzk2Q1FGRkJEUkI4LXlQbHYzT2RkdVJEcmM4WGdZTWpJTEhoeUF1NW5LUVpJdDBYN3k1WFN4a2FQSWJSQmd0U0xJbzZDTmFFN05FcXl0Z3V1OEpsZjYydV94TXVjVjVXRTRYSWdLMkd5XzZIbFV6emRCZHpuOUpQeThadE5xcDNDVGV1RHJrUEN0c1BBYXctZFNWcFRuVXhRPT0=
+Connector_AiPerplexity_API_SECRET = PROD_ENC:Z0FBQUFBQnB5dkd6NG5CTm9QOFZRV1BIVC0tV2RKTGtCQWFOUXlpRnhEdjN1U2x3VUdDamtIZV9CQzQ5ZmRmcUh3ZUVUa0NxbGhlenVVdWtaYjdpcnhvUlNFLXZfOWh2dWFZai0xUGU5cWpuYmpnRVRWakh0RVNUUTFyX0w5V0NXVWFrQlZuOTd5TkI0eVRoQ0ZBSm9HYUlYamoyY1FCMmlBPT0=
Connector_AiTavily_API_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3NmItcDh6V0JpcE5Jc0NlUWZqcmllRHB5eDlNZmVnUlNVenhNTm5xWExzbjJqdE1GZ0hTSUYtb2dvdWNhTnlQNmVWQ2NGVDgwZ0MwMWZBMlNKWEhzdlF3TlZzTXhCZWM4Z1Uwb18tSTRoU1JBVTVkSkJHOTJwX291b3dPaVphVFg=
+Connector_AiPrivateLlm_API_SECRET = PROD_ENC:Z0FBQUFBQnBudkpGanZ6U3pzZWkwXzVPWGtIQ040XzFrTXc5QWRnazdEeEktaUJ0akJmNnEzbWUzNHczLTJfc2dIdzBDY0FTaXZYcDhxNFdNbTNtbEJTb2VRZ0ZYd05hdlNLR1h6SUFzVml2Z1FLY1BjTl90UWozUGxtak1URnhhZmNDRWFTb0dKVUo=
+Connector_AiMistral_API_SECRET = PROD_ENC:Z0FBQUFBQnBudkpGc2tQc2lvMk1YZk01Q1dob1U5cnR0dG03WWE3WkpoOWo0SEpvLU9Rc2lCNDExdy1wZExaN3lpT2FEQkxnaHRmWmZUUUZUUUJmblZreGlpaFpOdnFhbzlEd1RsVVJtX216cmhxTm5BcTN2eUZ2T054cDE5bmlEamJ3NGR6MVpFQnA=

-# Microsoft Service Configuration
-Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
-Service_MSFT_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQnBESkk2T25scFU1T1pNd2FENTFRM3kzcEpSXy1HT0trQkR2Wnl3U3RYbExzRy1YUTkxd3lPZE84U2lhX3FZanp5TjhYRGluLXVjU3hjaWRBUnZLbVhtRDItZ3FxNXJ3MUxicUZTXzJWZVNrR0VKN3ZlNEtET1ppOFk0MzNmbkwyRmROUk4=
Service_MSFT_TENANT_ID = common

-# Google Service configuration
-Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
-Service_GOOGLE_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3eWFwSEZ4YnRJcjU1OW5kcXZKdkt1Z3gzWDFhVW5Eelh3VnpnNlppcWxweHY5UUQzeDIyVk83cW1XNVE4bllVWnR2MjlSQzFrV1UyUVV6OUt5b3Vqa3QzMUIwNFBqc2FVSXRxTlQ1OHVJZVFibnhBQ2puXzBwSXp5NUZhZjM1d1o=

# Google Cloud Speech Services configuration
Connector_GoogleSpeech_API_KEY_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z4NFQxaF9uN3h1cVB6dnZid1c1R1VfNDlSQ1NHMEVDZWtKanpMQ29CLXc1MXBqRm1hQ0YtWVhaejBMY1ZTOEFEVlpWQ3hrYkFza1E2RDNsYkdMMndNR0VGNTMwVDRGdURJY3hyaVFxVjEtSEYwNHJzeWM3WmlpZW9jU2E3NTgycEV2allqQ3dJRTNyRFAzaDJ6dklKeXpNRkJhYjFzUkptN2dpbkNpMklrcGxuZl9vTkt3T0JvNm1YTXd5UlkwZWptUXdWVFpnV2J4X3J2WUhIUlFkSElFVnlqMnlJRnNHTnlpMWs2R1dZc2ROWjNYZG85cndmd1E5cUZnVmZRYnVjTG43dXFmSWd2bGFfVWFWSmtpWkpndWNlSUNwcnFNU2NqZXFaV0xsY3l3SElLRkVHcHZGZERKV1ltcGhTS0dhTko1VTJLYzNoZjRkSGVEX3dTMWVVTmdDczV5cE1JQUdSbUJGUm11eFhTVjJHbkt0SzB4UG1Dc2xmbnp1Y041Y2RTeWRuWGdmQy1sTGx0MGtnM2VJQ3EyLXViRlNhTU9ybzZkR1N1bXE5SXhlZENWRFpWSGlYOWx4SUQ3UlR0ZEVxQkxNakRUVFRiUmFnbklOalphLUZkRFVVaXBRUk5NZW5PaUZydTFmQkNPSTdTVTNZd0plWXllNVFJdmN4MVcyTGlwMGFtVjBzOGRxR1FjbzhfYW5zdTB0ZEZBTTJhakltazh1dktNMUZsOUItdFdTb1pIaUxySllXNkdlY20zUS0wTnpFNTB2SU5acG1VcXhyaHBmME8takw3RDh5T043T2VGOV92TzNya2pWSlpYVjZDdXlZcjM3a0hPTlhkaW9oQmxqQlpGRFYyTTY4WmZmT3k4Tk1tdXRuSGdTUVpNT2NKenhXb05PdXBfSEdhMTNxNjdpNXlKUUI2YUgydFFPX1VvXzVJb0UxWTU2YVNiNDQ0QndZanhMMHR1cGdHWGhvcEg1QXEtSXZJdTdZUE12ZEVVWkF4QmtsQS1GYnY3SFIxSHlsOGVfcEpGS1A4QUVEQWNEOFZYYlljQ3ByTU03YU16Y0UzUnJQZEprSWNjT1ZXVEtDWi03Y3ZzRVdYUTlabXJISEo5THRHVXVuM0xqbzA4bGVlZVpOMk1QMmptb21tV0pTMlVoOXdWVU95UW1iQmttc2w1RG9mMWwxXzg1T2IxYUVmTUJEZkpUdTFDTzZ3RlBFeUFiX01iRTZNWkNaSG45TkFOM2pzbUJRZ2N0VFpoejJUTG1RODY3TzZpSzVkYUQzaEpfY2pSTkRzU0VpanlkdXVQQmJ2WU5peno4QWNLTDVxZTlhSHI3NnNiM0k0Y3JkQ0xaOU05bGtsQl8zQklvaktWSDZ4aVp2MHlYelJuUDJyTU9CZC1OZjJxNFc1dDcwSUlxaVh1LTMyWWFwU0IwUU9kOUFpMWpnOERtLTh1VmJiNGVwcXBMbU5fMjVZc0hFbmxQT2puSFd1ZGpyTkphLU5sVlBZWWxrWEZrWGJQWmVkN19tZFZfZ1l1V3pSWlA0V0ZxM2lrWnl2NU9WeTdCbDROSmhfeENKTFhMVXk1d195S2JMUFJoRXZjcVo4V2g0MTNKRnZhUE1wRkNPM3FZOGdVazJPeW5PSGpuZnFGTTdJMkRnam5rUlV6NFlqODlIelRYaEN5VjdJNnVwbllNODNCTFRHMWlXbmM1VlRxbXB3Wm9LRjVrQUpjYzRNMThUMWwwSVhBMUlyamtPZnE4R0o4bEdHay1zMjR5RDJkZ1lYRHZaNHVHU2otR3ZpN25LZlEySEU0UmdTNzJGVHNWQXMyb0dVMV9WUE13ODhZWUFaakxGOWZieGNXZkNYRnV5djEyWTZLcmdrajRBLU1rS1Z0VVRkOWlDMU9fMGVmYXFhZXJGMUhpNkdmb2hkbzZ1OWV6VlNmVzNISjVYTFh6SjJNdWR5MWZidE8yVEo2dnRrZXhMRXBPczUwTG13OGhNUVpIQm0zQmRKRnJ0Nl8wNW1Ob0dHRDVpU0NWREV3TkY2SjktdVBkMFU1ZXBmSFpHQ3FHNTRZdTJvaExpZVEtLTU4YTVyeFBpNDdEajZtWUc4c1dBeUJqQ3NIY1NLS0FIMUxGZzZxNFNkOG9ORGNHWWJCVnZuNnJVTEtoQi1mRTZyUl81ZWJJMi1KOGdERzBhNVRZeHRYUUlqY2JvMFlaNHhWMU9pWFFiZjdaLUhkaG15TTBPZVlkS2R5UVdENTI4QVFiY1RJV0ZNZnlpVWxfZmlnN1BXbGdrbjFGUkhzYl9qeHBxVVJacUE4bjZETENHVFpSamh0NVpOM2hMYTZjYzBuS3J0a3hhZGxSM1V5UHd2OTU3ZHY0Yy1xWDBkWUk0Ymp0MWVrS3YzSktKODhQZnY3QTZ1Wm1VZkZJbS1jamdreks1ZlhpQjFOUDFiOHJ2Nm9NcmdTdU5LQXV2RkZWZEFNZnVKUjVwcVY3dDdhQnpmRVJ6SmlvVXpDM0ZiYXh5bGE2X04tTE9qZ3BiTnN3TF9ZaFRxSUpjNjB1dXZBcy1TZHRHTjFjSUR3WUl4cE9VNzB5Rkk4U3Z1SVZYTl9sYXlZVk83UnFrMlVmcnBpam9lRUlCY19DdVJwOXl2TVVDV1pMRFZTZk9MY3Z1eXA0MnhGazc5YllQaWtOeTc4NjlOa2lGY05RRzY1cG9nbGpYelc4c3FicWxWRkg0YzRSamFlQ19zOU14YWJreU9pNDREZVJ3a0REMUxGTzF1XzI1bEF3VXVZRjlBeWFiLXJsOXgza3VZem1WckhWSnVNbDBNcldadU8xQ3RwOTl5NGgtVlR0QklCLWl5WkE4V1FlQTBCOVU1RE9sQlRrYUNZOGdfUmEwbEZvUTFGUEFWVmQ4V1FhOU9VNjZqemRpZm1sUDhZQTJ0YVBRbWZldkF5THV4QXpfdUtNZ0tlcGdSRFM3c0lDOTNQbnBxdmxYYWNpTmI3MW9BMlZIdTQ5RldudHpNQWQ5NDNPLVVTLXVVNzdHZXh4UXpZa3dVa2J4dTFDV1RkYjRnWXU2M3lJekRYWGNMcWU5OVh6U2xZWDh6MmpqcnpiOHlnMjA5S3RFQm1NZjNSM21adkVnTUpSYVhkTzNkNnJCTmljY0x1cl9kMkx3UHhySjZEdHREanZERzNEUTFlTkR0NWlBczAtdmFGTjdZNVpTMlkxV2czYW5RN2lqemg4eUViZDV6RjdKNXdFcUlvcVhoNkJ6eVJkR1pua1hnNzQwOEs2TXJYSlpGcW9qRDU2QjBOWFFtdXBJRkRKbmdZUF9ZSmRPVEtvUjVhLTV1NjdXQjRhS0duaEtJb2FrQnNjUTRvdFMxdkdTNk1NYlFHUFhhYTJ1eUN3WHN4UlJ4UjdrZjY0SzFGYWVFN1k0cGJnc1RjNmFUenR4NHljbVhablZSWHZmUVN3cXRHNjhsX1BSZWEzdTJUZFA0S2pTaU9YMnZIQ1ZPcGhWMFJqZkVEMWRMR1
h3SnU0Z2FzZ3VGM3puNzdhVjhaQXNIWHFsbjB0TDVYSFdSNV9rdWhUUUhSZHBGYkJIVDB5SDdlMC13QTVnS0g5Qkg5RGNxSGJlelVndUhPcEQ0QkRKMTJTZUM1OXJhVm0zYjU0OVY2dk9MQVBheklIQXpVNW9Yc0ROVjEzaFZTWmVxYlBWMlNlSzladzJ6TmNuMG5FVVZkN1VZN1pfS2ZHa0lQcE80S24wSnQtVlJVV09OVWJ3M09YMkZpV2ktVF9ENHhKU2dfYUQ2aUVyamk0VHJHQmVfVHU4clpUTFoteW5aSWRPV1M0RDRMTms4NGRoYmJfVE82aUl2X3VieVJOdDhBQmRwdzdnRTVBNzZwaW93dUlZb3ZRYUtOeG9ULWxvNVp5a0haSjdkcUhRb3d6UGIxRUpCVkVYX2d6TkRqQVozUWxkNGFoc1FXYVd2YWNkME9Qclo0bjYxMFRWTy1nbnI5NTBJNzRMMDluUXRKYTFqQUN4d0d5aHVlamN3Tkk3NWJXeXR0TW9BeUg5Vnp4Q2RnZUY3b3AtMDlrNmlrSGR0eGRtbUdUd2lFRWg4MklEeWJHN2wwZEpVSXMxNDNOWjRFS0tPdWxhMmFCckhfRENIY184aEFDZXNrRDl2dHQtQW12UnRuQXJjaDJoTUpiYkNWQUtfRG9GMUZoNWM4UnBYZ29RWWs2NHcyUm5kdTF3Vk1GeFpiRUJLaVZ2UGFjbi1jV3lMV0N2ZDl4VERPN295X01NNG56ZjZkRzZoYUtmY1E5NlVXemx2SnVfb19iSXg0R2M3Mjd1a2JRPT0=

# Feature SyncDelta JIRA configuration
Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z4d3Z4d2x6N1FhUktMU0RKbkxfY2pTQkRzXzJ6UXVEbDNCaFM3UHMtQVFGYzNmYWs4N0lMM1R2SFJuZTVFVmx6MGVEbXc5U3NOTnY1TWN0ZDNaamlHQWloalM3VldmREJNSHQ1TlVkSVFJMTVhQWVGSVRMTGw4UTBqNGlQZFVuaHp4WUlKemR5UnBXZlh0REJFLXJ4ejR3PT0=

# Teamsbot Browser Bot Service
TEAMSBOT_BROWSER_BOT_URL = https://cae-poweron-shared.redwater-53d21339.switzerlandnorth.azurecontainerapps.io

# Debug Configuration
APP_DEBUG_CHAT_WORKFLOW_ENABLED = FALSE
APP_DEBUG_CHAT_WORKFLOW_DIR = ./test-chat
APP_DEBUG_ACCOUNTING_SYNC_ENABLED = FALSE
APP_DEBUG_ACCOUNTING_SYNC_DIR = ./debug/sync

# Mandate Pre-Processing Servers
-PREPROCESS_ALTHAUS_CHAT_SECRET = kj823u90209mj020394jp2msakhfkjashjkf
+PREPROCESS_ALTHAUS_CHAT_SECRET = PROD_ENC:Z0FBQUFBQnBaSnM4RVRmYW5IelNIbklTUDZIMEoycEN4ZFF0YUJoWWlUTUh2M0dhSXpYRXcwVkRGd1VieDNsYkdCRlpxMUR5Rjk1RDhPRkE5bmVtc2VDMURfLW9QNkxMVHN0M1JhbU9sa3JHWmdDZnlHS3BQRVBGTERVMHhXOVdDOWVqNkhfSUQyOHo=

# Preprocessor API Configuration
PP_QUERY_API_KEY=ouho02j0rj2oijroi3rj2oijro23jr0990
PP_QUERY_BASE_URL=https://poweron-althaus-preprocess-prod-e3fegaatc7faency.switzerlandnorth-01.azurewebsites.net/api/v1/dataquery/query

# Azure Communication Services Email Configuration
MESSAGING_ACS_CONNECTION_STRING = endpoint=https://mailing-poweron-prod.switzerland.communication.azure.com/;accesskey=4UizRfBKBgMhDgQ92IYINM6dJsO1HIeL6W1DvIX9S0GtaS1PjIXqJQQJ99CAACULyCpHwxUcAAAAAZCSuSCt
MESSAGING_ACS_SENDER_EMAIL = DoNotReply@poweron.swiss

@@ -1,3 +1,5 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """
 Base connector interface for AI connectors.
 All AI connectors should inherit from this class.
@@ -9,9 +11,36 @@ IMPORTANT: Model Registration Requirements
 - If duplicate displayNames are detected during registration, an error will be raised
 """

+import re as _re
+
 from abc import ABC, abstractmethod
-from typing import List, Dict, Any, Optional
+from typing import List, Dict, Any, Optional, AsyncGenerator, Union
-from modules.datamodels.datamodelAi import AiModel
+from modules.datamodels.datamodelAi import AiModel, AiModelCall, AiModelResponse
+
+_RETRY_AFTER_PATTERN = _re.compile(
+    r"(?:try again in|retry after)\s+(\d+(?:\.\d+)?)\s*s", _re.IGNORECASE
+)
+
+
+def _parseRetryAfterSeconds(message: str) -> float:
+    """Extract retry-after seconds from provider error messages like 'Please try again in 6.558s'."""
+    match = _RETRY_AFTER_PATTERN.search(message)
+    return float(match.group(1)) if match else 0.0
+
+
+class RateLimitExceededException(Exception):
+    """Raised when a provider's rate limit (TPM / RPM) is exceeded."""
+    def __init__(self, message: str = "Rate limit exceeded", retryAfterSeconds: float = 0.0):
+        super().__init__(message)
+        if retryAfterSeconds <= 0:
+            retryAfterSeconds = _parseRetryAfterSeconds(message)
+        self.retryAfterSeconds = retryAfterSeconds
+
+
+class ContextLengthExceededException(Exception):
+    """Raised when the input exceeds a model's context window."""
+    pass
+
 class BaseConnectorAi(ABC):
@@ -100,3 +129,24 @@ class BaseConnectorAi(ABC):
         """Get only available models."""
         models = self.getCachedModels()
         return [model for model in models if model.isAvailable]
+
+    async def callAiBasicStream(self, modelCall: AiModelCall) -> AsyncGenerator[Union[str, AiModelResponse], None]:
+        """Stream AI response. Yields str deltas during generation, then final AiModelResponse.
+
+        Default implementation: falls back to non-streaming callAiBasic.
+        Override in connectors that support streaming.
+        """
+        response = await self.callAiBasic(modelCall)
+        if response.content:
+            yield response.content
+        yield response
+
+    async def callEmbedding(self, modelCall: AiModelCall) -> AiModelResponse:
+        """Generate embeddings for input texts. Override in connectors that support embeddings.
+
+        Reads texts from modelCall.embeddingInput.
+        Returns AiModelResponse with metadata["embeddings"] containing the vectors.
+        """
+        raise NotImplementedError(
+            f"{self.__class__.__name__} does not support embeddings"
+        )

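Note: the streaming contract added above (string deltas first, exactly one terminal AiModelResponse) is easy to misuse on the consumer side. A minimal consumer sketch, assuming only the interface shown in this hunk; the connector and modelCall objects are whatever the caller already holds:

    async def consumeStream(connector, modelCall):
        # Text deltas arrive as str; the generator's final item is the AiModelResponse.
        final = None
        async for item in connector.callAiBasicStream(modelCall):
            if isinstance(item, str):
                print(item, end="", flush=True)  # incremental delta
            else:
                final = item  # terminal AiModelResponse (may carry metadata["toolCalls"])
        return final
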
@@ -1,3 +1,5 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """
 Dynamic model registry that collects models from all AI connectors.
 Implements plugin-like architecture for connector discovery.
@@ -6,12 +8,20 @@ Implements plugin-like architecture for connector discovery.
 import logging
 import importlib
 import os
-from typing import Dict, List, Optional, Any
+import time
+from typing import Dict, List, Optional, Any, Tuple
 from modules.datamodels.datamodelAi import AiModel
-from modules.aicore.aicoreBase import BaseConnectorAi
+from .aicoreBase import BaseConnectorAi
+from modules.datamodels.datamodelUam import User
+from modules.security.rbacHelpers import checkResourceAccess
+from modules.security.rbac import RbacClass
+from modules.connectors.connectorDbPostgre import DatabaseConnector

 logger = logging.getLogger(__name__)

+# TODO TESTING: Override maxTokens for all models during testing
+# Set to None to disable override, or set to an integer (e.g., 20000) to override all models
+TESTING_MAX_TOKENS_OVERRIDE: Optional[int] = None  # TODO TESTING: Set to None to disable
+
 class ModelRegistry:
     """Dynamic registry for AI models from all connectors."""
@@ -21,6 +31,10 @@ class ModelRegistry:
         self._connectors: Dict[str, BaseConnectorAi] = {}
         self._lastRefresh: Optional[float] = None
         self._refreshInterval: float = 300.0  # 5 minutes
+        self._connectorsInitialized: bool = False
+        self._discoveredConnectorsCache: Optional[List[BaseConnectorAi]] = None  # Avoid re-instantiating on every discoverConnectors() call
+        self._getAvailableModelsCache: Dict[Tuple[str, int], Tuple[List[AiModel], float]] = {}  # (user_id, rbac_id) -> (models, ts)
+        self._getAvailableModelsCacheTtl: float = 30.0  # seconds

     def registerConnector(self, connector: BaseConnectorAi):
         """Register a connector and collect its models."""
@@ -44,6 +58,12 @@ class ModelRegistry:
                 logger.error(errorMsg)
                 raise ValueError(errorMsg)

+            # TODO TESTING: Override maxTokens if testing override is enabled
+            if TESTING_MAX_TOKENS_OVERRIDE is not None and model.maxTokens > TESTING_MAX_TOKENS_OVERRIDE:
+                originalMaxTokens = model.maxTokens
+                model.maxTokens = TESTING_MAX_TOKENS_OVERRIDE
+                logger.debug(f"TESTING: Overrode maxTokens for {model.displayName}: {originalMaxTokens} -> {TESTING_MAX_TOKENS_OVERRIDE}")
+
             # Use displayName as the key (must be unique)
             self._models[model.displayName] = model
             logger.debug(f"Registered model: {model.displayName} (name: {model.name}) from {connectorType}")
@@ -52,7 +72,10 @@ class ModelRegistry:
             raise

     def discoverConnectors(self) -> List[BaseConnectorAi]:
-        """Auto-discover connectors by scanning aicorePlugin*.py files."""
+        """Auto-discover connectors by scanning aicorePlugin*.py files. Cached after first call to avoid 4-8 s re-init on every use."""
+        if self._discoveredConnectorsCache is not None:
+            return self._discoveredConnectorsCache
+
         connectors = []
         connectorDir = os.path.dirname(__file__)
@@ -80,12 +103,24 @@ class ModelRegistry:
             except Exception as e:
                 logger.warning(f"Failed to discover connector from {filename}: {e}")

+        self._discoveredConnectorsCache = connectors
         return connectors

+    def ensureConnectorsRegistered(self):
+        """Register connectors once to avoid per-request discovery."""
+        if self._connectorsInitialized:
+            return
+        discovered = self.discoverConnectors()
+        for connector in discovered:
+            self.registerConnector(connector)
+        self._connectorsInitialized = True
+
     def refreshModels(self, force: bool = False):
         """Refresh models from all registered connectors."""
         import time

+        self.ensureConnectorsRegistered()
+
         currentTime = time.time()

         # Check if refresh is needed
@@ -112,6 +147,12 @@ class ModelRegistry:
                 logger.error(errorMsg)
                 raise ValueError(errorMsg)

+            # TODO TESTING: Override maxTokens if testing override is enabled
+            if TESTING_MAX_TOKENS_OVERRIDE is not None and model.maxTokens > TESTING_MAX_TOKENS_OVERRIDE:
+                originalMaxTokens = model.maxTokens
+                model.maxTokens = TESTING_MAX_TOKENS_OVERRIDE
+                logger.debug(f"TESTING: Overrode maxTokens for {model.displayName}: {originalMaxTokens} -> {TESTING_MAX_TOKENS_OVERRIDE}")
+
             # Use displayName as the key (must be unique)
             self._models[model.displayName] = model
         except Exception as e:
@@ -142,11 +183,49 @@ class ModelRegistry:
         self.refreshModels()
         return [model for model in self._models.values() if model.priority == priority]

-    def getAvailableModels(self) -> List[AiModel]:
-        """Get only available models."""
+    def getAvailableModels(
+        self,
+        currentUser: Optional[User] = None,
+        rbacInstance: Optional[RbacClass] = None,
+        mandateId: Optional[str] = None,
+        featureInstanceId: Optional[str] = None
+    ) -> List[AiModel]:
+        """Get only available models, optionally filtered by RBAC permissions.
+        Results are cached per (user, rbac) for 30s to avoid repeated filtering on each LLM call.
+
+        Args:
+            currentUser: Optional user object for RBAC filtering
+            rbacInstance: Optional RBAC instance for permission checks
+            mandateId: Optional mandate context for faster RBAC (loads fewer roles)
+            featureInstanceId: Optional feature instance for RBAC context
+
+        Returns:
+            List of available models (filtered by RBAC if user provided)
+        """
         self.refreshModels()
+        cache_key = (currentUser.id if currentUser else "", id(rbacInstance) if rbacInstance else 0)
+        now = time.time()
+        if cache_key in self._getAvailableModelsCache:
+            cached_models, cached_ts = self._getAvailableModelsCache[cache_key]
+            if now - cached_ts < self._getAvailableModelsCacheTtl:
+                logger.debug(f"getAvailableModels: cache hit for user={cache_key[0][:8] if cache_key[0] else 'anon'}...")
+                return cached_models
+
         allModels = list(self._models.values())
         availableModels = [model for model in allModels if model.isAvailable]

+        # Apply RBAC filtering if user and RBAC instance provided (batch check for performance)
+        if currentUser and rbacInstance:
+            availableModels = self._filterModelsByRbac(
+                availableModels, currentUser, rbacInstance, mandateId, featureInstanceId
+            )
+
+        self._getAvailableModelsCache[cache_key] = (availableModels, now)
+        # Prune expired entries to avoid unbounded growth
+        expired = [k for k, (_, ts) in self._getAvailableModelsCache.items() if now - ts >= self._getAvailableModelsCacheTtl]
+        for k in expired:
+            del self._getAvailableModelsCache[k]
+
         unavailableCount = len(allModels) - len(availableModels)
         if unavailableCount > 0:
             unavailableModels = [m.name for m in allModels if not m.isAvailable]

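Note: the per-(user, rbac) result cache above expires entries after 30 seconds and prunes expired keys on every write. A standalone sketch of that TTL pattern, using plain strings in place of AiModel objects:

    import time
    from typing import Dict, List, Tuple

    class TtlCache:
        """Mirrors the registry's (value, timestamp) cache entries with pruning."""
        def __init__(self, ttl: float = 30.0):
            self._ttl = ttl
            self._entries: Dict[str, Tuple[List[str], float]] = {}

        def get(self, key: str):
            entry = self._entries.get(key)
            if entry is None:
                return None
            value, ts = entry
            if time.time() - ts >= self._ttl:
                del self._entries[key]  # expired entry is dropped on read
                return None
            return value

        def put(self, key: str, value: List[str]):
            now = time.time()
            self._entries[key] = (value, now)
            # Prune all expired keys so the dict cannot grow without bound.
            expired = [k for k, (_, ts) in self._entries.items() if now - ts >= self._ttl]
            for k in expired:
                del self._entries[k]
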
@@ -154,6 +233,66 @@ class ModelRegistry:
         logger.debug(f"getAvailableModels: Returning {len(availableModels)} models: {[m.name for m in availableModels]}")
         return availableModels

+    def _filterModelsByRbac(
+        self,
+        models: List[AiModel],
+        currentUser: User,
+        rbacInstance: RbacClass,
+        mandateId: Optional[str] = None,
+        featureInstanceId: Optional[str] = None
+    ) -> List[AiModel]:
+        """Filter models based on RBAC permissions. Uses bulk check for performance."""
+        paths = []
+        model_paths = {}  # model -> (connector_path, model_path)
+        for model in models:
+            connector_path = f"ai.model.{model.connectorType}"
+            model_path = f"ai.model.{model.connectorType}.{model.displayName}"
+            paths.extend([connector_path, model_path])
+            model_paths[id(model)] = (connector_path, model_path)
+        # Single bulk RBAC call instead of 2*N per-model calls
+        access = rbacInstance.checkResourceAccessBulk(
+            currentUser, list(dict.fromkeys(paths)), mandateId, featureInstanceId
+        )
+        filteredModels = []
+        for model in models:
+            connector_path, model_path = model_paths[id(model)]
+            if access.get(connector_path, False) or access.get(model_path, False):
+                filteredModels.append(model)
+            else:
+                logger.debug(f"User {currentUser.username} does not have access to model {model.displayName} (connector: {model.connectorType})")
+        return filteredModels
+
+    def getModel(self, displayName: str, currentUser: Optional[User] = None, rbacInstance: Optional[RbacClass] = None) -> Optional[AiModel]:
+        """Get a specific model by displayName, optionally checking RBAC permissions.
+
+        Args:
+            displayName: Model display name
+            currentUser: Optional user object for RBAC check
+            rbacInstance: Optional RBAC instance for permission check
+
+        Returns:
+            Model if found and user has access (or if no user provided), None otherwise
+        """
+        self.refreshModels()
+        model = self._models.get(displayName)
+
+        if not model:
+            return None
+
+        # Check RBAC permission if user provided
+        if currentUser and rbacInstance:
+            connectorResourcePath = f"ai.model.{model.connectorType}"
+            modelResourcePath = f"ai.model.{model.connectorType}.{model.displayName}"
+
+            hasConnectorAccess = checkResourceAccess(rbacInstance, currentUser, connectorResourcePath)
+            hasModelAccess = checkResourceAccess(rbacInstance, currentUser, modelResourcePath)
+
+            if not (hasConnectorAccess or hasModelAccess):
+                logger.warning(f"User {currentUser.username} does not have access to model {displayName}")
+                return None
+
+        return model
+
     def getConnectorForModel(self, displayName: str) -> Optional[BaseConnectorAi]:
         """Get the connector instance for a specific model by displayName."""
         model = self.getModel(displayName)
@@ -200,3 +339,17 @@ class ModelRegistry:

 # Global registry instance
 modelRegistry = ModelRegistry()
+
+# Eager pre-warm on first import: ensures connectors are ready in this process.
+# Critical for chatbot performance: avoids 4-8 s latency on first request.
+# Runs when this module is first imported (lifespan or first chatbot request).
+def _eager_prewarm() -> None:
+    try:
+        modelRegistry.ensureConnectorsRegistered()
+        modelRegistry.refreshModels(force=True)
+        logger.info("AI connectors and model registry pre-warmed (module load)")
+    except Exception as e:
+        logger.warning(f"AI eager pre-warm skipped: {e}")
+
+_eager_prewarm()

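Note: access in _filterModelsByRbac is granted if the user may use either the whole connector or the individual model, resolved from one bulk call instead of 2*N checks. A minimal sketch of that predicate, with a stubbed access map standing in for rbacInstance.checkResourceAccessBulk():

    def filterByAccess(models, access):
        allowed = []
        for m in models:
            connectorPath = f"ai.model.{m['connectorType']}"
            modelPath = f"ai.model.{m['connectorType']}.{m['displayName']}"
            # A model is visible if either the connector grant or the model grant is set.
            if access.get(connectorPath, False) or access.get(modelPath, False):
                allowed.append(m)
        return allowed

    models = [{"connectorType": "anthropic", "displayName": "Anthropic Claude Haiku 4.5"}]
    access = {"ai.model.anthropic": True}  # connector-level grant covers all its models
    assert [m["displayName"] for m in filterByAccess(models, access)] == ["Anthropic Claude Haiku 4.5"]
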
@@ -1,20 +1,47 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """
 Simplified model selection based on model properties and priority-based sorting.
 No complex rules needed - just filter by properties and sort by priority!
 """

 import logging
-from typing import List, Dict, Any, Optional
+import time
+from typing import List, Dict, Any, Optional, Tuple
 from modules.datamodels.datamodelAi import AiModel, AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum

-# Configure logger
 logger = logging.getLogger(__name__)

+_COOLDOWN_DURATION = 60.0
+
 class ModelSelector:
-    """Simple model selector based on properties and priority-based sorting."""
+    """Model selector with priority scoring and recent-failure cooldown."""

     def __init__(self):
-        logger.info("ModelSelector initialized with simplified approach")
+        self._failureLog: Dict[str, Tuple[float, float]] = {}
+        logger.info("ModelSelector initialized with failure cooldown support")
+
+    def reportFailure(self, modelName: str, cooldownSeconds: float = 0.0):
+        """Record that a model just failed (rate limit, error, etc.).
+        The model will be deprioritized for *cooldownSeconds* (default: _COOLDOWN_DURATION)."""
+        if cooldownSeconds <= 0:
+            cooldownSeconds = _COOLDOWN_DURATION
+        self._failureLog[modelName] = (time.time(), cooldownSeconds)
+        logger.info(f"ModelSelector: Recorded failure for {modelName}, cooldown {cooldownSeconds:.1f}s")
+
+    def _getCooldownPenalty(self, modelName: str) -> float:
+        """Return a score penalty (0.0 = no penalty, large negative = recently failed)."""
+        entry = self._failureLog.get(modelName)
+        if entry is None:
+            return 0.0
+        failedAt, cooldown = entry
+        elapsed = time.time() - failedAt
+        if elapsed > cooldown:
+            del self._failureLog[modelName]
+            return 0.0
+        remaining = cooldown - elapsed
+        return -(remaining / cooldown) * 5000.0

     def selectModel(self,
                     prompt: str,

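Note: the penalty above decays linearly from -5000 at the moment of failure to 0 at the end of the cooldown window. A quick worked check of that formula with the 60 s default:

    cooldown = 60.0
    for elapsed in (0.0, 30.0, 59.9):
        remaining = cooldown - elapsed
        penalty = -(remaining / cooldown) * 5000.0
        print(f"{elapsed:5.1f}s after failure -> penalty {penalty:8.1f}")
    #  0.0s -> -5000.0 (fresh failure, effectively excluded from selection)
    # 30.0s -> -2500.0 (half the cooldown elapsed)
    # 59.9s ->    -8.3 (penalty almost gone)
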
@@ -70,10 +97,18 @@ class ModelSelector:
         promptSize = len(prompt.encode("utf-8"))
         contextSize = len(context.encode("utf-8"))
         totalSize = promptSize + contextSize
-        # Convert bytes to approximate tokens (1 token ≈ 4 bytes)
-        promptTokens = promptSize / 4
-        contextTokens = contextSize / 4
-        totalTokens = totalSize / 4
+        # Convert bytes to approximate tokens
+        # Balanced estimate: 1 token ≈ 3 bytes
+        # Note: Actual tokenization varies by content type and model
+        # - English text: ~4 bytes/token
+        # - German/European text: ~3.5 bytes/token
+        # - Structured data/JSON: ~2.5-3 bytes/token
+        # - Base64/encoded data: ~1.5-2 bytes/token
+        # Using 3 as a balanced estimate (the previous 4 bytes/token underestimated token counts for dense content)
+        bytesPerToken = 3  # Balanced estimate for mixed content
+        promptTokens = promptSize / bytesPerToken
+        contextTokens = contextSize / bytesPerToken
+        totalTokens = totalSize / bytesPerToken

         logger.debug(f"Request sizes - Prompt: {promptTokens:.0f} tokens ({promptSize} bytes), Context: {contextTokens:.0f} tokens ({contextSize} bytes), Total: {totalTokens:.0f} tokens ({totalSize} bytes)")

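Note: a quick sanity check of the 3-bytes-per-token heuristic against the byte counts logged above, with made-up inputs:

    prompt = "Summarize the quarterly report."   # 31 bytes of ASCII
    context = '{"revenue": 1200, "costs": 900}'  # 31 bytes of JSON
    bytesPerToken = 3
    promptTokens = len(prompt.encode("utf-8")) / bytesPerToken    # ~10.3 tokens
    contextTokens = len(context.encode("utf-8")) / bytesPerToken  # ~10.3 tokens
    totalTokens = promptTokens + contextTokens                    # ~20.7 tokens
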
@@ -90,9 +125,16 @@ class ModelSelector:
         logger.debug(f"Models with {options.operationType.value}: {[m.name for m in operationFiltered]}")

         # Step 2: Filter by prompt size (MUST be <= 80% of context size)
+        # AND by maxInputTokensPerRequest (provider rate limit / TPM)
         # Note: contextLength is in tokens, so we need to compare tokens with tokens
         promptFiltered = []
         for model in operationFiltered:
+            # Check provider rate limit first (maxInputTokensPerRequest)
+            maxRequestTokens = getattr(model, 'maxInputTokensPerRequest', None)
+            if maxRequestTokens and maxRequestTokens > 0 and totalTokens > maxRequestTokens:
+                logger.debug(f"Model {model.name} filtered out: totalTokens={totalTokens:.0f} > maxInputTokensPerRequest={maxRequestTokens} (provider rate limit)")
+                continue
+
             if model.contextLength == 0:
                 # No context length limit - always pass
                 promptFiltered.append(model)
@@ -112,10 +154,14 @@ class ModelSelector:
                 maxAllowed = model.contextLength * 0.8 / 4 if model.contextLength > 0 else "unlimited"
                 logger.warning(f" - {model.name}: contextLength={model.contextLength} tokens, maxAllowed={maxAllowed} tokens")

-        # Step 3: Calculate scores for each model
+        # Step 3: Calculate scores for each model (including cooldown penalties)
         scoredModels = []
         for model in promptFiltered:
             score = self._calculateModelScore(model, promptSize, contextSize, totalSize, options)
+            penalty = self._getCooldownPenalty(model.name)
+            if penalty < 0:
+                logger.debug(f"Model {model.name}: base_score={score:.3f}, cooldown_penalty={penalty:.0f}")
+            score += penalty
             scoredModels.append((model, score))
             logger.debug(f"Model {model.name}: score={score:.3f}")

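Note: the new maxInputTokensPerRequest gate runs before the 80% context check, so a model can be skipped even though the prompt would fit its context window. A small sketch of the combined predicate, with hypothetical numbers:

    def passesSizeFilters(totalTokens, contextLength, maxInputTokensPerRequest=None):
        # Provider per-request cap (TPM-style limit) is checked first.
        if maxInputTokensPerRequest and totalTokens > maxInputTokensPerRequest:
            return False
        # contextLength == 0 means "no declared limit".
        if contextLength == 0:
            return True
        # Prompt must fit into 80% of the context window.
        return totalTokens <= contextLength * 0.8

    # 150k tokens fit in a 200k context (80% = 160k) but exceed a 100k per-request cap.
    assert passesSizeFilters(150_000, 200_000) is True
    assert passesSizeFilters(150_000, 200_000, maxInputTokensPerRequest=100_000) is False
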
@@ -1,10 +1,13 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+import json
 import logging
 import httpx
 import os
-from typing import Dict, Any, List
+from typing import Dict, Any, List, AsyncGenerator, Optional, Union
 from fastapi import HTTPException
 from modules.shared.configuration import APP_CONFIG
-from modules.aicore.aicoreBase import BaseConnectorAi
+from .aicoreBase import BaseConnectorAi, RateLimitExceededException
 from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings

 # Configure logger
@@ -44,9 +47,7 @@ class AiAnthropic(BaseConnectorAi):
         return "anthropic"

     def getModels(self) -> List[AiModel]:
-        return []  # TODO: DEBUG TO TURN ON AFTER TESTING
-
-        """Get all available Anthropic models."""
+        # Get all available Anthropic models.
         return [
             AiModel(
                 name="claude-sonnet-4-5-20250929",
@@ -56,22 +57,78 @@ class AiAnthropic(BaseConnectorAi):
                 temperature=0.2,
                 maxTokens=8192,
                 contextLength=200000,
-                costPer1kTokensInput=0.015,
-                costPer1kTokensOutput=0.075,
+                costPer1kTokensInput=0.003,  # $3/M tokens (updated 2026-02)
+                costPer1kTokensOutput=0.015,  # $15/M tokens (updated 2026-02)
                 speedRating=6,  # Slower due to high-quality processing
                 qualityRating=10,  # Best quality available
-                # capabilities removed (not used in business logic)
                 functionCall=self.callAiBasic,
+                functionCallStream=self.callAiBasicStream,
                 priority=PriorityEnum.QUALITY,
                 processingMode=ProcessingModeEnum.DETAILED,
                 operationTypes=createOperationTypeRatings(
                     (OperationTypeEnum.PLAN, 9),
-                    (OperationTypeEnum.DATA_ANALYSE, 10),
+                    (OperationTypeEnum.DATA_ANALYSE, 9),
                     (OperationTypeEnum.DATA_GENERATE, 9),
-                    (OperationTypeEnum.DATA_EXTRACT, 8)
+                    (OperationTypeEnum.DATA_EXTRACT, 8),
+                    (OperationTypeEnum.AGENT, 9),
+                    (OperationTypeEnum.DATA_QUERY, 9),
                 ),
                 version="claude-sonnet-4-5-20250929",
-                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.015 + (bytesReceived / 4 / 1000) * 0.075
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.003 + (bytesReceived / 4 / 1000) * 0.015
+            ),
+            AiModel(
+                name="claude-haiku-4-5-20251001",
+                displayName="Anthropic Claude Haiku 4.5",
+                connectorType="anthropic",
+                apiUrl="https://api.anthropic.com/v1/messages",
+                temperature=0.2,
+                maxTokens=8192,
+                contextLength=200000,
+                costPer1kTokensInput=0.001,  # $1/M tokens (updated 2026-02)
+                costPer1kTokensOutput=0.005,  # $5/M tokens (updated 2026-02)
+                speedRating=9,  # Very fast, lightweight model
+                qualityRating=8,  # Good quality, cost-efficient
+                functionCall=self.callAiBasic,
+                functionCallStream=self.callAiBasicStream,
+                priority=PriorityEnum.SPEED,
+                processingMode=ProcessingModeEnum.BASIC,
+                operationTypes=createOperationTypeRatings(
+                    (OperationTypeEnum.PLAN, 8),
+                    (OperationTypeEnum.DATA_ANALYSE, 8),
+                    (OperationTypeEnum.DATA_GENERATE, 8),
+                    (OperationTypeEnum.DATA_EXTRACT, 7),
+                    (OperationTypeEnum.AGENT, 7),
+                    (OperationTypeEnum.DATA_QUERY, 10),
+                ),
+                version="claude-haiku-4-5-20251001",
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.001 + (bytesReceived / 4 / 1000) * 0.005
+            ),
+            AiModel(
+                name="claude-opus-4-6",
+                displayName="Anthropic Claude Opus 4.6",
+                connectorType="anthropic",
+                apiUrl="https://api.anthropic.com/v1/messages",
+                temperature=0.2,
+                maxTokens=8192,
+                contextLength=200000,
+                costPer1kTokensInput=0.005,  # $5/M tokens (updated 2026-02)
+                costPer1kTokensOutput=0.025,  # $25/M tokens (updated 2026-02)
+                speedRating=5,  # Moderate latency, most capable
+                qualityRating=10,  # Top-tier intelligence
+                functionCall=self.callAiBasic,
+                functionCallStream=self.callAiBasicStream,
+                priority=PriorityEnum.QUALITY,
+                processingMode=ProcessingModeEnum.DETAILED,
+                operationTypes=createOperationTypeRatings(
+                    (OperationTypeEnum.PLAN, 10),
+                    (OperationTypeEnum.DATA_ANALYSE, 8),
+                    (OperationTypeEnum.DATA_GENERATE, 10),
+                    (OperationTypeEnum.DATA_EXTRACT, 9),
+                    (OperationTypeEnum.AGENT, 10),
+                    (OperationTypeEnum.DATA_QUERY, 3),
+                ),
+                version="claude-opus-4-6",
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.005 + (bytesReceived / 4 / 1000) * 0.025
             ),
             AiModel(
                 name="claude-sonnet-4-5-20250929",
@@ -81,8 +138,8 @@ class AiAnthropic(BaseConnectorAi):
                 temperature=0.2,
                 maxTokens=8192,
                 contextLength=200000,
-                costPer1kTokensInput=0.015,
-                costPer1kTokensOutput=0.075,
+                costPer1kTokensInput=0.003,  # $3/M tokens (updated 2026-02)
+                costPer1kTokensOutput=0.015,  # $15/M tokens (updated 2026-02)
                 speedRating=6,
                 qualityRating=10,
                 functionCall=self.callAiImage,
@@ -92,7 +149,7 @@ class AiAnthropic(BaseConnectorAi):
                     (OperationTypeEnum.IMAGE_ANALYSE, 10)
                 ),
                 version="claude-sonnet-4-5-20250929",
-                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.015 + (bytesReceived / 4 / 1000) * 0.075
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.003 + (bytesReceived / 4 / 1000) * 0.015
             )
         ]

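Note: each pricing callback converts raw byte counts to approximate tokens (bytes / 4) before applying the per-1k-token rate. A worked example with the Sonnet rates above and made-up payload sizes:

    bytesSent, bytesReceived = 40_000, 8_000  # hypothetical request/response sizes
    inputTokens = bytesSent / 4       # 10,000 tokens
    outputTokens = bytesReceived / 4  #  2,000 tokens
    price = (inputTokens / 1000) * 0.003 + (outputTokens / 1000) * 0.015
    # 10 * 0.003 + 2 * 0.015 = 0.03 + 0.03 = 0.06 per call
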
@@ -111,8 +168,6 @@ class AiAnthropic(BaseConnectorAi):
             HTTPException: For errors in API communication
         """
         try:
-            # Extract parameters from modelCall
-            messages = modelCall.messages
             model = modelCall.model
             options = modelCall.options
             temperature = getattr(options, "temperature", None)
@@ -120,44 +175,8 @@ class AiAnthropic(BaseConnectorAi):
                 temperature = model.temperature
             maxTokens = model.maxTokens

-            # Transform OpenAI-style messages to Anthropic format:
-            # - Move any 'system' role content to top-level 'system'
-            # - Keep only 'user'/'assistant' messages in the list
-            system_contents: List[str] = []
-            converted_messages: List[Dict[str, Any]] = []
-            for m in messages:
-                role = m.get("role")
-                content = m.get("content", "")
-                if role == "system":
-                    # Collect system content; Anthropic expects top-level 'system'
-                    if isinstance(content, list):
-                        # Join text parts if provided as blocks
-                        joined = "\n\n".join(
-                            [
-                                (part.get("text") if isinstance(part, dict) else str(part))
-                                for part in content
-                            ]
-                        )
-                        system_contents.append(joined)
-                    else:
-                        system_contents.append(str(content))
-                    continue
-                # For Anthropic, content can be a string; pass through strings, collapse blocks
-                if isinstance(content, list):
-                    # Collapse to text if blocks are provided
-                    collapsed = "\n\n".join(
-                        [
-                            (part.get("text") if isinstance(part, dict) else str(part))
-                            for part in content
-                        ]
-                    )
-                    converted_messages.append({"role": role, "content": collapsed})
-                else:
-                    converted_messages.append({"role": role, "content": content})
-
-            system_prompt = "\n\n".join([s for s in system_contents if s]) if system_contents else None
-
-            # Create Anthropic API payload
+            converted_messages, system_prompt = _convertMessagesForAnthropic(modelCall.messages)
             payload: Dict[str, Any] = {
                 "model": model.name,
                 "messages": converted_messages,
@@ -171,6 +190,13 @@ class AiAnthropic(BaseConnectorAi):
             if system_prompt:
                 payload["system"] = system_prompt

+            if modelCall.tools:
+                payload["tools"] = _convertToolsToAnthropicFormat(modelCall.tools)
+                if modelCall.toolChoice:
+                    payload["tool_choice"] = modelCall.toolChoice
+                else:
+                    payload["tool_choice"] = {"type": "auto"}
+
             response = await self.httpClient.post(
                 model.apiUrl,
                 json=payload
@@ -180,11 +206,12 @@ class AiAnthropic(BaseConnectorAi):
                 error_detail = f"Anthropic API error: {response.status_code} - {response.text}"
                 logger.error(error_detail)

-                # Provide more specific error messages based on status code
+                if response.status_code == 429:
+                    raise RateLimitExceededException(
+                        f"Rate limit exceeded for {model.name}: {response.text}"
+                    )
                 if response.status_code == 529:
                     error_message = "Anthropic API is currently overloaded. Please try again in a few minutes."
-                elif response.status_code == 429:
-                    error_message = "Rate limit exceeded. Please wait before making another request."
                 elif response.status_code == 401:
                     error_message = "Invalid API key. Please check your Anthropic API configuration."
                 elif response.status_code == 400:
@@ -197,31 +224,43 @@ class AiAnthropic(BaseConnectorAi):
             # Parse response
             anthropicResponse = response.json()

-            # Extract content from response
+            # Extract content and tool_use blocks from response
             content = ""
+            toolCalls = []
             if "content" in anthropicResponse:
                 if isinstance(anthropicResponse["content"], list):
-                    # Content is a list of parts (in newer API versions)
                     for part in anthropicResponse["content"]:
                         if part.get("type") == "text":
                             content += part.get("text", "")
+                        elif part.get("type") == "tool_use":
+                            toolCalls.append({
+                                "id": part.get("id", ""),
+                                "type": "function",
+                                "function": {
+                                    "name": part.get("name", ""),
+                                    "arguments": json.dumps(part.get("input", {})) if isinstance(part.get("input"), dict) else str(part.get("input", "{}"))
+                                }
+                            })
                 else:
-                    # Direct content as string (in older API versions)
                     content = anthropicResponse["content"]

-            # Debug logging for empty responses
-            if not content or content.strip() == "":
+            if not content and not toolCalls:
                 logger.warning(f"Anthropic API returned empty content. Full response: {anthropicResponse}")
                 content = "[Anthropic API returned empty response]"

-            # Return standardized response
+            metadata = {"response_id": anthropicResponse.get("id", "")}
+            if toolCalls:
+                metadata["toolCalls"] = toolCalls
+
             return AiModelResponse(
                 content=content,
                 success=True,
                 modelId=model.name,
-                metadata={"response_id": anthropicResponse.get("id", "")}
+                metadata=metadata
             )

+        except (RateLimitExceededException, HTTPException):
+            raise
         except Exception as e:
             error_msg = str(e) if str(e) else f"{type(e).__name__}"
             error_detail = f"Error calling Anthropic API: {error_msg}"

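Note: with tools attached, the request body gains Anthropic-style tool definitions plus a tool_choice. A sketch of the resulting payload shape, using a hypothetical weather tool; the conversion helper is defined at the end of this file:

    openaiTool = {
        "type": "function",
        "function": {
            "name": "get_weather",  # hypothetical tool
            "description": "Look up current weather for a city",
            "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
        },
    }
    payload = {
        "model": "claude-haiku-4-5-20251001",
        "messages": [{"role": "user", "content": "Weather in Bern?"}],
        "tools": _convertToolsToAnthropicFormat([openaiTool]),
        "tool_choice": {"type": "auto"},  # default when the caller sets no toolChoice
    }
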
@@ -232,6 +271,128 @@ class AiAnthropic(BaseConnectorAi):
             logger.error(error_detail, exc_info=True)
             raise HTTPException(status_code=500, detail=error_detail)

+    async def callAiBasicStream(self, modelCall: AiModelCall) -> AsyncGenerator[Union[str, AiModelResponse], None]:
+        """Stream Anthropic response. Yields str deltas, then final AiModelResponse."""
+        try:
+            model = modelCall.model
+            options = modelCall.options
+            temperature = getattr(options, "temperature", None)
+            if temperature is None:
+                temperature = model.temperature
+
+            converted, system_prompt = _convertMessagesForAnthropic(modelCall.messages)
+
+            payload: Dict[str, Any] = {
+                "model": model.name,
+                "messages": converted,
+                "temperature": temperature,
+                "max_tokens": model.maxTokens,
+                "stream": True,
+            }
+            if system_prompt:
+                payload["system"] = system_prompt
+            if modelCall.tools:
+                payload["tools"] = _convertToolsToAnthropicFormat(modelCall.tools)
+                payload["tool_choice"] = modelCall.toolChoice or {"type": "auto"}
+
+            fullContent = ""
+            toolUseBlocks: Dict[int, Dict[str, Any]] = {}
+            currentToolIdx = -1
+            stopReason: Optional[str] = None
+
+            async with self.httpClient.stream("POST", model.apiUrl, json=payload) as response:
+                if response.status_code != 200:
+                    body = await response.aread()
+                    bodyStr = body.decode()
+                    if response.status_code == 429:
+                        raise RateLimitExceededException(
+                            f"Rate limit exceeded for {model.name}: {bodyStr}"
+                        )
+                    raise HTTPException(status_code=500, detail=f"Anthropic stream error: {response.status_code} - {bodyStr}")
+
+                async for line in response.aiter_lines():
+                    if not line.startswith("data: "):
+                        continue
+                    try:
+                        event = json.loads(line[6:])
+                    except json.JSONDecodeError:
+                        continue
+
+                    eventType = event.get("type", "")
+
+                    if eventType == "error":
+                        errDetail = event.get("error", {})
+                        errMsg = errDetail.get("message", str(errDetail))
+                        errType = errDetail.get("type", "unknown")
+                        logger.error(f"Anthropic stream error event: type={errType}, message={errMsg}")
+                        if "overloaded" in errMsg.lower() or "overloaded" in errType.lower():
+                            raise HTTPException(status_code=500, detail="Anthropic API is currently overloaded. Please try again in a few minutes.")
+                        raise HTTPException(status_code=500, detail=f"Anthropic stream error: [{errType}] {errMsg}")
+
+                    elif eventType == "content_block_start":
+                        block = event.get("content_block", {})
+                        idx = event.get("index", 0)
+                        if block.get("type") == "tool_use":
+                            currentToolIdx = idx
+                            toolUseBlocks[idx] = {
+                                "id": block.get("id", ""),
+                                "name": block.get("name", ""),
+                                "arguments": "",
+                            }
+
+                    elif eventType == "content_block_delta":
+                        delta = event.get("delta", {})
+                        if delta.get("type") == "text_delta":
+                            text = delta.get("text", "")
+                            fullContent += text
+                            yield text
+                        elif delta.get("type") == "input_json_delta":
+                            idx = event.get("index", currentToolIdx)
+                            if idx in toolUseBlocks:
+                                toolUseBlocks[idx]["arguments"] += delta.get("partial_json", "")
+
+                    elif eventType == "message_delta":
+                        delta = event.get("delta", {})
+                        stopReason = delta.get("stop_reason", stopReason)
+
+                    elif eventType == "message_stop":
+                        break
+
+            if not fullContent and not toolUseBlocks:
+                logger.warning(
+                    f"Anthropic stream returned empty response: model={model.name}, "
+                    f"stopReason={stopReason}"
+                )
+
+            metadata: Dict[str, Any] = {}
+            if stopReason:
+                metadata["stopReason"] = stopReason
+            if toolUseBlocks:
+                metadata["toolCalls"] = [
+                    {
+                        "id": tb["id"],
+                        "type": "function",
+                        "function": {
+                            "name": tb["name"],
+                            "arguments": tb["arguments"],
+                        },
+                    }
+                    for tb in toolUseBlocks.values()
+                ]
+
+            yield AiModelResponse(
+                content=fullContent,
+                success=True,
+                modelId=model.name,
+                metadata=metadata,
+            )
+
+        except (RateLimitExceededException, HTTPException):
+            raise
+        except Exception as e:
+            logger.error(f"Error streaming Anthropic API: {e}", exc_info=True)
+            raise HTTPException(status_code=500, detail=f"Error streaming Anthropic API: {e}")
+
     async def callAiImage(self, modelCall: AiModelCall) -> AiModelResponse:
         """
         Analyzes an image using Anthropic's vision capabilities using standardized pattern.

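Note: tool arguments arrive spread over several input_json_delta events, each carrying a partial_json fragment, and only the concatenation parses as JSON. A standalone illustration of the accumulation done above, with invented fragments:

    import json

    fragments = ['{"ci', 'ty": "Be', 'rn"}']  # hypothetical partial_json deltas
    arguments = ""
    for fragment in fragments:
        arguments += fragment  # mirrors toolUseBlocks[idx]["arguments"] += ...
        # json.loads(arguments) would fail here until the last fragment lands
    assert json.loads(arguments) == {"city": "Bern"}
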
@@ -284,6 +445,20 @@ class AiAnthropic(BaseConnectorAi):
                 mimeType = parts[0].replace("data:", "")
                 base64Data = parts[1]

+                import base64 as _b64
+                try:
+                    rawHead = _b64.b64decode(base64Data[:32])
+                    if rawHead[:3] == b"\xff\xd8\xff":
+                        mimeType = "image/jpeg"
+                    elif rawHead[:8] == b"\x89PNG\r\n\x1a\n":
+                        mimeType = "image/png"
+                    elif rawHead[:4] == b"GIF8":
+                        mimeType = "image/gif"
+                    elif rawHead[:4] == b"RIFF" and rawHead[8:12] == b"WEBP":
+                        mimeType = "image/webp"
+                except Exception:
+                    pass
+
                 # Convert to Anthropic's vision format
                 anthropicMessages = [{
                     "role": "user",

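Note: the sniffing above trusts the file's magic bytes over the data-URL prefix, so a PNG mislabeled as image/jpeg is still sent with the correct media type. A quick check using a hand-built PNG header:

    import base64

    pngBytes = b"\x89PNG\r\n\x1a\n" + b"\x00" * 16  # PNG signature plus padding
    base64Data = base64.b64encode(pngBytes).decode()
    rawHead = base64.b64decode(base64Data[:32])     # 32 base64 chars -> 24 raw bytes
    assert rawHead[:8] == b"\x89PNG\r\n\x1a\n"      # detected as image/png
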
@ -378,3 +553,100 @@ class AiAnthropic(BaseConnectorAi):
|
||||||
success=False,
|
success=False,
|
||||||
error=f"Error during image analysis: {str(e)}"
|
error=f"Error during image analysis: {str(e)}"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _convertMessagesForAnthropic(messages: List[Dict[str, Any]]):
|
||||||
|
"""Convert OpenAI-style messages to Anthropic format. Returns (messages, system_prompt)."""
|
||||||
|
system_contents: List[str] = []
|
||||||
|
converted_messages: List[Dict[str, Any]] = []
|
||||||
|
pendingToolResults: List[Dict[str, Any]] = []
|
||||||
|
|
||||||
|
def _flush():
|
||||||
|
if not pendingToolResults:
|
||||||
|
return
|
||||||
|
converted_messages.append({"role": "user", "content": list(pendingToolResults)})
|
||||||
|
pendingToolResults.clear()
|
||||||
|
|
||||||
|
def _collapse(content):
|
||||||
|
if isinstance(content, list):
|
||||||
|
return "\n\n".join(
|
||||||
|
(part.get("text") if isinstance(part, dict) else str(part))
|
||||||
|
for part in content
|
||||||
|
)
|
||||||
|
return str(content) if content else ""
|
||||||
|
|
||||||
|
for m in messages:
|
||||||
|
role = m.get("role")
|
||||||
|
content = m.get("content", "")
|
||||||
|
|
||||||
|
if role == "system":
|
||||||
|
system_contents.append(_collapse(content))
|
||||||
|
continue
|
||||||
|
if role == "tool":
|
||||||
|
pendingToolResults.append({
|
||||||
|
"type": "tool_result",
|
||||||
|
"tool_use_id": m.get("tool_call_id", ""),
|
||||||
|
"content": str(content) if content else "",
|
||||||
|
})
|
||||||
|
continue
|
||||||
|
|
||||||
|
_flush()
|
||||||
|
|
||||||
|
if role == "assistant" and m.get("tool_calls"):
|
||||||
|
contentBlocks = []
|
||||||
|
textPart = _collapse(content)
|
||||||
|
if textPart:
|
||||||
|
contentBlocks.append({"type": "text", "text": textPart})
|
||||||
|
for tc in m["tool_calls"]:
|
||||||
|
fn = tc.get("function", {})
|
||||||
|
inputData = fn.get("arguments", "{}")
|
||||||
|
if isinstance(inputData, str):
|
||||||
|
try:
|
||||||
|
inputData = json.loads(inputData)
|
||||||
|
except (json.JSONDecodeError, ValueError):
|
||||||
|
inputData = {}
|
||||||
|
contentBlocks.append({
|
||||||
|
"type": "tool_use",
|
||||||
|
"id": tc.get("id", ""),
|
||||||
|
"name": fn.get("name", ""),
|
||||||
|
"input": inputData,
|
||||||
|
})
|
||||||
|
converted_messages.append({"role": "assistant", "content": contentBlocks})
|
||||||
|
continue
|
||||||
|
|
||||||
|
converted_messages.append({"role": role, "content": _collapse(content)})
|
||||||
|
|
||||||
|
_flush()
|
||||||
|
|
||||||
|
merged: List[Dict[str, Any]] = []
|
||||||
|
for msg in converted_messages:
|
||||||
|
if merged and merged[-1]["role"] == msg["role"]:
|
||||||
|
prev = merged[-1]
|
||||||
|
pc, nc = prev["content"], msg["content"]
|
||||||
|
if isinstance(pc, str) and isinstance(nc, str):
|
||||||
|
prev["content"] = pc + "\n\n" + nc
|
||||||
|
elif isinstance(pc, list) and isinstance(nc, list):
|
||||||
|
prev["content"] = pc + nc
|
||||||
|
elif isinstance(pc, str) and isinstance(nc, list):
|
||||||
|
prev["content"] = [{"type": "text", "text": pc}] + nc
|
||||||
|
elif isinstance(pc, list) and isinstance(nc, str):
|
||||||
|
prev["content"] = pc + [{"type": "text", "text": nc}]
|
||||||
|
else:
|
||||||
|
merged.append(msg)
|
||||||
|
|
||||||
|
system_prompt = "\n\n".join([s for s in system_contents if s]) if system_contents else None
|
||||||
|
return merged, system_prompt
|
||||||
|
|
||||||
|
|
||||||
|
def _convertToolsToAnthropicFormat(openaiTools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||||
|
"""Convert OpenAI-style tool definitions to Anthropic format."""
|
||||||
|
anthropicTools = []
|
||||||
|
for tool in openaiTools:
|
||||||
|
if tool.get("type") == "function":
|
||||||
|
fn = tool["function"]
|
||||||
|
anthropicTools.append({
|
||||||
|
"name": fn["name"],
|
||||||
|
"description": fn.get("description", ""),
|
||||||
|
"input_schema": fn.get("parameters", {"type": "object", "properties": {}})
|
||||||
|
})
|
||||||
|
return anthropicTools
|
||||||
|
|
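Reviewer note: a minimal sketch of what this conversion produces for a typical tool-use exchange (the message content is invented for illustration):

openai_messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "", "tool_calls": [{
        "id": "call_1", "type": "function",
        "function": {"name": "calc", "arguments": "{\"expr\": \"2+2\"}"},
    }]},
    {"role": "tool", "tool_call_id": "call_1", "content": "4"},
]

merged, system_prompt = _convertMessagesForAnthropic(openai_messages)
# system_prompt == "You are a helpful assistant."  (hoisted out of the message list)
# merged[1]["content"] holds a "tool_use" block with input {"expr": "2+2"}
# merged[2] is a "user" message wrapping the "tool_result" for call_1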
@@ -1,6 +1,8 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 import logging
 from typing import List
-from modules.aicore.aicoreBase import BaseConnectorAi
+from .aicoreBase import BaseConnectorAi
 from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings

 # Configure logger
@@ -38,7 +40,7 @@ class AiInternal(BaseConnectorAi):
                 processingMode=ProcessingModeEnum.BASIC,
                 operationTypes=createOperationTypeRatings(),
                 version="internal-extractor-v1",
-                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: 0.001 + (bytesSent + bytesReceived) / (1024 * 1024) * 0.01
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: 0.001 + (bytesSent + bytesReceived) / (1024 * 1024) * 0.01
             ),
             AiModel(
                 name="internal-generator",
@@ -58,7 +60,7 @@ class AiInternal(BaseConnectorAi):
                 processingMode=ProcessingModeEnum.BASIC,
                 operationTypes=createOperationTypeRatings(),
                 version="internal-generator-v1",
-                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: 0.002 + (bytesReceived / (1024 * 1024)) * 0.005
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: 0.002 + (bytesReceived / (1024 * 1024)) * 0.005
             ),
             AiModel(
                 name="internal-renderer",
@@ -78,7 +80,7 @@ class AiInternal(BaseConnectorAi):
                 processingMode=ProcessingModeEnum.DETAILED,
                 operationTypes=createOperationTypeRatings(),
                 version="internal-renderer-v1",
-                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: 0.003 + (bytesReceived / (1024 * 1024)) * 0.008
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: 0.003 + (bytesReceived / (1024 * 1024)) * 0.008
             )
         ]
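Reviewer note: the renamed calculatepriceCHF lambdas above price a call as a flat base fee plus a per-MiB transfer rate. A quick sanity check (the byte counts are invented):

# internal-extractor: 0.001 CHF base + 0.01 CHF per MiB transferred
calcExtractor = lambda processingTime, bytesSent, bytesReceived: \
    0.001 + (bytesSent + bytesReceived) / (1024 * 1024) * 0.01

price = calcExtractor(1.2, 512 * 1024, 1536 * 1024)  # 0.5 MiB sent, 1.5 MiB received
print(f"{price:.4f} CHF")  # 0.0210 CHF = 0.001 base + 2 MiB * 0.01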
451 modules/aicore/aicorePluginMistral.py Normal file
@@ -0,0 +1,451 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
import logging
import json as _json
import httpx
from typing import List, Dict, Any, AsyncGenerator, Union
from fastapi import HTTPException
from modules.shared.configuration import APP_CONFIG
from .aicoreBase import BaseConnectorAi, RateLimitExceededException, ContextLengthExceededException
from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings

logger = logging.getLogger(__name__)

def loadConfigData():
    """Load configuration data for Mistral connector"""
    return {
        "apiKey": APP_CONFIG.get('Connector_AiMistral_API_SECRET'),
    }

class AiMistral(BaseConnectorAi):
    """Connector for communication with the Mistral AI API (Le Chat Mistral)."""

    def __init__(self):
        super().__init__()
        # Load configuration
        self.config = loadConfigData()
        self.apiKey = self.config["apiKey"]

        # HttpClient for API calls
        # Timeout set to 600 seconds (10 minutes) for complex requests that may take longer
        # AiService calls can take significantly longer due to prompt building and processing overhead
        self.httpClient = httpx.AsyncClient(
            timeout=600.0,
            headers={
                "Authorization": f"Bearer {self.apiKey}",
                "Content-Type": "application/json"
            }
        )
        logger.info("Mistral Connector initialized")

    def getConnectorType(self) -> str:
        """Get the connector type identifier."""
        return "mistral"

    def getModels(self) -> List[AiModel]:
        """Get all available Mistral models."""
        return [
            AiModel(
                name="mistral-large-latest",
                displayName="Mistral Large 3",
                connectorType="mistral",
                apiUrl="https://api.mistral.ai/v1/chat/completions",
                temperature=0.2,
                maxTokens=16384,
                contextLength=256000,
                costPer1kTokensInput=0.0005,   # $0.50/M tokens (updated 2026-02)
                costPer1kTokensOutput=0.0015,  # $1.50/M tokens (updated 2026-02)
                speedRating=8,    # Good speed for complex tasks
                qualityRating=9,  # High quality
                functionCall=self.callAiBasic,
                functionCallStream=self.callAiBasicStream,
                priority=PriorityEnum.BALANCED,
                processingMode=ProcessingModeEnum.ADVANCED,
                operationTypes=createOperationTypeRatings(
                    (OperationTypeEnum.PLAN, 9),
                    (OperationTypeEnum.DATA_ANALYSE, 9),
                    (OperationTypeEnum.DATA_GENERATE, 9),
                    (OperationTypeEnum.DATA_EXTRACT, 8),
                    (OperationTypeEnum.AGENT, 8),
                    (OperationTypeEnum.DATA_QUERY, 7),
                ),
                version="mistral-large-latest",
                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0005 + (bytesReceived / 4 / 1000) * 0.0015
            ),
            AiModel(
                name="mistral-small-latest",
                displayName="Mistral Small 3.2",
                connectorType="mistral",
                apiUrl="https://api.mistral.ai/v1/chat/completions",
                temperature=0.2,
                maxTokens=16384,
                contextLength=128000,
                costPer1kTokensInput=0.00006,   # $0.06/M tokens (updated 2026-02)
                costPer1kTokensOutput=0.00018,  # $0.18/M tokens (updated 2026-02)
                speedRating=9,    # Very fast, lightweight model
                qualityRating=7,  # Good quality, cost-efficient
                functionCall=self.callAiBasic,
                functionCallStream=self.callAiBasicStream,
                priority=PriorityEnum.SPEED,
                processingMode=ProcessingModeEnum.BASIC,
                operationTypes=createOperationTypeRatings(
                    (OperationTypeEnum.PLAN, 7),
                    (OperationTypeEnum.DATA_ANALYSE, 7),
                    (OperationTypeEnum.DATA_GENERATE, 8),
                    (OperationTypeEnum.DATA_EXTRACT, 7),
                    (OperationTypeEnum.AGENT, 6),
                    (OperationTypeEnum.DATA_QUERY, 9),
                ),
                version="mistral-small-latest",
                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.00006 + (bytesReceived / 4 / 1000) * 0.00018
            ),
            AiModel(
                name="mistral-embed",
                displayName="Mistral Embed",
                connectorType="mistral",
                apiUrl="https://api.mistral.ai/v1/embeddings",
                temperature=0.0,
                maxTokens=0,
                contextLength=8192,
                costPer1kTokensInput=0.0001,  # $0.10/M tokens
                costPer1kTokensOutput=0.0,
                speedRating=10,
                qualityRating=7,
                functionCall=self.callEmbedding,
                priority=PriorityEnum.COST,
                processingMode=ProcessingModeEnum.BASIC,
                operationTypes=createOperationTypeRatings(
                    (OperationTypeEnum.EMBEDDING, 8)
                ),
                version="mistral-embed",
                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0001
            ),
            AiModel(
                name="mistral-large-latest",
                displayName="Mistral Large 3 Vision",
                connectorType="mistral",
                apiUrl="https://api.mistral.ai/v1/chat/completions",
                temperature=0.2,
                maxTokens=16384,
                contextLength=256000,
                costPer1kTokensInput=0.0005,   # $0.50/M tokens (updated 2026-02)
                costPer1kTokensOutput=0.0015,  # $1.50/M tokens (updated 2026-02)
                speedRating=6,    # Slower for vision tasks
                qualityRating=8,  # Good quality vision
                functionCall=self.callAiImage,
                priority=PriorityEnum.QUALITY,
                processingMode=ProcessingModeEnum.DETAILED,
                operationTypes=createOperationTypeRatings(
                    (OperationTypeEnum.IMAGE_ANALYSE, 8)
                ),
                version="mistral-large-latest",
                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0005 + (bytesReceived / 4 / 1000) * 0.0015
            )
        ]
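Reviewer note: the calculatepriceCHF lambdas here estimate token counts from payload size using the common ~4-bytes-per-token rule of thumb, so bytes / 4 / 1000 yields thousands of tokens. Worked through for mistral-large-latest (the byte counts are invented):

bytesSent, bytesReceived = 40_000, 8_000  # hypothetical request/response sizes
price = (bytesSent / 4 / 1000) * 0.0005 + (bytesReceived / 4 / 1000) * 0.0015
# ~10k input tokens -> 0.005, ~2k output tokens -> 0.003, total 0.008 CHF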
    async def callAiBasic(self, modelCall: AiModelCall) -> AiModelResponse:
        """
        Calls the Mistral AI API with the given messages using standardized pattern.

        Mistral's chat completions API is OpenAI-compatible: it accepts the same
        message format (role/content) including system messages, and returns
        responses in the same choices[0].message.content structure.

        Args:
            modelCall: AiModelCall with messages and options

        Returns:
            AiModelResponse with content and metadata

        Raises:
            HTTPException: For errors in API communication
        """
        try:
            # Extract parameters from modelCall
            messages = modelCall.messages
            model = modelCall.model
            options = modelCall.options
            temperature = getattr(options, "temperature", None)
            if temperature is None:
                temperature = model.temperature
            maxTokens = model.maxTokens

            payload = {
                "model": model.name,
                "messages": messages,
                "temperature": temperature,
                "max_tokens": maxTokens
            }

            if modelCall.tools:
                payload["tools"] = modelCall.tools
                payload["tool_choice"] = modelCall.toolChoice or "auto"

            response = await self.httpClient.post(
                model.apiUrl,
                json=payload
            )

            if response.status_code != 200:
                error_message = f"Mistral API error: {response.status_code} - {response.text}"
                logger.error(error_message)

                # Check for rate limit exceeded (429 TPM)
                if response.status_code == 429:
                    try:
                        error_data = response.json()
                        error_msg = error_data.get("error", {}).get("message", "Rate limit exceeded")
                        raise RateLimitExceededException(
                            f"Rate limit exceeded for {model.name}: {error_msg}"
                        )
                    except (ValueError, KeyError):
                        raise RateLimitExceededException(
                            f"Rate limit exceeded for {model.name}"
                        )

                # Check for context length exceeded error
                if response.status_code == 400:
                    try:
                        error_data = response.json()
                        if (error_data.get("error", {}).get("code") == "context_length_exceeded" or
                                "context length" in error_data.get("error", {}).get("message", "").lower() or
                                "too many tokens" in error_data.get("error", {}).get("message", "").lower()):
                            raise ContextLengthExceededException(
                                f"Context length exceeded: {error_data.get('error', {}).get('message', 'Unknown error')}"
                            )
                    except (ValueError, KeyError):
                        pass  # If we can't parse the error, fall through to generic error

                # Include the actual error details in the exception
                raise HTTPException(status_code=500, detail=error_message)

            responseJson = response.json()
            choiceMessage = responseJson["choices"][0]["message"]
            content = choiceMessage.get("content") or ""

            metadata = {"response_id": responseJson.get("id", "")}
            if choiceMessage.get("tool_calls"):
                metadata["toolCalls"] = choiceMessage["tool_calls"]

            return AiModelResponse(
                content=content,
                success=True,
                modelId=model.name,
                metadata=metadata,
            )

        except ContextLengthExceededException:
            # Re-raise context length exceptions without wrapping
            raise
        except RateLimitExceededException:
            # Re-raise rate limit exceptions without wrapping
            raise
        except Exception as e:
            logger.error(f"Error calling Mistral API: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Error calling Mistral API: {str(e)}")
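Reviewer note: since the endpoint is OpenAI-compatible, the request body assembled above has this shape (the message content is invented):

payload = {
    "model": "mistral-large-latest",
    "messages": [
        {"role": "system", "content": "Answer briefly."},
        {"role": "user", "content": "Ping?"},
    ],
    "temperature": 0.2,
    "max_tokens": 16384,
    # added only when the caller passes tool definitions:
    # "tools": [...], "tool_choice": "auto",
}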
    async def callAiBasicStream(self, modelCall: AiModelCall) -> AsyncGenerator[Union[str, AiModelResponse], None]:
        """Stream Mistral response. Yields str deltas, then final AiModelResponse."""
        try:
            model = modelCall.model
            options = modelCall.options
            temperature = getattr(options, "temperature", None)
            if temperature is None:
                temperature = model.temperature

            payload: Dict[str, Any] = {
                "model": model.name,
                "messages": modelCall.messages,
                "temperature": temperature,
                "max_tokens": model.maxTokens,
                "stream": True,
            }

            if modelCall.tools:
                payload["tools"] = modelCall.tools
                payload["tool_choice"] = modelCall.toolChoice or "auto"

            fullContent = ""
            toolCallsAccum: Dict[int, Dict[str, Any]] = {}

            async with self.httpClient.stream("POST", model.apiUrl, json=payload) as response:
                if response.status_code != 200:
                    body = await response.aread()
                    bodyStr = body.decode()
                    if response.status_code == 429:
                        try:
                            errorMsg = _json.loads(bodyStr).get("error", {}).get("message", "Rate limit exceeded")
                        except (ValueError, KeyError):
                            errorMsg = f"Rate limit exceeded for {model.name}"
                        raise RateLimitExceededException(f"Rate limit exceeded for {model.name}: {errorMsg}")
                    raise HTTPException(status_code=500, detail=f"Mistral stream error: {response.status_code} - {bodyStr}")

                async for line in response.aiter_lines():
                    if not line.startswith("data: "):
                        continue
                    data = line[6:]
                    if data.strip() == "[DONE]":
                        break
                    try:
                        chunk = _json.loads(data)
                    except _json.JSONDecodeError:
                        continue

                    delta = chunk.get("choices", [{}])[0].get("delta", {})
                    if "content" in delta and delta["content"]:
                        fullContent += delta["content"]
                        yield delta["content"]

                    for tcDelta in delta.get("tool_calls", []):
                        idx = tcDelta.get("index", 0)
                        if idx not in toolCallsAccum:
                            toolCallsAccum[idx] = {
                                "id": tcDelta.get("id", ""),
                                "type": "function",
                                "function": {"name": "", "arguments": ""},
                            }
                        if tcDelta.get("id"):
                            toolCallsAccum[idx]["id"] = tcDelta["id"]
                        fn = tcDelta.get("function", {})
                        if fn.get("name"):
                            toolCallsAccum[idx]["function"]["name"] = fn["name"]
                        if fn.get("arguments"):
                            toolCallsAccum[idx]["function"]["arguments"] += fn["arguments"]

            metadata: Dict[str, Any] = {}
            if toolCallsAccum:
                metadata["toolCalls"] = [toolCallsAccum[i] for i in sorted(toolCallsAccum)]

            yield AiModelResponse(
                content=fullContent,
                success=True,
                modelId=model.name,
                metadata=metadata,
            )

        except (RateLimitExceededException, ContextLengthExceededException, HTTPException):
            raise
        except Exception as e:
            logger.error(f"Error streaming Mistral API: {e}")
            raise HTTPException(status_code=500, detail=f"Error streaming Mistral API: {e}")
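Reviewer note: the stream loop above consumes standard SSE framing. A minimal offline replay of the parsing logic (the chunk payloads are invented):

import json

lines = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo"}}]}',
    "data: [DONE]",
]
full = ""
for line in lines:
    if not line.startswith("data: "):
        continue  # ignore comments/keep-alives
    data = line[6:]
    if data.strip() == "[DONE]":
        break
    delta = json.loads(data)["choices"][0]["delta"]
    full += delta.get("content") or ""
print(full)  # Hello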
    async def callEmbedding(self, modelCall: AiModelCall) -> AiModelResponse:
        """Generate embeddings via the Mistral Embeddings API.

        Reads texts from modelCall.embeddingInput.
        Returns vectors in metadata["embeddings"].
        """
        try:
            model = modelCall.model
            texts = modelCall.embeddingInput or []
            if not texts:
                return AiModelResponse(
                    content="", success=False, error="No embeddingInput provided"
                )

            payload = {"model": model.name, "input": texts}
            response = await self.httpClient.post(model.apiUrl, json=payload)

            if response.status_code != 200:
                errorMessage = f"Mistral Embedding API error: {response.status_code} - {response.text}"
                logger.error(errorMessage)
                if response.status_code == 429:
                    raise RateLimitExceededException(f"Rate limit exceeded for {model.name}")
                if response.status_code == 400:
                    try:
                        errorData = response.json()
                        errMsg = errorData.get("error", {}).get("message", "").lower()
                        errCode = errorData.get("error", {}).get("code", "")
                        if errCode == "context_length_exceeded" or "too many tokens" in errMsg or "maximum context length" in errMsg:
                            raise ContextLengthExceededException(
                                f"Embedding context length exceeded for {model.name}: {errorData.get('error', {}).get('message', '')}"
                            )
                    except (ValueError, KeyError):
                        pass
                raise HTTPException(status_code=500, detail=errorMessage)

            responseJson = response.json()
            embeddings = [item["embedding"] for item in responseJson["data"]]
            usage = responseJson.get("usage", {})

            return AiModelResponse(
                content="",
                success=True,
                modelId=model.name,
                tokensUsed={
                    "input": usage.get("prompt_tokens", 0),
                    "output": 0,
                    "total": usage.get("total_tokens", 0),
                },
                metadata={"embeddings": embeddings},
            )
        except (RateLimitExceededException, ContextLengthExceededException):
            raise
        except Exception as e:
            logger.error(f"Error calling Mistral Embedding API: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Error calling Mistral Embedding API: {str(e)}")
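Reviewer note: callers receive the raw vectors in metadata["embeddings"], one per input text. A typical downstream use is cosine similarity (a generic sketch, not part of this diff):

import math

def cosine(a, b):
    """Cosine similarity between two embedding vectors."""
    dot = sum(x * y for x, y in zip(a, b))
    na = math.sqrt(sum(x * x for x in a))
    nb = math.sqrt(sum(x * x for x in b))
    return dot / (na * nb) if na and nb else 0.0

# e.g. cosine(resp.metadata["embeddings"][0], resp.metadata["embeddings"][1])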
    async def callAiImage(self, modelCall: AiModelCall) -> AiModelResponse:
        """
        Analyzes an image with the Mistral Vision API using standardized pattern.

        Mistral Large 3 is multimodal and accepts image inputs in OpenAI-compatible
        format: {"type": "image_url", "image_url": {"url": "data:...base64,..."}}

        Args:
            modelCall: AiModelCall with messages and image data in options

        Returns:
            AiModelResponse with analysis content
        """
        try:
            # Extract parameters from modelCall
            messages = modelCall.messages
            model = modelCall.model

            # Messages should already be in the correct format with image data embedded
            # Just verify they contain image data
            if not messages or not messages[0].get("content"):
                raise ValueError("No messages provided for image analysis")

            logger.debug(f"Starting image analysis with {len(messages)} message(s)...")

            # Use the messages directly - they should already contain the image data
            # in the format: {"type": "image_url", "image_url": {"url": "data:...base64,..."}}
            # Mistral Large 3 supports this OpenAI-compatible vision format natively

            # Use parameters from model
            temperature = model.temperature

            payload = {
                "model": model.name,
                "messages": messages,
                "temperature": temperature
            }

            response = await self.httpClient.post(
                model.apiUrl,
                json=payload
            )

            if response.status_code != 200:
                logger.error(f"Mistral API error: {response.status_code} - {response.text}")
                raise HTTPException(status_code=500, detail="Error communicating with Mistral API")

            responseJson = response.json()
            content = responseJson["choices"][0]["message"]["content"]

            return AiModelResponse(
                content=content,
                success=True,
                modelId=model.name,
                metadata={"response_id": responseJson.get("id", "")}
            )

        except Exception as e:
            logger.error(f"Error during image analysis: {str(e)}", exc_info=True)
            return AiModelResponse(
                content="",
                success=False,
                error=f"Error during image analysis: {str(e)}"
            )
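Reviewer note: callAiImage expects the caller to have embedded the image already, in the OpenAI data-URL form named in the docstring (the text and the base64 payload are placeholders):

messages = [{
    "role": "user",
    "content": [
        {"type": "text", "text": "Describe this image."},
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
    ],
}]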
@@ -1,18 +1,16 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 import logging
+import json as _json
 import httpx
-from typing import List
+from typing import List, Dict, Any, AsyncGenerator, Union
 from fastapi import HTTPException
 from modules.shared.configuration import APP_CONFIG
-from modules.aicore.aicoreBase import BaseConnectorAi
+from .aicoreBase import BaseConnectorAi, RateLimitExceededException, ContextLengthExceededException
-from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings
+from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings, AiCallPromptImage

-# Configure logger
 logger = logging.getLogger(__name__)

-class ContextLengthExceededException(Exception):
-    """Exception raised when the context length exceeds the model's limit"""
-    pass
-
 def loadConfigData():
     """Load configuration data for OpenAI connector"""
     return {
@@ -55,58 +53,65 @@ class AiOpenai(BaseConnectorAi):
                 temperature=0.2,
                 maxTokens=16384,
                 contextLength=128000,
-                costPer1kTokensInput=0.03,
-                costPer1kTokensOutput=0.06,
-                speedRating=7,  # Good speed for complex tasks
-                qualityRating=9,  # High quality
-                # capabilities removed (not used in business logic)
+                maxInputTokensPerRequest=25000,  # OpenAI org TPM limit is 30K, keep 5K buffer
+                costPer1kTokensInput=0.0025,  # $2.50/M tokens (updated 2026-02)
+                costPer1kTokensOutput=0.01,  # $10.00/M tokens (updated 2026-02)
+                speedRating=8,  # Good speed for complex tasks
+                qualityRating=10,  # High quality
                 functionCall=self.callAiBasic,
+                functionCallStream=self.callAiBasicStream,
                 priority=PriorityEnum.BALANCED,
                 processingMode=ProcessingModeEnum.ADVANCED,
                 operationTypes=createOperationTypeRatings(
-                    (OperationTypeEnum.PLAN, 8),
-                    (OperationTypeEnum.DATA_ANALYSE, 9),
-                    (OperationTypeEnum.DATA_GENERATE, 9),
-                    (OperationTypeEnum.DATA_EXTRACT, 7)
+                    (OperationTypeEnum.PLAN, 9),
+                    (OperationTypeEnum.DATA_ANALYSE, 10),
+                    (OperationTypeEnum.DATA_GENERATE, 10),
+                    (OperationTypeEnum.DATA_EXTRACT, 7),
+                    (OperationTypeEnum.AGENT, 9),
+                    (OperationTypeEnum.DATA_QUERY, 8),
                 ),
                 version="gpt-4o",
-                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.03 + (bytesReceived / 4 / 1000) * 0.06
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0025 + (bytesReceived / 4 / 1000) * 0.01
             ),
             AiModel(
-                name="gpt-3.5-turbo",
-                displayName="OpenAI GPT-3.5 Turbo",
-                connectorType="openai",
-                apiUrl="https://api.openai.com/v1/chat/completions",
-                temperature=0.2,
-                maxTokens=4096,
-                contextLength=16000,
-                costPer1kTokensInput=0.0015,
-                costPer1kTokensOutput=0.002,
-                speedRating=9,  # Very fast
-                qualityRating=7,  # Good but not premium
-                # capabilities removed (not used in business logic)
-                functionCall=self.callAiBasic,
-                priority=PriorityEnum.SPEED,
-                processingMode=ProcessingModeEnum.BASIC,
-                operationTypes=createOperationTypeRatings(
-                    (OperationTypeEnum.PLAN, 7),
-                    (OperationTypeEnum.DATA_ANALYSE, 8),
-                    (OperationTypeEnum.DATA_GENERATE, 8)
-                    # Note: GPT-3.5-turbo does NOT support vision/image operations
-                ),
-                version="gpt-3.5-turbo",
-                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0015 + (bytesReceived / 4 / 1000) * 0.002
-            ),
-            AiModel(
-                name="gpt-4o",
-                displayName="OpenAI GPT-4o Instance Vision",
+                name="gpt-4o-mini",
+                displayName="OpenAI GPT-4o Mini",
                 connectorType="openai",
                 apiUrl="https://api.openai.com/v1/chat/completions",
                 temperature=0.2,
                 maxTokens=16384,
                 contextLength=128000,
-                costPer1kTokensInput=0.03,
-                costPer1kTokensOutput=0.06,
+                maxInputTokensPerRequest=25000,  # OpenAI org TPM limit, keep buffer
+                costPer1kTokensInput=0.00015,  # $0.15/M tokens (updated 2026-02)
+                costPer1kTokensOutput=0.0006,  # $0.60/M tokens (updated 2026-02)
+                speedRating=9,  # Very fast
+                qualityRating=8,  # Good quality, replaces gpt-3.5-turbo
+                functionCall=self.callAiBasic,
+                functionCallStream=self.callAiBasicStream,
+                priority=PriorityEnum.SPEED,
+                processingMode=ProcessingModeEnum.BASIC,
+                operationTypes=createOperationTypeRatings(
+                    (OperationTypeEnum.PLAN, 8),
+                    (OperationTypeEnum.DATA_ANALYSE, 8),
+                    (OperationTypeEnum.DATA_GENERATE, 9),
+                    (OperationTypeEnum.DATA_EXTRACT, 7),
+                    (OperationTypeEnum.AGENT, 8),
+                    (OperationTypeEnum.DATA_QUERY, 10),
+                ),
+                version="gpt-4o-mini",
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.00015 + (bytesReceived / 4 / 1000) * 0.0006
+            ),
+            AiModel(
+                name="gpt-4o",
+                displayName="OpenAI GPT-4o Vision",
+                connectorType="openai",
+                apiUrl="https://api.openai.com/v1/chat/completions",
+                temperature=0.2,
+                maxTokens=16384,
+                contextLength=128000,
+                maxInputTokensPerRequest=25000,  # OpenAI org TPM limit is 30K, keep 5K buffer
+                costPer1kTokensInput=0.0025,  # $2.50/M tokens (updated 2026-02)
+                costPer1kTokensOutput=0.01,  # $10.00/M tokens (updated 2026-02)
                 speedRating=6,  # Slower for vision tasks
                 qualityRating=9,  # High quality vision
                 functionCall=self.callAiImage,
@@ -116,7 +121,49 @@ class AiOpenai(BaseConnectorAi):
                     (OperationTypeEnum.IMAGE_ANALYSE, 9)
                 ),
                 version="gpt-4o",
-                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.03 + (bytesReceived / 4 / 1000) * 0.06
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0025 + (bytesReceived / 4 / 1000) * 0.01
+            ),
+            AiModel(
+                name="text-embedding-3-small",
+                displayName="OpenAI Embedding Small",
+                connectorType="openai",
+                apiUrl="https://api.openai.com/v1/embeddings",
+                temperature=0.0,
+                maxTokens=0,
+                contextLength=8191,
+                costPer1kTokensInput=0.00002,  # $0.02/M tokens
+                costPer1kTokensOutput=0.0,
+                speedRating=10,
+                qualityRating=8,
+                functionCall=self.callEmbedding,
+                priority=PriorityEnum.COST,
+                processingMode=ProcessingModeEnum.BASIC,
+                operationTypes=createOperationTypeRatings(
+                    (OperationTypeEnum.EMBEDDING, 10)
+                ),
+                version="text-embedding-3-small",
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.00002
+            ),
+            AiModel(
+                name="text-embedding-3-large",
+                displayName="OpenAI Embedding Large",
+                connectorType="openai",
+                apiUrl="https://api.openai.com/v1/embeddings",
+                temperature=0.0,
+                maxTokens=0,
+                contextLength=8191,
+                costPer1kTokensInput=0.00013,  # $0.13/M tokens
+                costPer1kTokensOutput=0.0,
+                speedRating=9,
+                qualityRating=10,
+                functionCall=self.callEmbedding,
+                priority=PriorityEnum.QUALITY,
+                processingMode=ProcessingModeEnum.ADVANCED,
+                operationTypes=createOperationTypeRatings(
+                    (OperationTypeEnum.EMBEDDING, 10)
+                ),
+                version="text-embedding-3-large",
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.00013
+            ),
             AiModel(
                 name="dall-e-3",
@@ -138,7 +185,7 @@ class AiOpenai(BaseConnectorAi):
                     (OperationTypeEnum.IMAGE_GENERATE, 10)
                 ),
                 version="dall-e-3",
-                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.04
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.04
             )
         ]
@@ -172,6 +219,10 @@ class AiOpenai(BaseConnectorAi):
                 "max_tokens": maxTokens
             }

+            if modelCall.tools:
+                payload["tools"] = modelCall.tools
+                payload["tool_choice"] = modelCall.toolChoice or "auto"
+
             response = await self.httpClient.post(
                 model.apiUrl,
                 json=payload
@@ -181,6 +232,19 @@ class AiOpenai(BaseConnectorAi):
                 error_message = f"OpenAI API error: {response.status_code} - {response.text}"
                 logger.error(error_message)

+                # Check for rate limit exceeded (429 TPM)
+                if response.status_code == 429:
+                    try:
+                        error_data = response.json()
+                        error_msg = error_data.get("error", {}).get("message", "Rate limit exceeded")
+                        raise RateLimitExceededException(
+                            f"Rate limit exceeded for {model.name}: {error_msg}"
+                        )
+                    except (ValueError, KeyError):
+                        raise RateLimitExceededException(
+                            f"Rate limit exceeded for {model.name}"
+                        )
+
                 # Check for context length exceeded error
                 if response.status_code == 400:
                     try:
@@ -198,22 +262,168 @@ class AiOpenai(BaseConnectorAi):
                 raise HTTPException(status_code=500, detail=error_message)

             responseJson = response.json()
-            content = responseJson["choices"][0]["message"]["content"]
+            choiceMessage = responseJson["choices"][0]["message"]
+            content = choiceMessage.get("content") or ""
+
+            metadata = {"response_id": responseJson.get("id", "")}
+            if choiceMessage.get("tool_calls"):
+                metadata["toolCalls"] = choiceMessage["tool_calls"]

             return AiModelResponse(
                 content=content,
                 success=True,
                 modelId=model.name,
-                metadata={"response_id": responseJson.get("id", "")}
+                metadata=metadata
             )

         except ContextLengthExceededException:
-            # Re-raise context length exceptions without wrapping
             raise
         except Exception as e:
             logger.error(f"Error calling OpenAI API: {str(e)}")
             raise HTTPException(status_code=500, detail=f"Error calling OpenAI API: {str(e)}")

+    async def callAiBasicStream(self, modelCall: AiModelCall) -> AsyncGenerator[Union[str, AiModelResponse], None]:
+        """Stream OpenAI response. Yields str deltas, then final AiModelResponse."""
+        try:
+            messages = modelCall.messages
+            model = modelCall.model
+            options = modelCall.options
+            temperature = getattr(options, "temperature", None)
+            if temperature is None:
+                temperature = model.temperature
+
+            payload: Dict[str, Any] = {
+                "model": model.name,
+                "messages": messages,
+                "temperature": temperature,
+                "max_tokens": model.maxTokens,
+                "stream": True,
+            }
+            if modelCall.tools:
+                payload["tools"] = modelCall.tools
+                payload["tool_choice"] = modelCall.toolChoice or "auto"
+
+            fullContent = ""
+            toolCallsAccum: Dict[int, Dict[str, Any]] = {}
+
+            async with self.httpClient.stream("POST", model.apiUrl, json=payload) as response:
+                if response.status_code != 200:
+                    body = await response.aread()
+                    bodyStr = body.decode()
+                    if response.status_code == 429:
+                        try:
+                            errorMsg = _json.loads(bodyStr).get("error", {}).get("message", "Rate limit exceeded")
+                        except (ValueError, KeyError):
+                            errorMsg = f"Rate limit exceeded for {model.name}"
+                        raise RateLimitExceededException(f"Rate limit exceeded for {model.name}: {errorMsg}")
+                    raise HTTPException(status_code=500, detail=f"OpenAI stream error: {response.status_code} - {bodyStr}")
+
+                async for line in response.aiter_lines():
+                    if not line.startswith("data: "):
+                        continue
+                    data = line[6:]
+                    if data.strip() == "[DONE]":
+                        break
+                    try:
+                        chunk = _json.loads(data)
+                    except _json.JSONDecodeError:
+                        continue
+
+                    delta = chunk.get("choices", [{}])[0].get("delta", {})
+
+                    if "content" in delta and delta["content"]:
+                        fullContent += delta["content"]
+                        yield delta["content"]
+
+                    for tcDelta in delta.get("tool_calls", []):
+                        idx = tcDelta.get("index", 0)
+                        if idx not in toolCallsAccum:
+                            toolCallsAccum[idx] = {
+                                "id": tcDelta.get("id", ""),
+                                "type": "function",
+                                "function": {"name": "", "arguments": ""},
+                            }
+                        if tcDelta.get("id"):
+                            toolCallsAccum[idx]["id"] = tcDelta["id"]
+                        fn = tcDelta.get("function", {})
+                        if fn.get("name"):
+                            toolCallsAccum[idx]["function"]["name"] = fn["name"]
+                        if fn.get("arguments"):
+                            toolCallsAccum[idx]["function"]["arguments"] += fn["arguments"]
+
+            metadata: Dict[str, Any] = {}
+            if toolCallsAccum:
+                metadata["toolCalls"] = [toolCallsAccum[i] for i in sorted(toolCallsAccum)]
+
+            yield AiModelResponse(
+                content=fullContent,
+                success=True,
+                modelId=model.name,
+                metadata=metadata,
+            )
+
+        except (RateLimitExceededException, ContextLengthExceededException, HTTPException):
+            raise
+        except Exception as e:
+            logger.error(f"Error streaming OpenAI API: {e}")
+            raise HTTPException(status_code=500, detail=f"Error streaming OpenAI API: {e}")
+
+    async def callEmbedding(self, modelCall: AiModelCall) -> AiModelResponse:
+        """Generate embeddings via the OpenAI Embeddings API.
+
+        Reads texts from modelCall.embeddingInput.
+        Returns vectors in metadata["embeddings"].
+        """
+        try:
+            model = modelCall.model
+            texts = modelCall.embeddingInput or []
+            if not texts:
+                return AiModelResponse(
+                    content="", success=False, error="No embeddingInput provided"
+                )
+
+            payload = {"model": model.name, "input": texts}
+            response = await self.httpClient.post(model.apiUrl, json=payload)
+
+            if response.status_code != 200:
+                errorMessage = f"OpenAI Embedding API error: {response.status_code} - {response.text}"
+                logger.error(errorMessage)
+                if response.status_code == 429:
+                    raise RateLimitExceededException(f"Rate limit exceeded for {model.name}")
+                if response.status_code == 400:
+                    try:
+                        errorData = response.json()
+                        errMsg = errorData.get("error", {}).get("message", "").lower()
+                        errCode = errorData.get("error", {}).get("code", "")
+                        if errCode == "context_length_exceeded" or "too many tokens" in errMsg or "maximum context length" in errMsg:
+                            raise ContextLengthExceededException(
+                                f"Embedding context length exceeded for {model.name}: {errorData.get('error', {}).get('message', '')}"
+                            )
+                    except (ValueError, KeyError):
+                        pass
+                raise HTTPException(status_code=500, detail=errorMessage)
+
+            responseJson = response.json()
+            embeddings = [item["embedding"] for item in responseJson["data"]]
+            usage = responseJson.get("usage", {})
+
+            return AiModelResponse(
+                content="",
+                success=True,
+                modelId=model.name,
+                tokensUsed={
+                    "input": usage.get("prompt_tokens", 0),
+                    "output": 0,
+                    "total": usage.get("total_tokens", 0),
+                },
+                metadata={"embeddings": embeddings},
+            )
+        except (RateLimitExceededException, ContextLengthExceededException):
+            raise
+        except Exception as e:
+            logger.error(f"Error calling OpenAI Embedding API: {str(e)}")
+            raise HTTPException(status_code=500, detail=f"Error calling OpenAI Embedding API: {str(e)}")
+
     async def callAiImage(self, modelCall: AiModelCall) -> AiModelResponse:
         """
         Analyzes an image with the OpenAI Vision API using standardized pattern.
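Reviewer note: in the streaming path above, tool-call arguments arrive fragmented across chunks and are stitched back together by index. A minimal offline replay of that accumulation (the delta chunks are invented):

deltas = [
    {"index": 0, "id": "call_1", "function": {"name": "search", "arguments": ""}},
    {"index": 0, "function": {"arguments": '{"q": "zu'}},
    {"index": 0, "function": {"arguments": 'rich"}'}},
]
acc = {}
for d in deltas:
    idx = d.get("index", 0)
    entry = acc.setdefault(idx, {"id": "", "type": "function",
                                 "function": {"name": "", "arguments": ""}})
    if d.get("id"):
        entry["id"] = d["id"]
    fn = d.get("function", {})
    if fn.get("name"):
        entry["function"]["name"] = fn["name"]
    if fn.get("arguments"):
        entry["function"]["arguments"] += fn["arguments"]
print(acc[0]["function"]["arguments"])  # {"q": "zurich"}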
@@ -296,7 +506,6 @@ class AiOpenai(BaseConnectorAi):
             promptContent = messages[0]["content"] if messages else ""

             # Parse prompt using AiCallPromptImage model
-            from modules.datamodels.datamodelAi import AiCallPromptImage
             import json

             try:
@@ -333,29 +542,20 @@ class AiOpenai(BaseConnectorAi):
                 "response_format": "b64_json"  # Get base64 data directly instead of URLs
             }

-            # Create a separate client for DALL-E API calls
-            # Timeout set to 600 seconds (10 minutes) for complex image generation requests
-            dalle_client = httpx.AsyncClient(
-                timeout=600.0,
-                headers={
-                    "Authorization": f"Bearer {self.apiKey}",
-                    "Content-Type": "application/json"
-                }
-            )
-
-            response = await dalle_client.post(
+            # Use existing httpClient to benefit from connection pooling
+            # This avoids TLS connection issues that can occur with fresh clients
+            response = await self.httpClient.post(
                 dalle_url,
                 json=payload
             )

-            await dalle_client.aclose()
-
             if response.status_code != 200:
                 logger.error(f"DALL-E API error: {response.status_code} - {response.text}")
-                return {
-                    "success": False,
-                    "error": f"DALL-E API error: {response.status_code} - {response.text}"
-                }
+                return AiModelResponse(
+                    content="",
+                    success=False,
+                    error=f"DALL-E API error: {response.status_code} - {response.text}"
+                )

             responseJson = response.json()
@@ -1,10 +1,12 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 import logging
 import httpx
 from typing import List
 from fastapi import HTTPException
 from modules.shared.configuration import APP_CONFIG
-from modules.aicore.aicoreBase import BaseConnectorAi
+from .aicoreBase import BaseConnectorAi
-from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings, AiCallPromptWebSearch, AiCallPromptWebCrawl
+from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings, AiCallPromptWebSearch, AiCallPromptWebCrawl, AiCallOptions
 from modules.datamodels.datamodelTools import CountryCodes

 # Configure logger
@@ -57,22 +59,21 @@ class AiPerplexity(BaseConnectorAi):
                 connectorType="perplexity",
                 apiUrl="https://api.perplexity.ai/chat/completions",
                 temperature=0.2,
-                maxTokens=24000,  # Increased for detailed web crawl responses (Perplexity supports up to 25k)
-                contextLength=32000,
-                costPer1kTokensInput=0.005,
-                costPer1kTokensOutput=0.005,
+                maxTokens=24000,
+                contextLength=127000,  # 127K context window (updated 2026-02)
+                costPer1kTokensInput=0.001,  # $1/M tokens (updated 2026-02)
+                costPer1kTokensOutput=0.001,  # $1/M tokens (updated 2026-02)
                 speedRating=8,
                 qualityRating=8,
-                # capabilities removed (not used in business logic)
                 functionCall=self._routeWebOperation,
                 priority=PriorityEnum.BALANCED,
                 processingMode=ProcessingModeEnum.ADVANCED,
                 operationTypes=createOperationTypeRatings(
-                    (OperationTypeEnum.WEB_SEARCH, 9),
+                    (OperationTypeEnum.WEB_SEARCH_DATA, 9),
                     (OperationTypeEnum.WEB_CRAWL, 7)
                 ),
                 version="sonar",
-                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.005 + (bytesReceived / 4 / 1000) * 0.005
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.001 + (bytesReceived / 4 / 1000) * 0.001
             ),
             AiModel(
                 name="sonar-pro",
@@ -80,22 +81,21 @@ class AiPerplexity(BaseConnectorAi):
                 connectorType="perplexity",
                 apiUrl="https://api.perplexity.ai/chat/completions",
                 temperature=0.2,
-                maxTokens=24000,  # Increased for detailed web crawl responses (Perplexity supports up to 25k)
-                contextLength=32000,
-                costPer1kTokensInput=0.01,
-                costPer1kTokensOutput=0.01,
+                maxTokens=24000,
+                contextLength=200000,  # 200K context window (updated 2026-02)
+                costPer1kTokensInput=0.003,  # $3/M tokens (updated 2026-02)
+                costPer1kTokensOutput=0.015,  # $15/M tokens (updated 2026-02)
                 speedRating=6,  # Slower due to AI analysis
                 qualityRating=9,  # Best AI analysis quality
-                # capabilities removed (not used in business logic)
                 functionCall=self._routeWebOperation,
                 priority=PriorityEnum.QUALITY,
                 processingMode=ProcessingModeEnum.DETAILED,
                 operationTypes=createOperationTypeRatings(
-                    (OperationTypeEnum.WEB_SEARCH, 9),
+                    (OperationTypeEnum.WEB_SEARCH_DATA, 9),
                     (OperationTypeEnum.WEB_CRAWL, 8)
                 ),
                 version="sonar-pro",
-                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.01 + (bytesReceived / 4 / 1000) * 0.01
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.003 + (bytesReceived / 4 / 1000) * 0.015
             )
         ]
@@ -182,7 +182,6 @@ class AiPerplexity(BaseConnectorAi):
         ]

         # Create a model call for testing
-        from modules.datamodels.datamodelAi import AiCallOptions
         model = self.getModels()[0]  # Get first model for testing
         testCall = AiModelCall(
             messages=testMessages,
@@ -209,7 +208,7 @@ class AiPerplexity(BaseConnectorAi):
         """
         operationType = modelCall.options.operationType

-        if operationType == OperationTypeEnum.WEB_SEARCH:
+        if operationType == OperationTypeEnum.WEB_SEARCH_DATA:
             return await self.webSearch(modelCall)
         elif operationType == OperationTypeEnum.WEB_CRAWL:
             return await self.webCrawl(modelCall)
@@ -255,7 +254,7 @@ class AiPerplexity(BaseConnectorAi):

     async def webSearch(self, modelCall: AiModelCall) -> AiModelResponse:
         """
-        WEB_SEARCH operation - returns list of URLs based on search query.
+        WEB_SEARCH_DATA operation - returns list of URLs based on search query.

         Args:
             modelCall: AiModelCall with AiCallPromptWebSearch as prompt
@@ -338,7 +337,7 @@ Return ONLY a JSON array of URLs, no additional text:
                 content=content,
                 success=True,
                 modelId=model.name,
-                metadata={"response_id": apiResponse.get("id", ""), "operation": "WEB_SEARCH"}
+                metadata={"response_id": apiResponse.get("id", ""), "operation": "WEB_SEARCH_DATA"}
             )

         except Exception as e:
506 modules/aicore/aicorePluginPrivateLlm.py Normal file
@@ -0,0 +1,506 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
AI Connector for PowerOn Private-LLM Service.

Connects to the private-llm service running on-premise with Ollama backend.
Provides OCR and Vision capabilities via local AI models.

Models:
- poweron-text-general: Text (qwen2.5); NEUTRALIZATION_TEXT + data/plan ops
- poweron-vision-general: Vision (qwen2.5vl); IMAGE_ANALYSE + NEUTRALIZATION_IMAGE
- poweron-vision-deep: Vision (granite3.2); IMAGE_ANALYSE + NEUTRALIZATION_IMAGE

Pricing (CHF per call):
- Text models: CHF 0.010
- Vision models: CHF 0.100
"""

import logging
import httpx
import time
from typing import List, Optional, Dict, Any
from fastapi import HTTPException
from modules.shared.configuration import APP_CONFIG
from .aicoreBase import BaseConnectorAi, RateLimitExceededException
from modules.datamodels.datamodelAi import (
    AiModel,
    PriorityEnum,
    ProcessingModeEnum,
    OperationTypeEnum,
    AiModelCall,
    AiModelResponse,
    createOperationTypeRatings
)

# Configure logger
logger = logging.getLogger(__name__)

# Pricing constants (CHF)
PRICE_TEXT_PER_CALL = 0.01    # CHF 0.010 per text model call
PRICE_VISION_PER_CALL = 0.10  # CHF 0.100 per vision model call


# Private-LLM service URL (fixed, not configurable via env)
PRIVATE_LLM_BASE_URL = "https://llm.poweron.swiss:8000"


def _loadConfigData():
    """Load configuration data for Private-LLM connector."""
    return {
        "apiKey": APP_CONFIG.get("Connector_AiPrivateLlm_API_SECRET"),
        "baseUrl": PRIVATE_LLM_BASE_URL,
    }


class AiPrivateLlm(BaseConnectorAi):
    """Connector for communication with the PowerOn Private-LLM Service."""

    def __init__(self):
        super().__init__()
        # Load configuration
        self.config = _loadConfigData()
        self.apiKey = self.config["apiKey"]
        self.baseUrl = self.config["baseUrl"]

        # HTTP client for API calls
        # Timeout set to 3600 seconds (60 minutes) for large model processing
        headers = {"Content-Type": "application/json"}
        if self.apiKey:
            headers["X-API-Key"] = self.apiKey

        self.httpClient = httpx.AsyncClient(
            timeout=3600.0,
            headers=headers
        )

        # Cache for service availability check
        self._serviceAvailable: Optional[bool] = None
        self._availableOllamaModels: Optional[List[str]] = None
        self._lastAvailabilityCheck: float = 0
        self._availabilityCacheTtl: float = 60.0  # 60 seconds cache

        logger.info(f"Private-LLM Connector initialized (URL: {self.baseUrl})")
    def getConnectorType(self) -> str:
        """Get the connector type identifier."""
        return "privatellm"

    def _checkServiceAvailability(self) -> Dict[str, Any]:
        """
        Check if the Private-LLM service is available and which Ollama models are installed.
        Uses caching to avoid excessive health checks.

        Returns:
            Dict with 'serviceAvailable', 'ollamaConnected', 'availableModels'
        """
        import asyncio

        currentTime = time.time()

        # Return cached result if still valid
        if (self._serviceAvailable is not None and
                currentTime - self._lastAvailabilityCheck < self._availabilityCacheTtl):
            return {
                "serviceAvailable": self._serviceAvailable,
                "ollamaConnected": self._serviceAvailable,
                "availableModels": self._availableOllamaModels or []
            }

        # Perform availability check
        try:
            # Use synchronous client for blocking check during initialization
            with httpx.Client(timeout=5.0) as client:
                headers = {"Content-Type": "application/json"}
                if self.apiKey:
                    headers["X-API-Key"] = self.apiKey

                # Check health endpoint
                healthResponse = client.get(
                    f"{self.baseUrl}/api/health",
                    headers=headers
                )

                if healthResponse.status_code != 200:
                    logger.warning(f"Private-LLM service not available: HTTP {healthResponse.status_code}")
                    self._serviceAvailable = False
                    self._availableOllamaModels = []
                    self._lastAvailabilityCheck = currentTime
                    return {"serviceAvailable": False, "ollamaConnected": False, "availableModels": []}

                healthData = healthResponse.json()
                ollamaConnected = healthData.get("ollamaConnected", False)

                if not ollamaConnected:
                    logger.warning("Private-LLM service available but Ollama not connected")
                    self._serviceAvailable = True
                    self._availableOllamaModels = []
                    self._lastAvailabilityCheck = currentTime
                    return {"serviceAvailable": True, "ollamaConnected": False, "availableModels": []}

                # Check Ollama status for available models
                statusResponse = client.get(
                    f"{self.baseUrl}/api/ollama/status",
                    headers=headers
                )

                if statusResponse.status_code == 200:
                    statusData = statusResponse.json()
                    self._availableOllamaModels = statusData.get("models", [])
                else:
                    self._availableOllamaModels = []

                self._serviceAvailable = True
                self._lastAvailabilityCheck = currentTime

                logger.info(f"Private-LLM availability check: service=OK, ollama=OK, models={len(self._availableOllamaModels)}")

                return {
                    "serviceAvailable": True,
                    "ollamaConnected": True,
                    "availableModels": self._availableOllamaModels
                }

        except httpx.ConnectError:
            logger.warning(f"Private-LLM service not reachable at {self.baseUrl}")
            self._serviceAvailable = False
            self._availableOllamaModels = []
            self._lastAvailabilityCheck = currentTime
            return {"serviceAvailable": False, "ollamaConnected": False, "availableModels": []}
        except Exception as e:
            logger.warning(f"Error checking Private-LLM availability: {e}")
            self._serviceAvailable = False
            self._availableOllamaModels = []
            self._lastAvailabilityCheck = currentTime
            return {"serviceAvailable": False, "ollamaConnected": False, "availableModels": []}
||||||
|
    def _isModelAvailableInOllama(self, ollamaModelName: str, availableModels: List[str]) -> bool:
        """
        Check if a model is available in Ollama.
        Handles model name variations (with/without tags).
        """
        if not availableModels:
            return False

        # Direct match
        if ollamaModelName in availableModels:
            return True

        # Check without tag (e.g., "qwen2.5vl:72b" -> "qwen2.5vl")
        baseModelName = ollamaModelName.split(":")[0]
        for availModel in availableModels:
            availBase = availModel.split(":")[0]
            if baseModelName == availBase:
                return True

        return False
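
    # Illustrative examples (not from the original source) of the matching rules
    # above: an exact name wins outright; otherwise only the base name before
    # the ":" tag is compared, so any installed tag of the same model counts.
    #
    #   _isModelAvailableInOllama("qwen2.5:7b", ["qwen2.5:7b"])      # True  (direct match)
    #   _isModelAvailableInOllama("qwen2.5:7b", ["qwen2.5:latest"])  # True  (base-name match)
    #   _isModelAvailableInOllama("qwen2.5:7b", ["llama3:8b"])       # False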

    def getModels(self) -> List[AiModel]:
        """
        Get all available Private-LLM models.

        Checks service availability and returns only models that are actually available
        in the connected Ollama instance. Returns an empty list if the service is not reachable.
        """
        # Check service availability
        availability = self._checkServiceAvailability()

        if not availability["serviceAvailable"]:
            logger.warning("Private-LLM service not available - no models returned")
            return []

        if not availability["ollamaConnected"]:
            logger.warning("Private-LLM service available but Ollama not connected - no models returned")
            return []

        availableOllamaModels = availability.get("availableModels", [])

        # Define all models with their Ollama backend names
        # Actual model specs (for 31GB RAM + 22GB GPU server):
        # Context sizes reduced to fit in available RAM
        # - qwen2.5:7b: 7.6B params, ~4.7GB RAM (Text) - 8K context
        # - qwen2.5vl:7b: 8.29B params, ~6GB RAM (Vision) - 4K context
        # - granite3.2-vision: 2B params, ~2.4GB RAM (Vision) - 4K context
        # - deepseek-ocr: ~6.7GB RAM (OCR) - 4K context
        modelDefinitions = [
            # Text Model (qwen2.5:7b: 7.6B)
            {
                "model": AiModel(
                    name="poweron-text-general",
                    displayName="PowerOn Text General",
                    connectorType="privatellm",
                    apiUrl=f"{self.baseUrl}/api/analyze",
                    temperature=0.1,
                    maxTokens=4096,
                    contextLength=8192,  # Reduced for RAM constraints
                    costPer1kTokensInput=0.0,  # Flat rate pricing
                    costPer1kTokensOutput=0.0,  # Flat rate pricing
                    speedRating=8,  # Fast and efficient
                    qualityRating=9,  # High quality text model
                    functionCall=self.callAiText,
                    priority=PriorityEnum.COST,
                    processingMode=ProcessingModeEnum.BASIC,
                    operationTypes=createOperationTypeRatings(
                        (OperationTypeEnum.PLAN, 7),
                        (OperationTypeEnum.DATA_ANALYSE, 8),
                        (OperationTypeEnum.DATA_GENERATE, 8),
                        (OperationTypeEnum.DATA_EXTRACT, 8),
                        (OperationTypeEnum.NEUTRALIZATION_TEXT, 9),
                    ),
                    version="qwen2.5:7b",
                    calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: PRICE_TEXT_PER_CALL
                ),
                "ollamaModel": "qwen2.5:7b"
            },
            # Vision General Model (qwen2.5vl:7b: 8.29B)
            {
                "model": AiModel(
                    name="poweron-vision-general",
                    displayName="PowerOn Vision General",
                    connectorType="privatellm",
                    apiUrl=f"{self.baseUrl}/api/analyze",
                    temperature=0.2,
                    maxTokens=2048,
                    contextLength=4096,  # Reduced for RAM constraints (vision needs more)
                    costPer1kTokensInput=0.0,  # Flat rate pricing
                    costPer1kTokensOutput=0.0,  # Flat rate pricing
                    speedRating=7,
                    qualityRating=9,
                    functionCall=self.callAiVision,
                    priority=PriorityEnum.BALANCED,
                    processingMode=ProcessingModeEnum.ADVANCED,
                    operationTypes=createOperationTypeRatings(
                        (OperationTypeEnum.IMAGE_ANALYSE, 9),
                        (OperationTypeEnum.NEUTRALIZATION_IMAGE, 9),
                    ),
                    version="qwen2.5vl:7b",
                    calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: PRICE_VISION_PER_CALL
                ),
                "ollamaModel": "qwen2.5vl:7b"
            },
            # Vision Deep Model (granite3.2-vision: 2B)
            {
                "model": AiModel(
                    name="poweron-vision-deep",
                    displayName="PowerOn Vision Deep",
                    connectorType="privatellm",
                    apiUrl=f"{self.baseUrl}/api/analyze",
                    temperature=0.1,
                    maxTokens=2048,
                    contextLength=4096,  # Reduced for RAM constraints
                    costPer1kTokensInput=0.0,  # Flat rate pricing
                    costPer1kTokensOutput=0.0,  # Flat rate pricing
                    speedRating=9,  # Fast due to small 2B model
                    qualityRating=8,  # Good for document understanding
                    functionCall=self.callAiVision,
                    priority=PriorityEnum.QUALITY,
                    processingMode=ProcessingModeEnum.DETAILED,
                    operationTypes=createOperationTypeRatings(
                        (OperationTypeEnum.IMAGE_ANALYSE, 9),
                        (OperationTypeEnum.NEUTRALIZATION_IMAGE, 9),
                    ),
                    version="granite3.2-vision",
                    calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: PRICE_VISION_PER_CALL
                ),
                "ollamaModel": "granite3.2-vision"
            },
        ]

        # Filter models by Ollama availability
        availableModels = []
        unavailableModels = []

        for modelDef in modelDefinitions:
            ollamaModelName = modelDef["ollamaModel"]
            if self._isModelAvailableInOllama(ollamaModelName, availableOllamaModels):
                availableModels.append(modelDef["model"])
            else:
                unavailableModels.append(modelDef["model"].name)

        if unavailableModels:
            logger.warning(
                f"Private-LLM: {len(unavailableModels)} models not available in Ollama: {', '.join(unavailableModels)}. "
                f"Install with: ollama pull <model-name>"
            )

        if availableModels:
            logger.info(f"Private-LLM: {len(availableModels)} models available")
        else:
            logger.warning("Private-LLM: No models available. Check Ollama installation.")

        return availableModels

    async def callAiText(self, modelCall: AiModelCall) -> AiModelResponse:
        """
        Call the Private-LLM API for text-based analysis.

        Args:
            modelCall: AiModelCall with messages

        Returns:
            AiModelResponse with content and metadata
        """
        try:
            messages = modelCall.messages
            model = modelCall.model

            # Extract prompt from messages
            prompt = ""
            for msg in messages:
                content = msg.get("content", "")
                if isinstance(content, str):
                    prompt += content + "\n"
                elif isinstance(content, list):
                    for part in content:
                        if isinstance(part, dict) and part.get("type") == "text":
                            prompt += part.get("text", "") + "\n"

            payload = {
                "modelName": model.name,
                "prompt": prompt.strip(),
                "imageBase64": None
            }

            logger.debug(f"Calling Private-LLM text API with model {model.name}")

            response = await self.httpClient.post(
                model.apiUrl,
                json=payload
            )

            if response.status_code != 200:
                errorMessage = f"Private-LLM API error: {response.status_code} - {response.text}"
                if response.status_code == 429:
                    logger.warning(errorMessage)
                    raise RateLimitExceededException(errorMessage)
                logger.error(errorMessage)
                raise HTTPException(status_code=500, detail=errorMessage)

            responseJson = response.json()

            if not responseJson.get("success", False):
                errorMsg = responseJson.get("error", "Unknown error")
                logger.error(f"Private-LLM returned error: {errorMsg}")
                return AiModelResponse(
                    content="",
                    success=False,
                    error=errorMsg
                )

            # Extract content from response
            data = responseJson.get("data", {})
            rawResponse = responseJson.get("rawResponse", "")

            # Prefer rawResponse for full content, fall back to data
            content = rawResponse if rawResponse else str(data.get("response", data))

            return AiModelResponse(
                content=content,
                success=True,
                modelId=model.name,
                metadata={"data": data}
            )

        except HTTPException:
            raise
        except Exception as e:
            logger.error(f"Error calling Private-LLM text API: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Error calling Private-LLM API: {str(e)}")
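
    # Illustrative only (not in the original source): the extraction loop above
    # flattens both plain-string and OpenAI-style multi-part messages into a
    # single newline-joined prompt, e.g.
    #
    #   messages = [
    #       {"role": "system", "content": "You are a helpful assistant."},
    #       {"role": "user", "content": [{"type": "text", "text": "Summarize this."}]},
    #   ]
    #   # -> payload["prompt"] == "You are a helpful assistant.\nSummarize this."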

    async def callAiVision(self, modelCall: AiModelCall) -> AiModelResponse:
        """
        Call the Private-LLM API for vision-based analysis.

        Args:
            modelCall: AiModelCall with messages containing image data

        Returns:
            AiModelResponse with analysis content
        """
        try:
            messages = modelCall.messages
            model = modelCall.model

            # Extract prompt and image from messages
            prompt = ""
            imageBase64 = None

            for msg in messages:
                content = msg.get("content", "")

                if isinstance(content, str):
                    prompt += content + "\n"
                elif isinstance(content, list):
                    for part in content:
                        if isinstance(part, dict):
                            if part.get("type") == "text":
                                prompt += part.get("text", "") + "\n"
                            elif part.get("type") == "image_url":
                                imageUrl = part.get("image_url", {}).get("url", "")
                                # Extract base64 from data URL
                                if imageUrl.startswith("data:"):
                                    # Format: data:image/png;base64,<base64data>
                                    parts = imageUrl.split(",", 1)
                                    if len(parts) == 2:
                                        imageBase64 = parts[1]
                                else:
                                    imageBase64 = imageUrl

            if not imageBase64:
                logger.warning("No image provided for vision model call")

            payload = {
                "modelName": model.name,
                "prompt": prompt.strip(),
                "imageBase64": imageBase64
            }

            logger.debug(f"Calling Private-LLM vision API with model {model.name}")

            response = await self.httpClient.post(
                model.apiUrl,
                json=payload
            )

            if response.status_code != 200:
                errorMessage = f"Private-LLM API error: {response.status_code} - {response.text}"
                if response.status_code == 429:
                    logger.warning(errorMessage)
                    raise RateLimitExceededException(errorMessage)
                logger.error(errorMessage)
                raise HTTPException(status_code=500, detail=errorMessage)

            responseJson = response.json()

            if not responseJson.get("success", False):
                errorMsg = responseJson.get("error", "Unknown error")
                logger.error(f"Private-LLM returned error: {errorMsg}")
                return AiModelResponse(
                    content="",
                    success=False,
                    error=errorMsg
                )

            # Extract content from response
            data = responseJson.get("data", {})
            rawResponse = responseJson.get("rawResponse", "")

            # Prefer rawResponse for full content
            content = rawResponse if rawResponse else str(data.get("response", data))

            return AiModelResponse(
                content=content,
                success=True,
                modelId=model.name,
                metadata={"data": data}
            )

        except HTTPException:
            raise
        except Exception as e:
            logger.error(f"Error calling Private-LLM vision API: {str(e)}", exc_info=True)
            return AiModelResponse(
                content="",
                success=False,
                error=f"Error during vision analysis: {str(e)}"
            )
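
Both methods above post the same modelName/prompt/imageBase64 payload to /api/analyze and prefer "rawResponse" over "data" when reading the answer. A minimal standalone sketch of such a call; the base URL and API key are placeholder assumptions, and the service must be reachable:

    import asyncio
    import httpx

    async def main() -> None:
        baseUrl = "http://localhost:8080"  # placeholder; use your deployment's URL
        headers = {"Content-Type": "application/json", "X-API-Key": "changeme"}  # placeholder key
        payload = {
            "modelName": "poweron-text-general",
            "prompt": "Say hello in one sentence.",
            "imageBase64": None,  # set to a base64 string for the vision models
        }
        async with httpx.AsyncClient(timeout=3600.0, headers=headers) as client:
            response = await client.post(f"{baseUrl}/api/analyze", json=payload)
            body = response.json()
            # Mirror the connector's preference: rawResponse first, then data
            print(body.get("rawResponse") or body.get("data"))

    asyncio.run(main())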
@@ -1,3 +1,5 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """Tavily web search class.
 """
 
@@ -8,7 +10,7 @@ from dataclasses import dataclass
 from typing import Optional, List, Dict
 from tavily import AsyncTavilyClient
 from modules.shared.configuration import APP_CONFIG
-from modules.aicore.aicoreBase import BaseConnectorAi
+from .aicoreBase import BaseConnectorAi
 from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings, AiCallPromptWebSearch, AiCallPromptWebCrawl
 from modules.datamodels.datamodelTools import CountryCodes
 
@@ -65,11 +67,11 @@ class AiTavily(BaseConnectorAi):
                 priority=PriorityEnum.BALANCED,
                 processingMode=ProcessingModeEnum.BASIC,
                 operationTypes=createOperationTypeRatings(
-                    (OperationTypeEnum.WEB_SEARCH, 9),
+                    (OperationTypeEnum.WEB_SEARCH_DATA, 9),
                     (OperationTypeEnum.WEB_CRAWL, 10)
                 ),
                 version="tavily-search",
-                calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: 0.008  # Simple flat rate
+                calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: 0.008  # Simple flat rate
             )
         ]
@@ -286,7 +288,16 @@ class AiTavily(BaseConnectorAi):
         if maxResults < minResults or maxResults > maxAllowedResults:
             raise ValueError(f"maxResults must be between {minResults} and {maxAllowedResults}")
 
-        # Perform actual API call
+        # Tavily enforces a 400-character query limit
+        TAVILY_MAX_QUERY_LENGTH = 400
+        if len(query) > TAVILY_MAX_QUERY_LENGTH:
+            truncated = query[:TAVILY_MAX_QUERY_LENGTH]
+            lastSpace = truncated.rfind(' ')
+            if lastSpace > TAVILY_MAX_QUERY_LENGTH // 2:
+                truncated = truncated[:lastSpace]
+            logger.warning(f"Tavily query truncated from {len(query)} to {len(truncated)} chars")
+            query = truncated
+
         # Build kwargs only for provided options to avoid API rejections
         kwargs: dict = {"query": query, "max_results": maxResults}
         if searchDepth is not None:
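
The added truncation only backtracks to the last space when that space lies past the 200-character midpoint, so a query made of one unbroken token is still hard-cut at 400. A standalone check of the word-boundary case (illustrative only, mirroring the added lines above):

    query = ("alpha " * 80).strip()   # 479 chars
    truncated = query[:400]           # a raw cut would end mid-word ("...alph")
    lastSpace = truncated.rfind(' ')  # 395 here, past the 200-char midpoint
    if lastSpace > 400 // 2:
        truncated = truncated[:lastSpace]
    print(len(truncated), truncated.endswith("alpha"))  # 395 True -> cut on a word boundary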
@@ -319,13 +330,28 @@ class AiTavily(BaseConnectorAi):
 
             # Return all results without score filtering
             # Tavily's scoring is already applied by the API
-            logger.info(f"Tavily returned {len(response.get('results', []))} results")
+            results_count = len(response.get('results', []))
+            logger.info(f"Tavily returned {results_count} results")
+
+            # Log content availability
+            results_with_content = 0
+            for result in response.get('results', []):
+                if result.get("raw_content"):
+                    results_with_content += 1
+            logger.info(f"Tavily results with raw_content: {results_with_content}/{results_count}")
+
+            # Log first result structure for debugging
+            if response.get('results') and len(response['results']) > 0:
+                first_result = response['results'][0]
+                logger.debug(f"First result keys: {list(first_result.keys())}")
+                raw_content = first_result.get('raw_content') or ''
+                logger.debug(f"First result has raw_content: {'raw_content' in first_result}, content length: {len(raw_content)}")
 
             return [
                 WebSearchResult(
-                    title=result["title"],
-                    url=self._cleanUrl(result["url"]),
-                    rawContent=result.get("raw_content")
+                    title=result.get("title", ""),
+                    url=self._cleanUrl(result.get("url", "")),
+                    rawContent=result.get("raw_content") or result.get("content") or ""
                 )
                 for result in response["results"]
             ]
@@ -343,8 +369,8 @@ class AiTavily(BaseConnectorAi):
         retryDelay = self.crawlRetryDelay
         timeout = self.crawlTimeout
 
-        logger.debug(f"Starting crawl of URL: {url}")
-        logger.debug(f"Crawl settings: instructions={instructions}, limit={limit}, maxDepth={maxDepth}, maxBreadth={maxBreadth}, timeout={timeout}s")
+        logger.info(f"Starting crawl of URL: {url}")
+        logger.info(f"Crawl settings: instructions={instructions[:100] if instructions else None}..., limit={limit}, maxDepth={maxDepth}, maxBreadth={maxBreadth}, timeout={timeout}s")
 
         for attempt in range(maxRetries + 1):
             try:
@@ -369,7 +395,7 @@ class AiTavily(BaseConnectorAi):
                 if maxBreadth:
                     kwargsCrawl["max_breadth"] = maxBreadth
 
-                logger.debug(f"Sending request to Tavily with kwargs: {kwargsCrawl}")
+                logger.info(f"Sending request to Tavily API with parameters: {kwargsCrawl}")
 
                 response = await asyncio.wait_for(
                     self.client.crawl(**kwargsCrawl),
@@ -379,24 +405,90 @@
                 logger.debug(f"Tavily response received: {type(response)}")
 
                 # Parse response - could be dict with results or list
-                if isinstance(response, dict) and "results" in response:
-                    pageResults = response["results"]
+                if isinstance(response, dict):
+                    if "results" in response:
+                        pageResults = response["results"]
+                        logger.debug(f"Found 'results' key in response dict with {len(pageResults)} items")
+                    else:
+                        logger.warning(f"Response dict keys: {list(response.keys())}")
+                        # Check for other possible keys
+                        if "pages" in response:
+                            pageResults = response["pages"]
+                            logger.debug(f"Found 'pages' key with {len(pageResults)} items")
+                        elif "content" in response:
+                            # Single page result
+                            pageResults = [response]
+                            logger.debug("Found 'content' key, treating as single page result")
+                        else:
+                            logger.warning(f"Unexpected response dict structure: {list(response.keys())}")
+                            pageResults = []
                 elif isinstance(response, list):
                     pageResults = response
+                    logger.debug(f"Response is a list with {len(pageResults)} items")
                 else:
-                    logger.warning(f"Unexpected response format: {type(response)}")
+                    logger.warning(f"Unexpected response format: {type(response)}, value: {str(response)[:200]}")
                     pageResults = []
 
-                logger.debug(f"Got {len(pageResults)} pages from crawl")
+                logger.info(f"Got {len(pageResults)} pages from crawl for URL: {url}")
+                if len(pageResults) == 0:
+                    logger.warning(f"Tavily crawl returned 0 pages for URL: {url}. Response structure: {type(response)}")
+                    if isinstance(response, dict):
+                        logger.warning(f"Response keys: {list(response.keys())}")
+                        # Log all values to debug (not just first 3)
+                        for key, value in response.items():
+                            value_str = str(value)
+                            if len(value_str) > 200:
+                                value_str = value_str[:200] + "..."
+                            logger.warning(f"  {key}: {type(value)} - {value_str}")
 
-                # Convert to WebCrawlResult format
+                        # Check for error messages in response
+                        if "error" in response:
+                            logger.error(f"Tavily API error in response: {response.get('error')}")
+                        if "message" in response:
+                            logger.warning(f"Tavily API message: {response.get('message')}")
+                    elif isinstance(response, str):
+                        logger.warning(f"Tavily returned string response (first 500 chars): {response[:500]}")
+                    else:
+                        logger.warning(f"Unexpected response type: {type(response)}, value: {str(response)[:500]}")
+
+                # Convert to WebCrawlResult format with error handling
                 results = []
-                for result in pageResults:
-                    results.append(WebCrawlResult(
-                        url=result.get("url", url),
-                        content=result.get("raw_content", result.get("content", "")),
-                        title=result.get("title", "")
-                    ))
+                for idx, result in enumerate(pageResults):
+                    try:
+                        # Safely extract fields
+                        result_url = result.get("url") if isinstance(result, dict) else (getattr(result, "url", None) if hasattr(result, "url") else url)
+                        result_content = ""
+                        if isinstance(result, dict):
+                            result_content = result.get("raw_content") or result.get("content") or ""
+                        elif hasattr(result, "raw_content"):
+                            result_content = result.raw_content or ""
+                        elif hasattr(result, "content"):
+                            result_content = result.content or ""
+
+                        result_title = ""
+                        if isinstance(result, dict):
+                            result_title = result.get("title", "")
+                        elif hasattr(result, "title"):
+                            result_title = result.title or ""
+
+                        results.append(WebCrawlResult(
+                            url=result_url or url,
+                            content=result_content,
+                            title=result_title
+                        ))
+                    except Exception as resultError:
+                        logger.warning(f"Error processing crawl result {idx}: {resultError}")
+                        # Try to create a minimal result with at least the URL
+                        try:
+                            if isinstance(result, dict) and result.get("url"):
+                                results.append(WebCrawlResult(
+                                    url=result.get("url", url),
+                                    content="",
+                                    title=""
+                                ))
+                        except Exception:
+                            logger.error(f"Failed to create minimal result for crawl result {idx}")
+                        continue
 
                 logger.debug(f"Crawl successful: extracted {len(results)} pages from URL")
                 return results
@@ -411,7 +503,7 @@ class AiTavily(BaseConnectorAi):
 
             except Exception as e:
                 logger.warning(f"Crawl attempt {attempt + 1} failed for URL {url}: {str(e)}")
-                logger.debug(f"Full error details: {type(e).__name__}: {str(e)}")
+                logger.debug(f"Full error details: {type(e).__name__}: {str(e)}", exc_info=True)
 
                 # Check if it's a validation error and log more details
                 if "validation" in str(e).lower():
@@ -425,10 +517,22 @@
                     if len(url) > 2000:
                         logger.debug(f"  WARNING: URL is very long ({len(url)} chars)")
 
+                # Log API-specific errors
+                error_str = str(e).lower()
+                if "rate limit" in error_str or "429" in error_str:
+                    logger.error(f"Tavily API rate limit hit for URL: {url}")
+                elif "401" in error_str or "unauthorized" in error_str:
+                    logger.error(f"Tavily API authentication failed for URL: {url}")
+                elif "404" in error_str or "not found" in error_str:
+                    logger.warning(f"URL not found (404) for: {url}")
+                elif "timeout" in error_str:
+                    logger.warning(f"Timeout error for URL: {url}")
+
                 if attempt < maxRetries:
                     logger.info(f"Retrying in {retryDelay} seconds...")
                     await asyncio.sleep(retryDelay)
                 else:
+                    logger.error(f"Crawl failed after {maxRetries + 1} attempts for URL: {url}")
                     raise Exception(f"Crawl failed after {maxRetries + 1} attempts: {str(e)}")
 
     async def _routeWebOperation(self, modelCall: AiModelCall) -> "AiModelResponse":
@@ -443,7 +547,7 @@ class AiTavily(BaseConnectorAi):
         """
         operationType = modelCall.options.operationType
 
-        if operationType == OperationTypeEnum.WEB_SEARCH:
+        if operationType == OperationTypeEnum.WEB_SEARCH_DATA:
             return await self.webSearch(modelCall)
         elif operationType == OperationTypeEnum.WEB_CRAWL:
             return await self.webCrawl(modelCall)
@@ -457,7 +561,7 @@ class AiTavily(BaseConnectorAi):
 
     async def webSearch(self, modelCall: AiModelCall) -> "AiModelResponse":
         """
-        WEB_SEARCH operation - returns list of URLs using Tavily search.
+        WEB_SEARCH_DATA operation - returns list of URLs using Tavily search.
 
         Args:
             modelCall: AiModelCall with AiCallPromptWebSearch as prompt
@@ -506,21 +610,84 @@ class AiTavily(BaseConnectorAi):
                 includeRawContent="text"
             )
 
-            # Extract URLs from results
-            urls = [result.url for result in searchResults]
+            # Extract URLs and content from results with error handling
+            urls = []
+            results_with_content = []
+            content_count = 0
 
-            # Return as JSON array
+            try:
+                for result in searchResults:
+                    try:
+                        # Safely extract URL
+                        url = result.url if hasattr(result, 'url') and result.url else ""
+                        if url:
+                            urls.append(url)
+
+                        # Safely extract content
+                        content = ""
+                        if hasattr(result, 'rawContent'):
+                            content = result.rawContent or ""
+                        if not content and hasattr(result, 'content'):
+                            content = result.content or ""
+
+                        if content:
+                            content_count += 1
+
+                        # Safely extract title
+                        title = result.title if hasattr(result, 'title') and result.title else ""
+
+                        results_with_content.append({
+                            "url": url,
+                            "title": title,
+                            "content": content,
+                            "score": getattr(result, 'score', 0)
+                        })
+                    except Exception as resultError:
+                        logger.warning(f"Error processing individual search result: {resultError}")
+                        # Continue processing other results
+                        continue
+
+                logger.info(f"Tavily search: {len(urls)} URLs, {content_count} with content, {len(results_with_content)} total results")
+                if content_count == 0:
+                    logger.warning("Tavily search returned no content - results may need crawling")
+            except Exception as extractionError:
+                logger.error(f"Error extracting URLs and content from search results: {extractionError}")
+                # Try to recover at least URLs
+                try:
+                    urls = [result.url for result in searchResults if hasattr(result, 'url') and result.url]
+                    logger.info(f"Recovered {len(urls)} URLs after extraction error")
+                except Exception:
+                    logger.error("Failed to recover any URLs from search results")
+
+            # Return both URLs and full results in JSON for direct extraction
+            # Format: {"urls": [...], "results": [...]}
             import json
+            response_data = {
+                "urls": urls,
+                "results": results_with_content
+            }
+
             return AiModelResponse(
-                content=json.dumps(urls, indent=2),
+                content=json.dumps(response_data, indent=2),
                 success=True,
-                metadata={"total_urls": len(urls), "operation": "WEB_SEARCH"}
+                metadata={
+                    "total_urls": len(urls),
+                    "operation": "WEB_SEARCH_DATA",
+                    "results_with_content": results_with_content  # Also in metadata for compatibility
+                }
             )
 
         except Exception as e:
-            logger.error(f"Error in Tavily web search: {str(e)}")
+            logger.error(f"Error in Tavily web search: {str(e)}", exc_info=True)
+            import json
+            # Return error response with empty results
+            error_response = {
+                "urls": [],
+                "results": [],
+                "error": str(e)
+            }
             return AiModelResponse(
-                content="[]",
+                content=json.dumps(error_response, indent=2),
                 success=False,
                 error=str(e)
            )
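
Consumers of webSearch can now parse a single JSON envelope instead of a bare URL array. A representative payload, with invented values for illustration:

    import json

    example = json.dumps({
        "urls": ["https://example.com/a"],
        "results": [
            {
                "url": "https://example.com/a",
                "title": "Example page",
                "content": "Raw page text ...",
                "score": 0.91,
            }
        ],
    }, indent=2)
    parsed = json.loads(example)
    assert parsed["urls"][0] == parsed["results"][0]["url"]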
@@ -570,26 +737,46 @@ class AiTavily(BaseConnectorAi):
                 maxBreadth=webCrawlPrompt.maxWidth or 40  # Use same as limit for breadth
             )
 
-            # If we got multiple pages from the crawl, we need to format them differently
-            # Return the first result for backwards compatibility, but include total page count
+            # Format multiple pages from the crawl into a single response
             if crawlResults and len(crawlResults) > 0:
-                # Get all pages content
+                # Get all pages content with error handling
                 allContent = ""
+                pageUrls = []
                 for i, result in enumerate(crawlResults, 1):
-                    pageHeader = f"\n{'='*60}\nPAGE {i}: {result.url}\n{'='*60}\n"
-                    if result.title:
-                        allContent += f"{pageHeader}Title: {result.title}\n\n"
-                    allContent += f"{result.content}\n"
+                    try:
+                        pageHeader = f"\n{'='*60}\nPAGE {i}: {result.url}\n{'='*60}\n"
+                        if result.title:
+                            allContent += f"{pageHeader}Title: {result.title}\n\n"
+                        else:
+                            allContent += f"{pageHeader}\n"
+                        allContent += f"{result.content or ''}\n"
+                        pageUrls.append(result.url)
+                    except Exception as pageError:
+                        logger.warning(f"Error formatting page {i} from crawl: {pageError}")
+                        # Try to add at least the URL
+                        try:
+                            pageUrls.append(result.url if hasattr(result, 'url') and result.url else webCrawlPrompt.url)
+                        except Exception:
+                            pass
 
                 resultData = {
                     "url": webCrawlPrompt.url,
-                    "title": crawlResults[0].title if crawlResults[0].title else "Content",
+                    "title": crawlResults[0].title if crawlResults and crawlResults[0].title else "Content",
                     "content": allContent,
                     "pagesCrawled": len(crawlResults),
-                    "pageUrls": [result.url for result in crawlResults]
+                    "pageUrls": pageUrls
                 }
+                logger.info(f"Crawl successful: {len(crawlResults)} pages extracted from {webCrawlPrompt.url}")
             else:
-                resultData = {"url": webCrawlPrompt.url, "title": "", "content": "", "error": "No content extracted", "pagesCrawled": 0}
+                logger.warning(f"Crawl returned no results for URL: {webCrawlPrompt.url}")
+                resultData = {
+                    "url": webCrawlPrompt.url,
+                    "title": "",
+                    "content": "",
+                    "error": "No content extracted - Tavily crawl returned 0 pages",
+                    "pagesCrawled": 0,
+                    "pageUrls": []
+                }
 
             # Return as JSON - same format as Perplexity but with multiple pages content
             import json
@@ -600,9 +787,17 @@ class AiTavily(BaseConnectorAi):
             )
 
         except Exception as e:
-            logger.error(f"Error in Tavily web crawl: {str(e)}")
+            logger.error(f"Error in Tavily web crawl: {str(e)}", exc_info=True)
             import json
-            errorResult = {"error": str(e), "url": webCrawlPrompt.url if 'webCrawlPrompt' in locals() else ""}
+            crawl_url = webCrawlPrompt.url if 'webCrawlPrompt' in locals() else ""
+            errorResult = {
+                "url": crawl_url,
+                "title": "",
+                "content": "",
+                "error": str(e),
+                "pagesCrawled": 0,
+                "pageUrls": []
+            }
             return AiModelResponse(
                 content=json.dumps(errorResult, indent=2),
                 success=False,
68
modules/auth/__init__.py
Normal file

@@ -0,0 +1,68 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Authentication and authorization modules for routes and services.
High-level security functionality that depends on FastAPI and interfaces.

Multi-Tenant Design:
- RequestContext: Per-request context with user, mandate, feature instance, roles
- getRequestContext: FastAPI dependency to extract context from X-Mandate-Id header
- requireSysAdmin: FastAPI dependency for INFRASTRUCTURE-level operations
  (logs, tokens, DB-health, i18n-master). Includes RBAC bypass.
- requirePlatformAdmin: FastAPI dependency for CROSS-MANDATE GOVERNANCE
  (user-/mandate-/RBAC-/feature-registry mgmt). No bypass.
"""

from .authentication import (
    getCurrentUser,
    limiter,
    SECRET_KEY,
    ALGORITHM,
    cookieAuth,
    RequestContext,
    getRequestContext,
    requireSysAdmin,
    requirePlatformAdmin,
)
from .jwtService import (
    createAccessToken,
    createRefreshToken,
    setAccessTokenCookie,
    setRefreshTokenCookie,
    clearAccessTokenCookie,
    clearRefreshTokenCookie
)
from .tokenManager import TokenManager
from .tokenRefreshService import token_refresh_service, TokenRefreshService
from .tokenRefreshMiddleware import TokenRefreshMiddleware, ProactiveTokenRefreshMiddleware
from .csrf import CSRFMiddleware

__all__ = [
    # Authentication
    "getCurrentUser",
    "limiter",
    "SECRET_KEY",
    "ALGORITHM",
    "cookieAuth",
    # Multi-Tenant Context
    "RequestContext",
    "getRequestContext",
    "requireSysAdmin",
    "requirePlatformAdmin",
    # JWT Service
    "createAccessToken",
    "createRefreshToken",
    "setAccessTokenCookie",
    "setRefreshTokenCookie",
    "clearAccessTokenCookie",
    "clearRefreshTokenCookie",
    # Token Management
    "TokenManager",
    "token_refresh_service",
    "TokenRefreshService",
    "TokenRefreshMiddleware",
    "ProactiveTokenRefreshMiddleware",
    # CSRF
    "CSRFMiddleware",
]
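
A minimal sketch of how a route module would consume these exports; the route path and handler name are invented for illustration, while the imports and headers are the ones this package defines:

    from fastapi import APIRouter, Depends

    from modules.auth import RequestContext, getRequestContext

    router = APIRouter()

    @router.get("/mandate-data")  # hypothetical route
    async def readMandateData(ctx: RequestContext = Depends(getRequestContext)):
        # ctx.mandateId comes from the X-Mandate-Id header sent by the client;
        # ctx.roleIds holds the caller's roles within that mandate
        return {"mandateId": ctx.mandateId, "roles": ctx.roleIds}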

497
modules/auth/authentication.py
Normal file

@@ -0,0 +1,497 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Authentication module for backend API.
Handles JWT-based authentication, token generation, and user context.

Multi-Tenant Design:
- The token is NOT bound to a mandate
- A user works in several mandates in parallel (e.g. multiple browser tabs)
- The mandate context is determined per request via the X-Mandate-Id header
- The request context encapsulates user + mandate + feature instance + loaded roles
"""

from typing import Optional, Dict, Any, Tuple, List
from fastapi import Depends, HTTPException, status, Request, Response, Header
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from jose import JWTError, jwt
import logging
from slowapi import Limiter
from slowapi.util import get_remote_address

from modules.shared.configuration import APP_CONFIG
from modules.security.rootAccess import getRootDbAppConnector, getRootUser
from modules.interfaces.interfaceDbApp import getInterface, getRootInterface
from modules.datamodels.datamodelUam import User, AuthAuthority, AccessLevel
from modules.datamodels.datamodelSecurity import Token, TokenPurpose
from modules.datamodels.datamodelRbac import AccessRule

# Get Config Data
SECRET_KEY = APP_CONFIG.get("APP_JWT_KEY_SECRET")
ALGORITHM = APP_CONFIG.get("Auth_ALGORITHM")
ACCESS_TOKEN_EXPIRE_MINUTES = int(APP_CONFIG.get("APP_TOKEN_EXPIRY"))
REFRESH_TOKEN_EXPIRE_DAYS = int(APP_CONFIG.get("APP_REFRESH_TOKEN_EXPIRY", "7"))


# Cookie-based Authentication Setup
class CookieAuth(HTTPBearer):
    """Cookie-based authentication that checks httpOnly cookies first, then the Authorization header."""
    def __init__(self, auto_error: bool = True):
        super().__init__(auto_error=auto_error)

    async def __call__(self, request: Request) -> Optional[str]:
        # 1. Check httpOnly cookie first (preferred method)
        token = request.cookies.get('auth_token')
        if token:
            return token

        # 2. Fall back to the Authorization header for API calls
        authorization = request.headers.get("Authorization")
        if authorization and authorization.startswith("Bearer "):
            return authorization.split(" ")[1]

        if self.auto_error:
            raise HTTPException(status_code=401, detail="Not authenticated")
        return None


# Initialize cookie-based auth
cookieAuth = CookieAuth(auto_error=False)
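
# Resolution order in practice (illustrative note, not in the original file):
#   1. Browser session: the httpOnly "auth_token" cookie set at login wins.
#   2. API client: "Authorization: Bearer <jwt>" works without any cookie.
#   3. Neither present: with auto_error=False, __call__ returns None and the
#      downstream dependency (_getUserBase below) turns that into a 401.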

# Rate Limiter
limiter = Limiter(key_func=get_remote_address)

# Logger
logger = logging.getLogger(__name__)

# Note: JWT creation and cookie helpers moved to modules.auth.jwtService


def _getUserBase(token: str = Depends(cookieAuth)) -> User:
    """
    Extracts and validates the current user from the JWT token.

    Args:
        token: JWT token from the Authorization header

    Returns:
        User model instance

    Raises:
        HTTPException: For an invalid token or user
    """
    credentialsException = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid authentication credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )

    # Guard: token may be None or malformed when cookie/header is missing or bad
    if not token or not isinstance(token, str):
        logger.warning("Missing JWT Token (no cookie/header)")
        raise credentialsException
    # Basic JWT format check (header.payload.signature)
    try:
        if token.count(".") != 2:
            logger.warning("Malformed JWT token format")
            raise credentialsException
    except Exception:
        # If anything odd happens while checking the format, treat it as invalid creds
        raise credentialsException

    try:
        # Decode token
        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])

        # Extract username from token
        username: str = payload.get("sub")
        if username is None:
            raise credentialsException

        # Extract user ID from token
        # MULTI-TENANT: mandateId is NO LONGER in the token - it comes from the X-Mandate-Id header
        userId: str = payload.get("userId")
        authority: str = payload.get("authenticationAuthority")
        tokenId: Optional[str] = payload.get("jti")
        sessionId: Optional[str] = payload.get("sid") or payload.get("sessionId")

        # Only userId is required in the token now (no mandateId)
        if not userId:
            logger.error("Missing userId in token")
            raise credentialsException

    except JWTError:
        logger.warning("Invalid JWT Token")
        raise credentialsException

    # Get root user and interface for database access
    rootUser = getRootUser()
    appInterface = getInterface(rootUser)

    # Retrieve user from database
    user = appInterface.getUserByUsername(username)

    if user is None:
        logger.warning(f"User {username} not found")
        raise credentialsException

    # Check if user is enabled
    if not user.enabled:
        logger.warning(f"User {username} is disabled")
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User is disabled")

    # Ensure the user ID in the token matches the user in the database
    # MULTI-TENANT: mandateId is NO LONGER checked here - it comes from headers
    if str(user.id) != str(userId):
        logger.error(f"User ID mismatch: token(userId={userId}) vs user(id={user.id})")
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="User context has changed. Please log in again.",
            headers={"WWW-Authenticate": "Bearer"},
        )

    # For LOCAL gateway JWTs, enforce DB-backed token validity and revocation
    try:
        # Normalize authority to string for comparison
        normalized_authority = (str(authority).lower() if authority is not None else None)

        # If we have a token id, check if a corresponding DB token exists for local authority
        db_tokens = []
        if tokenId:
            try:
                dbApp = getRootDbAppConnector()
                db_tokens = dbApp.getRecordset(
                    Token, recordFilter={"id": tokenId}
                )
            except Exception as e:
                # Check if this is a table-not-found error (token table was deleted)
                if "does not exist" in str(e).lower() or "relation" in str(e).lower():
                    logger.error("Token table does not exist - database may have been reset")
                    raise HTTPException(
                        status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
                        detail="Authentication service temporarily unavailable. Please contact administrator."
                    )
                db_tokens = []

        if db_tokens:
            # There is a server record for this token; enforce status and context when local
            db_token = db_tokens[0]
            token_authority = str(db_token.get("authority", "")).lower()
            if token_authority == str(AuthAuthority.LOCAL.value):
                # Must be active and match user/session
                # MULTI-TENANT: mandateId is NOT checked here - tokens are no longer mandate-bound
                active_token = appInterface.findActiveTokenById(
                    tokenId=tokenId,
                    userId=user.id,
                    authority=AuthAuthority.LOCAL,
                    sessionId=sessionId,
                    mandateId=None,  # Token is no longer mandate-bound
                )
                if not active_token:
                    logger.info(
                        f"Local JWT db record not active/valid: jti={tokenId}, userId={user.id}, sessionId={sessionId}"
                    )
                    raise credentialsException
            elif token_authority == str(AuthAuthority.GOOGLE.value):
                active_token = appInterface.findActiveTokenById(
                    tokenId=tokenId,
                    userId=user.id,
                    authority=AuthAuthority.GOOGLE,
                    sessionId=sessionId,
                    mandateId=None,
                    tokenPurpose=TokenPurpose.AUTH_SESSION.value,
                )
                if not active_token:
                    logger.info(
                        f"Google JWT db record not active/valid: jti={tokenId}, userId={user.id}"
                    )
                    raise credentialsException
            elif token_authority == str(AuthAuthority.MSFT.value):
                active_token = appInterface.findActiveTokenById(
                    tokenId=tokenId,
                    userId=user.id,
                    authority=AuthAuthority.MSFT,
                    sessionId=sessionId,
                    mandateId=None,
                    tokenPurpose=TokenPurpose.AUTH_SESSION.value,
                )
                if not active_token:
                    logger.info(
                        f"Microsoft JWT db record not active/valid: jti={tokenId}, userId={user.id}"
                    )
                    raise credentialsException
        else:
            # No DB record for this token. If the claim says local (or missing/unknown), require a DB record.
            if normalized_authority in (
                None,
                "",
                str(AuthAuthority.LOCAL.value),
                str(AuthAuthority.GOOGLE.value),
                str(AuthAuthority.MSFT.value),
            ):
                logger.info(
                    "JWT without server record or missing authority claim (local/google/msft require DB row)"
                )
                raise credentialsException
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error during local token validation: {str(e)}")
        raise credentialsException

    return user


def getCurrentUser(currentUser: User = Depends(_getUserBase)) -> User:
    """Get current active user with additional validation."""
    # Check if current user is enabled
    if not currentUser.enabled:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="User is disabled"
        )
    return currentUser


# =============================================================================
# MULTI-TENANT: Request Context System
# =============================================================================

class RequestContext:
    """
    Request context for multi-tenant operations.

    Contains user, mandate context, feature instance context, and loaded role IDs.
    This context is per-request (not persisted) - follows stateless design.

    IMPORTANT: SysAdmin also needs explicit membership for mandate context!
    The isSysAdmin flag does NOT give implicit access to mandate data.
    """

    def __init__(self, user: User):
        self.user: User = user
        self.mandateId: Optional[str] = None
        self.featureInstanceId: Optional[str] = None
        self.roleIds: List[str] = []

        # Request-scoped cache: rules loaded only once per request
        self._cachedRules: Optional[List[tuple]] = None

    def getRules(self) -> List[tuple]:
        """
        Loads rules once per request (not across requests).
        Returns a list of (priority, AccessRule) tuples.
        """
        if self._cachedRules is None:
            if not self.mandateId:
                # No mandate context = no rules
                self._cachedRules = []
            else:
                try:
                    rootUser = getRootUser()
                    appInterface = getInterface(rootUser)
                    self._cachedRules = appInterface.rbac.getRulesForUserBulk(
                        self.user.id,
                        self.mandateId,
                        self.featureInstanceId
                    )
                except Exception as e:
                    logger.error(f"Error loading RBAC rules: {e}")
                    self._cachedRules = []
        return self._cachedRules

    @property
    def isSysAdmin(self) -> bool:
        """Convenience property: Infrastructure/System Operator flag.
        For category A (logs, tokens, DB health, i18n master, registry).
        Also acts as an RBAC engine bypass (see rbac.py:getUserPermissions)."""
        return getattr(self.user, 'isSysAdmin', False)

    @property
    def isPlatformAdmin(self) -> bool:
        """Convenience property: cross-mandate governance flag.
        For categories B-E (user/mandate/RBAC/feature registry across all mandates).
        NO RBAC bypass: data access still goes through mandate membership."""
        return getattr(self.user, 'isPlatformAdmin', False)


def getRequestContext(
    request: Request,
    mandateId: Optional[str] = Header(None, alias="X-Mandate-Id"),
    featureInstanceId: Optional[str] = Header(None, alias="X-Instance-Id"),
    currentUser: User = Depends(getCurrentUser)
) -> RequestContext:
    """
    Determines the request context from headers.
    Checks authorization and loads role IDs.

    Security Model:
    - Regular users: Must be explicit members of mandates/feature instances.
    - isSysAdmin users: RBAC engine bypass; they may enter any mandate for
      infrastructure operations without membership. ``ctx.roleIds`` stays
      empty (the bypass runs directly in ``rbac.py:getUserPermissions``).
    - isPlatformAdmin users: cross-mandate governance; they may enter any
      mandate, but routes check the authority explicitly via
      ``requirePlatformAdmin``. ``ctx.roleIds`` stays empty.

    Args:
        request: FastAPI Request object
        mandateId: Mandate ID from the X-Mandate-Id header
        featureInstanceId: Feature instance ID from the X-Instance-Id header
        currentUser: Current authenticated user

    Returns:
        RequestContext with user, mandate, roles

    Raises:
        HTTPException 403: If the user is not a member of the mandate (and not a Sys/Platform admin)
    """
    ctx = RequestContext(user=currentUser)
    isSysAdmin = getattr(currentUser, 'isSysAdmin', False)
    isPlatformAdmin = getattr(currentUser, 'isPlatformAdmin', False)

    # Get root interface for membership checks
    rootInterface = getRootInterface()

    if mandateId:
        # Check mandate membership
        membership = rootInterface.getUserMandate(currentUser.id, mandateId)

        if membership:
            # User is a member - load their roles
            if not membership.enabled:
                raise HTTPException(
                    status_code=status.HTTP_403_FORBIDDEN,
                    detail="Mandate membership is disabled"
                )
            ctx.mandateId = mandateId
            ctx.roleIds = rootInterface.getRoleIdsForUserMandate(membership.id)
        elif isSysAdmin or isPlatformAdmin:
            # Platform-level authority can enter any mandate without membership.
            # No fake role loading: isSysAdmin bypasses the RBAC engine; platform-admin
            # routes verify authority explicitly via requirePlatformAdmin.
            ctx.mandateId = mandateId
            ctx.roleIds = []
            logger.debug(
                f"Platform-level user {currentUser.id} accessing mandate {mandateId} "
                f"(isSysAdmin={isSysAdmin}, isPlatformAdmin={isPlatformAdmin})"
            )
        else:
            # Regular user without membership - denied
            logger.warning(f"User {currentUser.id} is not member of mandate {mandateId}")
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="Not member of mandate"
            )

    if featureInstanceId:
        # Check feature access
        access = rootInterface.getFeatureAccess(currentUser.id, featureInstanceId)

        if access:
            # User has access - load their instance roles
            if not access.enabled:
                raise HTTPException(
                    status_code=status.HTTP_403_FORBIDDEN,
                    detail="Feature access is disabled"
                )
            ctx.featureInstanceId = featureInstanceId
            instanceRoleIds = rootInterface.getRoleIdsForFeatureAccess(access.id)
            ctx.roleIds.extend(instanceRoleIds)
        elif isSysAdmin or isPlatformAdmin:
            # Platform-level authority can enter any feature instance without
            # an explicit access record.
            ctx.featureInstanceId = featureInstanceId
            logger.debug(
                f"Platform-level user {currentUser.id} accessing feature instance "
                f"{featureInstanceId} (isSysAdmin={isSysAdmin}, "
                f"isPlatformAdmin={isPlatformAdmin})"
            )
        else:
            # Regular user without access - denied
            logger.warning(f"User {currentUser.id} has no access to feature instance {featureInstanceId}")
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="No access to feature instance"
            )

    return ctx
|
||||||
|
|
||||||
|
|
||||||
|
def requireSysAdmin(currentUser: User = Depends(getCurrentUser)) -> User:
|
||||||
|
"""
|
||||||
|
SysAdmin check for system-level operations.
|
||||||
|
|
||||||
|
Use this dependency for endpoints that require SysAdmin privileges.
|
||||||
|
SysAdmin has access to system-level operations, but NOT to mandate data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
currentUser: Current authenticated user
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
User if they are a SysAdmin
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
HTTPException 403: If user is not a SysAdmin
|
||||||
|
"""
|
||||||
|
if not getattr(currentUser, 'isSysAdmin', False):
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=status.HTTP_403_FORBIDDEN,
|
||||||
|
detail="SysAdmin privileges required"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Audit for all SysAdmin actions
|
||||||
|
try:
|
||||||
|
from modules.shared.auditLogger import audit_logger
|
||||||
|
audit_logger.logSecurityEvent(
|
||||||
|
userId=str(currentUser.id),
|
||||||
|
mandateId="system",
|
||||||
|
action="sysadmin_action",
|
||||||
|
details="System-level operation"
|
||||||
|
)
|
||||||
|
except Exception:
|
||||||
|
# Don't fail if audit logging fails
|
||||||
|
pass
|
||||||
|
|
||||||
|
return currentUser
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# PLATFORM ADMIN: Flag-based cross-mandate governance (replaces sysadmin role)
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def requirePlatformAdmin(currentUser: User = Depends(getCurrentUser)) -> User:
|
||||||
|
"""
|
||||||
|
Require Platform-Admin flag for cross-mandate governance operations.
|
||||||
|
|
||||||
|
Verwendung für alle Operationen, die mandanten-übergreifend wirken:
|
||||||
|
User-Mgmt, Mandate-Mgmt, RBAC-Catalog, Feature-Registry, User-Access-Overview,
|
||||||
|
Cross-Mandate-Audit, Cross-Mandate-Billing-Übersicht, Subscription-Mgmt.
|
||||||
|
|
||||||
|
KEIN RBAC-Bypass: Daten-Zugriff auf einen einzelnen Mandanten erfordert
|
||||||
|
weiterhin Mitgliedschaft (oder zusätzlich isSysAdmin für Infrastruktur-Bypass).
|
||||||
|
|
||||||
|
Args:
|
||||||
|
currentUser: Current authenticated user
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
User if they have isPlatformAdmin=True
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
HTTPException 403: If user is not a Platform Admin
|
||||||
|
"""
|
||||||
|
if not getattr(currentUser, 'isPlatformAdmin', False):
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=status.HTTP_403_FORBIDDEN,
|
||||||
|
detail="Platform admin privileges required"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Audit for all Platform-Admin actions
|
||||||
|
try:
|
||||||
|
from modules.shared.auditLogger import audit_logger
|
||||||
|
audit_logger.logSecurityEvent(
|
||||||
|
userId=str(currentUser.id),
|
||||||
|
mandateId="system",
|
||||||
|
action="platform_admin_action",
|
||||||
|
details="Cross-mandate governance operation"
|
||||||
|
)
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
return currentUser
|
||||||
|
|
||||||
|
|
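These dependencies compose directly in route signatures. The name of the context dependency is not visible in this excerpt (it is called getRequestContext below purely for illustration), and the route paths are placeholders, not paths from the repository:

from fastapi import APIRouter, Depends

router = APIRouter()

# Hypothetical routes showing the three authority levels.
@router.get("/api/mandates/{mandateId}/items")
async def listItems(ctx: RequestContext = Depends(getRequestContext)):
    # ctx.roleIds drives RBAC checks for mandate members; it stays
    # empty for platform-level users entering without membership.
    return {"mandateId": ctx.mandateId, "roles": ctx.roleIds}

@router.post("/api/system/maintenance")
async def runMaintenance(user: User = Depends(requireSysAdmin)):
    # System-level operation: no mandate data is touched here.
    return {"startedBy": str(user.id)}

@router.get("/api/admin/mandates")
async def listAllMandates(user: User = Depends(requirePlatformAdmin)):
    # Cross-mandate governance: authority verified by the flag, audited above.
    return {"requestedBy": str(user.id)}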
@@ -1,3 +1,5 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """
 CSRF Protection Middleware for PowerOn Gateway
@@ -22,12 +24,27 @@ class CSRFMiddleware(BaseHTTPMiddleware):
         self.exempt_paths = exempt_paths or {
             "/api/local/login",
             "/api/local/register",
-            "/api/msft/login",
-            "/api/google/login",
-            "/api/msft/callback",
-            "/api/google/callback"
+            # OAuth Auth app + Data app (GET redirects / callbacks)
+            "/api/msft/auth/login",
+            "/api/msft/auth/login/callback",
+            "/api/msft/auth/connect",
+            "/api/msft/auth/connect/callback",
+            "/api/msft/adminconsent",
+            "/api/msft/adminconsent/callback",
+            "/api/google/auth/login",
+            "/api/google/auth/login/callback",
+            "/api/google/auth/connect",
+            "/api/google/auth/connect/callback",
+            "/api/clickup/auth/connect",
+            "/api/clickup/auth/connect/callback",
+            "/api/billing/webhook/stripe",  # Stripe webhook (auth via Stripe-Signature)
         }

+        # Path prefixes exempt from CSRF (for service-to-service callbacks)
+        self._exemptPrefixes = [
+            "/api/teamsbot/",  # .NET Media Bridge callbacks (bridge/status, bridge/audio)
+        ]
+
         # State-changing HTTP methods that require CSRF protection
         self.protected_methods = {"POST", "PUT", "DELETE", "PATCH"}
@@ -35,10 +52,15 @@ class CSRFMiddleware(BaseHTTPMiddleware):
         """
         Check CSRF token for state-changing operations.
         """
-        # Skip CSRF check for exempt paths
+        # Skip CSRF check for exempt paths (exact match)
         if request.url.path in self.exempt_paths:
             return await call_next(request)

+        # Skip CSRF check for exempt path prefixes (service-to-service callbacks)
+        if any(request.url.path.startswith(p) for p in self._exemptPrefixes):
+            if "/bridge/" in request.url.path or "/bot/" in request.url.path:
+                return await call_next(request)
+
         # Skip CSRF check for non-state-changing methods
         if request.method not in self.protected_methods:
             return await call_next(request)
@@ -66,12 +88,15 @@ class CSRFMiddleware(BaseHTTPMiddleware):
                 content={"detail": "Invalid CSRF token format"}
             )

-        # Additional CSRF validation could be added here:
-        # - Check token against session
-        # - Validate token expiration
-        # - Verify token origin
-
-        return await call_next(request)
+        try:
+            return await call_next(request)
+        except Exception as exc:
+            logger.error("Unhandled exception in %s %s: %s", request.method, request.url.path, exc)
+            from fastapi.responses import JSONResponse
+            return JSONResponse(
+                status_code=500,
+                content={"detail": "Internal server error"},
+            )

     def _is_valid_csrf_token(self, token: str) -> bool:
         """
@@ -96,3 +121,4 @@ class CSRFMiddleware(BaseHTTPMiddleware):
             return True
         except ValueError:
             return False
+
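The new prefix exemption is deliberately two-staged: a request must both start with an exempt prefix and contain a "/bridge/" or "/bot/" segment before the CSRF check is skipped. A standalone sketch of that predicate, with names chosen here for illustration:

EXEMPT_PREFIXES = ["/api/teamsbot/"]

def isCsrfExempt(path: str) -> bool:
    """Mirror of the middleware's prefix rule: prefix AND bridge/bot segment."""
    if any(path.startswith(p) for p in EXEMPT_PREFIXES):
        return "/bridge/" in path or "/bot/" in path
    return False

# "/api/teamsbot/bridge/status"  -> True  (service-to-service callback)
# "/api/teamsbot/settings"       -> False (still CSRF-protected)
# "/api/other/bridge/status"     -> False (prefix not exempt)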
@@ -1,3 +1,5 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """
 JWT Service
 Centralizes local JWT creation and cookie helpers.
42  modules/auth/oauthProviderConfig.py  Normal file
@@ -0,0 +1,42 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""OAuth scope sets for split Auth- vs Data-apps (Google / Microsoft)."""

# Google — Auth app only (no Gmail/Drive API scopes)
googleAuthScopes = [
    "openid",
    "https://www.googleapis.com/auth/userinfo.email",
    "https://www.googleapis.com/auth/userinfo.profile",
]

# Google — Data app (Gmail + Drive + identity for token responses)
googleDataScopes = [
    "openid",
    "https://www.googleapis.com/auth/userinfo.email",
    "https://www.googleapis.com/auth/userinfo.profile",
    "https://www.googleapis.com/auth/gmail.readonly",
    "https://www.googleapis.com/auth/drive.readonly",
]

# Microsoft — Auth app: Graph profile only (MSAL adds openid, profile, offline_access, …)
msftAuthScopes = [
    "User.Read",
]

# Microsoft — Data app (delegated; requires admin consent for several)
msftDataScopes = [
    "User.Read",
    "Mail.ReadWrite",
    "Mail.Send",
    "Files.ReadWrite.All",
    "Sites.ReadWrite.All",
    "Team.ReadBasic.All",
    "OnlineMeetings.Read",
    "Chat.ReadWrite",
    "ChatMessage.Send",
]


def msftDataScopesForRefresh() -> str:
    """Space-separated scope string identical to authorization request (Token v2 refresh)."""
    return " ".join(msftDataScopes)
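These scope lists are the single source of truth for both the authorization request and later token refreshes, which keeps the two in sync (a scope mismatch on refresh is a classic cause of invalid_grant errors). A sketch of building a Google authorization URL from them; the client ID, redirect URI, and state handling are placeholders, not values from the repository:

from urllib.parse import urlencode

def buildGoogleDataAuthUrl(clientId: str, redirectUri: str, state: str) -> str:
    """Assemble a standard OAuth 2.0 authorization URL for the Data app."""
    params = {
        "client_id": clientId,
        "redirect_uri": redirectUri,
        "response_type": "code",
        "scope": " ".join(googleDataScopes),  # same list used on refresh
        "access_type": "offline",             # request a refresh token
        "prompt": "consent",
        "state": state,
    }
    return "https://accounts.google.com/o/oauth2/v2/auth?" + urlencode(params)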
|
@ -1,3 +1,5 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
"""
|
"""
|
||||||
Token Manager Service
|
Token Manager Service
|
||||||
Handles all token operations including automatic refresh for backend services.
|
Handles all token operations including automatic refresh for backend services.
|
||||||
|
|
@ -7,10 +9,11 @@ import logging
|
||||||
import httpx
|
import httpx
|
||||||
from typing import Optional, Dict, Any, Callable
|
from typing import Optional, Dict, Any, Callable
|
||||||
|
|
||||||
from modules.datamodels.datamodelSecurity import Token
|
from modules.datamodels.datamodelSecurity import Token, TokenPurpose
|
||||||
from modules.datamodels.datamodelUam import AuthAuthority
|
from modules.datamodels.datamodelUam import AuthAuthority
|
||||||
from modules.shared.configuration import APP_CONFIG
|
from modules.shared.configuration import APP_CONFIG
|
||||||
from modules.shared.timeUtils import getUtcTimestamp, createExpirationTimestamp, parseTimestamp
|
from modules.shared.timeUtils import getUtcTimestamp, createExpirationTimestamp, parseTimestamp
|
||||||
|
from modules.auth.oauthProviderConfig import msftDataScopesForRefresh
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
@ -18,14 +21,14 @@ class TokenManager:
|
||||||
"""Centralized token management service"""
|
"""Centralized token management service"""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
# Microsoft OAuth configuration
|
# Microsoft Data-app OAuth (refresh + token exchange for connections)
|
||||||
self.msft_client_id = APP_CONFIG.get("Service_MSFT_CLIENT_ID")
|
self.msft_client_id = APP_CONFIG.get("Service_MSFT_DATA_CLIENT_ID")
|
||||||
self.msft_client_secret = APP_CONFIG.get("Service_MSFT_CLIENT_SECRET")
|
self.msft_client_secret = APP_CONFIG.get("Service_MSFT_DATA_CLIENT_SECRET")
|
||||||
self.msft_tenant_id = APP_CONFIG.get("Service_MSFT_TENANT_ID", "common")
|
self.msft_tenant_id = APP_CONFIG.get("Service_MSFT_TENANT_ID", "common")
|
||||||
|
|
||||||
# Google OAuth configuration
|
# Google Data-app OAuth
|
||||||
self.google_client_id = APP_CONFIG.get("Service_GOOGLE_CLIENT_ID")
|
self.google_client_id = APP_CONFIG.get("Service_GOOGLE_DATA_CLIENT_ID")
|
||||||
self.google_client_secret = APP_CONFIG.get("Service_GOOGLE_CLIENT_SECRET")
|
self.google_client_secret = APP_CONFIG.get("Service_GOOGLE_DATA_CLIENT_SECRET")
|
||||||
|
|
||||||
def refreshMicrosoftToken(self, refreshToken: str, userId: str, oldToken: Token) -> Optional[Token]:
|
def refreshMicrosoftToken(self, refreshToken: str, userId: str, oldToken: Token) -> Optional[Token]:
|
||||||
"""Refresh Microsoft OAuth token using refresh token"""
|
"""Refresh Microsoft OAuth token using refresh token"""
|
||||||
|
|
@ -47,7 +50,7 @@ class TokenManager:
|
||||||
"client_secret": self.msft_client_secret,
|
"client_secret": self.msft_client_secret,
|
||||||
"grant_type": "refresh_token",
|
"grant_type": "refresh_token",
|
||||||
"refresh_token": refreshToken,
|
"refresh_token": refreshToken,
|
||||||
"scope": "Mail.ReadWrite Mail.Send Mail.ReadWrite.Shared User.Read"
|
"scope": msftDataScopesForRefresh(),
|
||||||
}
|
}
|
||||||
logger.debug(f"refreshMicrosoftToken: Refresh request data prepared (refreshToken length: {len(refreshToken) if refreshToken else 0})")
|
logger.debug(f"refreshMicrosoftToken: Refresh request data prepared (refreshToken length: {len(refreshToken) if refreshToken else 0})")
|
||||||
|
|
||||||
|
|
@ -66,6 +69,7 @@ class TokenManager:
|
||||||
userId=userId,
|
userId=userId,
|
||||||
authority=AuthAuthority.MSFT,
|
authority=AuthAuthority.MSFT,
|
||||||
connectionId=oldToken.connectionId, # Preserve connection ID
|
connectionId=oldToken.connectionId, # Preserve connection ID
|
||||||
|
tokenPurpose=TokenPurpose.DATA_CONNECTION,
|
||||||
tokenAccess=tokenData["access_token"],
|
tokenAccess=tokenData["access_token"],
|
||||||
tokenRefresh=tokenData.get("refresh_token", refreshToken), # Keep old refresh token if new one not provided
|
tokenRefresh=tokenData.get("refresh_token", refreshToken), # Keep old refresh token if new one not provided
|
||||||
tokenType=tokenData.get("token_type", "bearer"),
|
tokenType=tokenData.get("token_type", "bearer"),
|
||||||
|
|
@ -126,6 +130,7 @@ class TokenManager:
|
||||||
userId=userId,
|
userId=userId,
|
||||||
authority=AuthAuthority.GOOGLE,
|
authority=AuthAuthority.GOOGLE,
|
||||||
connectionId=oldToken.connectionId, # Preserve connection ID
|
connectionId=oldToken.connectionId, # Preserve connection ID
|
||||||
|
tokenPurpose=TokenPurpose.DATA_CONNECTION,
|
||||||
tokenAccess=tokenData["access_token"],
|
tokenAccess=tokenData["access_token"],
|
||||||
tokenRefresh=tokenData.get("refresh_token", refreshToken), # Use new refresh token if provided
|
tokenRefresh=tokenData.get("refresh_token", refreshToken), # Use new refresh token if provided
|
||||||
tokenType=tokenData.get("token_type", "bearer"),
|
tokenType=tokenData.get("token_type", "bearer"),
|
||||||
|
|
@ -163,11 +168,20 @@ class TokenManager:
|
||||||
logger.debug(f"refreshToken: Starting refresh for token {oldToken.id}, authority: {oldToken.authority}")
|
logger.debug(f"refreshToken: Starting refresh for token {oldToken.id}, authority: {oldToken.authority}")
|
||||||
logger.debug(f"refreshToken: Token details: userId={oldToken.userId}, connectionId={oldToken.connectionId}, hasRefreshToken={bool(oldToken.tokenRefresh)}")
|
logger.debug(f"refreshToken: Token details: userId={oldToken.userId}, connectionId={oldToken.connectionId}, hasRefreshToken={bool(oldToken.tokenRefresh)}")
|
||||||
|
|
||||||
|
_tp = (
|
||||||
|
oldToken.tokenPurpose.value
|
||||||
|
if isinstance(oldToken.tokenPurpose, TokenPurpose)
|
||||||
|
else oldToken.tokenPurpose
|
||||||
|
)
|
||||||
|
if _tp != TokenPurpose.DATA_CONNECTION.value:
|
||||||
|
logger.warning("refreshToken: skipped — token is not dataConnection")
|
||||||
|
return None
|
||||||
|
|
||||||
# Cooldown: avoid refreshing too frequently if a workflow triggers refresh repeatedly
|
# Cooldown: avoid refreshing too frequently if a workflow triggers refresh repeatedly
|
||||||
# Only allow a new refresh if at least 10 minutes passed since the token was created/refreshed
|
# Only allow a new refresh if at least 10 minutes passed since the token was created/refreshed
|
||||||
try:
|
try:
|
||||||
nowTs = getUtcTimestamp()
|
nowTs = getUtcTimestamp()
|
||||||
createdTs = parseTimestamp(oldToken.createdAt, default=0.0)
|
createdTs = parseTimestamp(oldToken.sysCreatedAt, default=0.0)
|
||||||
secondsSinceLastRefresh = nowTs - createdTs
|
secondsSinceLastRefresh = nowTs - createdTs
|
||||||
if secondsSinceLastRefresh < 10 * 60:
|
if secondsSinceLastRefresh < 10 * 60:
|
||||||
logger.info(
|
logger.info(
|
||||||
|
|
@ -242,25 +256,44 @@ class TokenManager:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
# Convenience wrapper to fetch and ensure fresh token for a connection via interface layer
|
# Convenience wrapper to fetch and ensure fresh token for a connection via interface layer
|
||||||
def getFreshToken(self, connectionId: str, secondsBeforeExpiry: int = 30 * 60) -> Optional[Token]:
|
def getFreshToken(self, connectionId: str, secondsBeforeExpiry: int = 30 * 60, interface=None) -> Optional[Token]:
|
||||||
"""Return a fresh token for a connection, refreshing when expiring soon.
|
"""Return a fresh token for a connection, refreshing when expiring soon.
|
||||||
|
|
||||||
Reads the latest stored token via interface layer, then
|
Reads the latest stored token via interface layer, then
|
||||||
uses ensure_fresh_token to refresh if needed and persists the refreshed
|
uses ensure_fresh_token to refresh if needed and persists the refreshed
|
||||||
token via interface layer.
|
token via interface layer.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
connectionId: Connection ID to get token for
|
||||||
|
secondsBeforeExpiry: Seconds before expiry to refresh
|
||||||
|
interface: Optional interface instance (if None, uses root interface)
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
from modules.interfaces.interfaceDbAppObjects import getRootInterface
|
if interface is None:
|
||||||
interfaceDbApp = getRootInterface()
|
from modules.security.rootAccess import getRootUser
|
||||||
|
from modules.interfaces.interfaceDbApp import getInterface
|
||||||
|
rootUser = getRootUser()
|
||||||
|
interface = getInterface(rootUser)
|
||||||
|
|
||||||
token = interfaceDbApp.getConnectionToken(connectionId)
|
token = interface.getConnectionToken(connectionId)
|
||||||
if not token:
|
if not token:
|
||||||
return None
|
return None
|
||||||
|
_tp = (
|
||||||
|
token.tokenPurpose.value
|
||||||
|
if isinstance(token.tokenPurpose, TokenPurpose)
|
||||||
|
else token.tokenPurpose
|
||||||
|
)
|
||||||
|
if _tp != TokenPurpose.DATA_CONNECTION.value:
|
||||||
|
logger.warning(
|
||||||
|
f"getFreshToken: connection {connectionId} tokenPurpose is {_tp}, expected dataConnection"
|
||||||
|
)
|
||||||
|
return None
|
||||||
return self.ensureFreshToken(
|
return self.ensureFreshToken(
|
||||||
token,
|
token,
|
||||||
secondsBeforeExpiry=secondsBeforeExpiry,
|
secondsBeforeExpiry=secondsBeforeExpiry,
|
||||||
saveCallback=lambda t: interfaceDbApp.saveConnectionToken(t)
|
saveCallback=lambda t: interface.saveConnectionToken(t)
|
||||||
)
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"getFreshToken: Error fetching or refreshing token for connection {connectionId}: {e}")
|
logger.error(f"getFreshToken: Error fetching or refreshing token for connection {connectionId}: {e}")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
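With the purpose guard in place, callers can no longer accidentally refresh a login token through the data-connection path. A short usage sketch of the wrapper as defined above; the connection ID is a placeholder:

tokenManager = TokenManager()

# Uses the root interface internally when no interface is injected.
token = tokenManager.getFreshToken("conn-123", secondsBeforeExpiry=15 * 60)
if token:
    headers = {"Authorization": f"Bearer {token.tokenAccess}"}
    # ... call the provider API with the guaranteed-fresh access token ...
else:
    # None means: no stored token, wrong tokenPurpose, or refresh failed.
    pass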
@@ -1,3 +1,5 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """
 Token Refresh Middleware for PowerOn Gateway
@@ -10,7 +12,7 @@ from fastapi import Request, Response
 from starlette.middleware.base import BaseHTTPMiddleware
 from typing import Callable
 import asyncio
-from modules.security.tokenRefreshService import token_refresh_service
+from modules.auth.tokenRefreshService import token_refresh_service
 from modules.shared.timeUtils import getUtcTimestamp

 logger = logging.getLogger(__name__)
@@ -183,3 +185,4 @@ class ProactiveTokenRefreshMiddleware(BaseHTTPMiddleware):

         except Exception as e:
             logger.error(f"Error in proactive token refresh for user {user_id}: {str(e)}")
+
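Only the import path changes here (the refresh service moved from modules.security to modules.auth); the middleware itself is untouched. For context, a Starlette BaseHTTPMiddleware subclass like this is registered once on the app. The wiring below is a sketch; the actual app setup is not shown in this diff:

from fastapi import FastAPI

app = FastAPI()

# Runs on every request; proactively refreshes OAuth tokens that are
# close to expiry so downstream connector calls never see a stale token.
app.add_middleware(ProactiveTokenRefreshMiddleware)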
@@ -1,3 +1,5 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """
 Token Refresh Service for PowerOn Gateway
@@ -56,7 +58,7 @@ class TokenRefreshService:
                 return False

             # Import Google token refresh logic
-            from modules.security.tokenManager import TokenManager
+            from modules.auth.tokenManager import TokenManager
             token_manager = TokenManager()

             # Attempt to refresh the token
@@ -68,7 +70,7 @@ class TokenRefreshService:
                 # Update connection status
                 interface.db.recordModify(UserConnection, connection.id, {
                     "lastChecked": getUtcTimestamp(),
-                    "expiresAt": refreshed_token.expiresAt
+                    "expiresAt": refreshedToken.expiresAt
                 })

                 logger.info(f"Successfully refreshed Google token for connection {connection.id}")
@@ -105,7 +107,7 @@ class TokenRefreshService:
                 return False

             # Import Microsoft token refresh logic
-            from modules.security.tokenManager import TokenManager
+            from modules.auth.tokenManager import TokenManager
             token_manager = TokenManager()

             # Attempt to refresh the token
@@ -117,7 +119,7 @@ class TokenRefreshService:
                 # Update connection status
                 interface.db.recordModify(UserConnection, connection.id, {
                     "lastChecked": getUtcTimestamp(),
-                    "expiresAt": refreshed_token.expiresAt
+                    "expiresAt": refreshedToken.expiresAt
                 })

                 logger.info(f"Successfully refreshed Microsoft token for connection {connection.id}")
@@ -156,8 +158,10 @@ class TokenRefreshService:
             logger.debug(f"Starting silent token refresh for user {user_id}")

             # Get user interface
-            from modules.interfaces.interfaceDbAppObjects import getRootInterface
-            root_interface = getRootInterface()
+            from modules.security.rootAccess import getRootUser
+            from modules.interfaces.interfaceDbApp import getInterface
+            rootUser = getRootUser()
+            root_interface = getInterface(rootUser)

             # Get user connections
             connections = root_interface.getUserConnections(user_id)
@@ -223,8 +227,10 @@ class TokenRefreshService:
             logger.debug(f"Starting proactive token refresh for user {user_id}")

             # Get user interface
-            from modules.interfaces.interfaceDbAppObjects import getRootInterface
-            root_interface = getRootInterface()
+            from modules.security.rootAccess import getRootUser
+            from modules.interfaces.interfaceDbApp import getInterface
+            rootUser = getRootUser()
+            root_interface = getInterface(rootUser)

             # Get user connections
             connections = root_interface.getUserConnections(user_id)
@@ -287,3 +293,4 @@ class TokenRefreshService:

 # Global service instance
 token_refresh_service = TokenRefreshService()
+
@@ -1,678 +0,0 @@
import json
import os
from typing import List, Dict, Any, Optional, TypedDict
import logging
import uuid
from pydantic import BaseModel
import threading
import time

from modules.shared.timeUtils import getUtcTimestamp

logger = logging.getLogger(__name__)

class TableCache(TypedDict):
    """Type definition for table cache entries"""
    recordIds: List[str]

class DatabaseConnector:
    """
    A connector for JSON-based data storage.
    Provides generic database operations without user/mandate filtering.
    Stores tables as folders and records as individual files.
    """
    def __init__(self, dbHost: str, dbDatabase: str, dbUser: str = None, dbPassword: str = None, userId: str = None):
        # Store the input parameters
        self.dbHost = dbHost
        self.dbDatabase = dbDatabase
        self.dbUser = dbUser
        self.dbPassword = dbPassword

        # Set userId (default to empty string if None)
        self.userId = userId if userId is not None else ""

        # Initialize database system
        self.initDbSystem()

        # Set up database folder path
        self.dbFolder = os.path.join(self.dbHost, self.dbDatabase)

        # Cache for loaded data
        self._tablesCache: Dict[str, List[Dict[str, Any]]] = {}
        self._tableMetadataCache: Dict[str, TableCache] = {}  # Cache for table metadata (record IDs, etc.)

        # File locks with timeout protection
        self._file_locks = {}
        self._lock_manager = threading.Lock()
        self._lock_timeouts = {}  # Track when locks were acquired

        # Initialize system table
        self._systemTableName = "_system"
        self._initializeSystemTable()

        logger.debug(f"Context: userId={self.userId}")

    def initDbSystem(self):
        """Initialize the database system - creates necessary directories and structure."""
        try:
            # Ensure the database directory exists
            self.dbFolder = os.path.join(self.dbHost, self.dbDatabase)
            os.makedirs(self.dbFolder, exist_ok=True)
            logger.info(f"Database system initialized: {self.dbFolder}")
        except Exception as e:
            logger.error(f"Error initializing database system: {e}")
            raise

    def _initializeSystemTable(self):
        """Initializes the system table if it doesn't exist yet."""
        systemTablePath = self._getTablePath(self._systemTableName)
        if not os.path.exists(systemTablePath):
            emptySystemTable = {}
            self._saveSystemTable(emptySystemTable)
            logger.info(f"System table initialized in {systemTablePath}")
        else:
            # Load existing system table to ensure it's available
            self._loadSystemTable()
            logger.debug(f"Existing system table loaded from {systemTablePath}")

    def _loadSystemTable(self) -> Dict[str, str]:
        """Loads the system table with the initial IDs."""
        # Check if system table is in cache
        if f"_{self._systemTableName}" in self._tablesCache:
            return self._tablesCache[f"_{self._systemTableName}"]

        systemTablePath = self._getTablePath(self._systemTableName)
        try:
            if os.path.exists(systemTablePath):
                with open(systemTablePath, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                # Store in cache with special prefix to avoid collision with regular tables
                self._tablesCache[f"_{self._systemTableName}"] = data
                return data
            else:
                self._tablesCache[f"_{self._systemTableName}"] = {}
                return {}
        except Exception as e:
            logger.error(f"Error loading the system table: {e}")
            self._tablesCache[f"_{self._systemTableName}"] = {}
            return {}

    def _saveSystemTable(self, data: Dict[str, str]) -> bool:
        """Saves the system table with the initial IDs."""
        systemTablePath = self._getTablePath(self._systemTableName)
        try:
            with open(systemTablePath, 'w', encoding='utf-8') as f:
                json.dump(data, f, indent=2, ensure_ascii=False)
            # Update cache
            self._tablesCache[f"_{self._systemTableName}"] = data
            return True
        except Exception as e:
            logger.error(f"Error saving the system table: {e}")
            return False

    def _getTablePath(self, table: str) -> str:
        """Returns the full path to a table folder"""
        return os.path.join(self.dbFolder, table)

    def _getRecordPath(self, table: str, recordId: str) -> str:
        """Returns the full path to a record file"""
        return os.path.join(self._getTablePath(table), f"{recordId}.json")

    def _get_file_lock(self, filepath: str, timeout_seconds: int = 30):
        """Get file lock with timeout protection"""
        with self._lock_manager:
            if filepath not in self._file_locks:
                self._file_locks[filepath] = threading.Lock()

            lock = self._file_locks[filepath]

            # Check if lock is stale (held too long)
            if filepath in self._lock_timeouts:
                lock_age = time.time() - self._lock_timeouts[filepath]
                if lock_age > timeout_seconds:
                    logger.warning(f"Stale lock detected for {filepath}, age: {lock_age}s")
                    # Force release stale lock
                    try:
                        lock.release()
                    except:
                        pass
                    # Create new lock
                    self._file_locks[filepath] = threading.Lock()
                    lock = self._file_locks[filepath]

            return lock

    def _get_table_lock(self, table: str, timeout_seconds: int = 30):
        """Get table-level lock for metadata operations"""
        table_lock_key = f"table_{table}"
        return self._get_file_lock(table_lock_key, timeout_seconds)

    def _ensureTableDirectory(self, table: str) -> bool:
        """Ensures the table directory exists."""
        if table == self._systemTableName:
            return True

        tablePath = self._getTablePath(table)
        try:
            os.makedirs(tablePath, exist_ok=True)
            return True
        except Exception as e:
            logger.error(f"Error creating table directory {tablePath}: {e}")
            return False

    def _loadTableMetadata(self, table: str) -> Dict[str, Any]:
        """Loads table metadata (list of record IDs) without loading actual records.
        NOTE: This method is safe to call without additional locking.
        """
        if table in self._tableMetadataCache:
            return self._tableMetadataCache[table]

        # Ensure table directory exists
        if not self._ensureTableDirectory(table):
            return {"recordIds": []}

        tablePath = self._getTablePath(table)
        metadata = {"recordIds": []}

        try:
            if os.path.exists(tablePath):
                for fileName in os.listdir(tablePath):
                    if fileName.endswith('.json') and fileName != '_metadata.json':
                        recordId = fileName[:-5]  # Remove .json extension
                        metadata["recordIds"].append(recordId)

                metadata["recordIds"].sort()
                self._tableMetadataCache[table] = metadata
        except Exception as e:
            logger.error(f"Error loading table metadata for {table}: {e}")

        return metadata

    def _loadRecord(self, table: str, recordId: str) -> Optional[Dict[str, Any]]:
        """Loads a single record from the table."""
        recordPath = self._getRecordPath(table, recordId)
        try:
            if os.path.exists(recordPath):
                with open(recordPath, 'r', encoding='utf-8') as f:
                    record = json.load(f)
                return record
        except Exception as e:
            logger.error(f"Error loading record {recordId} from table {table}: {e}")
        return None

    def _saveRecord(self, table: str, recordId: str, record: Dict[str, Any]) -> bool:
        """Saves a single record to the table with atomic metadata operations."""
        recordPath = self._getRecordPath(table, recordId)
        record_lock = self._get_file_lock(recordPath)
        table_lock = self._get_table_lock(table)

        try:
            # Acquire both locks with timeout - record lock first, then table lock
            if not record_lock.acquire(timeout=30):
                raise TimeoutError(f"Could not acquire record lock for {recordPath} within 30 seconds")

            if not table_lock.acquire(timeout=30):
                record_lock.release()
                raise TimeoutError(f"Could not acquire table lock for {table} within 30 seconds")

            # Record lock acquisition time
            self._lock_timeouts[recordPath] = time.time()
            self._lock_timeouts[f"table_{table}"] = time.time()

            # Ensure table directory exists
            if not self._ensureTableDirectory(table):
                raise ValueError(f"Error creating table directory for {table}")

            # Ensure recordId is a string
            recordId = str(recordId)

            # CRITICAL: Ensure record ID matches the file name
            if "id" in record and str(record["id"]) != recordId:
                logger.error(f"Record ID mismatch: file name ID ({recordId}) does not match record ID ({record['id']})")
                raise ValueError(f"Record ID mismatch: file name ID ({recordId}) does not match record ID ({record['id']})")

            # Add metadata
            currentTime = getUtcTimestamp()
            if "_createdAt" not in record:
                record["_createdAt"] = currentTime
                record["_createdBy"] = self.userId
            record["_modifiedAt"] = currentTime
            record["_modifiedBy"] = self.userId

            # Save the record file using atomic write
            tempPath = recordPath + '.tmp'

            # Ensure directory exists
            os.makedirs(os.path.dirname(recordPath), exist_ok=True)

            # Write to temporary file first
            with open(tempPath, 'w', encoding='utf-8') as f:
                json.dump(record, f, indent=2, ensure_ascii=False)

            # Verify the temporary file can be read back (validation)
            try:
                with open(tempPath, 'r', encoding='utf-8') as f:
                    json.load(f)  # This will fail if file is corrupted
            except Exception as e:
                logger.error(f"Validation failed for record {recordId}: {e}")
                # Clean up temp file
                if os.path.exists(tempPath):
                    os.remove(tempPath)
                raise ValueError(f"Record validation failed: {e}")

            # Atomic move from temp to final location
            os.replace(tempPath, recordPath)

            # ATOMIC: Update metadata while holding both locks
            metadata = self._loadTableMetadata(table)
            if recordId not in metadata["recordIds"]:
                metadata["recordIds"].append(recordId)
                metadata["recordIds"].sort()
                self._saveTableMetadata(table, metadata)

            # Update cache if it exists (also protected by table lock)
            if table in self._tablesCache:
                # Find and update existing record or append new one
                found = False
                for i, existing_record in enumerate(self._tablesCache[table]):
                    if str(existing_record.get("id")) == recordId:
                        self._tablesCache[table][i] = record
                        found = True
                        break
                if not found:
                    self._tablesCache[table].append(record)

            return True

        except Exception as e:
            logger.error(f"Error saving record {recordId} to table {table}: {e}")
            # Clean up temp file if it exists
            tempPath = self._getRecordPath(table, recordId) + '.tmp'
            if os.path.exists(tempPath):
                try:
                    os.remove(tempPath)
                except:
                    pass
            return False

        finally:
            # ALWAYS release both locks, even on error
            try:
                if table_lock.locked():
                    table_lock.release()
                    if f"table_{table}" in self._lock_timeouts:
                        del self._lock_timeouts[f"table_{table}"]
            except Exception as release_error:
                logger.error(f"Error releasing table lock for {table}: {release_error}")

            try:
                if record_lock.locked():
                    record_lock.release()
                    if recordPath in self._lock_timeouts:
                        del self._lock_timeouts[recordPath]
            except Exception as release_error:
                logger.error(f"Error releasing record lock for {recordPath}: {release_error}")

    def _loadTable(self, table: str) -> List[Dict[str, Any]]:
        """Loads all records from a table folder."""
        # If the table is the system table, load it directly
        if table == self._systemTableName:
            return self._loadSystemTable()

        # If the table is already in the cache, use the cache
        if table in self._tablesCache:
            return self._tablesCache[table]

        # Load metadata first
        metadata = self._loadTableMetadata(table)
        records = []

        # Load each record
        for recordId in metadata["recordIds"]:
            # Skip metadata file
            if recordId == "_metadata":
                continue
            record = self._loadRecord(table, recordId)
            if record:
                records.append(record)

        self._tablesCache[table] = records
        return records

    def _saveTable(self, table: str, data: List[Dict[str, Any]]) -> bool:
        """Saves all records to a table folder"""
        # The system table is handled specially
        if table == self._systemTableName:
            return self._saveSystemTable(data)

        tablePath = self._getTablePath(table)
        try:
            # Ensure table directory exists
            os.makedirs(tablePath, exist_ok=True)

            # Save each record as a separate file
            for record in data:
                if "id" not in record:
                    logger.error(f"Record missing ID in table {table}")
                    continue

                recordPath = self._getRecordPath(table, record["id"])
                with open(recordPath, 'w', encoding='utf-8') as f:
                    json.dump(record, f, indent=2, ensure_ascii=False)

            # Update the cache
            self._tablesCache[table] = data
            logger.debug(f"Successfully saved table {table}")
            return True
        except Exception as e:
            logger.error(f"Error saving table {table}: {str(e)}")
            logger.error(f"Error type: {type(e).__name__}")
            logger.error(f"Error details: {e.__dict__ if hasattr(e, '__dict__') else 'No details available'}")
            return False

    def _applyRecordFilter(self, records: List[Dict[str, Any]], recordFilter: Dict[str, Any] = None) -> List[Dict[str, Any]]:
        """Applies a record filter to the records"""
        if not recordFilter:
            return records

        filteredRecords = []

        for record in records:
            match = True

            for field, value in recordFilter.items():
                # Check if the field exists
                if field not in record:
                    match = False
                    break

                # Convert both values to strings for comparison
                recordValue = str(record[field])
                filterValue = str(value)

                # Direct string comparison
                if recordValue != filterValue:
                    match = False
                    break

            if match:
                filteredRecords.append(record)

        return filteredRecords

    def _registerInitialId(self, table: str, initialId: str) -> bool:
        """Registers the initial ID for a table."""
        try:
            systemData = self._loadSystemTable()

            if table not in systemData:
                systemData[table] = initialId
                success = self._saveSystemTable(systemData)
                if success:
                    logger.info(f"Initial ID {initialId} for table {table} registered")
                return success
            return True  # If already present, this is not an error
        except Exception as e:
            logger.error(f"Error registering the initial ID for table {table}: {e}")
            return False

    def _removeInitialId(self, table: str) -> bool:
        """Removes the initial ID for a table from the system table."""
        try:
            systemData = self._loadSystemTable()

            if table in systemData:
                del systemData[table]
                success = self._saveSystemTable(systemData)
                if success:
                    logger.info(f"Initial ID for table {table} removed from system table")
                return success
            return True  # If not present, this is not an error
        except Exception as e:
            logger.error(f"Error removing initial ID for table {table}: {e}")
            return False

    def _saveTableMetadata(self, table: str, metadata: Dict[str, Any]) -> bool:
        """Saves table metadata to a metadata file.
        NOTE: This method assumes the caller already holds the table lock.
        """
        try:
            # Create metadata file path
            metadataPath = os.path.join(self._getTablePath(table), "_metadata.json")

            # Save metadata (caller should already hold table lock)
            with open(metadataPath, 'w', encoding='utf-8') as f:
                json.dump(metadata, f, indent=2, ensure_ascii=False)

            # Update cache
            self._tableMetadataCache[table] = metadata

            return True

        except Exception as e:
            logger.error(f"Error saving metadata for table {table}: {e}")
            return False

    def updateContext(self, userId: str) -> None:
        """Updates the context of the database connector."""
        if userId is None:
            raise ValueError("userId must be provided")

        self.userId = userId
        logger.info(f"Updated database context: userId={self.userId}")

        # Clear cache to ensure fresh data with new context
        self._tablesCache = {}
        self._tableMetadataCache = {}

    def clearTableCache(self, table: str) -> None:
        """Clears cache for a specific table to ensure fresh data."""
        if table in self._tablesCache:
            del self._tablesCache[table]
            logger.debug(f"Cleared cache for table: {table}")

        if table in self._tableMetadataCache:
            del self._tableMetadataCache[table]
            logger.debug(f"Cleared metadata cache for table: {table}")

    # Public API

    def getTables(self) -> List[str]:
        """Returns a list of all available tables."""
        tables = []

        try:
            for item in os.listdir(self.dbFolder):
                itemPath = os.path.join(self.dbFolder, item)
                if os.path.isdir(itemPath) and not item.startswith('_'):
                    tables.append(item)
        except Exception as e:
            logger.error(f"Error reading the database directory: {e}")

        return tables

    def getFields(self, table: str) -> List[str]:
        """Returns a list of all fields in a table."""
        data = self._loadTable(table)

        if not data:
            return []

        fields = list(data[0].keys()) if data else []

        return fields

    def getSchema(self, table: str, language: str = None) -> Dict[str, Dict[str, Any]]:
        """Returns a schema object for a table with data types and labels."""
        data = self._loadTable(table)

        schema = {}

        if not data:
            return schema

        firstRecord = data[0]

        for field, value in firstRecord.items():
            dataType = type(value).__name__
            label = field

            schema[field] = {
                "type": dataType,
                "label": label
            }

        return schema

    def getRecordset(self, table: str, fieldFilter: List[str] = None, recordFilter: Dict[str, Any] = None) -> List[Dict[str, Any]]:
        """Returns a list of records from a table, filtered by criteria."""
        # If we have specific record IDs in the filter, only load those records
        if recordFilter and "id" in recordFilter:
            recordId = recordFilter["id"]
            record = self._loadRecord(table, recordId)
            if record:
                records = [record]
            else:
                return []
        else:
            # Load all records if no specific ID filter
            records = self._loadTable(table)

            # Apply recordFilter if available
            if recordFilter:
                records = self._applyRecordFilter(records, recordFilter)

        # If fieldFilter is available, reduce the fields
        if fieldFilter and isinstance(fieldFilter, list):
            result = []
            for record in records:
                filteredRecord = {}
                for field in fieldFilter:
                    if field in record:
                        filteredRecord[field] = record[field]
                result.append(filteredRecord)
            return result

        return records

    def recordCreate(self, table: str, record: Dict[str, Any]) -> Dict[str, Any]:
        """Creates a new record in a table."""
        # Ensure record has an ID
        if "id" not in record:
            record["id"] = str(uuid.uuid4())

        # If record is a Pydantic model, convert to dict
        if isinstance(record, BaseModel):
            record = record.model_dump()

        # Save record
        self._saveRecord(table, record["id"], record)
        return record

    def recordModify(self, table: str, recordId: str, record: Dict[str, Any]) -> Dict[str, Any]:
        """Modifies an existing record in a table."""
        # Load existing record
        existingRecord = self._loadRecord(table, recordId)
        if not existingRecord:
            raise ValueError(f"Record {recordId} not found in table {table}")

        # If record is a Pydantic model, convert to dict
        if isinstance(record, BaseModel):
            record = record.model_dump()

        # CRITICAL: Ensure we never modify the ID
        if "id" in record and str(record["id"]) != recordId:
            logger.error(f"Attempted to modify record ID from {recordId} to {record['id']}")
            raise ValueError("Cannot modify record ID - it must match the file name")

        # Update existing record with new data
        existingRecord.update(record)

        # Save updated record
        self._saveRecord(table, recordId, existingRecord)
        return existingRecord

    def recordDelete(self, table: str, recordId: str) -> bool:
        """Deletes a record from the table with atomic metadata operations."""
        recordPath = self._getRecordPath(table, recordId)
        record_lock = self._get_file_lock(recordPath)
        table_lock = self._get_table_lock(table)

        try:
            # Acquire both locks with timeout - record lock first, then table lock
            if not record_lock.acquire(timeout=30):
                raise TimeoutError(f"Could not acquire record lock for {recordPath} within 30 seconds")

            if not table_lock.acquire(timeout=30):
                record_lock.release()
                raise TimeoutError(f"Could not acquire table lock for {table} within 30 seconds")

            # Record lock acquisition time
            self._lock_timeouts[recordPath] = time.time()
            self._lock_timeouts[f"table_{table}"] = time.time()

            # Load metadata
            metadata = self._loadTableMetadata(table)

            if recordId not in metadata["recordIds"]:
                return False

            # Check if it's an initial record
            initialId = self.getInitialId(table)
            if initialId is not None and initialId == recordId:
                self._removeInitialId(table)
                logger.info(f"Initial ID {recordId} for table {table} has been removed from the system table")

            # Delete the record file
            if os.path.exists(recordPath):
                os.remove(recordPath)

                # ATOMIC: Update metadata while holding both locks
                metadata["recordIds"].remove(recordId)
                self._saveTableMetadata(table, metadata)

                # Update table cache if it exists (also protected by table lock)
                if table in self._tablesCache:
                    self._tablesCache[table] = [r for r in self._tablesCache[table] if r.get("id") != recordId]

                return True
            else:
                return False

        except Exception as e:
            logger.error(f"Error deleting record {recordId} from table {table}: {e}")
            return False

        finally:
            # ALWAYS release both locks, even on error
            try:
                if table_lock.locked():
                    table_lock.release()
                    if f"table_{table}" in self._lock_timeouts:
                        del self._lock_timeouts[f"table_{table}"]
            except Exception as release_error:
                logger.error(f"Error releasing table lock for {table}: {release_error}")

            try:
                if record_lock.locked():
                    record_lock.release()
                    if recordPath in self._lock_timeouts:
                        del self._lock_timeouts[recordPath]
            except Exception as release_error:
                logger.error(f"Error releasing record lock for {recordPath}: {release_error}")

    def getInitialId(self, table_or_model) -> Optional[str]:
        """Returns the initial ID for a table."""
        # Handle both string table names (legacy) and model classes (new)
        if isinstance(table_or_model, str):
            table = table_or_model
        else:
            table = table_or_model.__name__

        systemData = self._loadSystemTable()
        initialId = systemData.get(table)
        logger.debug(f"Initial ID for table '{table}': {initialId}")
        return initialId
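For reference, this removed connector exposed a small CRUD surface (recordCreate / getRecordset / recordModify / recordDelete) over a folder-per-table, file-per-record JSON layout. A sketch of how it was typically driven; the paths and field names here are illustrative:

# Each table is a folder under <dbHost>/<dbDatabase>; each record a JSON file.
db = DatabaseConnector(dbHost="/srv/gateway/data", dbDatabase="app", userId="user-1")

created = db.recordCreate("tasks", {"title": "Ship release", "done": False})
taskId = created["id"]  # UUID assigned if the caller did not provide one

open_tasks = db.getRecordset("tasks", recordFilter={"done": False})

db.recordModify("tasks", taskId, {"done": True})
db.recordDelete("tasks", taskId)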
File diff suppressed because it is too large
88  modules/connectors/connectorMessagingEmail.py  Normal file
@@ -0,0 +1,88 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Azure Communication Services Email Connector
Handles email sending via Azure Communication Services
"""

import logging
from typing import Optional
from azure.communication.email import EmailClient
from modules.shared.configuration import APP_CONFIG

logger = logging.getLogger(__name__)


class ConnectorMessagingEmail:
    """
    Azure Communication Services Email connector.
    Handles email sending.
    """

    def __init__(self):
        """
        Initialize Azure Communication Services Email client using APP_CONFIG.
        """
        try:
            connectionString = APP_CONFIG.get("MESSAGING_ACS_CONNECTION_STRING")
            senderEmail = APP_CONFIG.get("MESSAGING_ACS_SENDER_EMAIL")

            if not connectionString or not senderEmail:
                logger.warning("Azure Communication Services credentials not configured for email")
                self._client = None
                self._senderEmail = None
                return

            self._client = EmailClient.from_connection_string(connectionString)
            self._senderEmail = senderEmail

            logger.info("Azure Communication Services Email client initialized successfully")

        except Exception as e:
            logger.error(f"Failed to initialize Azure Communication Services Email client: {e}")
            self._client = None
            self._senderEmail = None

    def send(self, recipient: str, subject: str, message: str) -> bool:
        """
        Send an email via Azure Communication Services.

        Args:
            recipient: Recipient email address
            subject: Email subject
            message: Email message content (can be HTML)

        Returns:
            bool: True if successful, False otherwise
        """
        if not self._client or not self._senderEmail:
            logger.error("Azure Communication Services Email client not initialized")
            return False

        try:
            messageData = {
                "senderAddress": self._senderEmail,
                "recipients": {
                    "to": [{"address": recipient}]
                },
                "content": {
                    "subject": subject,
                    "html": message
                }
            }

            # Try both API versions for compatibility
            try:
                poller = self._client.begin_send(messageData)
                poller.result()
            except AttributeError:
                poller = self._client.begin_send_message(messageData)
                poller.result()

            logger.info(f"Email sent successfully to {recipient}")
            return True

        except Exception as e:
            logger.error(f"Failed to send email to {recipient}: {e}")
            return False
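A minimal usage sketch of the connector; the recipient address is a placeholder, and the connection string and sender come from APP_CONFIG as shown above:

emailConnector = ConnectorMessagingEmail()

ok = emailConnector.send(
    recipient="user@example.com",
    subject="Welcome to PowerOn",
    message="<h1>Hello!</h1><p>Your account is ready.</p>",  # HTML body
)
if not ok:
    # send() returns False instead of raising; check the logs for the cause.
    pass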
80
modules/connectors/connectorMessagingSms.py
Normal file
80
modules/connectors/connectorMessagingSms.py
Normal file
|
|
@ -0,0 +1,80 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Twilio SMS Connector
Handles SMS sending via Twilio
"""

import logging
from typing import Optional
from modules.shared.configuration import APP_CONFIG

logger = logging.getLogger(__name__)


class ConnectorMessagingSms:
    """
    Twilio SMS connector.
    Handles SMS sending.
    """

    def __init__(self):
        """
        Initialize Twilio SMS client using APP_CONFIG.
        """
        try:
            accountSid = APP_CONFIG.get("MESSAGING_TWILIO_ACCOUNT_SID")
            authToken = APP_CONFIG.get("MESSAGING_TWILIO_AUTH_TOKEN")
            fromNumber = APP_CONFIG.get("MESSAGING_TWILIO_FROM_NUMBER")

            if not accountSid or not authToken:
                logger.warning("Twilio credentials not configured for SMS")
                self._client = None
                self._fromNumber = None
                return

            try:
                from twilio.rest import Client
                self._client = Client(accountSid, authToken)
                self._fromNumber = fromNumber
                logger.info("Twilio SMS client initialized successfully")
            except ImportError:
                logger.error("Twilio library not installed. Please install with: pip install twilio")
                self._client = None
                self._fromNumber = None

        except Exception as e:
            logger.error(f"Failed to initialize Twilio SMS client: {e}")
            self._client = None
            self._fromNumber = None

    def send(self, recipient: str, subject: str, message: str) -> bool:
        """
        Send an SMS via Twilio.

        Args:
            recipient: Recipient phone number (with country code, e.g., '+41791234567')
            subject: Ignored (SMS has no subject)
            message: SMS message content

        Returns:
            bool: True if successful, False otherwise
        """
        if not self._client or not self._fromNumber:
            logger.error("Twilio SMS client not initialized")
            return False

        try:
            messageObj = self._client.messages.create(
                body=message,
                from_=self._fromNumber,
                to=recipient
            )

            logger.info(f"SMS sent successfully to {recipient}. SID: {messageObj.sid}")
            return True

        except Exception as e:
            logger.error(f"Failed to send SMS to {recipient}: {e}")
            return False
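Because both messaging connectors expose the same send(recipient, subject, message) -> bool signature, callers can treat them interchangeably. A minimal sketch (the recipient number is the placeholder from the docstring above; subject is accepted for interface symmetry but ignored for SMS):

smsConnector = ConnectorMessagingSms()
# Recipient must be in E.164 format; the subject argument is ignored by SMS.
smsConnector.send(recipient="+41791234567", subject="", message="Your code is 123456")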
525
modules/connectors/connectorOerebWfs.py
Normal file
@ -0,0 +1,525 @@
"""
ÖREB WFS Connector

This connector handles interactions with ÖREB (Öffentlich-rechtliche Eigentumsbeschränkungen)
WFS services for zone information retrieval.

ÖREB provides zoning information (Bauzonen) through WFS services.
"""

import logging
from typing import Dict, List, Any, Optional
import aiohttp
import xml.etree.ElementTree as ET
from shapely.geometry import Polygon

logger = logging.getLogger(__name__)


class OerebWfsConnector:
    """
    Connector for ÖREB WFS services.

    Provides methods for:
    - Querying zone information (Bauzonen) by parcel geometry
    - Retrieving zoning data from canton-specific WFS services
    """

    def __init__(
        self,
        timeout: int = 10,
        max_retries: int = 3,
        retry_delay: float = 1.0
    ):
        """
        Initialize ÖREB WFS connector.

        Args:
            timeout: Request timeout in seconds
            max_retries: Maximum number of retry attempts
            retry_delay: Initial retry delay in seconds (exponential backoff)
        """
        self.timeout = aiohttp.ClientTimeout(total=timeout)
        self.max_retries = max_retries
        self.retry_delay = retry_delay
        self._wfs_cache: Dict[str, List[Dict[str, Any]]] = {}  # Cache for WFS queries by bbox

        logger.info("ÖREB WFS Connector initialized")

    def _get_oereb_wfs_url(self, canton: str) -> Optional[str]:
        """
        Get ÖREB WFS service URL for a given canton.

        Args:
            canton: Canton abbreviation (e.g., "ZH", "BE")

        Returns:
            WFS service URL or None if canton not supported
        """
        oereb_wfs_urls = {
            "ZH": "https://maps.zh.ch/wfs/OerebKatasterZHWFS",
        }
        return oereb_wfs_urls.get(canton.upper())

    def _geometry_to_shapely_polygon(self, geometry: Dict[str, Any]) -> Optional[Polygon]:
        """
        Convert parcel geometry (ESRI rings or GeoJSON coordinates) to Shapely Polygon.

        Args:
            geometry: Geometry dictionary (ESRI rings or GeoJSON coordinates)

        Returns:
            Shapely Polygon or None if invalid
        """
        try:
            # Handle ESRI geometry format (rings)
            if "rings" in geometry:
                rings = geometry.get("rings", [])
                if not rings or not rings[0]:
                    return None

                # Use the first ring (exterior) for the polygon
                exterior_ring = rings[0]
                if len(exterior_ring) < 3:
                    return None

                # Ensure polygon is closed
                coords = list(exterior_ring)
                if coords[0] != coords[-1]:
                    coords.append(coords[0])

                return Polygon(coords)

            # Handle GeoJSON format (coordinates)
            elif "coordinates" in geometry:
                coords = geometry.get("coordinates", [])
                if not coords:
                    return None

                # Handle Polygon coordinates: [[[x1,y1], [x2,y2], ...]]
                # Flatten to get the exterior ring
                def extract_exterior(coord_list, depth=0):
                    if depth == 0 and isinstance(coord_list, list) and len(coord_list) > 0:
                        # First level might be array of rings, take first one
                        if isinstance(coord_list[0], list) and len(coord_list[0]) > 0:
                            if isinstance(coord_list[0][0], list):
                                # This is Polygon format: [[[x,y],...]]
                                return extract_exterior(coord_list[0], depth + 1)
                            elif isinstance(coord_list[0][0], (int, float)):
                                # This is already a ring: [[x,y],...]
                                return coord_list[0]
                    elif depth == 1 and isinstance(coord_list, list) and len(coord_list) > 0:
                        if isinstance(coord_list[0], (int, float)):
                            return coord_list
                        elif isinstance(coord_list[0], list):
                            return coord_list
                    return coord_list

                exterior_coords = extract_exterior(coords)
                if not exterior_coords or len(exterior_coords) < 3:
                    return None

                # Ensure polygon is closed
                coords_list = list(exterior_coords)
                if coords_list[0] != coords_list[-1]:
                    coords_list.append(coords_list[0])

                return Polygon(coords_list)

        except Exception as e:
            logger.debug(f"Error converting geometry to Shapely Polygon: {e}")

        return None

    def _parse_gml_geometry(self, feature_elem: ET.Element) -> Optional[Polygon]:
        """
        Parse GML geometry from WFS feature element and convert to Shapely Polygon.

        Args:
            feature_elem: XML element containing the feature

        Returns:
            Shapely Polygon or None if geometry not found or invalid
        """
        try:
            # Common GML namespaces
            namespaces = {
                'gml': 'http://www.opengis.net/gml',
                'gml3': 'http://www.opengis.net/gml/3.2',
                'gml32': 'http://www.opengis.net/gml/3.2'
            }

            # Try to find polygon geometry
            polygon_elem = None
            for ns_prefix, ns_url in namespaces.items():
                # Try different GML polygon element names
                for tag_name in ['Polygon', 'polygon', 'PolygonProperty', 'geometryProperty']:
                    polygon_elem = feature_elem.find(f'.//{{{ns_url}}}{tag_name}')
                    if polygon_elem is not None:
                        break
                    # Also try without namespace prefix
                    polygon_elem = feature_elem.find(f'.//{tag_name}')
                    if polygon_elem is not None:
                        break
                if polygon_elem is not None:
                    break

            if polygon_elem is None:
                # Try to find any geometry element
                for ns_prefix, ns_url in namespaces.items():
                    polygon_elem = feature_elem.find(f'.//{{{ns_url}}}*')
                    if polygon_elem is not None and 'polygon' in polygon_elem.tag.lower():
                        break

            if polygon_elem is None:
                return None

            # Extract coordinates from GML
            # GML Polygon typically has exterior ring with posList or pos elements
            coords = []

            # Try posList (most common in GML 3.2)
            for ns_prefix, ns_url in namespaces.items():
                pos_list = polygon_elem.find(f'.//{{{ns_url}}}posList')
                if pos_list is not None and pos_list.text:
                    # posList format: "x1 y1 x2 y2 x3 y3 ..."
                    coord_strings = pos_list.text.strip().split()
                    for i in range(0, len(coord_strings) - 1, 2):
                        if i + 1 < len(coord_strings):
                            x = float(coord_strings[i])
                            y = float(coord_strings[i + 1])
                            coords.append((x, y))
                    break

            # If no posList, try pos elements
            if not coords:
                for ns_prefix, ns_url in namespaces.items():
                    pos_elems = polygon_elem.findall(f'.//{{{ns_url}}}pos')
                    if pos_elems:
                        for pos in pos_elems:
                            if pos.text:
                                parts = pos.text.strip().split()
                                if len(parts) >= 2:
                                    x = float(parts[0])
                                    y = float(parts[1])
                                    coords.append((x, y))
                        break

            # If still no coords, try coordinates element (GML 2)
            if not coords:
                for ns_prefix, ns_url in namespaces.items():
                    coords_elem = polygon_elem.find(f'.//{{{ns_url}}}coordinates')
                    if coords_elem is not None and coords_elem.text:
                        # GML 2 coordinates format: "x1,y1 x2,y2 ..." or "x1,y1,z1 x2,y2,z2 ..."
                        coord_strings = coords_elem.text.strip().split()
                        for coord_str in coord_strings:
                            parts = coord_str.split(',')
                            if len(parts) >= 2:
                                x = float(parts[0])
                                y = float(parts[1])
                                coords.append((x, y))
                        break

            if len(coords) < 3:
                return None

            # Ensure polygon is closed
            if coords[0] != coords[-1]:
                coords.append(coords[0])

            return Polygon(coords)

        except Exception as e:
            logger.debug(f"Error parsing GML geometry: {e}")
            return None

    def _calculate_bbox_from_geometry(self, geometry: Dict[str, Any]) -> Optional[str]:
        """
        Calculate bounding box from geometry for WFS queries.

        Args:
            geometry: Geometry dictionary (ESRI rings or GeoJSON coordinates)

        Returns:
            Bounding box string in format "min_x,min_y,max_x,max_y" or None if invalid
        """
        try:
            # Handle ESRI geometry format (rings)
            if "rings" in geometry:
                rings = geometry.get("rings", [])
                if not rings or not rings[0]:
                    return None

                # Flatten all coordinates from all rings
                all_coords = []
                for ring in rings:
                    all_coords.extend(ring)

                if not all_coords:
                    return None

                # Calculate bbox
                x_coords = [coord[0] for coord in all_coords]
                y_coords = [coord[1] for coord in all_coords]

                min_x = min(x_coords)
                min_y = min(y_coords)
                max_x = max(x_coords)
                max_y = max(y_coords)

                return f"{min_x},{min_y},{max_x},{max_y}"

            # Handle GeoJSON format (coordinates)
            elif "coordinates" in geometry:
                coords = geometry.get("coordinates", [])
                if not coords:
                    return None

                # Flatten coordinates based on geometry type
                def flatten_coords(coord_list, depth=0):
                    if depth < 2:
                        result = []
                        for item in coord_list:
                            if isinstance(item, (int, float)):
                                return coord_list
                            result.extend(flatten_coords(item, depth + 1))
                        return result
                    return coord_list

                flat_coords = flatten_coords(coords)
                if not flat_coords or len(flat_coords) < 2:
                    return None

                x_coords = [flat_coords[i] for i in range(0, len(flat_coords), 2)]
                y_coords = [flat_coords[i+1] for i in range(0, len(flat_coords)-1, 2)]

                min_x = min(x_coords)
                min_y = min(y_coords)
                max_x = max(x_coords)
                max_y = max(y_coords)

                return f"{min_x},{min_y},{max_x},{max_y}"

        except Exception as e:
            logger.debug(f"Error calculating bbox from geometry: {e}")

        return None

    async def _query_wfs_get_feature(
        self,
        wfs_url: str,
        type_name: str,
        bbox: str,
        srs: str = "EPSG:2056"
    ) -> List[Dict[str, Any]]:
        """
        Query WFS GetFeature to retrieve zone features within a bounding box.

        Args:
            wfs_url: WFS service URL
            type_name: Feature type name (e.g., "nutzungsplanung")
            bbox: Bounding box string "min_x,min_y,max_x,max_y"
            srs: Spatial reference system (default: EPSG:2056 for LV95)

        Returns:
            List of feature dictionaries with properties and attributes
        """
        # Only use WFS 1.1.0 (we know this works)
        params = {
            "service": "WFS",
            "version": "1.1.0",
            "request": "GetFeature",
            "typeName": type_name,
            "bbox": bbox,
            "srsName": srs
        }

        logger.debug(f"Querying WFS GetFeature: {wfs_url} with typeName={type_name}, bbox={bbox}")

        try:
            async with aiohttp.ClientSession(timeout=self.timeout) as session:
                async with session.get(wfs_url, params=params) as response:
                    if response.status != 200:
                        logger.debug(f"WFS GetFeature returned status {response.status}")
                        return []

                    # Parse XML/GML response
                    xml_content = await response.text()

                    try:
                        root = ET.fromstring(xml_content)

                        features = []
                        members = root.findall('.//{http://www.opengis.net/gml}featureMember') or \
                                  root.findall('.//featureMember')

                        for member in members:
                            attrs = {}

                            # Find feature element
                            feature_elem = member
                            for child in member:
                                if child.tag and ('nutzung' in child.tag.lower() or 'plan' in child.tag.lower()):
                                    feature_elem = child
                                    break

                            # Extract attributes
                            for elem in feature_elem.iter():
                                if elem.tag and elem.text and elem.text.strip():
                                    tag_lower = elem.tag.lower()
                                    if any(term in tag_lower for term in [
                                        'pos', 'coordinates', 'point', 'polygon', 'linestring',
                                        'geometry', 'boundedby', 'envelope', 'gml'
                                    ]):
                                        continue

                                    tag_name = elem.tag.split('}')[-1] if '}' in elem.tag else elem.tag
                                    if ':' in tag_name:
                                        tag_name = tag_name.split(':')[-1]

                                    if tag_name not in attrs:
                                        attrs[tag_name] = elem.text.strip()

                            # Parse geometry from GML
                            geometry_polygon = self._parse_gml_geometry(feature_elem)

                            if attrs:
                                feature_dict = {"properties": attrs, "attributes": attrs}
                                if geometry_polygon:
                                    feature_dict["geometry"] = geometry_polygon
                                features.append(feature_dict)

                        return features

                    except ET.ParseError as e:
                        logger.debug(f"Failed to parse WFS XML response: {e}")
                        return []
                    except Exception as e:
                        logger.debug(f"Error parsing WFS XML: {e}")
                        return []

        except Exception as e:
            logger.debug(f"WFS GetFeature query failed: {e}")
            return []

    async def query_zone_layer(
        self,
        egrid: str,
        x: float,
        y: float,
        canton: Optional[str] = None,
        geometry: Optional[Dict[str, Any]] = None
    ) -> List[Dict[str, Any]]:
        """
        Query zone information using ÖREB WFS service.

        Returns only zones that contain the parcel based on the parcel geometry.

        Args:
            egrid: EGRID identifier (not currently used but kept for API compatibility)
            x: X coordinate (LV95) - not used but kept for compatibility
            y: Y coordinate (LV95) - not used but kept for compatibility
            canton: Canton abbreviation (e.g., "ZH", "BE")
            geometry: Parcel geometry dictionary (ESRI rings or GeoJSON coordinates)

        Returns:
            List of zone dictionaries with layerBodId and attributes, or empty list if not found
        """
        if not canton or not geometry:
            return []

        wfs_url = self._get_oereb_wfs_url(canton)
        if not wfs_url:
            return []

        try:
            bbox = self._calculate_bbox_from_geometry(geometry)
            if not bbox:
                return []

            # Check cache
            cache_key = f"{wfs_url}:{bbox}"
            if cache_key in self._wfs_cache:
                cached_features = self._wfs_cache[cache_key]
            else:
                cached_features = await self._query_wfs_get_feature(wfs_url, "nutzungsplanung", bbox)
                self._wfs_cache[cache_key] = cached_features

            if not cached_features:
                return []

            # Convert parcel geometry to Shapely Polygon for spatial validation
            parcel_polygon = self._geometry_to_shapely_polygon(geometry)
            if not parcel_polygon:
                logger.debug("Could not convert parcel geometry to Shapely Polygon")
                # Fallback to first zone if geometry conversion fails
                for feature in cached_features:
                    attrs = feature.get("properties", feature.get("attributes", {}))
                    typ_gde_abkuerzung = attrs.get("typ_gde_abkuerzung")
                    if typ_gde_abkuerzung:
                        return [{
                            "layerBodId": "oereb_wfs",
                            "attributes": {"typ_gde_abkuerzung": typ_gde_abkuerzung}
                        }]
                return []

            # Find the zone that actually contains or intersects the parcel.
            # Since a parcel is always in exactly one zone, we check for containment first,
            # then find the zone with the largest intersection area if no perfect containment is found
            containing_zone = None
            best_intersecting_zone = None
            best_intersection_area = 0.0

            for feature in cached_features:
                attrs = feature.get("properties", feature.get("attributes", {}))
                typ_gde_abkuerzung = attrs.get("typ_gde_abkuerzung")

                if not typ_gde_abkuerzung:
                    continue

                zone_geometry = feature.get("geometry")
                if not zone_geometry:
                    # If geometry not parsed, skip spatial check for this feature
                    # But keep it as fallback if no geometry-based match is found
                    if not best_intersecting_zone:
                        best_intersecting_zone = feature
                    continue

                try:
                    # Check if zone contains the parcel (most precise)
                    if zone_geometry.contains(parcel_polygon):
                        containing_zone = feature
                        break  # Found perfect match, stop searching

                    # Check if zone intersects the parcel (for border cases)
                    if zone_geometry.intersects(parcel_polygon):
                        # Calculate intersection area to find the best match
                        intersection = zone_geometry.intersection(parcel_polygon)
                        if not intersection.is_empty:
                            intersection_area = intersection.area
                            # Keep the zone with the largest intersection area
                            if intersection_area > best_intersection_area:
                                best_intersection_area = intersection_area
                                best_intersecting_zone = feature

                except Exception as e:
                    logger.debug(f"Error checking spatial relationship: {e}")
                    # If spatial check fails, keep as fallback
                    if not best_intersecting_zone:
                        best_intersecting_zone = feature

            # Return the containing zone if found, otherwise the best intersecting zone
            selected_feature = containing_zone or best_intersecting_zone
            if selected_feature:
                attrs = selected_feature.get("properties", selected_feature.get("attributes", {}))
                typ_gde_abkuerzung = attrs.get("typ_gde_abkuerzung")
                if typ_gde_abkuerzung:
                    return [{
                        "layerBodId": "oereb_wfs",
                        "attributes": {"typ_gde_abkuerzung": typ_gde_abkuerzung}
                    }]

            return []

        except Exception:
            return []
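The zone-selection rule above (containment first, otherwise largest intersection area) can be seen in isolation with plain Shapely objects. This standalone sketch uses made-up LV95-like coordinates purely for demonstration:

from shapely.geometry import Polygon

parcel = Polygon([(2683000, 1247000), (2683010, 1247000),
                  (2683010, 1247010), (2683000, 1247010)])
zone_a = Polygon([(2682990, 1246990), (2683020, 1246990),
                  (2683020, 1247020), (2682990, 1247020)])  # fully contains the parcel
zone_b = Polygon([(2683005, 1246990), (2683030, 1246990),
                  (2683030, 1247020), (2683005, 1247020)])  # overlaps only half of it

assert zone_a.contains(parcel)        # containment wins immediately, loop breaks
overlap = zone_b.intersection(parcel)
print(overlap.area, parcel.area)      # 50.0 100.0 -> zone_b could only win on area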
185
modules/connectors/connectorPreprocessor.py
Normal file
@ -0,0 +1,185 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Preprocessor connector for executing SQL queries via HTTP API.
Connects to the remote preprocessing service that hosts the SQLite database.
"""

import logging
import httpx
from typing import Optional
from modules.shared.configuration import APP_CONFIG

logger = logging.getLogger(__name__)


class PreprocessorConnector:
    """
    Connector for executing SQL queries via the preprocessing API.
    Makes HTTP POST requests to the remote preprocessing service.
    """

    def __init__(self):
        """Initialize the preprocessor connector."""
        self.api_key = APP_CONFIG.get("PP_QUERY_API_KEY")
        self.base_url = APP_CONFIG.get("PP_QUERY_BASE_URL")

        if not self.api_key:
            logger.warning("PP_QUERY_API_KEY not found in configuration")
        if not self.base_url:
            logger.warning("PP_QUERY_BASE_URL not found in configuration")

        # HTTP client with timeout
        self.http_client = httpx.AsyncClient(
            timeout=30.0,
            headers={
                "Content-Type": "application/json"
            }
        )

        logger.info("PreprocessorConnector initialized")

    async def executeQuery(self, sql_query: str, return_json: bool = False):
        """
        Execute a SQL query via the preprocessing API.

        Args:
            sql_query: SQL SELECT query to execute
            return_json: If True, returns a dict with 'text' and 'data' keys. If False, returns a formatted string.

        Returns:
            If return_json=False: Formatted result string with query results
            If return_json=True: Dict with 'text' (formatted string) and 'data' (raw JSON data list)

        Note:
            Errors are not raised to the caller: validation failures and
            API/network errors are returned as formatted error strings
            (or in the 'text' key when return_json=True).
        """
        try:
            # Validate query
            validation_error = self._validateQuery(sql_query)
            if validation_error:
                if return_json:
                    return {"text": validation_error, "data": []}
                return validation_error

            # Check configuration
            if not self.api_key:
                error_msg = "Error: PP_QUERY_API_KEY not configured"
                logger.error(error_msg)
                if return_json:
                    return {"text": error_msg, "data": []}
                return error_msg
            if not self.base_url:
                error_msg = "Error: PP_QUERY_BASE_URL not configured"
                logger.error(error_msg)
                if return_json:
                    return {"text": error_msg, "data": []}
                return error_msg

            # Make HTTP POST request to preprocessing API
            logger.info(f"Executing SQL query via preprocessing API: {self.base_url} (query: {sql_query[:100]}...)")

            response = await self.http_client.post(
                self.base_url,
                json={"query": sql_query},
                headers={
                    "X-DB-API-Key": self.api_key
                }
            )

            response.raise_for_status()
            result = response.json()

            # Parse response
            if not result.get("success"):
                error_message = result.get("message", "Unknown error")
                error_text = f"Query failed: {error_message}"
                if return_json:
                    return {"text": error_text, "data": []}
                return error_text

            # Format results
            data = result.get("data", [])
            row_count = result.get("row_count", 0)

            # Limit to 50 rows for display
            display_data = data[:50]

            # Format results as string
            if not display_data:
                result_text = f"Query executed successfully. Returned {row_count} rows (no data)."
                if return_json:
                    return {"text": result_text, "data": data}
                return result_text

            # Format each row
            results = []
            for row in display_data:
                results.append(str(row))

            result_text = (
                f"Query executed successfully. Returned {row_count} rows "
                f"(showing first {min(row_count, 50)}):\n"
                + "\n".join(results)
            )

            if return_json:
                return {"text": result_text, "data": data}
            return result_text

        except httpx.HTTPStatusError as e:
            error_msg = f"API error: HTTP {e.response.status_code}"
            try:
                error_text = e.response.text
                error_msg += f" - {error_text}"
            except Exception:
                pass
            logger.error(f"Preprocessing API HTTP error: {error_msg}")
            return error_msg

        except httpx.RequestError as e:
            error_msg = f"Network error: {str(e)}"
            logger.error(f"Preprocessing API network error: {error_msg}")
            return error_msg

        except Exception as e:
            error_msg = f"Error executing query: {str(e)}"
            logger.error(f"Preprocessing API error: {error_msg}")
            return error_msg

    def _validateQuery(self, sql_query: str) -> Optional[str]:
        """
        Validate a SQL query to ensure only SELECT queries are allowed.

        Args:
            sql_query: SQL query to validate

        Returns:
            Error message if validation fails, None if valid
        """
        if not sql_query or not isinstance(sql_query, str):
            return "Error: SQL query must be a non-empty string"

        query_upper = sql_query.strip().upper()

        # Check if query starts with SELECT
        if not query_upper.startswith("SELECT"):
            return "Error: Only SELECT queries are allowed. Query must start with SELECT."

        # Check for forbidden keywords. Substring matching is deliberately
        # conservative: it also rejects identifiers that merely contain a
        # keyword (e.g. a column named CREATED_ON contains CREATE).
        forbidden_keywords = [
            "DROP", "CREATE", "ALTER", "INSERT", "UPDATE",
            "DELETE", "PRAGMA", "ATTACH", "DETACH", "TRUNCATE"
        ]

        for keyword in forbidden_keywords:
            if keyword in query_upper:
                return f"Error: Query contains forbidden keyword '{keyword}'. Only SELECT queries are allowed."

        return None

    async def close(self):
        """Close the HTTP client."""
        await self.http_client.aclose()
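A minimal end-to-end sketch of the connector above. The table name and query are hypothetical, and a real call requires PP_QUERY_API_KEY and PP_QUERY_BASE_URL to be present in APP_CONFIG:

import asyncio

async def main():
    pp = PreprocessorConnector()
    try:
        # Anything but a plain SELECT is rejected locally, before any HTTP traffic.
        print(pp._validateQuery("SELECT 1; DROP TABLE parcels"))
        # -> "Error: Query contains forbidden keyword 'DROP'. Only SELECT queries are allowed."

        result = await pp.executeQuery("SELECT * FROM parcels LIMIT 5", return_json=True)
        print(result["text"])   # human-readable summary, display capped at 50 rows
        print(result["data"])   # full raw row list from the API
    finally:
        await pp.close()

asyncio.run(main())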
84
modules/connectors/connectorProviderBase.py
Normal file
@ -0,0 +1,84 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Abstract base classes for the Provider-Connector architecture (1:n).

One ProviderConnector per vendor (e.g. MsftConnector, GoogleConnector).
Each ProviderConnector exposes n ServiceAdapters (e.g. SharepointAdapter, OutlookAdapter).
All ServiceAdapters share the same access token from the UserConnection.
"""

from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import List, Optional, Union


@dataclass
class DownloadResult:
    """Rich return type for ServiceAdapter.download() when metadata is available."""
    data: bytes = field(default=b"", repr=False)
    fileName: str = ""
    mimeType: str = ""


class ServiceAdapter(ABC):
    """Standardized operations for a single service of a provider."""

    @abstractmethod
    async def browse(
        self,
        path: str,
        filter: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> list:
        """List items (files/folders) at the given path.

        ``limit`` is an optional upper bound for the number of returned entries.
        Adapters that talk to paginated APIs should keep paging until either
        the API is exhausted OR ``limit`` is reached. ``None`` means "use the
        adapter's sensible default" (NOT "unlimited") so an over-eager caller
        cannot accidentally pull millions of records. Adapters that have no
        pagination (single page result) may ignore this parameter.
        """
        ...

    @abstractmethod
    async def download(self, path: str) -> Union[bytes, DownloadResult]:
        """Download a file. Return bytes or DownloadResult with metadata."""
        ...

    @abstractmethod
    async def upload(self, path: str, data: bytes, fileName: str) -> dict:
        """Upload a file to the given path. Returns metadata of the created entry."""
        ...

    @abstractmethod
    async def search(
        self,
        query: str,
        path: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> list:
        """Search for items matching the query.

        See :meth:`browse` for the semantics of ``limit``.
        """
        ...


class ProviderConnector(ABC):
    """One connector per provider. Manages a UserConnection + token.
    Provides access to n services of the provider."""

    def __init__(self, connection, accessToken: str):
        self.connection = connection
        self.accessToken = accessToken

    @abstractmethod
    def getAvailableServices(self) -> List[str]:
        """Which services does this provider offer?"""
        ...

    @abstractmethod
    def getServiceAdapter(self, service: str) -> ServiceAdapter:
        """Return the ServiceAdapter for a specific service."""
        ...
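The ``limit`` contract in ``browse`` is easiest to see in a concrete adapter. This hypothetical in-memory example (no real provider API involved) shows the keep-paging-until-limit pattern and a non-None default:

class DemoAdapter(ServiceAdapter):
    """Hypothetical adapter over a pre-paged in-memory source, for illustration only."""

    _DEFAULT_LIMIT = 200  # "None means sensible default", per the docstring above

    def __init__(self, pages: list):
        self._pages = pages  # each page is a list of entries

    async def browse(self, path: str, filter: Optional[str] = None,
                     limit: Optional[int] = None) -> list:
        effective = limit if limit is not None else self._DEFAULT_LIMIT
        items: list = []
        for page in self._pages:            # keep paging ...
            items.extend(page)
            if len(items) >= effective:     # ... until the limit is reached
                return items[:effective]
        return items                        # source exhausted first

    async def download(self, path: str) -> bytes:
        return b""

    async def upload(self, path: str, data: bytes, fileName: str) -> dict:
        return {}

    async def search(self, query: str, path: Optional[str] = None,
                     limit: Optional[int] = None) -> list:
        return await self.browse(path or "/", filter=query, limit=limit)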
100
modules/connectors/connectorResolver.py
Normal file
@ -0,0 +1,100 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""ConnectorResolver -- resolves a connectionId to the correct ProviderConnector and ServiceAdapter.

Registry maps authority values to ProviderConnector classes.
The resolver loads the UserConnection, obtains a fresh token via SecurityService,
and instantiates the appropriate connector.
"""

import logging
from typing import Dict, Any, Type, Optional

from modules.connectors.connectorProviderBase import ProviderConnector, ServiceAdapter

logger = logging.getLogger(__name__)


class ConnectorResolver:
    """Resolves connectionId → ProviderConnector (with fresh token) → ServiceAdapter."""

    _providerRegistry: Dict[str, Type[ProviderConnector]] = {}

    def __init__(self, securityService, dbInterface):
        """
        Args:
            securityService: SecurityService instance (for getFreshToken)
            dbInterface: DB interface with getUserConnection(connectionId)
        """
        self._security = securityService
        self._db = dbInterface
        self._ensureRegistered()

    def _ensureRegistered(self):
        """Lazy-register known providers on first instantiation."""
        if ConnectorResolver._providerRegistry:
            return
        try:
            from modules.connectors.providerMsft.connectorMsft import MsftConnector
            ConnectorResolver._providerRegistry["msft"] = MsftConnector
        except ImportError:
            logger.warning("MsftConnector not available")

        try:
            from modules.connectors.providerGoogle.connectorGoogle import GoogleConnector
            ConnectorResolver._providerRegistry["google"] = GoogleConnector
        except ImportError:
            logger.debug("GoogleConnector not available (stub)")

        try:
            from modules.connectors.providerFtp.connectorFtp import FtpConnector
            ConnectorResolver._providerRegistry["local:ftp"] = FtpConnector
        except ImportError:
            logger.debug("FtpConnector not available (stub)")

        try:
            from modules.connectors.providerClickup.connectorClickup import ClickupConnector
            ConnectorResolver._providerRegistry["clickup"] = ClickupConnector
        except ImportError:
            logger.warning("ClickupConnector not available")

    async def resolve(self, connectionId: str) -> ProviderConnector:
        """Resolve connectionId to a ProviderConnector with a fresh access token."""
        connection = await self._loadConnection(connectionId)
        if not connection:
            raise ValueError(f"UserConnection not found: {connectionId}")

        authority = getattr(connection, "authority", None)
        if not authority:
            raise ValueError(f"Connection {connectionId} has no authority")

        authorityStr = authority.value if hasattr(authority, "value") else str(authority)
        providerClass = self._providerRegistry.get(authorityStr)
        if not providerClass:
            raise ValueError(f"No ProviderConnector registered for authority: {authorityStr}")

        token = self._security.getFreshToken(connectionId)
        if not token or not token.tokenAccess:
            raise ValueError(f"No valid token for connection {connectionId}")

        return providerClass(connection, token.tokenAccess)

    async def resolveService(self, connectionId: str, service: str) -> ServiceAdapter:
        """Resolve connectionId + service name to a concrete ServiceAdapter."""
        provider = await self.resolve(connectionId)
        available = provider.getAvailableServices()
        if service not in available:
            raise ValueError(f"Service '{service}' not available. Options: {available}")
        return provider.getServiceAdapter(service)

    async def _loadConnection(self, connectionId: str) -> Optional[Any]:
        """Load UserConnection from DB."""
        try:
            if hasattr(self._db, "getUserConnection"):
                return self._db.getUserConnection(connectionId)
            if hasattr(self._db, "loadRecord"):
                from modules.datamodels.datamodelUam import UserConnection
                return self._db.loadRecord(UserConnection, connectionId)
        except Exception as e:
            logger.error(f"Failed to load connection {connectionId}: {e}")
        return None
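Putting the two resolver steps together. The connection id and service name below are placeholders, and securityService / dbInterface stand for whatever the host application already wires in:

# (inside an async function)
resolver = ConnectorResolver(securityService=security, dbInterface=db)

# One-step resolution straight to an adapter. Each failure point raises
# ValueError with a specific message: unknown id, missing authority,
# unregistered provider, stale token, or unknown service.
adapter = await resolver.resolveService("conn-123", "sharepoint")  # placeholders
entries = await adapter.browse("/Shared Documents", limit=25)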
1342
modules/connectors/connectorSwissTopoMapServer.py
Normal file
File diff suppressed because it is too large
@ -1,3 +1,5 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """ClickUp connector for CRUD operations (compatible with TicketInterface).
 
 This module defines its own minimal abstractions to avoid coupling.
@ -7,6 +9,7 @@ from typing import Optional
 import logging
 import aiohttp
 from modules.datamodels.datamodelTickets import TicketBase, TicketFieldAttribute
+from modules.serviceCenter.services.serviceClickup.mainServiceClickup import clickup_authorization_header
 
 
 logger = logging.getLogger(__name__)
@ -28,7 +31,7 @@ class ConnectorTicketClickup(TicketBase):
 
     def _headers(self) -> dict:
         return {
-            "Authorization": self.apiToken,
+            "Authorization": clickup_authorization_header(self.apiToken),
             "Content-Type": "application/json",
         }
@ -1,3 +1,5 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """Jira connector for CRUD operations (neutralized to generic ticket interface).
 
 This module defines its own minimal abstractions to avoid coupling.
419
modules/connectors/connectorTicketsRedmine.py
Normal file
@ -0,0 +1,419 @@
|
||||||
|
# Copyright (c) 2026 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""Redmine REST connector.
|
||||||
|
|
||||||
|
Async / aiohttp port of the SSS pilot client
|
||||||
|
(``pamocreate/projects/valueon/sss/project_mars/redmine-sync/code/_redmineClient.py``)
|
||||||
|
plus the read-side helpers required by ``serviceRedmine`` and
|
||||||
|
``serviceRedmineStats``.
|
||||||
|
|
||||||
|
Auth: ``X-Redmine-API-Key`` header. The key is *never* logged.
|
||||||
|
|
||||||
|
Idempotency / safety:
|
||||||
|
- ``DELETE /issues/{id}`` is often forbidden in Redmine (HTTP 403).
|
||||||
|
``deleteIssue`` returns ``False`` instead of raising in that case so
|
||||||
|
the higher layer can fall back to status-based archival.
|
||||||
|
- A small ``_throttleSeconds`` delay (default 150 ms) is awaited after
|
||||||
|
every write call to keep the SSS server happy.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
from typing import Any, Dict, List, Optional, Tuple
|
||||||
|
from urllib.parse import urlencode
|
||||||
|
|
||||||
|
import aiohttp
|
||||||
|
|
||||||
|
from modules.datamodels.datamodelTickets import TicketBase, TicketFieldAttribute
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class RedmineApiError(RuntimeError):
|
||||||
|
"""Raised when the Redmine API returns a non-success status."""
|
||||||
|
|
||||||
|
def __init__(self, status: int, body: str, method: str, path: str):
|
||||||
|
self.status = status
|
||||||
|
self.body = body
|
||||||
|
self.method = method
|
||||||
|
self.path = path
|
||||||
|
super().__init__(f"Redmine {method} {path} failed: HTTP {status} {body[:300]}")
|
||||||
|
|
||||||
|
|
||||||
|
class ConnectorTicketsRedmine(TicketBase):
|
||||||
|
"""Async Redmine connector. One instance per (baseUrl, apiKey, projectId)."""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
*,
|
||||||
|
baseUrl: str,
|
||||||
|
apiKey: str,
|
||||||
|
projectId: str,
|
||||||
|
throttleSeconds: float = 0.15,
|
||||||
|
timeoutSeconds: float = 30.0,
|
||||||
|
) -> None:
|
||||||
|
if not baseUrl:
|
||||||
|
raise ValueError("Redmine baseUrl is required")
|
||||||
|
if not apiKey:
|
||||||
|
raise ValueError("Redmine apiKey is required")
|
||||||
|
self._baseUrl = baseUrl.rstrip("/")
|
||||||
|
self._apiKey = apiKey
|
||||||
|
self._projectId = str(projectId) if projectId is not None else ""
|
||||||
|
self._throttleSeconds = max(0.0, float(throttleSeconds))
|
||||||
|
self._timeoutSeconds = float(timeoutSeconds)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Low-level
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _headers(self) -> Dict[str, str]:
|
||||||
|
return {
|
||||||
|
"X-Redmine-API-Key": self._apiKey,
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
"Accept": "application/json",
|
||||||
|
}
|
||||||
|
|
||||||
|
async def _call(
|
||||||
|
self,
|
||||||
|
method: str,
|
||||||
|
path: str,
|
||||||
|
*,
|
||||||
|
payload: Optional[Dict[str, Any]] = None,
|
||||||
|
params: Optional[Dict[str, Any]] = None,
|
||||||
|
) -> Tuple[int, Optional[Dict[str, Any]], str]:
|
||||||
|
"""Single REST call. Returns ``(status, json_or_none, raw_body)``.
|
||||||
|
|
||||||
|
Does *not* raise -- the caller decides whether a non-2xx is fatal
|
||||||
|
(e.g. 403 on DELETE is expected and handled).
|
||||||
|
"""
|
||||||
|
url = f"{self._baseUrl}{path}"
|
||||||
|
if params:
|
||||||
|
url = f"{url}?{urlencode(params)}"
|
||||||
|
timeout = aiohttp.ClientTimeout(total=self._timeoutSeconds)
|
||||||
|
try:
|
||||||
|
async with aiohttp.ClientSession(timeout=timeout) as session:
|
||||||
|
async with session.request(method, url, headers=self._headers(), json=payload) as resp:
|
||||||
|
raw = await resp.text()
|
||||||
|
parsed: Optional[Dict[str, Any]] = None
|
||||||
|
if raw:
|
||||||
|
try:
|
||||||
|
parsed = await resp.json(content_type=None)
|
||||||
|
except Exception:
|
||||||
|
parsed = None
|
||||||
|
return resp.status, parsed, raw
|
||||||
|
except aiohttp.ClientError as e:
|
||||||
|
logger.warning(f"Redmine {method} {path} client error: {e}")
|
||||||
|
return -1, None, f"ClientError: {e}"
|
||||||
|
except asyncio.TimeoutError:
|
||||||
|
logger.warning(f"Redmine {method} {path} timeout after {self._timeoutSeconds}s")
|
||||||
|
return -1, None, "Timeout"
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _isOk(status: int) -> bool:
|
||||||
|
return 200 <= status < 300
|
||||||
|
|
||||||
|
async def _gentle(self) -> None:
|
||||||
|
if self._throttleSeconds > 0:
|
||||||
|
await asyncio.sleep(self._throttleSeconds)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Identity / health
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
async def whoAmI(self) -> Dict[str, Any]:
|
||||||
|
status, body, raw = await self._call("GET", "/users/current.json")
|
||||||
|
if not self._isOk(status) or not body:
|
||||||
|
raise RedmineApiError(status, raw, "GET", "/users/current.json")
|
||||||
|
return body.get("user", {})
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Project meta -- trackers, statuses, priorities, custom fields, users
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
async def getTrackers(self) -> List[Dict[str, Any]]:
|
||||||
|
status, body, raw = await self._call("GET", "/trackers.json")
|
||||||
|
if not self._isOk(status) or not body:
|
||||||
|
raise RedmineApiError(status, raw, "GET", "/trackers.json")
|
||||||
|
return body.get("trackers", []) or []
|
||||||
|
|
||||||
|
async def getStatuses(self) -> List[Dict[str, Any]]:
|
||||||
|
status, body, raw = await self._call("GET", "/issue_statuses.json")
|
||||||
|
if not self._isOk(status) or not body:
|
||||||
|
raise RedmineApiError(status, raw, "GET", "/issue_statuses.json")
|
||||||
|
return body.get("issue_statuses", []) or []
|
||||||
|
|
||||||
|
async def getPriorities(self) -> List[Dict[str, Any]]:
|
||||||
|
status, body, raw = await self._call(
|
||||||
|
"GET", "/enumerations/issue_priorities.json"
|
||||||
|
)
|
||||||
|
if not self._isOk(status) or not body:
|
||||||
|
return []
|
||||||
|
return body.get("issue_priorities", []) or []
|
||||||
|
|
||||||
|
async def getCustomFields(self) -> List[Dict[str, Any]]:
|
||||||
|
"""Requires admin privileges in Redmine. Returns ``[]`` if forbidden."""
|
||||||
|
status, body, raw = await self._call("GET", "/custom_fields.json")
|
||||||
|
if status == 403 or status == 401:
|
||||||
|
logger.info("Redmine /custom_fields.json forbidden -- using per-issue field discovery")
|
||||||
|
return []
|
||||||
|
if not self._isOk(status) or not body:
|
||||||
|
raise RedmineApiError(status, raw, "GET", "/custom_fields.json")
|
||||||
|
return body.get("custom_fields", []) or []
|
||||||
|
|
||||||
|
async def getProjectUsers(self) -> List[Dict[str, Any]]:
|
||||||
|
status, body, raw = await self._call(
|
||||||
|
"GET", f"/projects/{self._projectId}/memberships.json", params={"limit": 100}
|
||||||
|
)
|
||||||
|
if not self._isOk(status) or not body:
|
||||||
|
return []
|
||||||
|
members = body.get("memberships", []) or []
|
||||||
|
users: List[Dict[str, Any]] = []
|
||||||
|
seen: set[int] = set()
|
||||||
|
for m in members:
|
||||||
|
user = m.get("user")
|
||||||
|
if not user:
|
||||||
|
continue
|
||||||
|
uid = user.get("id")
|
||||||
|
if uid in seen:
|
||||||
|
continue
|
||||||
|
seen.add(uid)
|
||||||
|
users.append(user)
|
||||||
|
return users
|
||||||
|
|
||||||
|
async def getProjectInfo(self) -> Dict[str, Any]:
|
||||||
|
status, body, raw = await self._call("GET", f"/projects/{self._projectId}.json")
|
||||||
|
if not self._isOk(status) or not body:
|
||||||
|
raise RedmineApiError(status, raw, "GET", f"/projects/{self._projectId}.json")
|
||||||
|
return body.get("project", {})
|
||||||
|
|
||||||
|
async def getIssueCategories(self) -> List[Dict[str, Any]]:
|
||||||
|
"""Per-project issue categories. Returns ``[]`` if the endpoint
|
||||||
|
is forbidden or the project has no categories defined."""
|
||||||
|
path = f"/projects/{self._projectId}/issue_categories.json"
|
||||||
|
status, body, raw = await self._call("GET", path)
|
||||||
|
if status in (401, 403, 404) or not self._isOk(status) or not body:
|
||||||
|
return []
|
||||||
|
return body.get("issue_categories", []) or []
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Issues -- read
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
async def getIssue(
|
||||||
|
self, issueId: int, *, includeRelations: bool = True, includeChildren: bool = False
|
||||||
|
) -> Dict[str, Any]:
|
||||||
|
includes = ["custom_fields", "journals"]
|
||||||
|
if includeRelations:
|
||||||
|
includes.append("relations")
|
||||||
|
if includeChildren:
|
||||||
|
includes.append("children")
|
||||||
|
params = {"include": ",".join(includes)}
|
||||||
|
status, body, raw = await self._call("GET", f"/issues/{issueId}.json", params=params)
|
||||||
|
if not self._isOk(status) or not body:
|
||||||
|
raise RedmineApiError(status, raw, "GET", f"/issues/{issueId}.json")
|
||||||
|
return body.get("issue", {})
|
||||||
|
|
||||||
|
async def listIssues(
|
||||||
|
self,
|
||||||
|
*,
|
||||||
|
trackerId: Optional[int] = None,
|
||||||
|
statusId: Optional[str] = "*",
|
||||||
|
updatedOnFrom: Optional[str] = None,
|
||||||
|
updatedOnTo: Optional[str] = None,
|
||||||
|
createdOnFrom: Optional[str] = None,
|
||||||
|
createdOnTo: Optional[str] = None,
|
||||||
|
assignedToId: Optional[int] = None,
|
||||||
|
subjectContains: Optional[str] = None,
|
||||||
|
limit: int = 100,
|
||||||
|
offset: int = 0,
|
||||||
|
include: Optional[List[str]] = None,
|
||||||
|
) -> Dict[str, Any]:
|
||||||
|
"""Single-page list. Returns the raw envelope ``{issues, total_count, offset, limit}``."""
|
||||||
|
params: Dict[str, Any] = {
|
||||||
|
"project_id": self._projectId,
|
||||||
|
"limit": str(limit),
|
||||||
|
"offset": str(offset),
|
||||||
|
}
|
||||||
|
if statusId is not None:
|
||||||
|
params["status_id"] = str(statusId)
|
||||||
|
if trackerId is not None:
|
||||||
|
params["tracker_id"] = str(trackerId)
|
||||||
|
if assignedToId is not None:
|
||||||
|
params["assigned_to_id"] = str(assignedToId)
|
||||||
|
if subjectContains:
|
||||||
|
params["subject"] = f"~{subjectContains}"
|
||||||
|
if updatedOnFrom and updatedOnTo:
|
||||||
|
params["updated_on"] = f"><{updatedOnFrom}|{updatedOnTo}"
|
||||||
|
elif updatedOnFrom:
|
||||||
|
params["updated_on"] = f">={updatedOnFrom}"
|
||||||
|
elif updatedOnTo:
|
||||||
|
params["updated_on"] = f"<={updatedOnTo}"
|
||||||
|
if createdOnFrom and createdOnTo:
|
||||||
|
params["created_on"] = f"><{createdOnFrom}|{createdOnTo}"
|
||||||
|
elif createdOnFrom:
|
||||||
|
params["created_on"] = f">={createdOnFrom}"
|
||||||
|
elif createdOnTo:
|
||||||
|
params["created_on"] = f"<={createdOnTo}"
|
||||||
|
if include:
|
||||||
|
params["include"] = ",".join(include)
|
||||||
|
|
||||||
|
status, body, raw = await self._call("GET", "/issues.json", params=params)
|
||||||
|
if not self._isOk(status) or not body:
|
||||||
|
raise RedmineApiError(status, raw, "GET", "/issues.json")
|
||||||
|
return body
|
||||||
|
|
||||||
|
async def listAllIssues(
|
||||||
|
self,
|
||||||
|
*,
|
||||||
|
trackerId: Optional[int] = None,
|
||||||
|
statusId: Optional[str] = "*",
|
||||||
|
updatedOnFrom: Optional[str] = None,
|
||||||
|
updatedOnTo: Optional[str] = None,
|
||||||
|
createdOnFrom: Optional[str] = None,
|
||||||
|
createdOnTo: Optional[str] = None,
|
||||||
|
assignedToId: Optional[int] = None,
|
||||||
|
pageSize: int = 100,
|
||||||
|
maxPages: int = 50,
|
||||||
|
include: Optional[List[str]] = None,
|
||||||
|
) -> List[Dict[str, Any]]:
|
||||||
|
"""Paginate ``listIssues`` and return all matching raw issues."""
|
||||||
|
all_issues: List[Dict[str, Any]] = []
|
||||||
|
offset = 0
|
||||||
|
for _page in range(maxPages):
|
||||||
|
envelope = await self.listIssues(
|
||||||
|
trackerId=trackerId,
|
||||||
|
statusId=statusId,
|
||||||
|
updatedOnFrom=updatedOnFrom,
|
||||||
|
updatedOnTo=updatedOnTo,
|
||||||
|
createdOnFrom=createdOnFrom,
|
||||||
|
createdOnTo=createdOnTo,
|
||||||
|
assignedToId=assignedToId,
|
||||||
|
limit=pageSize,
|
||||||
|
offset=offset,
|
||||||
|
include=include,
|
||||||
|
)
|
||||||
|
page_issues = envelope.get("issues", []) or []
|
||||||
|
all_issues.extend(page_issues)
|
||||||
|
total = int(envelope.get("total_count") or 0)
|
||||||
|
offset += len(page_issues)
|
||||||
|
if not page_issues or offset >= total:
|
||||||
|
break
|
||||||
|
return all_issues
|
||||||
|
|
||||||
|
async def listRelations(self, issueId: int) -> List[Dict[str, Any]]:
|
||||||
|
issue = await self.getIssue(issueId, includeRelations=True)
|
||||||
|
return issue.get("relations", []) or []
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Issues -- write
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
async def createIssue(self, fields: Dict[str, Any]) -> Dict[str, Any]:
|
||||||
|
body_in = {"issue": dict(fields)}
|
||||||
|
body_in["issue"].setdefault("project_id", self._projectId)
|
||||||
|
status, body, raw = await self._call("POST", "/issues.json", payload=body_in)
|
||||||
|
await self._gentle()
|
||||||
|
if not self._isOk(status) or not body:
|
||||||
|
raise RedmineApiError(status, raw, "POST", "/issues.json")
|
||||||
|
return body.get("issue", {})
|
||||||
|
|
||||||
|
async def updateIssue(
|
||||||
|
self, issueId: int, fields: Dict[str, Any], *, notes: Optional[str] = None
|
||||||
    ) -> bool:
        body_in: Dict[str, Any] = {"issue": dict(fields)}
        if notes:
            body_in["issue"]["notes"] = notes
        status, body, raw = await self._call("PUT", f"/issues/{issueId}.json", payload=body_in)
        await self._gentle()
        if status == 204:
            return True
        if not self._isOk(status):
            raise RedmineApiError(status, raw, "PUT", f"/issues/{issueId}.json")
        return True

    async def deleteIssue(self, issueId: int) -> bool:
        """Returns ``False`` if Redmine forbids deletion (HTTP 403/401)."""
        status, body, raw = await self._call("DELETE", f"/issues/{issueId}.json")
        await self._gentle()
        if status in (200, 204):
            return True
        if status in (401, 403):
            logger.info(f"Redmine DELETE issue {issueId} forbidden ({status}) -- caller should fall back")
            return False
        raise RedmineApiError(status, raw, "DELETE", f"/issues/{issueId}.json")

    # ------------------------------------------------------------------
    # Relations -- write
    # ------------------------------------------------------------------

    async def addRelation(
        self, fromId: int, toId: int, *, relationType: str = "relates", delay: Optional[int] = None
    ) -> Dict[str, Any]:
        rel: Dict[str, Any] = {"issue_to_id": toId, "relation_type": relationType}
        if delay is not None:
            rel["delay"] = int(delay)
        status, body, raw = await self._call(
            "POST", f"/issues/{fromId}/relations.json", payload={"relation": rel}
        )
        await self._gentle()
        if not self._isOk(status) or not body:
            raise RedmineApiError(status, raw, "POST", f"/issues/{fromId}/relations.json")
        return body.get("relation", {})

    async def deleteRelation(self, relationId: int) -> bool:
        status, body, raw = await self._call("DELETE", f"/relations/{relationId}.json")
        await self._gentle()
        if status in (200, 204):
            return True
        if status in (401, 403):
            return False
        raise RedmineApiError(status, raw, "DELETE", f"/relations/{relationId}.json")

    # ------------------------------------------------------------------
    # TicketBase compliance (used by AI-tool path)
    # ------------------------------------------------------------------

    async def readAttributes(self) -> List[TicketFieldAttribute]:
        """Static base attributes + project custom fields (best-effort)."""
        attrs: List[TicketFieldAttribute] = [
            TicketFieldAttribute(fieldName="Subject", field="subject"),
            TicketFieldAttribute(fieldName="Description", field="description"),
            TicketFieldAttribute(fieldName="Tracker", field="tracker_id"),
            TicketFieldAttribute(fieldName="Status", field="status_id"),
            TicketFieldAttribute(fieldName="Priority", field="priority_id"),
            TicketFieldAttribute(fieldName="Assignee", field="assigned_to_id"),
            TicketFieldAttribute(fieldName="Parent", field="parent_issue_id"),
            TicketFieldAttribute(fieldName="Target Version", field="fixed_version_id"),
        ]
        try:
            cfs = await self.getCustomFields()
        except Exception:
            cfs = []
        for cf in cfs:
            try:
                attrs.append(
                    TicketFieldAttribute(
                        fieldName=str(cf.get("name", f"cf_{cf.get('id')}")),
                        field=f"cf_{cf.get('id')}",
                    )
                )
            except Exception:
                continue
        return attrs

    async def readTasks(self, *, limit: int = 0) -> List[Dict[str, Any]]:
        if limit and limit > 0:
            envelope = await self.listIssues(limit=limit)
            return envelope.get("issues", []) or []
        return await self.listAllIssues()

    async def writeTasks(self, tasklist: List[Dict[str, Any]]) -> None:
        for task in tasklist:
            issue_id = task.get("id")
            fields = {k: v for k, v in task.items() if k != "id"}
            if issue_id:
                await self.updateIssue(int(issue_id), fields)
            else:
                await self.createIssue(fields)
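A minimal usage sketch of the TicketBase write path -- assuming an already-initialized connector instance (here called `redmine`; the IDs and field values are illustrative, not from the source). Tasks carrying an `id` are updated in place; tasks without one are created:

# Sketch only: "redmine" is an initialized Redmine connector instance.
tasks = [
    {"id": 101, "subject": "Renamed issue"},          # has "id"  -> updateIssue(101, {...})
    {"subject": "Brand-new issue", "tracker_id": 1},  # no "id"   -> createIssue({...})
]
await redmine.writeTasks(tasks)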
@@ -1,3 +1,5 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Google Cloud Speech-to-Text and Translation Connector
Replaces Azure Speech Services with Google Cloud APIs

@@ -5,15 +7,27 @@ Replaces Azure Speech Services with Google Cloud APIs

import json
import html
import asyncio
import logging
import time
from typing import AsyncGenerator, Dict, Optional, Any, List, Tuple

from google.cloud import speech
from google.cloud import translate_v2 as translate
from google.cloud import texttospeech
from modules.shared.configuration import APP_CONFIG
from modules.shared.voiceCatalog import getDefaultVoice as _catalogDefaultVoice

logger = logging.getLogger(__name__)

# Gemini-TTS speaker IDs from voices.list use short names (e.g. "Kore") and require
# SynthesisInput.prompt + VoiceSelectionParams.model_name (google-cloud-texttospeech >= 2.24.0).
_GEMINI_TTS_DEFAULT_MODEL = "gemini-2.5-flash-tts"
_GEMINI_TTS_NEUTRAL_PROMPT = "Say the following"
_GEMINI_TTS_MIN_CLIENT_HINT = (
    "Gemini-TTS requires google-cloud-texttospeech>=2.24.0 (SynthesisInput.prompt, VoiceSelectionParams.model_name)."
)


class ConnectorGoogleSpeech:
    """
    Google Cloud Speech-to-Text and Translation connector.

@@ -56,7 +70,10 @@ class ConnectorGoogleSpeech:
            raise

    async def speechToText(self, audioContent: bytes, language: str = "de-DE",
                           sampleRate: int = None, channels: int = None,
                           skipFallbacks: bool = False,
                           phraseHints: Optional[list] = None,
                           alternativeLanguages: Optional[list] = None) -> Dict:
        """
        Convert speech to text using Google Cloud Speech-to-Text API.

@@ -70,6 +87,11 @@ class ConnectorGoogleSpeech:
            Dict containing transcribed text, confidence, and metadata
        """
        try:
            # Treat sampleRate=0 as unknown (invalid value from client)
            if sampleRate is not None and sampleRate <= 0:
                logger.warning(f"Invalid sampleRate={sampleRate}, treating as unknown for auto-detection")
                sampleRate = None

            # Auto-detect audio format if not provided
            if sampleRate is None or channels is None:
                validation = self.validateAudioFormat(audioContent)

@@ -84,6 +106,9 @@ class ConnectorGoogleSpeech:
                channels = validation["channels"]
                audioFormat = validation["format"]
                logger.info(f"Auto-detected audio: {audioFormat}, {sampleRate}Hz, {channels}ch")
            else:
                # When sampleRate and channels are explicitly provided, assume raw PCM (LINEAR16)
                audioFormat = "linear16"

            logger.info(f"Processing audio with Google Cloud Speech-to-Text")
            logger.info(f"Audio: {len(audioContent)} bytes, {sampleRate}Hz, {channels}ch")

@@ -94,15 +119,10 @@ class ConnectorGoogleSpeech:
            # Determine encoding based on detected format
            # Google Cloud Speech API has specific requirements for different formats
            if audioFormat == "webm_opus":
                encoding = speech.RecognitionConfig.AudioEncoding.WEBM_OPUS
                if sampleRate not in [8000, 12000, 16000, 24000, 48000]:
                    sampleRate = 48000
                useSampleRate = True
            elif audioFormat == "linear16":
                # For LINEAR16 format (PCM)
                encoding = speech.RecognitionConfig.AudioEncoding.LINEAR16

@@ -137,13 +157,22 @@ class ConnectorGoogleSpeech:
                "audio_channel_count": channels,
                "language_code": language,
                "enable_automatic_punctuation": True,
                "model": "latest_long",
                "enable_word_time_offsets": True,
                "enable_word_confidence": True,
                "max_alternatives": 3,
                "use_enhanced": True,
            }

            if phraseHints:
                configParams["speech_contexts"] = [speech.SpeechContext(
                    phrases=phraseHints,
                    boost=15.0,
                )]

            if alternativeLanguages:
                configParams["alternative_language_codes"] = alternativeLanguages

            # Only add sample_rate_hertz if needed (not for WEBM_OPUS)
            if useSampleRate:
                configParams["sample_rate_hertz"] = sampleRate

@@ -158,26 +187,31 @@ class ConnectorGoogleSpeech:

            try:
                # Use regular recognition for single audio files (not streaming)
                # Run in thread pool to avoid blocking the asyncio event loop
                logger.info("Using regular recognition for single audio file...")
                response = await asyncio.to_thread(
                    self.speech_client.recognize, config=config, audio=audio
                )
                logger.debug(f"Google Cloud response: {response}")

            except Exception as apiError:
                logger.error(f"Google Cloud API error: {apiError}")
                if skipFallbacks:
                    raise
                if encoding != speech.RecognitionConfig.AudioEncoding.LINEAR16:
                    logger.info("Trying fallback with LINEAR16 encoding...")
                    fallbackConfig = speech.RecognitionConfig(
                        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
                        sample_rate_hertz=16000,
                        audio_channel_count=1,
                        language_code=language,
                        enable_automatic_punctuation=True,
                        model="latest_long"
                    )

                    try:
                        response = await asyncio.to_thread(
                            self.speech_client.recognize, config=fallbackConfig, audio=audio
                        )
                        logger.debug(f"Google Cloud fallback response: {response}")
                    except Exception as fallbackError:
                        logger.error(f"Google Cloud fallback error: {fallbackError}")

@@ -185,14 +219,20 @@ class ConnectorGoogleSpeech:
                else:
                    raise apiError

            # Process results - use longest transcript (complete utterance, avoids partials)
            if response.results:
                bestText = ""
                bestConfidence = 0.0
                for result in response.results:
                    if result.alternatives:
                        for alt in result.alternatives:
                            t = alt.transcript.strip()
                            if len(t) > len(bestText):
                                bestText = t
                                bestConfidence = alt.confidence
                if bestText:
                    transcribed_text = bestText
                    confidence = bestConfidence
                    logger.info(f"Transcription successful: '{transcribed_text}' (confidence: {confidence:.2f})")

                    return {

@@ -229,6 +269,15 @@ class ConnectorGoogleSpeech:
                    "error": f"Google Cloud error: {response.error}"
                }

            # Skip fallbacks when format is known (e.g. teamsbot with explicit LINEAR16 16kHz)
            if skipFallbacks:
                return {
                    "success": False,
                    "text": "",
                    "confidence": 0.0,
                    "error": "No recognition results (silence or unclear audio)"
                }

            # Try multiple fallback approaches
            fallback_configs = []

@@ -282,7 +331,18 @@ class ConnectorGoogleSpeech:
                    "description": f"LINEAR16 with {std_rate}Hz"
                })

            # Detect likely silence before expensive fallback loop
            if len(audioContent) > 100:
                sampleSlice = audioContent[100:min(500, len(audioContent))]
                if len(set(sampleSlice)) < 3:
                    logger.warning("Audio appears silent (low byte variation) - skipping fallbacks")
                    return {
                        "success": False,
                        "text": "",
                        "confidence": 0.0,
                        "error": "No recognition results (silence or unclear audio)"
                    }

            models = ["latest_long", "phone_call", "latest_short"]

            for fallback_config in fallback_configs:

@@ -290,7 +350,6 @@ class ConnectorGoogleSpeech:
                    try:
                        logger.info(f"Trying fallback: {fallback_config['description']} with {model} model...")

                        fallback_config_params = {
                            "encoding": fallback_config["encoding"],
                            "audio_channel_count": fallback_config["channels"],

@@ -299,20 +358,27 @@ class ConnectorGoogleSpeech:
                            "model": model
                        }

                        if fallback_config["use_sample_rate"]:
                            fallback_config_params["sample_rate_hertz"] = fallback_config["sample_rate"]

                        fallback_config_obj = speech.RecognitionConfig(**fallback_config_params)
                        fallback_response = await asyncio.to_thread(
                            self.speech_client.recognize, config=fallback_config_obj, audio=audio
                        )

                        if fallback_response.results:
                            bestText = ""
                            bestConfidence = 0.0
                            for result in fallback_response.results:
                                if result.alternatives:
                                    for alt in result.alternatives:
                                        t = alt.transcript.strip()
                                        if len(t) > len(bestText):
                                            bestText = t
                                            bestConfidence = alt.confidence
                            if bestText:
                                transcribed_text = bestText
                                confidence = bestConfidence
                                logger.info(f"Fallback transcription successful: '{transcribed_text}' (confidence: {confidence:.2f})")

                                return {

@@ -348,15 +414,182 @@ class ConnectorGoogleSpeech:
                "error": str(e)
            }

    async def streamingRecognize(
        self,
        audioQueue: asyncio.Queue,
        language: str = "de-DE",
        phraseHints: Optional[list] = None,
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """
        Stream audio chunks to Google Cloud Speech-to-Text Streaming API.
        Google handles silence/endpoint detection natively.

        Args:
            audioQueue: Queue of (bytes, bool) tuples. bytes=audio data, bool=isLast.
                Send (b"", True) to signal end of stream.
            language: Language code
            phraseHints: Optional boost phrases

        Yields:
            Dicts with keys: isFinal, transcript, confidence, stabilityScore, audioDurationSec
        """
        STREAM_LIMIT_SEC = 290
        streamStartTs = time.time()
        totalAudioBytes = 0

        configParams = {
            "encoding": speech.RecognitionConfig.AudioEncoding.WEBM_OPUS,
            "sample_rate_hertz": 48000,
            "audio_channel_count": 1,
            "language_code": language,
            "enable_automatic_punctuation": True,
            "model": "latest_long",
            "use_enhanced": True,
        }
        if phraseHints:
            configParams["speech_contexts"] = [speech.SpeechContext(phrases=phraseHints, boost=15.0)]

        recognitionConfig = speech.RecognitionConfig(**configParams)
        streamingConfig = speech.StreamingRecognitionConfig(
            config=recognitionConfig,
            interim_results=True,
            single_utterance=False,
        )

        import queue as threadQueue
        audioInQ: threadQueue.Queue = threadQueue.Queue()
        resultOutQ: asyncio.Queue = asyncio.Queue()

        async def _pumpAudioToThread():
            try:
                while True:
                    item = await audioQueue.get()
                    audioInQ.put(item)
                    if item[1]:
                        return
            except asyncio.CancelledError:
                audioInQ.put((b"", True))

        def _requestGenerator():
            nonlocal totalAudioBytes
            while True:
                try:
                    chunk, isLast = audioInQ.get(timeout=30.0)
                except threadQueue.Empty:
                    return
                if isLast or not chunk:
                    return
                totalAudioBytes += len(chunk)
                yield speech.StreamingRecognizeRequest(audio_content=chunk)

        def _runStreamingInThread():
            try:
                responseStream = self.speech_client.streaming_recognize(
                    config=streamingConfig,
                    requests=_requestGenerator(),
                )
                for response in responseStream:
                    elapsed = time.time() - streamStartTs
                    estimatedDurationSec = totalAudioBytes / (48000 * 1 * 2) if totalAudioBytes else 0

                    finalTexts = []
                    interimTexts = []
                    lastFinalConfidence = 0.0

                    for result in response.results:
                        alt = result.alternatives[0] if result.alternatives else None
                        if not alt or not alt.transcript.strip():
                            continue
                        if result.is_final:
                            finalTexts.append(alt.transcript.strip())
                            lastFinalConfidence = alt.confidence
                        else:
                            interimTexts.append(alt.transcript.strip())

                    for ft in finalTexts:
                        asyncio.run_coroutine_threadsafe(resultOutQ.put({
                            "isFinal": True,
                            "transcript": ft,
                            "confidence": lastFinalConfidence,
                            "stabilityScore": 0.0,
                            "audioDurationSec": estimatedDurationSec,
                        }), loop)

                    if interimTexts:
                        combined = " ".join(interimTexts)
                        asyncio.run_coroutine_threadsafe(resultOutQ.put({
                            "isFinal": False,
                            "transcript": combined,
                            "confidence": 0.0,
                            "stabilityScore": 0.0,
                            "audioDurationSec": estimatedDurationSec,
                        }), loop)
                    if elapsed >= STREAM_LIMIT_SEC:
                        logger.info("Streaming STT approaching 5-min limit, client should reconnect")
                        asyncio.run_coroutine_threadsafe(resultOutQ.put({
                            "isFinal": False, "transcript": "", "confidence": 0.0,
                            "reconnectRequired": True, "audioDurationSec": 0,
                        }), loop)
                        return
            except Exception as e:
                logger.error(f"Google Streaming STT error: {e}")
                asyncio.run_coroutine_threadsafe(resultOutQ.put({
                    "error": str(e),
                }), loop)
            finally:
                asyncio.run_coroutine_threadsafe(resultOutQ.put(None), loop)

        loop = asyncio.get_running_loop()
        pumpTask = asyncio.ensure_future(_pumpAudioToThread())
        streamFuture = loop.run_in_executor(None, _runStreamingInThread)

        try:
            while True:
                item = await resultOutQ.get()
                if item is None:
                    break
                if "error" in item:
                    raise RuntimeError(item["error"])
                yield item
        finally:
            pumpTask.cancel()
            await asyncio.shield(streamFuture)
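A minimal caller sketch for the streaming path -- assuming an async context and an initialized ConnectorGoogleSpeech instance (here `connector`); the chunk source is illustrative. Feed WEBM/OPUS chunks via the queue, then signal end-of-stream with (b"", True):

# Sketch only; "connector" and "webm_chunks" are assumptions, not from the source.
queue: asyncio.Queue = asyncio.Queue()

async def feed(chunks):
    for chunk in chunks:
        await queue.put((chunk, False))
    await queue.put((b"", True))          # end-of-stream marker

async def consume(connector, chunks):
    feeder = asyncio.create_task(feed(chunks))
    async for event in connector.streamingRecognize(queue, language="de-DE"):
        if event.get("reconnectRequired"):
            break                         # reopen a fresh stream before the ~5-min server limit
        if event["isFinal"]:
            print(event["transcript"], event["confidence"])
    await feeder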
    def calculateSttCostCHF(self, audioDurationSec: float) -> float:
        """Google STT cost: ~$0.016/min (standard model)."""
        return round((audioDurationSec / 60.0) * 0.016, 8)

    def calculateTtsCostCHF(self, characterCount: int) -> float:
        """Google TTS WaveNet cost: ~$0.000004/char."""
        return round(characterCount * 0.000004, 8)

    @staticmethod
    def _normalizeLanguageCode(code: Optional[str]) -> Optional[str]:
        """Normalize a user/LLM-supplied language hint to an ISO-639-1 code or None.

        Google Cloud Translation v2 only accepts ISO codes (e.g. 'de', 'en') or
        an omitted source for auto-detection. Strings like 'auto', '' or full
        BCP-47 tags ('de-DE') would otherwise reach the API and trigger
        '400 Invalid Value'. Centralising the mapping here keeps every caller
        (tools, interface, internal pipelines) safe.
        """
        if not code:
            return None
        normalized = code.strip().lower()
        if not normalized or normalized in ("auto", "detect", "any", "*"):
            return None
        return normalized.split("-")[0]
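A few input/output pairs for the normalizer, derived directly from the code above (example values chosen here for illustration):

# _normalizeLanguageCode("de-DE") -> "de"   (BCP-47 tag truncated to ISO-639-1)
# _normalizeLanguageCode("AUTO")  -> None   (omit source, let Google auto-detect)
# _normalizeLanguageCode("")      -> None
# _normalizeLanguageCode("en")    -> "en"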
    async def translateText(self, text: str, targetLanguage: str = "en",
                            sourceLanguage: Optional[str] = None) -> Dict:
        """
        Translate text using Google Cloud Translation API.

        Args:
            text: Text to translate
            targetLanguage: Target language code (e.g., 'en', 'de')
            sourceLanguage: Source language code (e.g., 'de', 'en'); pass None
                or 'auto' for Google's auto-detection.

        Returns:
            Dict containing translated text and metadata

@@ -370,13 +603,17 @@ class ConnectorGoogleSpeech:
                    "error": "Empty text provided"
                }

            normalizedSource = self._normalizeLanguageCode(sourceLanguage)
            normalizedTarget = self._normalizeLanguageCode(targetLanguage) or "en"
            logger.info(
                f"🌐 Translating: '{text}' "
                f"({normalizedSource or 'auto'} -> {normalizedTarget})"
            )

            result = self.translate_client.translate(
                text,
                source_language=normalizedSource,
                target_language=normalizedTarget,
            )

            translatedText = result['translatedText']

@@ -403,6 +640,61 @@ class ConnectorGoogleSpeech:
                "error": str(e)
            }

    async def detectLanguage(self, text: str) -> Dict:
        """
        Detect the language of text using Google Cloud Translation API.

        Args:
            text: Text to detect language for

        Returns:
            Dict containing detected language code and confidence
        """
        try:
            if not text.strip():
                logger.warning("⚠️ Empty text provided for language detection")
                return {
                    "success": False,
                    "language": "",
                    "error": "Empty text provided"
                }

            # Use a sample of the text (middle 1000 bytes or full text if smaller)
            textBytes = text.encode('utf-8')
            if len(textBytes) > 1000:
                # Take 1000 bytes from the middle
                startPos = (len(textBytes) - 1000) // 2
                textSample = textBytes[startPos:startPos + 1000].decode('utf-8', errors='ignore')
            else:
                textSample = text

            logger.info(f"🔍 Detecting language for text sample: '{textSample[:100]}...'")

            # Use translation API with auto-detection (source_language=None)
            result = self.translate_client.translate(
                textSample,
                source_language=None,  # Auto-detect
                target_language='en'  # Dummy target, we only need detection
            )

            detectedLanguage = result.get('detectedSourceLanguage', '')

            logger.info(f"✅ Language detected: {detectedLanguage}")

            return {
                "success": True,
                "language": detectedLanguage,
                "confidence": 1.0  # Google Translation API doesn't provide confidence, assume high
            }

        except Exception as e:
            logger.error(f"❌ Google Cloud Language Detection error: {e}")
            return {
                "success": False,
                "language": "",
                "error": str(e)
            }

    async def speechToTranslatedText(self, audioContent: bytes,
                                     fromLanguage: str = "de-DE",
                                     toLanguage: str = "en") -> Dict:

@@ -439,8 +731,8 @@ class ConnectorGoogleSpeech:
            # Step 2: Translation
            translationResult = await self.translateText(
                text=originalText,
                sourceLanguage=fromLanguage,
                targetLanguage=toLanguage,
            )

            if not translationResult["success"]:

@@ -642,14 +934,24 @@ class ConnectorGoogleSpeech:
                "error": f"Validation error: {e}"
            }

    def _isGeminiTtsSpeakerVoiceName(self, voiceName: str) -> bool:
        """True when voice name is a Gemini-TTS speaker id (no BCP-47 prefix like en-US-...)."""
        if not voiceName or not isinstance(voiceName, str):
            return False
        stripped = voiceName.strip()
        return bool(stripped) and "-" not in stripped

    async def textToSpeech(self, text: str, languageCode: str = "de-DE", voiceName: Optional[str] = None) -> Dict[str, Any]:
        """
        Convert text to speech using Google Cloud Text-to-Speech.

        Args:
            text: Text to convert to speech
            languageCode: BCP-47 language code (e.g., 'de-DE', 'en-US', 'ru-RU')
            voiceName: Specific voice name (optional). If omitted, a curated
                default is used; if no curated default exists for the language,
                Google selects a default voice automatically based on
                languageCode + ssml_gender (no hard failure).

        Returns:
            Dict with success status and audio data

@@ -657,98 +959,78 @@ class ConnectorGoogleSpeech:
        try:
            logger.info(f"Converting text to speech: '{text[:50]}...' in {languageCode}")

            selectedVoice = voiceName or self._getDefaultVoice(languageCode)
            isGeminiVoice = self._isGeminiTtsSpeakerVoiceName(selectedVoice) if selectedVoice else False

            if isGeminiVoice:
                synthesisInput = texttospeech.SynthesisInput(
                    text=text,
                    prompt=_GEMINI_TTS_NEUTRAL_PROMPT,
                )
                voice = texttospeech.VoiceSelectionParams(
                    language_code=languageCode,
                    name=selectedVoice,
                    model_name=_GEMINI_TTS_DEFAULT_MODEL,
                    ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL,
                )
            else:
                synthesisInput = texttospeech.SynthesisInput(text=text)
                voiceKwargs: Dict[str, Any] = {
                    "language_code": languageCode,
                    "ssml_gender": texttospeech.SsmlVoiceGender.NEUTRAL,
                }
                if selectedVoice:
                    voiceKwargs["name"] = selectedVoice
                else:
                    logger.info(
                        f"TTS: no curated voice for '{languageCode}', "
                        f"letting Google auto-select by language + gender"
                    )
                voice = texttospeech.VoiceSelectionParams(**voiceKwargs)

            logger.info(
                f"Using TTS voice: {selectedVoice or '<google-auto>'} "
                f"for language: {languageCode}"
            )

            audioConfig = texttospeech.AudioConfig(
                audio_encoding=texttospeech.AudioEncoding.MP3
            )

            response = self.tts_client.synthesize_speech(
                input=synthesisInput,
                voice=voice,
                audio_config=audioConfig,
            )

            return {
                "success": True,
                "audio_content": response.audio_content,
                "audio_format": "mp3",
                "language_code": languageCode,
                "voice_name": selectedVoice or "<google-auto>",
            }

        except Exception as e:
            logger.error(f"Text-to-Speech error: {e}")
            detail = str(e)
            extra = ""
            low = detail.lower()
            if "prompt" in low or "model_name" in low or "unknown field" in low:
                extra = f" {_GEMINI_TTS_MIN_CLIENT_HINT}"
            return {
                "success": False,
                "error": f"Text-to-Speech failed: {detail}{extra}",
            }

    def _getDefaultVoice(self, languageCode: str) -> Optional[str]:
        """Return the curated default Google TTS voice for `languageCode`.

        Delegates to the central voice catalog; returns None when no curated
        voice exists, in which case the caller omits `name` and Google
        auto-selects based on languageCode + ssml_gender.
        """
        return _catalogDefaultVoice(languageCode)

    async def getAvailableVoices(self, languageCode: Optional[str] = None) -> Dict[str, Any]:
        """

@@ -791,7 +1073,8 @@ class ConnectorGoogleSpeech:
                    "language_codes": list(voice.language_codes) if voice.language_codes else [],
                    "gender": gender,
                    "ssml_gender": voice.ssml_gender.name if voice.ssml_gender else "NEUTRAL",
                    "natural_sample_rate_hertz": voice.natural_sample_rate_hertz,
                    "geminiTts": self._isGeminiTtsSpeakerVoiceName(voice.name or ""),
                }

                # Include any additional fields if available from Google API
89
modules/connectors/connectorZhWfsParcels.py
Normal file

@@ -0,0 +1,89 @@
"""
Swiss Parcel (Liegenschaften) Connector

Fetches parcel data from geodienste.ch OGC API Features (Amtliche Vermessung).
Covers all of Switzerland. Returns GeoJSON in WGS84.

Uses: geodienste.ch OGC API - RESF collection (Liegenschaften)
No config override needed - this is the single working solution.
"""

import logging
from typing import Dict, Any

import requests

logger = logging.getLogger(__name__)

# geodienste.ch OGC API - RESF = Liegenschaften (parcels), all Switzerland
# API returns WGS84 directly when bbox-crs=EPSG:2056 is used
_OGC_API_BASE = "https://www.geodienste.ch/db/av_0/deu/ogcapi/collections/RESF/items"
_MAX_ITEMS = 2000
_TIMEOUT = 30


class ZhWfsParcelsConnector:
    """
    Connector for Swiss parcel (Liegenschaften) data via geodienste.ch OGC API.
    Returns GeoJSON FeatureCollection in WGS84.
    """

    def __init__(self, timeout: int = _TIMEOUT):
        self.timeout = timeout
        logger.info("ZhWfsParcelsConnector initialized (geodienste.ch OGC API)")

    def get_parcels_by_bbox(self, bbox: str) -> Dict[str, Any]:
        """
        Fetch parcels within bounding box.
        Returns GeoJSON FeatureCollection in WGS84 (EPSG:4326).

        Args:
            bbox: Bounding box as "minx,miny,maxx,maxy" in LV95 (EPSG:2056)

        Returns:
            GeoJSON FeatureCollection with geometries in WGS84
        """
        try:
            parts = [p.strip() for p in bbox.split(",")]
            if len(parts) != 4:
                raise ValueError(f"Invalid bbox: expected minx,miny,maxx,maxy, got {bbox}")
            minx, miny, maxx, maxy = (float(p) for p in parts)

            params = {
                "f": "json",
                "limit": _MAX_ITEMS,
                "bbox": f"{minx},{miny},{maxx},{maxy}",
                "bbox-crs": "http://www.opengis.net/def/crs/EPSG/0/2056",
            }

            logger.debug(f"Requesting parcels: bbox={bbox}")
            resp = requests.get(_OGC_API_BASE, params=params, timeout=self.timeout)

            if resp.status_code != 200:
                logger.error(f"Parcel API failed: status={resp.status_code}, body={resp.text[:500]}")
                return {"type": "FeatureCollection", "features": []}

            data = resp.json()

            # OGC API returns FeatureCollection in WGS84 directly
            features = data.get("features", [])
            if not features:
                return {"type": "FeatureCollection", "features": []}

            # Pass through - geodienste returns WGS84 GeoJSON
            result = {
                "type": "FeatureCollection",
                "features": features,
            }
            logger.info(f"Returned {len(features)} parcels in WGS84")
            return result

        except ValueError as e:
            logger.warning(f"Invalid bbox: {e}")
            raise
        except requests.RequestException as e:
            logger.error(f"Parcel API request error: {e}")
            return {"type": "FeatureCollection", "features": []}
        except Exception as e:
            logger.error(f"Error fetching parcels: {e}", exc_info=True)
            return {"type": "FeatureCollection", "features": []}
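A quick usage sketch of the parcel connector; the LV95 (EPSG:2056) coordinates below are illustrative values around Zurich, not from the source:

connector = ZhWfsParcelsConnector()
# bbox is "minx,miny,maxx,maxy" in LV95; response geometries come back in WGS84
fc = connector.get_parcels_by_bbox("2683000,1247000,2684000,1248000")
print(len(fc["features"]), "parcels")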
7
modules/connectors/providerClickup/__init__.py
Normal file

@@ -0,0 +1,7 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""ClickUp provider connector."""

from .connectorClickup import ClickupConnector

__all__ = ["ClickupConnector"]

286
modules/connectors/providerClickup/connectorClickup.py
Normal file

@@ -0,0 +1,286 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""ClickUp ProviderConnector — virtual paths for teams → lists → tasks (table rows).

Path convention (leading slash, no trailing slash except root):
    /                                                 — authorized workspaces (teams)
    /team/{teamId}                                    — spaces in the workspace
    /team/{teamId}/space/{spaceId}                    — folders + folderless lists
    /team/{teamId}/space/{spaceId}/folder/{folderId}  — lists in folder
    /team/{teamId}/list/{listId}                      — tasks in list (rows)
    /team/{teamId}/list/{listId}/task/{taskId}        — single task (download = JSON)
"""

from __future__ import annotations

import json
import logging
import re
from typing import Any, Dict, List, Optional

from modules.connectors.connectorProviderBase import (
    ProviderConnector,
    ServiceAdapter,
    DownloadResult,
)
from modules.datamodels.datamodelDataSource import ExternalEntry
from modules.serviceCenter.services.serviceClickup.mainServiceClickup import ClickupService

logger = logging.getLogger(__name__)

# type metadata for ExternalEntry.metadata["cuType"]
_CU_TEAM = "team"
_CU_SPACE = "space"
_CU_FOLDER = "folder"
_CU_LIST = "list"
_CU_TASK = "task"


def _norm(path: str) -> str:
    p = (path or "").strip() or "/"
    if not p.startswith("/"):
        p = "/" + p
    if p != "/" and p.endswith("/"):
        p = p.rstrip("/")
    return p


class ClickupListsAdapter(ServiceAdapter):
    """Maps ClickUp hierarchy + list tasks to browse/download/upload/search."""

    def __init__(self, access_token: str):
        self._token = access_token
        # Minimal service instance for API calls (no ServiceCenter context)
        self._svc = ClickupService(context=None, get_service=lambda _: None)
        self._svc.setAccessToken(access_token)

    async def browse(
        self,
        path: str,
        filter: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> List[ExternalEntry]:
        p = _norm(path)
        out: List[ExternalEntry] = []

        if p == "/":
            data = await self._svc.getAuthorizedTeams()
            if isinstance(data, dict) and data.get("error"):
                logger.warning(f"ClickUp browse root: {data.get('error')}")
                return []
            teams = data.get("teams", []) if isinstance(data, dict) else []
            for t in teams:
                tid = str(t.get("id", ""))
                name = t.get("name") or tid
                out.append(
                    ExternalEntry(
                        name=name,
                        path=f"/team/{tid}",
                        isFolder=True,
                        metadata={"cuType": _CU_TEAM, "id": tid, "raw": t},
                    )
                )
            return out

        m = re.match(r"^/team/([^/]+)$", p)
        if m:
            team_id = m.group(1)
            data = await self._svc.getSpaces(team_id)
            if isinstance(data, dict) and data.get("error"):
                return []
            spaces = data.get("spaces", []) if isinstance(data, dict) else []
            for s in spaces:
                sid = str(s.get("id", ""))
                name = s.get("name") or sid
                out.append(
                    ExternalEntry(
                        name=name,
                        path=f"/team/{team_id}/space/{sid}",
                        isFolder=True,
                        metadata={"cuType": _CU_SPACE, "id": sid, "raw": s},
                    )
                )
            return out

        m = re.match(r"^/team/([^/]+)/space/([^/]+)$", p)
        if m:
            team_id, space_id = m.group(1), m.group(2)
            folders_r = await self._svc.getFolders(space_id)
            lists_r = await self._svc.getFolderlessLists(space_id)
            if isinstance(folders_r, dict) and not folders_r.get("error"):
                for f in folders_r.get("folders", []) or []:
                    fid = str(f.get("id", ""))
                    name = f.get("name") or fid
                    out.append(
                        ExternalEntry(
                            name=name,
                            path=f"/team/{team_id}/space/{space_id}/folder/{fid}",
                            isFolder=True,
                            metadata={"cuType": _CU_FOLDER, "id": fid, "raw": f},
                        )
                    )
            if isinstance(lists_r, dict) and not lists_r.get("error"):
                for lst in lists_r.get("lists", []) or []:
                    lid = str(lst.get("id", ""))
                    name = lst.get("name") or lid
                    out.append(
                        ExternalEntry(
                            name=name,
                            path=f"/team/{team_id}/list/{lid}",
                            isFolder=True,
                            metadata={"cuType": _CU_LIST, "id": lid, "raw": lst},
                        )
                    )
            return out

        m = re.match(r"^/team/([^/]+)/space/([^/]+)/folder/([^/]+)$", p)
        if m:
            team_id, _space_id, folder_id = m.group(1), m.group(2), m.group(3)
            data = await self._svc.getListsInFolder(folder_id)
            if isinstance(data, dict) and data.get("error"):
                return []
            for lst in data.get("lists", []) or []:
                lid = str(lst.get("id", ""))
                name = lst.get("name") or lid
                out.append(
                    ExternalEntry(
                        name=name,
                        path=f"/team/{team_id}/list/{lid}",
                        isFolder=True,
                        metadata={"cuType": _CU_LIST, "id": lid, "raw": lst},
                    )
                )
            return out

        m = re.match(r"^/team/([^/]+)/list/([^/]+)$", p)
        if m:
            team_id, list_id = m.group(1), m.group(2)
            page = 0
            while True:
                data = await self._svc.getTasksInList(list_id, page=page)
                if isinstance(data, dict) and data.get("error"):
                    break
                tasks = data.get("tasks", []) if isinstance(data, dict) else []
                for task in tasks:
                    tid = str(task.get("id", ""))
                    name = task.get("name") or tid
                    out.append(
                        ExternalEntry(
                            name=name,
                            path=f"/team/{team_id}/list/{list_id}/task/{tid}",
                            isFolder=False,
                            metadata={
                                "cuType": _CU_TASK,
                                "id": tid,
                                "task": task,
                            },
                        )
                    )
                if len(tasks) < 100:
                    break
                if limit is not None and len(out) >= int(limit):
                    break
                page += 1
            if limit is not None:
                out = out[: max(1, int(limit))]
            return out

        m = re.match(r"^/team/([^/]+)/list/([^/]+)/task/([^/]+)$", p)
        if m:
            team_id, list_id, task_id = m.group(1), m.group(2), m.group(3)
            out.append(
                ExternalEntry(
                    name=f"task-{task_id}.json",
                    path=p,
                    isFolder=False,
                    metadata={"cuType": _CU_TASK, "id": task_id, "listId": list_id, "teamId": team_id},
                )
            )
            return out

        logger.warning(f"ClickUp browse: unsupported path {p}")
        return []

    async def download(self, path: str) -> Any:
        p = _norm(path)
        m = re.match(r"^/team/([^/]+)/list/([^/]+)/task/([^/]+)$", p)
        if not m:
            return b""
        task_id = m.group(3)
        data = await self._svc.getTask(task_id)
        if isinstance(data, dict) and data.get("error"):
            return json.dumps(data).encode("utf-8")
        payload = json.dumps(data, indent=2).encode("utf-8")
        return DownloadResult(data=payload, fileName=f"task-{task_id}.json", mimeType="application/json")

    async def upload(self, path: str, data: bytes, fileName: str) -> dict:
        """Upload attachment to a task. Path must be .../list/{listId}/task/{taskId}."""
        p = _norm(path)
        m = re.match(r"^/team/([^/]+)/list/([^/]+)/task/([^/]+)$", p)
        if not m:
            return {"error": "Path must be /team/{teamId}/list/{listId}/task/{taskId} for upload"}
        task_id = m.group(3)
        return await self._svc.uploadTaskAttachment(task_id, data, fileName)

    async def search(
        self,
        query: str,
        path: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> List[ExternalEntry]:
        base = _norm(path or "/")
        team_id: Optional[str] = None
        mt = re.match(r"^/team/([^/]+)", base)
        if mt:
            team_id = mt.group(1)
        if not team_id:
            teams = await self._svc.getAuthorizedTeams()
            if not isinstance(teams, dict) or teams.get("error"):
                return []
            tl = teams.get("teams") or []
            if not tl:
                return []
            team_id = str(tl[0].get("id", ""))

        out: List[ExternalEntry] = []
        page = 0
        while True:
            data = await self._svc.searchTeamTasks(team_id, query=query, page=page)
            if isinstance(data, dict) and data.get("error"):
                break
            tasks = data.get("tasks", []) if isinstance(data, dict) else []
            for task in tasks:
                tid = str(task.get("id", ""))
                name = task.get("name") or tid
                list_obj = task.get("list") or {}
                lid = str(list_obj.get("id", "")) if list_obj else ""
                if not lid:
                    continue
                out.append(
                    ExternalEntry(
                        name=name,
                        path=f"/team/{team_id}/list/{lid}/task/{tid}",
                        isFolder=False,
                        metadata={"cuType": _CU_TASK, "id": tid, "task": task},
                    )
                )
            if len(tasks) < 25:
                break
            if limit is not None and len(out) >= int(limit):
                break
            page += 1
        if limit is not None:
            out = out[: max(1, int(limit))]
        return out


class ClickupConnector(ProviderConnector):
    """One ClickUp connection → clickup virtual file service."""

    def getAvailableServices(self) -> List[str]:
        return ["clickup"]

    def getServiceAdapter(self, service: str) -> ServiceAdapter:
        if service != "clickup":
            raise ValueError(f"ClickUp only supports 'clickup' service, got '{service}'")
        return ClickupListsAdapter(self.accessToken)
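A browse/download walk-through of the virtual path scheme (sketch only; `connector` is an initialized ClickupConnector, the IDs are illustrative, and `download` returns a DownloadResult wrapping the task JSON):

adapter = connector.getServiceAdapter("clickup")
teams = await adapter.browse("/")                                 # workspaces (teams)
spaces = await adapter.browse("/team/9001")                       # spaces in team 9001
rows = await adapter.browse("/team/9001/list/42")                 # tasks (rows) of list 42
result = await adapter.download("/team/9001/list/42/task/abc123") # -> task-abc123.json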
3
modules/connectors/providerFtp/__init__.py
Normal file

@@ -0,0 +1,3 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""FTP/SFTP Provider Connector stub."""

58
modules/connectors/providerFtp/connectorFtp.py
Normal file

@@ -0,0 +1,58 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""FTP/SFTP ProviderConnector stub.

Implements the ProviderConnector interface for FTP/SFTP file access.
Full implementation follows when FTP integration is prioritized.
"""

import logging
from typing import List, Optional

from modules.connectors.connectorProviderBase import ProviderConnector, ServiceAdapter
from modules.datamodels.datamodelDataSource import ExternalEntry

logger = logging.getLogger(__name__)


class FtpFilesAdapter(ServiceAdapter):
    """FTP files ServiceAdapter (stub)."""

    def __init__(self, accessToken: str):
        self._accessToken = accessToken

    async def browse(
        self,
        path: str,
        filter: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> List[ExternalEntry]:
        logger.info(f"FTP browse stub: {path}")
        return []

    async def download(self, path: str) -> bytes:
        logger.info(f"FTP download stub: {path}")
        return b""

    async def upload(self, path: str, data: bytes, fileName: str) -> dict:
        return {"error": "FTP upload not yet implemented"}

    async def search(
        self,
        query: str,
        path: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> List[ExternalEntry]:
        return []


class FtpConnector(ProviderConnector):
    """FTP ProviderConnector -- 1 connection -> files."""

    def getAvailableServices(self) -> List[str]:
        return ["files"]

    def getServiceAdapter(self, service: str) -> ServiceAdapter:
        if service != "files":
            raise ValueError(f"FTP only supports 'files' service, got '{service}'")
        return FtpFilesAdapter(self.accessToken)
3
modules/connectors/providerGoogle/__init__.py
Normal file

@@ -0,0 +1,3 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Google Provider Connector -- 1 Connection : n Services (Drive, Gmail)."""

292
modules/connectors/providerGoogle/connectorGoogle.py
Normal file

@@ -0,0 +1,292 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Google ProviderConnector -- Drive and Gmail via Google OAuth."""

import logging
from typing import Any, Dict, List, Optional

import aiohttp

from modules.connectors.connectorProviderBase import ProviderConnector, ServiceAdapter, DownloadResult
from modules.datamodels.datamodelDataSource import ExternalEntry

logger = logging.getLogger(__name__)

_DRIVE_BASE = "https://www.googleapis.com/drive/v3"
_GMAIL_BASE = "https://gmail.googleapis.com/gmail/v1"


async def _googleGet(token: str, url: str) -> Dict[str, Any]:
    headers = {"Authorization": f"Bearer {token}"}
    timeout = aiohttp.ClientTimeout(total=20)
    try:
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.get(url, headers=headers) as resp:
                if resp.status in (200, 201):
                    return await resp.json()
                errorText = await resp.text()
                logger.warning(f"Google API {resp.status}: {errorText[:300]}")
                return {"error": f"{resp.status}: {errorText[:200]}"}
    except Exception as e:
        return {"error": str(e)}


class DriveAdapter(ServiceAdapter):
    """Google Drive ServiceAdapter -- browse files and folders."""

    def __init__(self, accessToken: str):
        self._token = accessToken

    async def browse(
        self,
        path: str,
        filter: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> List[ExternalEntry]:
        folderId = (path or "").strip("/") or "root"
        query = f"'{folderId}' in parents and trashed=false"
        fields = "files(id,name,mimeType,size,modifiedTime,parents)"
        pageSize = max(1, min(int(limit or 100), 1000))
        url = f"{_DRIVE_BASE}/files?q={query}&fields={fields}&pageSize={pageSize}&orderBy=folder,name"

        result = await _googleGet(self._token, url)
        if "error" in result:
            logger.warning(f"Google Drive browse failed: {result['error']}")
            return []

        entries = []
        for f in result.get("files", []):
            isFolder = f.get("mimeType") == "application/vnd.google-apps.folder"
            entries.append(ExternalEntry(
                name=f.get("name", ""),
                path=f"/{f.get('id', '')}",
                isFolder=isFolder,
                size=int(f.get("size", 0)) if f.get("size") else None,
                mimeType=f.get("mimeType") if not isFolder else None,
                metadata={"id": f.get("id"), "modifiedTime": f.get("modifiedTime")},
            ))
        return entries

    _EXPORT_MIME_MAP = {
        "application/vnd.google-apps.document": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        "application/vnd.google-apps.spreadsheet": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        "application/vnd.google-apps.presentation": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
        "application/vnd.google-apps.drawing": "application/pdf",
    }

    async def download(self, path: str) -> bytes:
        fileId = (path or "").strip("/")
        if not fileId:
            return b""
        headers = {"Authorization": f"Bearer {self._token}"}
        timeout = aiohttp.ClientTimeout(total=60)
        try:
            async with aiohttp.ClientSession(timeout=timeout) as session:
                # Try direct download first
                url = f"{_DRIVE_BASE}/files/{fileId}?alt=media"
                async with session.get(url, headers=headers) as resp:
                    if resp.status == 200:
                        return await resp.read()
                    logger.debug(f"Google Drive direct download returned {resp.status} for {fileId}")

                # If 403/404, check if it's a native Google file that needs export
                metaUrl = f"{_DRIVE_BASE}/files/{fileId}?fields=mimeType,name"
                async with session.get(metaUrl, headers=headers) as metaResp:
                    if metaResp.status != 200:
                        logger.warning(f"Google Drive metadata fetch failed ({metaResp.status}) for {fileId}")
                        return b""
                    meta = await metaResp.json()
                fileMime = meta.get("mimeType", "")
                fileName = meta.get("name", fileId)

                exportMime = self._EXPORT_MIME_MAP.get(fileMime)
                if not exportMime:
                    logger.warning(f"Google Drive: unsupported mimeType '{fileMime}' for file '{fileName}' ({fileId})")
                    return b""

                exportUrl = f"{_DRIVE_BASE}/files/{fileId}/export?mimeType={exportMime}"
                logger.info(f"Google Drive: exporting '{fileName}' as {exportMime}")
                async with session.get(exportUrl, headers=headers) as exportResp:
                    if exportResp.status == 200:
                        return await exportResp.read()
                    logger.warning(f"Google Drive export failed ({exportResp.status}) for '{fileName}'")
        except Exception as e:
            logger.error(f"Google Drive download failed for {fileId}: {e}")
        return b""
|
|
||||||
|
async def upload(self, path: str, data: bytes, fileName: str) -> dict:
|
||||||
|
return {"error": "Google Drive upload not yet implemented"}
|
||||||
|
|
||||||
|
async def search(
|
||||||
|
self,
|
||||||
|
query: str,
|
||||||
|
path: Optional[str] = None,
|
||||||
|
limit: Optional[int] = None,
|
||||||
|
) -> List[ExternalEntry]:
|
||||||
|
safeQuery = query.replace("'", "\\'")
|
||||||
|
folderId = (path or "").strip("/")
|
||||||
|
qParts = [f"name contains '{safeQuery}'", "trashed=false"]
|
||||||
|
if folderId:
|
||||||
|
qParts.append(f"'{folderId}' in parents")
|
||||||
|
qStr = " and ".join(qParts)
|
||||||
|
pageSize = max(1, min(int(limit or 100), 1000))
|
||||||
|
url = f"{_DRIVE_BASE}/files?q={qStr}&fields=files(id,name,mimeType,size)&pageSize={pageSize}"
|
||||||
|
logger.debug(f"Google Drive search: q={qStr}")
|
||||||
|
result = await _googleGet(self._token, url)
|
||||||
|
if "error" in result:
|
||||||
|
return []
|
||||||
|
return [
|
||||||
|
ExternalEntry(
|
||||||
|
name=f.get("name", ""),
|
||||||
|
path=f"/{f.get('id', '')}",
|
||||||
|
isFolder=f.get("mimeType") == "application/vnd.google-apps.folder",
|
||||||
|
size=int(f.get("size", 0)) if f.get("size") else None,
|
||||||
|
)
|
||||||
|
for f in result.get("files", [])
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class GmailAdapter(ServiceAdapter):
|
||||||
|
"""Gmail ServiceAdapter -- browse labels and messages."""
|
||||||
|
|
||||||
|
def __init__(self, accessToken: str):
|
||||||
|
self._token = accessToken
|
||||||
|
|
||||||
|
_DEFAULT_MESSAGE_LIMIT = 100
|
||||||
|
_MAX_MESSAGE_LIMIT = 500
|
||||||
|
|
||||||
|
async def browse(
|
||||||
|
self,
|
||||||
|
path: str,
|
||||||
|
filter: Optional[str] = None,
|
||||||
|
limit: Optional[int] = None,
|
||||||
|
) -> list:
|
||||||
|
cleanPath = (path or "").strip("/")
|
||||||
|
|
||||||
|
if not cleanPath:
|
||||||
|
url = f"{_GMAIL_BASE}/users/me/labels"
|
||||||
|
result = await _googleGet(self._token, url)
|
||||||
|
if "error" in result:
|
||||||
|
logger.warning(f"Gmail labels failed: {result['error']}")
|
||||||
|
return []
|
||||||
|
_SYSTEM_LABELS = {"INBOX", "SENT", "DRAFT", "TRASH", "SPAM", "STARRED", "IMPORTANT"}
|
||||||
|
labels = []
|
||||||
|
for lbl in result.get("labels", []):
|
||||||
|
labelId = lbl.get("id", "")
|
||||||
|
labelName = lbl.get("name", labelId)
|
||||||
|
if lbl.get("type") == "system" and labelId not in _SYSTEM_LABELS:
|
||||||
|
continue
|
||||||
|
labels.append(ExternalEntry(
|
||||||
|
name=labelName,
|
||||||
|
path=f"/{labelId}",
|
||||||
|
isFolder=True,
|
||||||
|
metadata={"id": labelId, "type": lbl.get("type", "")},
|
||||||
|
))
|
||||||
|
labels.sort(key=lambda e: (0 if e.metadata.get("type") == "system" else 1, e.name))
|
||||||
|
return labels
|
||||||
|
|
||||||
|
effectiveLimit = self._DEFAULT_MESSAGE_LIMIT if limit is None else max(1, min(int(limit), self._MAX_MESSAGE_LIMIT))
|
||||||
|
url = f"{_GMAIL_BASE}/users/me/messages?labelIds={cleanPath}&maxResults={effectiveLimit}"
|
||||||
|
result = await _googleGet(self._token, url)
|
||||||
|
if "error" in result:
|
||||||
|
return []
|
||||||
|
|
||||||
|
entries = []
|
||||||
|
for msg in result.get("messages", [])[:effectiveLimit]:
|
||||||
|
msgId = msg.get("id", "")
|
||||||
|
detailUrl = f"{_GMAIL_BASE}/users/me/messages/{msgId}?format=metadata&metadataHeaders=Subject&metadataHeaders=From&metadataHeaders=Date"
|
||||||
|
detail = await _googleGet(self._token, detailUrl)
|
||||||
|
if "error" in detail:
|
||||||
|
entries.append(ExternalEntry(name=f"Message {msgId}", path=f"/{cleanPath}/{msgId}", isFolder=False))
|
||||||
|
continue
|
||||||
|
headers = {h.get("name", ""): h.get("value", "") for h in detail.get("payload", {}).get("headers", [])}
|
||||||
|
entries.append(ExternalEntry(
|
||||||
|
name=headers.get("Subject", "(no subject)"),
|
||||||
|
path=f"/{cleanPath}/{msgId}",
|
||||||
|
isFolder=False,
|
||||||
|
metadata={
|
||||||
|
"id": msgId,
|
||||||
|
"from": headers.get("From", ""),
|
||||||
|
"date": headers.get("Date", ""),
|
||||||
|
"snippet": detail.get("snippet", ""),
|
||||||
|
},
|
||||||
|
))
|
||||||
|
return entries
|
||||||
|
|
||||||
|
async def download(self, path: str) -> DownloadResult:
|
||||||
|
"""Download a Gmail message as RFC 822 EML via format=raw."""
|
||||||
|
import base64
|
||||||
|
import re
|
||||||
|
cleanPath = (path or "").strip("/")
|
||||||
|
msgId = cleanPath.split("/")[-1] if cleanPath else ""
|
||||||
|
if not msgId:
|
||||||
|
return DownloadResult()
|
||||||
|
|
||||||
|
url = f"{_GMAIL_BASE}/users/me/messages/{msgId}?format=raw"
|
||||||
|
result = await _googleGet(self._token, url)
|
||||||
|
if "error" in result:
|
||||||
|
return DownloadResult()
|
||||||
|
|
||||||
|
rawB64 = result.get("raw", "")
|
||||||
|
if not rawB64:
|
||||||
|
return DownloadResult()
|
||||||
|
|
||||||
|
emlBytes = base64.urlsafe_b64decode(rawB64)
|
||||||
|
|
||||||
|
metaUrl = f"{_GMAIL_BASE}/users/me/messages/{msgId}?format=metadata&metadataHeaders=Subject"
|
||||||
|
meta = await _googleGet(self._token, metaUrl)
|
||||||
|
subject = msgId
|
||||||
|
if "error" not in meta:
|
||||||
|
for h in meta.get("payload", {}).get("headers", []):
|
||||||
|
if h.get("name", "").lower() == "subject":
|
||||||
|
subject = h.get("value", msgId)
|
||||||
|
break
|
||||||
|
safeName = re.sub(r'[<>:"/\\|?*\x00-\x1f]', "_", subject)[:80].strip(". ") or "email"
|
||||||
|
|
||||||
|
return DownloadResult(
|
||||||
|
data=emlBytes,
|
||||||
|
fileName=f"{safeName}.eml",
|
||||||
|
mimeType="message/rfc822",
|
||||||
|
)
|
||||||
|
|
||||||
|
async def upload(self, path: str, data: bytes, fileName: str) -> dict:
|
||||||
|
return {"error": "Gmail upload not applicable"}
|
||||||
|
|
||||||
|
async def search(
|
||||||
|
self,
|
||||||
|
query: str,
|
||||||
|
path: Optional[str] = None,
|
||||||
|
limit: Optional[int] = None,
|
||||||
|
) -> list:
|
||||||
|
effectiveLimit = self._DEFAULT_MESSAGE_LIMIT if limit is None else max(1, min(int(limit), self._MAX_MESSAGE_LIMIT))
|
||||||
|
url = f"{_GMAIL_BASE}/users/me/messages?q={query}&maxResults={effectiveLimit}"
|
||||||
|
result = await _googleGet(self._token, url)
|
||||||
|
if "error" in result:
|
||||||
|
return []
|
||||||
|
return [
|
||||||
|
ExternalEntry(
|
||||||
|
name=f"Message {m.get('id', '')}",
|
||||||
|
path=f"/{m.get('id', '')}",
|
||||||
|
isFolder=False,
|
||||||
|
metadata={"id": m.get("id")},
|
||||||
|
)
|
||||||
|
for m in result.get("messages", [])
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class GoogleConnector(ProviderConnector):
|
||||||
|
"""Google ProviderConnector -- 1 connection -> Drive + Gmail."""
|
||||||
|
|
||||||
|
_SERVICE_MAP = {
|
||||||
|
"drive": DriveAdapter,
|
||||||
|
"gmail": GmailAdapter,
|
||||||
|
}
|
||||||
|
|
||||||
|
def getAvailableServices(self) -> List[str]:
|
||||||
|
return list(self._SERVICE_MAP.keys())
|
||||||
|
|
||||||
|
def getServiceAdapter(self, service: str) -> ServiceAdapter:
|
||||||
|
adapterClass = self._SERVICE_MAP.get(service)
|
||||||
|
if not adapterClass:
|
||||||
|
raise ValueError(f"Unknown Google service: {service}. Available: {list(self._SERVICE_MAP.keys())}")
|
||||||
|
return adapterClass(self.accessToken)
|
||||||
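
The Google connector above follows the 1:n pattern of the base class: the connector owns the OAuth token, each adapter wraps one Google service. A minimal usage sketch (not part of the commit; it assumes a GoogleConnector whose accessToken already holds a valid OAuth token, and "0AbcHypotheticalId" is a placeholder Drive file ID):

# Illustrative sketch only; run with asyncio.run(demoGoogleDrive(connector)).
async def demoGoogleDrive(connector: "GoogleConnector") -> None:
    drive = connector.getServiceAdapter("drive")   # returns a DriveAdapter
    entries = await drive.browse("/", limit=25)    # empty path maps to Drive "root"
    for e in entries:
        print("DIR " if e.isFolder else "FILE", e.name, e.path)
    # Binary files download via alt=media; native Google files (Docs, Sheets,
    # Slides) fall back to the export path using _EXPORT_MIME_MAP.
    data = await drive.download("/0AbcHypotheticalId")
    print(f"downloaded {len(data)} bytes")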
3
modules/connectors/providerMsft/__init__.py
Normal file
@@ -0,0 +1,3 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Microsoft Provider Connector -- 1 Connection : n Services (SharePoint, Outlook, Teams, OneDrive)."""
893
modules/connectors/providerMsft/connectorMsft.py
Normal file
@@ -0,0 +1,893 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Microsoft ProviderConnector -- one MSFT connection serves SharePoint, Outlook, Teams, OneDrive.

All ServiceAdapters share the same OAuth access token obtained from the
UserConnection (authority=msft).
"""

import logging
import aiohttp
import asyncio
from typing import Dict, Any, List, Optional

from modules.connectors.connectorProviderBase import ProviderConnector, ServiceAdapter, DownloadResult
from modules.datamodels.datamodelDataSource import ExternalEntry

logger = logging.getLogger(__name__)

_GRAPH_BASE = "https://graph.microsoft.com/v1.0"


class _GraphApiMixin:
    """Shared Graph API call logic for all MSFT service adapters."""

    def __init__(self, accessToken: str):
        self._accessToken = accessToken

    async def _graphGet(self, endpoint: str) -> Dict[str, Any]:
        return await _makeGraphCall(self._accessToken, endpoint, "GET")

    async def _graphPost(self, endpoint: str, data: Any = None) -> Dict[str, Any]:
        return await _makeGraphCall(self._accessToken, endpoint, "POST", data)

    async def _graphPut(self, endpoint: str, data: bytes = None) -> Dict[str, Any]:
        return await _makeGraphCall(self._accessToken, endpoint, "PUT", data)

    async def _graphPatch(self, endpoint: str, data: Any = None) -> Dict[str, Any]:
        return await _makeGraphCall(self._accessToken, endpoint, "PATCH", data)

    async def _graphDelete(self, endpoint: str) -> Dict[str, Any]:
        return await _makeGraphCall(self._accessToken, endpoint, "DELETE")

    async def _graphDownload(self, endpoint: str) -> Optional[bytes]:
        """Download binary content from Graph API."""
        headers = {"Authorization": f"Bearer {self._accessToken}"}
        timeout = aiohttp.ClientTimeout(total=60)
        url = f"{_GRAPH_BASE}/{endpoint.lstrip('/')}"
        try:
            async with aiohttp.ClientSession(timeout=timeout) as session:
                async with session.get(url, headers=headers) as resp:
                    if resp.status == 200:
                        return await resp.read()
                    logger.error(f"Download failed {resp.status}: {await resp.text()}")
                    return None
        except Exception as e:
            logger.error(f"Graph download error: {e}")
            return None


async def _makeGraphCall(
    token: str, endpoint: str, method: str = "GET", data: Any = None
) -> Dict[str, Any]:
    """Execute a single Microsoft Graph API call."""
    url = f"{_GRAPH_BASE}/{endpoint.lstrip('/')}"
    contentType = "application/json"
    if method == "PUT" and isinstance(data, bytes):
        contentType = "application/octet-stream"
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": contentType,
    }
    timeout = aiohttp.ClientTimeout(total=30)
    try:
        async with aiohttp.ClientSession(timeout=timeout) as session:
            kwargs: Dict[str, Any] = {"headers": headers}
            if data is not None:
                kwargs["data"] = data

            if method == "GET":
                async with session.get(url, **kwargs) as resp:
                    return await _handleResponse(resp)
            elif method == "POST":
                async with session.post(url, **kwargs) as resp:
                    return await _handleResponse(resp)
            elif method == "PUT":
                async with session.put(url, **kwargs) as resp:
                    return await _handleResponse(resp)
            elif method == "PATCH":
                async with session.patch(url, **kwargs) as resp:
                    return await _handleResponse(resp)
            elif method == "DELETE":
                async with session.delete(url, **kwargs) as resp:
                    if resp.status in (200, 204):
                        return {}
                    return await _handleResponse(resp)

    except asyncio.TimeoutError:
        return {"error": f"Graph API timeout: {endpoint}"}
    except Exception as e:
        return {"error": f"Graph API error: {e}"}

    return {"error": f"Unsupported method: {method}"}


async def _handleResponse(resp: aiohttp.ClientResponse) -> Dict[str, Any]:
    if resp.status in (200, 201):
        return await resp.json()
    if resp.status == 202:
        return {"accepted": True}
    if resp.status == 204:
        return {}
    errorText = await resp.text()
    logger.error(f"Graph API {resp.status}: {errorText}")
    return {"error": f"{resp.status}: {errorText}"}


def _stripGraphBase(url: str) -> str:
    """Convert an absolute Graph URL (used by @odata.nextLink) into the
    relative endpoint that ``_makeGraphCall`` expects."""
    if not url:
        return ""
    if url.startswith(_GRAPH_BASE):
        return url[len(_GRAPH_BASE):].lstrip("/")
    return url


def _graphItemToExternalEntry(item: Dict[str, Any], basePath: str = "") -> ExternalEntry:
    isFolder = "folder" in item
    return ExternalEntry(
        name=item.get("name", ""),
        path=f"{basePath}/{item.get('name', '')}" if basePath else item.get("name", ""),
        isFolder=isFolder,
        size=item.get("size"),
        mimeType=item.get("file", {}).get("mimeType") if not isFolder else None,
        lastModified=None,
        metadata={
            "id": item.get("id"),
            "webUrl": item.get("webUrl"),
            "childCount": item.get("folder", {}).get("childCount") if isFolder else None,
        },
    )


# ---------------------------------------------------------------------------
# SharePoint Adapter
# ---------------------------------------------------------------------------

class SharepointAdapter(_GraphApiMixin, ServiceAdapter):
    """ServiceAdapter for SharePoint (files, sites) via Microsoft Graph."""

    async def browse(
        self,
        path: str,
        filter: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> List[ExternalEntry]:
        """List items in a SharePoint folder.

        Path format: /sites/<SiteName>/<FolderPath>
        Root "/" lists available sites via discovery.
        """
        if not path or path == "/":
            return await self._discoverSites()

        siteId, folderPath = _parseSharepointPath(path)
        if not siteId:
            return await self._discoverSites()

        if not folderPath or folderPath == "/":
            endpoint = f"sites/{siteId}/drive/root/children"
        else:
            cleanPath = folderPath.lstrip("/")
            endpoint = f"sites/{siteId}/drive/root:/{cleanPath}:/children"

        result = await self._graphGet(endpoint)
        if "error" in result:
            logger.warning(f"SharePoint browse failed: {result['error']}")
            return []

        entries = [_graphItemToExternalEntry(item, path) for item in result.get("value", [])]
        if filter:
            entries = [e for e in entries if _matchFilter(e, filter)]
        if limit is not None:
            entries = entries[: max(1, int(limit))]
        return entries

    async def _discoverSites(self) -> List[ExternalEntry]:
        """Discover accessible SharePoint sites."""
        result = await self._graphGet("sites?search=*&$top=50")
        if "error" in result:
            logger.warning(f"SharePoint site discovery failed: {result['error']}")
            return []
        return [
            ExternalEntry(
                name=s.get("displayName") or s.get("name", ""),
                path=f"/sites/{s.get('id', '')}",
                isFolder=True,
                metadata={
                    "id": s.get("id"),
                    "webUrl": s.get("webUrl"),
                    "description": s.get("description", ""),
                },
            )
            for s in result.get("value", [])
            if s.get("displayName")
        ]

    async def download(self, path: str) -> bytes:
        siteId, filePath = _parseSharepointPath(path)
        if not siteId or not filePath:
            return b""
        cleanPath = filePath.strip("/")
        endpoint = f"sites/{siteId}/drive/root:/{cleanPath}:/content"
        data = await self._graphDownload(endpoint)
        return data or b""

    async def upload(self, path: str, data: bytes, fileName: str) -> dict:
        siteId, folderPath = _parseSharepointPath(path)
        if not siteId:
            return {"error": "Invalid SharePoint path"}
        cleanFolder = (folderPath or "").strip("/")
        uploadPath = f"{cleanFolder}/{fileName}" if cleanFolder else fileName
        endpoint = f"sites/{siteId}/drive/root:/{uploadPath}:/content"
        result = await self._graphPut(endpoint, data)
        return result

    async def search(
        self,
        query: str,
        path: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> List[ExternalEntry]:
        siteId, _ = _parseSharepointPath(path or "")
        if not siteId:
            return []
        safeQuery = query.replace("'", "''")
        endpoint = f"sites/{siteId}/drive/root/search(q='{safeQuery}')"
        result = await self._graphGet(endpoint)
        if "error" in result:
            return []
        entries = [_graphItemToExternalEntry(item) for item in result.get("value", [])]
        if limit is not None:
            entries = entries[: max(1, int(limit))]
        return entries


# ---------------------------------------------------------------------------
# Outlook Adapter
# ---------------------------------------------------------------------------

class OutlookAdapter(_GraphApiMixin, ServiceAdapter):
    """ServiceAdapter for Outlook (mail, calendar) via Microsoft Graph."""

    # Default upper bound for messages returned from a single browse() call.
    # Graph allows $top up to 1000 per page; we keep the default modest so
    # accidental "browse all" calls don't blow up the LLM context. Callers
    # (e.g. the agent's browseDataSource tool) can override via ``limit``.
    _DEFAULT_MESSAGE_LIMIT = 100
    _MAX_MESSAGE_LIMIT = 1000
    _PAGE_SIZE = 100

    async def browse(
        self,
        path: str,
        filter: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> List[ExternalEntry]:
        """List mail folders or messages.

        path = "" or "/" → list ALL top-level mail folders (paginated)
        path = "/<folderId>" → list messages in that folder (paginated, up to ``limit``)
        """
        if not path or path == "/":
            # Graph default page size for /me/mailFolders is 10. Mailboxes with
            # localized + many system folders (Posteingang, Gesendet, Archiv, …)
            # often exceed that, so the well-known Inbox can fall off the first
            # page. We page through all results AND hard-fall-back to the
            # well-known shortcut /me/mailFolders/inbox so the default folder
            # is always visible regardless of locale/order.
            folders: List[Dict[str, Any]] = []
            seenIds: set = set()
            endpoint: Optional[str] = "me/mailFolders?$top=100"
            while endpoint:
                result = await self._graphGet(endpoint)
                if "error" in result:
                    break
                for f in result.get("value", []):
                    fid = f.get("id")
                    if fid and fid not in seenIds:
                        seenIds.add(fid)
                        folders.append(f)
                nextLink = result.get("@odata.nextLink")
                if not nextLink:
                    endpoint = None
                else:
                    endpoint = _stripGraphBase(nextLink)

            # Guarantee Inbox is present (well-known name, locale-independent)
            if not any((f.get("displayName") or "").lower() in ("inbox", "posteingang") for f in folders):
                inbox = await self._graphGet("me/mailFolders/inbox")
                if "error" not in inbox and inbox.get("id") and inbox.get("id") not in seenIds:
                    folders.insert(0, inbox)

            return [
                ExternalEntry(
                    name=f.get("displayName", ""),
                    path=f"/{f.get('id', '')}",
                    isFolder=True,
                    metadata={
                        "id": f.get("id"),
                        "totalItemCount": f.get("totalItemCount"),
                        "unreadItemCount": f.get("unreadItemCount"),
                        "childFolderCount": f.get("childFolderCount"),
                    },
                )
                for f in folders
            ]

        folderId = path.strip("/")
        effectiveLimit = self._DEFAULT_MESSAGE_LIMIT if limit is None else max(1, min(int(limit), self._MAX_MESSAGE_LIMIT))
        pageSize = min(self._PAGE_SIZE, effectiveLimit)
        endpoint: Optional[str] = (
            f"me/mailFolders/{folderId}/messages"
            f"?$top={pageSize}&$orderby=receivedDateTime desc"
        )
        messages: List[Dict[str, Any]] = []
        while endpoint and len(messages) < effectiveLimit:
            result = await self._graphGet(endpoint)
            if "error" in result:
                break
            for m in result.get("value", []):
                messages.append(m)
                if len(messages) >= effectiveLimit:
                    break
            nextLink = result.get("@odata.nextLink")
            endpoint = _stripGraphBase(nextLink) if nextLink else None
        return [
            ExternalEntry(
                name=m.get("subject", "(no subject)"),
                path=f"{path}/{m.get('id', '')}",
                isFolder=False,
                metadata={
                    "id": m.get("id"),
                    "from": m.get("from", {}).get("emailAddress", {}).get("address"),
                    "receivedDateTime": m.get("receivedDateTime"),
                    "hasAttachments": m.get("hasAttachments", False),
                },
            )
            for m in messages
        ]

    async def download(self, path: str) -> DownloadResult:
        """Download a mail message as RFC 822 EML via Graph API $value endpoint."""
        import re
        messageId = path.strip("/").split("/")[-1]

        meta = await self._graphGet(f"me/messages/{messageId}?$select=subject")
        subject = meta.get("subject", messageId) if "error" not in meta else messageId
        safeName = re.sub(r'[<>:"/\\|?*\x00-\x1f]', "_", subject)[:80].strip(". ") or "email"

        emlBytes = await self._graphDownload(f"me/messages/{messageId}/$value")
        if not emlBytes:
            return DownloadResult()

        return DownloadResult(
            data=emlBytes,
            fileName=f"{safeName}.eml",
            mimeType="message/rfc822",
        )

    async def upload(self, path: str, data: bytes, fileName: str) -> dict:
        """Not applicable for Outlook in the file sense."""
        return {"error": "Upload not supported for Outlook"}

    async def search(
        self,
        query: str,
        path: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> List[ExternalEntry]:
        safeQuery = query.replace("'", "''")
        effectiveLimit = self._DEFAULT_MESSAGE_LIMIT if limit is None else max(1, min(int(limit), self._MAX_MESSAGE_LIMIT))
        # NOTE: Graph $search does not support $orderby and may return a single
        # page (no @odata.nextLink). We still pass $top to lift the implicit 25.
        endpoint = f"me/messages?$search=\"{safeQuery}\"&$top={effectiveLimit}"
        result = await self._graphGet(endpoint)
        if "error" in result:
            return []
        return [
            ExternalEntry(
                name=m.get("subject", "(no subject)"),
                path=f"/search/{m.get('id', '')}",
                isFolder=False,
                metadata={
                    "id": m.get("id"),
                    "from": m.get("from", {}).get("emailAddress", {}).get("address"),
                    "receivedDateTime": m.get("receivedDateTime"),
                },
            )
            for m in result.get("value", [])
        ]

    def _buildMessage(
        self, to: List[str], subject: str, body: str,
        bodyType: str = "Text",
        cc: Optional[List[str]] = None,
        attachments: Optional[List[Dict]] = None,
    ) -> Dict[str, Any]:
        """Build a Graph API message object.

        attachments: list of {"name": str, "contentBytes": str (base64), "contentType": str}
        """
        message: Dict[str, Any] = {
            "subject": subject,
            "body": {"contentType": bodyType, "content": body},
            "toRecipients": [{"emailAddress": {"address": addr}} for addr in to],
        }
        if cc:
            message["ccRecipients"] = [{"emailAddress": {"address": addr}} for addr in cc]
        if attachments:
            message["attachments"] = [
                {
                    "@odata.type": "#microsoft.graph.fileAttachment",
                    "name": att["name"],
                    "contentBytes": att["contentBytes"],
                    "contentType": att.get("contentType", "application/octet-stream"),
                }
                for att in attachments
            ]
        return message

    async def sendMail(
        self, to: List[str], subject: str, body: str,
        bodyType: str = "Text",
        cc: Optional[List[str]] = None,
        attachments: Optional[List[Dict]] = None,
    ) -> Dict[str, Any]:
        """Send an email via Microsoft Graph. bodyType: 'Text' or 'HTML'."""
        import json
        message = self._buildMessage(to, subject, body, bodyType, cc, attachments)
        payload = json.dumps({"message": message, "saveToSentItems": True}).encode("utf-8")
        result = await self._graphPost("me/sendMail", payload)
        if "error" in result:
            return result
        return {"success": True}

    async def createDraft(
        self, to: List[str], subject: str, body: str,
        bodyType: str = "Text",
        cc: Optional[List[str]] = None,
        attachments: Optional[List[Dict]] = None,
    ) -> Dict[str, Any]:
        """Create a draft email in the user's Drafts folder via Microsoft Graph."""
        import json
        message = self._buildMessage(to, subject, body, bodyType, cc, attachments)
        payload = json.dumps(message).encode("utf-8")
        result = await self._graphPost("me/messages", payload)
        if "error" in result:
            return result
        return {"success": True, "draft": True, "messageId": result.get("id", "")}

    # ------------------------------------------------------------------
    # Reply / Reply-All / Forward
    # ------------------------------------------------------------------
    # Microsoft Graph distinguishes between "send-immediately" endpoints
    # (``/reply``, ``/replyAll``, ``/forward``) and their "create-draft"
    # counterparts (``/createReply``, ``/createReplyAll``, ``/createForward``).
    # The send-immediately variant accepts a free-text ``comment`` string
    # that Graph prepends to the original conversation; the createReply*
    # variants return a fully-populated draft message that the caller can
    # further edit (e.g. via PATCH /me/messages/{id} with a richer body)
    # before posting via /send. We expose both flavours so the agent can
    # choose between "draft for review" and "send right now".

    async def replyToMail(
        self, messageId: str, comment: str,
        replyAll: bool = False,
    ) -> Dict[str, Any]:
        """Reply (or reply-all) to an existing message immediately.

        Preserves the conversation thread and the ``AW:`` prefix in Outlook --
        unlike sendMail() which creates a brand-new conversation.
        """
        import json
        endpointAction = "replyAll" if replyAll else "reply"
        payload = json.dumps({"comment": comment}).encode("utf-8")
        result = await self._graphPost(f"me/messages/{messageId}/{endpointAction}", payload)
        if "error" in result:
            return result
        return {"success": True, "messageId": messageId, "action": endpointAction}

    async def forwardMail(
        self, messageId: str, to: List[str], comment: str = "",
    ) -> Dict[str, Any]:
        """Forward an existing message to new recipients."""
        import json
        payload = json.dumps({
            "comment": comment,
            "toRecipients": [{"emailAddress": {"address": addr}} for addr in to],
        }).encode("utf-8")
        result = await self._graphPost(f"me/messages/{messageId}/forward", payload)
        if "error" in result:
            return result
        return {"success": True, "messageId": messageId, "action": "forward"}

    async def createReplyDraft(
        self, messageId: str, comment: str = "",
        replyAll: bool = False,
    ) -> Dict[str, Any]:
        """Create a reply-draft (in the Drafts folder) that the user can edit before sending."""
        import json
        endpointAction = "createReplyAll" if replyAll else "createReply"
        payload = json.dumps({"comment": comment}).encode("utf-8") if comment else b"{}"
        result = await self._graphPost(f"me/messages/{messageId}/{endpointAction}", payload)
        if "error" in result:
            return result
        return {"success": True, "draft": True, "messageId": result.get("id", ""), "originalMessageId": messageId}

    async def createForwardDraft(
        self, messageId: str, to: Optional[List[str]] = None, comment: str = "",
    ) -> Dict[str, Any]:
        """Create a forward-draft (in the Drafts folder) that the user can edit before sending."""
        import json
        body: Dict[str, Any] = {}
        if comment:
            body["comment"] = comment
        if to:
            body["toRecipients"] = [{"emailAddress": {"address": addr}} for addr in to]
        payload = json.dumps(body).encode("utf-8") if body else b"{}"
        result = await self._graphPost(f"me/messages/{messageId}/createForward", payload)
        if "error" in result:
            return result
        return {"success": True, "draft": True, "messageId": result.get("id", ""), "originalMessageId": messageId}

    # ------------------------------------------------------------------
    # Folder-Management & Mail-Management
    # ------------------------------------------------------------------

    # Mapping of Microsoft Graph "well-known folder names" plus a few common
    # localized display names (DE) so the LLM can write natural names like
    # "Posteingang", "Archiv", "deletedItems" without having to look up the
    # opaque mailbox folder ID first.
    _WELL_KNOWN_FOLDERS = {
        "inbox": "inbox",
        "posteingang": "inbox",
        "drafts": "drafts",
        "entwürfe": "drafts",
        "entwurf": "drafts",
        "sentitems": "sentitems",
        "gesendet": "sentitems",
        "gesendete elemente": "sentitems",
        "deleteditems": "deleteditems",
        "gelöscht": "deleteditems",
        "gelöschte elemente": "deleteditems",
        "papierkorb": "deleteditems",
        "trash": "deleteditems",
        "junkemail": "junkemail",
        "spam": "junkemail",
        "junk": "junkemail",
        "outbox": "outbox",
        "postausgang": "outbox",
        "archive": "archive",
        "archiv": "archive",
        "msgfolderroot": "msgfolderroot",
        "root": "msgfolderroot",
    }

    async def listMailFolders(self) -> List[Dict[str, Any]]:
        """List all top-level mail folders with id, name and counts.

        Returns a flat list of dicts so the caller (e.g. an LLM tool) does not
        need to know the Graph nesting model. Use ``_resolveFolderId()`` to
        translate a user-provided name into a Graph folder ID.
        """
        folders: List[Dict[str, Any]] = []
        seenIds: set = set()
        endpoint: Optional[str] = "me/mailFolders?$top=100"
        while endpoint:
            result = await self._graphGet(endpoint)
            if "error" in result:
                break
            for f in result.get("value", []):
                fid = f.get("id")
                if fid and fid not in seenIds:
                    seenIds.add(fid)
                    folders.append({
                        "id": fid,
                        "displayName": f.get("displayName", ""),
                        "totalItemCount": f.get("totalItemCount", 0),
                        "unreadItemCount": f.get("unreadItemCount", 0),
                        "childFolderCount": f.get("childFolderCount", 0),
                    })
            nextLink = result.get("@odata.nextLink")
            endpoint = _stripGraphBase(nextLink) if nextLink else None
        return folders

    async def _resolveFolderId(self, folderRef: str) -> Optional[str]:
        """Resolve any user-supplied folder reference to a Graph folder ID.

        Resolution order:
        1. If it matches a well-known shortcut (locale-aware), return that
           shortcut directly -- Graph accepts ``inbox``, ``drafts`` etc. in
           the URL path.
        2. If it looks like a Graph folder ID (long base64-ish string),
           return as-is.
        3. Otherwise fall back to a case-insensitive ``displayName`` match
           against the user's mail folders.

        Returns ``None`` if nothing matches so the caller can surface a clear
        error instead of silently moving mail into the wrong place.
        """
        if not folderRef:
            return None
        ref = folderRef.strip()
        wellKnown = self._WELL_KNOWN_FOLDERS.get(ref.lower())
        if wellKnown:
            return wellKnown
        # Heuristic: Graph folder IDs are long URL-safe base64 strings; never
        # contain spaces; and almost always include "==" or AAAAA padding.
        if len(ref) > 60 and " " not in ref:
            return ref
        for f in await self.listMailFolders():
            if (f.get("displayName") or "").strip().lower() == ref.lower():
                return f.get("id")
        return None

    async def moveMail(
        self, messageId: str, destinationFolder: str,
    ) -> Dict[str, Any]:
        """Move a message to another folder (well-known name, displayName, or folder id)."""
        import json
        destId = await self._resolveFolderId(destinationFolder)
        if not destId:
            return {"error": f"Folder not found: '{destinationFolder}'. Use listMailFolders to inspect available folders."}
        payload = json.dumps({"destinationId": destId}).encode("utf-8")
        result = await self._graphPost(f"me/messages/{messageId}/move", payload)
        if "error" in result:
            return result
        return {"success": True, "messageId": result.get("id", messageId), "destinationFolder": destinationFolder}

    async def copyMail(
        self, messageId: str, destinationFolder: str,
    ) -> Dict[str, Any]:
        """Copy a message into another folder (original stays in place)."""
        import json
        destId = await self._resolveFolderId(destinationFolder)
        if not destId:
            return {"error": f"Folder not found: '{destinationFolder}'. Use listMailFolders to inspect available folders."}
        payload = json.dumps({"destinationId": destId}).encode("utf-8")
        result = await self._graphPost(f"me/messages/{messageId}/copy", payload)
        if "error" in result:
            return result
        return {"success": True, "newMessageId": result.get("id", ""), "destinationFolder": destinationFolder}

    async def archiveMail(self, messageId: str) -> Dict[str, Any]:
        """Move a message to the user's Archive folder.

        Outlook's Archive is a regular mail folder, not a flag, so this is a
        thin convenience wrapper around :py:meth:`moveMail`.
        """
        return await self.moveMail(messageId, "archive")

    async def deleteMail(
        self, messageId: str,
        *,
        hardDelete: bool = False,
    ) -> Dict[str, Any]:
        """Delete a message.

        Default behaviour (``hardDelete=False``) moves the message to the
        ``Deleted Items`` folder, which mirrors what users see in the Outlook
        UI when they press Delete. Set ``hardDelete=True`` to perform an
        unrecoverable removal -- agent tools must require an extra
        confirmation before invoking this path.
        """
        if hardDelete:
            result = await self._graphDelete(f"me/messages/{messageId}")
            if "error" in result:
                return result
            return {"success": True, "messageId": messageId, "hardDelete": True}
        return await self.moveMail(messageId, "deleteditems")

    async def markMailAsRead(self, messageId: str) -> Dict[str, Any]:
        """Mark a message as read (sets ``isRead=true``)."""
        import json
        payload = json.dumps({"isRead": True}).encode("utf-8")
        result = await self._graphPatch(f"me/messages/{messageId}", payload)
        if "error" in result:
            return result
        return {"success": True, "messageId": messageId, "isRead": True}

    async def markMailAsUnread(self, messageId: str) -> Dict[str, Any]:
        """Mark a message as unread (sets ``isRead=false``)."""
        import json
        payload = json.dumps({"isRead": False}).encode("utf-8")
        result = await self._graphPatch(f"me/messages/{messageId}", payload)
        if "error" in result:
            return result
        return {"success": True, "messageId": messageId, "isRead": False}

    async def flagMail(
        self, messageId: str,
        *,
        flagStatus: str = "flagged",
    ) -> Dict[str, Any]:
        """Set or clear the follow-up flag on a message.

        ``flagStatus`` accepts ``"flagged"`` (default), ``"complete"`` or
        ``"notFlagged"`` -- the three values Microsoft Graph recognises for
        ``followupFlag.flagStatus``.
        """
        import json
        if flagStatus not in ("flagged", "complete", "notFlagged"):
            return {"error": f"Invalid flagStatus '{flagStatus}'. Use one of: flagged, complete, notFlagged."}
        payload = json.dumps({"flag": {"flagStatus": flagStatus}}).encode("utf-8")
        result = await self._graphPatch(f"me/messages/{messageId}", payload)
        if "error" in result:
            return result
        return {"success": True, "messageId": messageId, "flagStatus": flagStatus}


# ---------------------------------------------------------------------------
# Teams Adapter (Stub)
# ---------------------------------------------------------------------------

class TeamsAdapter(_GraphApiMixin, ServiceAdapter):
    """ServiceAdapter for Microsoft Teams -- browse joined teams and channels."""

    async def browse(
        self,
        path: str,
        filter: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> list:
        cleanPath = (path or "").strip("/")

        if not cleanPath:
            result = await self._graphGet("me/joinedTeams")
            if "error" in result:
                logger.warning(f"Teams browse failed: {result['error']}")
                return []
            return [
                ExternalEntry(
                    name=t.get("displayName", ""),
                    path=f"/{t.get('id', '')}",
                    isFolder=True,
                    metadata={"id": t.get("id"), "description": t.get("description", "")},
                )
                for t in result.get("value", [])
            ]

        parts = cleanPath.split("/", 1)
        teamId = parts[0]
        if len(parts) == 1:
            result = await self._graphGet(f"teams/{teamId}/channels")
            if "error" in result:
                return []
            return [
                ExternalEntry(
                    name=ch.get("displayName", ""),
                    path=f"/{teamId}/{ch.get('id', '')}",
                    isFolder=True,
                    metadata={"id": ch.get("id"), "membershipType": ch.get("membershipType", "")},
                )
                for ch in result.get("value", [])
            ]

        return []

    async def download(self, path: str) -> bytes:
        return b""

    async def upload(self, path: str, data: bytes, fileName: str) -> dict:
        return {"error": "Teams upload not implemented"}

    async def search(
        self,
        query: str,
        path: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> list:
        return []


# ---------------------------------------------------------------------------
# OneDrive Adapter (Stub -- similar to SharePoint but personal drive)
# ---------------------------------------------------------------------------

class OneDriveAdapter(_GraphApiMixin, ServiceAdapter):
    """ServiceAdapter stub for OneDrive (personal drive)."""

    async def browse(
        self,
        path: str,
        filter: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> List[ExternalEntry]:
        cleanPath = (path or "").strip("/")
        if not cleanPath:
            endpoint = "me/drive/root/children"
        else:
            endpoint = f"me/drive/root:/{cleanPath}:/children"

        result = await self._graphGet(endpoint)
        if "error" in result:
            return []
        entries = [_graphItemToExternalEntry(item, path) for item in result.get("value", [])]
        if filter:
            entries = [e for e in entries if _matchFilter(e, filter)]
        if limit is not None:
            entries = entries[: max(1, int(limit))]
        return entries

    async def download(self, path: str) -> bytes:
        cleanPath = (path or "").strip("/")
        if not cleanPath:
            return b""
        data = await self._graphDownload(f"me/drive/root:/{cleanPath}:/content")
        return data or b""

    async def upload(self, path: str, data: bytes, fileName: str) -> dict:
        cleanPath = (path or "").strip("/")
        uploadPath = f"{cleanPath}/{fileName}" if cleanPath else fileName
        endpoint = f"me/drive/root:/{uploadPath}:/content"
        return await self._graphPut(endpoint, data)

    async def search(
        self,
        query: str,
        path: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> List[ExternalEntry]:
        safeQuery = query.replace("'", "''")
        endpoint = f"me/drive/root/search(q='{safeQuery}')"
        result = await self._graphGet(endpoint)
        if "error" in result:
            return []
        entries = [_graphItemToExternalEntry(item) for item in result.get("value", [])]
        if limit is not None:
            entries = entries[: max(1, int(limit))]
        return entries


# ---------------------------------------------------------------------------
# MsftConnector (1:n)
# ---------------------------------------------------------------------------

class MsftConnector(ProviderConnector):
    """Microsoft ProviderConnector -- 1 connection → n services."""

    _SERVICE_MAP = {
        "sharepoint": SharepointAdapter,
        "outlook": OutlookAdapter,
        "teams": TeamsAdapter,
        "onedrive": OneDriveAdapter,
    }

    def getAvailableServices(self) -> List[str]:
        return list(self._SERVICE_MAP.keys())

    def getServiceAdapter(self, service: str) -> ServiceAdapter:
        adapterClass = self._SERVICE_MAP.get(service)
        if not adapterClass:
            raise ValueError(f"Unknown MSFT service: {service}. Available: {list(self._SERVICE_MAP.keys())}")
        return adapterClass(self.accessToken)


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

def _parseSharepointPath(path: str) -> tuple:
    """Parse a SharePoint path into (siteId, innerPath).

    Expected format: /sites/<siteId>/<innerPath>
    Also accepts bare siteId if no /sites/ prefix.
    """
    if not path:
        return ("", "")
    clean = path.strip("/")
    if clean.startswith("sites/"):
        parts = clean.split("/", 2)
        siteId = parts[1] if len(parts) > 1 else ""
        innerPath = parts[2] if len(parts) > 2 else ""
        return (siteId, innerPath)
    parts = clean.split("/", 1)
    return (parts[0], parts[1] if len(parts) > 1 else "")


def _matchFilter(entry: ExternalEntry, pattern: str) -> bool:
    """Simple glob-like filter (supports * wildcard)."""
    import fnmatch
    return fnmatch.fnmatch(entry.name.lower(), pattern.lower())
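
A short usage sketch for the Outlook adapter above (not part of the commit; it assumes an MsftConnector built from a UserConnection with authority=msft, and takes the message ID from a prior browse call):

# Illustrative sketch only.
async def demoOutlook(connector: "MsftConnector") -> None:
    outlook = connector.getServiceAdapter("outlook")      # OutlookAdapter
    messages = await outlook.browse("/inbox", limit=10)   # well-known folder name
    if not messages:
        return
    messageId = messages[0].metadata["id"]
    # "Draft for review" flavour: creates an editable draft in Drafts.
    await outlook.createReplyDraft(messageId, comment="Thanks, I'll take a look.")
    # "Send right now" flavour would be:
    #   await outlook.replyToMail(messageId, comment="Thanks!", replyAll=False)
    # Folder references accept well-known names, localized display names
    # ("Posteingang"), or raw Graph folder IDs; _resolveFolderId handles all three.
    await outlook.moveMail(messageId, "archive")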
modules/datamodels/__init__.py
@@ -1,3 +1,5 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """
 Unified modules.datamodels package.
 
@@ -8,7 +10,6 @@ Usage examples:
 from . import datamodelAi as ai
 from . import datamodelUam as uam
 from . import datamodelSecurity as security
-from . import datamodelNeutralizer as neutralizer
 from . import datamodelChat as chat
 from . import datamodelFiles as files
 from . import datamodelVoice as voice
@ -1,11 +1,11 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
from typing import Optional, List, Dict, Any, Callable, TYPE_CHECKING, Tuple
|
from typing import Optional, List, Dict, Any, Callable, TYPE_CHECKING, Tuple
|
||||||
from pydantic import BaseModel, Field, ConfigDict
|
from pydantic import BaseModel, Field, ConfigDict
|
||||||
from enum import Enum
|
from enum import Enum
|
||||||
|
|
||||||
# Import ContentPart for runtime use (needed for Pydantic model rebuilding)
|
# Import ContentPart for runtime use (needed for Pydantic model rebuilding)
|
||||||
from modules.datamodels.datamodelExtraction import ContentPart
|
from modules.datamodels.datamodelExtraction import ContentPart
|
||||||
# Import JSON utilities for safe conversion
|
|
||||||
from modules.shared.jsonUtils import extractJsonString, tryParseJson, repairBrokenJson
|
|
||||||
|
|
||||||
# Operation Types
|
# Operation Types
|
||||||
class OperationTypeEnum(str, Enum):
|
class OperationTypeEnum(str, Enum):
|
||||||
|
|
@ -22,10 +22,24 @@ class OperationTypeEnum(str, Enum):
|
||||||
IMAGE_ANALYSE = "imageAnalyse"
|
IMAGE_ANALYSE = "imageAnalyse"
|
||||||
IMAGE_GENERATE = "imageGenerate"
|
IMAGE_GENERATE = "imageGenerate"
|
||||||
|
|
||||||
|
# Neutralization (dedicated model selection; text vs vision backends)
|
||||||
|
NEUTRALIZATION_TEXT = "neutralizationText"
|
||||||
|
NEUTRALIZATION_IMAGE = "neutralizationImage"
|
||||||
|
|
||||||
# Web Operations
|
# Web Operations
|
||||||
WEB_SEARCH = "webSearch" # Returns list of URLs only
|
WEB_SEARCH_DATA = "webSearch" # Returns list of URLs only
|
||||||
WEB_CRAWL = "webCrawl" # Web crawl for a given URL
|
WEB_CRAWL = "webCrawl" # Web crawl for a given URL
|
||||||
|
|
||||||
|
# Agent Operations
|
||||||
|
AGENT = "agent" # Agent loop: reasoning + tool use
|
||||||
|
DATA_QUERY = "dataQuery" # Data query sub-agent: fast model, schema-aware
|
||||||
|
|
||||||
|
# Embedding Operations
|
||||||
|
EMBEDDING = "embedding" # Text → vector conversion for semantic search
|
||||||
|
|
||||||
|
# Speech Operations (dedicated pipeline, bypasses standard model selection)
|
||||||
|
SPEECH_TEAMS = "speechTeams" # Teams Meeting AI analysis: decide if/how to respond
|
||||||
|
|
||||||
|
|
||||||
# Operation Type Rating - Helper class for capability ratings
|
# Operation Type Rating - Helper class for capability ratings
|
||||||
class OperationTypeRating(BaseModel):
|
class OperationTypeRating(BaseModel):
|
||||||
|
|
@ -48,7 +62,7 @@ def createOperationTypeRatings(*ratings: Tuple[OperationTypeEnum, int]) -> List[
|
||||||
Usage:
|
Usage:
|
||||||
operationTypes = createOperationTypeRatings(
|
operationTypes = createOperationTypeRatings(
|
||||||
(OperationTypeEnum.DATA_ANALYSE, 8),
|
(OperationTypeEnum.DATA_ANALYSE, 8),
|
||||||
(OperationTypeEnum.WEB_SEARCH, 10),
|
(OperationTypeEnum.WEB_SEARCH_DATA, 10),
|
||||||
(OperationTypeEnum.WEB_CRAWL, 9)
|
(OperationTypeEnum.WEB_CRAWL, 9)
|
||||||
)
|
)
|
||||||
"""
|
"""
|
||||||
|
|
@ -87,6 +101,7 @@ class AiModel(BaseModel):
|
||||||
# Token and context limits
|
# Token and context limits
|
||||||
maxTokens: int = Field(description="Maximum tokens this model can generate")
|
maxTokens: int = Field(description="Maximum tokens this model can generate")
|
||||||
contextLength: int = Field(description="Maximum context length this model can handle")
|
contextLength: int = Field(description="Maximum context length this model can handle")
|
||||||
|
maxInputTokensPerRequest: Optional[int] = Field(default=None, description="Max input tokens per single request (provider rate limit / TPM). If set, model selector filters requests exceeding this limit.")
|
||||||
|
|
||||||
# Cost information
|
# Cost information
|
||||||
costPer1kTokensInput: float = Field(default=0.0, description="Cost per 1000 input tokens")
|
costPer1kTokensInput: float = Field(default=0.0, description="Cost per 1000 input tokens")
|
||||||
|
|
@ -98,7 +113,8 @@ class AiModel(BaseModel):
|
||||||
|
|
||||||
# Function reference (not serialized)
|
# Function reference (not serialized)
|
||||||
functionCall: Optional[Callable] = Field(default=None, exclude=True, description="Function to call for this model")
|
functionCall: Optional[Callable] = Field(default=None, exclude=True, description="Function to call for this model")
|
||||||
calculatePriceUsd: Optional[Callable] = Field(default=None, exclude=True, description="Function to calculate price in USD")
|
functionCallStream: Optional[Callable] = Field(default=None, exclude=True, description="Streaming function: yields str deltas, then final AiModelResponse")
|
||||||
|
calculatepriceCHF: Optional[Callable] = Field(default=None, exclude=True, description="Function to calculate price in USD")
|
||||||
|
|
||||||
# Selection criteria - capabilities with ratings
|
# Selection criteria - capabilities with ratings
|
||||||
priority: PriorityEnum = Field(default=PriorityEnum.BALANCED, description="Default priority for this model. See PriorityEnum for available values.")
|
priority: PriorityEnum = Field(default=PriorityEnum.BALANCED, description="Default priority for this model. See PriorityEnum for available values.")
|
||||||
|
|
@ -144,14 +160,21 @@ class AiCallOptions(BaseModel):
|
||||||
temperature: Optional[float] = Field(default=None, ge=0.0, le=2.0, description="Temperature for response generation (0.0-2.0, lower = more consistent)")
|
temperature: Optional[float] = Field(default=None, ge=0.0, le=2.0, description="Temperature for response generation (0.0-2.0, lower = more consistent)")
|
||||||
maxParts: Optional[int] = Field(default=1000, ge=1, le=1000, description="Maximum number of continuation parts to fetch")
|
maxParts: Optional[int] = Field(default=1000, ge=1, le=1000, description="Maximum number of continuation parts to fetch")
|
||||||
|
|
||||||
|
# Provider filtering (from UI multiselect or automation config)
|
||||||
|
allowedProviders: Optional[List[str]] = Field(default=None, description="List of allowed AI providers to use (empty = all RBAC-permitted)")
|
||||||
|
|
||||||
|
|
||||||
class AiCallRequest(BaseModel):
    """Centralized AI call request payload for interface use."""

-    prompt: str = Field(description="The user prompt")
+    prompt: str = Field(default="", description="The user prompt")
    context: Optional[str] = Field(default=None, description="Optional external context (e.g., extracted docs)")
    options: AiCallOptions = Field(default_factory=AiCallOptions)
-    contentParts: Optional[List['ContentPart']] = None  # NEW: Content parts for model-aware chunking
+    contentParts: Optional[List['ContentPart']] = None  # Content parts for model-aware chunking
+    messages: Optional[List[Dict[str, Any]]] = Field(default=None, description="OpenAI-style messages for multi-turn agent conversations")
+    tools: Optional[List[Dict[str, Any]]] = Field(default=None, description="Tool definitions for native function calling")
+    toolChoice: Optional[Any] = Field(default=None, description="Tool choice: 'auto', 'none', or specific tool (passed through to model call)")
+    requireNeutralization: Optional[bool] = Field(default=None, description="Per-request neutralization override: True=force, False=skip, None=use config")
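
A minimal sketch of populating the new function-calling fields on AiCallRequest; the weather tool schema and the module path modules.datamodels.datamodelAi are illustrative assumptions, not taken from this diff:

from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions

# Hypothetical tool definition in the OpenAI tool format these fields expect.
weatherTool = {
    "type": "function",
    "function": {
        "name": "getWeather",
        "description": "Return current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

request = AiCallRequest(
    prompt="What is the weather in Zurich?",
    options=AiCallOptions(allowedProviders=["anthropic"]),  # restrict provider selection
    tools=[weatherTool],
    toolChoice="auto",  # let the model decide whether to call the tool
)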
class AiCallResponse(BaseModel):

@ -159,19 +182,25 @@ class AiCallResponse(BaseModel):

    content: str = Field(description="AI response content")
    modelName: str = Field(description="Selected model name")
-    priceUsd: float = Field(default=0.0, description="Calculated price in USD")
+    provider: str = Field(default="unknown", description="AI provider / connectorType (anthropic, openai, perplexity, etc.)")
+    priceCHF: float = Field(default=0.0, description="Calculated price in CHF")
    processingTime: float = Field(default=0.0, description="Duration in seconds")
    bytesSent: int = Field(default=0, description="Input data size in bytes")
    bytesReceived: int = Field(default=0, description="Output data size in bytes")
    errorCount: int = Field(default=0, description="0 for success, 1+ for errors")
+    toolCalls: Optional[List[Dict[str, Any]]] = Field(default=None, description="Tool calls from native function calling")
+    metadata: Optional[Dict[str, Any]] = Field(default=None, description="Additional response metadata (e.g. embeddings vectors)")
class AiModelCall(BaseModel):
    """Standardized input for AI model calls."""

-    messages: List[Dict[str, Any]] = Field(description="Messages in OpenAI format (role, content)")
+    messages: List[Dict[str, Any]] = Field(default_factory=list, description="Messages in OpenAI format (role, content)")
    model: Optional[AiModel] = Field(default=None, description="The AI model being called")
    options: AiCallOptions = Field(default_factory=AiCallOptions, description="Additional model-specific options")
+    tools: Optional[List[Dict[str, Any]]] = Field(default=None, description="Tool definitions for native function calling")
+    toolChoice: Optional[Any] = Field(default=None, description="Tool choice: 'auto', 'none', or specific tool")
+    embeddingInput: Optional[List[str]] = Field(default=None, description="Input texts for embedding models (used instead of messages)")

    model_config = ConfigDict(arbitrary_types_allowed=True)
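
A quick sketch contrasting a chat call with an embedding call on AiModelCall; model and provider wiring are omitted, and the texts are placeholders:

chatCall = AiModelCall(
    messages=[{"role": "user", "content": "Summarize the Q3 report."}],
)

embeddingCall = AiModelCall(
    embeddingInput=["First passage to embed.", "Second passage to embed."],
    # messages now defaults to [] via default_factory, so it can be omitted here
)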
@ -195,7 +224,7 @@ class AiModelResponse(BaseModel):

# Structured prompt models for specialized operations

class AiCallPromptWebSearch(BaseModel):
-    """Structured prompt format for WEB_SEARCH operation - returns list of URLs."""
+    """Structured prompt format for WEB_SEARCH_DATA operation - returns list of URLs."""

    instruction: str = Field(description="Search instruction/query for finding relevant URLs")
    country: Optional[str] = Field(default=None, description="Two-letter country code (lowercase, e.g., ch, us, de, fr)")
@ -256,3 +285,70 @@ class JsonAccumulationState(BaseModel):

        description="KPI definitions with current values: [{id, description, jsonPath, targetValue, currentValue}, ...]"
    )

+
+class ContinuationContext(BaseModel):
+    """Pydantic model for continuation context information."""
+    section_count: int
+    delivered_summary: str
+    template_structure: Optional[str] = None
+    last_complete_part: Optional[str] = None
+    incomplete_part: Optional[str] = None
+    last_raw_json: Optional[str] = None
+    overlap_context: Optional[str] = None  # From jsonContinuation.getContexts() - innermost element containing cut
+    hierarchy_context: Optional[str] = None  # From jsonContinuation.getContexts() - full structure from root to cut
+
+
+class JsonContinuationContexts(BaseModel):
+    """
+    Pydantic model for JSON continuation contexts.
+
+    Contains contexts for truncated JSON strings:
+    - overlapContext: The innermost object/array element containing the cut point (for merging)
+    - hierarchyContext: Full structure from root to cut WITHOUT budget limitations (for internal use)
+    - hierarchyContextForPrompt: Full structure from root to cut WITH budget limitations (for prompts)
+    - completePart: Valid JSON with all structures properly closed
+    - jsonParsingSuccess: True if completePart is valid parseable JSON
+    """
+    overlapContext: str = Field(description="The innermost object/array element containing the cut point (for merging)")
+    hierarchyContext: str = Field(description="Full structure from root to cut WITHOUT budget limitations (for internal use)")
+    hierarchyContextForPrompt: str = Field(description="Full structure from root to cut WITH budget limitations (for prompts)")
+    completePart: str = Field(description="Valid JSON with all structures properly closed")
+    jsonParsingSuccess: bool = Field(default=False, description="True if completePart is valid parseable JSON")
+
+
+class SectionPromptArgs(BaseModel):
+    """Type-safe arguments for section content prompt builder."""
+    section: Dict[str, Any]
+    contentParts: List[ContentPart]
+    userPrompt: str
+    generationHint: str
+    allSections: List[Dict[str, Any]]
+    sectionIndex: int
+    isAggregation: bool
+    language: str
+
+
+class ChapterStructurePromptArgs(BaseModel):
+    """Type-safe arguments for chapter structure prompt builder."""
+    userPrompt: str
+    contentParts: List[ContentPart] = Field(default_factory=list)
+    outputFormat: str
+
+
+class CodeContentPromptArgs(BaseModel):
+    """Type-safe arguments for code content prompt builder."""
+    filename: str
+    fileType: str
+    functions: List[Dict] = Field(default_factory=list)
+    classes: List[Dict] = Field(default_factory=list)
+    dependencies: List[str] = Field(default_factory=list)
+    metadata: Dict[str, Any] = Field(default_factory=dict)
+    userPrompt: str
+    contentParts: List[ContentPart] = Field(default_factory=list)
+    contextInfo: str = ""
+
+
+class CodeStructurePromptArgs(BaseModel):
+    """Type-safe arguments for code structure prompt builder."""
+    userPrompt: str
+    contentParts: List[ContentPart] = Field(default_factory=list)
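
To make the continuation contexts concrete, a hand-worked illustration of the fields for one truncated JSON string (values written by hand from the field descriptions above, not produced by running jsonContinuation.getContexts()):

truncated = '{"report": {"sections": [{"title": "Intro", "text": "Once upo'

contexts = JsonContinuationContexts(
    overlapContext='{"title": "Intro", "text": "Once upo',  # innermost element containing the cut
    hierarchyContext='{"report": {"sections": [{"title": "Intro", ...',  # root-to-cut, no budget
    hierarchyContextForPrompt='{"report": {"sections": [...',  # root-to-cut, trimmed to budget
    completePart='{"report": {"sections": [{"title": "Intro", "text": "Once upo"}]}}',
    jsonParsingSuccess=True,  # completePart parses: every quote, bracket and brace is closed
)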
150
modules/datamodels/datamodelAiAudit.py
Normal file

@ -0,0 +1,150 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""AI Audit Log data model for compliance and AI data-flow tracking.

Records metadata (and optionally content) of every AI provider call
for compliance, audit, and data-protection reporting.
"""

import uuid
from typing import Optional

from pydantic import BaseModel, Field

from modules.shared.i18nRegistry import i18nModel
from modules.shared.timeUtils import getUtcTimestamp


@i18nModel("AI-Audit-Eintrag")
class AiAuditLogEntry(BaseModel):
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"label": "ID"},
    )
    timestamp: float = Field(
        default_factory=getUtcTimestamp,
        description="Event timestamp (UTC epoch seconds)",
        json_schema_extra={
            "label": "Zeitpunkt",
            "frontend_type": "timestamp",
            "frontend_readonly": True,
        },
    )

    userId: str = Field(
        description="ID of the user who triggered the AI call",
        json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}},
    )
    username: Optional[str] = Field(
        default=None,
        description="Username at time of call (denormalized for display)",
        json_schema_extra={"label": "Benutzername"},
    )
    mandateId: str = Field(
        description="Mandate context of the call",
        json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate"}},
    )
    featureInstanceId: Optional[str] = Field(
        default=None,
        description="Feature instance context",
        json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}},
    )
    featureCode: Optional[str] = Field(
        default=None,
        description="Feature code (e.g. workspace, trustee)",
        json_schema_extra={"label": "Feature", "fk_target": {"db": "poweron_app", "table": "Feature", "column": "code"}},
    )
    instanceLabel: Optional[str] = Field(
        default=None,
        description="Human-readable instance label at time of call",
        json_schema_extra={"label": "Instanz"},
    )

    aiProvider: str = Field(
        description="AI provider key (e.g. azure-openai, anthropic)",
        json_schema_extra={"label": "AI-Provider"},
    )
    aiModel: str = Field(
        description="Model name used (e.g. gpt-4o, claude-3.5-sonnet)",
        json_schema_extra={"label": "AI-Modell"},
    )
    operationType: Optional[str] = Field(
        default=None,
        description="Operation type (chat, embedding, image, tts, …)",
        json_schema_extra={"label": "Typ"},
    )

    tokensInput: Optional[int] = Field(
        default=None,
        description="Input tokens consumed",
        json_schema_extra={"label": "Tokens (Input)"},
    )
    tokensOutput: Optional[int] = Field(
        default=None,
        description="Output tokens consumed",
        json_schema_extra={"label": "Tokens (Output)"},
    )
    processingTimeMs: Optional[int] = Field(
        default=None,
        description="Processing time in milliseconds",
        json_schema_extra={"label": "Verarbeitungszeit (ms)"},
    )
    priceCHF: Optional[float] = Field(
        default=None,
        description="Cost in CHF (base price, before markup)",
        json_schema_extra={"label": "Kosten (CHF)"},
    )

    neutralizationActive: bool = Field(
        default=False,
        description="Whether neutralization was active for this call",
        json_schema_extra={"label": "Neutralisierung"},
    )
    neutralizationMappingsCount: Optional[int] = Field(
        default=None,
        description="Number of neutralization mappings applied",
        json_schema_extra={"label": "Neutralisierungs-Mappings"},
    )

    contentStored: bool = Field(
        default=False,
        description="Whether full content was persisted (mandate opt-in)",
        json_schema_extra={"label": "Inhalt gespeichert"},
    )
    contentInputHash: Optional[str] = Field(
        default=None,
        description="SHA-256 hash of the input content",
        json_schema_extra={"label": "Input-Hash", "frontend_visible": False},
    )
    contentInputPreview: Optional[str] = Field(
        default=None,
        description="First ~200 chars of input (always stored)",
        json_schema_extra={"label": "Input-Vorschau"},
    )
    contentOutputPreview: Optional[str] = Field(
        default=None,
        description="First ~200 chars of output (always stored)",
        json_schema_extra={"label": "Output-Vorschau"},
    )
    contentInputFull: Optional[str] = Field(
        default=None,
        description="Full input content (only if mandate opted in)",
        json_schema_extra={"label": "Vollständiger Input", "frontend_visible": False},
    )
    contentOutputFull: Optional[str] = Field(
        default=None,
        description="Full output content (only if mandate opted in)",
        json_schema_extra={"label": "Vollständiger Output", "frontend_visible": False},
    )

    success: bool = Field(
        default=True,
        description="Whether the AI call succeeded",
        json_schema_extra={"label": "Erfolgreich"},
    )
    errorMessage: Optional[str] = Field(
        default=None,
        description="Error message if the call failed",
        json_schema_extra={"label": "Fehlermeldung"},
    )
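
A sketch of how the gateway might fill an entry after one call, following the preview and hash conventions from the field descriptions (the call variables are placeholders):

import hashlib

promptText = "Summarize the attached contract ..."
answerText = "The contract covers ..."

entry = AiAuditLogEntry(
    userId="user-123",
    mandateId="mandate-456",
    aiProvider="anthropic",
    aiModel="claude-3.5-sonnet",
    operationType="chat",
    tokensInput=812,
    tokensOutput=240,
    processingTimeMs=1430,
    priceCHF=0.012,
    contentInputHash=hashlib.sha256(promptText.encode("utf-8")).hexdigest(),
    contentInputPreview=promptText[:200],    # "first ~200 chars" per the model
    contentOutputPreview=answerText[:200],
    contentStored=False,  # full content only with mandate opt-in
)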
204
modules/datamodels/datamodelAudit.py
Normal file

@ -0,0 +1,204 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Audit Log Data Model for database-based audit logging.

This model stores security-relevant audit events for GDPR compliance and security monitoring.

GDPR-Relevant Events:
- User access: login, logout, failed login attempts
- Data access: create, read, update, delete operations on personal data
- Security events: password changes, token refresh, session management
- Key access: encryption/decryption of sensitive data
- GDPR actions: data export, data portability, account deletion
- Mandate/permission changes: user added/removed from mandates, role changes
"""

from typing import Optional
from pydantic import BaseModel, Field
from enum import Enum
import uuid

from modules.shared.timeUtils import getUtcTimestamp
from modules.shared.i18nRegistry import i18nModel


class AuditCategory(str, Enum):
    """Categories for audit log entries"""
    ACCESS = "access"          # Login/logout events
    KEY = "key"                # Encryption key access
    DATA = "data"              # Data CRUD operations
    SECURITY = "security"      # Security-related events
    GDPR = "gdpr"              # GDPR-specific actions
    PERMISSION = "permission"  # Permission/role changes
    SYSTEM = "system"          # System-level events


class AuditAction(str, Enum):
    """Actions for audit log entries"""
    # Access actions
    LOGIN = "login"
    LOGIN_FAILED = "login_failed"
    LOGOUT = "logout"
    TOKEN_REFRESH = "token_refresh"
    TOKEN_REVOKE = "token_revoke"
    SESSION_EXPIRED = "session_expired"

    # Key actions
    KEY_ENCODE = "encode"
    KEY_DECODE = "decode"
    KEY_ACCESS = "key_access"

    # Data actions
    DATA_CREATE = "create"
    DATA_READ = "read"
    DATA_UPDATE = "update"
    DATA_DELETE = "delete"
    DATA_EXPORT = "export"

    # Security actions
    PASSWORD_CHANGE = "password_change"
    PASSWORD_RESET = "password_reset"
    MFA_ENABLED = "mfa_enabled"
    MFA_DISABLED = "mfa_disabled"

    # GDPR actions
    GDPR_DATA_EXPORT = "gdpr_data_export"
    GDPR_DATA_PORTABILITY = "gdpr_data_portability"
    GDPR_ACCOUNT_DELETION = "gdpr_account_deletion"
    GDPR_CONSENT_UPDATE = "gdpr_consent_update"

    # Permission actions
    USER_ADDED_TO_MANDATE = "user_added_to_mandate"
    USER_REMOVED_FROM_MANDATE = "user_removed_from_mandate"
    ROLE_ASSIGNED = "role_assigned"
    ROLE_REVOKED = "role_revoked"
    FEATURE_ACCESS_GRANTED = "feature_access_granted"
    FEATURE_ACCESS_REVOKED = "feature_access_revoked"

    # System actions
    SYSTEM_STARTUP = "system_startup"
    SYSTEM_SHUTDOWN = "system_shutdown"
    CONFIG_CHANGE = "config_change"


@i18nModel("Audit-Log-Eintrag")
class AuditLogEntry(BaseModel):
    """
    Audit log entry for database storage.

    Stores all security-relevant events for compliance and monitoring.
    Entries are immutable once created (append-only audit trail).
    """
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique identifier for the audit entry",
        json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )

    # Timestamp
    timestamp: float = Field(
        default_factory=getUtcTimestamp,
        description="UTC timestamp when the event occurred",
        json_schema_extra={"label": "Zeitstempel", "frontend_type": "datetime", "frontend_readonly": True, "frontend_required": True}
    )

    # Actor identification
    userId: str = Field(
        description="ID of the user who performed the action (or 'system' for system events)",
        json_schema_extra={
            "label": "Benutzer-ID",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": True,
            "fk_target": {"db": "poweron_app", "table": "User"},
        },
    )

    username: Optional[str] = Field(
        default=None,
        description="Username at the time of the event (for historical reference)",
        json_schema_extra={"label": "Benutzername", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )

    # Context
    mandateId: Optional[str] = Field(
        default=None,
        description="Mandate context (if applicable)",
        json_schema_extra={
            "label": "Mandanten-ID",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "fk_target": {"db": "poweron_app", "table": "Mandate"},
        },
    )

    featureInstanceId: Optional[str] = Field(
        default=None,
        description="Feature instance context (if applicable)",
        json_schema_extra={
            "label": "Feature-Instanz-ID",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "fk_target": {"db": "poweron_app", "table": "FeatureInstance"},
        },
    )

    # Event classification
    category: str = Field(
        description="Event category (access, key, data, security, gdpr, permission, system)",
        json_schema_extra={"label": "Kategorie", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
    )

    action: str = Field(
        description="Specific action performed",
        json_schema_extra={"label": "Aktion", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
    )

    # Event details
    resourceType: Optional[str] = Field(
        default=None,
        description="Type of resource affected (e.g., 'User', 'ChatWorkflow', 'TrusteeContract')",
        json_schema_extra={"label": "Ressourcentyp", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )

    resourceId: Optional[str] = Field(
        default=None,
        description="ID of the affected resource",
        json_schema_extra={"label": "Ressourcen-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )

    details: Optional[str] = Field(
        default=None,
        description="Additional details about the event",
        json_schema_extra={"label": "Details", "frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False}
    )

    # Request metadata
    ipAddress: Optional[str] = Field(
        default=None,
        description="IP address of the client",
        json_schema_extra={"label": "IP-Adresse", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )

    userAgent: Optional[str] = Field(
        default=None,
        description="User agent string from the request",
        json_schema_extra={"label": "User-Agent", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )

    # Outcome
    success: bool = Field(
        default=True,
        description="Whether the action was successful",
        json_schema_extra={"label": "Erfolgreich", "frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": True}
    )

    errorMessage: Optional[str] = Field(
        default=None,
        description="Error message if the action failed",
        json_schema_extra={"label": "Fehlermeldung", "frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False}
    )
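
A short sketch of recording a failed login with the enums above (IP and user agent are placeholder values):

failedLogin = AuditLogEntry(
    userId="user-123",
    username="p.example",
    category=AuditCategory.ACCESS.value,
    action=AuditAction.LOGIN_FAILED.value,
    ipAddress="203.0.113.7",  # documentation-range address
    userAgent="Mozilla/5.0 (example)",
    success=False,
    errorMessage="Invalid password (attempt 3 of 5)",
)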
130
modules/datamodels/datamodelBackgroundJob.py
Normal file

@ -0,0 +1,130 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Background job models: generic, reusable infrastructure for long-running tasks.

A `BackgroundJob` record tracks the lifecycle of one async task that must not block
the calling HTTP request. Any caller (HTTP route, AI tool, scheduled task) can:

1. Register a handler once via `registerJobHandler(jobType, handler)`.
2. Submit work via `startJob(jobType, payload, ...)` which returns a `jobId`
   immediately and runs the handler in the background.
3. Poll `getJobStatus(jobId)` (HTTP `GET /api/jobs/{jobId}`) until `status` is
   one of {SUCCESS, ERROR, CANCELLED}.

See `modules.serviceCenter.services.serviceBackgroundJobs.mainBackgroundJobService`.
"""

from typing import Any, Dict, Optional
from enum import Enum
from datetime import datetime, timezone
import uuid

from pydantic import Field

from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.i18nRegistry import i18nModel


class BackgroundJobStatusEnum(str, Enum):
    """Lifecycle status of a background job."""
    PENDING = "PENDING"
    RUNNING = "RUNNING"
    SUCCESS = "SUCCESS"
    ERROR = "ERROR"
    CANCELLED = "CANCELLED"


TERMINAL_JOB_STATUSES = {
    BackgroundJobStatusEnum.SUCCESS,
    BackgroundJobStatusEnum.ERROR,
    BackgroundJobStatusEnum.CANCELLED,
}


@i18nModel("Hintergrund-Job")
class BackgroundJob(PowerOnModel):
    """Generic record describing a long-running asynchronous task.

    Scope: the combination of `mandateId` and optionally `featureInstanceId`
    is used for access control on `GET /api/jobs/{jobId}`.
    """

    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"label": "ID"},
    )
    jobType: str = Field(
        ...,
        description="Handler key registered via registerJobHandler() (e.g. 'trusteeAccountingSync')",
        json_schema_extra={"label": "Typ"},
    )
    mandateId: Optional[str] = Field(
        None,
        description="Mandate scope (used for access checks). None for system-wide jobs.",
        json_schema_extra={
            "label": "Mandanten-ID",
            "fk_target": {"db": "poweron_app", "table": "Mandate"},
        },
    )
    featureInstanceId: Optional[str] = Field(
        None,
        description="Feature instance scope (optional)",
        json_schema_extra={
            "label": "Feature-Instanz",
            "fk_target": {"db": "poweron_app", "table": "FeatureInstance"},
        },
    )
    triggeredBy: Optional[str] = Field(
        None,
        description="UserId or 'ai-tool:<toolName>' / 'scheduler:<jobName>'",
        json_schema_extra={"label": "Ausgeloest von"},
    )

    status: str = Field(
        default=BackgroundJobStatusEnum.PENDING.value,
        description="Current lifecycle status",
        json_schema_extra={"label": "Status"},
    )
    progress: int = Field(
        default=0,
        description="Progress 0..100 (best-effort; may stay 0 for handlers that cannot estimate)",
        json_schema_extra={"label": "Fortschritt"},
    )
    progressMessage: Optional[str] = Field(
        None,
        description="Human-readable current step (e.g. 'Importing journal entries...')",
        json_schema_extra={"label": "Fortschritts-Nachricht"},
    )

    payload: Dict[str, Any] = Field(
        default_factory=dict,
        description="Job input parameters (JSON)",
        json_schema_extra={"label": "Eingabe"},
    )
    result: Optional[Dict[str, Any]] = Field(
        None,
        description="Handler return value on success (JSON)",
        json_schema_extra={"label": "Ergebnis"},
    )
    errorMessage: Optional[str] = Field(
        None,
        description="Truncated error message on failure (full stack trace in logs)",
        json_schema_extra={"label": "Fehler"},
    )

    createdAt: datetime = Field(
        default_factory=lambda: datetime.now(timezone.utc),
        description="When the job was submitted",
        json_schema_extra={"label": "Eingereicht"},
    )
    startedAt: Optional[datetime] = Field(
        None,
        description="When the handler began running",
        json_schema_extra={"label": "Gestartet"},
    )
    finishedAt: Optional[datetime] = Field(
        None,
        description="When the handler reached a terminal status",
        json_schema_extra={"label": "Beendet"},
    )
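
A sketch of the register/submit/poll flow the docstring describes. The three function signatures below are inferred from that docstring and the referenced service module, so treat them as assumptions:

# Assumed API, per the docstring:
#   registerJobHandler(jobType: str, handler) -> None
#   startJob(jobType: str, payload: dict, ...) -> str   (returns jobId immediately)
#   getJobStatus(jobId: str) -> BackgroundJob

def syncHandler(payload: dict) -> dict:
    # long-running work, e.g. importing journal entries
    return {"imported": 42}

registerJobHandler("trusteeAccountingSync", syncHandler)           # once at startup
jobId = startJob("trusteeAccountingSync", payload={"year": 2025})  # returns immediately

job = getJobStatus(jobId)  # poll until a terminal status is reached
isDone = job.status in {s.value for s in TERMINAL_JOB_STATUSES}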
74
modules/datamodels/datamodelBase.py
Normal file

@ -0,0 +1,74 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Base Pydantic model with system-managed fields (DB + API + UI metadata)."""

from typing import Dict, Optional, Type

from pydantic import BaseModel, Field

from modules.shared.i18nRegistry import i18nModel

_MODEL_REGISTRY: Dict[str, Type["PowerOnModel"]] = {}


def _getModelByTableName(tableName: str) -> Optional[Type["PowerOnModel"]]:
    """Look up a PowerOnModel subclass by its table name (= class name)."""
    return _MODEL_REGISTRY.get(tableName)


@i18nModel("Basisdatensatz")
class PowerOnModel(BaseModel):
    """Base data model with system audit fields for all DB tables."""

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        _MODEL_REGISTRY[cls.__name__] = cls

    sysCreatedAt: Optional[float] = Field(
        default=None,
        description="Record creation timestamp (UTC, set by system)",
        json_schema_extra={
            "label": "Erstellt am",
            "frontend_type": "timestamp",
            "frontend_readonly": True,
            "frontend_required": False,
            "frontend_visible": False,
            "system": True,
        },
    )
    sysCreatedBy: Optional[str] = Field(
        default=None,
        description="User ID who created this record (set by system)",
        json_schema_extra={
            "label": "Erstellt von",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "frontend_visible": False,
            "system": True,
        },
    )
    sysModifiedAt: Optional[float] = Field(
        default=None,
        description="Record last modification timestamp (UTC, set by system)",
        json_schema_extra={
            "label": "Geaendert am",
            "frontend_type": "timestamp",
            "frontend_readonly": True,
            "frontend_required": False,
            "frontend_visible": False,
            "system": True,
        },
    )
    sysModifiedBy: Optional[str] = Field(
        default=None,
        description="User ID who last modified this record (set by system)",
        json_schema_extra={
            "label": "Geaendert von",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "frontend_visible": False,
            "system": True,
        },
    )
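
Because __init_subclass__ registers every subclass under its class name, table-name lookup needs no explicit registration call; a minimal sketch:

class DemoRecord(PowerOnModel):  # registration happens here, at class creation time
    name: str = "example"

assert _getModelByTableName("DemoRecord") is DemoRecord
assert _getModelByTableName("NoSuchTable") is None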
278
modules/datamodels/datamodelBilling.py
Normal file

@ -0,0 +1,278 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Billing models: BillingAccount, BillingTransaction, BillingSettings, UsageStatistics."""

from typing import List, Dict, Any, Optional
from enum import Enum
from datetime import date, datetime, timezone
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.i18nRegistry import i18nModel
import uuid

# End-customer price for storage above plan-included volume (CHF per GB per month).
STORAGE_PRICE_PER_GB_CHF = 0.50


class TransactionTypeEnum(str, Enum):
    """Transaction types for billing."""
    CREDIT = "CREDIT"          # Credit/top-up (positive)
    DEBIT = "DEBIT"            # Debit/usage (positive amount, reduces balance)
    ADJUSTMENT = "ADJUSTMENT"  # Manual adjustment by admin


class ReferenceTypeEnum(str, Enum):
    """Reference types for transactions."""
    WORKFLOW = "WORKFLOW"          # AI workflow usage
    PAYMENT = "PAYMENT"            # Payment/top-up
    ADMIN = "ADMIN"                # Admin adjustment
    SYSTEM = "SYSTEM"              # System credit (e.g., initial credit)
    STORAGE = "STORAGE"            # Metered storage overage (prepay pool)
    SUBSCRIPTION = "SUBSCRIPTION"  # AI budget credit from subscription plan


class PeriodTypeEnum(str, Enum):
    """Period types for usage statistics."""
    DAY = "DAY"
    MONTH = "MONTH"
    YEAR = "YEAR"


@i18nModel("Abrechnungskonto")
class BillingAccount(PowerOnModel):
    """Billing account for mandate or user-mandate combination."""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"label": "ID"},
    )
    mandateId: str = Field(
        ...,
        description="Foreign key to Mandate",
        json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate"}},
    )
    userId: Optional[str] = Field(
        None,
        description="Foreign key to User (None = mandate pool account, set = user audit account)",
        json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}},
    )
    balance: float = Field(default=0.0, description="Current balance in CHF", json_schema_extra={"label": "Guthaben (CHF)"})
    warningThreshold: float = Field(
        default=0.0,
        description="Warning threshold in CHF",
        json_schema_extra={"label": "Warnschwelle (CHF)"},
    )
    lastWarningAt: Optional[datetime] = Field(
        None,
        description="Last warning sent timestamp",
        json_schema_extra={"label": "Letzte Warnung"},
    )
    enabled: bool = Field(default=True, description="Account is active", json_schema_extra={"label": "Aktiv"})


@i18nModel("Transaktion")
class BillingTransaction(PowerOnModel):
    """Single billing transaction (credit, debit, adjustment)."""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"label": "ID"},
    )
    accountId: str = Field(
        ...,
        description="Foreign key to BillingAccount",
        json_schema_extra={"label": "Konto-ID", "fk_target": {"db": "poweron_billing", "table": "BillingAccount"}},
    )
    transactionType: TransactionTypeEnum = Field(..., description="Transaction type", json_schema_extra={"label": "Typ"})
    amount: float = Field(..., description="Amount in CHF (always positive)", json_schema_extra={"label": "Betrag (CHF)"})
    description: str = Field(..., description="Transaction description", json_schema_extra={"label": "Beschreibung"})

    # Reference to source
    referenceType: Optional[ReferenceTypeEnum] = Field(None, description="Reference type", json_schema_extra={"label": "Referenztyp"})
    referenceId: Optional[str] = Field(None, description="Reference ID", json_schema_extra={"label": "Referenz-ID"})

    # Context for workflow transactions
    workflowId: Optional[str] = Field(
        None,
        description="Workflow ID (for WORKFLOW transactions; may be Chat or Graphical Editor)",
        json_schema_extra={"label": "Workflow-ID"},
    )
    featureInstanceId: Optional[str] = Field(
        None,
        description="Feature instance ID",
        json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}},
    )
    featureCode: Optional[str] = Field(
        None,
        description="Feature code (e.g., automation)",
        json_schema_extra={"label": "Feature-Code", "fk_target": {"db": "poweron_app", "table": "Feature", "column": "code"}},
    )
    aicoreProvider: Optional[str] = Field(None, description="AICore provider (anthropic, openai, etc.)", json_schema_extra={"label": "AI-Anbieter"})
    aicoreModel: Optional[str] = Field(None, description="AICore model name (e.g., claude-4-sonnet, gpt-4o)", json_schema_extra={"label": "AI-Modell"})
    createdByUserId: Optional[str] = Field(
        None,
        description="User who created/caused this transaction",
        json_schema_extra={"label": "Erstellt von Benutzer", "fk_target": {"db": "poweron_app", "table": "User"}},
    )

    # AI call metadata (for per-call analytics)
    processingTime: Optional[float] = Field(None, description="Processing time in seconds", json_schema_extra={"label": "Verarbeitungszeit (s)"})
    bytesSent: Optional[int] = Field(None, description="Bytes sent to AI model", json_schema_extra={"label": "Gesendete Bytes"})
    bytesReceived: Optional[int] = Field(None, description="Bytes received from AI model", json_schema_extra={"label": "Empfangene Bytes"})
    errorCount: Optional[int] = Field(None, description="Number of errors in this call", json_schema_extra={"label": "Fehleranzahl"})


@i18nModel("Abrechnungseinstellungen")
class BillingSettings(BaseModel):
    """Billing settings per mandate. Only PREPAY_MANDATE model."""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"label": "ID"},
    )
    mandateId: str = Field(
        ...,
        description="Foreign key to Mandate (UNIQUE)",
        json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate"}},
    )

    warningThresholdPercent: float = Field(
        default=10.0,
        description="Warning threshold as percentage",
        json_schema_extra={"label": "Warnschwelle (%)"},
    )

    # Stripe
    stripeCustomerId: Optional[str] = Field(
        None,
        description="Stripe Customer ID (cus_xxx) — one per mandate",
        json_schema_extra={"label": "Stripe-Kunden-ID"},
    )

    # Auto-recharge for AI budget
    autoRechargeEnabled: bool = Field(default=False, description="Auto-buy AI budget when low", json_schema_extra={"label": "Auto-Nachladung"})
    rechargeAmountCHF: float = Field(
        default=10.0,
        description="Amount per auto-recharge (CHF, prepaid via Stripe)",
        json_schema_extra={"label": "Nachladebetrag (CHF)"},
    )
    rechargeMaxPerMonth: int = Field(default=3, description="Max auto-recharges per month", json_schema_extra={"label": "Max. Nachladungen/Monat"})
    rechargesThisMonth: int = Field(default=0, description="Counter: auto-recharges used this month", json_schema_extra={"label": "Nachladungen diesen Monat"})
    monthResetAt: Optional[datetime] = Field(None, description="When rechargesThisMonth was last reset", json_schema_extra={"label": "Monats-Reset"})

    # Notifications
    notifyEmails: List[str] = Field(
        default_factory=list,
        description="Email addresses for billing alerts (pool exhausted, warnings, etc.)",
        json_schema_extra={"label": "E-Mails fuer Billing-Alerts (Inhaber/Admin)"},
    )
    notifyOnWarning: bool = Field(default=True, description="Send email when warning threshold is reached", json_schema_extra={"label": "Bei Warnung benachrichtigen"})

    # Storage overage (high-watermark within subscription period; resets on new period)
    storageHighWatermarkMB: float = Field(
        default=0.0,
        description="Peak indexed data volume MB this billing period",
        json_schema_extra={"label": "Speicher-Peak (MB)"},
    )
    storagePeriodStartAt: Optional[datetime] = Field(
        None,
        description="Subscription billing period start used for storage reset",
        json_schema_extra={"label": "Speicher-Periodenbeginn"},
    )
    storageBilledUpToMB: float = Field(
        default=0.0,
        description="Overage MB already debited this period (above plan-included volume)",
        json_schema_extra={"label": "Speicher abgerechneter Überhang (MB)"},
    )


class StripeWebhookEvent(BaseModel):
    """Stores processed Stripe webhook event IDs for idempotency."""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
    )
    event_id: str = Field(..., description="Stripe event ID (evt_xxx)")
    processed_at: datetime = Field(
        default_factory=lambda: datetime.now(timezone.utc),
        description="When the event was processed",
    )


@i18nModel("Nutzungsstatistik")
class UsageStatistics(BaseModel):
    """Aggregated usage statistics for quick retrieval."""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"label": "ID"},
    )
    accountId: str = Field(
        ...,
        description="Foreign key to BillingAccount",
        json_schema_extra={"label": "Konto-ID", "fk_target": {"db": "poweron_billing", "table": "BillingAccount"}},
    )
    periodType: PeriodTypeEnum = Field(..., description="Period type", json_schema_extra={"label": "Periodentyp"})
    periodStart: date = Field(..., description="Period start date", json_schema_extra={"label": "Periodenbeginn"})

    # Aggregated values
    totalCostCHF: float = Field(default=0.0, description="Total cost in CHF", json_schema_extra={"label": "Gesamtkosten (CHF)"})
    transactionCount: int = Field(default=0, description="Number of transactions", json_schema_extra={"label": "Anzahl Transaktionen"})

    # Breakdown by provider
    costByProvider: Dict[str, float] = Field(
        default_factory=dict,
        description="Cost breakdown by provider (e.g., {'anthropic': 12.50, 'openai': 8.30})",
        json_schema_extra={"label": "Kosten nach Anbieter"},
    )

    # Breakdown by feature
    costByFeature: Dict[str, float] = Field(
        default_factory=dict,
        description="Cost breakdown by feature (e.g., {'automation': 5.80, 'workspace': 3.20})",
        json_schema_extra={"label": "Kosten nach Feature"},
    )


# ============================================================================
# Response Models for API
# ============================================================================

class BillingBalanceResponse(BaseModel):
    """Response model for balance endpoint."""
    mandateId: str
    mandateName: str
    balance: float
    currency: str = "CHF"
    warningThreshold: float
    isWarning: bool


class BillingStatisticsChartData(BaseModel):
    """Chart data point for statistics."""
    label: str
    totalCost: float
    byProvider: Dict[str, float]


class BillingStatisticsResponse(BaseModel):
    """Response model for statistics endpoint."""
    mandateId: str
    period: PeriodTypeEnum
    year: int
    month: Optional[int] = None
    currency: str = "CHF"
    data: List[BillingStatisticsChartData]
    totals: Dict[str, Any]


class BillingCheckResult(BaseModel):
    """Result of a billing balance check (budget + subscription gate)."""
    allowed: bool
    reason: Optional[str] = None
    currentBalance: Optional[float] = None
    requiredAmount: Optional[float] = None
    upgradeRequired: Optional[bool] = None
    subscriptionUiPath: Optional[str] = None
    userAction: Optional[str] = None
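
A hand-worked sketch of the high-watermark storage overage using STORAGE_PRICE_PER_GB_CHF (the 10 GB plan-included volume is an assumed figure for illustration):

includedGB = 10.0  # assumed plan-included volume
peakGB = 12.4      # storageHighWatermarkMB / 1024
billedGB = 1.0     # storageBilledUpToMB / 1024, already debited this period

overageToDebitGB = max(0.0, (peakGB - includedGB) - billedGB)  # 1.4 GB still to bill
amountCHF = overageToDebitGB * STORAGE_PRICE_PER_GB_CHF        # 1.4 * 0.50 = 0.70 CHF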
File diff suppressed because it is too large
61
modules/datamodels/datamodelContent.py
Normal file

@ -0,0 +1,61 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Content Object data models for the container and content extraction pipeline.

Physical layer: Container hierarchy (ZIP, Folder, File)
Logical layer: Scalar content objects (text, image, videostream, audiostream, other)

The entire extraction pipeline up to ContentObjects runs without AI.
"""

from typing import Dict, Any, List, Optional
from pydantic import BaseModel, Field
import uuid


class ContainerLimitError(Exception):
    """Raised when container extraction exceeds safety limits (size, depth, file count)."""
    pass


class ContentContextRef(BaseModel):
    """Reference to the origin context within a container/file."""
    containerPath: str = Field(description="e.g. 'archiv.zip/folder-a/report.pdf'")
    location: str = Field(default="", description="e.g. 'page:5/region:bottomLeft'")
    label: Optional[str] = Field(default=None, description="e.g. 'Abbildung 3: Uebersicht'")
    pageIndex: Optional[int] = Field(default=None, description="Page number (PDF, DOCX)")
    sectionId: Optional[str] = Field(default=None, description="Section/Heading ID")
    sheetName: Optional[str] = Field(default=None, description="Sheet name (XLSX)")
    slideIndex: Optional[int] = Field(default=None, description="Slide number (PPTX)")


class ContentObject(BaseModel):
    """Scalar content object extracted from a file. No AI involved."""
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    fileId: str = Field(
        description="FK to the physical file",
        json_schema_extra={"fk_target": {"db": "poweron_management", "table": "FileItem"}},
    )
    contentType: str = Field(description="text, image, videostream, audiostream, other")
    data: str = Field(default="", description="Content data (text, base64, URL)")
    contextRef: ContentContextRef = Field(default_factory=ContentContextRef)
    metadata: Dict[str, Any] = Field(default_factory=dict)
    sequence: int = Field(default=0, description="Order within the context")


class ContentObjectSummary(BaseModel):
    """Compact description of a content object for the FileContentIndex."""
    id: str = Field(description="Content object ID")
    contentType: str = Field(description="text, image, videostream, audiostream, other")
    contextRef: ContentContextRef = Field(default_factory=ContentContextRef)
    charCount: Optional[int] = Field(default=None, description="Only for text")
    dimensions: Optional[str] = Field(default=None, description="Only for image/video (e.g. '1920x1080')")
    duration: Optional[float] = Field(default=None, description="Only for audio/video (seconds)")


class FileEntry(BaseModel):
    """A file extracted from a container (ZIP, TAR, Folder)."""
    path: str = Field(description="Relative path within the container")
    data: bytes = Field(description="File content bytes")
    mimeType: str = Field(description="Detected MIME type")
    size: int = Field(description="File size in bytes")
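
A sketch of one extracted text object together with its origin reference (the path and IDs are invented for illustration):

textObj = ContentObject(
    fileId="file-789",
    contentType="text",
    data="Quarterly revenue grew by 8 percent ...",
    contextRef=ContentContextRef(
        containerPath="archiv.zip/folder-a/report.pdf",
        location="page:5/region:bottomLeft",
        pageIndex=5,
    ),
    sequence=0,
)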
95
modules/datamodels/datamodelDataSource.py
Normal file

@ -0,0 +1,95 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""DataSource and ExternalEntry models for external data integration.

DataSource links a UserConnection to an external path (SharePoint folder,
Google Drive folder, FTP directory, etc.) for agent-accessible data containers.
"""

from typing import Dict, Any, Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.i18nRegistry import i18nModel
import uuid


@i18nModel("Datenquelle")
class DataSource(PowerOnModel):
    """Configured external data source linked to a UserConnection."""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"label": "ID"},
    )
    connectionId: str = Field(
        description="FK to UserConnection",
        json_schema_extra={"label": "Verbindungs-ID", "fk_target": {"db": "poweron_app", "table": "UserConnection"}},
    )
    sourceType: str = Field(
        description="sharepointFolder, googleDriveFolder, outlookFolder, ftpFolder, clickupList (path under /team/...)",
        json_schema_extra={"label": "Quellentyp"},
    )
    path: str = Field(
        description="External path (e.g. '/sites/MySite/Documents/Reports')",
        json_schema_extra={"label": "Pfad"},
    )
    label: str = Field(
        description="User-visible label (often the last path segment)",
        json_schema_extra={"label": "Bezeichnung"},
    )
    displayPath: Optional[str] = Field(
        default=None,
        description="Human-readable full path for UI (connection-relative, slash-separated)",
        json_schema_extra={"label": "Anzeigepfad"},
    )
    featureInstanceId: Optional[str] = Field(
        default=None,
        description="Scoped to feature instance",
        json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}},
    )
    mandateId: Optional[str] = Field(
        default=None,
        description="Mandate scope",
        json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate"}},
    )
    userId: str = Field(
        default="",
        description="Owner user ID",
        json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}},
    )
    autoSync: bool = Field(
        default=False,
        description="Automatically sync on schedule",
        json_schema_extra={"label": "Auto-Sync"},
    )
    lastSynced: Optional[float] = Field(
        default=None,
        description="Last sync timestamp",
        json_schema_extra={"label": "Letzter Sync"},
    )
    scope: str = Field(
        default="personal",
        description="Data visibility scope: personal, featureInstance, mandate, global",
        json_schema_extra={"label": "Sichtbarkeit", "frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
            {"value": "personal", "label": "Persönlich"},
            {"value": "featureInstance", "label": "Feature-Instanz"},
            {"value": "mandate", "label": "Mandant"},
            {"value": "global", "label": "Global"},
        ]},
    )
    neutralize: bool = Field(
        default=False,
        description="Whether this data source should be neutralized before AI processing",
        json_schema_extra={"label": "Neutralisieren", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False},
    )


class ExternalEntry(BaseModel):
    """An item (file or folder) from an external data source."""
    name: str = Field(description="Item name")
    path: str = Field(description="Full path within the source")
    isFolder: bool = Field(default=False, description="True if directory/folder")
    size: Optional[int] = Field(default=None, description="File size in bytes")
    mimeType: Optional[str] = Field(default=None, description="MIME type (files only)")
    lastModified: Optional[float] = Field(default=None, description="Last modification timestamp")
    metadata: Dict[str, Any] = Field(default_factory=dict, description="Provider-specific metadata")
@@ -1,10 +1,12 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """
 Document reference models for typed document references in workflows.
 """
 
 from typing import List, Optional
 from pydantic import BaseModel, Field
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
 
 
 class DocumentReference(BaseModel):
@@ -12,10 +14,18 @@ class DocumentReference(BaseModel):
     pass
 
 
+@i18nModel("Dokumentlisten-Referenz")
 class DocumentListReference(DocumentReference):
     """Reference to a document list via message label"""
-    messageId: Optional[str] = Field(None, description="Optional message ID for cross-round references")
-    label: str = Field(description="Document list label")
+    messageId: Optional[str] = Field(
+        None,
+        description="Optional message ID for cross-round references",
+        json_schema_extra={"label": "Nachrichten-ID"},
+    )
+    label: str = Field(
+        description="Document list label",
+        json_schema_extra={"label": "Bezeichnung"},
+    )
 
     def to_string(self) -> str:
         """Convert to string format: docList:messageId:label or docList:label"""
@@ -24,10 +34,18 @@ class DocumentListReference(DocumentReference):
         return f"docList:{self.label}"
 
 
+@i18nModel("Dokumentelement-Referenz")
 class DocumentItemReference(DocumentReference):
     """Reference to a specific document item"""
-    documentId: str = Field(description="Document ID")
-    fileName: Optional[str] = Field(None, description="Optional file name")
+    documentId: str = Field(
+        description="Document ID",
+        json_schema_extra={"label": "Dokument-ID"},
+    )
+    fileName: Optional[str] = Field(
+        None,
+        description="Optional file name",
+        json_schema_extra={"label": "Dateiname"},
+    )
 
     def to_string(self) -> str:
         """Convert to string format: docItem:documentId:fileName or docItem:documentId"""
@@ -36,11 +54,13 @@ class DocumentItemReference(DocumentReference):
         return f"docItem:{self.documentId}"
 
 
+@i18nModel("Dokumentreferenz-Liste")
 class DocumentReferenceList(BaseModel):
     """List of document references with conversion methods"""
     references: List[DocumentReference] = Field(
         default_factory=list,
-        description="List of document references"
+        description="List of document references",
+        json_schema_extra={"label": "Referenzen"},
     )
 
     def to_string_list(self) -> List[str]:
@@ -95,24 +115,3 @@ class DocumentReferenceList(BaseModel):
             references.append(DocumentListReference(label=refStr))
 
         return cls(references=references)
-
-
-registerModelLabels(
-    "DocumentReference",
-    {"en": "Document Reference", "fr": "Référence de document"},
-    {
-        "messageId": {"en": "Message ID", "fr": "ID du message"},
-        "label": {"en": "Label", "fr": "Étiquette"},
-        "documentId": {"en": "Document ID", "fr": "ID du document"},
-        "fileName": {"en": "File Name", "fr": "Nom du fichier"},
-    },
-)
-
-registerModelLabels(
-    "DocumentReferenceList",
-    {"en": "Document Reference List", "fr": "Liste de références de documents"},
-    {
-        "references": {"en": "References", "fr": "Références"},
-    },
-)
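A quick sketch of the string forms implied by the to_string docstrings above (constructor values are illustrative; the parsing direction lives in the classmethod whose tail survives in the last hunk):

ref1 = DocumentListReference(label="summaries")                   # -> "docList:summaries"
ref2 = DocumentListReference(messageId="m42", label="summaries")  # -> "docList:m42:summaries"
ref3 = DocumentItemReference(documentId="d7", fileName="a.pdf")   # -> "docItem:d7:a.pdf"
assert ref1.to_string() == "docList:summaries"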
@@ -1,5 +1,7 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 from typing import Any, Dict, List, Optional, Literal, Union
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, field_serializer
 from datetime import datetime
 
@@ -11,6 +13,8 @@ class DocumentMetadata(BaseModel):
     sourceDocuments: List[str] = Field(default_factory=list, description="Source document IDs")
     extractionMethod: str = Field(default="ai_extraction", description="Method used for extraction")
     version: str = Field(default="1.0", description="Document version")
+    documentType: Optional[str] = Field(default=None, description="Type of document (e.g., 'report', 'invoice', 'analysis')")
+    styles: Optional[Dict[str, Any]] = Field(default=None, description="Document styling configuration")
 
 
 class TableData(BaseModel):
@@ -105,5 +109,20 @@ class StructuredDocument(BaseModel):
 
 
+class RenderedDocument(BaseModel):
+    """A single rendered document from a renderer."""
+    documentData: bytes = Field(description="Document content as bytes")
+    mimeType: str = Field(description="MIME type of the document (e.g., 'text/html', 'application/pdf')")
+    filename: str = Field(description="Filename for the document (e.g., 'report.html', 'image.png')")
+    documentType: Optional[str] = Field(default=None, description="Type of document (e.g., 'report', 'invoice', 'analysis')")
+    metadata: Optional[Dict[str, Any]] = Field(default=None, description="Document metadata (title, author, etc.)")
+
+    @field_serializer("documentData")
+    def _serializeDocumentData(self, v: bytes) -> str:
+        if isinstance(v, bytes):
+            return v.decode("utf-8", errors="replace")
+        return str(v)
+
+
 # Update forward references
 ListItem.model_rebuild()
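A minimal sketch of what the field_serializer above changes: dumping a RenderedDocument yields a str for documentData instead of raw bytes, so the model is JSON-safe. Note that errors="replace" makes this lossy for truly binary payloads such as PDFs; that is what the code does, not an added assumption.

doc = RenderedDocument(
    documentData=b"<html><body>Report</body></html>",
    mimeType="text/html",
    filename="report.html",
)
dumped = doc.model_dump()  # documentData -> "<html><body>Report</body></html>"
assert isinstance(dumped["documentData"], str)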
@@ -1,3 +1,5 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 from typing import Any, Dict, List, Optional, Literal
 from pydantic import BaseModel, Field
 
@@ -16,6 +18,7 @@ class ContentExtracted(BaseModel):
     id: str = Field(description="Extraction id or source document id")
     parts: List[ContentPart] = Field(default_factory=list, description="List of extracted parts")
     summary: Optional[Dict[str, Any]] = Field(default=None, description="Optional extraction summary")
+    udm: Optional[Any] = Field(default=None, description="Optional UdmDocument (when outputFormat is udm or both)")
 
 
 class ChunkResult(BaseModel):
@@ -59,19 +62,40 @@ class MergeStrategy(BaseModel):
     capabilities: Optional[Dict[str, Any]] = Field(default=None, description="Model capabilities for intelligent merging")
 
 
+class DocumentIntent(BaseModel):
+    """Intent analysis for a single document"""
+    documentId: str = Field(description="ID of the document")
+    intents: List[str] = Field(description="List of intents: ['extract', 'render', 'reference'] - several are possible")
+    extractionPrompt: Optional[str] = Field(default=None, description="Specific prompt for extraction (e.g. 'Extract text from images for legends')")
+    reasoning: str = Field(description="Explanation for debugging/transparency: why this intent was chosen")
+
+
 class ExtractionOptions(BaseModel):
     """Options for document extraction and processing with clear data structures."""
 
     # Core extraction parameters
-    prompt: str = Field(description="Extraction prompt for AI processing")
+    prompt: str = Field(default="", description="Extraction prompt for AI processing")
     processDocumentsIndividually: bool = Field(default=True, description="Process each document separately")
 
+    outputFormat: Literal["parts", "udm", "both"] = Field(
+        default="parts",
+        description="Return flat parts only, UDM tree only, or both (parts always populated; udm when udm or both)",
+    )
+    outputDetail: Literal["full", "structure", "references"] = Field(
+        default="full",
+        description="Extraction detail: full inline data, skeleton without raw payloads, or file references only",
+    )
+    lazyContainer: bool = Field(
+        default=False,
+        description="For archives: emit file entries with metadata only (no nested extraction)",
+    )
+
     # Image processing parameters
     imageMaxPixels: int = Field(default=1024 * 1024, ge=1, description="Maximum pixels for image processing")
     imageQuality: int = Field(default=85, ge=1, le=100, description="Image quality (1-100)")
 
     # Merging strategy
-    mergeStrategy: MergeStrategy = Field(description="Strategy for merging extraction results")
+    mergeStrategy: MergeStrategy = Field(default_factory=MergeStrategy, description="Strategy for merging extraction results")
 
     # Optional chunking parameters (for backward compatibility)
     chunkAllowed: Optional[bool] = Field(default=None, description="Whether chunking is allowed")
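Because prompt now has a default and mergeStrategy a default_factory, the options object becomes constructible with no arguments. A sketch (assuming MergeStrategy itself defaults all of its fields, which this hunk does not show):

opts = ExtractionOptions()
assert opts.outputFormat == "parts" and opts.outputDetail == "full"

# Structure-only pass over an archive, without nested extraction:
skeleton = ExtractionOptions(outputDetail="structure", lazyContainer=True)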
modules/datamodels/datamodelFeatureDataSource.py (new file, 82 lines)
@@ -0,0 +1,82 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""FeatureDataSource model for exposing feature instance data to the AI workspace.

A FeatureDataSource links a FeatureInstance table (DATA_OBJECT) to a workspace
so the agent can query structured feature data (e.g. TrusteePosition rows).
"""

from typing import Dict, List, Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.i18nRegistry import i18nModel
import uuid


@i18nModel("Feature-Datenquelle")
class FeatureDataSource(PowerOnModel):
    """Feature instance table as a data source in the AI workspace."""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"label": "ID"},
    )
    featureInstanceId: str = Field(
        description="FK to FeatureInstance",
        json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}},
    )
    featureCode: str = Field(
        description="Feature code (e.g. trustee, commcoach)",
        json_schema_extra={"label": "Feature", "fk_target": {"db": "poweron_app", "table": "Feature", "column": "code"}},
    )
    tableName: str = Field(
        description="Table name from DATA_OBJECTS meta (e.g. TrusteePosition)",
        json_schema_extra={"label": "Tabelle"},
    )
    objectKey: str = Field(
        description="RBAC object key (e.g. data.feature.trustee.TrusteePosition)",
        json_schema_extra={"label": "Objekt-Schluessel"},
    )
    label: str = Field(
        description="User-visible label",
        json_schema_extra={"label": "Bezeichnung"},
    )
    mandateId: str = Field(
        default="",
        description="Mandate scope",
        json_schema_extra={"label": "Mandant", "fk_target": {"db": "poweron_app", "table": "Mandate"}},
    )
    userId: str = Field(
        default="",
        description="Owner user ID",
        json_schema_extra={"label": "Benutzer", "fk_target": {"db": "poweron_app", "table": "User"}},
    )
    workspaceInstanceId: str = Field(
        description="Workspace feature instance where this source is used",
        json_schema_extra={"label": "Workspace", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}},
    )
    scope: str = Field(
        default="personal",
        description="Data visibility scope: personal, featureInstance, mandate, global",
        json_schema_extra={"label": "Sichtbarkeit", "frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
            {"value": "personal", "label": "Persönlich"},
            {"value": "featureInstance", "label": "Feature-Instanz"},
            {"value": "mandate", "label": "Mandant"},
            {"value": "global", "label": "Global"},
        ]},
    )
    neutralize: bool = Field(
        default=False,
        description="Whether this data source should be neutralized before AI processing",
        json_schema_extra={"label": "Neutralisieren", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False},
    )
    neutralizeFields: Optional[List[str]] = Field(
        default=None,
        description="Column names whose values are replaced with placeholders before AI processing",
        json_schema_extra={"label": "Zu neutralisierende Felder", "frontend_type": "multiselect", "frontend_readonly": False, "frontend_required": False},
    )
    recordFilter: Optional[Dict[str, str]] = Field(
        default=None,
        description="Record-level filter applied when querying this table, e.g. {'sessionId': 'abc-123'}",
        json_schema_extra={"label": "Datensatzfilter"},
    )
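A construction sketch (identifiers are illustrative, and it assumes the PowerOnModel base supplies any remaining defaults): registering a feature table as a mandate-scoped data source with one neutralized column and a record filter.

source = FeatureDataSource(
    featureInstanceId="fi-trustee-01",
    featureCode="trustee",
    tableName="TrusteePosition",
    objectKey="data.feature.trustee.TrusteePosition",
    label="Trustee positions 2025",
    workspaceInstanceId="fi-workspace-01",
    scope="mandate",
    neutralize=True,
    neutralizeFields=["clientName"],
    recordFilter={"sessionId": "abc-123"},
)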
modules/datamodels/datamodelFeatures.py (new file, 73 lines)
@@ -0,0 +1,73 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Feature models: Feature, FeatureInstance."""

import uuid
from typing import Optional, Dict, Any
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.i18nRegistry import i18nModel
from modules.datamodels.datamodelUtils import TextMultilingual


@i18nModel("Feature")
class Feature(PowerOnModel):
    """Feature definition (global, e.g. 'trustee', 'chatbot'). The platform's available functionality."""
    code: str = Field(
        description="Unique feature code (Primary Key), e.g. 'trustee', 'chatbot'",
        json_schema_extra={"label": "Code", "frontend_type": "text", "frontend_readonly": False, "frontend_required": True}
    )
    label: TextMultilingual = Field(
        description="Feature label in multiple languages (I18n)",
        json_schema_extra={"label": "Bezeichnung", "frontend_type": "multilingual", "frontend_readonly": False, "frontend_required": True}
    )
    icon: str = Field(
        default="",
        description="Icon identifier for the feature",
        json_schema_extra={"label": "Symbol", "frontend_type": "text", "frontend_readonly": False, "frontend_required": False}
    )


@i18nModel("Feature-Instanz")
class FeatureInstance(PowerOnModel):
    """Instance of a feature within a mandate. A mandate can have multiple instances of the same feature."""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique ID of the feature instance",
        json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )
    featureCode: str = Field(
        description="FK -> Feature.code",
        json_schema_extra={
            "label": "Feature",
            "frontend_type": "select",
            "frontend_readonly": True,
            "frontend_required": True,
            "fk_target": {"db": "poweron_app", "table": "Feature", "column": "code"},
        },
    )
    mandateId: str = Field(
        description="FK -> Mandate.id (CASCADE DELETE)",
        json_schema_extra={
            "label": "Mandant",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": True,
            "fk_target": {"db": "poweron_app", "table": "Mandate"},
        },
    )
    label: str = Field(
        default="",
        description="Instance label, e.g. 'Buchhaltung 2025'",
        json_schema_extra={"label": "Bezeichnung", "frontend_type": "text", "frontend_readonly": False, "frontend_required": True}
    )
    enabled: bool = Field(
        default=True,
        description="Whether this feature instance is enabled",
        json_schema_extra={"label": "Aktiviert", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
    )
    config: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Instance-specific configuration (JSONB). Structure depends on featureCode.",
        json_schema_extra={"label": "Konfiguration", "frontend_type": "json", "frontend_readonly": False, "frontend_required": False}
    )
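A sketch of the pair in use: one global Feature plus one instance of it inside a mandate. The dict literal for label assumes TextMultilingual coerces from a language-code mapping; the IDs are illustrative.

feature = Feature(code="trustee", label={"de": "Treuhand", "en": "Trustee"})
instance = FeatureInstance(
    featureCode=feature.code,
    mandateId="mandate-01",
    label="Buchhaltung 2025",
    config={"fiscalYear": 2025},
)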
modules/datamodels/datamodelFileFolder.py (new file, 82 lines)
@@ -0,0 +1,82 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""FileFolder: hierarchical folder structure for file organization."""

from typing import Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.i18nRegistry import i18nModel
import uuid


@i18nModel("Dateiordner")
class FileFolder(PowerOnModel):
    """Hierarchical folder for file management."""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    name: str = Field(
        description="Folder name",
        json_schema_extra={"label": "Name", "frontend_type": "text", "frontend_readonly": False, "frontend_required": True},
    )
    parentId: Optional[str] = Field(
        default=None,
        description="Parent folder ID (null = root)",
        json_schema_extra={
            "label": "Uebergeordneter Ordner",
            "frontend_type": "text",
            "frontend_readonly": False,
            "frontend_required": False,
            "fk_target": {"db": "poweron_management", "table": "FileFolder"},
        },
    )
    mandateId: Optional[str] = Field(
        default=None,
        description="Mandate context",
        json_schema_extra={
            "label": "Mandanten-ID",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "fk_target": {"db": "poweron_app", "table": "Mandate"},
        },
    )
    featureInstanceId: Optional[str] = Field(
        default=None,
        description="Feature instance context",
        json_schema_extra={
            "label": "Feature-Instanz-ID",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "fk_target": {"db": "poweron_app", "table": "FeatureInstance"},
        },
    )
    scope: str = Field(
        default="personal",
        description="Data visibility scope: personal, featureInstance, mandate, global. Inherited by files in this folder.",
        json_schema_extra={
            "label": "Sichtbarkeit",
            "frontend_type": "select",
            "frontend_readonly": False,
            "frontend_required": False,
            "frontend_options": [
                {"value": "personal", "label": "Persönlich"},
                {"value": "featureInstance", "label": "Feature-Instanz"},
                {"value": "mandate", "label": "Mandant"},
                {"value": "global", "label": "Global"},
            ],
        },
    )
    neutralize: bool = Field(
        default=False,
        description="Whether files in this folder should be neutralized before AI processing. Inherited by new/moved files.",
        json_schema_extra={
            "label": "Neutralisieren",
            "frontend_type": "checkbox",
            "frontend_readonly": False,
            "frontend_required": False,
        },
    )
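Since folders form a tree via parentId, a path can be resolved by walking up to the root. A minimal sketch (the helper is not part of this commit; it assumes all ancestors are loaded into the byId map):

from typing import Dict

def folderPath(folder: FileFolder, byId: Dict[str, FileFolder]) -> str:
    # Walk parentId links up to the root and join names top-down.
    parts = [folder.name]
    current = folder
    while current.parentId is not None:
        current = byId[current.parentId]
        parts.append(current.name)
    return "/".join(reversed(parts))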
@@ -1,43 +1,145 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """File-related datamodels: FileItem, FilePreview, FileData."""
 
-from typing import Dict, Any, Optional, Union
+from typing import Dict, Any, List, Optional, Union
 from pydantic import BaseModel, Field
-from modules.shared.attributeUtils import registerModelLabels
-from modules.shared.timeUtils import getUtcTimestamp
+from modules.datamodels.datamodelBase import PowerOnModel
+from modules.shared.i18nRegistry import i18nModel
 import uuid
 import base64
 
 
-class FileItem(BaseModel):
-    id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
-    mandateId: str = Field(description="ID of the mandate this file belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
-    fileName: str = Field(description="Name of the file", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True})
-    mimeType: str = Field(description="MIME type of the file", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
-    fileHash: str = Field(description="Hash of the file", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
-    fileSize: int = Field(description="Size of the file in bytes", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
-    creationDate: float = Field(default_factory=getUtcTimestamp, description="Date when the file was created (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
-
-registerModelLabels(
-    "FileItem",
-    {"en": "File Item", "fr": "Élément de fichier"},
-    {
-        "id": {"en": "ID", "fr": "ID"},
-        "mandateId": {"en": "Mandate ID", "fr": "ID du mandat"},
-        "fileName": {"en": "fileName", "fr": "Nom de fichier"},
-        "mimeType": {"en": "MIME Type", "fr": "Type MIME"},
-        "fileHash": {"en": "File Hash", "fr": "Hash du fichier"},
-        "fileSize": {"en": "File Size", "fr": "Taille du fichier"},
-        "creationDate": {"en": "Creation Date", "fr": "Date de création"},
-    },
-)
+@i18nModel("Datei")
+class FileItem(PowerOnModel):
+    """Metadata of a stored file."""
+    id: str = Field(
+        default_factory=lambda: str(uuid.uuid4()),
+        description="Primary key",
+        json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+    )
+    fileName: str = Field(
+        description="Name of the file",
+        json_schema_extra={"label": "Dateiname", "frontend_type": "text", "frontend_readonly": False, "frontend_required": True},
+    )
+    mandateId: Optional[str] = Field(
+        default="",
+        description="ID of the mandate this file belongs to",
+        json_schema_extra={
+            "label": "Mandant",
+            "frontend_type": "text",
+            "frontend_readonly": True,
+            "frontend_required": False,
+            "frontend_fk_source": "/api/mandates/",
+            "frontend_fk_display_field": "label",
+            "fk_model": "Mandate",
+            "fk_target": {"db": "poweron_app", "table": "Mandate"},
+        },
+    )
+    featureInstanceId: Optional[str] = Field(
+        default="",
+        description="ID of the feature instance this file belongs to",
+        json_schema_extra={
+            "label": "Feature-Instanz",
+            "frontend_type": "text",
+            "frontend_readonly": True,
+            "frontend_required": False,
+            "frontend_fk_source": "/api/features/instances",
+            "frontend_fk_display_field": "label",
+            "fk_model": "FeatureInstance",
+            "fk_target": {"db": "poweron_app", "table": "FeatureInstance"},
+        },
+    )
+    mimeType: str = Field(
+        description="MIME type of the file",
+        json_schema_extra={"label": "MIME-Typ", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+    )
+    fileHash: str = Field(
+        description="Hash of the file",
+        json_schema_extra={"label": "Datei-Hash", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+    )
+    fileSize: int = Field(
+        description="Size of the file in bytes",
+        json_schema_extra={
+            "label": "Dateigroesse",
+            "frontend_type": "integer",
+            "frontend_readonly": True,
+            "frontend_required": False,
+            # Auto-scale byte units (B / KB / MB / GB / TB), right-aligned in tables.
+            "frontend_format": "R:b",
+        },
+    )
+    tags: Optional[List[str]] = Field(
+        default=None,
+        description="Tags for categorization and search",
+        json_schema_extra={"label": "Tags", "frontend_type": "tags", "frontend_readonly": False, "frontend_required": False},
+    )
+    folderId: Optional[str] = Field(
+        default=None,
+        description="ID of the parent folder",
+        json_schema_extra={
+            "label": "Ordner-ID",
+            "frontend_type": "text",
+            "frontend_readonly": False,
+            "frontend_required": False,
+            "fk_target": {"db": "poweron_management", "table": "FileFolder"},
+        },
+    )
+    description: Optional[str] = Field(
+        default=None,
+        description="User-provided description of the file",
+        json_schema_extra={"label": "Beschreibung", "frontend_type": "textarea", "frontend_readonly": False, "frontend_required": False},
+    )
+    status: Optional[str] = Field(
+        default=None,
+        description="Processing status: pending, extracted, embedding, indexed, failed",
+        json_schema_extra={"label": "Status", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+    )
+    scope: str = Field(
+        default="personal",
+        description="Data visibility scope: personal, featureInstance, mandate, global",
+        json_schema_extra={"label": "Sichtbarkeit", "frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
+            {"value": "personal", "label": "Persönlich"},
+            {"value": "featureInstance", "label": "Feature-Instanz"},
+            {"value": "mandate", "label": "Mandant"},
+            {"value": "global", "label": "Global"},
+        ]},
+    )
+    neutralize: bool = Field(
+        default=False,
+        description="Whether this file should be neutralized before AI processing",
+        json_schema_extra={"label": "Neutralisieren", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False},
+    )
 
 
+@i18nModel("Datei-Vorschau")
 class FilePreview(BaseModel):
-    content: Union[str, bytes] = Field(description="File content (text or binary)")
-    mimeType: str = Field(description="MIME type of the file")
-    fileName: str = Field(description="Original fileName")
-    isText: bool = Field(description="Whether the content is text (True) or binary (False)")
-    encoding: Optional[str] = Field(None, description="Text encoding if content is text")
-    size: int = Field(description="Size of the content in bytes")
+    """Preview content of a file for display."""
+    content: Union[str, bytes] = Field(
+        description="File content (text or binary)",
+        json_schema_extra={"label": "Inhalt"},
+    )
+    mimeType: str = Field(
+        description="MIME type of the file",
+        json_schema_extra={"label": "MIME-Typ"},
+    )
+    fileName: str = Field(
+        description="Original fileName",
+        json_schema_extra={"label": "Dateiname"},
+    )
+    isText: bool = Field(
+        description="Whether the content is text (True) or binary (False)",
+        json_schema_extra={"label": "Ist Text"},
+    )
+    encoding: Optional[str] = Field(
+        None,
+        description="Text encoding if content is text",
+        json_schema_extra={"label": "Kodierung"},
+    )
+    size: int = Field(
+        description="Size of the content in bytes",
+        json_schema_extra={"label": "Groesse"},
+    )
 
     def toDictWithBase64Encoding(self) -> Dict[str, Any]:
         """Convert to dictionary with base64 encoding for binary content."""
@@ -45,29 +147,21 @@ class FilePreview(BaseModel):
         if isinstance(data.get("content"), bytes):
             data["content"] = base64.b64encode(data["content"]).decode("utf-8")
         return data
-registerModelLabels(
-    "FilePreview",
-    {"en": "File Preview", "fr": "Aperçu du fichier"},
-    {
-        "content": {"en": "Content", "fr": "Contenu"},
-        "mimeType": {"en": "MIME Type", "fr": "Type MIME"},
-        "fileName": {"en": "fileName", "fr": "Nom de fichier"},
-        "isText": {"en": "Is Text", "fr": "Est du texte"},
-        "encoding": {"en": "Encoding", "fr": "Encodage"},
-        "size": {"en": "Size", "fr": "Taille"},
-    },
-)
 
-class FileData(BaseModel):
-    id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
-    data: str = Field(description="File data content")
-    base64Encoded: bool = Field(description="Whether the data is base64 encoded")
-registerModelLabels(
-    "FileData",
-    {"en": "File Data", "fr": "Données de fichier"},
-    {
-        "id": {"en": "ID", "fr": "ID"},
-        "data": {"en": "Data", "fr": "Données"},
-        "base64Encoded": {"en": "Base64 Encoded", "fr": "Encodé en Base64"},
-    },
-)
+
+@i18nModel("Dateidaten")
+class FileData(PowerOnModel):
+    """Raw data of a file (e.g. Base64)."""
+    id: str = Field(
+        default_factory=lambda: str(uuid.uuid4()),
+        description="Primary key",
+        json_schema_extra={"label": "ID"},
+    )
+    data: str = Field(
+        description="File data content",
+        json_schema_extra={"label": "Daten"},
+    )
+    base64Encoded: bool = Field(
+        description="Whether the data is base64 encoded",
+        json_schema_extra={"label": "Base64-kodiert"},
+    )
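A sketch of toDictWithBase64Encoding in use: text content passes through unchanged, binary content comes back as a base64 string, so the result is always JSON-safe. The payload below is illustrative.

preview = FilePreview(
    content=b"\x89PNG",  # truncated binary payload, illustrative
    mimeType="image/png",
    fileName="logo.png",
    isText=False,
    size=4,
)
payload = preview.toDictWithBase64Encoding()
assert isinstance(payload["content"], str)  # base64 string, not bytes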
modules/datamodels/datamodelInvitation.py (new file, 115 lines)
@@ -0,0 +1,115 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Invitation model for self-service onboarding.
Token-based invitations for new users to mandates/features.
"""

import uuid
import secrets
from typing import Optional, List
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.i18nRegistry import i18nModel


@i18nModel("Einladung")
class Invitation(PowerOnModel):
    """
    Invitation token for new users.
    Enables self-service onboarding to mandates and feature instances.
    """
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique ID of the invitation",
        json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )
    token: str = Field(
        default_factory=lambda: secrets.token_urlsafe(32),
        description="Secure invitation token",
        json_schema_extra={"label": "Token", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )

    mandateId: str = Field(
        description="FK → Mandate.id - Target mandate for the invitation",
        json_schema_extra={
            "label": "Mandant",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": True,
            "fk_target": {"db": "poweron_app", "table": "Mandate"},
        },
    )
    featureInstanceId: Optional[str] = Field(
        default=None,
        description="Optional FK → FeatureInstance.id - Direct access to specific feature",
        json_schema_extra={
            "label": "Feature-Instanz",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "fk_target": {"db": "poweron_app", "table": "FeatureInstance"},
        },
    )
    roleIds: List[str] = Field(
        default_factory=list,
        description="List of Role IDs to assign to the invited user",
        json_schema_extra={"label": "Rollen", "frontend_type": "multiselect", "frontend_readonly": False, "frontend_required": True}
    )

    targetUsername: Optional[str] = Field(
        default=None,
        description="Username of the invited user (must match on acceptance)",
        json_schema_extra={"label": "Ziel-Benutzername", "frontend_type": "text", "frontend_readonly": False, "frontend_required": False}
    )
    email: Optional[str] = Field(
        default=None,
        description="Email address to send invitation link (optional)",
        json_schema_extra={"label": "E-Mail (optional)", "frontend_type": "email", "frontend_readonly": False, "frontend_required": False}
    )
    expiresAt: float = Field(
        description="When the invitation expires (UTC timestamp)",
        json_schema_extra={"label": "Gueltig bis", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": True}
    )

    usedBy: Optional[str] = Field(
        default=None,
        description="User ID of the person who used the invitation",
        json_schema_extra={
            "label": "Verwendet von",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "fk_target": {"db": "poweron_app", "table": "User"},
        },
    )
    usedAt: Optional[float] = Field(
        default=None,
        description="When the invitation was used (UTC timestamp)",
        json_schema_extra={"label": "Verwendet am", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
    )
    revokedAt: Optional[float] = Field(
        default=None,
        description="When the invitation was revoked (UTC timestamp)",
        json_schema_extra={"label": "Widerrufen am", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
    )

    emailSent: Optional[bool] = Field(
        default=False,
        description="Whether the invitation email was successfully sent",
        json_schema_extra={"label": "E-Mail gesendet", "frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False}
    )

    maxUses: int = Field(
        default=1,
        ge=1,
        le=100,
        description="Maximum number of times this invitation can be used",
        json_schema_extra={"label": "Max. Verwendungen", "frontend_type": "number", "frontend_readonly": False, "frontend_required": False}
    )
    currentUses: int = Field(
        default=0,
        ge=0,
        description="Current number of times this invitation has been used",
        json_schema_extra={"label": "Aktuelle Verwendungen", "frontend_type": "number", "frontend_readonly": True, "frontend_required": False}
    )
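The expiry, revocation, and use-count fields together determine whether a token is still redeemable. A sketch of the check an acceptance endpoint would run (the helper itself is illustrative, not part of this commit):

import time

def isUsable(inv: Invitation) -> bool:
    # Redeemable only if not revoked, not expired, and under the use limit.
    now = time.time()
    return (
        inv.revokedAt is None
        and now < inv.expiresAt
        and inv.currentUses < inv.maxUses
    )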
@@ -1,3 +1,5 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
 """
 Unified JSON document schema and helpers used by both generation prompts and renderers.
 
@@ -17,12 +19,14 @@ supportedSectionTypes: List[str] = [
 ]
 
 # Canonical JSON template used for AI generation (documents array + sections)
-# Rendering pipelines can select the first document and read its sections.
+# This template is used for STRUCTURE generation - sections have empty elements arrays.
+# For content generation, elements arrays will be populated later.
 jsonTemplateDocument: str = """{
   "metadata": {
     "split_strategy": "single_document",
     "source_documents": [],
-    "extraction_method": "ai_generation"
+    "extraction_method": "ai_generation",
+    "title": "{{DOCUMENT_TITLE}}"
   },
   "documents": [
     {
@@ -31,56 +35,77 @@ jsonTemplateDocument: str = """{
       "filename": "document.json",
       "sections": [
         {
-          "id": "section_heading_example",
+          "id": "section_heading_main_title",
           "content_type": "heading",
-          "elements": [
-            {"level": 1, "text": "Heading Text"}
-          ],
-          "order": 0
+          "complexity": "simple",
+          "generation_hint": "Main document title heading",
+          "order": 1,
+          "elements": []
         },
         {
-          "id": "section_paragraph_example",
+          "id": "section_paragraph_introduction",
           "content_type": "paragraph",
-          "elements": [
-            {"text": "Paragraph text content"}
-          ],
-          "order": 0
+          "complexity": "simple",
+          "generation_hint": "Introduction paragraph",
+          "order": 2,
+          "elements": []
+        },
+        {
+          "id": "section_heading_section_1",
+          "content_type": "heading",
+          "complexity": "simple",
+          "generation_hint": "Section heading for topic 1",
+          "order": 3,
+          "elements": []
+        },
+        {
+          "id": "section_paragraph_section_1",
+          "content_type": "paragraph",
+          "complexity": "simple",
+          "generation_hint": "Content paragraph for section 1",
+          "order": 4,
+          "elements": []
         },
         {
           "id": "section_bullet_list_example",
           "content_type": "bullet_list",
-          "elements": [
-            {
-              "items": ["Item 1", "Item 2"]
-            }
-          ],
-          "order": 0
+          "complexity": "simple",
+          "generation_hint": "Bullet list items",
+          "order": 5,
+          "elements": []
+        },
+        {
+          "id": "section_image_example",
+          "content_type": "image",
+          "complexity": "complex",
+          "generation_hint": "Illustration for document",
+          "image_prompt": "A detailed description for image generation",
+          "order": 6,
+          "elements": []
         },
         {
           "id": "section_table_example",
           "content_type": "table",
-          "elements": [
-            {
-              "headers": ["Column 1", "Column 2"],
-              "rows": [
-                ["Row 1 Col 1", "Row 1 Col 2"],
-                ["Row 2 Col 1", "Row 2 Col 2"]
-              ],
-              "caption": "Table caption"
-            }
-          ],
-          "order": 0
+          "complexity": "simple",
+          "generation_hint": "Data table with relevant information",
+          "order": 7,
+          "elements": []
         },
         {
           "id": "section_code_example",
           "content_type": "code_block",
-          "elements": [
-            {
-              "code": "function example() { return true; }",
-              "language": "javascript"
-            }
-          ],
-          "order": 0
+          "complexity": "simple",
+          "generation_hint": "Code example or snippet",
+          "order": 8,
+          "elements": []
+        },
+        {
+          "id": "section_paragraph_conclusion",
+          "content_type": "paragraph",
+          "complexity": "simple",
+          "generation_hint": "Conclusion paragraph",
+          "order": 9,
+          "elements": []
         }
       ]
     }
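Since the template is now a structure skeleton, a later content pass has to fill each section's empty elements array. A sketch of that step, assuming the placeholder is substituted before parsing and reusing the element shape from the old template (both are assumptions, not shown in this hunk):

import json

doc = json.loads(jsonTemplateDocument.replace("{{DOCUMENT_TITLE}}", "Q3 Report"))
for section in doc["documents"][0]["sections"]:
    # Content generation would replace this with real AI output per hint.
    if section["content_type"] == "heading" and not section["elements"]:
        section["elements"].append({"level": 1, "text": section["generation_hint"]})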
modules/datamodels/datamodelKnowledge.py (new file, 245 lines)
@@ -0,0 +1,245 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Knowledge Store data models: FileContentIndex, ContentChunk, RoundMemory, WorkflowMemory.

These models support the layered RAG architecture:
- Personal Layer: scope=personal, userId-scoped
- Instance Layer: scope=featureInstance, featureInstanceId-scoped
- Mandate Layer: scope=mandate, mandateId-scoped (visible to all mandate users)
- Global Layer: scope=global (sysAdmin only)
- Workflow Layer: workflowId-scoped (WorkflowMemory)

Vector fields use json_schema_extra={"db_type": "vector(1536)"} for pgvector.
"""

from typing import Dict, Any, List, Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.i18nRegistry import i18nModel
from modules.shared.timeUtils import getUtcTimestamp
import uuid


@i18nModel("Datei-Inhaltsindex")
class FileContentIndex(PowerOnModel):
    """Structural index of a file's content objects."""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key (typically = fileId)",
        json_schema_extra={"label": "ID"},
    )
    userId: str = Field(
        description="Owner user ID",
        json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}},
    )
    featureInstanceId: str = Field(
        default="",
        description="Feature instance scope",
        json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}},
    )
    mandateId: str = Field(
        default="",
        description="Mandate scope",
        json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate"}},
    )
    fileName: str = Field(
        description="Original file name",
        json_schema_extra={"label": "Dateiname"},
    )
    mimeType: str = Field(
        description="MIME type of the file",
        json_schema_extra={"label": "MIME-Typ"},
    )
    containerPath: Optional[str] = Field(
        default=None,
        description="Path within a container (e.g. 'archive.zip/folder/report.pdf')",
        json_schema_extra={"label": "Container-Pfad"},
    )
    totalObjects: int = Field(
        default=0,
        description="Total number of content objects extracted",
        json_schema_extra={"label": "Anzahl Objekte"},
    )
    totalSize: int = Field(
        default=0,
        description="Total size of all content objects in bytes",
        json_schema_extra={"label": "Gesamtgroesse"},
    )
    structure: Dict[str, Any] = Field(
        default_factory=dict,
        description="Structural overview (pages, sections, hierarchy)",
        json_schema_extra={"label": "Struktur"},
    )
    objectSummary: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="Compact summary per content object",
        json_schema_extra={"label": "Objekt-Zusammenfassung"},
    )
    extractedAt: float = Field(
        default_factory=getUtcTimestamp,
        description="Extraction timestamp",
        json_schema_extra={"label": "Extrahiert am"},
    )
    status: str = Field(
        default="pending",
        description="Processing status: pending, extracted, embedding, indexed, failed",
        json_schema_extra={"label": "Status"},
    )
    scope: str = Field(
        default="personal",
        description="Data visibility scope: personal, featureInstance, mandate, global",
        json_schema_extra={"label": "Sichtbarkeit"},
    )
    neutralizationStatus: Optional[str] = Field(
        default=None,
        description="Neutralization status: completed, failed, skipped, None = not required",
        json_schema_extra={"label": "Neutralisierungsstatus"},
    )
    isNeutralized: bool = Field(
        default=False,
        description="True if content was neutralized before indexing",
        json_schema_extra={"label": "Neutralisiert"},
    )


@i18nModel("Inhalts-Chunk")
class ContentChunk(PowerOnModel):
    """Persisted content chunk with embedding vector."""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"label": "ID"},
    )
    contentObjectId: str = Field(
        description="Reference to the content object within FileContentIndex",
        json_schema_extra={"label": "Inhaltsobjekt-ID"},
    )
    fileId: str = Field(
        description="FK to the source file",
        json_schema_extra={"label": "Datei-ID", "fk_target": {"db": "poweron_management", "table": "FileItem"}},
    )
    userId: str = Field(
        description="Owner user ID",
        json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}},
    )
    featureInstanceId: str = Field(
        default="",
        description="Feature instance scope",
        json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}},
    )
    contentType: str = Field(
        description="Content type: text, image, videostream, audiostream, other",
        json_schema_extra={"label": "Inhaltstyp"},
    )
    data: str = Field(
        description="Content data (text, base64, URL)",
        json_schema_extra={"label": "Daten"},
    )
    contextRef: Dict[str, Any] = Field(
        default_factory=dict,
        description="Context reference (page, position, label)",
        json_schema_extra={"label": "Kontext-Referenz"},
    )
    summary: Optional[str] = Field(
        default=None,
        description="AI-generated summary (on demand)",
        json_schema_extra={"label": "Zusammenfassung"},
    )
    chunkMetadata: Dict[str, Any] = Field(
        default_factory=dict,
        description="Additional metadata",
        json_schema_extra={"label": "Metadaten"},
    )
    embedding: Optional[List[float]] = Field(
        default=None,
        description="pgvector embedding (NOT NULL for text chunks)",
        json_schema_extra={"label": "Embedding", "db_type": "vector(1536)"},
    )


@i18nModel("Runden-Speicher")
class RoundMemory(PowerOnModel):
    """Persistent memory per agent round."""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"label": "ID"},
    )
    workflowId: str = Field(
        description="FK to the workflow",
        json_schema_extra={"label": "Workflow-ID"},
    )
    roundNumber: int = Field(
        default=0,
        description="Agent round that produced this memory",
        json_schema_extra={"label": "Rundennummer"},
    )
    memoryType: str = Field(
        description="Category: file_ref, tool_result, decision, data_source_ref",
        json_schema_extra={"label": "Speichertyp"},
    )
    key: str = Field(
        description="Dedup key, e.g. 'readFile:<fileId>' or 'plan'",
        json_schema_extra={"label": "Schluessel"},
    )
    summary: str = Field(
        default="",
        description="Compact summary (max ~2000 chars)",
        json_schema_extra={"label": "Zusammenfassung"},
    )
    fullData: Optional[str] = Field(
        default=None,
        description="Full tool output when small enough (max ~8000 chars)",
        json_schema_extra={"label": "Volldaten"},
    )
    fileIds: List[str] = Field(
        default_factory=list,
        description="Referenced file IDs",
        json_schema_extra={"label": "Datei-IDs"},
    )
    embedding: Optional[List[float]] = Field(
        default=None,
        description="Embedding of summary for semantic retrieval",
        json_schema_extra={"label": "Embedding", "db_type": "vector(1536)"},
    )


@i18nModel("Workflow-Speicher")
class WorkflowMemory(PowerOnModel):
    """Workflow-specific key-value cache for entities and facts."""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"label": "ID"},
    )
    workflowId: str = Field(
        description="FK to the workflow",
        json_schema_extra={"label": "Workflow-ID", "fk_target": {"db": "poweron_chat", "table": "ChatWorkflow"}},
    )
    userId: str = Field(
        description="Owner user ID",
        json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}},
    )
    featureInstanceId: str = Field(
        default="",
        description="Feature instance scope",
        json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}},
    )
    key: str = Field(
        description="Key identifier (e.g. 'entity:companyName')",
        json_schema_extra={"label": "Schluessel"},
    )
    value: str = Field(
        description="Extracted value",
        json_schema_extra={"label": "Wert"},
    )
    source: str = Field(
        default="extraction",
        description="Origin: extraction, tool, conversation, summary",
        json_schema_extra={"label": "Quelle"},
    )
    embedding: Optional[List[float]] = Field(
        default=None,
        description="Optional embedding for semantic lookup",
        json_schema_extra={"label": "Embedding", "db_type": "vector(1536)"},
    )
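In production the vector(1536) columns are ranked by pgvector in SQL; a plain-Python sketch only to make the embedding fields concrete (the helper functions are illustrative):

import math
from typing import List

def cosine(a: List[float], b: List[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    na = math.sqrt(sum(x * x for x in a))
    nb = math.sqrt(sum(y * y for y in b))
    return dot / (na * nb) if na and nb else 0.0

def topChunks(query: List[float], chunks: List[ContentChunk], k: int = 5) -> List[ContentChunk]:
    # Rank chunks that actually carry an embedding; image/audio chunks may not.
    scored = [c for c in chunks if c.embedding is not None]
    return sorted(scored, key=lambda c: cosine(query, c.embedding), reverse=True)[:k]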
169
modules/datamodels/datamodelMembership.py
Normal file
169
modules/datamodels/datamodelMembership.py
Normal file
|
|
@ -0,0 +1,169 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Membership models: UserMandate, FeatureAccess, and junction tables.

These models define the m:n relationships between User, Mandate, and FeatureInstance.
Roles are linked via junction tables to allow clean CASCADE DELETE.
"""

import uuid
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.i18nRegistry import i18nModel


@i18nModel("Benutzer-Mandant")
class UserMandate(PowerOnModel):
    """
    A user's membership in a mandate.

    No user belongs to a mandate directly; membership is controlled through this model.
    """
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique ID of the user-mandate membership",
        json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
    )
    userId: str = Field(
        description="FK → User.id (CASCADE DELETE)",
        json_schema_extra={
            "label": "Benutzer",
            "frontend_type": "select",
            "frontend_readonly": False,
            "frontend_required": True,
            "frontend_fk_source": "/api/users/",
            "frontend_fk_display_field": "username",
            "fk_model": "User",
            "fk_target": {"db": "poweron_app", "table": "User"},
        },
    )
    mandateId: str = Field(
        description="FK → Mandate.id (CASCADE DELETE)",
        json_schema_extra={
            "label": "Mandant",
            "frontend_type": "select",
            "frontend_readonly": False,
            "frontend_required": True,
            "frontend_fk_source": "/api/mandates/",
            "frontend_fk_display_field": "label",
            "fk_model": "Mandate",
            "fk_target": {"db": "poweron_app", "table": "Mandate"},
        },
    )
    enabled: bool = Field(
        default=True,
        description="Whether this membership is enabled",
        json_schema_extra={"label": "Aktiviert", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
    )


@i18nModel("Feature-Zugang")
class FeatureAccess(PowerOnModel):
    """
    A user's access to a feature instance.

    Defines which users can access which feature instances.
    """
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique ID of the feature access",
        json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
    )
    userId: str = Field(
        description="FK → User.id (CASCADE DELETE)",
        json_schema_extra={
            "label": "Benutzer",
            "frontend_type": "select",
            "frontend_readonly": False,
            "frontend_required": True,
            "frontend_fk_source": "/api/users/",
            "frontend_fk_display_field": "username",
            "fk_target": {"db": "poweron_app", "table": "User"},
        },
    )
    featureInstanceId: str = Field(
        description="FK → FeatureInstance.id (CASCADE DELETE)",
        json_schema_extra={
            "label": "Feature-Instanz",
            "frontend_type": "select",
            "frontend_readonly": False,
            "frontend_required": True,
            "frontend_fk_source": "/api/features/instances",
            "frontend_fk_display_field": "label",
            "fk_target": {"db": "poweron_app", "table": "FeatureInstance"},
        },
    )
    enabled: bool = Field(
        default=True,
        description="Whether this feature access is enabled",
        json_schema_extra={"label": "Aktiviert", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
    )


@i18nModel("Benutzer-Mandant-Rolle")
class UserMandateRole(PowerOnModel):
    """
    Junction table: UserMandate to Role.

    Enables CASCADE DELETE at the database level.
    """
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique ID of the junction record",
        json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
    )
    userMandateId: str = Field(
        description="FK → UserMandate.id (CASCADE DELETE)",
        json_schema_extra={
            "label": "Benutzer-Mandant",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": True,
            "fk_target": {"db": "poweron_app", "table": "UserMandate"},
        },
    )
    roleId: str = Field(
        description="FK → Role.id (CASCADE DELETE)",
        json_schema_extra={
            "label": "Rolle",
            "frontend_type": "select",
            "frontend_readonly": False,
            "frontend_required": True,
            "frontend_fk_source": "/api/rbac/roles",
            "frontend_fk_display_field": "roleLabel",
            "fk_target": {"db": "poweron_app", "table": "Role"},
        },
    )


@i18nModel("Feature-Zugang-Rolle")
class FeatureAccessRole(PowerOnModel):
    """
    Junction table: FeatureAccess to Role.

    Enables CASCADE DELETE at the database level.
    """
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique ID of the junction record",
        json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
    )
    featureAccessId: str = Field(
        description="FK → FeatureAccess.id (CASCADE DELETE)",
        json_schema_extra={
            "label": "Feature-Zugang",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": True,
            "fk_target": {"db": "poweron_app", "table": "FeatureAccess"},
        },
    )
    roleId: str = Field(
        description="FK → Role.id (CASCADE DELETE)",
        json_schema_extra={
            "label": "Rolle",
            "frontend_type": "select",
            "frontend_readonly": False,
            "frontend_required": True,
            "frontend_fk_source": "/api/rbac/roles",
            "frontend_fk_display_field": "roleLabel",
            "fk_target": {"db": "poweron_app", "table": "Role"},
        },
    )
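Note: the junction tables above exist so that role links are cleaned up by the database itself rather than by application code. A minimal sketch of the intended CASCADE DELETE behavior, assuming a SQL backend with enforced foreign keys (SQLite is used here only as a stand-in; the actual poweron_app schema is not part of this diff, and the table/column names are taken from the fk_target metadata above):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("PRAGMA foreign_keys = ON")  # SQLite enforces FKs only when enabled
conn.execute("CREATE TABLE UserMandate (id TEXT PRIMARY KEY)")
conn.execute("CREATE TABLE Role (id TEXT PRIMARY KEY)")
conn.execute("""
    CREATE TABLE UserMandateRole (
        id TEXT PRIMARY KEY,
        userMandateId TEXT NOT NULL
            REFERENCES UserMandate(id) ON DELETE CASCADE,
        roleId TEXT NOT NULL
            REFERENCES Role(id) ON DELETE CASCADE
    )
""")
conn.execute("INSERT INTO UserMandate VALUES ('um-1')")
conn.execute("INSERT INTO Role VALUES ('role-1')")
conn.execute("INSERT INTO UserMandateRole VALUES ('j-1', 'um-1', 'role-1')")

# Deleting the membership removes its role links automatically.
conn.execute("DELETE FROM UserMandate WHERE id = 'um-1'")
assert conn.execute("SELECT COUNT(*) FROM UserMandateRole").fetchone()[0] == 0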
389
modules/datamodels/datamodelMessaging.py
Normal file
@@ -0,0 +1,389 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Messaging models: MessagingSubscription, MessagingSubscriptionRegistration, MessagingDelivery."""

import uuid
from typing import Optional
from enum import Enum
from pydantic import BaseModel, Field, ConfigDict
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.i18nRegistry import i18nModel


class MessagingChannel(str, Enum):
    """Messaging channel types"""
    EMAIL = "email"
    SMS = "sms"
    WHATSAPP = "whatsapp"
    TEAMS_CHAT = "teams_chat"
    # Additional channels can be added here


class DeliveryStatus(str, Enum):
    """Individual delivery status"""
    PENDING = "pending"
    SENT = "sent"
    FAILED = "failed"


@i18nModel("Messaging-Abonnement")
class MessagingSubscription(PowerOnModel):
    """Data model for messaging subscriptions"""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique ID of the subscription",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "ID",
        },
    )
    subscriptionId: str = Field(
        description="Unique subscription identifier (e.g., 'system_errors', 'audit_login')",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": False,
            "frontend_required": True,
            "label": "Abonnement-ID",
        },
    )
    subscriptionLabel: str = Field(
        description="Display name of the subscription",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": False,
            "frontend_required": True,
            "label": "Bezeichnung",
        },
    )
    mandateId: str = Field(
        description="ID of the mandate this subscription belongs to",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Mandanten-ID",
            "fk_target": {"db": "poweron_app", "table": "Mandate"},
        },
    )
    featureInstanceId: str = Field(
        description="ID of the feature instance this subscription belongs to",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Feature-Instanz-ID",
            "fk_target": {"db": "poweron_app", "table": "FeatureInstance"},
        },
    )
    description: Optional[str] = Field(
        default=None,
        description="Description of the subscription",
        json_schema_extra={
            "frontend_type": "textarea",
            "frontend_readonly": False,
            "frontend_required": False,
            "label": "Beschreibung",
        },
    )
    isSystemSubscription: bool = Field(
        default=False,
        description="Whether this is a system subscription (only admin can create)",
        json_schema_extra={
            "frontend_type": "checkbox",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "System-Abonnement",
        },
    )
    enabled: bool = Field(
        default=True,
        description="Whether the subscription is enabled",
        json_schema_extra={
            "frontend_type": "checkbox",
            "frontend_readonly": False,
            "frontend_required": False,
            "label": "Aktiviert",
        },
    )

    model_config = ConfigDict(use_enum_values=True)


@i18nModel("Messaging-Registrierung")
class MessagingSubscriptionRegistration(BaseModel):
    """Data model for user registrations to messaging subscriptions"""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique ID of the registration",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "ID",
        },
    )
    mandateId: str = Field(
        description="ID of the mandate this registration belongs to",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Mandanten-ID",
            "fk_target": {"db": "poweron_app", "table": "Mandate"},
        },
    )
    featureInstanceId: str = Field(
        description="ID of the feature instance this registration belongs to",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Feature-Instanz-ID",
            "fk_target": {"db": "poweron_app", "table": "FeatureInstance"},
        },
    )
    subscriptionId: str = Field(
        description="ID of the subscription this registration belongs to",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": False,
            "frontend_required": True,
            "label": "Abonnement-ID",
        },
    )
    userId: str = Field(
        description="ID of the user registered to this subscription",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Benutzer-ID",
            "fk_target": {"db": "poweron_app", "table": "User"},
        },
    )
    channel: MessagingChannel = Field(
        description="Channel type for this registration",
        json_schema_extra={
            "frontend_type": "select",
            "frontend_readonly": False,
            "frontend_required": True,
            "frontend_options": [
                {"value": "email", "label": "Email"},
                {"value": "sms", "label": "SMS"},
                {"value": "whatsapp", "label": "WhatsApp"},
                {"value": "teams_chat", "label": "Teams Chat"},
            ],
            "label": "Kanal",
        },
    )
    channelConfig: str = Field(
        default="",
        description="Channel-specific configuration (e.g., email address, phone number, Teams user ID)",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": False,
            "frontend_required": False,
            "label": "Kanal-Konfiguration",
        },
    )
    enabled: bool = Field(
        default=True,
        description="Whether this registration is enabled",
        json_schema_extra={
            "frontend_type": "checkbox",
            "frontend_readonly": False,
            "frontend_required": False,
            "label": "Aktiviert",
        },
    )

    model_config = ConfigDict(use_enum_values=True)


@i18nModel("Messaging-Zustellung")
class MessagingDelivery(BaseModel):
    """Data model for individual message deliveries"""
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique ID of the delivery",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "ID",
        },
    )
    mandateId: str = Field(
        description="ID of the mandate this delivery belongs to",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Mandanten-ID",
        },
    )
    featureInstanceId: str = Field(
        description="ID of the feature instance this delivery belongs to",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Feature-Instanz-ID",
        },
    )
    subscriptionId: str = Field(
        description="ID of the subscription this delivery belongs to",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Abonnement-ID",
        },
    )
    userId: str = Field(
        description="ID of the user receiving this delivery",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Benutzer-ID",
            "fk_target": {"db": "poweron_app", "table": "User"},
        },
    )
    channel: MessagingChannel = Field(
        description="Channel used for this delivery",
        json_schema_extra={
            "frontend_type": "select",
            "frontend_readonly": True,
            "frontend_required": False,
            "frontend_options": [
                {"value": "email", "label": "Email"},
                {"value": "sms", "label": "SMS"},
                {"value": "whatsapp", "label": "WhatsApp"},
                {"value": "teams_chat", "label": "Teams Chat"},
            ],
            "label": "Kanal",
        },
    )
    status: DeliveryStatus = Field(
        default=DeliveryStatus.PENDING,
        description="Status of the delivery",
        json_schema_extra={
            "frontend_type": "select",
            "frontend_readonly": True,
            "frontend_required": False,
            "frontend_options": [
                {"value": "pending", "label": "Pending"},
                {"value": "sent", "label": "Sent"},
                {"value": "failed", "label": "Failed"},
            ],
            "label": "Status",
        },
    )
    errorMessage: Optional[str] = Field(
        default=None,
        description="Error message if delivery failed",
        json_schema_extra={
            "frontend_type": "textarea",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Fehlermeldung",
        },
    )
    sentAt: Optional[float] = Field(
        default=None,
        description="When the delivery was sent (UTC timestamp in seconds)",
        json_schema_extra={
            "frontend_type": "datetime",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Gesendet am",
        },
    )

    model_config = ConfigDict(use_enum_values=True)


@i18nModel("Messaging-Ereignisparameter")
class MessagingEventParameters(BaseModel):
    """Data model for event parameters passed to subscription functions"""
    triggerData: dict = Field(
        default_factory=dict,
        description="Event data from trigger as dictionary/JSON",
        json_schema_extra={
            "frontend_type": "json",
            "frontend_readonly": False,
            "frontend_required": False,
            "label": "Trigger-Daten",
        },
    )


@i18nModel("Messaging-Sendeergebnis")
class MessagingSendResult(BaseModel):
    """Data model for sendMessage result"""
    success: bool = Field(
        description="Whether the message was sent successfully",
        json_schema_extra={
            "frontend_type": "checkbox",
            "frontend_readonly": True,
            "frontend_required": True,
            "label": "Erfolg",
        },
    )
    deliveryId: Optional[str] = Field(
        default=None,
        description="ID of the created MessagingDelivery record",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Zustellungs-ID",
        },
    )
    errorMessage: Optional[str] = Field(
        default=None,
        description="Error message if sending failed",
        json_schema_extra={
            "frontend_type": "textarea",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Fehlermeldung",
        },
    )


@i18nModel("Messaging-Abonnement-Ausführung")
class MessagingSubscriptionExecutionResult(BaseModel):
    """Data model for subscription function execution result"""
    success: bool = Field(
        description="Whether the subscription execution was successful",
        json_schema_extra={
            "frontend_type": "checkbox",
            "frontend_readonly": True,
            "frontend_required": True,
            "label": "Erfolg",
        },
    )
    messagesSent: int = Field(
        default=0,
        description="Number of messages sent",
        json_schema_extra={
            "frontend_type": "number",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Gesendete Nachrichten",
        },
    )
    errorMessage: Optional[str] = Field(
        default=None,
        description="Error message if execution failed",
        json_schema_extra={
            "frontend_type": "textarea",
            "frontend_readonly": True,
            "frontend_required": False,
            "label": "Fehlermeldung",
        },
    )
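Note: a registration pairs a channel with its channelConfig (the address to deliver to), each attempt is tracked as a MessagingDelivery that moves from pending to sent or failed, and the outcome is reported as a MessagingSendResult. A minimal sketch of that flow, assuming the models behave as plain Pydantic models; send_email is a hypothetical transport and persistence is out of scope, neither is part of this diff:

import time
from modules.datamodels.datamodelMessaging import (
    MessagingSubscriptionRegistration, MessagingDelivery,
    DeliveryStatus, MessagingSendResult,
)

def send_email(address: str, body: str) -> None:
    """Hypothetical transport; assumed to raise on failure."""
    ...

def deliver(registration: MessagingSubscriptionRegistration, body: str) -> MessagingSendResult:
    # Record the attempt; status defaults to PENDING.
    delivery = MessagingDelivery(
        mandateId=registration.mandateId,
        featureInstanceId=registration.featureInstanceId,
        subscriptionId=registration.subscriptionId,
        userId=registration.userId,
        channel=registration.channel,
    )
    try:
        send_email(registration.channelConfig, body)
        delivery.status = DeliveryStatus.SENT
        delivery.sentAt = time.time()  # UTC seconds, as the sentAt field expects
        return MessagingSendResult(success=True, deliveryId=delivery.id)
    except Exception as exc:
        delivery.status = DeliveryStatus.FAILED
        delivery.errorMessage = str(exc)
        return MessagingSendResult(success=False, deliveryId=delivery.id, errorMessage=str(exc))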
@@ -1,51 +0,0 @@
"""Neutralizer models: DataNeutraliserConfig and DataNeutralizerAttributes."""

import uuid
from typing import Optional
from pydantic import BaseModel, Field
from modules.shared.attributeUtils import registerModelLabels


class DataNeutraliserConfig(BaseModel):
    id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the configuration", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
    mandateId: str = Field(description="ID of the mandate this configuration belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
    userId: str = Field(description="ID of the user who created this configuration", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
    enabled: bool = Field(default=True, description="Whether data neutralization is enabled", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False})
    namesToParse: str = Field(default="", description="Multiline list of names to parse for neutralization", json_schema_extra={"frontend_type": "textarea", "frontend_readonly": False, "frontend_required": False})
    sharepointSourcePath: str = Field(default="", description="SharePoint path to read files for neutralization", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False})
    sharepointTargetPath: str = Field(default="", description="SharePoint path to store neutralized files", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False})


registerModelLabels(
    "DataNeutraliserConfig",
    {"en": "Data Neutralization Config", "fr": "Configuration de neutralisation des données"},
    {
        "id": {"en": "ID", "fr": "ID"},
        "mandateId": {"en": "Mandate ID", "fr": "ID de mandat"},
        "userId": {"en": "User ID", "fr": "ID utilisateur"},
        "enabled": {"en": "Enabled", "fr": "Activé"},
        "namesToParse": {"en": "Names to Parse", "fr": "Noms à analyser"},
        "sharepointSourcePath": {"en": "Source Path", "fr": "Chemin source"},
        "sharepointTargetPath": {"en": "Target Path", "fr": "Chemin cible"},
    },
)


class DataNeutralizerAttributes(BaseModel):
    id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the attribute mapping (used as UID in neutralized files)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
    mandateId: str = Field(description="ID of the mandate this attribute belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
    userId: str = Field(description="ID of the user who created this attribute", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
    originalText: str = Field(description="Original text that was neutralized", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
    fileId: Optional[str] = Field(default=None, description="ID of the file this attribute belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
    patternType: str = Field(description="Type of pattern that matched (email, phone, name, etc.)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})


registerModelLabels(
    "DataNeutralizerAttributes",
    {"en": "Neutralized Data Attribute", "fr": "Attribut de données neutralisées"},
    {
        "id": {"en": "ID", "fr": "ID"},
        "mandateId": {"en": "Mandate ID", "fr": "ID de mandat"},
        "userId": {"en": "User ID", "fr": "ID utilisateur"},
        "originalText": {"en": "Original Text", "fr": "Texte original"},
        "fileId": {"en": "File ID", "fr": "ID de fichier"},
        "patternType": {"en": "Pattern Type", "fr": "Type de modèle"},
    },
)
163
modules/datamodels/datamodelNotification.py
Normal file
@@ -0,0 +1,163 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Notification model for in-app notifications.
Supports actionable notifications (e.g., invitation accept/decline).
"""

import uuid
from typing import Optional, List
from enum import Enum
from pydantic import BaseModel, Field, ConfigDict
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.i18nRegistry import i18nModel


class NotificationType(str, Enum):
    """Types of notifications"""
    INVITATION = "invitation"  # Invitation to a mandate/feature
    SYSTEM = "system"          # System messages
    WORKFLOW = "workflow"      # Workflow status updates
    MENTION = "mention"        # Mention in a chat/comment


class NotificationStatus(str, Enum):
    """Status of a notification"""
    UNREAD = "unread"        # Not yet read
    READ = "read"            # Read
    ACTIONED = "actioned"    # Action has been taken
    DISMISSED = "dismissed"  # Dismissed/closed


@i18nModel("Benachrichtigungs-Aktion")
class NotificationAction(BaseModel):
    """Possible action for a notification"""
    actionId: str = Field(
        description="Unique identifier for the action (e.g., 'accept', 'decline')",
        json_schema_extra={"label": "Aktions-ID"},
    )
    label: str = Field(
        description="Display label for the action button",
        json_schema_extra={"label": "Bezeichnung"},
    )
    style: str = Field(
        default="default",
        description="Button style: 'primary', 'danger', 'default'",
        json_schema_extra={"label": "Stil"},
    )


@i18nModel("Benachrichtigung")
class UserNotification(PowerOnModel):
    """
    In-app notification for a user.
    Supports actionable notifications with accept/decline buttons.
    """
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique ID of the notification",
        json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )
    userId: str = Field(
        description="Target user ID for this notification",
        json_schema_extra={
            "label": "Benutzer",
            "frontend_type": "text",
            "frontend_readonly": True,
            "frontend_required": True,
            "fk_target": {"db": "poweron_app", "table": "User"},
        },
    )

    type: NotificationType = Field(
        default=NotificationType.SYSTEM,
        description="Type of notification",
        json_schema_extra={
            "label": "Typ",
            "frontend_type": "select",
            "frontend_readonly": True,
            "frontend_required": True,
            "frontend_options": [
                {"value": "invitation", "label": "Einladung"},
                {"value": "system", "label": "System"},
                {"value": "workflow", "label": "Workflow"},
                {"value": "mention", "label": "Erwähnung"}
            ]
        }
    )
    status: NotificationStatus = Field(
        default=NotificationStatus.UNREAD,
        description="Current status of the notification",
        json_schema_extra={
            "label": "Status",
            "frontend_type": "select",
            "frontend_readonly": True,
            "frontend_required": False,
            "frontend_options": [
                {"value": "unread", "label": "Ungelesen"},
                {"value": "read", "label": "Gelesen"},
                {"value": "actioned", "label": "Bearbeitet"},
                {"value": "dismissed", "label": "Verworfen"}
            ]
        }
    )

    title: str = Field(
        description="Notification title",
        json_schema_extra={"label": "Titel", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
    )
    message: str = Field(
        description="Notification message/body",
        json_schema_extra={"label": "Nachricht", "frontend_type": "textarea", "frontend_readonly": True, "frontend_required": True}
    )
    icon: Optional[str] = Field(
        default=None,
        description="Optional icon identifier (e.g., 'mail', 'warning', 'info')",
        json_schema_extra={"label": "Symbol", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )

    referenceType: Optional[str] = Field(
        default=None,
        description="Type of referenced object (e.g., 'Invitation', 'Workflow')",
        json_schema_extra={"label": "Referenz-Typ", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )
    referenceId: Optional[str] = Field(
        default=None,
        description="ID of referenced object",
        json_schema_extra={"label": "Referenz-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )

    actions: Optional[List[NotificationAction]] = Field(
        default=None,
        description="List of possible actions for this notification",
        json_schema_extra={"label": "Aktionen", "frontend_type": "json", "frontend_readonly": True, "frontend_required": False}
    )

    actionTaken: Optional[str] = Field(
        default=None,
        description="Which action was taken (actionId)",
        json_schema_extra={"label": "Durchgeführte Aktion", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )
    actionResult: Optional[str] = Field(
        default=None,
        description="Result message from the action",
        json_schema_extra={"label": "Aktions-Ergebnis", "frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False}
    )

    readAt: Optional[float] = Field(
        default=None,
        description="When the notification was read (UTC timestamp)",
        json_schema_extra={"label": "Gelesen am", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
    )
    actionedAt: Optional[float] = Field(
        default=None,
        description="When action was taken (UTC timestamp)",
        json_schema_extra={"label": "Bearbeitet am", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
    )
    expiresAt: Optional[float] = Field(
        default=None,
        description="When the notification expires (optional, UTC timestamp)",
        json_schema_extra={"label": "Gültig bis", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
    )

    model_config = ConfigDict(use_enum_values=True)
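Note: an actionable notification bundles its buttons as NotificationAction entries and records the outcome in actionTaken/actionResult. A minimal usage sketch, assuming PowerOnModel behaves like a mutable Pydantic model with no additional required fields; the IDs and strings are placeholders, not part of this diff:

import time
from modules.datamodels.datamodelNotification import (
    UserNotification, NotificationType, NotificationStatus, NotificationAction,
)

# Build an invitation with accept/decline buttons.
note = UserNotification(
    userId="user-123",  # placeholder ID
    type=NotificationType.INVITATION,
    title="Einladung",
    message="You have been invited to a mandate.",
    referenceType="Invitation",
    referenceId="inv-456",  # placeholder ID
    actions=[
        NotificationAction(actionId="accept", label="Annehmen", style="primary"),
        NotificationAction(actionId="decline", label="Ablehnen", style="danger"),
    ],
)

# When the user clicks "accept", a handler would record the outcome:
note.actionTaken = "accept"
note.actionResult = "Invitation accepted"
note.status = NotificationStatus.ACTIONED
note.actionedAt = time.time()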
Some files were not shown because too many files have changed in this diff.