fixed automation parameter flow

This commit is contained in:
ValueOn AG 2026-04-13 00:38:47 +02:00
parent c18ad6f8e7
commit 17455688a9
43 changed files with 3170 additions and 427 deletions

3
app.py
View file

@@ -597,6 +597,9 @@ app.include_router(i18nRouter)
from modules.routes.routeAdminUserAccessOverview import router as userAccessOverviewRouter
app.include_router(userAccessOverviewRouter)
from modules.routes.routeAdminDemoConfig import router as demoConfigRouter
app.include_router(demoConfigRouter)
from modules.routes.routeGdpr import router as gdprRouter
app.include_router(gdprRouter)

View file

@@ -45,6 +45,11 @@ Connector_StacSwisstopo_MAX_RETRIES = 3
Connector_StacSwisstopo_RETRY_DELAY = 1.0
Connector_StacSwisstopo_ENABLE_CACHE = True
# Demo RMA credentials (same for all demo trustee instances)
Demo_RMA_ApiBaseUrl = https://service.int.runmyaccounts.com/api/latest/clients/
Demo_RMA_ClientName = poweronag
# SECURITY(review): a live personal access token was committed here. It has been
# redacted; the original token must be rotated/revoked since it remains in git
# history. Load this value from an environment variable or secret store instead.
Demo_RMA_ApiKey = <REDACTED-LOAD-FROM-SECRET-STORE>
# Operator company information (shown on invoice emails)
Operator_CompanyName = PowerOn AG
Operator_Address = Birmensdorferstrasse 94, 8003 Zürich

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@@ -0,0 +1,256 @@
# PowerOn AI Platform - Investoren-Dokumentation
## Stand: 14. Oktober 2025
---
## Executive Summary
PowerOn ist eine Software, die Unternehmen dabei hilft, wiederkehrende Aufgaben zu automatisieren. Statt dass Mitarbeiter manuell Daten sammeln, Dokumente durcharbeiten und Berichte schreiben, übernimmt PowerOn diese Arbeiten.
### Das Problem, das PowerOn löst
Mitarbeiter verbringen 30% ihrer Arbeitszeit damit, Informationen zu suchen. Unternehmen haben Schwierigkeiten, große Dokumente zu analysieren, aktuelle Marktdaten zu sammeln und regelmäßige Berichte zu erstellen. PowerOn automatisiert diese Aufgaben.
### Wie PowerOn funktioniert
Ein Benutzer gibt eine Aufgabe ein, zum Beispiel "Lese meine Mails der internen Mailbox der letzten 2 Wochen, fasse diese pro Thema im Sharepoint Marketing Ordner zusammen und verfasse eine Antwort für die wichtigsten Kunden". PowerOn verbindet sich dann automatisch mit Outlook, SharePoint und anderen Systemen, sammelt die Daten, analysiert sie und erstellt die gewünschten Zusammenfassungen und Antworten.
### Gemessene Verbesserungen
Tests mit Pilotkunden zeigen:
- Marktanalysen: von 3-4 Wochen auf 3-5 Tage
- Berichterstellung: 62% Zeitersparnis
- Prototypenentwicklung: 70% schneller
- Dokumentenanalyse: 80% weniger Zeitaufwand
---
## 1. Kernfunktionen von PowerOn
### 1.1 Was PowerOn tatsächlich macht
PowerOn ist eine KI-gestützte Workflow-Engine, die drei verschiedene Arbeitsabläufe unterstützt:
**Dynamische Workflows**: PowerOn passt sich automatisch an neue Aufgaben an. Ein Benutzer kann jede beliebige Anfrage stellen, und das System findet den besten Weg zur Lösung.
**Action-Plan Workflows**: PowerOn plant komplexe Aufgaben selbstständig. Das System teilt große Projekte in kleinere Schritte auf und führt diese automatisch aus.
**Feste Geschäftsprozesse**: Unternehmen können standardisierte Abläufe definieren, die PowerOn immer gleich ausführt, zum Beispiel monatliche Berichte oder regelmäßige Marktanalysen.
### 1.2 Kernfunktionen
**Dokumentenanalyse**: Das System liest große Dokumente (PDF, Word, Excel) und extrahiert die wichtigsten Informationen. Ein 200-seitiger Vertrag wird automatisch zusammengefasst.
**Web-Recherche**: PowerOn sucht im Internet nach aktuellen Informationen zu einem Thema und sammelt relevante Daten von verschiedenen Websites.
**Berichterstellung**: Basierend auf den gesammelten Daten und Dokumenten erstellt das System fertige Berichte in verschiedenen Formaten (PDF, Word, Excel).
**Code-Generierung**: PowerOn kann einfache Programme und Skripte erstellen, um wiederkehrende Aufgaben zu automatisieren.
### 1.3 Wie der Arbeitsablauf funktioniert
Ein Benutzer gibt eine Aufgabe ein, zum Beispiel "Analysiere die Konkurrenz im E-Mobilitätssektor". PowerOn führt dann automatisch folgende Schritte aus:
1. Sucht im Internet nach aktuellen Informationen über E-Mobilitätsunternehmen
2. Analysiert vorhandene interne Dokumente des Unternehmens
3. Erstellt einen strukturierten Bericht mit den wichtigsten Erkenntnissen
4. Stellt den Bericht in verschiedenen Formaten zur Verfügung
### 1.4 Technische Besonderheiten
**Keine Größenbeschränkungen**: PowerOn kann beliebig große Dokumente verarbeiten und unbegrenzt viele Berichte erstellen. Das System umgeht die normalen Grenzen von KI-Systemen durch intelligente Aufteilung.
**Automatische Datenschutz-Funktion**: Sensible Daten werden automatisch erkannt und vor der Verarbeitung entfernt. Nach der Analyse werden die Daten wieder eingefügt, sodass der Bericht vollständig ist, aber keine vertraulichen Informationen preisgegeben werden.
**Mehrere KI-Anbieter**: PowerOn arbeitet gleichzeitig mit verschiedenen KI-Systemen (OpenAI, Anthropic, Perplexity). Wenn ein System ausfällt oder überlastet ist, übernimmt automatisch ein anderes. Das gewährleistet einen stabilen Betrieb und macht das System unabhängig von einzelnen Anbietern.
**Sicherheit**: Jedes Unternehmen hat einen eigenen, abgeschotteten Bereich. Alle Aktivitäten werden protokolliert.
---
## 2. Warum PowerOn anders ist
### 2.1 Keine technischen Grenzen
Andere KI-Systeme haben strenge Beschränkungen: maximal 50 Seiten Dokument, höchstens 10 Berichte pro Monat. PowerOn hat diese Grenzen nicht. Das System kann 1000-seitige Verträge analysieren und hunderte Berichte erstellen, ohne zusätzliche Kosten.
### 2.2 Automatischer Datenschutz
PowerOn erkennt automatisch sensible Daten wie Namen, Adressen oder Kontonummern und entfernt sie vor der Verarbeitung. Nach der Analyse werden die Daten wieder eingefügt. So entstehen vollständige Berichte ohne Datenschutzverletzungen.
### 2.3 Stabile und unabhängige Technologie
PowerOn arbeitet mit mehreren KI-Anbietern gleichzeitig. Wenn ein System ausfällt, übernimmt automatisch ein anderes. Das reduziert Ausfallzeiten und macht das Unternehmen unabhängig von einzelnen Anbietern.
### 2.4 Direkte Integration in Unternehmenssysteme
PowerOn verbindet sich direkt mit den Systemen, die Unternehmen täglich nutzen:
- **E-Mail-Systeme**: Outlook, Gmail für automatische E-Mail-Analyse
- **Dokumentenmanagement**: SharePoint, Google Drive für Dateizugriff
- **Projektmanagement**: Jira, ClickUp für Aufgabenverwaltung
- **Cloud-Speicher**: OneDrive, Dropbox für Dateiintegration
Statt dass Mitarbeiter Daten manuell zwischen verschiedenen Systemen kopieren, arbeitet PowerOn direkt mit allen Systemen zusammen.
### 2.5 Drei verschiedene Arbeitsweisen
**Dynamisch**: PowerOn passt sich an jede neue Aufgabe an. Ein Benutzer kann jede beliebige Anfrage stellen.
**Action-Plan**: PowerOn plant komplexe Projekte selbstständig und teilt sie in machbare Schritte auf.
**Standardisiert**: Unternehmen können feste Abläufe definieren, die PowerOn immer gleich ausführt.
### 2.6 Einfache Bedienung
Mitarbeiter müssen nicht programmieren können. Sie geben einfach ein, was sie brauchen, und PowerOn macht den Rest. Ein Marketing-Manager kann eine Konkurrenzanalyse bestellen, ohne IT-Kenntnisse zu haben.
---
## 3. Markt und Geschäftsmodell
### 3.1 Zielkunden
PowerOn richtet sich hauptsächlich an mittelständische Unternehmen mit 50-500 Mitarbeitern. Diese Unternehmen haben oft komplexe Datenverarbeitungsanforderungen, aber nicht die Ressourcen, um eigene KI-Systeme zu entwickeln.
Typische Kunden sind Beratungsunternehmen, Banken, Versicherungen, Kliniken und andere Dienstleister, die regelmäßig Analysen und Berichte erstellen müssen.
### 3.2 Nutzen für Kunden
#### Gemessene Verbesserungen
Basierend auf Tests mit Pilotkunden:
- Marktanalysen werden 73% schneller durchgeführt (von 3-4 Wochen auf 3-5 Tage)
- Berichterstellung spart 62% Zeit ein
- Prototypenentwicklung ist 70% schneller
- Dokumentenanalyse reduziert den Zeitaufwand um 80%
- Kosteneinsparung von 5.000-8.000 Euro pro Marktanalyse
#### Praktische Vorteile
Mitarbeiter benötigen keine Programmierkenntnisse, um PowerOn zu nutzen. Das System arbeitet mit vorhandenen Daten und Systemen zusammen, ohne dass große Umstellungen erforderlich sind.
### 3.3 Einnahmemodelle
PowerOn plant verschiedene Einnahmequellen:
1. Monatliche Abonnements pro Benutzer
2. Nutzungsbasierte Abrechnung für Verarbeitungsleistungen
3. Individuelle Lizenzen für große Unternehmen
4. Beratungs- und Implementierungsdienstleistungen
Die genauen Preise werden basierend auf Marktanalysen festgelegt. Das Ziel ist eine Bruttomarge von 75-85% nach der Skalierung.
---
## 4. Risiken und Zukunftssicherheit
### 4.1 Risiken durch bessere KI-Systeme
#### Kurzfristige Risiken (6-12 Monate)
Wenn KI-Systeme besser werden, könnten einfache Aufgaben wie Textgenerierung zur Standardware werden. Dies könnte den Wert einzelner KI-Funktionen reduzieren. PowerOn ist jedoch darauf ausgelegt, verschiedene KI-Systeme zu koordinieren, was auch bei verbesserten Systemen wertvoll bleibt.
#### Mittelfristige Risiken (1-3 Jahre)
Einzelne KI-Systeme könnten in der Lage sein, mehr Aufgaben gleichzeitig zu erledigen. Dies könnte die Notwendigkeit der Koordination reduzieren. PowerOn konzentriert sich jedoch auf spezifische Unternehmensanforderungen und die Integration in bestehende Systeme, was weiterhin wertvoll ist.
#### Langfristige Risiken (3+ Jahre)
Sehr fortgeschrittene KI-Systeme könnten in der Lage sein, komplexe Aufgaben ohne Koordination zu lösen. PowerOn konzentriert sich jedoch auf die spezifischen Anforderungen von Unternehmen, einschließlich Sicherheit, Compliance und Integration, die auch bei fortgeschrittenen KI-Systemen wichtig bleiben.
### 4.2 Was könnte obsolet werden
Einfache Aufgaben wie grundlegende Textgenerierung oder Web-Suche könnten zu Standardfunktionen werden. Auch einfache Datenanalysen könnten automatisiert werden.
### 4.3 Was bleibt wertvoll
Die Koordination verschiedener Systeme, die Integration in Unternehmensprozesse und die Einhaltung von Sicherheits- und Datenschutzbestimmungen bleiben auch bei verbesserten KI-Systemen wichtig. PowerOn ist so aufgebaut, dass es sich an neue Technologien anpassen kann, ohne das gesamte System neu entwickeln zu müssen.
---
## 5. Finanzielle Bewertung
### 5.1 Aktuelle Bewertung der Komponenten
PowerOn besteht aus mehreren wertvollen Komponenten, die einzeln bewertet werden können:
**Frontend-System**: €150.000-250.000
- Modulare Benutzeroberfläche, die einfach erweitert werden kann
- Funktioniert in allen gängigen Browsern
- Anpassbar an verschiedene Unternehmensanforderungen
**Backend-Infrastruktur**: €200.000-300.000
- Stabile Grundstruktur für alle Funktionen
- Schnelle Verarbeitung auch bei großen Datenmengen
- Einfache Integration neuer Funktionen
**Workflow-System**: €250.000-350.000
- Kernfunktion für die Koordination verschiedener Aufgaben
- Drei verschiedene Arbeitsweisen (dynamisch, Action-Plan, standardisiert)
- Automatische Anpassung an neue Anforderungen
**Sicherheits- und Datenschutz-System**: €100.000-150.000
- Automatische Erkennung und Schutz sensibler Daten
- Verschiedene Anmeldeverfahren für Unternehmen
- Vollständige Protokollierung aller Aktivitäten
**Datenverarbeitungs-Engine**: €150.000-200.000
- Verarbeitung beliebig großer Dokumente
- Intelligente Aufteilung zur Umgehung von KI-Grenzen
- Unterstützung aller gängigen Dateiformate
**Multi-Agent-Koordinationssystem**: €300.000-400.000
- Einzigartige Technologie zur Koordination verschiedener KI-Systeme
- Automatische Auswahl des besten KI-Anbieters für jede Aufgabe
- Stabile Ausführung auch bei Ausfällen einzelner Systeme
**Unternehmens-Integration**: €200.000-300.000
- Anpassung an verschiedene Branchen und Anforderungen
- Einfache Integration in bestehende Unternehmenssysteme
- Skalierbare Architektur für wachsende Anforderungen
**Integrations-Framework**: €150.000-200.000
- Verbindungen zu verschiedenen KI-Anbietern (OpenAI, Anthropic, Perplexity)
- Direkte Integration in Unternehmenssysteme (Outlook, SharePoint, Google Drive, Jira)
- Einfache Integration neuer Systeme und Anbieter
- Unabhängigkeit von einzelnen Anbietern
**Workflow-Management-System**: €100.000-150.000
- Plan-Act-Observe-Refine-Zyklus für kontinuierliche Verbesserung
- Echtzeit-Überwachung des Arbeitsfortschritts
- Automatische Fehlerbehandlung und Wiederaufnahme
**Gesamtbewertung**: €1.6-2.4 Mio.
### 5.2 Investitionsbedarf
PowerOn benötigt Investitionsmittel, um die Entwicklung abzuschließen und den Markt zu erschließen. Die Mittel werden hauptsächlich für die Produktentwicklung, den Aufbau eines Vertriebsteams und die Infrastruktur verwendet.
### 5.3 Wachstumspotenzial
Das System ist darauf ausgelegt, mit wachsenden Anforderungen zu skalieren. Die modulare Architektur ermöglicht es, neue Funktionen hinzuzufügen und die Plattform an verschiedene Kundenanforderungen anzupassen.
---
## 6. Marktpotenzial und Ausstiegsmöglichkeiten
### 6.1 Marktpotenzial
Der Markt für KI-basierte Geschäftsanwendungen wächst schnell. Unternehmen suchen nach Lösungen, die komplexe Aufgaben automatisieren und die Effizienz steigern können. PowerOn positioniert sich in diesem wachsenden Markt.
### 6.2 Ausstiegsmöglichkeiten
Langfristig gibt es verschiedene Möglichkeiten für einen Ausstieg, darunter den Verkauf an größere Softwareunternehmen oder den Börsengang. Diese Optionen hängen von der Entwicklung des Unternehmens und des Marktes ab.
---
## 7. Fazit
### 7.1 Stärken von PowerOn
PowerOn bietet eine einzigartige Lösung für die Koordination verschiedener KI-Systeme. Das System ist darauf ausgelegt, sich an neue Technologien anzupassen, und bietet nachgewiesene Verbesserungen bei Geschäftsprozessen.
### 7.2 Risikofaktoren
Die schnelle Entwicklung der KI-Technologie stellt ein Risiko dar, da einfache Aufgaben möglicherweise obsolet werden. Der Wettbewerb durch größere Unternehmen und die Marktakzeptanz sind weitere Faktoren, die berücksichtigt werden müssen.
### 7.3 Investitionsbewertung
PowerOn befindet sich in einer frühen Entwicklungsphase mit einem funktionsfähigen Grundsystem. Das Potenzial für Wachstum ist vorhanden, aber es gibt auch erhebliche Risiken, die mit der Entwicklung neuer Technologien verbunden sind.
---
*Dokument erstellt am 14. Oktober 2025*
*Version: 1.0*
*Autor: PowerOn Development Team*

View file

@@ -0,0 +1,175 @@
# PowerOn AI Platform
## Investoren-Summary
### Marktpositionierung
Die PowerOn AI Platform ist eine innovative Enterprise-Lösung für die Automatisierung und Optimierung von komplexen geschäftlichen Prozessen durch einen Multi-Agent-KI-Ansatz. Wir positionieren uns an der Schnittstelle zwischen den schnell wachsenden Märkten für:
- Künstliche Intelligenz (Marktvolumen 2025: $190 Mrd.)
- Business Process Automation (Marktvolumen 2025: $19,6 Mrd.)
- Enterprise Knowledge Management (Marktvolumen 2025: $43 Mrd.)
### Wettbewerbsvorteile
1. **Proprietäre Multi-Agent-Technologie**: Unsere Plattform orchestriert spezifische KI-Agenten für verschiedene Aufgaben, was zu deutlich überlegenen Ergebnissen im Vergleich zu Einzelagenten-Ansätzen führt.
2. **Modellunabhängigkeit**: Integration mit führenden KI-Providern (OpenAI, Anthropic) ohne Vendor Lock-in, wodurch wir immer die besten Modelle für spezifische Aufgaben einsetzen können.
3. **Enterprise-Ready**: Entwickelt mit Multi-Tenant-Architektur, umfassenden Sicherheitsfeatures und Skalierbarkeit für Unternehmensanforderungen.
4. **Anpassbar und erweiterbar**: Modulare Architektur, die kontinuierliche Feature-Erweiterungen und kundenspezifische Anpassungen ermöglicht.
5. **Fortschrittliche Workflow-Orchestrierung**:
- Intelligente Koordination mehrerer spezialisierter Agenten
- Echtzeit-Statusüberwachung und Fortschrittsanzeige
- Robuste Fehlerbehandlung und Wiederaufnahmemechanismen
- Nahtlose Integration von Dateiverarbeitung und Dokumentenmanagement
6. **Umfassende Enterprise-Features**:
- Multi-Tenant-Architektur mit Mandantenverwaltung
- Erweiterte Benutzer- und Berechtigungsverwaltung
- Enterprise-Grade Sicherheitsfeatures
- Skalierbare Infrastruktur
### Finanzielle Highlights
- **Go-to-Market-Strategie**: Initiale Fokussierung auf mittelständische Unternehmen in den Bereichen Professional Services, Finanzdienstleistungen und Gesundheitswesen.
- **Umsatzmodell**: Kombiniertes SaaS-Abonnement (pro Benutzer/Monat) und nutzungsbasierte Abrechnung (pro Verarbeitungseinheit).
- **Erwartete Bruttomarge**: 75-85% nach Erreichen der Skalierung.
- **Erwartetes ARR in Jahr 3**: €4,5 Mio. bei 150 Unternehmenskunden.
- **Kostenstrukturen**:
- 40% Produktentwicklung
- 30% Vertrieb und Marketing
- 20% Betrieb und Support
- 10% Verwaltung
### Wachstumspfad
#### Kurzfristig (12 Monate)
- Markteinführung der Core-Plattform
- Aufbau von 3-5 Schlüsselreferenzkunden
- Entwicklung branchenspezifischer Templates
#### Mittelfristig (24 Monate)
- Erweiterung auf Agentenmarktplatz
- Integration von proprietären Unternehmensmodellen
- Internationale Expansion
#### Langfristig (36+ Monate)
- Entwicklung spezialisierter Branchenlösungen
- KI-Middleware für Unternehmen
- Strategische Partnerschaften mit Enterprise-Software-Anbietern
### Investitionsbedarf
Das aktuelle Finanzierungsziel von CHF 2.5 Mio. ermöglicht:
- Abschluss der Produktentwicklung und Erreichen der Marktreife
- Aufbau eines Vertriebs- und Marketingteams
- Sicherung strategischer Partnerschaften
- 18-monatige Runway bis zur Profitabilität
### Exit-Potenzial
Das Team sieht folgende Exit-Optionen:
1. Strategische Übernahme durch Enterprise-Software-Unternehmen (5-7 Jahre)
2. Erwerb durch grössere KI-Plattform (3-5 Jahre)
3. IPO bei Erreichen von CHF 50+ Mio. ARR (7-10 Jahre)
### Extraktion aus ValueOn AG
Vor einem Exit ist die Extraktion der PowerOn AI Platform aus der ValueOn AG in eine eigenständige Organisation vorgesehen:
1. **Vergütung der Aufwände**:
- Vollständige Vergütung aller übernommenen Entwicklungskosten
- Übernahme der Infrastruktur- und Betriebskosten
- Schadloshaltung für alle bisherigen Investitionen
- Marketing & Sales-Assets verbleiben bei ValueOn AG ohne Vergütung
2. **Schlüsselpersonen**:
- Anrechnung des geschaffenen Mehrwerts für jede Schlüsselperson
- Option auf Auszahlung oder Aktienübernahme
- Individuelle Vereinbarungen basierend auf Beitrag und Verantwortung
- Langfristige Bindung durch Equity-Programme
3. **Investitionskapital**:
- Beschaffung des notwendigen Kapitals zum aktuellen Marktwert
- Berücksichtigung der Extraktionskosten
- Sicherstellung der operativen Liquidität
- Finanzierung des weiteren Wachstums
Die Extraktion wird durchgeführt, sobald:
- Die technische Basis stabil ist
- Erste Referenzkunden gewonnen wurden
- Die Marktpositionierung klar ist
- Die Wachstumsstrategie definiert ist
### Marktwert und Bewertung
#### Aktueller Wert (Juni 2025)
Basierend auf dem aktuellen Entwicklungsstand und der technologischen Basis:
1. **Technologischer Wert**:
- Basis-Frontend-Architektur (modular, aber noch in Entwicklung): CHF 0.15-0.25 Mio.
- Backend-Grundstruktur (FastAPI, Basis-Interfaces): CHF 0.2-0.3 Mio.
- Workflow-System (Grundfunktionalität): CHF 0.25-0.35 Mio.
2. **Funktionaler Wert**:
- Basis-Workflow-Orchestrierung: CHF 0.1-0.15 Mio.
- Einfache Dokumentenverarbeitung: CHF 0.05-0.1 Mio.
- Grundlegende Benutzerverwaltung: CHF 0.05-0.1 Mio.
3. **Entwicklungspotenzial**:
- Erweiterbare Architektur: CHF 0.15-0.2 Mio.
- Modulare Struktur: CHF 0.1-0.15 Mio.
- Basis für zukünftige Erweiterungen: CHF 0.15-0.2 Mio.
**Aktuelle Gesamtbewertung**: CHF 1.2-1.8 Mio.
Diese Bewertung basiert auf:
- Dem aktuellen Entwicklungsstand (Frontend und Backend)
- Der vorhandenen Grundfunktionalität
- Der modularen Basis-Architektur
- Dem Entwicklungspotenzial
#### Wert per Ende 2025
Prognostizierte Bewertung basierend auf:
- Vervollständigung der Core-Funktionalität
- Erste Referenzkunden
- Erweiterte Workflow-Funktionen
- Verbesserte Benutzeroberfläche
**Prognostizierte Bewertung Ende 2025**: CHF 2-3 Mio.
#### Wert per Ende 2026
Prognostizierte Bewertung basierend auf:
- Vollständige Multi-Agent-Implementierung
- Erweiterte Integrationen
- Wachsende Kundenbasis
- Erwartetes ARR von CHF 4,5 Mio.
**Prognostizierte Bewertung Ende 2026**: CHF 4-6 Mio.
Die Wertsteigerung wird getrieben durch:
1. **Technologische Entwicklung**:
- Vervollständigung der Agenten-Implementierung
- Erweiterung der Workflow-Funktionalitäten
- Verbesserung der Integrationen
2. **Marktentwicklung**:
- Aufbau der Kundenbasis
- Entwicklung von Branchenlösungen
- Erste internationale Expansion
3. **Geschäftsentwicklung**:
- Wachsende Umsätze
- Verbesserte Margen
- Neue Geschäftsmodelle
4. **Strategische Positionierung**:
- Etablierung in Nischenmärkten
- Aufbau von Partnerschaften
- Entwicklung proprietärer Technologien

View file

@@ -0,0 +1,799 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="icon" href="/poweron-favicon.png" type="image/png">
<title>PowerOn Platform - Big Picture | PowerON</title>
<meta name="description" content="PowerON Platform Architecture - Big Picture for External Developers">
<meta name="author" content="PowerON">
<!-- Fonts -->
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=DM+Sans:ital,wght@0,400;0,500;0,700;1,400&display=swap" rel="stylesheet">
<!-- Styles -->
<link rel="stylesheet" href="doc_platform_big_picture.css">
</head>
<body>
<div class="header">
<div class="navbar">
<a href="/" class="logo">
<img src="logo2.png" alt="PowerON" class="logo-img" onerror="this.style.display='none'; this.nextElementSibling.style.display='inline';">
</a>
<nav>
<span class="nav-title">Platform Architecture</span>
</nav>
</div>
</div>
<div class="container">
<div class="hero">
<h1>PowerOn Platform - Big Picture</h1>
<p class="subtitle">Enterprise AI Workflow Platform with Integrated Data Privacy Neutralizer</p>
<p class="intro">This document provides an overview of the PowerOn platform architecture, building blocks, and capabilities for external software developers who want to contribute to or integrate with the platform.</p>
</div>
<!-- Tabs Navigation -->
<div class="tabs">
<button class="tab-button active" onclick="openTab(event, 'overview')">Overview</button>
<button class="tab-button" onclick="openTab(event, 'customer-story')">Customer Story</button>
<button class="tab-button" onclick="openTab(event, 'workflows')">Workflows</button>
<button class="tab-button" onclick="openTab(event, 'services')">Microservices</button>
<button class="tab-button" onclick="openTab(event, 'rbac')">RBAC System</button>
<button class="tab-button" onclick="openTab(event, 'ui')">UI Architecture</button>
<button class="tab-button" onclick="openTab(event, 'big-picture')">Big Picture</button>
<button class="tab-button" onclick="openTab(event, 'integration')">Integration</button>
</div>
<!-- Tab Content -->
<div id="overview" class="tab-content active">
<h2>Platform Overview</h2>
<div class="section">
<h3>Core Concept</h3>
<p>PowerOn is a <strong>Multi-Agent AI Platform for Enterprise Workflows</strong> with an integrated data privacy neutralizer. The platform enables companies to accelerate their AI transformation without data privacy risks.</p>
<div class="highlight-box">
<h4>Key Value Propositions</h4>
<ul>
<li><strong>Data Privacy First:</strong> Integrated privacy neutralizer enables safe use of ChatGPT/Copilot without privacy risks</li>
<li><strong>Unlimited Processing:</strong> No token limits - process documents of any size through intelligent chunking</li>
<li><strong>Universal Integration:</strong> Seamless integration of all enterprise data sources</li>
<li><strong>Workflow Automation:</strong> Configure workflows per customer journey with standard automation elements and AI components</li>
<li><strong>Future-Proof Architecture:</strong> Automatically improves with better AI models and larger token limits</li>
<li><strong>Plug & Play Architecture:</strong> Renderers and dynamic AI selection per intention (analyze, generate, web, plan, etc.)</li>
</ul>
</div>
</div>
<div class="section">
<h3>Architecture Layers</h3>
<div class="architecture-diagram">
<div class="layer">
<h4>UI Layer (Playground)</h4>
<p>React-based playground UI as entry point. Additional UIs (chatbots, customer UIs) can be easily integrated via REST API in React, JavaScript, or other languages.</p>
</div>
<div class="layer">
<h4>API Layer</h4>
<p>RESTful API providing full access to platform capabilities. Open API design allows external UIs and integrations.</p>
</div>
<div class="layer">
<h4>Workflow Engine</h4>
<p>Core orchestration engine managing tasks, actions, and state. Supports multiple execution modes (Learning, Actionplan, Automation).</p>
</div>
<div class="layer">
<h4>Microservices Layer</h4>
<p>Modular service architecture with specialized services for AI, data processing, security, and integrations.</p>
</div>
<div class="layer">
<h4>Data Layer</h4>
<p>Multi-tenant database with RBAC-based access control. Mandate isolation ensures secure data separation.</p>
</div>
</div>
</div>
<div class="section">
<h3>Customer Journey → Workflow</h3>
<p>For each customer journey, a workflow can be configured in the workflow editor where:</p>
<ul>
<li>Customers integrate their data sources</li>
<li>Standard automation elements are available</li>
<li>AI components can be used</li>
<li>Workflows can be executed manually or automated (hourly/daily/weekly)</li>
</ul>
</div>
<div class="section">
<h3>Plug & Play Architecture</h3>
<div class="feature-grid">
<div class="feature-card">
<h4>Dynamic Renderers</h4>
<p>Plug & play architecture for document renderers. Support for multiple formats (PDF, DOCX, XLSX, PPTX, HTML, Markdown, JSON, CSV, etc.) with easy extension capabilities.</p>
</div>
<div class="feature-card">
<h4>Dynamic AI Selection</h4>
<p>Intelligent AI model selection per intention type. The system automatically selects the best AI model based on the task: analysis, generation, web research, planning, etc.</p>
</div>
</div>
</div>
<div class="section">
<h3>System Architecture Diagram</h3>
<div class="diagram-image-container">
<img src="doc_platform_01_platform_overview.jpg" alt="PowerON Platform Architecture Diagram" class="diagram-image">
</div>
</div>
</div>
<div id="customer-story" class="tab-content">
<h2>Customer Story</h2>
<div class="section">
<h3>The Journey from Application-Centric to Data-Centric Work</h3>
<p class="lead">PowerOn enables customers to transition from <strong>application-centric</strong> to <strong>data-centric</strong> work. This is a <strong>key differentiator</strong> that transforms how businesses operate.</p>
</div>
<div class="section">
<h3>Step 1: Customer Journey Identification</h3>
<div class="step-card">
<div class="step-number">1</div>
<div class="step-content">
<h4>Identify Business Processes</h4>
<p>Work with customers to identify their key customer journeys and business processes that can benefit from automation and AI.</p>
<ul>
<li>Document analysis workflows</li>
<li>Email processing and routing</li>
<li>Data extraction and transformation</li>
<li>Report generation</li>
<li>Customer communication workflows</li>
</ul>
</div>
</div>
</div>
<div class="section">
<h3>Step 2: MVP Integration with Focus on Data Privacy & Compliance</h3>
<div class="step-card">
<div class="step-number">2</div>
<div class="step-content">
<h4>Simple MVP Integration</h4>
<p>Start with a simple MVP that integrates customer data sources with <strong>strong focus on data privacy and compliance</strong>:</p>
<ul>
<li><strong>Data Privacy Neutralizer:</strong> Automatic anonymization of sensitive data before AI processing</li>
<li><strong>Compliance First:</strong> DSGVO/GDPR compliant processing from day one</li>
<li><strong>Secure Connections:</strong> Encrypted connections to customer data sources (SharePoint, Google Drive, Outlook, etc.)</li>
<li><strong>Mandate Isolation:</strong> Complete data separation between tenants</li>
<li><strong>Audit Logging:</strong> Full traceability of all data access and processing</li>
</ul>
<p class="highlight-text">This step builds trust and demonstrates the platform's commitment to data security.</p>
</div>
</div>
</div>
<div class="section">
<h3>Step 3: Pre-Processing Engine Deployment</h3>
<div class="step-card">
<div class="step-number">3</div>
<div class="step-content">
<h4>Standard API Pre-Processing</h4>
<p>Deploy a pre-processing engine at the customer's location using a <strong>standard API</strong>:</p>
<ul>
<li><strong>On-Premise/Edge Processing:</strong> Data processing happens at the customer's location</li>
<li><strong>Standard API:</strong> Consistent interface for all customers</li>
<li><strong>Data Minimization:</strong> Only necessary data is sent to the platform</li>
<li><strong>Local Neutralization:</strong> Privacy neutralization can happen before data leaves customer premises</li>
<li><strong>Reduced Latency:</strong> Faster processing for large documents</li>
</ul>
<p class="highlight-text">This step further enhances data privacy and gives customers full control over their data processing.</p>
</div>
</div>
</div>
<div class="section">
<h3>Step 4: Gradual Component Integration - The Transformation</h3>
<div class="step-card">
<div class="step-number">4</div>
<div class="step-content">
<h4>From Application-Centric to Data-Centric</h4>
<p>Gradually integrate additional components until the customer works <strong>data-centrically</strong> instead of <strong>application-centrically</strong>:</p>
<div class="transformation-comparison">
<div class="comparison-box old">
<h5>❌ Application-Centric (Old Way)</h5>
<ul>
<li>Work within individual applications (Word, Excel, SharePoint, Outlook)</li>
<li>Manual data transfer between applications</li>
<li>Data silos in different systems</li>
<li>Workflows are application-bound</li>
<li>Difficult to automate across applications</li>
</ul>
</div>
<div class="comparison-box new">
<h5>✅ Data-Centric (PowerOn Way)</h5>
<ul>
<li>Work with data directly, regardless of source application</li>
<li>Automatic data integration across all sources</li>
<li>Unified data view across all systems</li>
<li>Workflows span multiple applications seamlessly</li>
<li>Easy automation across entire data ecosystem</li>
</ul>
</div>
</div>
<p class="highlight-text"><strong>This transformation is a KEY DIFFERENTIATOR!</strong> Customers no longer think in terms of applications, but in terms of their data and business processes.</p>
</div>
</div>
</div>
<div class="section">
<h3>Customer Journey Diagram</h3>
<div class="diagram-image-container">
<img src="doc_platform_02_customer_story.jpg" alt="Customer Story - Journey from Application-Centric to Data-Centric" class="diagram-image">
</div>
</div>
</div>
<div id="workflows" class="tab-content">
<h2>Workflow System</h2>
<div class="section">
<h3>Core Concept: Tasks with Actions</h3>
<p class="lead">The core building block is <strong>workflow elements: tasks with actions</strong>. Each workflow consists of tasks, and each task contains one or more actions that execute specific operations.</p>
<div class="workflow-structure">
<div class="workflow-item">
<h4>Workflow</h4>
<p><strong>Definition:</strong> Top-level container representing a complete customer journey or business process.</p>
<p><strong>Purpose:</strong> Orchestrates multiple tasks to achieve a business goal.</p>
</div>
<div class="workflow-item">
<h4>Task</h4>
<p><strong>Definition:</strong> A logical step in the workflow.</p>
<p><strong>Purpose:</strong> Groups related actions that work together to complete a sub-goal.</p>
</div>
<div class="workflow-item">
<h4>Action</h4>
<p><strong>Definition:</strong> Executable unit that performs a specific operation.</p>
<p><strong>Purpose:</strong> Actions belong to methods (microservices) and are the atomic units of work.</p>
</div>
</div>
</div>
<div class="section">
<h3>Execution Modes</h3>
<p class="lead">PowerOn supports three execution modes, each optimized for different use cases:</p>
<div class="mode-grid">
<div class="mode-card">
<h4>Learning Mode</h4>
<p><strong>Best for:</strong> Exploratory tasks with up to 5 steps</p>
<p><strong>Approach:</strong> Iterative Plan-Act-Observe-Refine loop</p>
<p><strong>Use Case:</strong> When the solution path is not fully known in advance</p>
</div>
<div class="mode-card">
<h4>Actionplan Mode</h4>
<p><strong>Best for:</strong> Structured, sequential processes</p>
<p><strong>Approach:</strong> Batch planning with sequential execution</p>
<p><strong>Use Case:</strong> When the workflow steps are well-defined</p>
</div>
<div class="mode-card">
<h4>Automation Mode</h4>
<p><strong>Best for:</strong> Repetitive, predefined workflows</p>
<p><strong>Approach:</strong> Automated execution (scheduled or event-triggered)</p>
<p><strong>Use Case:</strong> Production workflows that run automatically</p>
</div>
</div>
</div>
<div class="section">
<h3>Available Workflow Methods</h3>
<p class="lead">Workflow methods provide actions that can be executed within workflows. Each method exposes multiple actions accessible via <code>self.services.&lt;method&gt;.&lt;action&gt;</code>:</p>
<ul>
<li><strong>ai.*</strong> - AI operations (process, analyze, generate)</li>
<li><strong>sharepoint.*</strong> - SharePoint integration (search, read, upload)</li>
<li><strong>outlook.*</strong> - Outlook integration (read emails, send emails)</li>
<li><strong>context.*</strong> - Context management (get context, set context)</li>
</ul>
</div>
<div class="section">
<h3>Workflow System Diagram</h3>
<div class="diagram-image-container">
<img src="doc_platform_03_workflow_system.jpg" alt="Workflow System - Structure, Execution Modes, and Available Methods" class="diagram-image">
</div>
</div>
</div>
<div id="services" class="tab-content">
<h2>Microservices Architecture</h2>
<div class="section">
<h3>Service Access Pattern</h3>
<p class="lead">All microservices are accessible via <code>self.services.&lt;serviceName&gt;</code>. Services follow a consistent access pattern and are organized into logical categories.</p>
</div>
<div class="section">
<h3>Services Structure Tree</h3>
<p>Complete overview of all available microservices:</p>
<div class="services-tree">
<div class="service-category">
<h4>Core Services</h4>
<ul>
<li><code>self.services.chat</code> - Chat and conversation management
<ul>
<li>Progress logging</li>
<li>Document management</li>
<li>Connection handling</li>
</ul>
</li>
<li><code>self.services.workflow</code> - Workflow state and management</li>
<li><code>self.services.utils</code> - Utility functions (timestamps, formatting, etc.)</li>
</ul>
</div>
<div class="service-category">
<h4>AI & Processing Services</h4>
<ul>
<li><code>self.services.ai</code> - AI model management and operations
<ul>
<li>Model selection</li>
<li>Prompt processing</li>
<li>Response handling</li>
</ul>
</li>
<li><code>self.services.generation</code> - Document generation
<ul>
<li>Multiple formats (PDF, DOCX, XLSX, PPTX, HTML, Markdown, etc.)</li>
<li>Template-based rendering</li>
<li>JSON schema support</li>
</ul>
</li>
<li><code>self.services.extraction</code> - Document extraction and processing
<ul>
<li>Multiple extractors (PDF, DOCX, XLSX, PPTX, CSV, HTML, XML, JSON, Images, etc.)</li>
<li>Intelligent chunking</li>
<li>Merging strategies</li>
</ul>
</li>
<li><code>self.services.neutralization</code> - Data privacy neutralization
<ul>
<li>PII detection and anonymization</li>
<li>Pattern-based neutralization</li>
<li>Binary and text processing</li>
</ul>
</li>
</ul>
</div>
<div class="service-category">
<h4>Integration Services</h4>
<ul>
<li><code>self.services.sharepoint</code> - SharePoint integration
<ul>
<li>Site discovery</li>
<li>File operations (read, upload, search)</li>
<li>Path resolution</li>
</ul>
</li>
<li><code>self.services.web</code> - Web operations
<ul>
<li>HTTP requests</li>
<li>Web scraping</li>
<li>API integration</li>
</ul>
</li>
<li><code>self.services.ticket</code> - Ticket system integration
<ul>
<li>Jira integration</li>
<li>ClickUp integration</li>
<li>Generic ticket operations</li>
</ul>
</li>
</ul>
</div>
<div class="service-category">
<h4>Security & Infrastructure</h4>
<ul>
<li><code>self.services.security</code> - Security operations
<ul>
<li>Authentication</li>
<li>Authorization</li>
<li>Token management</li>
</ul>
</li>
</ul>
</div>
</div>
</div>
<div class="section">
<h3>Code Examples</h3>
<p>Examples of how to use services in workflow actions or methods:</p>
<pre><code># In workflow actions or methods
result = await self.services.&lt;service&gt;.&lt;method&gt;(parameters)
# Example: Using AI service
response = await self.services.ai.process(prompt="Analyze this document", documents=[...])
# Example: Using SharePoint service
files = await self.services.sharepoint.searchFiles(pathQuery="sites/my-site/documents")
# Example: Using generation service
document = self.services.generation.createDocument(format="pdf", content={...})</code></pre>
</div>
<div class="section">
<h3>Microservices Architecture Diagram</h3>
<div class="diagram-image-container">
<img src="doc_platform_04_microservice_architecture.jpg" alt="Microservices Architecture - Core Services, AI & Processing, Integration Services, and Security" class="diagram-image">
</div>
</div>
</div>
<div id="rbac" class="tab-content">
<h2>RBAC System</h2>
<div class="section">
<h3>Overview</h3>
<p class="lead">The Role-Based Access Control (RBAC) system provides <strong>complete UI configuration per tenant and user</strong>. It enables fine-grained control over data access, UI visibility, and resource availability.</p>
<div class="feature-grid">
<div class="feature-card">
<h4>Data Access</h4>
<p>Table and field-level permissions for database operations. Control who can read, create, update, or delete specific data.</p>
</div>
<div class="feature-card">
<h4>UI Access</h4>
<p>Component and feature visibility management. Configure exactly which UI elements each user or role can see.</p>
</div>
<div class="feature-card">
<h4>Resource Access</h4>
<p>System resource availability control. Manage access to AI models, actions, and other platform resources.</p>
</div>
</div>
</div>
<div class="section">
<h3>Access Levels: Opening Logic</h3>
<p class="lead">For DATA context, the system uses <strong>opening rights</strong> with four access levels. These levels determine what data a user can access:</p>
<div class="access-levels">
<div class="access-level">
<h4>none (n)</h4>
<p>No access - item is completely hidden/disabled</p>
</div>
<div class="access-level">
<h4>my (m)</h4>
<p>My records - only records created by the current user</p>
</div>
<div class="access-level">
<h4>group (g)</h4>
<p>Group records - records within the same mandate (group context)</p>
</div>
<div class="access-level">
<h4>all (a)</h4>
<p>All records - full access to all records in the mandate</p>
</div>
</div>
</div>
<div class="section">
<h3>View Logic: Open + Close</h3>
<p class="lead">The <code>view</code> attribute controls visibility and enablement. This is the fundamental on/off switch for all RBAC contexts:</p>
<ul>
<li><strong>view: true</strong> - Item is visible/enabled</li>
<li><strong>view: false</strong> - Item is hidden/disabled (regardless of other permissions)</li>
</ul>
<p><strong>Key Rule:</strong> Only objects with <code>view: true</code> are shown. This applies to:</p>
<ul>
<li><strong>DATA Context:</strong> Controls whether tables/fields are accessible</li>
<li><strong>UI Context:</strong> Controls whether UI elements are visible</li>
<li><strong>RESOURCE Context:</strong> Controls whether resources are available</li>
</ul>
</div>
<div class="section">
<h3>Rule Specificity & Hierarchy</h3>
<p class="lead">The RBAC system uses a cascading hierarchy where more specific rules override generic ones:</p>
<ol>
<li><strong>Generic Rules</strong> (<code>item = null</code>) - Apply to all items in context</li>
<li><strong>Specific Rules</strong> (<code>item = "table.field"</code> or <code>item = "ui.component.feature"</code>) - Override generic rules</li>
</ol>
<p><strong>Resolution Logic:</strong> Within a single role, the most specific rule wins. Across multiple roles, opening (union) logic applies - if ANY role enables something, it is enabled.</p>
</div>
<div class="section">
<h3>Opening Rights Principle</h3>
<p class="lead">For DATA context, read permission (R) is a prerequisite for create/update/delete operations (CUD). This ensures data integrity and proper access control:</p>
<ul>
<li>If Read = "n": No CUD operations allowed</li>
<li>If Read = "m": CUD operations limited to "m" or "n"</li>
<li>If Read = "g": CUD operations limited to "g", "m", or "n"</li>
<li>If Read = "a": CUD operations can be "a", "g", "m", or "n"</li>
</ul>
<p><strong>Key Rule:</strong> You can ONLY create/update/delete if you have read right.</p>
</div>
<div class="section">
<h3>Context Types</h3>
<p class="lead">RBAC rules apply to three different context types, each serving a specific purpose:</p>
<div class="context-grid">
<div class="context-card">
<h4>DATA</h4>
<p>Database tables and fields. Controls read/create/update/delete permissions.</p>
<p><strong>Example:</strong> <code>item: "UserInDB.email"</code></p>
</div>
<div class="context-card">
<h4>UI</h4>
<p>UI elements and features. Controls component visibility.</p>
<p><strong>Example:</strong> <code>item: "playground.voice.settings"</code></p>
</div>
<div class="context-card">
<h4>RESOURCE</h4>
<p>System resources (AI models, actions, etc.). Controls resource availability.</p>
<p><strong>Example:</strong> <code>item: "ai.model.anthropic"</code></p>
</div>
</div>
</div>
<div class="section">
<h3>RBAC System Diagram</h3>
<div class="diagram-image-container">
<img src="doc_platform_05_rbac_system.jpg" alt="RBAC System - Contexts, Access Levels, View Logic, and Rule Hierarchy" class="diagram-image">
</div>
</div>
</div>
<div id="ui" class="tab-content">
<h2>UI Architecture</h2>
<div class="section">
<h3>Playground UI</h3>
<p class="lead">The <strong>Playground</strong> serves as the main entry point and demonstration UI. It's built with React and provides a comprehensive interface for workflow interaction:</p>
<ul>
<li>Chat interface for workflow interaction</li>
<li>Workflow editor for configuration</li>
<li>Document management</li>
<li>Connection management</li>
<li>Voice input/output capabilities</li>
</ul>
</div>
<div class="section">
<h3>RBAC-Driven UI Configuration</h3>
<p class="lead">The UI is <strong>completely configurable via RBAC rules</strong>. This allows customers to configure exactly the UI they need for their use case:</p>
<ul>
<li>Per tenant configuration</li>
<li>Per user configuration</li>
<li>Component-level visibility control</li>
<li>Feature-level access control</li>
</ul>
<p>This allows customers to configure exactly the UI they need for their use case.</p>
</div>
<div class="section">
<h3>External UI Integration</h3>
<p class="lead">Additional UIs can be easily integrated via the REST API. All UI components communicate with the platform through the standardized REST API, ensuring consistent behavior and security:</p>
<ul>
<li><strong>Chatbots:</strong> Build custom chatbots using the workflow API</li>
<li><strong>Customer UIs:</strong> Create customer-specific interfaces in React, JavaScript, or other languages</li>
<li><strong>Mobile Apps:</strong> Integrate via REST API from mobile applications</li>
<li><strong>Third-Party Tools:</strong> Connect existing tools via webhooks and API</li>
</ul>
<p>All UI components communicate with the platform through the standardized REST API, ensuring consistent behavior and security.</p>
</div>
<div class="section">
<h3>Available UI Components</h3>
<p class="lead">The platform provides reusable UI components that can be configured via RBAC:</p>
<ul>
<li>Chat interface</li>
<li>Document viewer/editor</li>
<li>Workflow editor</li>
<li>Connection manager</li>
<li>Settings panels</li>
<li>Dashboard widgets</li>
</ul>
</div>
<div class="section">
<h3>UI Architecture Diagram</h3>
<div class="diagram-image-container">
<img src="doc_platform_06_ui_architecture.jpg" alt="UI Architecture - RBAC-Driven Configuration, UI Components, UI Layer, and REST API" class="diagram-image">
</div>
</div>
</div>
<div id="big-picture" class="tab-content">
<h2>Big Picture & Future Vision</h2>
<div class="section">
<h3>Vendor-Independent Platform</h3>
<div class="vision-card">
<h4>AI Model Independence</h4>
<p>PowerOn is designed as a <strong>vendor-independent platform</strong> regarding AI models:</p>
<ul>
<li>Support for multiple AI providers (OpenAI, Anthropic, Google, Azure, etc.)</li>
<li>Dynamic model selection based on task requirements</li>
<li>Easy addition of new AI providers</li>
<li>No vendor lock-in - customers can switch providers seamlessly</li>
</ul>
</div>
<div class="vision-card">
<h4>Connector Independence</h4>
<p>Universal connector architecture supporting all major platforms:</p>
<ul>
<li><strong>Microsoft:</strong> SharePoint, Outlook, Teams, OneDrive, Azure</li>
<li><strong>Google:</strong> Drive, Gmail, Workspace, Cloud</li>
<li><strong>Amazon:</strong> AWS services, S3, etc.</li>
<li><strong>Other:</strong> Jira, Slack, Salesforce, and many more</li>
</ul>
<p>Customers are not locked into a single vendor ecosystem.</p>
</div>
</div>
<div class="section">
<h3>Graphical Workflow Modeling</h3>
<div class="vision-card">
<h4>Visual Customer Journey Design</h4>
<p>Future capability to <strong>graphically model workflows</strong> for customer journeys:</p>
<ul>
<li>Drag-and-drop workflow editor</li>
<li>Visual representation of customer journeys</li>
<li>Easy workflow modification without coding</li>
<li>Template library for common workflows</li>
<li>Workflow versioning and testing</li>
</ul>
<p>This makes workflow creation accessible to business users, not just developers.</p>
</div>
</div>
<div class="section">
<h3>MCP Integration in Customer Copilot</h3>
<div class="vision-card">
<h4>Microsoft Copilot Plugin Architecture</h4>
<p>Integration of PowerOn actions as <strong>MCP (Model Context Protocol) plugins</strong> in the customer's Copilot:</p>
<ul>
<li><strong>Native Copilot Integration:</strong> PowerOn workflows accessible directly from Microsoft Copilot</li>
<li><strong>Action Library:</strong> All PowerOn actions available as Copilot plugins</li>
<li><strong>Seamless Experience:</strong> Customers use PowerOn capabilities without leaving Copilot</li>
<li><strong>Enterprise Workflows:</strong> Complex workflows triggered from simple Copilot conversations</li>
<li><strong>Data Privacy:</strong> All PowerOn privacy features work seamlessly in Copilot context</li>
</ul>
<p class="highlight-text">This enables customers to leverage PowerOn's powerful workflow capabilities directly from their familiar Copilot interface.</p>
</div>
</div>
<div class="section">
<h3>Platform Evolution</h3>
<div class="vision-grid">
<div class="vision-item">
<h4>Today</h4>
<ul>
<li>REST API-based workflows</li>
<li>Playground UI</li>
<li>Multiple AI providers</li>
<li>Standard connectors</li>
</ul>
</div>
<div class="vision-item">
<h4>Near Future</h4>
<ul>
<li>Graphical workflow editor</li>
<li>MCP Copilot integration</li>
<li>Enhanced pre-processing</li>
<li>Advanced AI selection</li>
</ul>
</div>
<div class="vision-item">
<h4>Future</h4>
<ul>
<li>AI-powered workflow generation</li>
<li>Multi-platform Copilot support</li>
<li>Edge computing expansion</li>
<li>Federated learning</li>
</ul>
</div>
</div>
</div>
<div class="section">
<h3>Big Picture & Future Vision Diagram</h3>
<div class="diagram-image-container">
<img src="doc_platform_07_big_picture_and_future_vision.jpg" alt="Big Picture & Future Vision - Platform Evolution from Today to Future" class="diagram-image">
</div>
</div>
</div>
<div id="integration" class="tab-content">
<h2>Integration Guide</h2>
<div class="section">
<h3>REST API</h3>
<p class="lead">The platform exposes a comprehensive REST API for all operations. This API serves as the primary integration point for external developers:</p>
<ul>
<li><strong>Workflow API:</strong> Create, execute, and manage workflows</li>
<li><strong>Document API:</strong> Upload, download, and process documents</li>
<li><strong>Connection API:</strong> Manage external connections (SharePoint, Outlook, etc.)</li>
<li><strong>RBAC API:</strong> Manage roles and permissions</li>
<li><strong>Options API:</strong> Dynamic options for UI components</li>
</ul>
</div>
<div class="section">
<h3>Building Blocks for Developers</h3>
<p class="lead">Developers can extend the platform by creating custom components in these areas:</p>
<div class="building-blocks">
<div class="block">
<h4>Workflow Methods</h4>
<p>Create custom workflow methods by extending <code>MethodBase</code> and registering actions.</p>
</div>
<div class="block">
<h4>Services</h4>
<p>Extend the services layer by creating new service modules following the existing pattern.</p>
</div>
<div class="block">
<h4>Connectors</h4>
<p>Build connectors for external systems (databases, APIs, services) using the connector interface.</p>
</div>
<div class="block">
<h4>UI Components</h4>
<p>Create React components that integrate with the REST API and respect RBAC rules.</p>
</div>
</div>
</div>
<div class="section">
<h3>Development Workflow</h3>
<p class="lead">Follow these steps to get started with platform development:</p>
<ol>
<li><strong>Understand the Architecture:</strong> Review this document and codebase structure</li>
<li><strong>Set Up Development Environment:</strong> Clone repository and configure local environment</li>
<li><strong>Choose Integration Point:</strong> Decide whether to extend workflows, services, or UI</li>
<li><strong>Follow Patterns:</strong> Use existing code as reference for consistent implementation</li>
<li><strong>Test with RBAC:</strong> Ensure your changes respect RBAC rules</li>
<li><strong>Document:</strong> Update documentation for your changes</li>
</ol>
</div>
<div class="section">
<h3>Key Integration Points</h3>
<p class="lead">Main directories where developers can add new functionality:</p>
<ul>
<li><code>gateway/modules/workflows/methods/</code> - Add new workflow methods</li>
<li><code>gateway/modules/services/</code> - Add new microservices</li>
<li><code>gateway/modules/connectors/</code> - Add new connectors</li>
<li><code>gateway/modules/routes/</code> - Add new API endpoints</li>
<li><code>gateway/modules/features/</code> - Add new features</li>
</ul>
</div>
</div>
</div>
<div class="footer">
<div class="container">
<p>&copy; 2025 PowerON. All rights reserved.</p>
<p>Platform Architecture Documentation v1.0</p>
</div>
</div>
<script>
// Switch the visible tab: deactivate every tab panel and button,
// then activate the panel named by tabName and the clicked button.
function openTab(evt, tabName) {
    document.querySelectorAll(".tab-content").forEach(function (panel) {
        panel.classList.remove("active");
    });
    document.querySelectorAll(".tab-button").forEach(function (button) {
        button.classList.remove("active");
    });
    document.getElementById(tabName).classList.add("active");
    evt.currentTarget.classList.add("active");
}
</script>
</body>
</html>

View file

@ -0,0 +1,880 @@
<!DOCTYPE html>
<html lang="de">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>PowerOn Kunden und Nutzerreferenzen</title>
<style>
/* PowerOn.swiss Stylesheet */
/* Tailwind CSS Custom Properties & Design Tokens */
:root {
/* Locale */
-webkit-locale: "de";
/* Tailwind Transform Properties */
--tw-border-spacing-x: 0;
--tw-border-spacing-y: 0;
--tw-translate-x: 0;
--tw-translate-y: 0;
--tw-rotate: 0;
--tw-skew-x: 0;
--tw-skew-y: 0;
--tw-scale-x: 1;
--tw-scale-y: 1;
--tw-pan-x: ;
--tw-pan-y: ;
--tw-pinch-zoom: ;
--tw-scroll-snap-strictness: proximity;
/* Tailwind Gradient Properties */
--tw-gradient-from-position: ;
--tw-gradient-via-position: ;
--tw-gradient-to-position: ;
/* Tailwind Typography Properties */
--tw-ordinal: ;
--tw-slashed-zero: ;
--tw-numeric-figure: ;
--tw-numeric-spacing: ;
--tw-numeric-fraction: ;
/* Tailwind Ring/Shadow Properties */
--tw-ring-inset: ;
--tw-ring-offset-width: 0px;
--tw-ring-offset-color: #fff;
--tw-ring-color: rgb(59 130 246 / .5);
--tw-ring-offset-shadow: 0 0 #0000;
--tw-ring-shadow: 0 0 #0000;
--tw-shadow: 0 0 #0000;
--tw-shadow-colored: 0 0 #0000;
/* Tailwind Filter Properties */
--tw-blur: ;
--tw-brightness: ;
--tw-contrast: ;
--tw-grayscale: ;
--tw-hue-rotate: ;
--tw-invert: ;
--tw-saturate: ;
--tw-sepia: ;
--tw-drop-shadow: ;
/* Tailwind Backdrop Filter Properties */
--tw-backdrop-blur: ;
--tw-backdrop-brightness: ;
--tw-backdrop-contrast: ;
--tw-backdrop-grayscale: ;
--tw-backdrop-hue-rotate: ;
--tw-backdrop-invert: ;
--tw-backdrop-opacity: ;
--tw-backdrop-saturate: ;
--tw-backdrop-sepia: ;
/* Tailwind Container Properties */
--tw-contain-size: ;
--tw-contain-layout: ;
--tw-contain-paint: ;
--tw-contain-style: ;
/* Design System Colors (HSL Format) */
/* Base Colors */
--background: 0 0% 100%;
--foreground: 222.2 84% 4.9%;
/* Card Colors */
--card: 0 0% 100%;
--card-foreground: 222.2 84% 4.9%;
/* Popover Colors */
--popover: 0 0% 100%;
--popover-foreground: 222.2 84% 4.9%;
/* Primary Colors (Red Brand Color) */
--primary: 0 84% 42%;
--primary-foreground: 0 0% 100%;
/* Secondary Colors */
--secondary: 210 40% 96.1%;
--secondary-foreground: 222.2 47.4% 11.2%;
/* Muted Colors */
--muted: 210 40% 96.1%;
--muted-foreground: 215.4 16.3% 46.9%;
/* Accent Colors */
--accent: 210 40% 96.1%;
--accent-foreground: 222.2 47.4% 11.2%;
/* Destructive Colors */
--destructive: 0 84% 42%;
--destructive-foreground: 210 40% 98%;
/* Custom Red Colors */
--red-primary: 0 84% 42%;
--red-primary-hover: 0 53% 23%;
--red-primary-light: 0 84% 60%;
--red-background-light: 0 84% 97%;
/* Border & Input Colors */
--border: 214.3 31.8% 91.4%;
--input: 214.3 31.8% 91.4%;
--ring: 222.2 84% 4.9%;
/* Tool/Brand Specific Colors */
--tool-dark: 0 0% 9.4%;
--tool-dark-light: 0 0% 16.5%;
--tool-dark-medium: 0 0% 12.2%;
--tool-beige: 43 12% 73.7%;
--tool-beige-light: 43 20% 80%;
--tool-beige-dark: 43 8% 67%;
--tool-orange: 9 90% 60.6%;
--tool-orange-light: 9 85% 65%;
--tool-orange-dark: 9 94% 53%;
/* Border Radius */
--radius: 0.5rem;
/* Sidebar Colors */
--sidebar-background: 0 0% 98%;
--sidebar-foreground: 240 5.3% 26.1%;
--sidebar-primary: 240 5.9% 10%;
--sidebar-primary-foreground: 0 0% 98%;
--sidebar-accent: 240 4.8% 95.9%;
--sidebar-accent-foreground: 240 5.9% 10%;
--sidebar-border: 220 13% 91%;
--sidebar-ring: 217.2 91.2% 59.8%;
}
/* Base Reset */
*,
*::before,
*::after {
box-sizing: border-box;
border-width: 0;
border-style: solid;
border-color: hsl(var(--border));
}
/* Body Base Styles */
body {
line-height: 1.5;
-webkit-text-size-adjust: 100%;
tab-size: 4;
font-family: 'DM Sans', sans-serif;
font-feature-settings: normal;
font-variation-settings: normal;
-webkit-tap-highlight-color: transparent;
background-color: hsl(var(--background));
color: hsl(var(--foreground));
}
/* Utility Classes für die Farben */
.bg-background { background-color: hsl(var(--background)); }
.bg-primary { background-color: hsl(var(--primary)); }
.bg-secondary { background-color: hsl(var(--secondary)); }
.bg-muted { background-color: hsl(var(--muted)); }
.bg-card { background-color: hsl(var(--card)); }
.text-foreground { color: hsl(var(--foreground)); }
.text-primary { color: hsl(var(--primary)); }
.text-primary-foreground { color: hsl(var(--primary-foreground)); }
.text-muted-foreground { color: hsl(var(--muted-foreground)); }
.border-border { border-color: hsl(var(--border)); }
/* Custom Red Button */
.btn-red-primary {
background-color: hsl(var(--red-primary));
color: hsl(var(--primary-foreground));
border-radius: var(--radius);
}
.btn-red-primary:hover {
background-color: hsl(var(--red-primary-hover));
}
/* Tool Colors */
.bg-tool-dark { background-color: hsl(var(--tool-dark)); }
.bg-tool-beige { background-color: hsl(var(--tool-beige)); }
.bg-tool-orange { background-color: hsl(var(--tool-orange)); }
/* Custom Layout Styles */
.container {
max-width: 1200px;
margin: 0 auto;
padding: 0 20px;
}
.header {
background: hsl(var(--red-primary));
color: hsl(var(--primary-foreground));
padding: 60px 0;
text-align: center;
}
.logo {
font-size: 2.5rem;
font-weight: 700;
margin-bottom: 20px;
}
.logo a {
color: hsl(var(--primary-foreground));
text-decoration: none;
}
.subtitle {
font-size: 1.2rem;
opacity: 0.9;
max-width: 600px;
margin: 0 auto;
}
.main-content {
padding: 60px 0;
}
.section {
margin-bottom: 50px;
background: hsl(var(--card));
border-radius: var(--radius);
padding: 40px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05);
border: 1px solid hsl(var(--border));
}
.section h2 {
color: hsl(var(--foreground));
font-size: 2rem;
margin-bottom: 30px;
border-bottom: 3px solid hsl(var(--red-primary));
padding-bottom: 10px;
}
.section h3 {
color: hsl(var(--foreground));
font-size: 1.4rem;
margin: 30px 0 15px 0;
display: flex;
align-items: center;
}
.section h3::before {
content: "▶";
color: hsl(var(--red-primary));
margin-right: 10px;
font-size: 0.8rem;
}
.feature-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
gap: 30px;
margin-top: 30px;
}
.feature-card {
background: hsl(var(--muted));
border: 1px solid hsl(var(--border));
border-radius: var(--radius);
padding: 25px;
transition: transform 0.2s ease, box-shadow 0.2s ease;
}
.feature-card:hover {
transform: translateY(-2px);
box-shadow: 0 8px 25px rgba(0, 0, 0, 0.1);
}
.feature-card h3 {
color: hsl(var(--foreground));
margin-bottom: 15px;
font-size: 1.2rem;
}
.feature-card ul {
list-style: none;
padding: 0;
}
.feature-card li {
padding: 8px 0;
position: relative;
padding-left: 20px;
}
.feature-card li::before {
content: "✓";
color: hsl(var(--red-primary));
font-weight: bold;
position: absolute;
left: 0;
}
.use-case-card {
background: hsl(var(--card));
border: 2px solid hsl(var(--red-primary));
border-radius: var(--radius);
padding: 30px;
margin-bottom: 30px;
transition: transform 0.2s ease, box-shadow 0.2s ease;
}
.use-case-card:hover {
transform: translateY(-2px);
box-shadow: 0 8px 25px rgba(0, 0, 0, 0.1);
}
.use-case-title {
color: hsl(var(--red-primary));
font-size: 1.3rem;
font-weight: 700;
margin-bottom: 20px;
display: flex;
align-items: center;
}
.use-case-title::before {
content: attr(data-icon);
font-size: 1.5rem;
margin-right: 10px;
}
.use-case-content {
margin-bottom: 20px;
}
.use-case-content h4 {
color: hsl(var(--foreground));
font-size: 1.1rem;
font-weight: 600;
margin: 15px 0 8px 0;
}
.use-case-content p {
color: hsl(var(--muted-foreground));
margin-bottom: 10px;
line-height: 1.6;
}
.process-flow {
background: hsl(var(--red-background-light));
border: 1px solid hsl(var(--red-primary-light));
border-radius: var(--radius);
padding: 20px;
margin: 20px 0;
overflow-x: auto;
}
.process-flow-title {
color: hsl(var(--red-primary));
font-weight: 600;
margin-bottom: 15px;
font-size: 1rem;
}
.process-steps {
display: flex;
align-items: center;
gap: 10px;
flex-wrap: wrap;
}
.process-step {
background: hsl(var(--red-primary));
color: hsl(var(--primary-foreground));
padding: 8px 16px;
border-radius: 20px;
font-size: 0.9rem;
font-weight: 500;
white-space: nowrap;
transition: transform 0.2s ease;
}
.process-step:hover {
transform: scale(1.05);
}
.process-arrow {
color: hsl(var(--red-primary));
font-size: 1.2rem;
font-weight: bold;
}
.results {
background: hsl(var(--muted));
color: hsl(var(--foreground));
}
.results h2 {
color: hsl(var(--foreground));
border-bottom-color: hsl(var(--red-primary));
}
.results h3 {
color: hsl(var(--foreground));
}
.results .feature-card {
background: hsl(var(--card));
border-color: hsl(var(--border));
color: hsl(var(--foreground));
}
.results .feature-card h3 {
color: hsl(var(--foreground));
}
.results .feature-card li::before {
color: hsl(var(--red-primary));
}
.approach {
background: hsl(var(--red-background-light));
color: hsl(var(--foreground));
}
.approach h2 {
color: hsl(var(--foreground));
border-bottom-color: hsl(var(--red-primary));
}
.approach h3 {
color: hsl(var(--foreground));
}
.approach .feature-card {
background: hsl(var(--card));
border-color: hsl(var(--red-primary));
color: hsl(var(--foreground));
}
.approach .feature-card h3 {
color: hsl(var(--foreground));
}
.approach .feature-card li::before {
color: hsl(var(--red-primary));
}
.cta {
background: hsl(var(--muted));
color: hsl(var(--foreground));
}
.cta h2 {
color: hsl(var(--foreground));
border-bottom-color: hsl(var(--red-primary));
}
.cta h3 {
color: hsl(var(--foreground));
}
.cta .feature-card {
background: hsl(var(--card));
border-color: hsl(var(--border));
color: hsl(var(--foreground));
}
.cta .feature-card h3 {
color: hsl(var(--foreground));
}
.cta .feature-card li::before {
color: hsl(var(--red-primary));
}
.note {
background: hsl(var(--red-background-light));
border: 1px solid hsl(var(--red-primary-light));
border-radius: var(--radius);
padding: 20px;
margin-top: 30px;
color: hsl(var(--red-primary-hover));
}
.note::before {
content: " ";
font-weight: bold;
}
.footer {
background: hsl(var(--tool-dark));
color: hsl(var(--primary-foreground));
text-align: center;
padding: 40px 0;
}
.footer a {
color: hsl(var(--red-primary-light));
text-decoration: none;
}
.footer a:hover {
text-decoration: underline;
}
@media (max-width: 768px) {
.header {
padding: 40px 0;
}
.logo {
font-size: 2rem;
}
.section {
padding: 25px;
margin-bottom: 30px;
}
.feature-grid {
grid-template-columns: 1fr;
gap: 20px;
}
.process-steps {
flex-direction: column;
align-items: stretch;
}
.process-arrow {
transform: rotate(90deg);
}
}
</style>
</head>
<body>
<div class="header">
<div class="container">
<div class="logo">
<a href="https://poweron.swiss/">PowerOn</a>
</div>
<div class="subtitle">
Kunden- und Nutzerreferenzen (neutralisiert)
</div>
<div style="font-size: 0.9rem; margin-top: 10px; opacity: 0.8;">
Kurzüberblick über realisierte PowerOn-Leistungen ohne Kundennennungen
</div>
</div>
</div>
<div class="main-content">
<div class="container">
<div class="section">
<h2>Leistungsbausteine</h2>
<div class="feature-grid">
<div class="feature-card">
<h3>Impact Sessions</h3>
<ul>
<li>Orientierung für Entscheiderinnen und Entscheider</li>
<li>Klärung Nutzen, Risiken, nächste Schritte</li>
</ul>
</div>
<div class="feature-card">
<h3>Deep Dives & Academy-Module</h3>
<ul>
<li>Hands-on Training mit echten Business Cases</li>
<li>Transfer in konkrete Arbeitsabläufe</li>
</ul>
</div>
<div class="feature-card">
<h3>Workshops / Prototyping</h3>
<ul>
<li>Definition von Use Cases und KPI</li>
<li>Rapid Prototyping bis funktionsfähiges MVP</li>
</ul>
</div>
<div class="feature-card">
<h3>Transformation Labs</h3>
<ul>
<li>Begleitung bis Umsetzung und Go-Live</li>
<li>Skalierung und Betrieb</li>
</ul>
</div>
</div>
</div>
<div class="section">
<h2>Referenz-Use-Cases (ohne Kundendaten)</h2>
<div class="use-case-card">
<div class="use-case-title" data-icon="🔄">Prozessautomatisierung und KPI-Produkt</div>
<div class="use-case-content">
<h4>Kontext</h4>
<p>Hoher manueller Aufwand und intransparente Kosten in Spesen und Controlling bremsen das Tagesgeschäft</p>
<h4>Ziel</h4>
<p>Operative Kosten senken und Steuerungsfähigkeit erhöhen durch standardisierte, schnellere Freigaben</p>
<h4>Lösung</h4>
<p>End-to-End-Workflow in PowerOn mit automatischer Belegerfassung, Prüfung und KPI-Auswertung</p>
<h4>Ergebnis</h4>
<p>Kürzere Durchlaufzeiten und jederzeit transparente Kennzahlen</p>
</div>
<div class="process-flow">
<div class="process-flow-title">Prozessablauf:</div>
<div class="process-steps">
<div class="process-step">Beleg</div>
<div class="process-arrow">→</div>
<div class="process-step">Erfassen</div>
<div class="process-arrow">→</div>
<div class="process-step">Validieren</div>
<div class="process-arrow">→</div>
<div class="process-step">Genehmigen</div>
<div class="process-arrow">→</div>
<div class="process-step">Buchen</div>
<div class="process-arrow">→</div>
<div class="process-step">KPI-Dashboard</div>
</div>
</div>
</div>
<div class="use-case-card">
<div class="use-case-title" data-icon="🧱">Enterprise-Features skalieren für bestehende Lösung</div>
<div class="use-case-content">
<h4>Kontext</h4>
<p>Wachsende Nutzerzahlen und steigende Anforderungen gefährden die wahrgenommene Servicequalität</p>
<h4>Ziel</h4>
<p>Verlässliche Skalierbarkeit sicherstellen und Kundenzufriedenheit schützen</p>
<h4>Lösung</h4>
<p>Rollen- und Berechtigungskonzept erweitern, Performance optimieren und Betriebsprozesse festigen</p>
<h4>Ergebnis</h4>
<p>Hohe Stabilität, schnellere Antwortzeiten und sicherer Betrieb</p>
</div>
<div class="process-flow">
<div class="process-flow-title">Prozessablauf:</div>
<div class="process-steps">
<div class="process-step">Users</div>
<div class="process-arrow">→</div>
<div class="process-step">Auth/Rollen</div>
<div class="process-arrow">→</div>
<div class="process-step">Services</div>
<div class="process-arrow">→</div>
<div class="process-step">Queue/Jobs</div>
<div class="process-arrow">→</div>
<div class="process-step">Monitoring</div>
<div class="process-arrow">→</div>
<div class="process-step">SLO/SLA</div>
</div>
</div>
</div>
<div class="use-case-card">
<div class="use-case-title" data-icon="🧭">Management-Alignment und Entscheidvorbereitung</div>
<div class="use-case-content">
<h4>Kontext</h4>
<p>Strategische Weichenstellung für KI erfordert breite Abstützung und klare Investitionssicht</p>
<h4>Ziel</h4>
<p>Entscheidungssicherheit auf GL-Ebene schaffen und Investitionen fokussieren</p>
<h4>Lösung</h4>
<p>Kompakte Impact-Session mit Variantenvergleich und klarer Roadmap</p>
<h4>Ergebnis</h4>
<p>Verbindliche Entscheide zu Scope, Budget und Zeitplan</p>
</div>
<div class="process-flow">
<div class="process-flow-title">Prozessablauf:</div>
<div class="process-steps">
<div class="process-step">Ausgangslage</div>
<div class="process-arrow">→</div>
<div class="process-step">Optionen</div>
<div class="process-arrow">→</div>
<div class="process-step">Kosten/Nutzen</div>
<div class="process-arrow">→</div>
<div class="process-step">Roadmap</div>
<div class="process-arrow">→</div>
<div class="process-step">Entscheid (GL)</div>
</div>
</div>
</div>
<div class="use-case-card">
<div class="use-case-title" data-icon="🧩">Tech-Workshops zu Multi-Agent-Architektur</div>
<div class="use-case-content">
<h4>Kontext</h4>
<p>Unterschiedliche Vorgehensweisen und Standards verlangsamen Delivery und erschweren Skalierung</p>
<h4>Ziel</h4>
<p>Gemeinsame Spielregeln schaffen, um Time-to-Value zu verkürzen und konsistente Qualität sicherzustellen</p>
<h4>Lösung</h4>
<p>Klare Architekturprinzipien, verbindliche Standards und kollaborative Working Agreements</p>
<h4>Ergebnis</h4>
<p>Einheitliche Regeln, eindeutige Verantwortlichkeiten und eine belastbare Sprint-Roadmap</p>
</div>
<div class="process-flow">
<div class="process-flow-title">Prozessablauf:</div>
<div class="process-steps">
<div class="process-step">Pain Points</div>
<div class="process-arrow">→</div>
<div class="process-step">Prinzipien</div>
<div class="process-arrow">→</div>
<div class="process-step">Standards</div>
<div class="process-arrow">→</div>
<div class="process-step">Working Agreements</div>
<div class="process-arrow">→</div>
<div class="process-step">Sprint-Roadmap</div>
</div>
</div>
</div>
<div class="use-case-card">
<div class="use-case-title" data-icon="📊">Data & Analytics Demo / Reporting</div>
<div class="use-case-content">
<h4>Kontext</h4>
<p>Entscheidungen werden mit Bauchgefühl statt mit einheitlichen Zahlen getroffen</p>
<h4>Ziel</h4>
<p>Entscheidungen im Fachbereich konsequent datenbasiert treffen</p>
<h4>Lösung</h4>
<p>Schlanke Datenaufbereitung mit PowerOn-Pipelines und Visualisierung im BI-Tool</p>
<h4>Ergebnis</h4>
<p>Entscheidungsreife KPIs auf einen Blick</p>
</div>
<div class="process-flow">
<div class="process-flow-title">Prozessablauf:</div>
<div class="process-steps">
<div class="process-step">Datenquellen</div>
<div class="process-arrow">→</div>
<div class="process-step">Bereinigen/Joinen</div>
<div class="process-arrow">→</div>
<div class="process-step">KPIs berechnen</div>
<div class="process-arrow">→</div>
<div class="process-step">Dashboard (BI)</div>
</div>
</div>
</div>
<div class="use-case-card">
<div class="use-case-title" data-icon="🛠️">Code-Modernisierung und Analyse</div>
<div class="use-case-content">
<h4>Kontext</h4>
<p>Veraltete Codebasis bremst Releases, erhöht Betriebsrisiken und erschwert neue Features</p>
<h4>Ziel</h4>
<p>Risiken in Legacy-Code reduzieren und Zukunftsfähigkeit herstellen</p>
<h4>Lösung</h4>
<p>Systematische Code-Analyse mit klaren Migrationspfaden und schnellen Verbesserungen</p>
<h4>Ergebnis</h4>
<p>Priorisierte Massnahmen mit messbarem Risikoabbau</p>
</div>
<div class="process-flow">
<div class="process-flow-title">Prozessablauf:</div>
<div class="process-steps">
<div class="process-step">Systeme</div>
<div class="process-arrow">→</div>
<div class="process-step">Code-Analyse</div>
<div class="process-arrow">→</div>
<div class="process-step">Risiken bewerten</div>
<div class="process-arrow">→</div>
<div class="process-step">Migrationspfade</div>
<div class="process-arrow">→</div>
<div class="process-step">Quick Wins</div>
<div class="process-arrow">→</div>
<div class="process-step">Stabiler Release</div>
</div>
</div>
</div>
</div>
<div class="section results">
<h2>Typische Resultate</h2>
<div class="feature-grid">
<div class="feature-card">
<h3>Effizienzsteigerung</h3>
<ul>
<li>30–70% Zeiteinsparung in Zielprozessen (je nach Ausgangslage)</li>
<li>Schnellere Entscheide dank standardisierten Artefakten und Dashboards</li>
</ul>
</div>
<div class="feature-card">
<h3>Risikoreduktion</h3>
<ul>
<li>Reduzierte Betriebsrisiken durch klare Architektur- und Qualitätsstandards</li>
<li>Höhere Akzeptanz durch Einbindung von Stakeholdern früh im Prozess</li>
</ul>
</div>
</div>
</div>
<div class="section approach">
<h2>Vorgehen (Kurz)</h2>
<div class="feature-grid">
<div class="feature-card">
<h3>1. Discovery</h3>
<ul>
<li>Ziele, Ist-Prozess, Datenlage</li>
</ul>
</div>
<div class="feature-card">
<h3>2. Prototyp</h3>
<ul>
<li>Schlanker End-to-End-Flow mit messbarem Nutzen</li>
</ul>
</div>
<div class="feature-card">
<h3>3. Skalierung</h3>
<ul>
<li>Security, Performance, Betrieb</li>
</ul>
</div>
<div class="feature-card">
<h3>4. Transition</h3>
<ul>
<li>Übergabe oder Betrieb durch PowerOn-Team</li>
</ul>
</div>
</div>
</div>
<div class="section cta">
<h2>Gemeinsamer Start</h2>
<div class="feature-grid">
<div class="feature-card">
<h3>Vorbereitung</h3>
<ul>
<li>Use-Case-Shortlist definieren</li>
<li>2h-Impact-Session terminieren</li>
</ul>
</div>
<div class="feature-card">
<h3>Umsetzung</h3>
<ul>
<li>MVP-Scope und Erfolgskriterien festlegen</li>
<li>Sprint-Planung starten</li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="footer">
<div class="container">
<p>© 2025 <a href="https://poweron.swiss/">PowerOn</a> – Intelligente Workflow-Plattform</p>
</div>
</div>
</body>
</html>

View file

@ -0,0 +1,57 @@
"""Generate tenant-dossier.pdf for neutralization demo. Run: python _generateTenantDossierPdf.py
Uses ReportLab so the PDF opens reliably in all viewers (stdlib-only PDFs are fragile).
"""
from pathlib import Path
from reportlab.lib.pagesizes import A4
from reportlab.pdfgen import canvas
def _main():
    """Render the fictional tenant-dossier demo PDF next to this script.

    Output file: ``tenant-dossier.pdf`` in the same directory. Uses ReportLab
    (see module docstring) so the result opens reliably in all viewers.
    """
    pdfPath = Path(__file__).resolve().parent / "tenant-dossier.pdf"
    pdf = canvas.Canvas(str(pdfPath), pagesize=A4)
    _, pageHeight = A4
    margin = 72  # 1 inch, in points
    cursorY = pageHeight - margin

    # Bold title line, then switch to the body font.
    pdf.setFont("Helvetica-Bold", 13)
    pdf.drawString(margin, cursorY, "Tenant dossier (demo) - confidential")
    cursorY -= 22
    pdf.setFont("Helvetica", 11)

    # All content is fictional demo data for neutralization testing.
    bodyLines = [
        "Fictional demo data for neutralization testing.",
        "",
        "Tenant name: Hans Muster",
        "Date of birth: 14.03.1982",
        "Nationality: Swiss",
        "",
        "Residential address:",
        "Bahnhofstrasse 1",
        "8001 Zurich",
        "Switzerland",
        "",
        "Email: hans.muster@example-mail.demo",
        "Phone: +41 79 123 45 67",
        "",
        "Lease reference: LE-2024-88421",
        "Monthly rent: CHF 2450.00",
        "Deposit held: CHF 7350.00",
        "",
        "Employer: Demo Consulting AG, Limmatquai 78, 8001 Zurich",
        "",
        "Notes: Tenant requested balcony repair (ticket REQ-992).",
    ]

    step = 14  # vertical advance per text line, in points
    for text in bodyLines:
        # Start a new page only when a non-empty line would land below the
        # bottom margin; the body font must be re-set after showPage().
        if text and cursorY < margin:
            pdf.showPage()
            pdf.setFont("Helvetica", 11)
            cursorY = pageHeight - margin
        pdf.drawString(margin, cursorY, text)
        cursorY -= step

    pdf.save()
    print(f"Wrote {pdfPath}")
# Script entry point: python _generateTenantDossierPdf.py
if __name__ == "__main__":
    _main()

View file

@ -0,0 +1,74 @@
%PDF-1.3
%“Œ‹ž ReportLab Generated PDF document (opensource)
1 0 obj
<<
/F1 2 0 R /F2 3 0 R
>>
endobj
2 0 obj
<<
/BaseFont /Helvetica /Encoding /WinAnsiEncoding /Name /F1 /Subtype /Type1 /Type /Font
>>
endobj
3 0 obj
<<
/BaseFont /Helvetica-Bold /Encoding /WinAnsiEncoding /Name /F2 /Subtype /Type1 /Type /Font
>>
endobj
4 0 obj
<<
/Contents 8 0 R /MediaBox [ 0 0 595.2756 841.8898 ] /Parent 7 0 R /Resources <<
/Font 1 0 R /ProcSet [ /PDF /Text /ImageB /ImageC /ImageI ]
>> /Rotate 0 /Trans <<
>>
/Type /Page
>>
endobj
5 0 obj
<<
/PageMode /UseNone /Pages 7 0 R /Type /Catalog
>>
endobj
6 0 obj
<<
/Author (anonymous) /CreationDate (D:20260413002929+02'00') /Creator (anonymous) /Keywords () /ModDate (D:20260413002929+02'00') /Producer (ReportLab PDF Library - \(opensource\))
/Subject (unspecified) /Title (untitled) /Trapped /False
>>
endobj
7 0 obj
<<
/Count 1 /Kids [ 4 0 R ] /Type /Pages
>>
endobj
8 0 obj
<<
/Filter [ /ASCII85Decode /FlateDecode ] /Length 654
>>
stream
Gasam9lnc;&A@g>lnO(2=RrpscmGHAZie8p-5Y3=@t?5.P!"j*HK;Fi@]13b1HoLNhXc)p>lp^JaPgD8!#HB_>8&+nWYS,F`)(;Y<Lk/U.?Nb4Scn<JS30YZ'XG(Oo"<&;)IQU>@)>/R[H=Dq4)8esgGpgXQD3IM$H$"2L[$s#Dk8hf2E>G=!I\)qcAifY?5kL#lX:umL)C2t<$6-:MY6mu9k?#W%2[oR^VsI+.!d4gq#g2k1Vj8HiJIpNf:t7&r:FE<6naroO=f7-A\)mh3K+#;jO=Q5$Z^pXYXcahlq@-EPABR+A_HCPde%4"G)Q2m;h-`b6ENmFFmS1/_)fuc<nk^'7Nd.ZjQ)DX+b?hlicXDh:rg+(CE?=F9Jh2`Gf"K!30mVJj*_6)D.,+<>50.gZ!l8E@]BR[V=I5)R1mE7:'u=chT!!'f^Xe@:2KoYE13<lcbsh;6"Y1<fV1]0>Fj#R5slPDniWfK\<FuOQ"qgBfC(;L0I9t1Xb"J`(keS):7\>L\<E@#kcetHiE:7(*Ytq`N/PVk`NGPS<$a)n8\UEUO8UoBnDWCfD"o\<F$DDi=agk\F*6K4S<-O;FDdo1&LBP6[_hphXpf.)NqIR"9r[LsT9bl'oa`lu]DB/g$G)e?3sEoY""m)B"T~>endstream
endobj
xref
0 9
0000000000 65535 f
0000000061 00000 n
0000000102 00000 n
0000000209 00000 n
0000000321 00000 n
0000000524 00000 n
0000000592 00000 n
0000000853 00000 n
0000000912 00000 n
trailer
<<
/ID
[<fffce794bf59aca4604ad63204977686><fffce794bf59aca4604ad63204977686>]
% ReportLab generated PDF document -- digest (opensource)
/Info 6 0 R
/Root 5 0 R
/Size 9
>>
startxref
1656
%%EOF

Binary file not shown.

View file

@ -0,0 +1,49 @@
"""
Demo Configs Auto-Discovery Module
Scans this folder for Python files that contain subclasses of _BaseDemoConfig
and exposes them via _getAvailableDemoConfigs().
"""
import importlib
import inspect
import logging
import pkgutil
from typing import Dict
from modules.demoConfigs._baseDemoConfig import _BaseDemoConfig
logger = logging.getLogger(__name__)
_configCache: Dict[str, _BaseDemoConfig] = {}
def _getAvailableDemoConfigs() -> Dict[str, _BaseDemoConfig]:
    """Return a dict of code -> instance for every discovered demo config.

    Scans the sibling modules of this package (skipping ``_``-prefixed ones)
    for concrete subclasses of ``_BaseDemoConfig``, instantiates them, and
    caches the result so discovery runs only once per process.
    """
    if _configCache:
        # Discovery already ran; installed modules do not change at runtime.
        return _configCache
    package = __name__
    packagePath = __path__
    for _, moduleName, _ in pkgutil.iter_modules(packagePath):
        if moduleName.startswith("_"):
            continue
        try:
            module = importlib.import_module(f"{package}.{moduleName}")
            for _, obj in inspect.getmembers(module, inspect.isclass):
                # Skip abstract intermediates: instantiating one would raise
                # TypeError and abort the scan of the whole module.
                if (
                    issubclass(obj, _BaseDemoConfig)
                    and obj is not _BaseDemoConfig
                    and not inspect.isabstract(obj)
                ):
                    instance = obj()
                    if instance.code:
                        if instance.code in _configCache:
                            # Last one wins (kept for backward compatibility),
                            # but make the collision visible.
                            logger.warning(
                                "Duplicate demo config code '%s' found in module '%s'",
                                instance.code, moduleName,
                            )
                        _configCache[instance.code] = instance
                        logger.info("Discovered demo config: %s (%s)", instance.code, instance.label)
        except Exception as e:
            # One broken module must not break discovery of the others.
            logger.warning("Failed to load demo config module '%s': %s", moduleName, e)
    return _configCache
def _getDemoConfigByCode(code: str) -> _BaseDemoConfig | None:
    """Look up one demo configuration by its unique code.

    Returns ``None`` when no discovered config carries *code*.
    """
    return _getAvailableDemoConfigs().get(code)

View file

@ -0,0 +1,38 @@
"""
Base class for demo configurations.
Each demo config file in this folder extends _BaseDemoConfig and provides
idempotent load() and remove() methods for setting up / tearing down
a complete demo environment (mandates, users, features, test data, etc.).
"""
import logging
from abc import ABC, abstractmethod
from typing import Dict, Any
logger = logging.getLogger(__name__)
class _BaseDemoConfig(ABC):
"""Abstract base for demo configurations."""
code: str = ""
label: str = ""
description: str = ""
@abstractmethod
def load(self, db) -> Dict[str, Any]:
"""Create all demo data (idempotent). Returns summary dict."""
raise NotImplementedError
@abstractmethod
def remove(self, db) -> Dict[str, Any]:
"""Remove all demo data. Returns summary dict."""
raise NotImplementedError
def toDict(self) -> Dict[str, Any]:
return {
"code": self.code,
"label": self.label,
"description": self.description,
}

View file

@ -0,0 +1,348 @@
"""
Investor Demo April 2026
Creates a complete demo environment with two mandates, one user,
and all feature instances needed for the investor live demo.
Mandates:
- HappyLife AG (happylife) – workspace, trustee (RMA), graphEditor, chatbot, neutralization
- Alpina Treuhand AG (alpina) – workspace, trustee (RMA), graphEditor, neutralization
User:
- Patrick Helvetia (p.motsch@poweron.swiss) – SysAdmin, member of both mandates
"""
import json
import logging
import uuid
from typing import Dict, Any, Optional, List
from modules.demoConfigs._baseDemoConfig import _BaseDemoConfig
logger = logging.getLogger(__name__)
# Common prefix reserved for tagging demo artifacts.
_DEMO_PREFIX = "demo-inv2026"

# Mandate definitions: internal "name" plus display "label".
_MANDATE_HAPPYLIFE = {
    "name": "happylife",
    "label": "HappyLife AG",
}
_MANDATE_ALPINA = {
    "name": "alpina-treuhand",
    "label": "Alpina Treuhand AG",
}

# Demo SysAdmin user. NOTE: password equals username — demo-only credentials.
_USER = {
    "username": "patrick.helvetia",
    "email": "p.motsch@poweron.swiss",
    "fullName": "Patrick Helvetia",
    "password": "patrick.helvetia",
    "language": "en",
}

# Feature codes to enable per mandate.
_FEATURES_HAPPYLIFE = ["workspace", "trustee", "graphicalEditor", "chatbot", "neutralization"]
_FEATURES_ALPINA = ["workspace", "trustee", "graphicalEditor", "neutralization"]
class InvestorDemo2026(_BaseDemoConfig):
    """Demo config for the April 2026 investor demo.

    ``load`` is idempotent: every ``_ensure*`` helper checks for an existing
    record before creating one and reports what happened in a summary dict
    with "created" / "skipped" / "errors" lists. Project models and
    interfaces are imported lazily inside the methods.
    """

    code = "investor-demo-2026"
    label = "Investor Demo April 2026"
    description = (
        "Two mandates (HappyLife AG + Alpina Treuhand AG), one SysAdmin user, "
        "trustee with RMA, workspace, graph editor, chatbot, and neutralization."
    )

    # ------------------------------------------------------------------
    # load
    # ------------------------------------------------------------------
    def load(self, db) -> Dict[str, Any]:
        """Create mandates, user, memberships, features and configs (idempotent)."""
        summary: Dict[str, Any] = {"created": [], "skipped": [], "errors": []}
        try:
            # Mandates first — everything else hangs off their ids.
            mandateIdHappy = self._ensureMandate(db, _MANDATE_HAPPYLIFE, summary)
            mandateIdAlpina = self._ensureMandate(db, _MANDATE_ALPINA, summary)
            userId = self._ensureUser(db, summary)
            if mandateIdHappy:
                self._ensureMembership(db, userId, mandateIdHappy, _MANDATE_HAPPYLIFE["label"], summary)
                self._ensureFeatures(db, mandateIdHappy, _MANDATE_HAPPYLIFE["label"], _FEATURES_HAPPYLIFE, summary)
            if mandateIdAlpina:
                self._ensureMembership(db, userId, mandateIdAlpina, _MANDATE_ALPINA["label"], summary)
                self._ensureFeatures(db, mandateIdAlpina, _MANDATE_ALPINA["label"], _FEATURES_ALPINA, summary)
            # Feature-level configuration; these helpers tolerate a None mandate id.
            self._ensureTrusteeRmaConfig(db, mandateIdHappy, _MANDATE_HAPPYLIFE["label"], summary)
            self._ensureTrusteeRmaConfig(db, mandateIdAlpina, _MANDATE_ALPINA["label"], summary)
            self._ensureNeutralizationConfig(db, mandateIdHappy, summary)
            self._ensureNeutralizationConfig(db, mandateIdAlpina, summary)
            self._ensureBilling(db, mandateIdHappy, _MANDATE_HAPPYLIFE["label"], summary)
            self._ensureBilling(db, mandateIdAlpina, _MANDATE_ALPINA["label"], summary)
        except Exception as e:
            logger.error(f"Demo load failed: {e}", exc_info=True)
            summary["errors"].append(str(e))
        return summary

    # ------------------------------------------------------------------
    # remove
    # ------------------------------------------------------------------
    def remove(self, db) -> Dict[str, Any]:
        """Delete the demo mandates, the demo user (incl. memberships) and extras.

        NOTE(review): feature instances and configs are not deleted explicitly
        here — presumably removed via cascade on mandate delete; confirm.
        """
        summary: Dict[str, Any] = {"removed": [], "errors": []}
        from modules.datamodels.datamodelUam import Mandate, UserInDB
        from modules.datamodels.datamodelMembership import UserMandate
        for mandateDef in [_MANDATE_HAPPYLIFE, _MANDATE_ALPINA]:
            try:
                existing = db.getRecordset(Mandate, recordFilter={"name": mandateDef["name"]})
                for m in existing:
                    mid = m.get("id")
                    db.recordDelete(Mandate, mid)
                    summary["removed"].append(f"Mandate {mandateDef['label']} ({mid})")
                    logger.info(f"Removed mandate {mandateDef['label']} ({mid})")
            except Exception as e:
                summary["errors"].append(f"Remove mandate {mandateDef['label']}: {e}")
        try:
            existing = db.getRecordset(UserInDB, recordFilter={"username": _USER["username"]})
            for u in existing:
                uid = u.get("id")
                # Drop memberships first so no orphaned links remain.
                memberships = db.getRecordset(UserMandate, recordFilter={"userId": uid})
                for mem in memberships:
                    try:
                        db.recordDelete(UserMandate, mem.get("id"))
                    except Exception:
                        pass  # best effort — the row may already be gone
                db.recordDelete(UserInDB, uid)
                summary["removed"].append(f"User {_USER['username']} ({uid})")
                logger.info(f"Removed user {_USER['username']} ({uid})")
        except Exception as e:
            summary["errors"].append(f"Remove user: {e}")
        # Language set 'es' may have been created during the demo (UC4).
        self._removeLanguageSet(db, "es", summary)
        return summary

    # ------------------------------------------------------------------
    # helpers
    # ------------------------------------------------------------------
    def _ensureMandate(self, db, mandateDef: Dict, summary: Dict) -> Optional[str]:
        """Return the mandate id for *mandateDef*, creating the mandate if missing."""
        from modules.datamodels.datamodelUam import Mandate
        from modules.interfaces.interfaceBootstrap import copySystemRolesToMandate
        existing = db.getRecordset(Mandate, recordFilter={"name": mandateDef["name"]})
        if existing:
            mid = existing[0].get("id")
            summary["skipped"].append(f"Mandate {mandateDef['label']} exists ({mid})")
            return mid
        mandate = Mandate(name=mandateDef["name"], label=mandateDef["label"], enabled=True)
        created = db.recordCreate(Mandate, mandate)
        mid = created.get("id")
        logger.info(f"Created mandate {mandateDef['label']} ({mid})")
        summary["created"].append(f"Mandate {mandateDef['label']}")
        # Freshly created mandates need the system role templates copied in.
        copySystemRolesToMandate(db, mid)
        return mid

    def _ensureUser(self, db, summary: Dict) -> Optional[str]:
        """Return the demo user's id, creating the local SysAdmin user if missing."""
        from modules.datamodels.datamodelUam import UserInDB, AuthAuthority
        from passlib.context import CryptContext
        existing = db.getRecordset(UserInDB, recordFilter={"username": _USER["username"]})
        if existing:
            uid = existing[0].get("id")
            summary["skipped"].append(f"User {_USER['username']} exists ({uid})")
            return uid
        # Local auth: password is stored argon2-hashed.
        pwdContext = CryptContext(schemes=["argon2"], deprecated="auto")
        user = UserInDB(
            username=_USER["username"],
            email=_USER["email"],
            fullName=_USER["fullName"],
            enabled=True,
            language=_USER["language"],
            isSysAdmin=True,
            authenticationAuthority=AuthAuthority.LOCAL,
            hashedPassword=pwdContext.hash(_USER["password"]),
        )
        created = db.recordCreate(UserInDB, user)
        uid = created.get("id")
        logger.info(f"Created user {_USER['username']} ({uid})")
        summary["created"].append(f"User {_USER['fullName']}")
        return uid

    def _ensureMembership(self, db, userId: str, mandateId: str, mandateLabel: str, summary: Dict):
        """Link the demo user to *mandateId* and assign the mandate's admin role."""
        from modules.datamodels.datamodelMembership import UserMandate, UserMandateRole, Role
        existing = db.getRecordset(UserMandate, recordFilter={"userId": userId, "mandateId": mandateId})
        if existing:
            userMandateId = existing[0].get("id")
            summary["skipped"].append(f"Membership {_USER['username']} -> {mandateLabel} exists")
        else:
            um = UserMandate(userId=userId, mandateId=mandateId, enabled=True)
            created = db.recordCreate(UserMandate, um)
            userMandateId = created.get("id")
            summary["created"].append(f"Membership {_USER['username']} -> {mandateLabel}")
            logger.info(f"Created membership {_USER['username']} -> {mandateLabel}")
        # Role assignment runs even for pre-existing memberships and is idempotent.
        adminRoles = db.getRecordset(Role, recordFilter={"mandateId": mandateId, "label": "admin"})
        if adminRoles:
            adminRoleId = adminRoles[0].get("id")
            existingRole = db.getRecordset(UserMandateRole, recordFilter={"userMandateId": userMandateId, "roleId": adminRoleId})
            if not existingRole:
                umr = UserMandateRole(userMandateId=userMandateId, roleId=adminRoleId)
                db.recordCreate(UserMandateRole, umr)
                logger.info(f"Assigned admin role in {mandateLabel}")

    def _ensureFeatures(self, db, mandateId: str, mandateLabel: str, featureCodes: List[str], summary: Dict):
        """Create any missing feature instances from *featureCodes* for the mandate."""
        from modules.interfaces.interfaceFeatures import getFeatureInterface
        fi = getFeatureInterface(db)
        existingInstances = fi.getFeatureInstances(mandateId)
        # Instances may be model objects or plain dicts; normalise to a code set.
        existingCodes = {
            (inst.featureCode if hasattr(inst, "featureCode") else inst.get("featureCode", ""))
            for inst in existingInstances
        }
        for code in featureCodes:
            if code in existingCodes:
                summary["skipped"].append(f"Feature {code} in {mandateLabel} exists")
                continue
            try:
                fi.createFeatureInstance(
                    featureCode=code,
                    mandateId=mandateId,
                    label=f"{code} ({mandateLabel})",
                    enabled=True,
                    copyTemplateRoles=True,
                )
                summary["created"].append(f"Feature {code} in {mandateLabel}")
                logger.info(f"Created feature instance {code} in {mandateLabel}")
            except Exception as e:
                # One failed feature must not block the remaining ones.
                summary["errors"].append(f"Feature {code} in {mandateLabel}: {e}")
                logger.error(f"Failed to create feature {code} in {mandateLabel}: {e}")

    def _ensureTrusteeRmaConfig(self, db, mandateId: Optional[str], mandateLabel: str, summary: Dict):
        """Attach the shared demo RMA accounting config to the mandate's trustee instance."""
        if not mandateId:
            return
        from modules.datamodels.datamodelFeatures import FeatureInstance
        from modules.features.trustee.datamodelFeatureTrustee import TrusteeAccountingConfig
        from modules.shared.configuration import APP_CONFIG, encryptValue
        instances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId, "featureCode": "trustee"})
        if not instances:
            summary["skipped"].append(f"No trustee instance in {mandateLabel} for RMA config")
            return
        instanceId = instances[0].get("id")
        existing = db.getRecordset(TrusteeAccountingConfig, recordFilter={"featureInstanceId": instanceId})
        if existing:
            summary["skipped"].append(f"RMA config for {mandateLabel} exists")
            return
        # Shared demo credentials come from config.ini (Demo_RMA_* keys).
        apiBaseUrl = APP_CONFIG.get("Demo_RMA_ApiBaseUrl", "")
        clientName = APP_CONFIG.get("Demo_RMA_ClientName", "")
        apiKey = APP_CONFIG.get("Demo_RMA_ApiKey", "")
        if not apiBaseUrl or not apiKey:
            summary["errors"].append(
                f"RMA credentials missing in config.ini (Demo_RMA_ApiBaseUrl, Demo_RMA_ClientName, Demo_RMA_ApiKey) for {mandateLabel}"
            )
            return
        plainConfig = {
            "apiBaseUrl": apiBaseUrl,
            "clientName": clientName,
            "apiKey": apiKey,
        }
        configRecord = {
            "id": str(uuid.uuid4()),
            "featureInstanceId": instanceId,
            "connectorType": "rma",
            "displayLabel": "Run My Accounts",
            # Credentials are stored encrypted at rest.
            "encryptedConfig": encryptValue(json.dumps(plainConfig), keyName="accountingConfig"),
            "isActive": True,
            "mandateId": mandateId,
        }
        db.recordCreate(TrusteeAccountingConfig, configRecord)
        summary["created"].append(f"RMA accounting config for {mandateLabel}")
        logger.info(f"Created RMA accounting config for {mandateLabel}")

    def _ensureNeutralizationConfig(self, db, mandateId: Optional[str], summary: Dict):
        """Create a default neutralization config for the mandate's instance, if any."""
        if not mandateId:
            return
        from modules.datamodels.datamodelFeatures import FeatureInstance
        instances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId, "featureCode": "neutralization"})
        if not instances:
            return  # mandate has no neutralization feature — nothing to configure
        instanceId = instances[0].get("id")
        try:
            from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutraliserConfig
            existing = db.getRecordset(DataNeutraliserConfig, recordFilter={"featureInstanceId": instanceId})
            if existing:
                summary["skipped"].append(f"Neutralization config for mandate {mandateId} exists")
                return
            config = DataNeutraliserConfig(
                featureInstanceId=instanceId,
                mandateId=mandateId,
                enabled=True,
                scope="featureInstance",
            )
            db.recordCreate(DataNeutraliserConfig, config)
            summary["created"].append(f"Neutralization config for mandate {mandateId}")
            logger.info(f"Created neutralization config for mandate {mandateId}")
        except Exception as e:
            summary["errors"].append(f"Neutralization config: {e}")

    def _ensureBilling(self, db, mandateId: Optional[str], mandateLabel: str, summary: Dict):
        """Create default billing settings for the mandate if none exist."""
        if not mandateId:
            return
        try:
            from modules.interfaces.interfaceDbBilling import _getRootInterface
            from modules.datamodels.datamodelBilling import BillingSettings
            billingInterface = _getRootInterface()
            existingSettings = billingInterface.getSettings(mandateId)
            if existingSettings:
                summary["skipped"].append(f"Billing for {mandateLabel} exists")
                return
            settings = BillingSettings(
                mandateId=mandateId,
                warningThresholdPercent=10.0,
                notifyOnWarning=True,
            )
            billingInterface.db.recordCreate(BillingSettings, settings)
            summary["created"].append(f"Billing settings for {mandateLabel}")
            logger.info(f"Created billing settings for {mandateLabel}")
        except Exception as e:
            summary["errors"].append(f"Billing for {mandateLabel}: {e}")

    def _removeLanguageSet(self, db, code: str, summary: Dict):
        """Remove a language set if it was created during demo (e.g. 'es' from UC4)."""
        try:
            from modules.datamodels.datamodelUiLanguage import UiLanguageSet
            existing = db.getRecordset(UiLanguageSet, recordFilter={"id": code})
            if existing:
                db.recordDelete(UiLanguageSet, code)
                summary["removed"].append(f"Language set '{code}'")
                logger.info(f"Removed language set '{code}'")
        except Exception as e:
            # Non-fatal: the language set is optional demo residue.
            logger.debug(f"Could not remove language set '{code}': {e}")

View file

@ -198,6 +198,11 @@ class AutoRun(PowerOnModel):
description="Workflow ID", description="Workflow ID",
json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True, "label": "Workflow-ID"}, json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True, "label": "Workflow-ID"},
) )
label: Optional[str] = Field(
default=None,
description="Human-readable run label, set at creation from workflow name or caller",
json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Bezeichnung"},
)
mandateId: Optional[str] = Field( mandateId: Optional[str] = Field(
default=None, default=None,
description="Mandate ID for cross-feature querying", description="Mandate ID for cross-feature querying",

View file

@ -265,12 +265,18 @@ class GraphicalEditorObjects:
# Workflow Runs # Workflow Runs
# ------------------------------------------------------------------------- # -------------------------------------------------------------------------
def createRun(self, workflowId: str, nodeOutputs: Dict = None, context: Dict = None) -> Dict[str, Any]: def createRun(self, workflowId: str, nodeOutputs: Dict = None, context: Dict = None, label: str = None) -> Dict[str, Any]:
"""Create a new workflow run.""" """Create a new workflow run.
*label* human-readable name persisted on the run. Callers should
pass the workflow label or a descriptive name; ``executeGraph`` fills
in a fallback when nothing is provided.
"""
ctx = context or {} ctx = context or {}
data = { data = {
"id": str(uuid.uuid4()), "id": str(uuid.uuid4()),
"workflowId": workflowId, "workflowId": workflowId,
"label": label,
"status": "running", "status": "running",
"nodeOutputs": _make_json_serializable(nodeOutputs or {}), "nodeOutputs": _make_json_serializable(nodeOutputs or {}),
"currentNodeId": None, "currentNodeId": None,

View file

@ -55,8 +55,8 @@ TRUSTEE_NODES = [
"label": "Dokumente verarbeiten", "label": "Dokumente verarbeiten",
"description": "TrusteeDocument + TrusteePosition aus Extraktionsergebnis erstellen.", "description": "TrusteeDocument + TrusteePosition aus Extraktionsergebnis erstellen.",
"parameters": [ "parameters": [
{"name": "documentList", "type": "string", "required": True, "frontendType": "text", {"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
"description": "Referenz auf Ergebnis"}, "description": "Automatisch via Wire-Verbindung befüllt"},
{"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
"description": "Trustee Feature-Instanz-ID"}, "description": "Trustee Feature-Instanz-ID"},
], ],
@ -74,8 +74,8 @@ TRUSTEE_NODES = [
"label": "In Buchhaltung synchronisieren", "label": "In Buchhaltung synchronisieren",
"description": "Trustee-Positionen in Buchhaltungssystem übertragen.", "description": "Trustee-Positionen in Buchhaltungssystem übertragen.",
"parameters": [ "parameters": [
{"name": "documentList", "type": "string", "required": True, "frontendType": "text", {"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
"description": "Referenz auf Ergebnis"}, "description": "Automatisch via Wire-Verbindung befüllt"},
{"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
"description": "Trustee Feature-Instanz-ID"}, "description": "Trustee Feature-Instanz-ID"},
], ],

View file

@ -194,6 +194,10 @@ async def post_execute(
getattr(context.user, "language", None) if context.user else None, getattr(context.user, "language", None) if context.user else None,
) )
_wfLabel = None
if workflow_for_envelope:
_wfLabel = workflow_for_envelope.get("label") if isinstance(workflow_for_envelope, dict) else getattr(workflow_for_envelope, "label", None)
ge_interface = getGraphicalEditorInterface(context.user, mandateId, instanceId) ge_interface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
result = await executeGraph( result = await executeGraph(
graph=graph, graph=graph,
@ -204,6 +208,7 @@ async def post_execute(
mandateId=mandateId, mandateId=mandateId,
automation2_interface=ge_interface, automation2_interface=ge_interface,
run_envelope=run_env, run_envelope=run_env,
label=_wfLabel,
) )
logger.info( logger.info(
"graphicalEditor execute result: success=%s error=%s nodeOutputs_keys=%s failedNode=%s paused=%s", "graphicalEditor execute result: success=%s error=%s nodeOutputs_keys=%s failedNode=%s paused=%s",

View file

@ -834,32 +834,43 @@ class ComponentObjects:
# File Utilities # File Utilities
def checkForDuplicateFile(self, fileHash: str, fileName: str) -> Optional[FileItem]: def checkForDuplicateFile(self, fileHash: str, fileName: str) -> Optional[FileItem]:
"""Checks if a file with the same hash AND fileName already exists for the current user. """Checks if a file with the same hash AND fileName already exists for the current user
**within the same scope** (mandateId + featureInstanceId).
Duplicate = same user (sysCreatedBy) + same fileHash + same fileName. Duplicate = same user + same fileHash + same fileName + same scope.
Same hash with different name is allowed (intentional copy by user). Same hash with different name is allowed (intentional copy by user).
Uses direct DB query (not RBAC) because files are isolated per user.
""" """
if not self.userId: if not self.userId:
return None return None
# Direct DB query: find files with matching hash + name + user recordFilter: dict = {
matchingFiles = self.db.getRecordset(
FileItem,
recordFilter={
"sysCreatedBy": self.userId, "sysCreatedBy": self.userId,
"fileHash": fileHash, "fileHash": fileHash,
"fileName": fileName "fileName": fileName,
} }
if self.featureInstanceId:
recordFilter["featureInstanceId"] = self.featureInstanceId
elif self.mandateId:
recordFilter["mandateId"] = self.mandateId
matchingFiles = self.db.getRecordset(
FileItem,
recordFilter=recordFilter,
) )
if not matchingFiles: if not matchingFiles:
return None return None
# Return first match
file = matchingFiles[0] file = matchingFiles[0]
fileId = file["id"]
fileDataExists = self.db.getRecordset(FileData, recordFilter={"id": fileId})
if not fileDataExists:
logger.warning(f"Duplicate FileItem {fileId} found but FileData missing — treating as new file")
return None
return FileItem( return FileItem(
id=file["id"], id=fileId,
mandateId=file.get("mandateId", ""), mandateId=file.get("mandateId", ""),
featureInstanceId=file.get("featureInstanceId", ""), featureInstanceId=file.get("featureInstanceId", ""),
fileName=file["fileName"], fileName=file["fileName"],

View file

@ -0,0 +1,86 @@
"""
Admin Demo Config API
Provides endpoints to list, load, and remove demo configurations.
SysAdmin-only access.
"""
import logging
from fastapi import APIRouter, Depends, HTTPException, Request, status
from modules.auth import limiter
from modules.auth.authentication import requireSysAdminRole
from modules.datamodels.datamodelUam import User
from modules.security.rootAccess import getRootDbAppConnector
logger = logging.getLogger(__name__)
router = APIRouter(
prefix="/api/admin/demo-config",
tags=["Admin Demo Config"],
)
@router.get("")
@limiter.limit("30/minute")
def listDemoConfigs(
request: Request,
currentUser: User = Depends(requireSysAdminRole),
) -> dict:
"""List all available demo configurations."""
from modules.demoConfigs import _getAvailableDemoConfigs
configs = _getAvailableDemoConfigs()
return {
"configs": [cfg.toDict() for cfg in configs.values()],
}
@router.post("/{code}/load")
@limiter.limit("5/minute")
def loadDemoConfig(
code: str,
request: Request,
currentUser: User = Depends(requireSysAdminRole),
) -> dict:
"""Load (create) a demo configuration. Idempotent."""
from modules.demoConfigs import _getDemoConfigByCode
config = _getDemoConfigByCode(code)
if not config:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Demo config '{code}' not found",
)
db = getRootDbAppConnector()
logger.info(f"Loading demo config '{code}' (user: {currentUser.username})")
summary = config.load(db)
logger.info(f"Demo config '{code}' loaded: {summary}")
return {"status": "ok", "code": code, "summary": summary}
@router.post("/{code}/remove")
@limiter.limit("5/minute")
def removeDemoConfig(
code: str,
request: Request,
currentUser: User = Depends(requireSysAdminRole),
) -> dict:
"""Remove all data created by a demo configuration."""
from modules.demoConfigs import _getDemoConfigByCode
config = _getDemoConfigByCode(code)
if not config:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Demo config '{code}' not found",
)
db = getRootDbAppConnector()
logger.info(f"Removing demo config '{code}' (user: {currentUser.username})")
summary = config.remove(db)
logger.info(f"Demo config '{code}' removed: {summary}")
return {"status": "ok", "code": code, "summary": summary}

View file

@ -8,11 +8,13 @@ with RBAC scoping: user sees own runs/workflows, mandate admin sees mandate
runs/workflows, sysadmin sees all. runs/workflows, sysadmin sees all.
""" """
import asyncio
import json import json
import logging import logging
import math import math
from typing import Optional, List from typing import Optional, List
from fastapi import APIRouter, Depends, Request, Query, Path, HTTPException from fastapi import APIRouter, Depends, Request, Query, Path, HTTPException
from fastapi.responses import StreamingResponse
from slowapi import Limiter from slowapi import Limiter
from slowapi.util import get_remote_address from slowapi.util import get_remote_address
@ -21,8 +23,6 @@ from modules.interfaces.interfaceDbApp import getRootInterface
from modules.connectors.connectorDbPostgre import DatabaseConnector from modules.connectors.connectorDbPostgre import DatabaseConnector
from modules.shared.configuration import APP_CONFIG from modules.shared.configuration import APP_CONFIG
from modules.datamodels.datamodelPagination import PaginationParams from modules.datamodels.datamodelPagination import PaginationParams
from modules.datamodels.datamodelFeatures import FeatureInstance
from modules.datamodels.datamodelUam import Mandate
from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import ( from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import (
AutoRun, AutoStepLog, AutoWorkflow, AutoTask, AutoRun, AutoStepLog, AutoWorkflow, AutoTask,
) )
@ -143,17 +143,6 @@ def _scopedWorkflowFilter(context: RequestContext) -> Optional[dict]:
return {"mandateId": "__impossible__"} return {"mandateId": "__impossible__"}
def _getManagementDb() -> DatabaseConnector:
"""Get connector to the management DB for Mandate/FeatureInstance lookups."""
return DatabaseConnector(
dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
dbDatabase=APP_CONFIG.get("DB_NAME", "poweron_management"),
dbUser=APP_CONFIG.get("DB_USER"),
dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD"),
dbPort=int(APP_CONFIG.get("DB_PORT", 5432)),
userId=None,
)
@router.get("") @router.get("")
@limiter.limit("60/minute") @limiter.limit("60/minute")
@ -194,16 +183,52 @@ def get_workflow_runs(
total = result.get("totalItems", 0) if isinstance(result, dict) else result.totalItems total = result.get("totalItems", 0) if isinstance(result, dict) else result.totalItems
wfIds = list({r.get("workflowId") for r in pageRuns if r.get("workflowId")}) wfIds = list({r.get("workflowId") for r in pageRuns if r.get("workflowId")})
wfLabelMap = {} wfMap: dict = {}
if wfIds and db._ensureTableExists(AutoWorkflow): if wfIds and db._ensureTableExists(AutoWorkflow):
wfs = db.getRecordset(AutoWorkflow, recordFilter={"id": wfIds}) wfs = db.getRecordset(AutoWorkflow, recordFilter={"id": wfIds})
for wf in (wfs or []): for wf in (wfs or []):
wfLabelMap[wf.get("id")] = wf.get("label") or wf.get("id") wfMap[wf.get("id")] = wf
mandateIds = list({r.get("mandateId") for r in pageRuns if r.get("mandateId")})
instanceIds = list({
wfMap[r.get("workflowId")].get("featureInstanceId")
for r in pageRuns
if r.get("workflowId") in wfMap and wfMap[r.get("workflowId")].get("featureInstanceId")
})
mandateLabelMap: dict = {}
instanceLabelMap: dict = {}
try:
rootIface = getRootInterface()
if mandateIds:
mMap = rootIface.getMandatesByIds(mandateIds)
for mid, m in mMap.items():
mandateLabelMap[mid] = getattr(m, "label", None) or getattr(m, "name", mid) or mid
if instanceIds:
from modules.interfaces.interfaceFeatures import getFeatureInterface
featureIface = getFeatureInterface(rootIface.db)
for iid in instanceIds:
fi = featureIface.getFeatureInstance(iid)
if fi:
instanceLabelMap[iid] = fi.label or iid
except Exception as e:
logger.warning(f"Failed to enrich run labels: {e}")
runs = [] runs = []
for r in pageRuns: for r in pageRuns:
row = dict(r) row = dict(r)
row["workflowLabel"] = wfLabelMap.get(row.get("workflowId"), row.get("workflowId") or "") wfId = row.get("workflowId")
wf = wfMap.get(wfId, {})
row["workflowLabel"] = (
row.get("label")
or (wf.get("label") if isinstance(wf, dict) else None)
or wfId
or ""
)
row["mandateLabel"] = mandateLabelMap.get(row.get("mandateId"), row.get("mandateId") or "")
fiid = wf.get("featureInstanceId") if isinstance(wf, dict) else None
row["featureInstanceId"] = fiid
row["instanceLabel"] = instanceLabelMap.get(fiid, fiid or "")
runs.append(row) runs.append(row)
return {"runs": runs, "total": total, "limit": limit, "offset": offset} return {"runs": runs, "total": total, "limit": limit, "offset": offset}
@ -215,54 +240,73 @@ def get_workflow_metrics(
request: Request, request: Request,
context: RequestContext = Depends(getRequestContext), context: RequestContext = Depends(getRequestContext),
) -> dict: ) -> dict:
"""Aggregated metrics across all accessible workflow runs (SQL COUNT).""" """Aggregated metrics across all accessible workflow runs (SQL COUNT).
Uses the same RBAC scoping as the runs list and workflows list
so that metric cards always match the table data.
"""
db = _getDb() db = _getDb()
if not db._ensureTableExists(AutoRun):
return {"totalRuns": 0, "runsByStatus": {}, "totalTokens": 0, "totalCredits": 0}
baseFilter = _scopedRunFilter(context)
countPagination = PaginationParams(page=1, pageSize=1)
countResult = db.getRecordsetPaginated(AutoRun, pagination=countPagination, recordFilter=baseFilter)
totalRuns = countResult.get("totalItems", 0) if isinstance(countResult, dict) else countResult.totalItems
statusValues = db.getDistinctColumnValues(AutoRun, "status", recordFilter=baseFilter)
runsByStatus = {}
for sv in statusValues:
statusFilter = dict(baseFilter) if baseFilter else {}
statusFilter["status"] = sv
sr = db.getRecordsetPaginated(AutoRun, pagination=PaginationParams(page=1, pageSize=1), recordFilter=statusFilter)
runsByStatus[sv] = sr.get("totalItems", 0) if isinstance(sr, dict) else sr.totalItems
totalTokens = 0
totalCredits = 0.0
if totalRuns > 0 and totalRuns <= 10000:
allRuns = db.getRecordset(AutoRun, recordFilter=baseFilter, fieldFilter=["costTokens", "costCredits"]) or []
for r in allRuns:
totalTokens += r.get("costTokens", 0) or 0
totalCredits += r.get("costCredits", 0.0) or 0.0
# --- Workflow counts (same filter as /workflows endpoint) ---
workflowCount = 0 workflowCount = 0
activeWorkflows = 0 activeWorkflows = 0
if db._ensureTableExists(AutoWorkflow): if db._ensureTableExists(AutoWorkflow):
wfFilter: dict = {"isTemplate": False} wfBaseFilter = _scopedWorkflowFilter(context)
if not context.hasSysAdminRole: wfFilter = dict(wfBaseFilter) if wfBaseFilter else {}
userId = str(context.user.id) if context.user else None wfFilter["isTemplate"] = False
mandateIds = _getUserMandateIds(userId) if userId else []
if mandateIds:
wfFilter["mandateId"] = mandateIds
else:
wfFilter["mandateId"] = "__impossible__"
wfCount = db.getRecordsetPaginated(AutoWorkflow, pagination=PaginationParams(page=1, pageSize=1), recordFilter=wfFilter) wfCount = db.getRecordsetPaginated(
AutoWorkflow, pagination=PaginationParams(page=1, pageSize=1),
recordFilter=wfFilter if wfFilter else None,
)
workflowCount = wfCount.get("totalItems", 0) if isinstance(wfCount, dict) else wfCount.totalItems workflowCount = wfCount.get("totalItems", 0) if isinstance(wfCount, dict) else wfCount.totalItems
activeFilter = dict(wfFilter) activeFilter = dict(wfFilter)
activeFilter["active"] = True activeFilter["active"] = True
activeCount = db.getRecordsetPaginated(AutoWorkflow, pagination=PaginationParams(page=1, pageSize=1), recordFilter=activeFilter) activeCount = db.getRecordsetPaginated(
AutoWorkflow, pagination=PaginationParams(page=1, pageSize=1),
recordFilter=activeFilter,
)
activeWorkflows = activeCount.get("totalItems", 0) if isinstance(activeCount, dict) else activeCount.totalItems activeWorkflows = activeCount.get("totalItems", 0) if isinstance(activeCount, dict) else activeCount.totalItems
# --- Run counts (same filter as /runs endpoint) ---
if not db._ensureTableExists(AutoRun):
return {
"totalRuns": 0, "runsByStatus": {}, "totalTokens": 0,
"totalCredits": 0, "workflowCount": workflowCount,
"activeWorkflows": activeWorkflows,
}
runBaseFilter = _scopedRunFilter(context)
countResult = db.getRecordsetPaginated(
AutoRun, pagination=PaginationParams(page=1, pageSize=1),
recordFilter=runBaseFilter,
)
totalRuns = countResult.get("totalItems", 0) if isinstance(countResult, dict) else countResult.totalItems
runsByStatus: dict = {}
try:
statusValues = db.getDistinctColumnValues(AutoRun, "status", recordFilter=runBaseFilter)
for sv in (statusValues or []):
statusFilter = dict(runBaseFilter) if runBaseFilter else {}
statusFilter["status"] = sv
sr = db.getRecordsetPaginated(
AutoRun, pagination=PaginationParams(page=1, pageSize=1),
recordFilter=statusFilter,
)
runsByStatus[sv] = sr.get("totalItems", 0) if isinstance(sr, dict) else sr.totalItems
except Exception as e:
logger.warning(f"Failed to compute runsByStatus: {e}")
totalTokens = 0
totalCredits = 0.0
if 0 < totalRuns <= 10000:
allRuns = db.getRecordset(AutoRun, recordFilter=runBaseFilter, fieldFilter=["costTokens", "costCredits"]) or []
for r in allRuns:
totalTokens += r.get("costTokens", 0) or 0
totalCredits += r.get("costCredits", 0.0) or 0.0
return { return {
"totalRuns": totalRuns, "totalRuns": totalRuns,
"runsByStatus": runsByStatus, "runsByStatus": runsByStatus,
@ -367,17 +411,18 @@ def get_system_workflows(
mandateLabelMap: dict = {} mandateLabelMap: dict = {}
instanceLabelMap: dict = {} instanceLabelMap: dict = {}
try: try:
mgmtDb = _getManagementDb() rootIface = getRootInterface()
if mandateIds and mgmtDb._ensureTableExists(Mandate): if mandateIds:
mandates = mgmtDb.getRecordset(Mandate, recordFilter={"id": mandateIds}) mandateMap = rootIface.getMandatesByIds(mandateIds)
for m in (mandates or []): for mid, m in mandateMap.items():
row = dict(m) mandateLabelMap[mid] = getattr(m, "label", None) or getattr(m, "name", mid) or mid
mandateLabelMap[row.get("id")] = row.get("label") or row.get("name") or row.get("id") if instanceIds:
if instanceIds and mgmtDb._ensureTableExists(FeatureInstance): from modules.interfaces.interfaceFeatures import getFeatureInterface
instances = mgmtDb.getRecordset(FeatureInstance, recordFilter={"id": instanceIds}) featureIface = getFeatureInterface(rootIface.db)
for fi in (instances or []): for iid in instanceIds:
row = dict(fi) fi = featureIface.getFeatureInstance(iid)
instanceLabelMap[row.get("id")] = row.get("label") or row.get("id") if fi:
instanceLabelMap[iid] = fi.label or iid
except Exception as e: except Exception as e:
logger.warning(f"Failed to enrich workflow labels: {e}") logger.warning(f"Failed to enrich workflow labels: {e}")
@ -387,12 +432,37 @@ def get_system_workflows(
userMandateIds = _getUserMandateIds(userId) userMandateIds = _getUserMandateIds(userId)
adminMandateIds = _getAdminMandateIds(userId, userMandateIds) adminMandateIds = _getAdminMandateIds(userId, userMandateIds)
workflowIds = [w.get("id") for w in pageItems if w.get("id")]
activeRunMap: dict = {}
runCountMap: dict = {}
lastStartedMap: dict = {}
if workflowIds:
try:
if db._ensureTableExists(AutoRun):
for wfId in workflowIds:
runs = db.getRecordset(AutoRun, recordFilter={"workflowId": wfId})
runCountMap[wfId] = len(runs)
for r in runs:
rDict = dict(r)
ts = rDict.get("sysCreatedAt")
if ts and (lastStartedMap.get(wfId) is None or ts > lastStartedMap.get(wfId)):
lastStartedMap[wfId] = ts
if rDict.get("status") in ("running", "paused"):
activeRunMap[wfId] = rDict.get("id")
except Exception as e:
logger.warning(f"Failed to enrich workflow run info: {e}")
items = [] items = []
for w in pageItems: for w in pageItems:
row = dict(w) row = dict(w)
wMandateId = row.get("mandateId") wMandateId = row.get("mandateId")
wfId = row.get("id")
row["mandateLabel"] = mandateLabelMap.get(wMandateId, wMandateId or "") row["mandateLabel"] = mandateLabelMap.get(wMandateId, wMandateId or "")
row["instanceLabel"] = instanceLabelMap.get(row.get("featureInstanceId"), row.get("featureInstanceId") or "") row["instanceLabel"] = instanceLabelMap.get(row.get("featureInstanceId"), row.get("featureInstanceId") or "")
row["isRunning"] = wfId in activeRunMap
row["activeRunId"] = activeRunMap.get(wfId)
row["runCount"] = runCountMap.get(wfId, 0)
row["lastStartedAt"] = lastStartedMap.get(wfId)
if context.hasSysAdminRole: if context.hasSysAdminRole:
row["canEdit"] = True row["canEdit"] = True
@ -420,3 +490,110 @@ def get_system_workflows(
"totalPages": totalPages, "totalPages": totalPages,
}, },
} }
# ---------------------------------------------------------------------------
# SSE stream for live run tracing (system-level, no instanceId required)
# ---------------------------------------------------------------------------
@router.get("/{runId}/stream")
async def get_run_stream(
request: Request,
runId: str = Path(..., description="Run ID"),
context: RequestContext = Depends(getRequestContext),
):
"""SSE stream for live step-log updates during a workflow run (system-level)."""
db = _getDb()
if not db._ensureTableExists(AutoRun):
raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
runs = db.getRecordset(AutoRun, recordFilter={"id": runId})
if not runs:
raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
run = dict(runs[0])
if not context.hasSysAdminRole:
userId = str(context.user.id) if context.user else None
runOwner = run.get("ownerId")
runMandate = run.get("mandateId")
if runOwner == userId:
pass
elif runMandate and userId and _isUserMandateAdmin(userId, runMandate):
pass
else:
raise HTTPException(status_code=403, detail=routeApiMsg("Access denied"))
from modules.serviceCenter.core.serviceStreaming.eventManager import get_event_manager
sseEventManager = get_event_manager()
queueId = f"run-trace-{runId}"
sseEventManager.create_queue(queueId)
async def _sseGenerator():
queue = sseEventManager.get_queue(queueId)
if not queue:
return
while True:
try:
event = await asyncio.wait_for(queue.get(), timeout=30)
except asyncio.TimeoutError:
yield "data: {\"type\": \"keepalive\"}\n\n"
continue
if event is None:
break
payload = event.get("data", event) if isinstance(event, dict) else event
yield f"data: {json.dumps(payload, default=str)}\n\n"
eventType = payload.get("type", "") if isinstance(payload, dict) else ""
if eventType in ("run_complete", "run_failed"):
break
await sseEventManager.cleanup(queueId, delay=10)
return StreamingResponse(
_sseGenerator(),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no",
},
)
@router.post("/{runId}/stop")
@limiter.limit("30/minute")
def stop_workflow_run(
request: Request,
runId: str = Path(..., description="Run ID"),
context: RequestContext = Depends(getRequestContext),
):
"""Stop a running workflow execution (system-level)."""
db = _getDb()
if not db._ensureTableExists(AutoRun):
raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
runs = db.getRecordset(AutoRun, recordFilter={"id": runId})
if not runs:
raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
run = dict(runs[0])
if not context.hasSysAdminRole:
userId = str(context.user.id) if context.user else None
runOwner = run.get("ownerId")
runMandate = run.get("mandateId")
if runOwner == userId:
pass
elif runMandate and userId and _isUserMandateAdmin(userId, runMandate):
pass
else:
raise HTTPException(status_code=403, detail=routeApiMsg("Access denied"))
from modules.workflows.automation2.executionEngine import requestRunStop
flagged = requestRunStop(runId)
if not flagged:
currentStatus = run.get("status", "")
if currentStatus in ("completed", "failed", "stopped"):
return {"status": currentStatus, "runId": runId, "message": "Run already finished"}
db.recordModify(AutoRun, runId, {"status": "stopped"})
return {"status": "stopped", "runId": runId, "message": "Run not active in memory, marked as stopped"}
return {"status": "stopping", "runId": runId, "message": "Stop signal sent"}

View file

@ -159,8 +159,8 @@ async def runAgentLoop(
if getExternalMemoryKeysFn: if getExternalMemoryKeysFn:
try: try:
memKeys = getExternalMemoryKeysFn() memKeys = getExternalMemoryKeysFn()
except Exception: except Exception as e:
pass logger.warning(f"getExternalMemoryKeysFn failed: {e}")
await conversation.summarize( await conversation.summarize(
state.currentRound, _summarizeCall, externalMemoryKeys=memKeys or None state.currentRound, _summarizeCall, externalMemoryKeys=memKeys or None
) )

View file

@ -170,8 +170,8 @@ def _registerDataSourceTools(registry: ToolRegistry, services):
if "." in entry.name: if "." in entry.name:
fileName = entry.name fileName = entry.name
break break
except Exception: except Exception as e:
pass logger.warning(f"downloadFromDataSource: browse for filename failed: {e}")
if "." not in fileName: if "." not in fileName:
if fileBytes[:4] == b"%PDF": if fileBytes[:4] == b"%PDF":
fileName = f"{fileName}.pdf" fileName = f"{fileName}.pdf"

View file

@ -352,8 +352,8 @@ def _registerDocumentTools(registry: ToolRegistry, services):
mimeType = "image/gif" mimeType = "image/gif"
elif rawHead[:4] == b"RIFF" and rawHead[8:12] == b"WEBP": elif rawHead[:4] == b"RIFF" and rawHead[8:12] == b"WEBP":
mimeType = "image/webp" mimeType = "image/webp"
except Exception: except Exception as e:
pass logger.warning(f"describeImage: MIME detection from base64 header failed for {fileId}: {e}")
dataUrl = f"data:{mimeType};base64,{imageData}" dataUrl = f"data:{mimeType};base64,{imageData}"
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum as OTE from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum as OTE
@ -367,8 +367,8 @@ def _registerDocumentTools(registry: ToolRegistry, services):
if bool(_fGet("neutralize", False)): if bool(_fGet("neutralize", False)):
_opType = OTE.NEUTRALIZATION_IMAGE _opType = OTE.NEUTRALIZATION_IMAGE
logger.info(f"describeImage: file {fileId} has neutralize=True, using NEUTRALIZATION_IMAGE (internal models only)") logger.info(f"describeImage: file {fileId} has neutralize=True, using NEUTRALIZATION_IMAGE (internal models only)")
except Exception: except Exception as e:
pass logger.warning(f"describeImage: neutralize flag check failed for {fileId}: {e}")
visionRequest = AiCallRequest( visionRequest = AiCallRequest(
prompt=prompt, prompt=prompt,

View file

@ -31,8 +31,8 @@ def _getOrCreateFeatureDbConnector(featureDbName: str, userId: str):
try: try:
if conn.connection and not conn.connection.closed: if conn.connection and not conn.connection.closed:
return conn return conn
except Exception: except Exception as e:
pass logger.warning(f"Feature DB connection check failed for {featureDbName}: {e}")
_featureDbConnPool.pop(featureDbName, None) _featureDbConnPool.pop(featureDbName, None)
from modules.connectors.connectorDbPostgre import DatabaseConnector from modules.connectors.connectorDbPostgre import DatabaseConnector

View file

@ -29,8 +29,8 @@ def _resolveFileScope(fileId: str, context: dict) -> tuple:
_get = (lambda k: fm.get(k, "")) if isinstance(fm, dict) else (lambda k: getattr(fm, k, "")) _get = (lambda k: fm.get(k, "")) if isinstance(fm, dict) else (lambda k: getattr(fm, k, ""))
fiId = fiId or str(_get("featureInstanceId") or "") fiId = fiId or str(_get("featureInstanceId") or "")
mId = mId or str(_get("mandateId") or "") mId = mId or str(_get("mandateId") or "")
except Exception: except Exception as e:
pass logger.warning(f"_resolveFileScope failed for fileId={fileId}: {e}")
return fiId, mId return fiId, mId

View file

@ -240,8 +240,8 @@ def _registerMediaTools(registry: ToolRegistry, services):
knowledgeService = None knowledgeService = None
try: try:
knowledgeService = services.getService("knowledge") knowledgeService = services.getService("knowledge")
except Exception: except Exception as e:
pass logger.warning(f"renderDocument: knowledge service unavailable: {e}")
resolvedImages = 0 resolvedImages = 0
for doc in structuredContent.get("documents", []): for doc in structuredContent.get("documents", []):
for section in doc.get("sections", []): for section in doc.get("sections", []):
@ -268,8 +268,8 @@ def _registerMediaTools(registry: ToolRegistry, services):
contentObj["base64Data"] = _b64.b64encode(rawBytes).decode("ascii") contentObj["base64Data"] = _b64.b64encode(rawBytes).decode("ascii")
contentObj["mimeType"] = "image/png" contentObj["mimeType"] = "image/png"
resolvedImages += 1 resolvedImages += 1
except Exception: except Exception as e:
pass logger.warning(f"renderDocument: image resolve failed for fileRef={fileRef}: {e}")
contentObj.pop("_fileRef", None) contentObj.pop("_fileRef", None)
contentObj.pop("_srcUrl", None) contentObj.pop("_srcUrl", None)

View file

@ -136,8 +136,8 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
featureInstanceId=_fiId, featureInstanceId=_fiId,
mandateId=_mId, mandateId=_mId,
) )
except Exception: except Exception as e:
pass logger.warning(f"readFile: knowledge indexing failed for {fileId}: {e}")
joined = "" joined = ""
if knowledgeService: if knowledgeService:
@ -190,8 +190,8 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
if _fRec: if _fRec:
_fG = (lambda k, d=None: _fRec.get(k, d)) if isinstance(_fRec, dict) else (lambda k, d=None: getattr(_fRec, k, d)) _fG = (lambda k, d=None: _fRec.get(k, d)) if isinstance(_fRec, dict) else (lambda k, d=None: getattr(_fRec, k, d))
_fileNeedNeutralize = bool(_fG("neutralize", False)) _fileNeedNeutralize = bool(_fG("neutralize", False))
except Exception: except Exception as e:
pass logger.warning(f"readFile: neutralize flag check failed for {fileId}: {e}")
if _fileNeedNeutralize: if _fileNeedNeutralize:
try: try:
_nSvc = services.getService("neutralization") if hasattr(services, "getService") else None _nSvc = services.getService("neutralization") if hasattr(services, "getService") else None
@ -606,8 +606,8 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
knowledgeService = services.getService("knowledge") knowledgeService = services.getService("knowledge")
if knowledgeService and hasattr(knowledgeService, "removeFile"): if knowledgeService and hasattr(knowledgeService, "removeFile"):
knowledgeService.removeFile(fileId) knowledgeService.removeFile(fileId)
except Exception: except Exception as e:
pass logger.warning(f"deleteFile: knowledge store cleanup failed for {fileId}: {e}")
chatService.interfaceDbComponent.deleteFile(fileId) chatService.interfaceDbComponent.deleteFile(fileId)
return ToolResult( return ToolResult(
toolCallId="", toolName="deleteFile", success=True, toolCallId="", toolName="deleteFile", success=True,

View file

@ -175,8 +175,8 @@ class AgentService:
try: try:
from modules.interfaces.interfaceDbKnowledge import getInterface as _getKnowledgeInterface from modules.interfaces.interfaceDbKnowledge import getInterface as _getKnowledgeInterface
knowledgeDb = _getKnowledgeInterface() knowledgeDb = _getKnowledgeInterface()
except Exception: except Exception as e:
pass logger.warning(f"Knowledge DB interface unavailable: {e}")
fileDescriptions = [] fileDescriptions = []
for fid in fileIds: for fid in fileIds:

View file

@ -149,6 +149,10 @@ class ProgressLogger:
# Parent operation never existed - log warning # Parent operation never existed - log warning
logger.debug(f"WARNING: Parent operation '{parentOperationId}' not found in activeOperations when creating log for '{operationId}'. Available operations: {list(self.activeOperations.keys())}. Child operation may appear at root level.") logger.debug(f"WARNING: Parent operation '{parentOperationId}' not found in activeOperations when creating log for '{operationId}'. Available operations: {list(self.activeOperations.keys())}. Child operation may appear at root level.")
wfId = getattr(workflow, 'id', None) or ""
if isinstance(wfId, str) and wfId.startswith("transient-"):
return None
# Get round number from workflow - include in operationId for unique per-round operations # Get round number from workflow - include in operationId for unique per-round operations
roundNumber = getattr(workflow, 'currentRound', None) or 1 roundNumber = getattr(workflow, 'currentRound', None) or 1

View file

@ -313,6 +313,16 @@ NAVIGATION_SECTIONS = [
"adminOnly": True, "adminOnly": True,
"sysAdminOnly": True, "sysAdminOnly": True,
}, },
{
"id": "admin-demo-config",
"objectKey": "ui.admin.demoConfig",
"label": t("Demo Config"),
"icon": "FaCubes",
"path": "/admin/demo-config",
"order": 100,
"adminOnly": True,
"sysAdminOnly": True,
},
], ],
}, },
], ],

View file

@ -34,6 +34,29 @@ logger = logging.getLogger(__name__)
_NODE_DEF_BY_ID: Dict[str, dict] = {} _NODE_DEF_BY_ID: Dict[str, dict] = {}
# Registry of currently executing runs: runId -> context dict.
# Used by requestRunStop() to set context["_stopped"] = True.
_activeRunContexts: Dict[str, Dict[str, Any]] = {}
def requestRunStop(runId: str) -> bool:
"""Request a running workflow to stop at the next node boundary.
Returns True if the run was found and flagged, False otherwise.
"""
ctx = _activeRunContexts.get(runId)
if ctx is not None:
ctx["_stopped"] = True
logger.info("requestRunStop: flagged runId=%s for stop", runId)
return True
logger.warning("requestRunStop: runId=%s not found in active runs", runId)
return False
def getActiveRunIds() -> list:
"""Return list of currently executing run IDs."""
return list(_activeRunContexts.keys())
def _getNodeDef(nodeType: str) -> Optional[dict]: def _getNodeDef(nodeType: str) -> Optional[dict]:
"""Lookup static node definition by type id (cached).""" """Lookup static node definition by type id (cached)."""
@ -78,8 +101,8 @@ def _normalizeResult(result: Any, nodeType: str) -> Any:
if schema and schema != "Transit" and isinstance(result, dict): if schema and schema != "Transit" and isinstance(result, dict):
try: try:
return _normalizeToSchema(result, schema) return _normalizeToSchema(result, schema)
except Exception: except Exception as e:
pass logger.warning(f"_normalizeResult failed for nodeType={nodeType}, schema={schema}: {e}")
return result return result
@ -163,8 +186,8 @@ def _emitStepEvent(runId: str, stepData: Dict[str, Any]) -> None:
loop = asyncio.get_event_loop() loop = asyncio.get_event_loop()
if loop.is_running(): if loop.is_running():
asyncio.ensure_future(em.emit_event(queueId, "step", stepData, event_category="tracing")) asyncio.ensure_future(em.emit_event(queueId, "step", stepData, event_category="tracing"))
except Exception: except Exception as e:
pass logger.warning(f"_emitStepEvent failed for runId={runId}: {e}")
def _createStepLog(iface, runId: str, nodeId: str, nodeType: str, status: str = "running", inputSnapshot: Dict = None) -> Optional[str]: def _createStepLog(iface, runId: str, nodeId: str, nodeType: str, status: str = "running", inputSnapshot: Dict = None) -> Optional[str]:
@ -270,6 +293,7 @@ async def executeGraph(
startAfterNodeId: Optional[str] = None, startAfterNodeId: Optional[str] = None,
runId: Optional[str] = None, runId: Optional[str] = None,
run_envelope: Optional[Dict[str, Any]] = None, run_envelope: Optional[Dict[str, Any]] = None,
label: Optional[str] = None,
) -> Dict[str, Any]: ) -> Dict[str, Any]:
""" """
Execute automation2 graph. Returns { success, nodeOutputs, error?, stopped? }. Execute automation2 graph. Returns { success, nodeOutputs, error?, stopped? }.
@ -336,13 +360,30 @@ async def executeGraph(
run_context["mandateId"] = mandateId run_context["mandateId"] = mandateId
if instanceId: if instanceId:
run_context["instanceId"] = instanceId run_context["instanceId"] = instanceId
run_label = label
if not run_label and automation2_interface and workflowId:
try:
wfObj = automation2_interface.getWorkflow(workflowId)
if wfObj:
wfDict = wfObj if isinstance(wfObj, dict) else (
wfObj.model_dump() if hasattr(wfObj, "model_dump") else {}
)
run_label = wfDict.get("label")
except Exception:
pass
if not run_label:
ts = datetime.now(timezone.utc).strftime("%d.%m.%Y %H:%M")
run_label = f"Manuell ({ts})"
run = automation2_interface.createRun( run = automation2_interface.createRun(
workflowId=workflowId, workflowId=workflowId,
nodeOutputs=nodeOutputs, nodeOutputs=nodeOutputs,
context=run_context, context=run_context,
label=run_label,
) )
runId = run.get("id") if run else None runId = run.get("id") if run else None
logger.info("executeGraph created run %s", runId) logger.info("executeGraph created run %s label=%s", runId, run_label)
env_for_run = normalize_run_envelope(run_envelope, user_id=userId) env_for_run = normalize_run_envelope(run_envelope, user_id=userId)
@ -362,6 +403,9 @@ async def executeGraph(
# _context key in nodeOutputs for system variable resolution # _context key in nodeOutputs for system variable resolution
nodeOutputs["_context"] = context nodeOutputs["_context"] = context
if runId:
_activeRunContexts[runId] = context
skip_until_passed = bool(startAfterNodeId) skip_until_passed = bool(startAfterNodeId)
processed_in_loop: Set[str] = set() processed_in_loop: Set[str] = set()
_aggregateAccumulators: Dict[str, list] = {} _aggregateAccumulators: Dict[str, list] = {}
@ -432,6 +476,8 @@ async def executeGraph(
nodeOutputs[bnid] = {"error": str(ex), "success": False} nodeOutputs[bnid] = {"error": str(ex), "success": False}
if runId and automation2_interface: if runId and automation2_interface:
automation2_interface.updateRun(runId, status="failed", nodeOutputs=_serializableOutputs(nodeOutputs)) automation2_interface.updateRun(runId, status="failed", nodeOutputs=_serializableOutputs(nodeOutputs))
if runId:
_activeRunContexts.pop(runId, None)
return {"success": False, "error": str(ex), "nodeOutputs": _serializableOutputs(nodeOutputs), "failedNode": bnid, "runId": runId} return {"success": False, "error": str(ex), "nodeOutputs": _serializableOutputs(nodeOutputs), "failedNode": bnid, "runId": runId}
next_index += 1 next_index += 1
if loop_node_id: if loop_node_id:
@ -544,6 +590,8 @@ async def executeGraph(
nodeOutputs[bnid] = {"error": str(ex), "success": False} nodeOutputs[bnid] = {"error": str(ex), "success": False}
if runId and automation2_interface: if runId and automation2_interface:
automation2_interface.updateRun(runId, status="failed", nodeOutputs=_serializableOutputs(nodeOutputs)) automation2_interface.updateRun(runId, status="failed", nodeOutputs=_serializableOutputs(nodeOutputs))
if runId:
_activeRunContexts.pop(runId, None)
return {"success": False, "error": str(ex), "nodeOutputs": _serializableOutputs(nodeOutputs), "failedNode": bnid, "runId": runId} return {"success": False, "error": str(ex), "nodeOutputs": _serializableOutputs(nodeOutputs), "failedNode": bnid, "runId": runId}
nodeOutputs[nodeId] = {"items": items, "count": len(items)} nodeOutputs[nodeId] = {"items": items, "count": len(items)}
# Finalize aggregate accumulators after loop # Finalize aggregate accumulators after loop
@ -666,8 +714,10 @@ async def executeGraph(
mandateId=mandateId, mandateId=mandateId,
workflowLabel=_wfDict.get("label"), workflowLabel=_wfDict.get("label"),
) )
except Exception: except Exception as notifyErr:
pass logger.warning(f"executeGraph: failure notification failed for run={runId}: {notifyErr}")
if runId:
_activeRunContexts.pop(runId, None)
return { return {
"success": False, "success": False,
"error": str(e), "error": str(e),
@ -677,19 +727,22 @@ async def executeGraph(
} }
_safeOutputs = _serializableOutputs(nodeOutputs) _safeOutputs = _serializableOutputs(nodeOutputs)
_wasStopped = context.get("_stopped", False)
_finalStatus = "stopped" if _wasStopped else "completed"
if runId and automation2_interface: if runId and automation2_interface:
automation2_interface.updateRun(runId, status="completed", nodeOutputs=_safeOutputs) automation2_interface.updateRun(runId, status=_finalStatus, nodeOutputs=_safeOutputs)
if runId: if runId:
_emitStepEvent(runId, {"type": "run_complete", "runId": runId, "status": "completed"}) _emitStepEvent(runId, {"type": "run_complete", "runId": runId, "status": _finalStatus})
_activeRunContexts.pop(runId, None)
logger.info( logger.info(
"executeGraph complete: success=True nodeOutputs_keys=%s stopped=%s", "executeGraph complete: success=True nodeOutputs_keys=%s stopped=%s",
list(nodeOutputs.keys()), list(nodeOutputs.keys()),
context.get("_stopped", False), _wasStopped,
) )
return { return {
"success": True, "success": True,
"nodeOutputs": _safeOutputs, "nodeOutputs": _safeOutputs,
"stopped": context.get("_stopped", False), "stopped": _wasStopped,
"runId": runId, "runId": runId,
} }

View file

@ -179,7 +179,8 @@ class DataExecutor:
if isinstance(item, dict): if isinstance(item, dict):
return bool(eval(condition, {"__builtins__": {}}, item)) return bool(eval(condition, {"__builtins__": {}}, item))
return bool(item) return bool(item)
except Exception: except Exception as e:
logger.warning(f"_evalFilterCondition eval failed for condition='{condition}': {e}")
return True return True
return True return True

View file

@ -179,7 +179,8 @@ class FlowExecutor:
if a is None or b is None: if a is None or b is None:
return False return False
return op(a, b) return op(a, b)
except Exception: except Exception as e:
logger.warning(f"_compare_dates failed: left={left}, right={right}: {e}")
return False return False
def _file_exists(self, val: Any) -> bool: def _file_exists(self, val: Any) -> bool:
@ -201,7 +202,8 @@ class FlowExecutor:
if isinstance(resolved, str): if isinstance(resolved, str):
try: try:
return bool(eval(resolved)) return bool(eval(resolved))
except Exception: except Exception as e:
logger.warning(f"_evalCondition eval failed for expression: {e}")
return bool(resolved) return bool(resolved)
return bool(resolved) return bool(resolved)

View file

@ -1,302 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
"""
Automation2 schedule scheduler.
Starts/stops cron jobs for workflows with schedule entry points.
"""
import asyncio
import logging
from typing import Any, Dict, Optional
from modules.shared.eventManagement import eventManager
from modules.shared.i18nRegistry import resolveText
# Handle to the main asyncio event loop. Scheduler jobs may execute in worker
# threads, and they need this reference to hand coroutines back to the loop.
_main_loop = None


def set_main_loop(loop) -> None:
    """Remember the application's main event loop for thread-safe scheduling."""
    global _main_loop
    _main_loop = loop
from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import (
getGraphicalEditorInterface as getAutomation2Interface,
getAllWorkflowsForScheduling,
)
from modules.features.graphicalEditor.mainGraphicalEditor import getGraphicalEditorServices as getAutomation2Services
from modules.features.graphicalEditor.entryPoints import find_invocation
from modules.workflows.automation2.scheduleCron import parse_cron_to_kwargs
def _cron_to_interval_seconds(cron: str):
"""
If cron represents a simple interval, return seconds. Otherwise None.
E.g. "* * * * *" -> 60, "*/15 * * * *" -> 900, "*/30 * * * * *" -> 30.
"""
if not cron or not isinstance(cron, str):
return None
parts = cron.strip().split()
if len(parts) == 5:
minute, hour, day, month, dow = parts
second = "0"
elif len(parts) == 6:
second, minute, hour, day, month, dow = parts
else:
return None
# Interval minutes: */N * * * *
if minute.startswith("*/") and hour == "*" and day == "*" and month == "*" and dow == "*":
n = int(minute[2:]) if minute[2:].isdigit() else 0
if n > 0:
return n * 60
# Every minute: * * * * *
if minute == "*" and hour == "*" and day == "*" and month == "*" and dow == "*" and second == "0":
return 60
# Interval hours: 0 */N * * *
if minute == "0" and hour.startswith("*/") and day == "*" and month == "*" and dow == "*":
n = int(hour[2:]) if hour[2:].isdigit() else 0
if n > 0:
return n * 3600
# Interval seconds: */N * * * * * (6-field)
if len(parts) == 6 and second.startswith("*/") and minute == "*" and hour == "*" and day == "*" and month == "*" and dow in ("*", "?"):
n = int(second[2:]) if second[2:].isdigit() else 0
if n > 0:
return n
return None
from modules.workflows.automation2.executionEngine import executeGraph
from modules.workflows.automation2.runEnvelope import default_run_envelope, normalize_run_envelope
logger = logging.getLogger(__name__)
JOB_ID_PREFIX = "automation2."
def _remove_all_automation2_schedule_jobs() -> None:
    """Deregister every Automation2 schedule job from the shared scheduler."""
    scheduler = eventManager.scheduler
    if not scheduler:
        return
    for job in list(scheduler.get_jobs()):
        job_id = job.id if hasattr(job, "id") else str(job)
        if not job_id.startswith(JOB_ID_PREFIX):
            continue
        try:
            eventManager.remove(job_id)
        except Exception as exc:
            # Best effort: a job disappearing between get_jobs() and remove() is harmless.
            logger.debug("Could not remove job %s: %s", job_id, exc)
def sync_automation2_schedule_events(event_user) -> Dict[str, Any]:
    """
    Sync scheduler with all active Automation2 workflows that have schedule entry points.

    Removes all previously registered Automation2 jobs, then registers one
    interval job (for simple "every N" crons) or one cron job per workflow.

    Args:
        event_user: System user under which scheduled runs execute; sync is
            skipped when missing.

    Returns:
        Summary dict: {"synced": count, "workflowsFound": count,
        "events": {workflowId: jobId}}.
    """
    if not event_user:
        logger.warning("Automation2 schedule: No event user, skipping sync")
        return {"synced": 0, "events": {}}
    _remove_all_automation2_schedule_jobs()
    items = getAllWorkflowsForScheduling()
    registered = {}
    logger.info(
        "Automation2 schedule: found %d workflow(s) with trigger.schedule and cron",
        len(items),
    )
    for item in items:
        workflow_id = item.get("workflowId")
        mandate_id = item.get("mandateId")
        instance_id = item.get("featureInstanceId")
        entry_point_id = item.get("entryPointId")
        cron = item.get("cron")
        workflow = item.get("workflow")
        if not workflow_id or not instance_id or not cron:
            continue
        job_id = f"{JOB_ID_PREFIX}{workflow_id}"
        async_handler = _create_schedule_handler(
            workflow_id=workflow_id,
            mandate_id=mandate_id,
            instance_id=instance_id,
            entry_point_id=entry_point_id,
            workflow=workflow,
            event_user=event_user,
        )

        # Sync wrapper: schedule async handler on main loop (job may run in executor thread).
        # BUGFIX: bind the handler as a default argument. Closures capture
        # variables, not values, so without this binding every job registered
        # in this loop would invoke the LAST workflow's handler.
        def sync_wrapper(handler=async_handler):
            loop = _main_loop
            if loop and loop.is_running():
                # The callback runs inside `loop`, so ensure_future picks up
                # the running loop; the explicit loop= kwarg was removed in
                # Python 3.10 and must not be passed.
                loop.call_soon_threadsafe(
                    lambda: asyncio.ensure_future(handler())
                )
            else:
                # Fallback: run inline if no loop (shouldn't happen)
                try:
                    asyncio.run(handler())
                except RuntimeError:
                    logger.warning("Automation2 schedule: could not run handler, no event loop")

        # Use IntervalTrigger for "every N minutes" - more reliable than CronTrigger
        interval_seconds = _cron_to_interval_seconds(cron)
        if interval_seconds is not None:
            eventManager.registerInterval(
                jobId=job_id,
                func=sync_wrapper,
                seconds=interval_seconds,
                replaceExisting=True,
            )
        else:
            try:
                cron_kwargs = parse_cron_to_kwargs(cron)
                eventManager.registerCron(
                    jobId=job_id,
                    func=sync_wrapper,
                    cronKwargs=cron_kwargs,
                    replaceExisting=True,
                )
            except ValueError as e:
                logger.warning("Workflow %s: invalid cron %r: %s", workflow_id, cron, e)
                continue
        registered[workflow_id] = job_id
        mode = "interval" if interval_seconds is not None else "cron"
        logger.info(
            "Automation2 schedule: registered %s for workflow %s (%s=%s)",
            job_id,
            workflow_id,
            mode,
            interval_seconds if interval_seconds is not None else cron,
        )
    if not registered and items:
        logger.warning("Automation2 schedule: workflows found but none registered (check cron format)")
    elif not items:
        logger.info("Automation2 schedule: no workflows with trigger.schedule+cron (save workflow after selecting Zeitplan)")
    return {"synced": len(registered), "workflowsFound": len(items), "events": registered}
def _create_schedule_handler(
    workflow_id: str,
    mandate_id: str,
    instance_id: str,
    entry_point_id: str,
    workflow: Dict[str, Any],
    event_user,
):
    """Create async handler for scheduled workflow execution.

    The returned coroutine function re-loads the workflow on every firing so
    that edits or deactivation made after registration are honored.

    Args:
        workflow_id: Id of the Automation2 workflow to execute.
        mandate_id: Mandate scope the workflow belongs to.
        instance_id: Feature instance the workflow is bound to.
        entry_point_id: Schedule entry point that triggered registration.
        workflow: Workflow snapshot at registration time (not read here —
            the handler always fetches a fresh copy via the interface).
        event_user: System user under which the run executes.

    Returns:
        An async callable suitable for scheduling; it swallows and logs all
        exceptions so a failing run never kills the scheduler job.
    """
    async def handler():
        logger.info("Automation2 schedule: CRON FIRED for workflow %s", workflow_id)
        try:
            if not event_user:
                logger.error("Automation2 schedule: event user not available")
                return
            a2 = getAutomation2Interface(event_user, mandate_id, instance_id)
            wf = a2.getWorkflow(workflow_id)
            # NOTE(review): assumes getWorkflow returns a dict here — confirm;
            # other call sites also handle pydantic-style objects.
            if not wf or not wf.get("graph"):
                logger.warning("Automation2 schedule: workflow %s not found or no graph", workflow_id)
                return
            if not wf.get("active", True):
                logger.info("Automation2 schedule: workflow %s inactive, skipping", workflow_id)
                return
            inv = find_invocation(wf, entry_point_id)
            if inv and (inv.get("kind") != "schedule" or not inv.get("enabled", True)):
                logger.info("Automation2 schedule: entry point %s disabled for workflow %s", entry_point_id, workflow_id)
                return
            # If inv not found but graph has trigger.schedule, proceed (invocations may not be synced)
            services = getAutomation2Services(
                event_user,
                mandateId=mandate_id,
                featureInstanceId=instance_id,
            )
            # Function-scope import — presumably to avoid a circular import at
            # module load time; verify before hoisting to the top of the file.
            from modules.workflows.processing.shared.methodDiscovery import discoverMethods
            discoverMethods(services)
            # Resolve a localized entry-point label for the run envelope.
            title = (inv or {}).get("title") or {}
            requestLang: Optional[str] = getattr(event_user, "language", None)
            label = resolveText(title, requestLang) if title else ""
            run_env = default_run_envelope(
                "schedule",
                entry_point_id=entry_point_id,
                entry_point_label=label or None,
            )
            run_env = normalize_run_envelope(run_env, user_id=str(event_user.id) if event_user else None)
            # userId=None so tasks are created unassigned and visible to all instance users
            result = await executeGraph(
                graph=wf["graph"],
                services=services,
                workflowId=workflow_id,
                instanceId=instance_id,
                userId=None,
                mandateId=mandate_id,
                automation2_interface=a2,
                run_envelope=run_env,
            )
            logger.info(
                "Automation2 schedule: executed workflow %s success=%s paused=%s",
                workflow_id,
                result.get("success"),
                result.get("paused"),
            )
        except Exception as e:
            logger.exception("Automation2 schedule: failed to execute workflow %s: %s", workflow_id, e)
    return handler
def start(event_user) -> bool:
    """
    Start the Automation2 schedule scheduler and sync scheduled workflows.

    Registers a callback so the schedule is re-synced whenever workflows are
    created, updated or deleted, plus a one-shot delayed re-sync. Returns
    False only when startup itself raised.
    """
    if not event_user:
        logger.warning("Automation2 schedule: No event user provided, skipping")
        return True
    try:
        eventManager.start()
        sync_automation2_schedule_events(event_user)
        logger.info("Automation2 schedule: sync complete")

        # Delayed sync (5s) in case the DB was not ready at startup.
        import threading
        import time

        def _delayed_sync_worker():
            time.sleep(5)
            try:
                sync_automation2_schedule_events(event_user)
                logger.info("Automation2 schedule: delayed sync done")
            except Exception as e:
                logger.warning("Automation2 schedule: delayed sync failed: %s", e)

        threading.Thread(target=_delayed_sync_worker, daemon=True).start()

        def _resync_on_change(_context=None):
            try:
                sync_automation2_schedule_events(event_user)
                logger.debug("Automation2 schedule: re-synced after workflow change")
            except Exception as e:
                logger.warning("Automation2 schedule: re-sync failed: %s", e)

        from modules.shared.callbackRegistry import callbackRegistry
        callbackRegistry.register("automation2.workflow.changed", _resync_on_change)
    except Exception as e:
        logger.error("Automation2 schedule: Failed to start: %s", e)
        return False
    return True
def stop(event_user) -> bool:
    """Stop Automation2 scheduling: best-effort removal of all schedule jobs.

    Always returns True; failures are only logged.
    """
    try:
        _remove_all_automation2_schedule_jobs()
    except Exception as exc:
        logger.warning("Automation2 schedule: error during stop: %s", exc)
    else:
        logger.info("Automation2 schedule: all jobs removed")
    return True

View file

@ -574,16 +574,14 @@ async def extractFromFiles(self, parameters: Dict[str, Any]) -> ActionResult:
raw = resultDoc.documentData raw = resultDoc.documentData
data = json.loads(raw) if isinstance(raw, str) else raw data = json.loads(raw) if isinstance(raw, str) else raw
hasError = "error" in data or not data.get("extractedData") hasError = "error" in data or not data.get("extractedData")
destSub = "error" if hasError else "processed" if hasError:
logger.info(f"Extraction failed for {moveInfo.get('fileName', '?')} — leaving file in place")
return
folderPath = (moveInfo.get("folderPath") or "").strip().rstrip("/") folderPath = (moveInfo.get("folderPath") or "").strip().rstrip("/")
destFolder = f"{folderPath}/{destSub}".strip("/") if folderPath else destSub destFolder = f"{folderPath}/processed".strip("/") if folderPath else "processed"
sourceFolder = folderPath or "" sourceFolder = folderPath or ""
fileName = moveInfo.get("fileName") or "file" fileName = moveInfo.get("fileName") or "file"
destFile = ( destFile = f"{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}_{fileName}"
f"{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}_{fileName}"
if not hasError
else fileName
)
await sharepoint.copyFileAsync( await sharepoint.copyFileAsync(
moveInfo["siteId"], sourceFolder, fileName, destFolder, destFile moveInfo["siteId"], sourceFolder, fileName, destFolder, destFile
) )

View file

@ -242,6 +242,8 @@ class WorkflowScheduler:
) )
runEnv = normalize_run_envelope(runEnv, user_id=str(eventUser.id) if eventUser else None) runEnv = normalize_run_envelope(runEnv, user_id=str(eventUser.id) if eventUser else None)
_wfLabel = wf.get("label") if isinstance(wf, dict) else getattr(wf, "label", None)
result = await executeGraph( result = await executeGraph(
graph=wf["graph"], graph=wf["graph"],
services=services, services=services,
@ -251,6 +253,7 @@ class WorkflowScheduler:
mandateId=mandateId, mandateId=mandateId,
automation2_interface=iface, automation2_interface=iface,
run_envelope=runEnv, run_envelope=runEnv,
label=_wfLabel,
) )
logger.info( logger.info(
"WorkflowScheduler: executed workflow %s success=%s paused=%s", "WorkflowScheduler: executed workflow %s success=%s paused=%s",