Merge pull request #131 from valueonag/feat/demo-system-readieness
Feat/demo system readieness
This commit is contained in:
commit
99b9dc97b7
36 changed files with 3790 additions and 75 deletions
12
app.py
12
app.py
|
|
@ -380,6 +380,15 @@ async def lifespan(app: FastAPI):
|
|||
from modules.shared.auditLogger import registerAuditLogCleanupScheduler
|
||||
registerAuditLogCleanupScheduler()
|
||||
|
||||
# Recover background jobs that were RUNNING when the previous worker died
|
||||
try:
|
||||
from modules.serviceCenter.services.serviceBackgroundJobs.mainBackgroundJobService import (
|
||||
recoverInterruptedJobs,
|
||||
)
|
||||
recoverInterruptedJobs()
|
||||
except Exception as e:
|
||||
logger.warning(f"BackgroundJob recovery failed (non-critical): {e}")
|
||||
|
||||
yield
|
||||
|
||||
# --- Stop Managers ---
|
||||
|
|
@ -627,6 +636,9 @@ app.include_router(billingRouter)
|
|||
from modules.routes.routeSubscription import router as subscriptionRouter
|
||||
app.include_router(subscriptionRouter)
|
||||
|
||||
from modules.routes.routeJobs import router as jobsRouter
|
||||
app.include_router(jobsRouter)
|
||||
|
||||
# ============================================================================
|
||||
# SYSTEM ROUTES (Navigation, etc.)
|
||||
# ============================================================================
|
||||
|
|
|
|||
125
demoData/pwg/_generateScans.py
Normal file
125
demoData/pwg/_generateScans.py
Normal file
|
|
@ -0,0 +1,125 @@
|
|||
"""Generate the 3 fictitious PWG scan PDFs used by the pilot demo.
|
||||
|
||||
Run: python _generateScans.py
|
||||
|
||||
Produces:
|
||||
scans/mieter01-bestaetigt.pdf -> all fields ok, signed
|
||||
scans/mieter02-abweichung-betrag.pdf -> rent on scan != journal lines
|
||||
scans/mieter03-keine-unterschrift.pdf -> hasSignature=false
|
||||
"""
|
||||
from pathlib import Path
|
||||
|
||||
from reportlab.lib.pagesizes import A4
|
||||
from reportlab.pdfgen import canvas
|
||||
|
||||
|
||||
def _renderForm(outPath: Path, *, tenantName: str, tenantAddress: str,
|
||||
objectAddress: str, period: str, rentChf: float,
|
||||
tenantNotes: str, hasSignature: bool) -> None:
|
||||
c = canvas.Canvas(str(outPath), pagesize=A4)
|
||||
w, h = A4
|
||||
margin = 60
|
||||
y = h - margin
|
||||
|
||||
c.setFont("Helvetica-Bold", 16)
|
||||
c.drawString(margin, y, "Stiftung PWG")
|
||||
y -= 18
|
||||
c.setFont("Helvetica", 10)
|
||||
c.drawString(margin, y, "Postfach 1234 · 8000 Zürich")
|
||||
y -= 30
|
||||
|
||||
c.setFont("Helvetica-Bold", 14)
|
||||
c.drawString(margin, y, f"Jahresmietzinsbestätigung {period}")
|
||||
y -= 28
|
||||
|
||||
c.setFont("Helvetica", 11)
|
||||
c.drawString(margin, y, "Sehr geehrte Damen und Herren,")
|
||||
y -= 18
|
||||
c.drawString(margin, y, "hiermit bestätige ich die nachstehenden Angaben für die o.g. Periode:")
|
||||
y -= 28
|
||||
|
||||
rows = [
|
||||
("Mieter / in:", tenantName),
|
||||
("Wohnadresse:", tenantAddress),
|
||||
("Mietobjekt:", objectAddress),
|
||||
("Periode:", period),
|
||||
("Bestätigter Mietzins (CHF, monatlich):", f"{rentChf:.2f}"),
|
||||
("Anmerkungen:", tenantNotes or "(keine)"),
|
||||
]
|
||||
c.setFont("Helvetica", 11)
|
||||
for lab, val in rows:
|
||||
c.drawString(margin, y, lab)
|
||||
c.drawString(margin + 220, y, str(val))
|
||||
y -= 18
|
||||
y -= 28
|
||||
|
||||
c.drawString(margin, y, "Ort, Datum: Zürich, 12.04.2026")
|
||||
y -= 28
|
||||
c.drawString(margin, y, "Unterschrift Mieter / in:")
|
||||
y -= 36
|
||||
|
||||
if hasSignature:
|
||||
c.setFont("Helvetica-Oblique", 14)
|
||||
c.drawString(margin + 220, y + 24, _signatureFor(tenantName))
|
||||
else:
|
||||
c.setFont("Helvetica", 9)
|
||||
c.drawString(margin + 220, y + 24, "(handschriftlich)")
|
||||
c.line(margin + 215, y + 22, margin + 415, y + 22)
|
||||
|
||||
c.showPage()
|
||||
c.save()
|
||||
|
||||
|
||||
def _signatureFor(name: str) -> str:
|
||||
parts = name.split()
|
||||
if not parts:
|
||||
return "____"
|
||||
return parts[0][0] + ". " + parts[-1]
|
||||
|
||||
|
||||
def _main() -> None:
|
||||
here = Path(__file__).resolve().parent
|
||||
outDir = here / "scans"
|
||||
outDir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# 1) bestätigt — exakt passend zu seed (Anna Müller, 1850.00)
|
||||
_renderForm(
|
||||
outDir / "mieter01-bestaetigt.pdf",
|
||||
tenantName="Anna Müller",
|
||||
tenantAddress="Bahnhofstrasse 12, 8001 Zürich",
|
||||
objectAddress="Bahnhofstrasse 12, 3.OG, 8001 Zürich",
|
||||
period="2026",
|
||||
rentChf=1850.00,
|
||||
tenantNotes="",
|
||||
hasSignature=True,
|
||||
)
|
||||
|
||||
# 2) abweichung_betrag — Mieter trägt 2300 ein, Buchhaltung sagt 2200
|
||||
_renderForm(
|
||||
outDir / "mieter02-abweichung-betrag.pdf",
|
||||
tenantName="Beat Schneider",
|
||||
tenantAddress="Limmatquai 45, 8001 Zürich",
|
||||
objectAddress="Limmatquai 45, 1.OG, 8001 Zürich",
|
||||
period="2026",
|
||||
rentChf=2300.00,
|
||||
tenantNotes="Mietzins gemäss letzter Indexanpassung — bitte prüfen.",
|
||||
hasSignature=True,
|
||||
)
|
||||
|
||||
# 3) keine_unterschrift — Carla Weber, 1650 stimmt, aber nicht unterschrieben
|
||||
_renderForm(
|
||||
outDir / "mieter03-keine-unterschrift.pdf",
|
||||
tenantName="Carla Weber",
|
||||
tenantAddress="Seestrasse 88, 8002 Zürich",
|
||||
objectAddress="Seestrasse 88, EG, 8002 Zürich",
|
||||
period="2026",
|
||||
rentChf=1650.00,
|
||||
tenantNotes="",
|
||||
hasSignature=False,
|
||||
)
|
||||
|
||||
print(f"Generated 3 scans in {outDir}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
_main()
|
||||
68
demoData/pwg/_seedTrusteeData.json
Normal file
68
demoData/pwg/_seedTrusteeData.json
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
{
|
||||
"_comment": "PWG-Demo Seed-Daten — fiktive Mieter (Debitoren) und Mietzins-Buchungen 2026 für Trustee-Feature. Wird von pwgDemo2026.py idempotent geladen.",
|
||||
"rentAccount": "6000",
|
||||
"rentAccountLabel": "Mietzinsertrag Wohnen",
|
||||
"year": 2026,
|
||||
"tenants": [
|
||||
{
|
||||
"contactNumber": "10001",
|
||||
"name": "Anna Müller",
|
||||
"address": "Bahnhofstrasse 12",
|
||||
"zip": "8001",
|
||||
"city": "Zürich",
|
||||
"country": "CH",
|
||||
"email": "anna.mueller@example.ch",
|
||||
"monthlyRentChf": 1850.00,
|
||||
"scenario": "bestaetigt",
|
||||
"_note": "Stimmt exakt — erwarteter Pilot-Status 'bestaetigt'"
|
||||
},
|
||||
{
|
||||
"contactNumber": "10002",
|
||||
"name": "Beat Schneider",
|
||||
"address": "Limmatquai 45",
|
||||
"zip": "8001",
|
||||
"city": "Zürich",
|
||||
"country": "CH",
|
||||
"email": "beat.schneider@example.ch",
|
||||
"monthlyRentChf": 2200.00,
|
||||
"scenario": "abweichung_betrag",
|
||||
"_note": "Scan zeigt 2300 CHF/Monat (Mieter nicht über Erhöhung informiert) — erwarteter Status 'abweichung_betrag'"
|
||||
},
|
||||
{
|
||||
"contactNumber": "10003",
|
||||
"name": "Carla Weber",
|
||||
"address": "Seestrasse 88",
|
||||
"zip": "8002",
|
||||
"city": "Zürich",
|
||||
"country": "CH",
|
||||
"email": "carla.weber@example.ch",
|
||||
"monthlyRentChf": 1650.00,
|
||||
"scenario": "keine_unterschrift",
|
||||
"_note": "Scan ist ohne Unterschrift — erwarteter Status 'keine_unterschrift'"
|
||||
},
|
||||
{
|
||||
"contactNumber": "10004",
|
||||
"name": "Daniel Keller",
|
||||
"address": "Hardturmstrasse 200",
|
||||
"zip": "8005",
|
||||
"city": "Zürich",
|
||||
"country": "CH",
|
||||
"email": "daniel.keller@example.ch",
|
||||
"monthlyRentChf": 2450.00,
|
||||
"scenario": "kein_scan",
|
||||
"_note": "Hat noch nicht zurückgesendet — taucht nicht im Pilot-Run auf"
|
||||
},
|
||||
{
|
||||
"contactNumber": "10005",
|
||||
"name": "Elena Fischer",
|
||||
"address": "Rämistrasse 71",
|
||||
"zip": "8001",
|
||||
"city": "Zürich",
|
||||
"country": "CH",
|
||||
"email": "elena.fischer@example.ch",
|
||||
"monthlyRentChf": 1990.00,
|
||||
"scenario": "kein_scan",
|
||||
"_note": "Reserve-Mieter für spätere Demo-Erweiterungen"
|
||||
}
|
||||
]
|
||||
}
|
||||
80
demoData/pwg/scans/mieter01-bestaetigt.pdf
Normal file
80
demoData/pwg/scans/mieter01-bestaetigt.pdf
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
%PDF-1.3
|
||||
%“Œ‹ž ReportLab Generated PDF document http://www.reportlab.com
|
||||
1 0 obj
|
||||
<<
|
||||
/F1 2 0 R /F2 3 0 R /F3 4 0 R
|
||||
>>
|
||||
endobj
|
||||
2 0 obj
|
||||
<<
|
||||
/BaseFont /Helvetica /Encoding /WinAnsiEncoding /Name /F1 /Subtype /Type1 /Type /Font
|
||||
>>
|
||||
endobj
|
||||
3 0 obj
|
||||
<<
|
||||
/BaseFont /Helvetica-Bold /Encoding /WinAnsiEncoding /Name /F2 /Subtype /Type1 /Type /Font
|
||||
>>
|
||||
endobj
|
||||
4 0 obj
|
||||
<<
|
||||
/BaseFont /Helvetica-Oblique /Encoding /WinAnsiEncoding /Name /F3 /Subtype /Type1 /Type /Font
|
||||
>>
|
||||
endobj
|
||||
5 0 obj
|
||||
<<
|
||||
/Contents 9 0 R /MediaBox [ 0 0 595.2756 841.8898 ] /Parent 8 0 R /Resources <<
|
||||
/Font 1 0 R /ProcSet [ /PDF /Text /ImageB /ImageC /ImageI ]
|
||||
>> /Rotate 0 /Trans <<
|
||||
|
||||
>>
|
||||
/Type /Page
|
||||
>>
|
||||
endobj
|
||||
6 0 obj
|
||||
<<
|
||||
/PageMode /UseNone /Pages 8 0 R /Type /Catalog
|
||||
>>
|
||||
endobj
|
||||
7 0 obj
|
||||
<<
|
||||
/Author (anonymous) /CreationDate (D:20260420002638-01'00') /Creator (ReportLab PDF Library - www.reportlab.com) /Keywords () /ModDate (D:20260420002638-01'00') /Producer (ReportLab PDF Library - www.reportlab.com)
|
||||
/Subject (unspecified) /Title (untitled) /Trapped /False
|
||||
>>
|
||||
endobj
|
||||
8 0 obj
|
||||
<<
|
||||
/Count 1 /Kids [ 5 0 R ] /Type /Pages
|
||||
>>
|
||||
endobj
|
||||
9 0 obj
|
||||
<<
|
||||
/Filter [ /ASCII85Decode /FlateDecode ] /Length 605
|
||||
>>
|
||||
stream
|
||||
Gat$u_2b!='YNm9]OOh_`s.;Y\Ku+!X/aQ:.b.-A/gNQpRp[N%>l++NBXO3A:fg1WZM\=sbo<,Q[3'29Es](/@'O@[I#'OcS8a:5_Y<8fh=lJSmJ`RLh*-1@#UuhX,=8I86m^'+)4?n^b2N-d3/?],U+[TZQ@ZJ8,<0,Yi>eoPABDBdLBA$k+0Ik*9&VW;<a<ghE=ZquneO>5@Mh:Ji.!#+`k%CJr^^%]YVpL:\WM.^h5>]]TUiL[_3bUPl*u7tL)fSq&ABG:._)GlSks3%?6@q<#fWg]-m\(U)K<V<fZ#)"#g-=L)_=g^(43+QjCJ9nCJK5L+ut3!C0@CCq/eFOEnq$^=I2k%!i4NY9D?D2a]>AD%ZQqC(%lgdge#da<N%1N;lT3hpLr?F>uIVqb%d[b>@jSh2'HC<+`WqKT\j."HGbZ/,'GI@L]d5Gq#Bu(=GEa'j*$L`Rna35kpC)q-)VX=iB?Q>cb;U14X_hGR&cJicR65LLeK?KTlcegm"M*#IBaRqVfL6:M.[Wh$KLqAK0+g#D*30YbcTZBVL*J+KQ8j4'43h]r`7UAqHR_2FMW4U(].V2NG5u__ND;RK6I;:rW6,"=tf~>endstream
|
||||
endobj
|
||||
xref
|
||||
0 10
|
||||
0000000000 65535 f
|
||||
0000000073 00000 n
|
||||
0000000124 00000 n
|
||||
0000000231 00000 n
|
||||
0000000343 00000 n
|
||||
0000000458 00000 n
|
||||
0000000661 00000 n
|
||||
0000000729 00000 n
|
||||
0000001025 00000 n
|
||||
0000001084 00000 n
|
||||
trailer
|
||||
<<
|
||||
/ID
|
||||
[<621e745f4154d3ac7a42de07bdd8794e><621e745f4154d3ac7a42de07bdd8794e>]
|
||||
% ReportLab generated PDF document -- digest (http://www.reportlab.com)
|
||||
|
||||
/Info 7 0 R
|
||||
/Root 6 0 R
|
||||
/Size 10
|
||||
>>
|
||||
startxref
|
||||
1779
|
||||
%%EOF
|
||||
80
demoData/pwg/scans/mieter02-abweichung-betrag.pdf
Normal file
80
demoData/pwg/scans/mieter02-abweichung-betrag.pdf
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
%PDF-1.3
|
||||
%“Œ‹ž ReportLab Generated PDF document http://www.reportlab.com
|
||||
1 0 obj
|
||||
<<
|
||||
/F1 2 0 R /F2 3 0 R /F3 4 0 R
|
||||
>>
|
||||
endobj
|
||||
2 0 obj
|
||||
<<
|
||||
/BaseFont /Helvetica /Encoding /WinAnsiEncoding /Name /F1 /Subtype /Type1 /Type /Font
|
||||
>>
|
||||
endobj
|
||||
3 0 obj
|
||||
<<
|
||||
/BaseFont /Helvetica-Bold /Encoding /WinAnsiEncoding /Name /F2 /Subtype /Type1 /Type /Font
|
||||
>>
|
||||
endobj
|
||||
4 0 obj
|
||||
<<
|
||||
/BaseFont /Helvetica-Oblique /Encoding /WinAnsiEncoding /Name /F3 /Subtype /Type1 /Type /Font
|
||||
>>
|
||||
endobj
|
||||
5 0 obj
|
||||
<<
|
||||
/Contents 9 0 R /MediaBox [ 0 0 595.2756 841.8898 ] /Parent 8 0 R /Resources <<
|
||||
/Font 1 0 R /ProcSet [ /PDF /Text /ImageB /ImageC /ImageI ]
|
||||
>> /Rotate 0 /Trans <<
|
||||
|
||||
>>
|
||||
/Type /Page
|
||||
>>
|
||||
endobj
|
||||
6 0 obj
|
||||
<<
|
||||
/PageMode /UseNone /Pages 8 0 R /Type /Catalog
|
||||
>>
|
||||
endobj
|
||||
7 0 obj
|
||||
<<
|
||||
/Author (anonymous) /CreationDate (D:20260420002638-01'00') /Creator (ReportLab PDF Library - www.reportlab.com) /Keywords () /ModDate (D:20260420002638-01'00') /Producer (ReportLab PDF Library - www.reportlab.com)
|
||||
/Subject (unspecified) /Title (untitled) /Trapped /False
|
||||
>>
|
||||
endobj
|
||||
8 0 obj
|
||||
<<
|
||||
/Count 1 /Kids [ 5 0 R ] /Type /Pages
|
||||
>>
|
||||
endobj
|
||||
9 0 obj
|
||||
<<
|
||||
/Filter [ /ASCII85Decode /FlateDecode ] /Length 645
|
||||
>>
|
||||
stream
|
||||
Gat$u;/=o?&:X)O\Araq7?SD+a]l*Rm7'_NodC6`..P8W>KNG3^t>i_Ce?:WES)tdE9P%)]=[OMJ;0,-k>h\dEgU/f?l\_X0L5j&\*u7>lf&mdg;Ok]pMom2O]%QZN+CTcK3Z=iK3(.L2\iD9Y:h#JK)F(Z;IH.<dKiU:oJl0Vq46<liGp-8i9;4:8h'ZhP.@f3>9AG%RA'dZ8Tl(;M;Z.lg7m%'r?#V+#+[C[+hXgYl(%>:Lj%@c-Y$GTZ`"76>Gs6G*oW%,BOGaN\3XoX9SV137[hSKN*;q*b!REa+VYE_685)jc=;j2%+poDP+1suFj9/'1o)>"7]VsjQiC>b3a;5CmR!8e_A&5;*gb0YK9R*C%hIFKTIS?Lf./'.4>sU0AXJ?:'Ki%F;f7lOdf8#o"_'B(%Dp*n'!q.>=Br1X_In@U1sS''A`Wjehl1+L*1tN,2no:=PnEL:G0[+39KTbr2jZmOrqY\k!kL,7^BBtD`<kr5,#9U5P`F4jdI8fK7f+/@#uCA.ORb$/6JX,8%UMJt<W=X1r3nMdd^aN[$dRq>;*O?sX)7aI6USk9`Ike3IM.son+Et.<>Zi+<03="'oQ`85>71#[^?PT*K9I,oI;ls,.0QF=X7oSNc#8qr<64SCKL~>endstream
|
||||
endobj
|
||||
xref
|
||||
0 10
|
||||
0000000000 65535 f
|
||||
0000000073 00000 n
|
||||
0000000124 00000 n
|
||||
0000000231 00000 n
|
||||
0000000343 00000 n
|
||||
0000000458 00000 n
|
||||
0000000661 00000 n
|
||||
0000000729 00000 n
|
||||
0000001025 00000 n
|
||||
0000001084 00000 n
|
||||
trailer
|
||||
<<
|
||||
/ID
|
||||
[<c69a670760cbecedce0d0f0aa897bce2><c69a670760cbecedce0d0f0aa897bce2>]
|
||||
% ReportLab generated PDF document -- digest (http://www.reportlab.com)
|
||||
|
||||
/Info 7 0 R
|
||||
/Root 6 0 R
|
||||
/Size 10
|
||||
>>
|
||||
startxref
|
||||
1819
|
||||
%%EOF
|
||||
74
demoData/pwg/scans/mieter03-keine-unterschrift.pdf
Normal file
74
demoData/pwg/scans/mieter03-keine-unterschrift.pdf
Normal file
|
|
@ -0,0 +1,74 @@
|
|||
%PDF-1.3
|
||||
%“Œ‹ž ReportLab Generated PDF document http://www.reportlab.com
|
||||
1 0 obj
|
||||
<<
|
||||
/F1 2 0 R /F2 3 0 R
|
||||
>>
|
||||
endobj
|
||||
2 0 obj
|
||||
<<
|
||||
/BaseFont /Helvetica /Encoding /WinAnsiEncoding /Name /F1 /Subtype /Type1 /Type /Font
|
||||
>>
|
||||
endobj
|
||||
3 0 obj
|
||||
<<
|
||||
/BaseFont /Helvetica-Bold /Encoding /WinAnsiEncoding /Name /F2 /Subtype /Type1 /Type /Font
|
||||
>>
|
||||
endobj
|
||||
4 0 obj
|
||||
<<
|
||||
/Contents 8 0 R /MediaBox [ 0 0 595.2756 841.8898 ] /Parent 7 0 R /Resources <<
|
||||
/Font 1 0 R /ProcSet [ /PDF /Text /ImageB /ImageC /ImageI ]
|
||||
>> /Rotate 0 /Trans <<
|
||||
|
||||
>>
|
||||
/Type /Page
|
||||
>>
|
||||
endobj
|
||||
5 0 obj
|
||||
<<
|
||||
/PageMode /UseNone /Pages 7 0 R /Type /Catalog
|
||||
>>
|
||||
endobj
|
||||
6 0 obj
|
||||
<<
|
||||
/Author (anonymous) /CreationDate (D:20260420002638-01'00') /Creator (ReportLab PDF Library - www.reportlab.com) /Keywords () /ModDate (D:20260420002638-01'00') /Producer (ReportLab PDF Library - www.reportlab.com)
|
||||
/Subject (unspecified) /Title (untitled) /Trapped /False
|
||||
>>
|
||||
endobj
|
||||
7 0 obj
|
||||
<<
|
||||
/Count 1 /Kids [ 4 0 R ] /Type /Pages
|
||||
>>
|
||||
endobj
|
||||
8 0 obj
|
||||
<<
|
||||
/Filter [ /ASCII85Decode /FlateDecode ] /Length 629
|
||||
>>
|
||||
stream
|
||||
Gat$u9okbt'YNU1]OOh_epoJMDS+[%:t8PjKdtN.M\BF4Rp[N%>l*c%BN.Y4;--:2/AITuo>V8jfI,n>q[27)KtHLJJe6?4"Os?2IYXhCeua]=Y\nmRL])O<JRATn*r)6Y3M[D62b"k4=V\0t^+:E*JFq#l,g/G6U^8"Vof29K0aFs:mH03k(:"'&+U$Z..%si4bA2&IPBPm.kMu&o"92)[)Oj?nq'B%I_o?4!V+)6&oT4`B7!m:s7oM%%fPppb%0bIp622oZ<,bku]V<uU]HO_9_0FC<PS/*b%63>YCu^UM"D+]L%$mi4Mg_c9Z*W=TB25q0p'VtnW+DO[lI4"^GhEIMZS%r+4-427/j88s-'(Bb"Di(5HFd8E`+E5?9&t.@c*c7+LKh&MCQ'%;!]]r.FG*TWE*:(lfNGob^n\G/l;h/P5/$kYZ($gE_$jH%mJdC=!KQ!_4S3&rBD-KT3+VX$f4PVo=p]8U1:+q/mK$e4@cA%V:!]??hl@+Wd@MMo'pV'V2F!p8Qn>0Qg]@?"`j7&8S?#Y.\n>pfT2>Qb:NYh\qGUODRXM1&D$AAhDi`&H4"4_,<b\%s4E?o?Kuu'YIscD>'nf.p$SEU*J@`KCfZ[as)_0uXW;~>endstream
|
||||
endobj
|
||||
xref
|
||||
0 9
|
||||
0000000000 65535 f
|
||||
0000000073 00000 n
|
||||
0000000114 00000 n
|
||||
0000000221 00000 n
|
||||
0000000333 00000 n
|
||||
0000000536 00000 n
|
||||
0000000604 00000 n
|
||||
0000000900 00000 n
|
||||
0000000959 00000 n
|
||||
trailer
|
||||
<<
|
||||
/ID
|
||||
[<9b415a84726399a7dd006f60068c5362><9b415a84726399a7dd006f60068c5362>]
|
||||
% ReportLab generated PDF document -- digest (http://www.reportlab.com)
|
||||
|
||||
/Info 6 0 R
|
||||
/Root 5 0 R
|
||||
/Size 9
|
||||
>>
|
||||
startxref
|
||||
1678
|
||||
%%EOF
|
||||
152
demoData/workflows/pwg-mietzinsbestaetigung-pilot.workflow.json
Normal file
152
demoData/workflows/pwg-mietzinsbestaetigung-pilot.workflow.json
Normal file
|
|
@ -0,0 +1,152 @@
|
|||
{
|
||||
"$schemaVersion": "1.0",
|
||||
"$kind": "poweron.workflow",
|
||||
"$exportedAt": "2026-04-16T10:00:00Z",
|
||||
"$gatewayVersion": "demo-2026-04",
|
||||
"label": "PWG Pilot: Jahresmietzinsbestätigung",
|
||||
"description": "Verarbeitet gescannte Rückantworten der Jahresmietzinsbestätigungen: OCR, Abgleich gegen Trustee-DB (Mieter + Mietzins-Buchungen), AI-Klassifikation pro Scan und Zustellung als CSV-Anhang im Outlook-Draft an die Sachbearbeitung. Pilot-Lieferung Sommer 2026.",
|
||||
"tags": ["pwg", "pilot", "mietzins", "trustee", "ocr"],
|
||||
"templateScope": "instance",
|
||||
"sharedReadOnly": false,
|
||||
"notifyOnFailure": true,
|
||||
"graph": {
|
||||
"nodes": [
|
||||
{
|
||||
"id": "n1",
|
||||
"type": "trigger.manual",
|
||||
"x": 50,
|
||||
"y": 200,
|
||||
"title": "Manueller Start",
|
||||
"parameters": {}
|
||||
},
|
||||
{
|
||||
"id": "n2",
|
||||
"type": "sharepoint.listFiles",
|
||||
"x": 320,
|
||||
"y": 200,
|
||||
"title": "Scan-Ordner auflisten",
|
||||
"parameters": {
|
||||
"connectionReference": "",
|
||||
"pathQuery": "PWG/Mietzinsbestaetigungen/Scans-Eingang"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "n3",
|
||||
"type": "flow.loop",
|
||||
"x": 590,
|
||||
"y": 200,
|
||||
"title": "Pro Scan-Dokument",
|
||||
"parameters": {
|
||||
"level": 1,
|
||||
"concurrency": 1
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "n4",
|
||||
"type": "sharepoint.downloadFile",
|
||||
"x": 860,
|
||||
"y": 200,
|
||||
"title": "PDF/Bild laden",
|
||||
"parameters": {
|
||||
"connectionReference": "",
|
||||
"pathQuery": "{{loop.item.path}}"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "n5",
|
||||
"type": "trustee.extractFromFiles",
|
||||
"x": 1130,
|
||||
"y": 200,
|
||||
"title": "OCR & Felder extrahieren",
|
||||
"parameters": {
|
||||
"featureInstanceId": "",
|
||||
"prompt": "Extrahiere die folgenden Felder aus dieser Jahresmietzinsbestätigung und antworte als JSON: tenantName (string), tenantAddress (string), objectAddress (string), confirmedRentAmount (number|null in CHF), currency ('CHF'), period (string z.B. '2026'), tenantNotes (string|null - alle handschriftlichen Anmerkungen oder Korrekturen), hasSignature (boolean - ist eine Unterschrift vorhanden?), documentDate (ISO date|null), ocrConfidence (number 0-1)."
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "n6",
|
||||
"type": "trustee.queryData",
|
||||
"x": 1400,
|
||||
"y": 200,
|
||||
"title": "Referenzdaten Trustee-DB",
|
||||
"parameters": {
|
||||
"featureInstanceId": "",
|
||||
"mode": "lookup",
|
||||
"entity": "tenantWithRent",
|
||||
"tenantNameRef": "{{n5.output.tenantName}}",
|
||||
"tenantAddressRef": "{{n5.output.tenantAddress}}",
|
||||
"period": "{{n5.output.period}}",
|
||||
"rentAccountPattern": "6000-6099"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "n7",
|
||||
"type": "ai.prompt",
|
||||
"x": 1670,
|
||||
"y": 200,
|
||||
"title": "Prüfung & Klassifikation",
|
||||
"parameters": {
|
||||
"outputFormat": "json",
|
||||
"simpleMode": false,
|
||||
"documentList": "{{n5.output}}",
|
||||
"context": "{{n6.output}}",
|
||||
"aiPrompt": "Du bist ein Sachbearbeitungs-Assistent der Stiftung PWG. Deine Aufgabe ist es, eine eingescannte und OCR-extrahierte Jahresmietzinsbestätigung gegen die Stammdaten der Buchhaltung (Trustee-Feature) abzugleichen.\n\nEingaben:\n1. SCAN_DATEN (extrahiert per OCR aus dem Rückantwort-Dokument):\n{{scan}}\n\n2. REFERENZ_DATEN (aus Trustee-DB für diesen Mieter; ggf. leer wenn nicht eindeutig zuordenbar):\n{{reference}}\n\nVorgehen:\n1. Prüfe Identität: Stimmt SCAN_DATEN.tenantName + SCAN_DATEN.tenantAddress mit einem Datensatz in REFERENZ_DATEN.contacts überein? (Toleranz: kleine Tippfehler, Umlaute, Abkürzungen).\n2. Prüfe Mietzinsbetrag: Stimmt SCAN_DATEN.confirmedRentAmount mit dem aus REFERENZ_DATEN.expectedRentAmount erwarteten Mietzins überein? (Toleranz: ±1 CHF Rundung).\n3. Prüfe Unterschrift: hasSignature muss true sein.\n4. Prüfe OCR-Qualität: ocrConfidence < 0.6 -> 'unleserlich'.\n\nKlassifiziere in EXAKT EINEN Status:\n- 'bestaetigt': Identität stimmt, Betrag stimmt, Unterschrift vorhanden.\n- 'abweichung_betrag': Identität ok, Unterschrift ok, Betrag weicht ab.\n- 'abweichung_anmerkung': tenantNotes enthält substantielle Anmerkung (nicht leer, nicht reine Bestätigung).\n- 'keine_unterschrift': hasSignature == false.\n- 'unleserlich': OCR-Qualität ungenügend ODER Pflichtfelder fehlen.\n- 'kein_match': Mieter nicht in REFERENZ_DATEN auffindbar.\n\nBei Status != 'bestaetigt': Generiere einen kurzen, höflichen Antwortvorschlag (deutsch, Sie-Form, max. 5 Sätze, PWG-Stil) für die Sachbearbeitung. 
Bei 'bestaetigt': antwortVorschlag = null.\n\nAntworte AUSSCHLIESSLICH als JSON nach folgendem Schema:\n{\n \"tenantName\": string,\n \"objectAddress\": string,\n \"status\": \"bestaetigt\" | \"abweichung_betrag\" | \"abweichung_anmerkung\" | \"keine_unterschrift\" | \"unleserlich\" | \"kein_match\",\n \"scanRentAmount\": number | null,\n \"expectedRentAmount\": number | null,\n \"delta\": number | null,\n \"tenantNotes\": string | null,\n \"antwortVorschlag\": string | null,\n \"matchConfidence\": number,\n \"auditEvidence\": string\n}"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "n8",
|
||||
"type": "data.aggregate",
|
||||
"x": 1940,
|
||||
"y": 200,
|
||||
"title": "Ergebnisse sammeln (im Loop)",
|
||||
"parameters": {
|
||||
"mode": "collect"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "n9",
|
||||
"type": "data.consolidate",
|
||||
"x": 2210,
|
||||
"y": 200,
|
||||
"title": "CSV bauen (nach Loop)",
|
||||
"parameters": {
|
||||
"mode": "csvJoin",
|
||||
"separator": "\n"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "n10",
|
||||
"type": "email.draftEmail",
|
||||
"x": 2480,
|
||||
"y": 200,
|
||||
"title": "Draft an Sachbearbeitung",
|
||||
"parameters": {
|
||||
"connectionReference": "",
|
||||
"to": "sachbearbeiter@pwg.ch",
|
||||
"subject": "Mietzinsbestätigungen Auswertung {{currentDate}}",
|
||||
"body": "Hallo,\n\nim Anhang die Auswertung der eingegangenen Jahresmietzinsbestätigungen.\nPro Scan eine Zeile mit Status, Betragsabgleich und (bei Abweichung) Antwortvorschlag.\n\nBitte die Zeilen mit Status != 'bestaetigt' manuell sichten.\n\nFreundliche Grüße,\nPWG Automation",
|
||||
"emailStyle": "business",
|
||||
"attachments": [
|
||||
{
|
||||
"name": "mietzinsbestaetigungen-auswertung",
|
||||
"mimeType": "text/csv",
|
||||
"csvFromVariable": "n9.output"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"connections": [
|
||||
{"source": "n1", "target": "n2", "sourceOutput": 0, "targetInput": 0},
|
||||
{"source": "n2", "target": "n3", "sourceOutput": 0, "targetInput": 0},
|
||||
{"source": "n3", "target": "n4", "sourceOutput": 0, "targetInput": 0},
|
||||
{"source": "n4", "target": "n5", "sourceOutput": 0, "targetInput": 0},
|
||||
{"source": "n5", "target": "n6", "sourceOutput": 0, "targetInput": 0},
|
||||
{"source": "n6", "target": "n7", "sourceOutput": 0, "targetInput": 0},
|
||||
{"source": "n7", "target": "n8", "sourceOutput": 0, "targetInput": 0},
|
||||
{"source": "n8", "target": "n9", "sourceOutput": 0, "targetInput": 0},
|
||||
{"source": "n9", "target": "n10", "sourceOutput": 0, "targetInput": 0}
|
||||
]
|
||||
},
|
||||
"invocations": []
|
||||
}
|
||||
130
modules/datamodels/datamodelBackgroundJob.py
Normal file
130
modules/datamodels/datamodelBackgroundJob.py
Normal file
|
|
@ -0,0 +1,130 @@
|
|||
# Copyright (c) 2025 Patrick Motsch
|
||||
# All rights reserved.
|
||||
"""Background job models: generic, reusable infrastructure for long-running tasks.
|
||||
|
||||
A `BackgroundJob` record tracks the lifecycle of one async task that must not block
|
||||
the calling HTTP request. Any caller (HTTP route, AI tool, scheduled task) can:
|
||||
|
||||
1. Register a handler once via `registerJobHandler(jobType, handler)`.
|
||||
2. Submit work via `startJob(jobType, payload, ...)` which returns a `jobId`
|
||||
immediately and runs the handler in the background.
|
||||
3. Poll `getJobStatus(jobId)` (HTTP `GET /api/jobs/{jobId}`) until `status` is
|
||||
one of {SUCCESS, ERROR, CANCELLED}.
|
||||
|
||||
See `modules.serviceCenter.services.serviceBackgroundJobs.mainBackgroundJobService`.
|
||||
"""
|
||||
|
||||
from typing import Any, Dict, Optional
|
||||
from enum import Enum
|
||||
from datetime import datetime, timezone
|
||||
import uuid
|
||||
|
||||
from pydantic import Field
|
||||
|
||||
from modules.datamodels.datamodelBase import PowerOnModel
|
||||
from modules.shared.i18nRegistry import i18nModel
|
||||
|
||||
|
||||
class BackgroundJobStatusEnum(str, Enum):
|
||||
"""Lifecycle status of a background job."""
|
||||
PENDING = "PENDING"
|
||||
RUNNING = "RUNNING"
|
||||
SUCCESS = "SUCCESS"
|
||||
ERROR = "ERROR"
|
||||
CANCELLED = "CANCELLED"
|
||||
|
||||
|
||||
TERMINAL_JOB_STATUSES = {
|
||||
BackgroundJobStatusEnum.SUCCESS,
|
||||
BackgroundJobStatusEnum.ERROR,
|
||||
BackgroundJobStatusEnum.CANCELLED,
|
||||
}
|
||||
|
||||
|
||||
@i18nModel("Hintergrund-Job")
|
||||
class BackgroundJob(PowerOnModel):
|
||||
"""Generic record describing a long-running asynchronous task.
|
||||
|
||||
Scope: the combination of `mandateId` and optionally `featureInstanceId`
|
||||
is used for access control on `GET /api/jobs/{jobId}`.
|
||||
"""
|
||||
|
||||
id: str = Field(
|
||||
default_factory=lambda: str(uuid.uuid4()),
|
||||
description="Primary key",
|
||||
json_schema_extra={"label": "ID"},
|
||||
)
|
||||
jobType: str = Field(
|
||||
...,
|
||||
description="Handler key registered via registerJobHandler() (e.g. 'trusteeAccountingSync')",
|
||||
json_schema_extra={"label": "Typ"},
|
||||
)
|
||||
mandateId: Optional[str] = Field(
|
||||
None,
|
||||
description="Mandate scope (used for access checks). None for system-wide jobs.",
|
||||
json_schema_extra={
|
||||
"label": "Mandanten-ID",
|
||||
"fk_target": {"db": "poweron_app", "table": "Mandate"},
|
||||
},
|
||||
)
|
||||
featureInstanceId: Optional[str] = Field(
|
||||
None,
|
||||
description="Feature instance scope (optional)",
|
||||
json_schema_extra={
|
||||
"label": "Feature-Instanz",
|
||||
"fk_target": {"db": "poweron_app", "table": "FeatureInstance"},
|
||||
},
|
||||
)
|
||||
triggeredBy: Optional[str] = Field(
|
||||
None,
|
||||
description="UserId or 'ai-tool:<toolName>' / 'scheduler:<jobName>'",
|
||||
json_schema_extra={"label": "Ausgeloest von"},
|
||||
)
|
||||
|
||||
status: str = Field(
|
||||
default=BackgroundJobStatusEnum.PENDING.value,
|
||||
description="Current lifecycle status",
|
||||
json_schema_extra={"label": "Status"},
|
||||
)
|
||||
progress: int = Field(
|
||||
default=0,
|
||||
description="Progress 0..100 (best-effort; may stay 0 for handlers that cannot estimate)",
|
||||
json_schema_extra={"label": "Fortschritt"},
|
||||
)
|
||||
progressMessage: Optional[str] = Field(
|
||||
None,
|
||||
description="Human-readable current step (e.g. 'Importing journal entries...')",
|
||||
json_schema_extra={"label": "Fortschritts-Nachricht"},
|
||||
)
|
||||
|
||||
payload: Dict[str, Any] = Field(
|
||||
default_factory=dict,
|
||||
description="Job input parameters (JSON)",
|
||||
json_schema_extra={"label": "Eingabe"},
|
||||
)
|
||||
result: Optional[Dict[str, Any]] = Field(
|
||||
None,
|
||||
description="Handler return value on success (JSON)",
|
||||
json_schema_extra={"label": "Ergebnis"},
|
||||
)
|
||||
errorMessage: Optional[str] = Field(
|
||||
None,
|
||||
description="Truncated error message on failure (full stack trace in logs)",
|
||||
json_schema_extra={"label": "Fehler"},
|
||||
)
|
||||
|
||||
createdAt: datetime = Field(
|
||||
default_factory=lambda: datetime.now(timezone.utc),
|
||||
description="When the job was submitted",
|
||||
json_schema_extra={"label": "Eingereicht"},
|
||||
)
|
||||
startedAt: Optional[datetime] = Field(
|
||||
None,
|
||||
description="When the handler began running",
|
||||
json_schema_extra={"label": "Gestartet"},
|
||||
)
|
||||
finishedAt: Optional[datetime] = Field(
|
||||
None,
|
||||
description="When the handler reached a terminal status",
|
||||
json_schema_extra={"label": "Beendet"},
|
||||
)
|
||||
768
modules/demoConfigs/pwgDemo2026.py
Normal file
768
modules/demoConfigs/pwgDemo2026.py
Normal file
|
|
@ -0,0 +1,768 @@
|
|||
"""PWG Pilot Demo (April 2026)
|
||||
|
||||
Bootstraps a complete PWG-Pilot demo environment in an empty dev/demo install:
|
||||
|
||||
- 1 mandate "Stiftung PWG"
|
||||
- 1 SysAdmin demo user "pwg.demo"
|
||||
- 4 features: workspace, trustee (BUHA PWG), graphicalEditor (PWG Automationen),
|
||||
neutralization (Datenschutz)
|
||||
- Trustee seed-data (5 fictitious tenants with monthly rent journal lines for
|
||||
the current year, loaded from ``demoData/pwg/_seedTrusteeData.json``)
|
||||
- Pilot workflow imported from
|
||||
``demoData/workflows/pwg-mietzinsbestaetigung-pilot.workflow.json``
|
||||
(active=false — user activates manually after triggering once).
|
||||
|
||||
Idempotent: ``load()`` skips anything that already exists; ``remove()`` deletes
|
||||
mandate, user, seed data and imported workflow cleanly.
|
||||
|
||||
Pattern: subclass of :class:`_BaseDemoConfig`, auto-discovered by
|
||||
``demoConfigs/__init__.py``. See ``investorDemo2026.py`` for the reference
|
||||
implementation we mirror here.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from modules.demoConfigs._baseDemoConfig import _BaseDemoConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_DEMO_PREFIX = "demo-pwg2026"
|
||||
|
||||
_MANDATE_PWG = {
|
||||
"name": "stiftung-pwg",
|
||||
"label": "Stiftung PWG",
|
||||
}
|
||||
|
||||
_USER = {
|
||||
"username": "pwg.demo",
|
||||
"email": "pwg.demo@poweron.swiss",
|
||||
"fullName": "PWG Demo Sachbearbeiter",
|
||||
"password": "pwg.demo.2026",
|
||||
"language": "de",
|
||||
}
|
||||
|
||||
_FEATURES_PWG = [
|
||||
{"code": "workspace", "label": "Dokumentenablage PWG"},
|
||||
{"code": "trustee", "label": "Buchhaltung PWG"},
|
||||
{"code": "graphicalEditor", "label": "PWG Automationen"},
|
||||
{"code": "neutralization", "label": "Datenschutz"},
|
||||
]
|
||||
|
||||
# Filename markers used to identify the imported pilot workflow on remove().
|
||||
_PILOT_WORKFLOW_LABEL = "PWG Pilot: Jahresmietzinsbestätigung"
|
||||
_PILOT_WORKFLOW_FILE = "pwg-mietzinsbestaetigung-pilot.workflow.json"
|
||||
_SEED_TRUSTEE_FILE = "_seedTrusteeData.json"
|
||||
|
||||
|
||||
class PwgDemo2026(_BaseDemoConfig):
|
||||
code = "pwg-demo-2026"
|
||||
label = "PWG Pilot Demo (Mietzinsbestätigungen)"
|
||||
description = (
|
||||
"Stiftung PWG, ein Demo-Sachbearbeiter, Trustee mit fiktiven Mietern, "
|
||||
"Graph-Editor mit dem Pilot-Workflow für Jahresmietzinsbestätigungen "
|
||||
"(als File importiert, active=false). Idempotent."
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# load
|
||||
# ------------------------------------------------------------------
|
||||
def load(self, db) -> Dict[str, Any]:
|
||||
summary: Dict[str, Any] = {"created": [], "skipped": [], "errors": []}
|
||||
|
||||
try:
|
||||
mandateId = self._ensureMandate(db, _MANDATE_PWG, summary)
|
||||
userId = self._ensureUser(db, summary)
|
||||
self._ensurePlatformAdminFlag(db, userId, summary)
|
||||
|
||||
if mandateId and userId:
|
||||
self._ensureMembership(db, userId, mandateId, _MANDATE_PWG["label"], summary)
|
||||
self._ensureFeatures(db, mandateId, _MANDATE_PWG["label"], _FEATURES_PWG, summary)
|
||||
self._ensureFeatureAccess(db, userId, mandateId, _MANDATE_PWG["label"], summary)
|
||||
self._ensureNeutralizationConfig(db, mandateId, userId, summary)
|
||||
self._ensureBilling(db, mandateId, _MANDATE_PWG["label"], summary)
|
||||
|
||||
trusteeInstanceId = self._getFeatureInstanceId(db, mandateId, "trustee", "Buchhaltung PWG")
|
||||
if trusteeInstanceId:
|
||||
self._ensureTrusteeSeed(mandateId, trusteeInstanceId, summary)
|
||||
|
||||
graphInstanceId = self._getFeatureInstanceId(db, mandateId, "graphicalEditor", "PWG Automationen")
|
||||
if graphInstanceId:
|
||||
self._ensurePilotWorkflow(mandateId, graphInstanceId, summary)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"PWG demo load failed: {e}", exc_info=True)
|
||||
summary["errors"].append(str(e))
|
||||
|
||||
return summary
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# remove
|
||||
# ------------------------------------------------------------------
|
||||
    def remove(self, db) -> Dict[str, Any]:
        """Tear down everything ``load()`` created.

        Removes the PWG mandate(s) — cascading feature data first via
        ``_removeMandateData`` — then the demo user and its memberships.
        Returns a summary dict with ``removed`` and ``errors`` lists.
        """
        summary: Dict[str, Any] = {"removed": [], "errors": []}

        from modules.datamodels.datamodelMembership import UserMandate
        from modules.datamodels.datamodelUam import Mandate, UserInDB

        # 1) Mandate(s): cascade feature/membership/role data before the
        #    mandate record itself is deleted.
        try:
            existing = db.getRecordset(Mandate, recordFilter={"name": _MANDATE_PWG["name"]})
            for m in existing:
                mid = m.get("id")
                self._removeMandateData(db, mid, _MANDATE_PWG["label"], summary)
                db.recordDelete(Mandate, mid)
                summary["removed"].append(f"Mandate {_MANDATE_PWG['label']} ({mid})")
                logger.info(f"Removed mandate {_MANDATE_PWG['label']} ({mid})")
        except Exception as e:
            summary["errors"].append(f"Remove mandate {_MANDATE_PWG['label']}: {e}")

        # 2) Demo user: delete remaining memberships (best-effort — errors
        #    are deliberately swallowed so a stale membership cannot block
        #    the user delete), then the user record.
        try:
            existing = db.getRecordset(UserInDB, recordFilter={"username": _USER["username"]})
            for u in existing:
                uid = u.get("id")
                memberships = db.getRecordset(UserMandate, recordFilter={"userId": uid}) or []
                for mem in memberships:
                    try:
                        db.recordDelete(UserMandate, mem.get("id"))
                    except Exception:
                        pass
                db.recordDelete(UserInDB, uid)
                summary["removed"].append(f"User {_USER['username']} ({uid})")
                logger.info(f"Removed user {_USER['username']} ({uid})")
        except Exception as e:
            summary["errors"].append(f"Remove user: {e}")

        return summary
|
||||
|
||||
# ==================================================================
|
||||
# — load helpers (mostly mirrors of investorDemo2026.py)
|
||||
# ==================================================================
|
||||
|
||||
def _ensureMandate(self, db, mandateDef: Dict, summary: Dict) -> Optional[str]:
|
||||
from modules.datamodels.datamodelUam import Mandate
|
||||
from modules.interfaces.interfaceBootstrap import copySystemRolesToMandate
|
||||
|
||||
existing = db.getRecordset(Mandate, recordFilter={"name": mandateDef["name"]})
|
||||
if existing:
|
||||
mid = existing[0].get("id")
|
||||
summary["skipped"].append(f"Mandate {mandateDef['label']} exists ({mid})")
|
||||
return mid
|
||||
|
||||
mandate = Mandate(name=mandateDef["name"], label=mandateDef["label"], enabled=True)
|
||||
created = db.recordCreate(Mandate, mandate)
|
||||
mid = created.get("id")
|
||||
logger.info(f"Created mandate {mandateDef['label']} ({mid})")
|
||||
summary["created"].append(f"Mandate {mandateDef['label']}")
|
||||
copySystemRolesToMandate(db, mid)
|
||||
return mid
|
||||
|
||||
    def _ensureUser(self, db, summary: Dict) -> Optional[str]:
        """Create the demo user if absent; return the user id (idempotent).

        The password from ``_USER`` is hashed with argon2 before storage;
        the user is created as a local (non-SSO) sys-admin account.
        """
        from modules.datamodels.datamodelUam import AuthAuthority, UserInDB
        from passlib.context import CryptContext

        # Idempotency: match on username only.
        existing = db.getRecordset(UserInDB, recordFilter={"username": _USER["username"]})
        if existing:
            uid = existing[0].get("id")
            summary["skipped"].append(f"User {_USER['username']} exists ({uid})")
            return uid

        # argon2 is the only accepted scheme; "deprecated=auto" lets passlib
        # flag any other hash format as needing a rehash.
        pwdContext = CryptContext(schemes=["argon2"], deprecated="auto")
        user = UserInDB(
            username=_USER["username"],
            email=_USER["email"],
            fullName=_USER["fullName"],
            enabled=True,
            language=_USER["language"],
            isSysAdmin=True,
            authenticationAuthority=AuthAuthority.LOCAL,
            hashedPassword=pwdContext.hash(_USER["password"]),
        )
        created = db.recordCreate(UserInDB, user)
        uid = created.get("id")
        logger.info(f"Created user {_USER['username']} ({uid})")
        summary["created"].append(f"User {_USER['fullName']}")
        return uid
|
||||
|
||||
def _ensurePlatformAdminFlag(self, db, userId: Optional[str], summary: Dict):
|
||||
from modules.datamodels.datamodelUam import UserInDB
|
||||
if not userId:
|
||||
return
|
||||
existing = db.getRecord(UserInDB, userId)
|
||||
if not existing:
|
||||
summary["errors"].append(f"User {userId} not found — cannot set isPlatformAdmin")
|
||||
return
|
||||
currentFlag = bool(existing.get("isPlatformAdmin", False)) if isinstance(existing, dict) else bool(getattr(existing, "isPlatformAdmin", False))
|
||||
if currentFlag:
|
||||
summary["skipped"].append("isPlatformAdmin already set")
|
||||
return
|
||||
db.recordModify(UserInDB, userId, {"isPlatformAdmin": True})
|
||||
summary["created"].append("isPlatformAdmin flag")
|
||||
|
||||
    def _ensureMembership(self, db, userId: str, mandateId: str, mandateLabel: str, summary: Dict):
        """Link the demo user to the mandate and grant the mandate admin role.

        Both the UserMandate link and the admin UserMandateRole are created
        idempotently (existing records are reused, not duplicated).
        """
        from modules.datamodels.datamodelMembership import UserMandate, UserMandateRole
        from modules.datamodels.datamodelRbac import Role

        existing = db.getRecordset(UserMandate, recordFilter={"userId": userId, "mandateId": mandateId})
        if existing:
            userMandateId = existing[0].get("id")
            summary["skipped"].append(f"Membership {_USER['username']} -> {mandateLabel} exists")
        else:
            um = UserMandate(userId=userId, mandateId=mandateId, enabled=True)
            created = db.recordCreate(UserMandate, um)
            userMandateId = created.get("id")
            summary["created"].append(f"Membership {_USER['username']} -> {mandateLabel}")

        # Attach the mandate-level "admin" role (copied into the mandate by
        # copySystemRolesToMandate during _ensureMandate).
        adminRoles = db.getRecordset(Role, recordFilter={"mandateId": mandateId, "roleLabel": "admin"})
        if adminRoles:
            adminRoleId = adminRoles[0].get("id")
            existingRole = db.getRecordset(UserMandateRole, recordFilter={"userMandateId": userMandateId, "roleId": adminRoleId})
            if not existingRole:
                umr = UserMandateRole(userMandateId=userMandateId, roleId=adminRoleId)
                db.recordCreate(UserMandateRole, umr)
|
||||
|
||||
def _ensureFeatures(self, db, mandateId: str, mandateLabel: str, featureDefs: List[Dict], summary: Dict):
|
||||
from modules.interfaces.interfaceFeatures import getFeatureInterface
|
||||
|
||||
fi = getFeatureInterface(db)
|
||||
existingInstances = fi.getFeatureInstancesForMandate(mandateId)
|
||||
existingLabels = {
|
||||
(inst.label if hasattr(inst, "label") else inst.get("label", ""))
|
||||
for inst in existingInstances
|
||||
}
|
||||
|
||||
for featureDef in featureDefs:
|
||||
code = featureDef["code"]
|
||||
instanceLabel = featureDef["label"]
|
||||
if instanceLabel in existingLabels:
|
||||
summary["skipped"].append(f"Feature '{instanceLabel}' in {mandateLabel} exists")
|
||||
continue
|
||||
try:
|
||||
fi.createFeatureInstance(
|
||||
featureCode=code,
|
||||
mandateId=mandateId,
|
||||
label=instanceLabel,
|
||||
enabled=True,
|
||||
copyTemplateRoles=True,
|
||||
)
|
||||
summary["created"].append(f"Feature '{instanceLabel}' in {mandateLabel}")
|
||||
except Exception as e:
|
||||
summary["errors"].append(f"Feature '{instanceLabel}' in {mandateLabel}: {e}")
|
||||
|
||||
    def _ensureFeatureAccess(self, db, userId: str, mandateId: str, mandateLabel: str, summary: Dict):
        """Grant the demo user access (plus the per-feature admin role) to
        every feature instance of the mandate. Idempotent on both the
        FeatureAccess record and the FeatureAccessRole link.
        """
        from modules.datamodels.datamodelFeatures import FeatureInstance
        from modules.datamodels.datamodelMembership import FeatureAccess, FeatureAccessRole
        from modules.datamodels.datamodelRbac import Role

        instances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) or []

        for inst in instances:
            instId = inst.get("id")
            featureCode = inst.get("featureCode", "")
            if not instId:
                continue

            # 1) FeatureAccess: reuse if present, otherwise create.
            existing = db.getRecordset(FeatureAccess, recordFilter={"userId": userId, "featureInstanceId": instId})
            if existing:
                featureAccessId = existing[0].get("id")
                summary["skipped"].append(f"FeatureAccess {featureCode} in {mandateLabel} exists")
            else:
                fa = FeatureAccess(userId=userId, featureInstanceId=instId, enabled=True)
                created = db.recordCreate(FeatureAccess, fa)
                featureAccessId = created.get("id")
                summary["created"].append(f"FeatureAccess {featureCode} in {mandateLabel}")

            # 2) Attach the feature's admin role, which follows the
            #    "<featureCode>-admin" naming convention used here.
            adminRoleLabel = f"{featureCode}-admin"
            adminRoles = db.getRecordset(Role, recordFilter={
                "featureInstanceId": instId,
                "roleLabel": adminRoleLabel,
            })
            if adminRoles:
                adminRoleId = adminRoles[0].get("id")
                existingRole = db.getRecordset(FeatureAccessRole, recordFilter={
                    "featureAccessId": featureAccessId,
                    "roleId": adminRoleId,
                })
                if not existingRole:
                    far = FeatureAccessRole(featureAccessId=featureAccessId, roleId=adminRoleId)
                    db.recordCreate(FeatureAccessRole, far)
|
||||
|
||||
    def _ensureNeutralizationConfig(self, db, mandateId: Optional[str], userId: Optional[str], summary: Dict):
        """Create a DataNeutraliserConfig for the mandate's neutralization
        feature instance, if one exists and no config is present yet.

        Silently returns when mandate/user ids are missing or no
        neutralization instance was provisioned; errors are recorded in
        ``summary["errors"]`` instead of raised.
        """
        if not mandateId or not userId:
            return
        from modules.datamodels.datamodelFeatures import FeatureInstance
        instances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId, "featureCode": "neutralization"})
        if not instances:
            return
        # Only the first neutralization instance is configured — the demo
        # creates at most one.
        instanceId = instances[0].get("id")
        try:
            from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutraliserConfig
            existing = db.getRecordset(DataNeutraliserConfig, recordFilter={"featureInstanceId": instanceId})
            if existing:
                summary["skipped"].append(f"Neutralization config for mandate {mandateId} exists")
                return
            config = DataNeutraliserConfig(
                featureInstanceId=instanceId,
                mandateId=mandateId,
                userId=userId,
                enabled=True,
                scope="featureInstance",
            )
            db.recordCreate(DataNeutraliserConfig, config)
            summary["created"].append(f"Neutralization config for mandate {mandateId}")
        except Exception as e:
            summary["errors"].append(f"Neutralization config: {e}")
|
||||
|
||||
    def _ensureBilling(self, db, mandateId: Optional[str], mandateLabel: str, summary: Dict):
        """Create default BillingSettings for the mandate if none exist.

        Uses the billing root interface (its own privileged DB handle, not
        the ``db`` passed in) for both lookup and creation; failures are
        recorded rather than raised.
        """
        if not mandateId:
            return
        try:
            from modules.datamodels.datamodelBilling import BillingSettings
            from modules.interfaces.interfaceDbBilling import _getRootInterface

            billingInterface = _getRootInterface()
            existingSettings = billingInterface.getSettings(mandateId)
            if existingSettings:
                summary["skipped"].append(f"Billing for {mandateLabel} exists")
                return
            # Demo defaults: warn at 10% remaining, with notification on.
            settings = BillingSettings(
                mandateId=mandateId,
                warningThresholdPercent=10.0,
                notifyOnWarning=True,
            )
            billingInterface.db.recordCreate(BillingSettings, settings)
            summary["created"].append(f"Billing settings for {mandateLabel}")
        except Exception as e:
            summary["errors"].append(f"Billing for {mandateLabel}: {e}")
|
||||
|
||||
def _getFeatureInstanceId(self, db, mandateId: str, featureCode: str, label: str) -> Optional[str]:
|
||||
from modules.datamodels.datamodelFeatures import FeatureInstance
|
||||
instances = db.getRecordset(FeatureInstance, recordFilter={
|
||||
"mandateId": mandateId,
|
||||
"featureCode": featureCode,
|
||||
"label": label,
|
||||
}) or []
|
||||
if instances:
|
||||
return instances[0].get("id")
|
||||
# fallback: any instance of that feature in the mandate
|
||||
instances = db.getRecordset(FeatureInstance, recordFilter={
|
||||
"mandateId": mandateId,
|
||||
"featureCode": featureCode,
|
||||
}) or []
|
||||
return instances[0].get("id") if instances else None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# PWG-specific helpers — Trustee seed-data + pilot-workflow import
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
    def _ensureTrusteeSeed(self, mandateId: str, featureInstanceId: str, summary: Dict):
        """Idempotently load 5 fictitious tenants and their 12-month rent
        journal lines into the trustee database for this feature instance.

        Skips any tenant whose contact (matched by name+address) already
        exists, so re-running ``load()`` is safe. Note: journal entries are
        only skipped via their contact — they are not checked individually.
        """
        # Seed data lives next to the gateway code, resolved independent of CWD.
        seedPath = _demoDataDir() / "pwg" / _SEED_TRUSTEE_FILE
        if not seedPath.is_file():
            summary["errors"].append(f"PWG seed file missing: {seedPath}")
            return
        try:
            seed = json.loads(seedPath.read_text(encoding="utf-8"))
        except Exception as exc:
            summary["errors"].append(f"PWG seed file unreadable: {exc}")
            return

        # Trustee data lives in its own database — open a dedicated handle.
        try:
            trusteeDb = _openTrusteeDb()
        except Exception as exc:
            summary["errors"].append(f"Trustee DB connection failed: {exc}")
            return

        from modules.features.trustee.datamodelFeatureTrustee import (
            TrusteeDataAccount,
            TrusteeDataContact,
            TrusteeDataJournalEntry,
            TrusteeDataJournalLine,
        )

        # Seed-file defaults: account 6000, current year.
        rentAccountNumber = str(seed.get("rentAccount", "6000"))
        year = int(seed.get("year", datetime.now().year))

        # 1) Ensure rent account exists once
        existingAccounts = trusteeDb.getRecordset(TrusteeDataAccount, recordFilter={
            "featureInstanceId": featureInstanceId,
            "accountNumber": rentAccountNumber,
        }) or []
        if not existingAccounts:
            trusteeDb.recordCreate(TrusteeDataAccount, TrusteeDataAccount(
                accountNumber=rentAccountNumber,
                label=str(seed.get("rentAccountLabel", "Mietzinsertrag")),
                accountType="revenue",
                accountGroup="rental_income",
                currency="CHF",
                isActive=True,
                mandateId=mandateId,
                featureInstanceId=featureInstanceId,
            ))
            summary["created"].append(f"Trustee account {rentAccountNumber}")

        # 2) Ensure contacts + monthly journal entries
        createdTenants = 0
        skippedTenants = 0
        for tenant in seed.get("tenants", []):
            name = tenant.get("name", "")
            address = tenant.get("address", "")
            if not name:
                continue
            # Idempotency key for a tenant: name + address within the instance.
            existing = trusteeDb.getRecordset(TrusteeDataContact, recordFilter={
                "featureInstanceId": featureInstanceId,
                "name": name,
                "address": address,
            }) or []
            if existing:
                skippedTenants += 1
                continue

            contact = TrusteeDataContact(
                externalId=tenant.get("contactNumber"),
                contactType="customer",
                contactNumber=tenant.get("contactNumber"),
                name=name,
                address=address,
                zip=tenant.get("zip"),
                city=tenant.get("city"),
                country=tenant.get("country"),
                email=tenant.get("email"),
                mandateId=mandateId,
                featureInstanceId=featureInstanceId,
            )
            trusteeDb.recordCreate(TrusteeDataContact, contact)
            createdTenants += 1

            # 12 monthly rent bookings (credit on rent account)
            monthlyRent = float(tenant.get("monthlyRentChf") or 0.0)
            if monthlyRent <= 0:
                continue
            for month in range(1, 13):
                # One entry per month, booked on the 1st, with a reference
                # of the form PWG-<contactNumber>-<YYYYMM>.
                bookingDate = f"{year}-{month:02d}-01"
                entryRef = f"PWG-{tenant.get('contactNumber')}-{year}{month:02d}"
                entry = TrusteeDataJournalEntry(
                    externalId=entryRef,
                    bookingDate=bookingDate,
                    reference=entryRef,
                    description=f"Mietzins {month:02d}/{year} {name}",
                    currency="CHF",
                    totalAmount=monthlyRent,
                    mandateId=mandateId,
                    featureInstanceId=featureInstanceId,
                )
                createdEntry = trusteeDb.recordCreate(TrusteeDataJournalEntry, entry)
                # Single credit line on the rent account (no debit side is
                # seeded here).
                line = TrusteeDataJournalLine(
                    journalEntryId=createdEntry.get("id"),
                    accountNumber=rentAccountNumber,
                    debitAmount=0.0,
                    creditAmount=monthlyRent,
                    currency="CHF",
                    description=f"Mietzins {month:02d}/{year} {name} ({tenant.get('contactNumber')})",
                    mandateId=mandateId,
                    featureInstanceId=featureInstanceId,
                )
                trusteeDb.recordCreate(TrusteeDataJournalLine, line)

        if createdTenants:
            summary["created"].append(f"PWG seed: {createdTenants} tenants × 12 monthly journal lines")
        if skippedTenants:
            summary["skipped"].append(f"PWG seed: {skippedTenants} tenants already present")
|
||||
|
||||
    def _ensurePilotWorkflow(self, mandateId: str, featureInstanceId: str, summary: Dict):
        """Import the pilot workflow JSON into the graphical-editor DB.

        Uses the schema-aware import pipeline introduced in Phase 1
        (``_workflowFileSchema.envelopeToWorkflowData`` +
        ``GraphicalEditorObjects.importWorkflowFromDict``). The workflow is
        always created with ``active=False`` so a manual trigger is required
        — this matches the demo-bootstrap safety default.
        """
        envelopePath = _demoDataDir() / "workflows" / _PILOT_WORKFLOW_FILE
        if not envelopePath.is_file():
            summary["errors"].append(f"Pilot workflow file missing: {envelopePath}")
            return
        try:
            envelope = json.loads(envelopePath.read_text(encoding="utf-8"))
        except Exception as exc:
            summary["errors"].append(f"Pilot workflow file unreadable: {exc}")
            return

        try:
            geDb = _openGraphicalEditorDb()
        except Exception as exc:
            summary["errors"].append(f"GraphicalEditor DB connection failed: {exc}")
            return

        from modules.features.graphicalEditor._workflowFileSchema import (
            envelopeToWorkflowData,
            validateFileEnvelope,
        )
        from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoWorkflow
        from modules.features.graphicalEditor.nodeRegistry import STATIC_NODE_TYPES

        # Idempotency: the workflow is identified by its label within the
        # (mandate, featureInstance) scope.
        existing = geDb.getRecordset(AutoWorkflow, recordFilter={
            "mandateId": mandateId,
            "featureInstanceId": featureInstanceId,
            "label": _PILOT_WORKFLOW_LABEL,
        }) or []
        if existing:
            summary["skipped"].append(f"Pilot workflow already imported ({existing[0].get('id')})")
            return

        # Validate the envelope against the node types known to this gateway.
        knownTypes = [n.get("id") for n in STATIC_NODE_TYPES if isinstance(n, dict) and n.get("id")]
        try:
            normalized, warnings = validateFileEnvelope(envelope, knownNodeTypes=knownTypes)
        except Exception as exc:
            summary["errors"].append(f"Pilot workflow envelope invalid: {exc}")
            return
        if warnings:
            # NOTE(review): non-fatal validation warnings are surfaced via the
            # "created" list (summary has no dedicated warnings bucket) —
            # confirm this is intentional.
            summary["created"].append(f"Pilot workflow warnings: {warnings}")

        data = envelopeToWorkflowData(
            normalized,
            mandateId=mandateId,
            featureInstanceId=featureInstanceId,
        )
        # Inject the trustee feature-instance id into the parameters so the
        # node runtime resolves it without manual editor cleanup.
        trusteeInstanceId = self._guessTrusteeInstanceId(mandateId)
        if trusteeInstanceId:
            for node in data.get("graph", {}).get("nodes", []) or []:
                params = node.get("parameters") or {}
                # Only fill empty placeholders — never overwrite an id the
                # file already carries.
                if "featureInstanceId" in params and not params["featureInstanceId"]:
                    params["featureInstanceId"] = trusteeInstanceId
                node["parameters"] = params

        # Force-import: AutoWorkflow.create accepts our envelope-derived data
        # (graph, label, invocations, …) verbatim; we add ids/timestamps that
        # AutoWorkflow expects.
        record = AutoWorkflow(
            id=str(uuid.uuid4()),
            mandateId=mandateId,
            featureInstanceId=featureInstanceId,
            label=data.get("label") or _PILOT_WORKFLOW_LABEL,
            description=data.get("description") or "",
            tags=data.get("tags") or [],
            graph=data.get("graph") or {"nodes": [], "connections": []},
            invocations=data.get("invocations") or [],
            templateScope=data.get("templateScope") or "instance",
            sharedReadOnly=bool(data.get("sharedReadOnly")),
            notifyOnFailure=bool(data.get("notifyOnFailure", True)),
            active=False,
        )
        created = geDb.recordCreate(AutoWorkflow, record)
        summary["created"].append(f"Pilot workflow imported (active=false, id={created.get('id')})")
        logger.info(f"Imported pilot workflow into graphicalEditor instance {featureInstanceId}")
|
||||
|
||||
    def _guessTrusteeInstanceId(self, mandateId: str) -> Optional[str]:
        """Return the first trustee feature-instance id of the given mandate.

        The demo only ever creates one trustee feature in this mandate, so a
        first-hit lookup is sufficient and avoids depending on the label.
        Opens its own connection to the app DB because this helper runs
        without a request-scoped ``db`` handle. Returns ``None`` (with a
        warning logged) on any failure.
        """
        try:
            from modules.connectors.connectorDbPostgre import DatabaseConnector
            from modules.datamodels.datamodelFeatures import FeatureInstance
            from modules.shared.configuration import APP_CONFIG
            # Privileged connection; secret-based password takes precedence.
            appDb = DatabaseConnector(
                dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
                dbDatabase="poweron_app",
                dbUser=APP_CONFIG.get("DB_USER"),
                dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD"),
                dbPort=int(APP_CONFIG.get("DB_PORT", 5432)),
                userId=None,
            )
            instances = appDb.getRecordset(FeatureInstance, recordFilter={
                "mandateId": mandateId,
                "featureCode": "trustee",
            }) or []
            return instances[0].get("id") if instances else None
        except Exception as exc:
            logger.warning(f"Could not resolve trustee instance for mandate {mandateId}: {exc}")
            return None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# remove helpers
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
    def _removeMandateData(self, db, mandateId: str, mandateLabel: str, summary: Dict):
        """Cascade-delete everything created by load() for this mandate.

        Order: per-feature data (graphical editor, trustee, neutralization,
        chat) → feature accesses → feature instances → memberships → roles →
        billing settings. Children are always deleted before their parents.
        """
        from modules.datamodels.datamodelBilling import BillingSettings
        from modules.datamodels.datamodelChat import ChatLog, ChatMessage, ChatWorkflow
        from modules.datamodels.datamodelFeatures import FeatureInstance
        from modules.datamodels.datamodelMembership import (
            FeatureAccess,
            FeatureAccessRole,
            UserMandate,
            UserMandateRole,
        )
        from modules.datamodels.datamodelRbac import AccessRule, Role

        instances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) or []
        for inst in instances:
            instId = inst.get("id")
            featureCode = inst.get("featureCode", "")
            if not instId:
                continue

            # Feature-specific payloads live in other databases and are
            # cleaned up by their dedicated helpers first.
            if featureCode == "graphicalEditor":
                self._removeGraphicalEditorData(instId, mandateId, mandateLabel, summary)
            if featureCode == "trustee":
                self._removeTrusteeSeed(instId, mandateLabel, summary)
            if featureCode == "neutralization":
                self._removeNeutralizationData(db, instId, mandateLabel, summary)

            # Chat data attached to the instance: messages and logs before
            # their workflow record.
            chatWorkflows = db.getRecordset(ChatWorkflow, recordFilter={"featureInstanceId": instId}) or []
            for wf in chatWorkflows:
                wfId = wf.get("id")
                for msg in db.getRecordset(ChatMessage, recordFilter={"workflowId": wfId}) or []:
                    db.recordDelete(ChatMessage, msg.get("id"))
                for log in db.getRecordset(ChatLog, recordFilter={"workflowId": wfId}) or []:
                    db.recordDelete(ChatLog, log.get("id"))
                db.recordDelete(ChatWorkflow, wfId)

            # Access records: role links before the access record itself.
            accesses = db.getRecordset(FeatureAccess, recordFilter={"featureInstanceId": instId}) or []
            for access in accesses:
                for role in db.getRecordset(FeatureAccessRole, recordFilter={"featureAccessId": access.get("id")}) or []:
                    db.recordDelete(FeatureAccessRole, role.get("id"))
                db.recordDelete(FeatureAccess, access.get("id"))

            db.recordDelete(FeatureInstance, instId)
            summary["removed"].append(f"FeatureInstance {featureCode} in {mandateLabel}")

        # Mandate memberships and their role links.
        memberships = db.getRecordset(UserMandate, recordFilter={"mandateId": mandateId}) or []
        for um in memberships:
            for umr in db.getRecordset(UserMandateRole, recordFilter={"userMandateId": um.get("id")}) or []:
                db.recordDelete(UserMandateRole, umr.get("id"))
            db.recordDelete(UserMandate, um.get("id"))

        # Mandate-level roles and their access rules.
        roles = db.getRecordset(Role, recordFilter={"mandateId": mandateId}) or []
        for role in roles:
            for rule in db.getRecordset(AccessRule, recordFilter={"roleId": role.get("id")}) or []:
                db.recordDelete(AccessRule, rule.get("id"))
            db.recordDelete(Role, role.get("id"))

        # Billing settings live behind the billing root interface's own DB.
        try:
            from modules.interfaces.interfaceDbBilling import _getRootInterface
            billingDb = _getRootInterface().db
            billingSettings = billingDb.getRecordset(BillingSettings, recordFilter={"mandateId": mandateId}) or []
            for bs in billingSettings:
                billingDb.recordDelete(BillingSettings, bs.get("id"))
        except Exception as e:
            summary["errors"].append(f"Billing cleanup for {mandateLabel}: {e}")
|
||||
|
||||
    def _removeGraphicalEditorData(self, featureInstanceId: str, mandateId: str, mandateLabel: str, summary: Dict):
        """Delete all AutoWorkflows of this feature instance from the
        graphical-editor DB, including versions, runs (with step logs) and
        tasks. Children are removed before their parent workflow; any
        failure is recorded in ``summary["errors"]``.
        """
        try:
            from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import (
                AutoRun,
                AutoStepLog,
                AutoTask,
                AutoVersion,
                AutoWorkflow,
            )
            geDb = _openGraphicalEditorDb()
            workflows = geDb.getRecordset(AutoWorkflow, recordFilter={
                "mandateId": mandateId,
                "featureInstanceId": featureInstanceId,
            }) or []
            for wf in workflows:
                wfId = wf.get("id")
                for version in geDb.getRecordset(AutoVersion, recordFilter={"workflowId": wfId}) or []:
                    geDb.recordDelete(AutoVersion, version.get("id"))
                for run in geDb.getRecordset(AutoRun, recordFilter={"workflowId": wfId}) or []:
                    runId = run.get("id")
                    # Step logs hang off the run, not the workflow.
                    for step in geDb.getRecordset(AutoStepLog, recordFilter={"runId": runId}) or []:
                        geDb.recordDelete(AutoStepLog, step.get("id"))
                    geDb.recordDelete(AutoRun, runId)
                for task in geDb.getRecordset(AutoTask, recordFilter={"workflowId": wfId}) or []:
                    geDb.recordDelete(AutoTask, task.get("id"))
                geDb.recordDelete(AutoWorkflow, wfId)
            if workflows:
                summary["removed"].append(f"{len(workflows)} AutoWorkflows in {mandateLabel}")
        except Exception as e:
            summary["errors"].append(f"GraphicalEditor cleanup for {mandateLabel}: {e}")
|
||||
|
||||
    def _removeTrusteeSeed(self, featureInstanceId: str, mandateLabel: str, summary: Dict):
        """Delete all trustee data of this feature instance.

        Models are processed in child-before-parent order (journal lines
        before entries, then contacts, accounts, config) so no dangling
        references remain. Failures are recorded, not raised.
        """
        try:
            from modules.features.trustee.datamodelFeatureTrustee import (
                TrusteeAccountingConfig,
                TrusteeDataAccount,
                TrusteeDataContact,
                TrusteeDataJournalEntry,
                TrusteeDataJournalLine,
            )
            trusteeDb = _openTrusteeDb()
            # Order matters: lines -> entries -> contacts -> accounts -> config.
            for model in (
                TrusteeDataJournalLine,
                TrusteeDataJournalEntry,
                TrusteeDataContact,
                TrusteeDataAccount,
                TrusteeAccountingConfig,
            ):
                rows = trusteeDb.getRecordset(model, recordFilter={"featureInstanceId": featureInstanceId}) or []
                for row in rows:
                    trusteeDb.recordDelete(model, row.get("id"))
                if rows:
                    summary["removed"].append(f"{len(rows)} {model.__name__} in {mandateLabel}")
        except Exception as e:
            summary["errors"].append(f"Trustee cleanup for {mandateLabel}: {e}")
|
||||
|
||||
def _removeNeutralizationData(self, db, featureInstanceId: str, mandateLabel: str, summary: Dict):
|
||||
try:
|
||||
from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutraliserConfig
|
||||
configs = db.getRecordset(DataNeutraliserConfig, recordFilter={"featureInstanceId": featureInstanceId}) or []
|
||||
for cfg in configs:
|
||||
db.recordDelete(DataNeutraliserConfig, cfg.get("id"))
|
||||
if configs:
|
||||
summary["removed"].append(f"DataNeutraliserConfig in {mandateLabel}")
|
||||
except Exception as e:
|
||||
summary["errors"].append(f"Neutralization cleanup for {mandateLabel}: {e}")
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Module-level helpers (private)
|
||||
# ----------------------------------------------------------------------
|
||||
|
||||
|
||||
def _demoDataDir() -> Path:
|
||||
"""Return absolute path to ``gateway/demoData`` regardless of CWD."""
|
||||
# __file__ = .../gateway/modules/demoConfigs/pwgDemo2026.py
|
||||
return Path(__file__).resolve().parents[2] / "demoData"
|
||||
|
||||
|
||||
def _openTrusteeDb():
    """Open a privileged DB connection to ``poweron_trustee``.

    Shared by the seed and remove paths so both operate on the same
    database. The secret-based password takes precedence over the plain one.
    """
    from modules.connectors.connectorDbPostgre import DatabaseConnector
    from modules.shared.configuration import APP_CONFIG

    dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD")
    return DatabaseConnector(
        dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
        dbDatabase="poweron_trustee",
        dbUser=APP_CONFIG.get("DB_USER"),
        dbPassword=dbPassword,
        dbPort=int(APP_CONFIG.get("DB_PORT", 5432)),
        userId=None,
    )
|
||||
|
||||
|
||||
def _openGraphicalEditorDb():
    """Open a privileged DB connection to ``poweron_graphicaleditor``.

    The secret-based password takes precedence over the plain one.
    """
    from modules.connectors.connectorDbPostgre import DatabaseConnector
    from modules.shared.configuration import APP_CONFIG

    dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD")
    return DatabaseConnector(
        dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
        dbDatabase="poweron_graphicaleditor",
        dbUser=APP_CONFIG.get("DB_USER"),
        dbPassword=dbPassword,
        dbPort=int(APP_CONFIG.get("DB_PORT", 5432)),
        userId=None,
    )
|
||||
284
modules/features/graphicalEditor/_workflowFileSchema.py
Normal file
284
modules/features/graphicalEditor/_workflowFileSchema.py
Normal file
|
|
@ -0,0 +1,284 @@
|
|||
# Copyright (c) 2026 Patrick Motsch
|
||||
# All rights reserved.
|
||||
"""
|
||||
Workflow File Schema (Versioned Envelope) for the GraphicalEditor.
|
||||
|
||||
A *workflow file* is a portable JSON representation of an ``AutoWorkflow`` that
|
||||
can be exchanged between mandates / instances / installations. It contains the
|
||||
graph, the entry-points (invocations), and a small set of metadata under the
|
||||
``$``-prefixed envelope keys.
|
||||
|
||||
Persistence-bound fields (``id``, ``mandateId``, ``featureInstanceId``,
|
||||
``currentVersionId``, ``eventId``, ``active``, ``sysCreated*``,
|
||||
``sysModified*``) are NEVER part of the file — they are stripped on export and
|
||||
re-derived on import.
|
||||
|
||||
Reference: ``wiki/c-work/1-plan/2026-04-pwg-pilot-mietzinsbestaetigung-workflow.md``
|
||||
"""
|
||||
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
import logging
|
||||
import time
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Version stamp written into every exported file; imports hard-fail on any
# other value (see validateFileEnvelope).
WORKFLOW_FILE_SCHEMA_VERSION = "1.0"
# Discriminator stored under the "$kind" envelope key.
WORKFLOW_FILE_KIND = "poweron.workflow"
# Canonical double extension for exported workflow files.
WORKFLOW_FILE_EXTENSION = ".workflow.json"

# Persistence-bound fields stripped on export and re-derived on import
# (see module docstring).
_PERSISTENCE_FIELDS = (
    "id",
    "mandateId",
    "featureInstanceId",
    "currentVersionId",
    "eventId",
    "active",
    "templateSourceId",
    "sysCreatedBy",
    "sysCreatedAt",
    "sysModifiedBy",
    "sysModifiedAt",
)

# "$"-prefixed metadata keys copied verbatim through validation.
_ENVELOPE_KEYS = (
    "$schemaVersion",
    "$kind",
    "$exportedAt",
    "$gatewayVersion",
)

# Workflow attributes that are portable between mandates / installations.
_PORTABLE_WORKFLOW_FIELDS = (
    "label",
    "description",
    "tags",
    "templateScope",
    "sharedReadOnly",
    "notifyOnFailure",
    "isTemplate",
    "graph",
    "invocations",
)
|
||||
|
||||
|
||||
class WorkflowFileSchemaError(ValueError):
    """Raised when a workflow file does not conform to the expected schema."""
|
||||
|
||||
|
||||
def isWorkflowFileEnvelope(payload: Any) -> bool:
    """Quick content-sniff used by the UDB to decide whether a file is a
    workflow envelope (without raising on malformed input)."""
    if not isinstance(payload, dict):
        return False
    # Either the explicit discriminator matches, or the payload at least
    # carries a schema version together with a graph object.
    hasKindMarker = payload.get("$kind") == WORKFLOW_FILE_KIND
    hasVersionAndGraph = (
        "$schemaVersion" in payload and isinstance(payload.get("graph"), dict)
    )
    return hasKindMarker or hasVersionAndGraph
|
||||
|
||||
|
||||
def _normalizeNodePosition(node: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Canonicalize node coordinates to top-level ``x`` / ``y``.
|
||||
|
||||
The canvas uses top-level ``x`` / ``y``; the agent ``addNode`` tool also
|
||||
accepts ``position={x, y}``. Files may use either (or both) shape — pick
|
||||
whatever is present and persist the canonical form.
|
||||
"""
|
||||
if not isinstance(node, dict):
|
||||
return node
|
||||
out = dict(node)
|
||||
pos = out.pop("position", None)
|
||||
x = out.get("x")
|
||||
y = out.get("y")
|
||||
if x is None and isinstance(pos, dict):
|
||||
x = pos.get("x")
|
||||
if y is None and isinstance(pos, dict):
|
||||
y = pos.get("y")
|
||||
if x is None:
|
||||
x = 0
|
||||
if y is None:
|
||||
y = 0
|
||||
out["x"] = x
|
||||
out["y"] = y
|
||||
return out
|
||||
|
||||
|
||||
def normalizeGraph(graph: Any) -> Dict[str, Any]:
    """Return a graph dict with ``nodes`` and ``connections`` lists, node
    coordinates normalized to top-level ``x`` / ``y``."""
    if not isinstance(graph, dict):
        return {"nodes": [], "connections": []}
    rawNodes = graph.get("nodes") or []
    rawConnections = graph.get("connections") or []
    # Non-list payloads are treated as empty rather than raising.
    if not isinstance(rawNodes, list):
        rawNodes = []
    if not isinstance(rawConnections, list):
        rawConnections = []
    # Only dict entries survive; everything else is silently dropped.
    nodes = [_normalizeNodePosition(entry) for entry in rawNodes if isinstance(entry, dict)]
    connections = [entry for entry in rawConnections if isinstance(entry, dict)]
    return {"nodes": nodes, "connections": connections}
|
||||
|
||||
|
||||
def _stripPersistenceFields(workflowDict: Dict[str, Any]) -> Dict[str, Any]:
    """Return a copy of *workflowDict* with all persistence-bound fields removed."""
    return {
        key: value
        for key, value in workflowDict.items()
        if key not in _PERSISTENCE_FIELDS
    }
|
||||
|
||||
|
||||
def buildFileFromWorkflow(
    workflowDict: Dict[str, Any],
    gatewayVersion: Optional[str] = None,
) -> Dict[str, Any]:
    """Build a portable workflow-file envelope from an ``AutoWorkflow`` row.

    Strips persistence-bound fields, normalizes the graph, and prepends the
    ``$``-envelope keys.
    """
    if not isinstance(workflowDict, dict):
        raise WorkflowFileSchemaError("workflowDict must be a dict")

    # Envelope metadata goes in first so the "$" keys lead the serialized file.
    envelope: Dict[str, Any] = {
        "$schemaVersion": WORKFLOW_FILE_SCHEMA_VERSION,
        "$kind": WORKFLOW_FILE_KIND,
        "$exportedAt": _isoTimestamp(),
    }
    if gatewayVersion:
        envelope["$gatewayVersion"] = str(gatewayVersion)

    portable = _stripPersistenceFields(workflowDict)
    for fieldName in _PORTABLE_WORKFLOW_FIELDS:
        if fieldName not in portable:
            continue
        fieldValue = portable[fieldName]
        # The graph is always persisted in canonical (top-level x/y) form.
        if fieldName == "graph":
            fieldValue = normalizeGraph(fieldValue)
        envelope[fieldName] = fieldValue

    return envelope
|
||||
|
||||
|
||||
def validateFileEnvelope(
    payload: Any,
    knownNodeTypes: Optional[List[str]] = None,
) -> Tuple[Dict[str, Any], List[str]]:
    """Validate a workflow-file envelope.

    Returns ``(normalizedEnvelope, warnings)``. Raises
    ``WorkflowFileSchemaError`` on hard errors (unknown schema version,
    missing graph, unknown node types).

    :param payload: the parsed JSON of a ``.workflow.json`` file.
    :param knownNodeTypes: node-type ids registered in this gateway; when
        given, any node referencing an unknown type is a hard error.
    """
    if not isinstance(payload, dict):
        raise WorkflowFileSchemaError("Workflow file must be a JSON object")

    # --- Hard checks: schema version and kind discriminator ---------------
    schemaVersion = payload.get("$schemaVersion")
    if not schemaVersion:
        raise WorkflowFileSchemaError(
            "Missing $schemaVersion — file is not a recognized workflow file"
        )
    # Only the exact supported version is accepted — no forward/backward
    # compatibility attempted at this layer.
    if schemaVersion != WORKFLOW_FILE_SCHEMA_VERSION:
        raise WorkflowFileSchemaError(
            f"Unsupported $schemaVersion '{schemaVersion}' "
            f"(this gateway supports '{WORKFLOW_FILE_SCHEMA_VERSION}')"
        )

    # $kind is optional (older files may omit it) but must match if present.
    kind = payload.get("$kind")
    if kind and kind != WORKFLOW_FILE_KIND:
        raise WorkflowFileSchemaError(
            f"Unexpected $kind '{kind}' (expected '{WORKFLOW_FILE_KIND}')"
        )

    # --- Required body fields --------------------------------------------
    label = payload.get("label")
    if not isinstance(label, str) or not label.strip():
        raise WorkflowFileSchemaError("Field 'label' is required and must be a non-empty string")

    graph = payload.get("graph")
    if not isinstance(graph, dict):
        raise WorkflowFileSchemaError("Field 'graph' is required and must be an object")

    normalizedGraph = normalizeGraph(graph)
    warnings: List[str] = []

    # An empty graph is legal but almost certainly a mistake — warn only.
    if not normalizedGraph["nodes"]:
        warnings.append("Workflow has no nodes")

    # Unknown node types are a hard error: importing them would produce a
    # workflow that cannot execute on this gateway.
    if knownNodeTypes is not None:
        knownSet = set(knownNodeTypes)
        unknownTypes = []
        for node in normalizedGraph["nodes"]:
            nodeType = node.get("type")
            if nodeType and nodeType not in knownSet:
                unknownTypes.append(nodeType)
        if unknownTypes:
            uniqueUnknown = sorted(set(unknownTypes))
            raise WorkflowFileSchemaError(
                "Workflow file references unknown node type(s) not registered in this gateway: "
                + ", ".join(uniqueUnknown)
            )

    # Dangling connection endpoints are soft errors — keep them but warn so
    # the operator can fix the graph after import.
    nodeIds = {n.get("id") for n in normalizedGraph["nodes"] if n.get("id")}
    for c in normalizedGraph["connections"]:
        src = c.get("source")
        tgt = c.get("target")
        if src and src not in nodeIds:
            warnings.append(f"Connection source '{src}' is not a known node id")
        if tgt and tgt not in nodeIds:
            warnings.append(f"Connection target '{tgt}' is not a known node id")

    # --- Rebuild the envelope with only the allowed keys ------------------
    # Anything outside _ENVELOPE_KEYS / _PORTABLE_WORKFLOW_FIELDS is dropped.
    out: Dict[str, Any] = {}
    for k in _ENVELOPE_KEYS:
        if k in payload:
            out[k] = payload[k]
    for field in _PORTABLE_WORKFLOW_FIELDS:
        if field in payload:
            out[field] = payload[field]
    # The normalized graph replaces the raw one unconditionally.
    out["graph"] = normalizedGraph

    return out, warnings
|
||||
|
||||
|
||||
def envelopeToWorkflowData(
    envelope: Dict[str, Any],
    mandateId: str,
    featureInstanceId: str,
) -> Dict[str, Any]:
    """Convert a validated workflow-file envelope into a dict suitable for
    ``GraphicalEditorObjects.createWorkflow`` / ``updateWorkflow``.

    Imports are always inactive — operators must explicitly activate them.
    Persistence-bound fields are NEVER copied from the envelope.
    """
    # active=False: imported workflows must be reviewed before scheduling.
    data: Dict[str, Any] = {
        "mandateId": mandateId,
        "featureInstanceId": featureInstanceId,
        "active": False,
    }
    data.update(
        {name: envelope[name] for name in _PORTABLE_WORKFLOW_FIELDS if name in envelope}
    )
    # Guard against a missing/empty label slipping past validation.
    if not data.get("label"):
        data["label"] = "Imported Workflow"
    if "graph" in data:
        data["graph"] = normalizeGraph(data["graph"])
    return data
|
||||
|
||||
|
||||
def _isoTimestamp() -> str:
|
||||
"""UTC timestamp in ISO 8601 format (used for ``$exportedAt``)."""
|
||||
return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
|
||||
|
||||
|
||||
def buildFileName(label: str) -> str:
    """Build a safe filename ``<slug>.workflow.json`` from a workflow label."""
    normalized = (label or "workflow").strip().lower()
    pieces = []
    for char in normalized:
        # Keep alphanumerics and -/_; map common separators to "-";
        # drop everything else.
        if char.isalnum() or char in "-_":
            pieces.append(char)
        elif char in " :/\\.":
            pieces.append("-")
    slug = "".join(pieces).strip("-") or "workflow"
    # Collapse runs of dashes produced by consecutive separators.
    while "--" in slug:
        slug = slug.replace("--", "-")
    # Cap the slug at 80 chars so the filename stays filesystem-friendly.
    return f"{slug[:80]}{WORKFLOW_FILE_EXTENSION}"
|
||||
|
|
@ -653,6 +653,62 @@ class GraphicalEditorObjects:
|
|||
})
|
||||
return dict(updated)
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Workflow File IO (versioned envelope export/import)
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def exportWorkflowToDict(self, workflowId: str) -> Optional[Dict[str, Any]]:
|
||||
"""Export an existing workflow as a portable file envelope (dict).
|
||||
|
||||
The returned dict is the canonical workflow-file payload (versioned
|
||||
envelope) and can be JSON-serialized as-is. Returns ``None`` if the
|
||||
workflow does not exist for this mandate.
|
||||
"""
|
||||
from modules.features.graphicalEditor._workflowFileSchema import buildFileFromWorkflow
|
||||
|
||||
wf = self.getWorkflow(workflowId)
|
||||
if not wf:
|
||||
return None
|
||||
return buildFileFromWorkflow(wf)
|
||||
|
||||
    def importWorkflowFromDict(
        self,
        envelope: Dict[str, Any],
        existingWorkflowId: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Import a workflow-file envelope.

        Validates the envelope, then either creates a new workflow (default)
        or replaces the graph + invocations of an existing workflow when
        ``existingWorkflowId`` is given. Imports are always saved with
        ``active=False`` so operators can review before scheduling.

        :param envelope: parsed workflow-file payload (versioned envelope).
        :param existingWorkflowId: when set, update that workflow in place
            instead of creating a new one.
        :returns: ``{"workflow": ..., "warnings": [...], "created": bool}``.
        :raises WorkflowFileSchemaError: if the envelope fails validation.
        :raises ValueError: if ``existingWorkflowId`` is not found in this
            mandate.
        """
        from modules.features.graphicalEditor._workflowFileSchema import (
            envelopeToWorkflowData,
            validateFileEnvelope,
        )
        from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES

        # Validate against the node types registered in THIS gateway so an
        # import cannot produce an unexecutable workflow.
        knownTypes = [n.get("id") for n in STATIC_NODE_TYPES if isinstance(n, dict) and n.get("id")]
        normalizedEnvelope, warnings = validateFileEnvelope(envelope, knownNodeTypes=knownTypes)
        # Re-derive persistence fields from this instance; the envelope never
        # carries them.
        data = envelopeToWorkflowData(
            normalizedEnvelope,
            mandateId=self.mandateId,
            featureInstanceId=self.featureInstanceId,
        )

        if existingWorkflowId:
            # Replace-in-place path: the target must already belong to this
            # mandate (getWorkflow is mandate-scoped).
            existing = self.getWorkflow(existingWorkflowId)
            if not existing:
                raise ValueError(
                    f"Cannot replace workflow {existingWorkflowId}: not found in this mandate"
                )
            updated = self.updateWorkflow(existingWorkflowId, data) or {}
            return {"workflow": updated, "warnings": warnings, "created": False}

        created = self.createWorkflow(data)
        return {"workflow": created, "warnings": warnings, "created": True}
|
||||
|
||||
|
||||
# Backward-compatible alias (legacy name for GraphicalEditorObjects)
Automation2Objects = GraphicalEditorObjects
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ CLICKUP_NODES = [
|
|||
"description": t("Aufgaben in einem Workspace suchen"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "clickup"},
|
||||
"description": t("ClickUp-Verbindung")},
|
||||
{"name": "teamId", "type": "string", "required": True, "frontendType": "text",
|
||||
"description": t("Team-/Workspace-ID")},
|
||||
|
|
@ -44,6 +45,7 @@ CLICKUP_NODES = [
|
|||
"description": t("Aufgaben einer Liste auflisten"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "clickup"},
|
||||
"description": t("ClickUp-Verbindung")},
|
||||
{"name": "pathQuery", "type": "string", "required": True, "frontendType": "clickupList",
|
||||
"frontendOptions": {"dependsOn": "connectionReference"},
|
||||
|
|
@ -68,6 +70,7 @@ CLICKUP_NODES = [
|
|||
"description": t("Eine Aufgabe abrufen"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "clickup"},
|
||||
"description": t("ClickUp-Verbindung")},
|
||||
{"name": "taskId", "type": "string", "required": False, "frontendType": "text",
|
||||
"description": t("Task-ID")},
|
||||
|
|
@ -89,6 +92,7 @@ CLICKUP_NODES = [
|
|||
"description": t("Aufgabe erstellen"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "clickup"},
|
||||
"description": t("ClickUp-Verbindung")},
|
||||
{"name": "teamId", "type": "string", "required": False, "frontendType": "text",
|
||||
"description": t("Workspace")},
|
||||
|
|
@ -134,6 +138,7 @@ CLICKUP_NODES = [
|
|||
"description": t("Felder der Aufgabe ändern"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "clickup"},
|
||||
"description": t("ClickUp-Verbindung")},
|
||||
{"name": "taskId", "type": "string", "required": False, "frontendType": "text",
|
||||
"description": t("Task-ID")},
|
||||
|
|
@ -159,6 +164,7 @@ CLICKUP_NODES = [
|
|||
"description": t("Datei an Task anhängen"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "clickup"},
|
||||
"description": t("ClickUp-Verbindung")},
|
||||
{"name": "taskId", "type": "string", "required": False, "frontendType": "text",
|
||||
"description": t("Task-ID")},
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ EMAIL_NODES = [
|
|||
"description": t("Neue E-Mails prüfen"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "msft"},
|
||||
"description": t("E-Mail-Konto Verbindung")},
|
||||
{"name": "folder", "type": "string", "required": False, "frontendType": "text",
|
||||
"description": t("Ordner"), "default": "Inbox"},
|
||||
|
|
@ -40,6 +41,7 @@ EMAIL_NODES = [
|
|||
"description": t("E-Mails suchen"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "msft"},
|
||||
"description": t("E-Mail-Konto Verbindung")},
|
||||
{"name": "query", "type": "string", "required": False, "frontendType": "text",
|
||||
"description": t("Suchbegriff"), "default": ""},
|
||||
|
|
@ -75,6 +77,7 @@ EMAIL_NODES = [
|
|||
"description": t("E-Mail-Entwurf erstellen"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "msft"},
|
||||
"description": t("E-Mail-Konto")},
|
||||
{"name": "subject", "type": "string", "required": True, "frontendType": "text",
|
||||
"description": t("Betreff")},
|
||||
|
|
@ -82,10 +85,15 @@ EMAIL_NODES = [
|
|||
"description": t("Inhalt")},
|
||||
{"name": "to", "type": "string", "required": False, "frontendType": "text",
|
||||
"description": t("Empfänger"), "default": ""},
|
||||
{"name": "attachments", "type": "json", "required": False, "frontendType": "attachmentBuilder",
|
||||
"description": t(
|
||||
"Anhänge: Liste von { contentRef | csvFromVariable | base64Content, name, mimeType }. "
|
||||
"Per Wire befüllbar (z.B. CSV aus data.consolidate)."),
|
||||
"default": []},
|
||||
],
|
||||
"inputs": 1,
|
||||
"outputs": 1,
|
||||
"inputPorts": {0: {"accepts": ["EmailDraft", "AiResult", "Transit"]}},
|
||||
"inputPorts": {0: {"accepts": ["EmailDraft", "AiResult", "Transit", "ConsolidateResult", "DocumentList"]}},
|
||||
"outputPorts": {0: {"schema": "ActionResult"}},
|
||||
"meta": {"icon": "mdi-email-edit", "color": "#1976D2", "usesAi": False},
|
||||
"_method": "outlook",
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ SHAREPOINT_NODES = [
|
|||
"description": t("Datei nach Pfad oder Suche finden"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "msft"},
|
||||
"description": t("SharePoint-Verbindung")},
|
||||
{"name": "searchQuery", "type": "string", "required": True, "frontendType": "text",
|
||||
"description": t("Suchanfrage oder Pfad")},
|
||||
|
|
@ -34,6 +35,7 @@ SHAREPOINT_NODES = [
|
|||
"description": t("Inhalt aus Datei extrahieren"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "msft"},
|
||||
"description": t("SharePoint-Verbindung")},
|
||||
{"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFile",
|
||||
"frontendOptions": {"dependsOn": "connectionReference"},
|
||||
|
|
@ -54,6 +56,7 @@ SHAREPOINT_NODES = [
|
|||
"description": t("Datei zu SharePoint hochladen"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "msft"},
|
||||
"description": t("SharePoint-Verbindung")},
|
||||
{"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFolder",
|
||||
"frontendOptions": {"dependsOn": "connectionReference"},
|
||||
|
|
@ -74,6 +77,7 @@ SHAREPOINT_NODES = [
|
|||
"description": t("Dateien in Ordner auflisten"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "msft"},
|
||||
"description": t("SharePoint-Verbindung")},
|
||||
{"name": "pathQuery", "type": "string", "required": False, "frontendType": "sharepointFolder",
|
||||
"frontendOptions": {"dependsOn": "connectionReference"},
|
||||
|
|
@ -94,6 +98,7 @@ SHAREPOINT_NODES = [
|
|||
"description": t("Datei vom Pfad herunterladen"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "msft"},
|
||||
"description": t("SharePoint-Verbindung")},
|
||||
{"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFile",
|
||||
"frontendOptions": {"dependsOn": "connectionReference"},
|
||||
|
|
@ -114,6 +119,7 @@ SHAREPOINT_NODES = [
|
|||
"description": t("Datei an Ziel kopieren"),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "msft"},
|
||||
"description": t("SharePoint-Verbindung")},
|
||||
{"name": "sourcePath", "type": "string", "required": True, "frontendType": "sharepointFile",
|
||||
"frontendOptions": {"dependsOn": "connectionReference"},
|
||||
|
|
|
|||
|
|
@ -34,6 +34,7 @@ TRUSTEE_NODES = [
|
|||
"description": t("Dokumenttyp und Daten aus PDF/JPG per AI extrahieren."),
|
||||
"parameters": [
|
||||
{"name": "connectionReference", "type": "string", "required": False, "frontendType": "userConnection",
|
||||
"frontendOptions": {"authority": "msft"},
|
||||
"description": t("SharePoint-Verbindung"), "default": ""},
|
||||
{"name": "sharepointFolder", "type": "string", "required": False, "frontendType": "sharepointFolder",
|
||||
"frontendOptions": {"dependsOn": "connectionReference"},
|
||||
|
|
@ -89,4 +90,42 @@ TRUSTEE_NODES = [
|
|||
"_method": "trustee",
|
||||
"_action": "syncToAccounting",
|
||||
},
|
||||
{
|
||||
"id": "trustee.queryData",
|
||||
"category": "trustee",
|
||||
"label": t("Treuhand-Daten abfragen"),
|
||||
"description": t("Daten aus der Trustee-DB lesen (Lookup, Aggregation, Roh-Export). Pendant zu refreshAccountingData ohne externen Sync."),
|
||||
"parameters": [
|
||||
{"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
|
||||
"description": t("Trustee Feature-Instanz-ID")},
|
||||
{"name": "mode", "type": "string", "required": True, "frontendType": "select",
|
||||
"frontendOptions": {"options": ["lookup", "raw", "aggregate"]},
|
||||
"description": t("Abfragemodus"), "default": "lookup"},
|
||||
{"name": "entity", "type": "string", "required": True, "frontendType": "select",
|
||||
"frontendOptions": {"options": ["tenantWithRent", "contact", "journalLines", "accounts", "balances"]},
|
||||
"description": t("Entität, die gelesen werden soll"), "default": "tenantWithRent"},
|
||||
{"name": "tenantNameRef", "type": "string", "required": False, "frontendType": "text",
|
||||
"frontendOptions": {"dependsOn": "entity", "showWhen": ["tenantWithRent", "contact"]},
|
||||
"description": t("Mietername (oder {{wire.feld}} aus Upstream)"), "default": ""},
|
||||
{"name": "tenantAddressRef", "type": "string", "required": False, "frontendType": "text",
|
||||
"frontendOptions": {"dependsOn": "entity", "showWhen": ["tenantWithRent", "contact"]},
|
||||
"description": t("Mieteradresse (Toleranz für Tippfehler)"), "default": ""},
|
||||
{"name": "period", "type": "string", "required": False, "frontendType": "text",
|
||||
"frontendOptions": {"dependsOn": "entity", "showWhen": ["tenantWithRent", "journalLines", "balances"]},
|
||||
"description": t("Zeitraum (YYYY oder YYYY-MM-DD/YYYY-MM-DD)"), "default": ""},
|
||||
{"name": "rentAccountPattern", "type": "string", "required": False, "frontendType": "text",
|
||||
"frontendOptions": {"dependsOn": "entity", "showWhen": ["tenantWithRent"]},
|
||||
"description": t("Konto-Filter für Mietzins (z.B. '6000-6099' oder '6*')"), "default": ""},
|
||||
{"name": "filterJson", "type": "string", "required": False, "frontendType": "textarea",
|
||||
"frontendOptions": {"dependsOn": "mode", "showWhen": ["raw", "aggregate"]},
|
||||
"description": t("Optionaler JSON-Filter für mode=raw/aggregate"), "default": ""},
|
||||
],
|
||||
"inputs": 1,
|
||||
"outputs": 1,
|
||||
"inputPorts": {0: {"accepts": ["Transit", "AiResult", "ConsolidateResult"]}},
|
||||
"outputPorts": {0: {"schema": "ActionResult"}},
|
||||
"meta": {"icon": "mdi-database-search", "color": "#4CAF50", "usesAi": False},
|
||||
"_method": "trustee",
|
||||
"_action": "queryData",
|
||||
},
|
||||
]
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ import math
|
|||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, Path, Query, Body, Request, HTTPException
|
||||
from fastapi.responses import JSONResponse, StreamingResponse
|
||||
from fastapi.responses import JSONResponse, StreamingResponse, Response
|
||||
from modules.auth import limiter, getRequestContext, RequestContext
|
||||
from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict
|
||||
from modules.routes.routeHelpers import _applyFiltersAndSort
|
||||
|
|
@ -135,6 +135,58 @@ def get_node_types(
|
|||
return result
|
||||
|
||||
|
||||
@router.get("/{instanceId}/options/user.connection")
@limiter.limit("60/minute")
def get_user_connection_options(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    authority: Optional[str] = Query(None, description="Optional authority filter (e.g. 'msft', 'google', 'clickup', 'local')"),
    activeOnly: bool = Query(True, description="If true (default), only ACTIVE connections are returned"),
    context: RequestContext = Depends(getRequestContext),
) -> dict:
    """Return current user's UserConnections as { options: [{ value, label }] }.

    Used by node parameters with frontendType='userConnection'. Optional
    `authority` lets a node declare which provider it expects (e.g. SharePoint
    nodes pass authority=msft so only Microsoft connections show up).
    """
    _validateInstanceAccess(instanceId, context)
    if not context.user:
        raise HTTPException(status_code=401, detail=routeApiMsg("Authentication required"))
    from modules.interfaces.interfaceDbApp import getRootInterface
    rootInterface = getRootInterface()
    try:
        connections = rootInterface.getUserConnections(str(context.user.id)) or []
    except Exception as e:
        # Degrade gracefully: an empty dropdown is better than a 500 here.
        logger.error("get_user_connection_options: failed to load connections: %s", e, exc_info=True)
        return {"options": []}
    # Normalize the authority filter; empty string means "no filter".
    wanted = (authority or "").strip().lower() or None
    options: List[Dict[str, str]] = []
    for conn in connections:
        # status/authority may be enums or plain strings — the
        # `.value if hasattr` dance handles both shapes.
        connStatus = getattr(conn, "status", None)
        statusVal = connStatus.value if hasattr(connStatus, "value") else str(connStatus or "")
        if activeOnly and statusVal.lower() != "active":
            continue
        connAuthority = getattr(conn, "authority", None)
        authorityVal = (connAuthority.value if hasattr(connAuthority, "value") else str(connAuthority or "")).lower()
        if wanted and authorityVal != wanted:
            continue
        username = getattr(conn, "externalUsername", "") or ""
        email = getattr(conn, "externalEmail", "") or ""
        connId = str(getattr(conn, "id", "") or "")
        # Label shape: "[authority] username — email", falling back to the id.
        labelParts = [p for p in [username, email] if p]
        label = " — ".join(labelParts) if labelParts else connId
        if authorityVal:
            label = f"[{authorityVal}] {label}"
        # Stable reference form "connection:<authority>:<username>" when both
        # parts are known; otherwise fall back to the raw connection id.
        value = f"connection:{authorityVal}:{username}" if authorityVal and username else connId
        options.append({"value": value, "label": label})
    logger.info(
        "graphicalEditor user.connection options: instanceId=%s authority=%s -> %d options",
        instanceId, wanted, len(options),
    )
    return {"options": options}
|
||||
|
||||
|
||||
@router.post("/{instanceId}/execute")
|
||||
@limiter.limit("30/minute")
|
||||
async def post_execute(
|
||||
|
|
@ -753,15 +805,32 @@ async def _runEditorAgent(
|
|||
|
||||
systemPrompt = (
|
||||
"You are a workflow EDITOR assistant for the GraphicalEditor. "
|
||||
"Your ONLY job is to BUILD or MODIFY the workflow graph (nodes + connections) "
|
||||
"for the user — you must NEVER execute the workflow or any of its actions. "
|
||||
"Even when the user says 'create a workflow that sends an email', you build the "
|
||||
"graph (e.g. add an email node, connect it) — you do NOT actually send an email. "
|
||||
"\n\nGraph-mutating tools: readWorkflowGraph, listAvailableNodeTypes, "
|
||||
"Your job is to MANAGE workflows for the user — create, rename, "
|
||||
"import/export, edit the graph (nodes + connections) — but you must "
|
||||
"NEVER execute a workflow or any of its actions. Even when the user "
|
||||
"says 'create a workflow that sends an email', you build the graph "
|
||||
"(add an email node, connect it) — you do NOT actually send an email."
|
||||
"\n\nAvailable tools (all valid — use whichever the user's intent calls for):"
|
||||
"\n Graph-mutating: readWorkflowGraph, listAvailableNodeTypes, "
|
||||
"describeNodeType, addNode, removeNode, connectNodes, setNodeParameter, "
|
||||
"autoLayoutWorkflow, validateGraph. "
|
||||
"Connection discovery (for parameters of frontendType='userConnection'): listConnections."
|
||||
"\n\nMandatory build sequence:"
|
||||
"autoLayoutWorkflow, validateGraph."
|
||||
"\n Workflow lifecycle: createWorkflow (new empty workflow), "
|
||||
"updateWorkflowMetadata (rename / change description / tags / activate), "
|
||||
"createWorkflowFromFile (import .workflow.json from UDB), "
|
||||
"exportWorkflowToFile (download envelope), deleteWorkflow (destructive — "
|
||||
"ALWAYS confirm with the user before calling)."
|
||||
"\n History: listWorkflowHistory, readWorkflowMessages."
|
||||
"\n Connections (for parameters of frontendType='userConnection'): listConnections."
|
||||
"\n\nIntent → tool mapping (do NOT improvise destructive paths):"
|
||||
"\n • 'rename / umbenennen / call it X / nenne … um' → updateWorkflowMetadata({label: \"X\"})."
|
||||
"\n • 'create empty workflow / new workflow / leeren Workflow' → createWorkflow({label: \"…\"})."
|
||||
"\n • 'import / load from file' → createWorkflowFromFile({fileId: …})."
|
||||
"\n • 'export / save to file / download' → exportWorkflowToFile()."
|
||||
"\n • 'activate / deactivate' → updateWorkflowMetadata({active: true|false})."
|
||||
"\n NEVER batch-call removeNode to 'rebuild' or 'rename' a workflow — that "
|
||||
"destroys the user's work. removeNode is for removing ONE specific node the "
|
||||
"user explicitly asked to delete."
|
||||
"\n\nMandatory build sequence WHEN editing the graph:"
|
||||
"\n1. readWorkflowGraph — understand current state."
|
||||
"\n2. listAvailableNodeTypes — find candidate node ids."
|
||||
"\n3. For EACH node type you plan to add: call describeNodeType(nodeType=...) "
|
||||
|
|
@ -781,7 +850,7 @@ async def _runEditorAgent(
|
|||
"\n\nIf a required parameter cannot be filled from the user's request and has "
|
||||
"no safe default, ask the user once for that specific value (e.g. recipient "
|
||||
"address, target language, prompt text) instead of leaving the field blank. "
|
||||
"Respond concisely in the user's language and list what you changed in the graph."
|
||||
"Respond concisely in the user's language and list what you changed."
|
||||
)
|
||||
|
||||
editorConfig = AgentConfig(
|
||||
|
|
@ -1191,6 +1260,128 @@ def delete_workflow(
|
|||
return {"success": True}
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Workflow File IO (versioned envelope export/import)
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.post("/{instanceId}/workflows/import")
@limiter.limit("30/minute")
def import_workflow(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    body: dict = Body(
        ...,
        description=(
            "{ envelope: <workflow-file-envelope>, existingWorkflowId?: str, "
            "fileId?: str } — supply EITHER the envelope inline OR a fileId of "
            "a previously uploaded workflow file (.workflow.json)"
        ),
    ),
    context: RequestContext = Depends(getRequestContext),
) -> dict:
    """Import a workflow from a versioned-envelope file.

    Two input modes:
    - ``envelope``: the parsed workflow-file payload (preferred for the agent)
    - ``fileId``: the id of a previously uploaded ``.workflow.json`` in
      Unified-Data-Bar (preferred for the UI "Import" modal)

    On success returns the created/updated workflow plus any non-fatal
    warnings (e.g. dangling connection references). Imports are always
    saved with ``active=False``.

    :raises HTTPException: 400 for a missing/invalid envelope, 404 when
        ``existingWorkflowId`` does not exist in this mandate.
    """
    from modules.features.graphicalEditor._workflowFileSchema import WorkflowFileSchemaError

    mandateId = _validateInstanceAccess(instanceId, context)
    iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)

    envelope = body.get("envelope") if isinstance(body, dict) else None
    fileId = body.get("fileId") if isinstance(body, dict) else None
    existingWorkflowId = body.get("existingWorkflowId") if isinstance(body, dict) else None

    # fileId mode: resolve the uploaded .workflow.json into an envelope.
    if not envelope and fileId:
        envelope = _loadEnvelopeFromFile(str(fileId), context)

    if not envelope:
        raise HTTPException(
            status_code=400,
            detail=routeApiMsg("Body must contain 'envelope' or 'fileId'"),
        )

    try:
        result = iface.importWorkflowFromDict(envelope, existingWorkflowId=existingWorkflowId)
    except WorkflowFileSchemaError as exc:
        # Schema violations are client errors; chain the cause (PEP 3134) so
        # the original traceback survives into server logs.
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    except ValueError as exc:
        # Raised when existingWorkflowId is not found in this mandate.
        raise HTTPException(status_code=404, detail=str(exc)) from exc

    return result
|
||||
|
||||
|
||||
@router.get("/{instanceId}/workflows/{workflowId}/export")
|
||||
@limiter.limit("60/minute")
|
||||
def export_workflow(
|
||||
request: Request,
|
||||
instanceId: str = Path(..., description="Feature instance ID"),
|
||||
workflowId: str = Path(..., description="Workflow ID"),
|
||||
download: bool = Query(False, description="If true, return as file download"),
|
||||
context: RequestContext = Depends(getRequestContext),
|
||||
):
|
||||
"""Export a workflow as a versioned-envelope JSON file.
|
||||
|
||||
With ``download=true`` returns a streaming response with the canonical
|
||||
``<slug>.workflow.json`` filename so the browser triggers a save dialog.
|
||||
Without it returns the envelope inline as JSON (used by the agent and by
|
||||
the editor's "Save to file" → upload-to-UDB flow).
|
||||
"""
|
||||
from modules.features.graphicalEditor._workflowFileSchema import buildFileName
|
||||
|
||||
mandateId = _validateInstanceAccess(instanceId, context)
|
||||
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
|
||||
envelope = iface.exportWorkflowToDict(workflowId)
|
||||
if envelope is None:
|
||||
raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))
|
||||
|
||||
if not download:
|
||||
return {"envelope": envelope, "fileName": buildFileName(envelope.get("label", "workflow"))}
|
||||
|
||||
fileName = buildFileName(envelope.get("label", "workflow"))
|
||||
payload = json.dumps(envelope, ensure_ascii=False, indent=2).encode("utf-8")
|
||||
return Response(
|
||||
content=payload,
|
||||
media_type="application/json",
|
||||
headers={"Content-Disposition": f'attachment; filename="{fileName}"'},
|
||||
)
|
||||
|
||||
|
||||
def _loadEnvelopeFromFile(fileId: str, context: RequestContext) -> Optional[Dict[str, Any]]:
|
||||
"""Load and parse a ``.workflow.json`` file from the Unified-Data-Bar
|
||||
by file id. Returns the parsed envelope dict or raises HTTPException."""
|
||||
try:
|
||||
import modules.interfaces.interfaceDbManagement as interfaceDbManagement
|
||||
mgmt = interfaceDbManagement.getInterface(context.user)
|
||||
rawBytes = mgmt.getFileData(fileId)
|
||||
except Exception as exc:
|
||||
logger.warning("Failed to load workflow file %s: %s", fileId, exc)
|
||||
raise HTTPException(status_code=404, detail=routeApiMsg(f"File {fileId} not found"))
|
||||
|
||||
if not rawBytes:
|
||||
raise HTTPException(status_code=404, detail=routeApiMsg(f"File {fileId} is empty"))
|
||||
|
||||
try:
|
||||
if isinstance(rawBytes, bytes):
|
||||
text = rawBytes.decode("utf-8")
|
||||
else:
|
||||
text = str(rawBytes)
|
||||
return json.loads(text)
|
||||
except Exception as exc:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=routeApiMsg(f"File {fileId} is not valid JSON: {exc}"),
|
||||
)
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Runs and Resume
|
||||
# -------------------------------------------------------------------------
|
||||
|
|
|
|||
|
|
@ -56,6 +56,7 @@ class ConnectorConfigField(BaseModel):
|
|||
secret: bool = False
|
||||
required: bool = True
|
||||
placeholder: Optional[str] = None
|
||||
suggestions: Optional[List[str]] = None
|
||||
|
||||
|
||||
class BaseAccountingConnector(ABC):
|
||||
|
|
|
|||
|
|
@ -215,17 +215,34 @@ class AccountingDataSync:
|
|||
logger.error(f"Compute balances failed: {e}")
|
||||
summary["errors"].append(f"Balances: {e}")
|
||||
|
||||
# Update config with last import timestamp
|
||||
try:
|
||||
cfgId = cfgRecord.get("id")
|
||||
if cfgId:
|
||||
self._if.db.recordModify(TrusteeAccountingConfig, cfgId, {
|
||||
"lastSyncAt": time.time(),
|
||||
"lastSyncStatus": "success" if not summary["errors"] else "partial",
|
||||
"lastSyncErrorMessage": "; ".join(summary["errors"])[:500] if summary["errors"] else None,
|
||||
})
|
||||
except Exception:
|
||||
pass
|
||||
cfgId = cfgRecord.get("id")
|
||||
if cfgId:
|
||||
corePayload = {
|
||||
"lastSyncAt": time.time(),
|
||||
"lastSyncStatus": "success" if not summary["errors"] else "partial",
|
||||
"lastSyncErrorMessage": "; ".join(summary["errors"])[:500] if summary["errors"] else None,
|
||||
}
|
||||
try:
|
||||
self._if.db.recordModify(TrusteeAccountingConfig, cfgId, corePayload)
|
||||
except Exception as coreErr:
|
||||
logger.exception(f"AccountingDataSync: failed to write core lastSync* fields for cfg {cfgId}: {coreErr}")
|
||||
summary["errors"].append(f"Persist lastSync core: {coreErr}")
|
||||
extPayload = {
|
||||
"lastSyncDateFrom": dateFrom,
|
||||
"lastSyncDateTo": dateTo,
|
||||
"lastSyncCounts": {
|
||||
"accounts": int(summary.get("accounts", 0)),
|
||||
"journalEntries": int(summary.get("journalEntries", 0)),
|
||||
"journalLines": int(summary.get("journalLines", 0)),
|
||||
"contacts": int(summary.get("contacts", 0)),
|
||||
"accountBalances": int(summary.get("accountBalances", 0)),
|
||||
},
|
||||
}
|
||||
try:
|
||||
self._if.db.recordModify(TrusteeAccountingConfig, cfgId, extPayload)
|
||||
except Exception as extErr:
|
||||
logger.exception(f"AccountingDataSync: failed to write extended lastSync* fields for cfg {cfgId}: {extErr}")
|
||||
summary["errors"].append(f"Persist lastSync ext: {extErr}")
|
||||
|
||||
summary["finishedAt"] = time.time()
|
||||
summary["durationSeconds"] = round(summary["finishedAt"] - summary["startedAt"], 1)
|
||||
|
|
|
|||
|
|
@ -47,6 +47,10 @@ class AccountingConnectorRma(BaseAccountingConnector):
|
|||
fieldType="text",
|
||||
secret=False,
|
||||
placeholder="https://service.runmyaccounts.com/api/latest/clients/",
|
||||
suggestions=[
|
||||
"https://service.runmyaccounts.com/api/latest/clients/",
|
||||
"https://service.int.runmyaccounts.com/api/latest/clients/",
|
||||
],
|
||||
),
|
||||
ConnectorConfigField(
|
||||
key="clientName",
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
"""Trustee models: TrusteeOrganisation, TrusteeRole, TrusteeAccess, TrusteeContract, TrusteeDocument, TrusteePosition."""
|
||||
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
from typing import Optional, Dict
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from modules.datamodels.datamodelBase import PowerOnModel
|
||||
|
|
@ -818,6 +818,9 @@ class TrusteeAccountingConfig(PowerOnModel):
|
|||
lastSyncAt: Optional[float] = Field(default=None, description="Timestamp of last sync attempt", json_schema_extra={"label": "Letzte Synchronisation"})
|
||||
lastSyncStatus: Optional[str] = Field(default=None, description="Last sync result: success, error, partial", json_schema_extra={"label": "Status"})
|
||||
lastSyncErrorMessage: Optional[str] = Field(default=None, description="Error message when lastSyncStatus is error", json_schema_extra={"label": "Fehlermeldung"})
|
||||
lastSyncDateFrom: Optional[str] = Field(default=None, description="dateFrom (ISO date) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster von"})
|
||||
lastSyncDateTo: Optional[str] = Field(default=None, description="dateTo (ISO date) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster bis"})
|
||||
lastSyncCounts: Optional[Dict[str, int]] = Field(default=None, description="Per-entity counts of the last import (accounts, journalEntries, journalLines, contacts, accountBalances)", json_schema_extra={"label": "Letzte Import-Zaehler"})
|
||||
cachedChartOfAccounts: Optional[str] = Field(default=None, description="JSON-serialised chart of accounts cache (list of {accountNumber, label, accountType})", json_schema_extra={"label": "Cached Kontoplan"})
|
||||
chartCachedAt: Optional[float] = Field(default=None, description="Timestamp when cachedChartOfAccounts was last refreshed", json_schema_extra={"label": "Kontoplan-Cache-Zeitpunkt"})
|
||||
mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate"}})
|
||||
|
|
|
|||
|
|
@ -1643,7 +1643,46 @@ def get_position_sync_status(
|
|||
|
||||
# ===== Accounting Data Import =====
|
||||
|
||||
@router.post("/{instanceId}/accounting/import-data")
|
||||
TRUSTEE_ACCOUNTING_SYNC_JOB_TYPE = "trusteeAccountingSync"
|
||||
|
||||
|
||||
async def _trusteeAccountingSyncJobHandler(job: Dict[str, Any], progressCb) -> Dict[str, Any]:
|
||||
"""BackgroundJob handler: imports accounting data from the external system.
|
||||
|
||||
Reads inputs from `job["payload"]` (dateFrom, dateTo, userId) and runs
|
||||
`AccountingDataSync.importData(...)` in the worker's event loop without
|
||||
blocking the original HTTP request that submitted the job.
|
||||
"""
|
||||
from modules.security.rootAccess import getRootUser
|
||||
from .accounting.accountingDataSync import AccountingDataSync
|
||||
|
||||
instanceId = job["featureInstanceId"]
|
||||
mandateId = job["mandateId"]
|
||||
payload = job.get("payload") or {}
|
||||
rootUser = getRootUser()
|
||||
|
||||
progressCb(5, "Initialisiere Import...")
|
||||
interface = getInterface(rootUser, mandateId=mandateId, featureInstanceId=instanceId)
|
||||
sync = AccountingDataSync(interface)
|
||||
progressCb(10, "Lese Daten vom Buchhaltungssystem...")
|
||||
result = await sync.importData(
|
||||
featureInstanceId=instanceId,
|
||||
mandateId=mandateId,
|
||||
dateFrom=payload.get("dateFrom"),
|
||||
dateTo=payload.get("dateTo"),
|
||||
)
|
||||
progressCb(100, "Import abgeschlossen")
|
||||
return result
|
||||
|
||||
|
||||
try:
|
||||
from modules.serviceCenter.services.serviceBackgroundJobs import registerJobHandler
|
||||
registerJobHandler(TRUSTEE_ACCOUNTING_SYNC_JOB_TYPE, _trusteeAccountingSyncJobHandler)
|
||||
except Exception as _regErr:
|
||||
logger.warning("Failed to register trusteeAccountingSync job handler: %s", _regErr)
|
||||
|
||||
|
||||
@router.post("/{instanceId}/accounting/import-data", status_code=status.HTTP_202_ACCEPTED)
|
||||
@limiter.limit("3/minute")
|
||||
async def import_accounting_data(
|
||||
request: Request,
|
||||
|
|
@ -1651,20 +1690,26 @@ async def import_accounting_data(
|
|||
data: Dict[str, Any] = Body(default={}),
|
||||
context: RequestContext = Depends(getRequestContext)
|
||||
) -> Dict[str, Any]:
|
||||
"""Import accounting data (chart, journal entries, contacts) from the external system into TrusteeData* tables."""
|
||||
"""Submit a background job to import accounting data.
|
||||
|
||||
Returns immediately with `{ jobId }`; clients poll `GET /api/jobs/{jobId}`
|
||||
until status is SUCCESS / ERROR.
|
||||
"""
|
||||
from modules.serviceCenter.services.serviceBackgroundJobs import startJob
|
||||
|
||||
mandateId = _validateInstanceAccess(instanceId, context)
|
||||
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
|
||||
from .accounting.accountingDataSync import AccountingDataSync
|
||||
sync = AccountingDataSync(interface)
|
||||
dateFrom = data.get("dateFrom")
|
||||
dateTo = data.get("dateTo")
|
||||
result = await sync.importData(
|
||||
featureInstanceId=instanceId,
|
||||
payload = {
|
||||
"dateFrom": data.get("dateFrom"),
|
||||
"dateTo": data.get("dateTo"),
|
||||
}
|
||||
jobId = await startJob(
|
||||
TRUSTEE_ACCOUNTING_SYNC_JOB_TYPE,
|
||||
payload,
|
||||
mandateId=mandateId,
|
||||
dateFrom=dateFrom,
|
||||
dateTo=dateTo,
|
||||
featureInstanceId=instanceId,
|
||||
triggeredBy=context.user.id if context.user else None,
|
||||
)
|
||||
return result
|
||||
return {"jobId": jobId, "status": "pending"}
|
||||
|
||||
|
||||
@router.get("/{instanceId}/accounting/import-status")
|
||||
|
|
@ -1695,6 +1740,9 @@ def get_import_status(
|
|||
counts["lastSyncAt"] = cfg.get("lastSyncAt")
|
||||
counts["lastSyncStatus"] = cfg.get("lastSyncStatus")
|
||||
counts["lastSyncErrorMessage"] = cfg.get("lastSyncErrorMessage")
|
||||
counts["lastSyncDateFrom"] = cfg.get("lastSyncDateFrom")
|
||||
counts["lastSyncDateTo"] = cfg.get("lastSyncDateTo")
|
||||
counts["lastSyncCounts"] = cfg.get("lastSyncCounts")
|
||||
return counts
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@ from modules.connectors.connectorDbPostgre import DatabaseConnector, _get_cached
|
|||
from modules.shared.configuration import APP_CONFIG
|
||||
from modules.shared.dbRegistry import registerDatabase
|
||||
from modules.shared.timeUtils import getUtcTimestamp, parseTimestamp
|
||||
from modules.shared.i18nRegistry import resolveText
|
||||
from modules.interfaces.interfaceRbac import getRecordsetWithRBAC
|
||||
from modules.security.rbac import RbacClass
|
||||
from modules.datamodels.datamodelUam import (
|
||||
|
|
@ -1639,7 +1640,7 @@ class AppObjects:
|
|||
if not featureDef.get("autoCreateInstance", False):
|
||||
continue
|
||||
featureCode = featureDef.get("code", featureName)
|
||||
featureLabel = featureDef.get("label", {}).get("en", featureName)
|
||||
featureLabel = resolveText(featureDef.get("label", featureName))
|
||||
instance = featureInterface.createFeatureInstance(
|
||||
featureCode=featureCode,
|
||||
mandateId=mandateId,
|
||||
|
|
|
|||
|
|
@ -935,11 +935,34 @@ async def stripeWebhook(
|
|||
return {"received": True}
|
||||
|
||||
session_dict = session.to_dict_recursive() if hasattr(session, "to_dict_recursive") else dict(session)
|
||||
result = _creditStripeSessionIfNeeded(billingInterface, session_dict, eventId=event_id)
|
||||
logger.info(
|
||||
f"Stripe webhook processed session {result.sessionId}: "
|
||||
f"credited={result.credited}, alreadyCredited={result.alreadyCredited}"
|
||||
)
|
||||
try:
|
||||
result = _creditStripeSessionIfNeeded(billingInterface, session_dict, eventId=event_id)
|
||||
logger.info(
|
||||
f"Stripe webhook processed session {result.sessionId}: "
|
||||
f"credited={result.credited}, alreadyCredited={result.alreadyCredited}"
|
||||
)
|
||||
except HTTPException as he:
|
||||
logger.error(
|
||||
"Stripe webhook %s for session %s failed: status=%s detail=%s metadata=%s amount_total=%s",
|
||||
event_id,
|
||||
session_dict.get("id"),
|
||||
he.status_code,
|
||||
he.detail,
|
||||
session_dict.get("metadata"),
|
||||
session_dict.get("amount_total"),
|
||||
)
|
||||
if 400 <= he.status_code < 500 and event_id:
|
||||
if not billingInterface.getStripeWebhookEventByEventId(event_id):
|
||||
try:
|
||||
billingInterface.createStripeWebhookEvent(event_id)
|
||||
logger.warning(
|
||||
"Marked Stripe event %s as processed (permanent 4xx) to stop retries",
|
||||
event_id,
|
||||
)
|
||||
except Exception as markEx:
|
||||
logger.error("Failed to mark Stripe event %s as processed: %s", event_id, markEx)
|
||||
return {"received": True}
|
||||
raise
|
||||
return {"received": True}
|
||||
|
||||
|
||||
|
|
@ -1036,8 +1059,22 @@ def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None:
|
|||
|
||||
operative = subInterface.getOperativeForMandate(mandateId)
|
||||
hasActivePredecessor = operative is not None and operative["id"] != subscriptionRecordId
|
||||
predecessorIsTrial = (
|
||||
hasActivePredecessor
|
||||
and operative.get("status") == SubscriptionStatusEnum.TRIALING.value
|
||||
)
|
||||
|
||||
if hasActivePredecessor:
|
||||
if hasActivePredecessor and predecessorIsTrial:
|
||||
try:
|
||||
subInterface.forceExpire(operative["id"])
|
||||
logger.info(
|
||||
"Trial subscription %s expired immediately for mandate %s due to paid upgrade %s",
|
||||
operative["id"], mandateId, subscriptionRecordId,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error("Failed to expire trial predecessor %s: %s", operative["id"], e)
|
||||
toStatus = SubscriptionStatusEnum.ACTIVE
|
||||
elif hasActivePredecessor:
|
||||
toStatus = SubscriptionStatusEnum.SCHEDULED
|
||||
if operative.get("recurring", True):
|
||||
operativeStripeId = operative.get("stripeSubscriptionId")
|
||||
|
|
|
|||
|
|
@ -23,6 +23,55 @@ routeApiMsg = apiRouteContext("routeDataFiles")
|
|||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _resolveFileWithScope(currentUser: User, context: RequestContext, fileId: str):
|
||||
"""Returns (managementInterface, fileItem) with RBAC scoped to the file's own mandate/instance.
|
||||
|
||||
Files generated by workflows (e.g. AI report outputs) carry their own
|
||||
mandateId/featureInstanceId. Direct download links via <a href> cannot send
|
||||
custom scope headers, so we resolve the scope from the FileItem itself and
|
||||
re-check RBAC in that scope.
|
||||
|
||||
Returns (None, None) if the file does not exist or the user lacks access
|
||||
in the file's actual scope.
|
||||
"""
|
||||
requestMandateId = str(context.mandateId) if context.mandateId else None
|
||||
requestInstanceId = str(context.featureInstanceId) if context.featureInstanceId else None
|
||||
|
||||
mgmt = interfaceDbManagement.getInterface(
|
||||
currentUser,
|
||||
mandateId=requestMandateId,
|
||||
featureInstanceId=requestInstanceId,
|
||||
)
|
||||
fileItem = mgmt.getFile(fileId)
|
||||
if fileItem:
|
||||
return mgmt, fileItem
|
||||
|
||||
metas = mgmt.db.getRecordset(FileItem, recordFilter={"id": fileId})
|
||||
if not metas:
|
||||
return None, None
|
||||
|
||||
meta = metas[0]
|
||||
fileMandateId = meta.get("mandateId") or None
|
||||
fileInstanceId = meta.get("featureInstanceId") or None
|
||||
|
||||
if not fileMandateId and not fileInstanceId:
|
||||
return None, None
|
||||
|
||||
if fileMandateId == requestMandateId and fileInstanceId == requestInstanceId:
|
||||
return None, None
|
||||
|
||||
scopedMgmt = interfaceDbManagement.getInterface(
|
||||
currentUser,
|
||||
mandateId=fileMandateId,
|
||||
featureInstanceId=fileInstanceId,
|
||||
)
|
||||
fileItem = scopedMgmt.getFile(fileId)
|
||||
if not fileItem:
|
||||
return None, None
|
||||
|
||||
return scopedMgmt, fileItem
|
||||
|
||||
|
||||
async def _autoIndexFile(fileId: str, fileName: str, mimeType: str, user):
|
||||
"""Background task: pre-scan + extraction + knowledge indexing.
|
||||
Step 1: Structure Pre-Scan (AI-free) -> FileContentIndex (persisted)
|
||||
|
|
@ -975,20 +1024,18 @@ def updateFileNeutralize(
|
|||
def get_file(
|
||||
request: Request,
|
||||
fileId: str = Path(..., description="ID of the file"),
|
||||
currentUser: User = Depends(getCurrentUser)
|
||||
currentUser: User = Depends(getCurrentUser),
|
||||
context: RequestContext = Depends(getRequestContext)
|
||||
) -> FileItem:
|
||||
"""Get a file"""
|
||||
"""Get a file. Resolves the file's mandate/instance scope automatically."""
|
||||
try:
|
||||
managementInterface = interfaceDbManagement.getInterface(currentUser)
|
||||
|
||||
# Get file via LucyDOM interface from the database
|
||||
fileData = managementInterface.getFile(fileId)
|
||||
_mgmt, fileData = _resolveFileWithScope(currentUser, context, fileId)
|
||||
if not fileData:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail=f"File with ID {fileId} not found"
|
||||
)
|
||||
|
||||
|
||||
return fileData
|
||||
|
||||
except interfaceDbManagement.FileNotFoundError as e:
|
||||
|
|
@ -1107,23 +1154,17 @@ def download_file(
|
|||
currentUser: User = Depends(getCurrentUser),
|
||||
context: RequestContext = Depends(getRequestContext)
|
||||
) -> Response:
|
||||
"""Download a file. Uses mandate/instance context when present (e.g. from feature pages)."""
|
||||
"""Download a file. Resolves the file's mandate/instance scope automatically,
|
||||
so direct <a href> links work even when X-Mandate-Id / X-Instance-Id headers
|
||||
are not sent by the browser."""
|
||||
try:
|
||||
managementInterface = interfaceDbManagement.getInterface(
|
||||
currentUser,
|
||||
mandateId=str(context.mandateId) if context.mandateId else None,
|
||||
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
|
||||
)
|
||||
|
||||
# Get file data
|
||||
fileData = managementInterface.getFile(fileId)
|
||||
managementInterface, fileData = _resolveFileWithScope(currentUser, context, fileId)
|
||||
if not fileData:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail=f"File with ID {fileId} not found"
|
||||
)
|
||||
|
||||
# Get file content
|
||||
|
||||
fileContent = managementInterface.getFileData(fileId)
|
||||
if not fileContent:
|
||||
raise HTTPException(
|
||||
|
|
@ -1160,15 +1201,15 @@ def preview_file(
|
|||
currentUser: User = Depends(getCurrentUser),
|
||||
context: RequestContext = Depends(getRequestContext)
|
||||
) -> FilePreview:
|
||||
"""Preview a file's content. Uses mandate/instance context when present."""
|
||||
"""Preview a file's content. Resolves the file's mandate/instance scope automatically."""
|
||||
try:
|
||||
managementInterface = interfaceDbManagement.getInterface(
|
||||
currentUser,
|
||||
mandateId=str(context.mandateId) if context.mandateId else None,
|
||||
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
|
||||
)
|
||||
|
||||
# Get file preview using the correct method
|
||||
managementInterface, fileMeta = _resolveFileWithScope(currentUser, context, fileId)
|
||||
if not fileMeta:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail=f"File with ID {fileId} not found"
|
||||
)
|
||||
|
||||
preview = managementInterface.getFileContent(fileId)
|
||||
if not preview:
|
||||
raise HTTPException(
|
||||
|
|
|
|||
107
modules/routes/routeJobs.py
Normal file
107
modules/routes/routeJobs.py
Normal file
|
|
@ -0,0 +1,107 @@
|
|||
# Copyright (c) 2025 Patrick Motsch
|
||||
# All rights reserved.
|
||||
"""HTTP API for the generic background job service.
|
||||
|
||||
Endpoints:
|
||||
- GET /api/jobs/{jobId} -> single job status
|
||||
- GET /api/jobs -> list (filter by jobType, instanceId)
|
||||
|
||||
Access control: a caller may read a job iff they are a member of its mandate
|
||||
(or PlatformAdmin). Jobs without a mandateId (system-wide) are restricted to
|
||||
PlatformAdmin only.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
|
||||
|
||||
from modules.auth import getRequestContext, RequestContext, limiter
|
||||
from modules.serviceCenter.services.serviceBackgroundJobs import (
|
||||
getJobStatus,
|
||||
listJobs,
|
||||
)
|
||||
from modules.shared.i18nRegistry import apiRouteContext
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
routeApiMsg = apiRouteContext("routeJobs")
|
||||
|
||||
router = APIRouter(
|
||||
prefix="/api/jobs",
|
||||
tags=["BackgroundJobs"],
|
||||
responses={404: {"description": "Not found"}},
|
||||
)
|
||||
|
||||
|
||||
def _serialiseJob(job: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Strip system audit fields and ensure JSON-safe types."""
|
||||
return {k: v for k, v in job.items() if not k.startswith("sys")}
|
||||
|
||||
|
||||
def _userHasMandateAccess(context: RequestContext, mandateId: Optional[str]) -> bool:
|
||||
"""Return True if the current user can read jobs for the given mandate scope."""
|
||||
if context is None or context.user is None:
|
||||
return False
|
||||
if context.isPlatformAdmin:
|
||||
return True
|
||||
if mandateId is None:
|
||||
return False
|
||||
from modules.interfaces.interfaceDbApp import getRootInterface
|
||||
from modules.datamodels.datamodelMembership import UserMandate
|
||||
rootIf = getRootInterface()
|
||||
try:
|
||||
memberships = rootIf.db.getRecordset(
|
||||
UserMandate,
|
||||
recordFilter={"userId": context.user.id, "mandateId": mandateId},
|
||||
)
|
||||
return bool(memberships)
|
||||
except Exception as ex:
|
||||
logger.warning(
|
||||
"Mandate access check failed for user=%s mandate=%s: %s",
|
||||
context.user.id, mandateId, ex,
|
||||
)
|
||||
return False
|
||||
|
||||
|
||||
@router.get("/{jobId}")
|
||||
@limiter.limit("60/minute")
|
||||
def get_job(
|
||||
request: Request,
|
||||
jobId: str = Path(..., description="Background job ID"),
|
||||
context: RequestContext = Depends(getRequestContext),
|
||||
) -> Dict[str, Any]:
|
||||
"""Return the current state of one background job."""
|
||||
job = getJobStatus(jobId)
|
||||
if not job:
|
||||
raise HTTPException(status_code=404, detail=routeApiMsg("Job not found"))
|
||||
if not _userHasMandateAccess(context, job.get("mandateId")):
|
||||
raise HTTPException(status_code=403, detail=routeApiMsg("Access denied"))
|
||||
return _serialiseJob(job)
|
||||
|
||||
|
||||
@router.get("")
|
||||
@limiter.limit("30/minute")
|
||||
def list_jobs(
|
||||
request: Request,
|
||||
jobType: Optional[str] = Query(None),
|
||||
mandateId: Optional[str] = Query(None),
|
||||
instanceId: Optional[str] = Query(None, description="Feature instance scope"),
|
||||
limit: int = Query(20, ge=1, le=100),
|
||||
context: RequestContext = Depends(getRequestContext),
|
||||
) -> Dict[str, List[Dict[str, Any]]]:
|
||||
"""List recent jobs filtered by scope. Newest first."""
|
||||
if mandateId is None:
|
||||
if not context or not context.isPlatformAdmin:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=routeApiMsg("mandateId is required (only PlatformAdmin may list system-wide)"),
|
||||
)
|
||||
elif not _userHasMandateAccess(context, mandateId):
|
||||
raise HTTPException(status_code=403, detail=routeApiMsg("Access denied"))
|
||||
jobs = listJobs(
|
||||
mandateId=mandateId,
|
||||
featureInstanceId=instanceId,
|
||||
jobType=jobType,
|
||||
limit=limit,
|
||||
)
|
||||
return {"items": [_serialiseJob(j) for j in jobs]}
|
||||
|
|
@ -86,6 +86,7 @@ async def getSharepointFolderOptionsByReference(
|
|||
connectionReference: str = Query(..., description="Connection reference string (e.g., 'connection:msft:user@email.com')"),
|
||||
siteId: Optional[str] = Query(None, description="Specific site ID to browse (if omitted, returns sites only)"),
|
||||
path: Optional[str] = Query(None, description="Folder path within site to browse"),
|
||||
includeFiles: bool = Query(False, description="If true, also include files (not only folders) in the response"),
|
||||
currentUser: User = Depends(getCurrentUser)
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
|
|
@ -156,10 +157,10 @@ async def getSharepointFolderOptionsByReference(
|
|||
|
||||
folderOptions = []
|
||||
for item in items:
|
||||
if item.get("type") == "folder":
|
||||
itemType = item.get("type")
|
||||
if itemType == "folder":
|
||||
folderName = item.get("name", "")
|
||||
itemPath = f"{folderPath}/{folderName}" if folderPath else folderName
|
||||
|
||||
folderOptions.append({
|
||||
"type": "folder",
|
||||
"value": itemPath,
|
||||
|
|
@ -167,9 +168,21 @@ async def getSharepointFolderOptionsByReference(
|
|||
"siteId": siteId,
|
||||
"folderName": folderName,
|
||||
"path": itemPath,
|
||||
"hasChildren": True # Assume folders may have children
|
||||
"hasChildren": True
|
||||
})
|
||||
elif includeFiles and itemType == "file":
|
||||
fileName = item.get("name", "")
|
||||
itemPath = f"{folderPath}/{fileName}" if folderPath else fileName
|
||||
folderOptions.append({
|
||||
"type": "file",
|
||||
"value": itemPath,
|
||||
"label": fileName,
|
||||
"siteId": siteId,
|
||||
"fileName": fileName,
|
||||
"path": itemPath,
|
||||
"mimeType": item.get("mimeType"),
|
||||
"size": item.get("size"),
|
||||
})
|
||||
|
||||
return folderOptions
|
||||
|
||||
except HTTPException:
|
||||
|
|
|
|||
|
|
@ -200,8 +200,11 @@ def _registerDefaultToolboxes() -> None:
|
|||
isDefault=False,
|
||||
tools=[
|
||||
"readWorkflowGraph", "addNode", "removeNode", "connectNodes",
|
||||
"setNodeParameter", "listAvailableNodeTypes", "validateGraph",
|
||||
"setNodeParameter", "listAvailableNodeTypes", "describeNodeType",
|
||||
"autoLayoutWorkflow", "validateGraph",
|
||||
"listWorkflowHistory", "readWorkflowMessages",
|
||||
"createWorkflow", "updateWorkflowMetadata", "createWorkflowFromFile",
|
||||
"exportWorkflowToFile", "deleteWorkflow",
|
||||
],
|
||||
),
|
||||
ToolboxDefinition(
|
||||
|
|
|
|||
|
|
@ -650,6 +650,209 @@ async def _readWorkflowMessages(params: Dict[str, Any], context: Any) -> ToolRes
|
|||
return _err(name, str(e))
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Full-CRUD tools — create / load-from-file / export-to-file / delete
|
||||
# (Phase 3 of 2026-04-pwg-pilot-mietzinsbestaetigung-workflow.md)
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def _updateWorkflowMetadata(params: Dict[str, Any], context: Any) -> ToolResult:
|
||||
"""Update workflow metadata (label / description / tags / active flag).
|
||||
|
||||
This is the **rename / re-tag / activate** tool. It does NOT touch the
|
||||
graph or invocations. Use this whenever the user wants to:
|
||||
- rename the workflow ("nenne den workflow um", "rename to X")
|
||||
- change description
|
||||
- add/remove tags
|
||||
- toggle active
|
||||
|
||||
NEVER use ``removeNode`` / ``deleteWorkflow`` for a rename — they are
|
||||
destructive and irreversible. If only ``label`` is provided, only the
|
||||
label is changed.
|
||||
"""
|
||||
name = "updateWorkflowMetadata"
|
||||
try:
|
||||
workflowId, instanceId = _resolveIds(params, context)
|
||||
if not workflowId or not instanceId:
|
||||
return _err(name, "workflowId and instanceId required")
|
||||
patch: Dict[str, Any] = {}
|
||||
for key in ("label", "description", "tags", "active"):
|
||||
if key in params and params[key] is not None:
|
||||
patch[key] = params[key]
|
||||
if not patch:
|
||||
return _err(name, "at least one of label/description/tags/active must be provided")
|
||||
if "label" in patch:
|
||||
label = (patch["label"] or "").strip()
|
||||
if not label:
|
||||
return _err(name, "label, if provided, must be a non-empty string")
|
||||
patch["label"] = label
|
||||
iface = _getInterface(context, instanceId)
|
||||
updated = iface.updateWorkflow(workflowId, patch)
|
||||
if updated is None:
|
||||
return _err(name, f"Workflow {workflowId} not found")
|
||||
changedFields = sorted(patch.keys())
|
||||
return _ok(name, {
|
||||
"workflowId": updated.get("id"),
|
||||
"label": updated.get("label"),
|
||||
"active": updated.get("active"),
|
||||
"changed": changedFields,
|
||||
"message": f"Workflow metadata updated ({', '.join(changedFields)}).",
|
||||
})
|
||||
except Exception as e:
|
||||
logger.exception("updateWorkflowMetadata failed: %s", e)
|
||||
return _err(name, str(e))
|
||||
|
||||
|
||||
async def _createWorkflow(params: Dict[str, Any], context: Any) -> ToolResult:
    """Create a new (empty) workflow in the current feature instance.

    The created workflow is returned so follow-up ``addNode`` /
    ``connectNodes`` calls can target it via ``workflowId`` (or via the
    agent's auto-injected context once the editor switches to it). It is
    always created inactive; the user activates it from the UI.
    """
    name = "createWorkflow"
    try:
        _, instanceId = _resolveIds(params, context)
        if not instanceId:
            return _err(name, "instanceId required (and not present in agent context)")

        label = (params.get("label") or "").strip()
        if not label:
            return _err(name, "label required")

        iface = _getInterface(context, instanceId)
        payload = {
            "label": label,
            "description": params.get("description") or "",
            "tags": params.get("tags") or [],
            "graph": params.get("graph") or {"nodes": [], "connections": []},
            "invocations": params.get("invocations") or [],
            "active": False,  # never auto-activate; the user does that in the editor
        }
        created = iface.createWorkflow(payload)
        return _ok(name, {
            "workflowId": created.get("id"),
            "label": created.get("label"),
            "message": f"Workflow '{label}' created (active=false; activate via UI when ready).",
        })
    except Exception as e:
        logger.exception("createWorkflow failed: %s", e)
        return _err(name, str(e))
|
||||
|
||||
|
||||
async def _createWorkflowFromFile(params: Dict[str, Any], context: Any) -> ToolResult:
    """Import a workflow from a UDB-uploaded ``.workflow.json`` envelope.

    Accepts either ``fileId`` (preferred — re-uses a file uploaded through
    the Unified-Data-Bar) or ``envelope`` (inline dict, useful for tests).
    The imported workflow is always saved with ``active=false``.
    """
    name = "createWorkflowFromFile"
    try:
        _, instanceId = _resolveIds(params, context)
        if not instanceId:
            return _err(name, "instanceId required")

        fileId = params.get("fileId")
        envelope = params.get("envelope")
        if not fileId and not envelope:
            return _err(name, "either fileId or envelope required")

        # An inline envelope wins; otherwise load and parse the UDB file.
        if not envelope and fileId:
            envelope = _loadEnvelopeFromUdb(fileId, context)
            if envelope is None:
                return _err(name, f"Could not read workflow file {fileId}")

        iface = _getInterface(context, instanceId)
        try:
            result = iface.importWorkflowFromDict(envelope, existingWorkflowId=params.get("existingWorkflowId"))
        except Exception as exc:
            return _err(name, f"Import failed: {exc}")

        wf = result.get("workflow") or {}
        return _ok(name, {
            "workflowId": wf.get("id"),
            "label": wf.get("label"),
            "created": result.get("created"),
            "warnings": result.get("warnings") or [],
            "message": f"Workflow '{wf.get('label')}' {'created' if result.get('created') else 'updated'} from file (active=false).",
        })
    except Exception as e:
        logger.exception("createWorkflowFromFile failed: %s", e)
        return _err(name, str(e))
|
||||
|
||||
|
||||
async def _exportWorkflowToFile(params: Dict[str, Any], context: Any) -> ToolResult:
    """Export a workflow as a versioned envelope.

    Returns the canonical envelope dict plus a suggested filename so the
    agent can offer the user a download link or re-upload to UDB.
    """
    name = "exportWorkflowToFile"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        if not workflowId or not instanceId:
            return _err(name, "workflowId and instanceId required")

        envelope = _getInterface(context, instanceId).exportWorkflowToDict(workflowId)
        if envelope is None:
            return _err(name, f"Workflow {workflowId} not found")

        # Imported lazily, matching the original's function-scope import.
        from modules.features.graphicalEditor._workflowFileSchema import buildFileName

        return _ok(name, {
            "fileName": buildFileName(envelope.get("label", "workflow")),
            "envelope": envelope,
            "schemaVersion": envelope.get("$schemaVersion"),
        })
    except Exception as e:
        logger.exception("exportWorkflowToFile failed: %s", e)
        return _err(name, str(e))
|
||||
|
||||
|
||||
async def _deleteWorkflow(params: Dict[str, Any], context: Any) -> ToolResult:
    """Delete a workflow.

    Requires an explicit ``confirm=true`` parameter so an over-eager agent
    cannot delete anything by accident.
    """
    name = "deleteWorkflow"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        if not workflowId or not instanceId:
            return _err(name, "workflowId and instanceId required")

        # Guard: deletion is irreversible, so demand the explicit flag.
        if not params.get("confirm"):
            return _err(name, "confirm=true required (deletion is permanent)")

        deleted = _getInterface(context, instanceId).deleteWorkflow(workflowId)
        if not deleted:
            return _err(name, f"Workflow {workflowId} not found")
        return _ok(name, {"workflowId": workflowId, "message": "Workflow deleted."})
    except Exception as e:
        logger.exception("deleteWorkflow failed: %s", e)
        return _err(name, str(e))
|
||||
|
||||
|
||||
def _loadEnvelopeFromUdb(fileId: str, context: Any):
    """Load and JSON-parse a workflow file from the Unified-Data-Bar.

    Returns ``None`` when the file cannot be read or does not contain
    valid JSON — the caller converts that into a tool error message.
    """
    import json

    try:
        import modules.interfaces.interfaceDbManagement as interfaceDbManagement
        mgmt = interfaceDbManagement.getInterface(_resolveUser(context), _resolveMandateId(context))
        rawBytes = mgmt.getFileData(fileId)
    except Exception as exc:
        logger.warning("workflowTools: cannot read UDB file %s: %s", fileId, exc)
        return None

    if not rawBytes:
        return None

    try:
        # UDB may hand back bytes or an already-decoded string.
        text = rawBytes.decode("utf-8") if isinstance(rawBytes, bytes) else str(rawBytes)
        return json.loads(text)
    except Exception as exc:
        logger.warning("workflowTools: file %s is not valid JSON: %s", fileId, exc)
        return None
|
||||
|
||||
|
||||
def getWorkflowToolDefinitions() -> List[Dict[str, Any]]:
|
||||
"""Return tool definitions for registration in the ToolRegistry.
|
||||
|
||||
|
|
@ -696,7 +899,14 @@ def getWorkflowToolDefinitions() -> List[Dict[str, Any]]:
|
|||
{
|
||||
"name": "removeNode",
|
||||
"handler": _removeNode,
|
||||
"description": "Remove a node and its connections from the graph.",
|
||||
"description": (
|
||||
"Remove a SINGLE node and its connections from the graph. "
|
||||
"DESTRUCTIVE — only call when the user explicitly asks to "
|
||||
"delete that specific node. NEVER use this to 'rename' or "
|
||||
"'rebuild' a workflow — for renaming use updateWorkflowMetadata; "
|
||||
"for replacing the whole graph use createWorkflowFromFile with "
|
||||
"existingWorkflowId. NEVER batch-remove all nodes."
|
||||
),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
|
@ -829,4 +1039,107 @@ def getWorkflowToolDefinitions() -> List[Dict[str, Any]]:
|
|||
"readOnly": True,
|
||||
"toolSet": TOOLBOX_ID,
|
||||
},
|
||||
{
|
||||
"name": "updateWorkflowMetadata",
|
||||
"handler": _updateWorkflowMetadata,
|
||||
"description": (
|
||||
"Rename / re-tag / (de)activate an EXISTING workflow. This is "
|
||||
"the ONLY correct way to rename a workflow — DO NOT delete and "
|
||||
"recreate, and NEVER call removeNode for a rename. Provide any "
|
||||
"subset of label/description/tags/active; omitted fields stay "
|
||||
"unchanged. Graph and invocations are not affected."
|
||||
),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
**_idFields,
|
||||
"label": {"type": "string", "description": "New workflow label (rename target)"},
|
||||
"description": {"type": "string", "description": "New description"},
|
||||
"tags": {"type": "array", "items": {"type": "string"}, "description": "New tag list (replaces existing)"},
|
||||
"active": {"type": "boolean", "description": "Activate (true) or deactivate (false) the workflow"},
|
||||
},
|
||||
"required": [],
|
||||
},
|
||||
"toolSet": TOOLBOX_ID,
|
||||
},
|
||||
{
|
||||
"name": "createWorkflow",
|
||||
"handler": _createWorkflow,
|
||||
"description": (
|
||||
"Create a NEW empty workflow in the current feature instance. "
|
||||
"The workflow is created with active=false; the user activates "
|
||||
"it from the editor. Use this when the user wants to start "
|
||||
"building a new automation from scratch."
|
||||
),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"instanceId": _idFields["instanceId"],
|
||||
"label": {"type": "string", "description": "Workflow label (required, shown in the editor list)"},
|
||||
"description": {"type": "string", "description": "Optional description"},
|
||||
"tags": {"type": "array", "items": {"type": "string"}, "description": "Optional tags"},
|
||||
"graph": {"type": "object", "description": "Optional initial graph {nodes, connections} (defaults to empty)"},
|
||||
"invocations": {"type": "array", "description": "Optional invocation triggers"},
|
||||
},
|
||||
"required": ["label"],
|
||||
},
|
||||
"toolSet": TOOLBOX_ID,
|
||||
},
|
||||
{
|
||||
"name": "createWorkflowFromFile",
|
||||
"handler": _createWorkflowFromFile,
|
||||
"description": (
|
||||
"Import a workflow from a previously uploaded .workflow.json "
|
||||
"envelope (Unified-Data-Bar). Pass the UDB ``fileId``; the file "
|
||||
"is parsed, validated against the envelopeVersioned schema and "
|
||||
"saved as a new workflow with active=false. Use ``existingWorkflowId`` "
|
||||
"to overwrite an existing workflow instead of creating a new one."
|
||||
),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"instanceId": _idFields["instanceId"],
|
||||
"fileId": {"type": "string", "description": "FileItem.id of the uploaded .workflow.json (preferred)"},
|
||||
"envelope": {"type": "object", "description": "Inline envelope dict (alternative to fileId, mainly for tests)"},
|
||||
"existingWorkflowId": {"type": "string", "description": "Optional — overwrite this workflow instead of creating a new one"},
|
||||
},
|
||||
"required": [],
|
||||
},
|
||||
"toolSet": TOOLBOX_ID,
|
||||
},
|
||||
{
|
||||
"name": "exportWorkflowToFile",
|
||||
"handler": _exportWorkflowToFile,
|
||||
"description": (
|
||||
"Export the current workflow as a portable envelopeVersioned "
|
||||
"JSON dict and a suggested filename. The agent can then offer "
|
||||
"the user a download or re-upload to UDB. Persistence-bound "
|
||||
"fields (timestamps, mandate ids) are stripped automatically."
|
||||
),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {**_idFields},
|
||||
"required": [],
|
||||
},
|
||||
"readOnly": True,
|
||||
"toolSet": TOOLBOX_ID,
|
||||
},
|
||||
{
|
||||
"name": "deleteWorkflow",
|
||||
"handler": _deleteWorkflow,
|
||||
"description": (
|
||||
"Permanently delete a workflow. Requires ``confirm=true`` to "
|
||||
"execute — this is a destructive operation. Always confirm "
|
||||
"with the user in chat BEFORE calling this tool with confirm=true."
|
||||
),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
**_idFields,
|
||||
"confirm": {"type": "boolean", "description": "Must be true to actually delete"},
|
||||
},
|
||||
"required": ["confirm"],
|
||||
},
|
||||
"toolSet": TOOLBOX_ID,
|
||||
},
|
||||
]
|
||||
|
|
|
|||
|
|
@ -0,0 +1,19 @@
|
|||
# Copyright (c) 2025 Patrick Motsch
|
||||
# All rights reserved.
|
||||
"""Background job service: generic, reusable infrastructure for long-running tasks."""
|
||||
|
||||
from .mainBackgroundJobService import (
|
||||
registerJobHandler,
|
||||
startJob,
|
||||
getJobStatus,
|
||||
listJobs,
|
||||
JobProgressCallback,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"registerJobHandler",
|
||||
"startJob",
|
||||
"getJobStatus",
|
||||
"listJobs",
|
||||
"JobProgressCallback",
|
||||
]
|
||||
|
|
@ -0,0 +1,245 @@
|
|||
# Copyright (c) 2025 Patrick Motsch
|
||||
# All rights reserved.
|
||||
"""Background job service.
|
||||
|
||||
Generic infrastructure for fire-and-forget async tasks. Any caller (HTTP route,
|
||||
AI tool, scheduler) can submit work and get a `jobId` back immediately; status
|
||||
is polled via `GET /api/jobs/{jobId}`.
|
||||
|
||||
Usage (registration, once at module load):
|
||||
|
||||
from modules.serviceCenter.services.serviceBackgroundJobs import registerJobHandler
|
||||
|
||||
async def _myHandler(job, progressCb):
|
||||
progressCb(10, "starting...")
|
||||
result = await doExpensiveWork(job["payload"])
|
||||
return result # stored as job.result
|
||||
|
||||
registerJobHandler("myJobType", _myHandler)
|
||||
|
||||
Usage (submission):
|
||||
|
||||
from modules.serviceCenter.services.serviceBackgroundJobs import startJob
|
||||
jobId = await startJob("myJobType", {"foo": "bar"}, mandateId=mid, triggeredBy=userId)
|
||||
return {"jobId": jobId}
|
||||
|
||||
Restart semantics: jobs are tracked in DB. If the worker process dies mid-job,
|
||||
`recoverInterruptedJobs()` (called at boot) flips RUNNING jobs to ERROR with a
|
||||
clear message. No silent zombies.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Awaitable, Callable, Dict, List, Optional
|
||||
|
||||
from modules.connectors.connectorDbPostgre import DatabaseConnector
|
||||
from modules.shared.configuration import APP_CONFIG
|
||||
from modules.shared.dbRegistry import registerDatabase
|
||||
from modules.datamodels.datamodelBackgroundJob import (
|
||||
BackgroundJob,
|
||||
BackgroundJobStatusEnum,
|
||||
TERMINAL_JOB_STATUSES,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
JOBS_DATABASE = APP_CONFIG.get("DB_DATABASE", "poweron_app")
|
||||
registerDatabase(JOBS_DATABASE)
|
||||
|
||||
|
||||
JobProgressCallback = Callable[[int, Optional[str]], None]
|
||||
JobHandler = Callable[[Dict[str, Any], JobProgressCallback], Awaitable[Optional[Dict[str, Any]]]]
|
||||
|
||||
|
||||
_JOB_HANDLERS: Dict[str, JobHandler] = {}
|
||||
|
||||
|
||||
def registerJobHandler(jobType: str, handler: JobHandler) -> None:
    """Register a handler for a job type. Idempotent — last registration wins."""
    previous = _JOB_HANDLERS.get(jobType)
    if previous is not None and previous is not handler:
        # Replacing an existing handler is allowed, but make it visible in logs.
        logger.info("Re-registering background job handler for type %s", jobType)
    _JOB_HANDLERS[jobType] = handler
|
||||
|
||||
|
||||
def _getDb() -> DatabaseConnector:
    """Build a fresh connector to the shared application DB holding job rows."""
    cfg = APP_CONFIG.get
    return DatabaseConnector(
        dbDatabase=JOBS_DATABASE,
        dbHost=cfg("DB_HOST", "localhost"),
        dbPort=int(cfg("DB_PORT", "5432")),
        dbUser=cfg("DB_USER"),
        dbPassword=cfg("DB_PASSWORD_SECRET"),
    )
|
||||
|
||||
|
||||
def _serialiseDatetimes(data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Return copy of dict with datetime values converted to ISO 8601 strings."""
|
||||
out: Dict[str, Any] = {}
|
||||
for k, v in data.items():
|
||||
if isinstance(v, datetime):
|
||||
out[k] = v.isoformat()
|
||||
else:
|
||||
out[k] = v
|
||||
return out
|
||||
|
||||
|
||||
async def startJob(
    jobType: str,
    payload: Optional[Dict[str, Any]] = None,
    *,
    mandateId: Optional[str] = None,
    featureInstanceId: Optional[str] = None,
    triggeredBy: Optional[str] = None,
) -> str:
    """Insert a new BackgroundJob, kick off its handler in the background, return jobId.

    Returns immediately; the handler runs via ``asyncio.create_task``.

    Args:
        jobType: Key of a handler previously passed to ``registerJobHandler``.
        payload: JSON-serialisable job input, stored on the job record.
        mandateId: Optional mandate scope for the job.
        featureInstanceId: Optional feature-instance scope for the job.
        triggeredBy: Optional id of the user/system that submitted the job.

    Raises:
        ValueError: If no handler is registered for ``jobType``.
    """
    if jobType not in _JOB_HANDLERS:
        raise ValueError(f"No handler registered for jobType '{jobType}'")

    job = BackgroundJob(
        jobType=jobType,
        mandateId=mandateId,
        featureInstanceId=featureInstanceId,
        triggeredBy=triggeredBy,
        payload=payload or {},
    )
    db = _getDb()
    record = db.recordCreate(BackgroundJob, _serialiseDatetimes(job.model_dump()))
    jobId = record["id"]

    # BUGFIX: the event loop keeps only a weak reference to tasks, so a
    # fire-and-forget task whose result is discarded may be garbage-collected
    # before it finishes (documented asyncio pitfall). Hold a strong reference
    # until the task completes; the done-callback drops it again.
    task = asyncio.create_task(_runJob(jobId))
    pendingTasks = getattr(startJob, "_pendingTasks", None)
    if pendingTasks is None:
        pendingTasks = set()
        startJob._pendingTasks = pendingTasks
    pendingTasks.add(task)
    task.add_done_callback(pendingTasks.discard)

    logger.info(
        "BackgroundJob %s submitted: type=%s mandate=%s instance=%s by=%s",
        jobId, jobType, mandateId, featureInstanceId, triggeredBy,
    )
    return jobId
|
||||
|
||||
|
||||
def _loadJob(jobId: str) -> Optional[Dict[str, Any]]:
    """Fetch a single job row by id; ``None`` when it does not exist."""
    rows = _getDb().getRecordset(BackgroundJob, recordFilter={"id": jobId})
    if not rows:
        return None
    return dict(rows[0])
|
||||
|
||||
|
||||
def _updateJob(jobId: str, fields: Dict[str, Any]) -> None:
    """Persist a partial update on the job row; datetimes are ISO-serialised."""
    connector = _getDb()
    connector.recordModify(BackgroundJob, jobId, _serialiseDatetimes(fields))
|
||||
|
||||
|
||||
def _markStarted(jobId: str) -> None:
    """Transition the job to RUNNING and stamp its start time."""
    fields = {
        "status": BackgroundJobStatusEnum.RUNNING.value,
        "startedAt": datetime.now(timezone.utc),
    }
    _updateJob(jobId, fields)
|
||||
|
||||
|
||||
def _markSuccess(jobId: str, result: Optional[Dict[str, Any]]) -> None:
    """Transition the job to SUCCESS: store the result, force progress=100."""
    finished = datetime.now(timezone.utc)
    _updateJob(jobId, {
        "status": BackgroundJobStatusEnum.SUCCESS.value,
        "result": result or {},
        "progress": 100,
        "finishedAt": finished,
    })
|
||||
|
||||
|
||||
def _markError(jobId: str, errorMessage: str) -> None:
    """Transition the job to ERROR with a (truncated) error message."""
    # Cap the message so an oversized traceback cannot bloat the row.
    _updateJob(jobId, {
        "status": BackgroundJobStatusEnum.ERROR.value,
        "errorMessage": (errorMessage or "")[:1000],
        "finishedAt": datetime.now(timezone.utc),
    })
|
||||
|
||||
|
||||
def _makeProgressCallback(jobId: str) -> JobProgressCallback:
    """Build the progress callback handed to a job handler.

    The callback clamps progress into [0, 100], optionally stores a short
    message, and swallows persistence errors — a failed progress write
    must never kill the running job.
    """
    def _report(progress: int, message: Optional[str] = None) -> None:
        try:
            fields: Dict[str, Any] = {"progress": min(100, max(0, int(progress)))}
            if message is not None:
                fields["progressMessage"] = message[:500]
            _updateJob(jobId, fields)
        except Exception as ex:
            logger.warning("Progress update failed for job %s: %s", jobId, ex)

    return _report
|
||||
|
||||
|
||||
async def _runJob(jobId: str) -> None:
    """Execute one job: load the row, dispatch to its handler, persist the outcome."""
    job = _loadJob(jobId)
    if not job:
        logger.error("BackgroundJob %s vanished before runner started", jobId)
        return

    jobType = job["jobType"]
    handler = _JOB_HANDLERS.get(jobType)
    if handler is None:
        msg = f"No handler registered for jobType '{jobType}'"
        logger.error("BackgroundJob %s: %s", jobId, msg)
        _markError(jobId, msg)
        return

    _markStarted(jobId)
    try:
        result = await handler(job, _makeProgressCallback(jobId))
        # Only dict results are stored; anything else is treated as "no result".
        _markSuccess(jobId, result if isinstance(result, dict) else None)
        logger.info("BackgroundJob %s (%s) completed successfully", jobId, jobType)
    except Exception as e:
        logger.exception("BackgroundJob %s (%s) failed", jobId, jobType)
        _markError(jobId, str(e))
|
||||
|
||||
|
||||
def getJobStatus(jobId: str) -> Optional[Dict[str, Any]]:
    """Load the current state of a background job.

    Thin public wrapper around ``_loadJob`` — this is the polling entry
    point described in the module docstring.

    Args:
        jobId: Primary key of the BackgroundJob row.

    Returns:
        The job row as a dict, or ``None`` if no such job exists.
    """
    return _loadJob(jobId)
|
||||
|
||||
|
||||
def listJobs(
    *,
    mandateId: Optional[str] = None,
    featureInstanceId: Optional[str] = None,
    jobType: Optional[str] = None,
    limit: int = 20,
) -> List[Dict[str, Any]]:
    """List recent jobs filtered by scope. Newest first.

    Args:
        mandateId: Optional equality filter; ``None`` means no filter.
        featureInstanceId: Optional equality filter; ``None`` means no filter.
        jobType: Optional equality filter; ``None`` means no filter.
        limit: Maximum number of rows returned.

    Returns:
        Up to ``limit`` job dicts sorted by ``createdAt`` descending.
    """
    db = _getDb()
    rows = db.getRecordset(BackgroundJob)
    jobs = [dict(r) for r in rows]

    # Apply the optional equality filters in Python (keeps the DB call generic).
    for field, wanted in (
        ("mandateId", mandateId),
        ("featureInstanceId", featureInstanceId),
        ("jobType", jobType),
    ):
        if wanted is not None:
            jobs = [r for r in jobs if r.get(field) == wanted]

    def _sortKey(r: Dict[str, Any]) -> str:
        # BUGFIX: createdAt may come back as a datetime object or an ISO
        # string depending on the connector; datetime and str are not
        # comparable, so the previous `r.get("createdAt") or ""` key raised
        # TypeError on mixed rows. Normalise everything to ISO text — the
        # lexicographic order of ISO 8601 strings matches chronological order.
        value = r.get("createdAt")
        if isinstance(value, datetime):
            return value.isoformat()
        return str(value) if value is not None else ""

    jobs.sort(key=_sortKey, reverse=True)
    return jobs[:limit]
|
||||
|
||||
|
||||
def isTerminalStatus(status: str) -> bool:
    """True if the given status is one of {SUCCESS, ERROR, CANCELLED}."""
    return any(status == terminal.value for terminal in TERMINAL_JOB_STATUSES)
|
||||
|
||||
|
||||
def recoverInterruptedJobs() -> int:
    """Flip any RUNNING jobs to ERROR (called at worker boot).

    A RUNNING row after a process restart means the previous worker died
    mid-execution: its asyncio task is gone, so the job can never finish on
    its own. Returns the number of jobs flipped.
    """
    db = _getDb()
    try:
        rows = db.getRecordset(
            BackgroundJob,
            recordFilter={"status": BackgroundJobStatusEnum.RUNNING.value},
        )
    except Exception as ex:
        logger.warning("recoverInterruptedJobs: failed to scan RUNNING jobs: %s", ex)
        return 0

    recovered = 0
    for row in rows:
        try:
            _markError(row["id"], "Interrupted by worker restart")
        except Exception as ex:
            # Best effort: a single stuck row must not block the others.
            logger.warning("recoverInterruptedJobs: could not mark %s as ERROR: %s", row.get("id"), ex)
        else:
            recovered += 1

    if recovered:
        logger.warning("Recovered %d interrupted background job(s) after restart", recovered)
    return recovered
|
||||
|
|
@ -179,6 +179,10 @@ class SubscriptionService:
|
|||
checkoutUrl = self._createCheckoutSession(mid, plan, created, currentOperative, returnUrl)
|
||||
created["redirectUrl"] = checkoutUrl
|
||||
except Exception as e:
|
||||
logger.exception(
|
||||
"Checkout creation failed for mandate %s, plan %s — force-expiring PENDING %s",
|
||||
mid, planKey, created["id"],
|
||||
)
|
||||
self._interface.forceExpire(created["id"])
|
||||
self.invalidateCache(mid)
|
||||
raise ValueError(f"Subscription konnte nicht erstellt werden: {e}") from e
|
||||
|
|
@ -276,7 +280,11 @@ class SubscriptionService:
|
|||
},
|
||||
}
|
||||
|
||||
if currentOperative and currentOperative.get("currentPeriodEnd"):
|
||||
isTrialPredecessor = (
|
||||
currentOperative is not None
|
||||
and currentOperative.get("status") == SubscriptionStatusEnum.TRIALING.value
|
||||
)
|
||||
if currentOperative and currentOperative.get("currentPeriodEnd") and not isTrialPredecessor:
|
||||
periodEnd = currentOperative["currentPeriodEnd"]
|
||||
if isinstance(periodEnd, str):
|
||||
periodEnd = datetime.fromisoformat(periodEnd)
|
||||
|
|
|
|||
|
|
@ -17,6 +17,20 @@ async def composeAndDraftEmailWithContext(self, parameters: Dict[str, Any]) -> A
|
|||
context = parameters.get("context")
|
||||
documentList = parameters.get("documentList") or []
|
||||
replySourceDocuments = parameters.get("replySourceDocuments") or [] # Original email(s) for reply attachment
|
||||
# ``attachments`` (added in 2026-04 for the PWG pilot) is a list of
|
||||
# explicit attachment specs that bypass the AI selection step.
|
||||
# Supported shapes per item:
|
||||
# { name, mimeType, base64Content } – inline bytes (already base64)
|
||||
# { name, mimeType, contentRef } – upstream wire variable name
|
||||
# from ``parameters[contentRef]``
|
||||
# (e.g. ``csv`` produced by
|
||||
# ``data.consolidate``)
|
||||
# { name, csvFromVariable } – shorthand for CSV ref
|
||||
attachmentSpecs = parameters.get("attachments") or []
|
||||
if isinstance(attachmentSpecs, dict):
|
||||
attachmentSpecs = [attachmentSpecs]
|
||||
if not isinstance(attachmentSpecs, list):
|
||||
attachmentSpecs = []
|
||||
cc = parameters.get("cc") or []
|
||||
bcc = parameters.get("bcc") or []
|
||||
emailStyle = parameters.get("emailStyle") or "business"
|
||||
|
|
@ -273,8 +287,14 @@ Return JSON:
|
|||
# Supports: 1) inline ActionDocuments (dict with documentData from e.g. sharepoint.downloadFile)
|
||||
# 2) docItem:... references (chat workflow documents)
|
||||
# 3) replySourceDocuments: original email(s) for reply – attach when use_direct_content
|
||||
# 4) explicit attachment specs from the new ``attachments`` parameter
|
||||
# When use_direct_content: upstream AI doc IS the email body – do not attach it, BUT attach reply sources
|
||||
attachments_doc_list = (replySourceDocuments or []) if use_direct_content else (documentList or [])
|
||||
# Materialize explicit attachment specs into inline ActionDocument-shaped dicts
|
||||
for spec in attachmentSpecs:
|
||||
resolved = _resolveAttachmentSpec(spec, parameters)
|
||||
if resolved is not None:
|
||||
attachments_doc_list = list(attachments_doc_list) + [resolved]
|
||||
if attachments_doc_list:
|
||||
message["attachments"] = []
|
||||
for attachment_ref in attachments_doc_list:
|
||||
|
|
@ -484,3 +504,49 @@ Return JSON:
|
|||
logger.error(f"Error in composeAndDraftEmailWithContext: {str(e)}")
|
||||
return ActionResult.isFailure(error=str(e))
|
||||
|
||||
|
||||
def _resolveAttachmentSpec(spec: Any, parameters: Dict[str, Any]) -> Any:
|
||||
"""Resolve one attachment-spec dict into an inline-document shape that the
|
||||
existing attachment loop already understands ({documentName, documentData,
|
||||
mimeType}).
|
||||
|
||||
Three input shapes are supported:
|
||||
|
||||
- ``{name, mimeType, base64Content}``: inline bytes already encoded as
|
||||
base64 — used by the agent when synthesising small text attachments.
|
||||
- ``{name, mimeType, contentRef}``: pull the bytes from another
|
||||
parameter on this node call (i.e. an upstream wire variable, e.g.
|
||||
``contentRef='csv'`` reads ``parameters['csv']``).
|
||||
- ``{name, csvFromVariable}``: shorthand for the most common case — the
|
||||
CSV produced by ``data.consolidate`` arriving via wire.
|
||||
|
||||
Returns ``None`` for malformed specs (logged) so a single bad spec does
|
||||
not abort the whole email draft.
|
||||
"""
|
||||
if not isinstance(spec, dict):
|
||||
return None
|
||||
name = spec.get("name") or spec.get("fileName") or "attachment.bin"
|
||||
mimeType = spec.get("mimeType") or spec.get("contentType")
|
||||
|
||||
raw = None
|
||||
if "base64Content" in spec and spec.get("base64Content"):
|
||||
raw = spec.get("base64Content")
|
||||
elif spec.get("csvFromVariable"):
|
||||
raw = parameters.get(spec["csvFromVariable"])
|
||||
if not mimeType:
|
||||
mimeType = "text/csv"
|
||||
if not name.lower().endswith(".csv"):
|
||||
name = f"{name}.csv"
|
||||
elif spec.get("contentRef"):
|
||||
raw = parameters.get(spec["contentRef"])
|
||||
|
||||
if raw is None or raw == "":
|
||||
logger.warning("email.draftEmail: attachment spec %r resolved to empty content, skipping", name)
|
||||
return None
|
||||
|
||||
return {
|
||||
"documentName": name,
|
||||
"documentData": raw,
|
||||
"mimeType": mimeType or "application/octet-stream",
|
||||
}
|
||||
|
||||
|
|
|
|||
390
modules/workflows/methods/methodTrustee/actions/queryData.py
Normal file
390
modules/workflows/methods/methodTrustee/actions/queryData.py
Normal file
|
|
@ -0,0 +1,390 @@
|
|||
# Copyright (c) 2026 Patrick Motsch
|
||||
# All rights reserved.
|
||||
"""
|
||||
Query data from the Trustee feature DB.
|
||||
|
||||
Three modes:
|
||||
- ``lookup`` (default): tenant-aware lookup. For ``entity=tenantWithRent`` the
|
||||
action joins ``TrusteeDataContact`` (identity match by tenantName +
|
||||
tenantAddress with light tolerance) with derived rent amounts from
|
||||
``TrusteeDataJournalLine`` filtered by an account-pattern. Output is a
|
||||
compact dict ready to feed into ``ai.prompt`` ``context``.
|
||||
- ``raw``: return the recordset for the given entity (filtered by
|
||||
``filterJson``). Use for debugging or advanced workflows.
|
||||
- ``aggregate``: count records per group (basic group-by helper).
|
||||
|
||||
This action does NOT trigger an external sync — use
|
||||
``trustee.refreshAccountingData`` first if data may be stale.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from modules.datamodels.datamodelChat import ActionResult
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
_NAME_NORMALIZE_RE = re.compile(r"[^a-z0-9]+")
|
||||
_ENTITY_TO_MODEL = {
|
||||
"contact": "TrusteeDataContact",
|
||||
"accounts": "TrusteeDataAccount",
|
||||
"balances": "TrusteeDataAccountBalance",
|
||||
"journalLines": "TrusteeDataJournalLine",
|
||||
}
|
||||
|
||||
|
||||
async def queryData(self, parameters: Dict[str, Any]) -> ActionResult:
    """Query the Trustee feature DB. See module docstring for modes."""
    featureInstanceId = parameters.get("featureInstanceId")
    if not featureInstanceId and hasattr(self.services, "featureInstanceId"):
        featureInstanceId = self.services.featureInstanceId
    if not featureInstanceId:
        return ActionResult.isFailure(error="featureInstanceId is required")

    mode = (parameters.get("mode") or "lookup").lower()
    entity = parameters.get("entity") or "tenantWithRent"

    try:
        from modules.features.trustee.interfaceFeatureTrustee import getInterface as getTrusteeInterface
        trusteeInterface = getTrusteeInterface(
            self.services.user,
            mandateId=self.services.mandateId,
            featureInstanceId=featureInstanceId,
        )
    except Exception as exc:
        logger.exception("trustee.queryData: cannot open trustee interface")
        return ActionResult.isFailure(error=f"Trustee interface unavailable: {exc}")

    if mode == "lookup":
        if entity == "tenantWithRent":
            payload = _lookupTenantWithRent(trusteeInterface, featureInstanceId, parameters)
            return ActionResult.isSuccess(data=payload)
        if entity == "contact":
            contact = _lookupContact(
                trusteeInterface,
                featureInstanceId,
                parameters.get("tenantNameRef") or "",
                parameters.get("tenantAddressRef") or "",
            )
            return ActionResult.isSuccess(data={"contact": contact})
        # Any other lookup entity falls through to the unsupported-combination error.

    if mode in ("raw", "aggregate"):
        modelName = _ENTITY_TO_MODEL.get(entity)
        if not modelName:
            return ActionResult.isFailure(
                error=f"entity '{entity}' is not supported in mode '{mode}'"
            )
        records = _readRecordset(
            trusteeInterface,
            featureInstanceId,
            modelName,
            _parseFilterJson(parameters.get("filterJson")),
        )
        if mode == "aggregate":
            return ActionResult.isSuccess(data={
                "entity": entity,
                "count": len(records),
                "summary": _summarizeAggregate(records),
            })
        return ActionResult.isSuccess(data={"entity": entity, "count": len(records), "records": records})

    return ActionResult.isFailure(
        error=f"Unsupported combination mode='{mode}' entity='{entity}'"
    )
|
||||
|
||||
|
||||
def _lookupTenantWithRent(
    trusteeInterface,
    featureInstanceId: str,
    parameters: Dict[str, Any],
) -> Dict[str, Any]:
    """Return ``{contact, expectedRent, rentLines}`` for one tenant.

    The identity match is intentionally tolerant (case-insensitive,
    punctuation stripped) so OCR output with minor variations still hits.
    The rent amount is derived from ``TrusteeDataJournalLine`` rows whose
    ``accountNumber`` matches ``rentAccountPattern`` and whose booking
    date (via the journal entry header) falls inside the requested period.
    """
    tenantName = parameters.get("tenantNameRef") or ""
    tenantAddress = parameters.get("tenantAddressRef") or ""
    period = parameters.get("period") or ""
    accountPattern = parameters.get("rentAccountPattern") or ""

    contact = _lookupContact(trusteeInterface, featureInstanceId, tenantName, tenantAddress)
    if not contact:
        # No identity hit: report the inputs back so the caller can surface them.
        return {
            "matched": False,
            "tenantNameRef": tenantName,
            "tenantAddressRef": tenantAddress,
            "contact": None,
            "expectedRent": None,
            "rentLines": [],
        }

    rentLines, expectedRent = _deriveRentForContact(
        trusteeInterface,
        featureInstanceId,
        contact,
        period,
        accountPattern,
    )
    return {
        "matched": True,
        "tenantNameRef": tenantName,
        "tenantAddressRef": tenantAddress,
        "contact": contact,
        "period": period,
        "rentAccountPattern": accountPattern,
        "rentLines": rentLines,
        "expectedRent": expectedRent,
    }
|
||||
|
||||
|
||||
def _lookupContact(
    trusteeInterface,
    featureInstanceId: str,
    tenantName: str,
    tenantAddress: str,
) -> Optional[Dict[str, Any]]:
    """Fuzzy-match a tenant to one ``TrusteeDataContact`` row.

    Name and address are compared after normalization (lower-cased,
    punctuation stripped). Exact equality scores higher than substring
    containment; a candidate is returned only when the best score reaches
    the confidence threshold (5, i.e. at least a partial name hit plus
    address support, or an exact name match).
    """
    from modules.features.trustee.datamodelFeatureTrustee import TrusteeDataContact

    rows = trusteeInterface.db.getRecordset(
        TrusteeDataContact,
        recordFilter={"featureInstanceId": featureInstanceId},
    ) or []
    if not rows:
        return None

    wantedName = _normalizeText(tenantName)
    wantedAddress = _normalizeText(tenantAddress)
    if not wantedName and not wantedAddress:
        # Nothing to match against -> refuse rather than guess.
        return None

    def _score(candidate: Dict[str, Any]) -> int:
        # Points mirror match quality: exact name 10, partial name 6,
        # exact address 5, partial address 3.
        candName = _normalizeText(candidate.get("name") or "")
        candAddress = _normalizeText(
            " ".join([candidate.get("address") or "", candidate.get("zip") or "", candidate.get("city") or ""]).strip()
        )
        points = 0
        if wantedName and candName:
            if candName == wantedName:
                points += 10
            elif wantedName in candName or candName in wantedName:
                points += 6
        if wantedAddress and candAddress:
            if candAddress == wantedAddress:
                points += 5
            elif wantedAddress in candAddress or candAddress in wantedAddress:
                points += 3
        return points

    scored = [( _score(dict(row)), dict(row) ) for row in rows]
    # max() keeps the first maximal element, matching strict ">" best-tracking.
    bestScore, bestMatch = max(scored, key=lambda pair: pair[0])

    if bestScore < 5:
        # Below the confidence threshold: a lone partial hit is not enough.
        return None
    return _shrinkContact(bestMatch)
|
||||
|
||||
|
||||
def _deriveRentForContact(
    trusteeInterface,
    featureInstanceId: str,
    contact: Dict[str, Any],
    period: str,
    accountPattern: str,
) -> tuple:
    """Derive expected annual rent from journal lines.

    The trustee DB does not store a ``Mietvertrag`` entity; the expected
    annual rent is the sum of all credit amounts on rent-revenue accounts
    referenced in journal entries whose description / reference contains
    the contact name. This is intentionally a heuristic — when no match is
    found we return ``([], None)`` so the caller can flag ``unleserlich``.

    Returns:
        tuple: ``(rentLines, expectedRent)`` — ``rentLines`` is a list of
        per-line dicts (date, ref, account, amount, description);
        ``expectedRent`` is their rounded sum, or ``None`` when no line
        matched.
    """
    from modules.features.trustee.datamodelFeatureTrustee import (
        TrusteeDataJournalEntry,
        TrusteeDataJournalLine,
    )

    entries = trusteeInterface.db.getRecordset(
        TrusteeDataJournalEntry,
        recordFilter={"featureInstanceId": featureInstanceId},
    ) or []
    lines = trusteeInterface.db.getRecordset(
        TrusteeDataJournalLine,
        recordFilter={"featureInstanceId": featureInstanceId},
    ) or []
    if not entries or not lines:
        return [], None

    fromDate, toDate = _parsePeriod(period)
    accountMatcher = _accountMatcher(accountPattern)
    nameKey = _normalizeText(contact.get("name") or "")
    contactNumber = (contact.get("contactNumber") or "").strip()

    # Pass 1: pick journal-entry headers inside the period whose description
    # or reference mentions the contact (normalized name or contact number).
    relevantEntryIds = set()
    entryById = {}
    for raw in entries:
        e = dict(raw)
        eid = e.get("id")
        if not eid:
            continue
        # Dates compare as strings; entries with an empty bookingDate are
        # NOT excluded by the period filter (both guards require bDate).
        bDate = e.get("bookingDate") or ""
        if fromDate and bDate and bDate < fromDate:
            continue
        if toDate and bDate and bDate > toDate:
            continue
        descKey = _normalizeText(" ".join([e.get("description") or "", e.get("reference") or ""]))
        # Name match is on normalized text; contact-number match is raw
        # substring against the un-normalized reference field.
        if (nameKey and nameKey in descKey) or (contactNumber and contactNumber in (e.get("reference") or "")):
            relevantEntryIds.add(eid)
            entryById[eid] = e

    # Pass 2: sum lines of relevant entries on matching accounts. Credits on
    # revenue accounts count positive; debits reduce the expected rent.
    rentLines = []
    total = 0.0
    for raw in lines:
        ln = dict(raw)
        if ln.get("journalEntryId") not in relevantEntryIds:
            continue
        accountNo = (ln.get("accountNumber") or "")
        if not accountMatcher(accountNo):
            continue
        credit = float(ln.get("creditAmount") or 0.0)
        debit = float(ln.get("debitAmount") or 0.0)
        amount = credit - debit
        e = entryById.get(ln.get("journalEntryId"), {})
        rentLines.append({
            "date": e.get("bookingDate"),
            "ref": e.get("reference"),
            "account": accountNo,
            "amount": round(amount, 2),
            "description": ln.get("description") or e.get("description"),
        })
        total += amount

    # Distinguish "no matching lines" (None) from a genuine zero total.
    expectedRent = round(total, 2) if rentLines else None
    return rentLines, expectedRent
|
||||
|
||||
|
||||
def _readRecordset(
    trusteeInterface,
    featureInstanceId: str,
    modelName: str,
    extraFilter: Optional[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    """Fetch rows of one whitelisted trustee model as plain dicts.

    Unknown model names yield ``[]`` instead of raising, and the
    feature-instance filter is always applied before any caller-supplied
    ``extraFilter`` keys are merged in.
    """
    from modules.features.trustee.datamodelFeatureTrustee import (
        TrusteeDataAccount,
        TrusteeDataAccountBalance,
        TrusteeDataContact,
        TrusteeDataJournalLine,
    )

    allowedModels = {
        "TrusteeDataAccount": TrusteeDataAccount,
        "TrusteeDataAccountBalance": TrusteeDataAccountBalance,
        "TrusteeDataContact": TrusteeDataContact,
        "TrusteeDataJournalLine": TrusteeDataJournalLine,
    }
    try:
        model = allowedModels[modelName]
    except KeyError:
        # Whitelist miss: caller asked for a model we do not expose.
        return []

    recordFilter: Dict[str, Any] = {"featureInstanceId": featureInstanceId}
    if isinstance(extraFilter, dict):
        recordFilter.update(extraFilter)

    rows = trusteeInterface.db.getRecordset(model, recordFilter=recordFilter) or []
    return [dict(row) for row in rows]
|
||||
|
||||
|
||||
def _summarizeAggregate(records: List[Dict[str, Any]]) -> Dict[str, Any]:
|
||||
"""Quick counts by common fields. Avoids heavy SQL for the prototype."""
|
||||
summary: Dict[str, Any] = {"total": len(records)}
|
||||
for field in ("contactType", "accountType", "currency"):
|
||||
bucket: Dict[str, int] = {}
|
||||
for r in records:
|
||||
key = str(r.get(field) or "")
|
||||
bucket[key] = bucket.get(key, 0) + 1
|
||||
if bucket:
|
||||
summary[f"by_{field}"] = bucket
|
||||
return summary
|
||||
|
||||
|
||||
def _normalizeText(value: str) -> str:
    """Lower-case *value* and strip characters matched by the module-level
    ``_NAME_NORMALIZE_RE`` so fuzzy name/address comparisons are stable."""
    lowered = (value or "").lower()
    return _NAME_NORMALIZE_RE.sub("", lowered)
|
||||
|
||||
|
||||
def _shrinkContact(rec: Dict[str, Any]) -> Dict[str, Any]:
|
||||
return {
|
||||
"id": rec.get("id"),
|
||||
"externalId": rec.get("externalId"),
|
||||
"contactType": rec.get("contactType"),
|
||||
"contactNumber": rec.get("contactNumber"),
|
||||
"name": rec.get("name"),
|
||||
"address": rec.get("address"),
|
||||
"zip": rec.get("zip"),
|
||||
"city": rec.get("city"),
|
||||
"email": rec.get("email"),
|
||||
}
|
||||
|
||||
|
||||
def _parseFilterJson(raw: Any) -> Dict[str, Any]:
|
||||
if not raw:
|
||||
return {}
|
||||
if isinstance(raw, dict):
|
||||
return raw
|
||||
try:
|
||||
parsed = json.loads(str(raw))
|
||||
return parsed if isinstance(parsed, dict) else {}
|
||||
except Exception:
|
||||
logger.warning("trustee.queryData: invalid filterJson, ignoring")
|
||||
return {}
|
||||
|
||||
|
||||
def _parsePeriod(period: str) -> tuple:
|
||||
"""Parse ``"YYYY"`` or ``"YYYY-MM-DD/YYYY-MM-DD"`` into ``(from, to)``.
|
||||
|
||||
Empty string → ``(None, None)``. Invalid input is treated as no filter
|
||||
rather than raising — workflows must not abort on malformed period text.
|
||||
"""
|
||||
if not period:
|
||||
return None, None
|
||||
period = period.strip()
|
||||
if "/" in period:
|
||||
parts = period.split("/", 1)
|
||||
return parts[0].strip() or None, parts[1].strip() or None
|
||||
if len(period) == 4 and period.isdigit():
|
||||
return f"{period}-01-01", f"{period}-12-31"
|
||||
return period, period
|
||||
|
||||
|
||||
def _accountMatcher(pattern: str):
|
||||
"""Return a predicate ``str -> bool`` that matches account numbers.
|
||||
|
||||
Supports ``"6*"`` (prefix), ``"6000-6099"`` (numeric range), and exact
|
||||
matches. Empty pattern matches everything (caller decides if that's wise).
|
||||
"""
|
||||
pattern = (pattern or "").strip()
|
||||
if not pattern:
|
||||
return lambda _x: True
|
||||
if "-" in pattern and pattern.replace("-", "").isdigit():
|
||||
lo, hi = pattern.split("-", 1)
|
||||
try:
|
||||
lo_i = int(lo)
|
||||
hi_i = int(hi)
|
||||
def _rangeMatch(acc: str) -> bool:
|
||||
try:
|
||||
return lo_i <= int(acc) <= hi_i
|
||||
except (TypeError, ValueError):
|
||||
return False
|
||||
return _rangeMatch
|
||||
except ValueError:
|
||||
pass
|
||||
if pattern.endswith("*"):
|
||||
prefix = pattern[:-1]
|
||||
return lambda acc: (acc or "").startswith(prefix)
|
||||
return lambda acc: acc == pattern
|
||||
|
|
@ -13,6 +13,7 @@ from .actions.extractFromFiles import extractFromFiles
|
|||
from .actions.processDocuments import processDocuments
|
||||
from .actions.syncToAccounting import syncToAccounting
|
||||
from .actions.refreshAccountingData import refreshAccountingData
|
||||
from .actions.queryData import queryData
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -149,6 +150,70 @@ class MethodTrustee(MethodBase):
|
|||
},
|
||||
execute=refreshAccountingData.__get__(self, self.__class__),
|
||||
),
|
||||
"queryData": WorkflowActionDefinition(
|
||||
actionId="trustee.queryData",
|
||||
description="Read data from the Trustee DB (lookup tenant+rent, raw recordset, or aggregate). Does NOT trigger an external sync.",
|
||||
dynamicMode=False,
|
||||
parameters={
|
||||
"featureInstanceId": WorkflowActionParameter(
|
||||
name="featureInstanceId",
|
||||
type="str",
|
||||
frontendType=FrontendType.TEXT,
|
||||
required=True,
|
||||
description="Trustee feature instance ID",
|
||||
),
|
||||
"mode": WorkflowActionParameter(
|
||||
name="mode",
|
||||
type="str",
|
||||
frontendType=FrontendType.TEXT,
|
||||
required=True,
|
||||
description="Query mode: lookup | raw | aggregate",
|
||||
),
|
||||
"entity": WorkflowActionParameter(
|
||||
name="entity",
|
||||
type="str",
|
||||
frontendType=FrontendType.TEXT,
|
||||
required=True,
|
||||
description="Entity to query: tenantWithRent | contact | journalLines | accounts | balances",
|
||||
),
|
||||
"tenantNameRef": WorkflowActionParameter(
|
||||
name="tenantNameRef",
|
||||
type="str",
|
||||
frontendType=FrontendType.TEXT,
|
||||
required=False,
|
||||
description="Tenant name to match (or {{wire.field}} placeholder)",
|
||||
),
|
||||
"tenantAddressRef": WorkflowActionParameter(
|
||||
name="tenantAddressRef",
|
||||
type="str",
|
||||
frontendType=FrontendType.TEXT,
|
||||
required=False,
|
||||
description="Tenant address to match (tolerant)",
|
||||
),
|
||||
"period": WorkflowActionParameter(
|
||||
name="period",
|
||||
type="str",
|
||||
frontendType=FrontendType.TEXT,
|
||||
required=False,
|
||||
description="Period filter: YYYY or YYYY-MM-DD/YYYY-MM-DD",
|
||||
),
|
||||
"rentAccountPattern": WorkflowActionParameter(
|
||||
name="rentAccountPattern",
|
||||
type="str",
|
||||
frontendType=FrontendType.TEXT,
|
||||
required=False,
|
||||
description="Account-number pattern for rent revenue (e.g. '6000-6099' or '6*')",
|
||||
),
|
||||
"filterJson": WorkflowActionParameter(
|
||||
name="filterJson",
|
||||
type="str",
|
||||
frontendType=FrontendType.TEXTAREA,
|
||||
required=False,
|
||||
description="Optional JSON filter for mode=raw/aggregate",
|
||||
),
|
||||
},
|
||||
execute=queryData.__get__(self, self.__class__),
|
||||
),
|
||||
}
|
||||
self._validateActions()
|
||||
|
||||
|
|
@ -156,3 +221,4 @@ class MethodTrustee(MethodBase):
|
|||
self.processDocuments = processDocuments.__get__(self, self.__class__)
|
||||
self.syncToAccounting = syncToAccounting.__get__(self, self.__class__)
|
||||
self.refreshAccountingData = refreshAccountingData.__get__(self, self.__class__)
|
||||
self.queryData = queryData.__get__(self, self.__class__)
|
||||
|
|
|
|||
88
tests/unit/workflow/test_trusteeQueryData.py
Normal file
88
tests/unit/workflow/test_trusteeQueryData.py
Normal file
|
|
@ -0,0 +1,88 @@
|
|||
# Copyright (c) 2026 Patrick Motsch
|
||||
# All rights reserved.
|
||||
"""Unit tests for trustee.queryData helpers (pure-logic, no DB required)."""
|
||||
|
||||
import pytest
|
||||
|
||||
from modules.workflows.methods.methodTrustee.actions.queryData import (
|
||||
_accountMatcher,
|
||||
_normalizeText,
|
||||
_parseFilterJson,
|
||||
_parsePeriod,
|
||||
_summarizeAggregate,
|
||||
)
|
||||
|
||||
|
||||
class TestNormalize:
    """``_normalizeText``: lower-cases and strips punctuation for fuzzy matching."""

    def test_normalizeStripsCaseAndPunctuation(self):
        # Non-ASCII letters such as "ü" are dropped, not transliterated.
        assert _normalizeText("Müller AG") == "mllerag"
        assert _normalizeText("Mueller, AG") == "muellerag"

    def test_normalizeHandlesEmpty(self):
        # None must be tolerated — OCR fields may be absent entirely.
        assert _normalizeText("") == ""
        assert _normalizeText(None) == ""
|
||||
|
||||
|
||||
class TestParsePeriod:
    """``_parsePeriod``: period strings become inclusive (from, to) date pairs."""

    def test_yearOnlyExpandsToFullYear(self):
        assert _parsePeriod("2025") == ("2025-01-01", "2025-12-31")

    def test_rangeWithSlash(self):
        assert _parsePeriod("2025-01-01/2025-06-30") == ("2025-01-01", "2025-06-30")

    def test_emptyReturnsNone(self):
        # Empty/None means "no date filter", not an error.
        assert _parsePeriod("") == (None, None)
        assert _parsePeriod(None) == (None, None)
|
||||
|
||||
|
||||
class TestAccountMatcher:
    """``_accountMatcher``: exact, prefix-wildcard, and numeric-range patterns."""

    def test_exactMatch(self):
        m = _accountMatcher("6000")
        assert m("6000") is True
        assert m("6001") is False

    def test_prefixMatch(self):
        m = _accountMatcher("6*")
        assert m("6000") is True
        assert m("6999") is True
        assert m("7000") is False

    def test_rangeMatch(self):
        # Range bounds are inclusive; non-numeric accounts never match.
        m = _accountMatcher("6000-6099")
        assert m("6000") is True
        assert m("6050") is True
        assert m("6099") is True
        assert m("6100") is False
        assert m("not-a-number") is False

    def test_emptyMatchesAll(self):
        # Empty pattern is a deliberate match-everything wildcard.
        m = _accountMatcher("")
        assert m("anything") is True
|
||||
|
||||
|
||||
class TestParseFilterJson:
    """``_parseFilterJson``: tolerant coercion of user filters into dicts."""

    def test_validJson(self):
        assert _parseFilterJson('{"foo": "bar"}') == {"foo": "bar"}

    def test_dictPassThrough(self):
        # An already-parsed dict is returned unchanged (same object semantics).
        assert _parseFilterJson({"a": 1}) == {"a": 1}

    def test_emptyOrInvalidReturnsEmptyDict(self):
        # Malformed input degrades to "no filter" instead of raising.
        assert _parseFilterJson("") == {}
        assert _parseFilterJson(None) == {}
        assert _parseFilterJson("not json") == {}
|
||||
|
||||
|
||||
class TestSummarizeAggregate:
    """``_summarizeAggregate``: total count plus per-field frequency buckets."""

    def test_countsByContactType(self):
        records = [
            {"contactType": "customer"},
            {"contactType": "customer"},
            {"contactType": "vendor"},
        ]
        summary = _summarizeAggregate(records)
        assert summary["total"] == 3
        assert summary["by_contactType"] == {"customer": 2, "vendor": 1}

    def test_emptyRecordsReturnsZero(self):
        # No records -> only the total key, no by_* buckets.
        assert _summarizeAggregate([]) == {"total": 0}
|
||||
166
tests/unit/workflow/test_workflowFileSchema.py
Normal file
166
tests/unit/workflow/test_workflowFileSchema.py
Normal file
|
|
@ -0,0 +1,166 @@
|
|||
# Copyright (c) 2026 Patrick Motsch
|
||||
# All rights reserved.
|
||||
"""Unit tests for the workflow-file (versioned envelope) schema."""
|
||||
|
||||
import pytest
|
||||
|
||||
from modules.features.graphicalEditor._workflowFileSchema import (
|
||||
WORKFLOW_FILE_KIND,
|
||||
WORKFLOW_FILE_SCHEMA_VERSION,
|
||||
WorkflowFileSchemaError,
|
||||
buildFileFromWorkflow,
|
||||
buildFileName,
|
||||
envelopeToWorkflowData,
|
||||
isWorkflowFileEnvelope,
|
||||
normalizeGraph,
|
||||
validateFileEnvelope,
|
||||
)
|
||||
|
||||
|
||||
def _sampleWorkflowRow() -> dict:
    """Build a workflow DB row fixture covering both portable fields
    (label, graph, invocations, ...) and persistence-only fields
    (id, mandateId, sys* timestamps) that export must strip."""
    return {
        "id": "wf-123",
        "mandateId": "mand-1",
        "featureInstanceId": "inst-1",
        "currentVersionId": "ver-1",
        "eventId": "evt-1",
        "active": True,
        "label": "Test Workflow",
        "description": "Round-trip sample",
        "tags": ["test"],
        "templateScope": None,
        "sharedReadOnly": False,
        "notifyOnFailure": True,
        "isTemplate": False,
        "graph": {
            "nodes": [
                # n1 uses flat x/y while n2 uses a nested "position" dict on
                # purpose: export must normalize both spellings.
                {"id": "n1", "type": "trigger.manual", "x": 50, "y": 200, "parameters": {}},
                {"id": "n2", "type": "ai.prompt", "position": {"x": 300, "y": 200}, "parameters": {"aiPrompt": "Hi"}},
            ],
            "connections": [
                {"source": "n1", "target": "n2", "sourceOutput": 0, "targetInput": 0},
            ],
        },
        "invocations": [{"id": "inv-1", "kind": "manual", "enabled": True, "title": {"de": "Start"}}],
        "sysCreatedAt": 1700000000.0,
        "sysModifiedAt": 1700000100.0,
    }
|
||||
|
||||
|
||||
def _sampleNodeTypes() -> list:
|
||||
return ["trigger.manual", "ai.prompt"]
|
||||
|
||||
|
||||
class TestBuildFile:
    """``buildFileFromWorkflow``: exported envelope shape and field hygiene."""

    def test_envelopeContainsKindAndSchemaVersion(self):
        envelope = buildFileFromWorkflow(_sampleWorkflowRow())
        assert envelope["$kind"] == WORKFLOW_FILE_KIND
        assert envelope["$schemaVersion"] == WORKFLOW_FILE_SCHEMA_VERSION
        assert "$exportedAt" in envelope

    def test_persistenceFieldsAreStripped(self):
        # DB/tenant identifiers must never leak into a portable file.
        envelope = buildFileFromWorkflow(_sampleWorkflowRow())
        for forbidden in ("id", "mandateId", "featureInstanceId", "currentVersionId", "eventId", "active", "sysCreatedAt", "sysModifiedAt"):
            assert forbidden not in envelope, f"{forbidden} must not appear in exported file"

    def test_portableFieldsAreCopied(self):
        envelope = buildFileFromWorkflow(_sampleWorkflowRow())
        assert envelope["label"] == "Test Workflow"
        assert envelope["description"] == "Round-trip sample"
        assert envelope["tags"] == ["test"]
        assert envelope["notifyOnFailure"] is True

    def test_graphPositionsAreNormalized(self):
        # Both the flat x/y spelling (n1) and the nested "position" dict
        # spelling (n2) must come out as flat coordinates.
        envelope = buildFileFromWorkflow(_sampleWorkflowRow())
        nodes = envelope["graph"]["nodes"]
        assert nodes[0]["x"] == 50
        assert nodes[0]["y"] == 200
        assert nodes[1]["x"] == 300
        assert nodes[1]["y"] == 200
        assert "position" not in nodes[1]
|
||||
|
||||
|
||||
class TestValidate:
    """``validateFileEnvelope``: hard errors raise, soft issues become warnings."""

    def test_validEnvelopeReturnsNoErrors(self):
        envelope = buildFileFromWorkflow(_sampleWorkflowRow())
        normalized, warnings = validateFileEnvelope(envelope, knownNodeTypes=_sampleNodeTypes())
        assert normalized["label"] == "Test Workflow"
        assert warnings == []

    def test_missingSchemaVersionRaises(self):
        with pytest.raises(WorkflowFileSchemaError):
            validateFileEnvelope({"label": "x", "graph": {}})

    def test_unsupportedSchemaVersionRaises(self):
        with pytest.raises(WorkflowFileSchemaError):
            validateFileEnvelope({"$schemaVersion": "99.0", "label": "x", "graph": {}})

    def test_missingLabelRaises(self):
        with pytest.raises(WorkflowFileSchemaError):
            validateFileEnvelope({"$schemaVersion": WORKFLOW_FILE_SCHEMA_VERSION, "graph": {}})

    def test_missingGraphRaises(self):
        with pytest.raises(WorkflowFileSchemaError):
            validateFileEnvelope({"$schemaVersion": WORKFLOW_FILE_SCHEMA_VERSION, "label": "x"})

    def test_unknownNodeTypeRaises(self):
        # The sample graph also uses "ai.prompt"; restricting the known set
        # to only "trigger.manual" must be rejected as a hard error.
        envelope = buildFileFromWorkflow(_sampleWorkflowRow())
        with pytest.raises(WorkflowFileSchemaError):
            validateFileEnvelope(envelope, knownNodeTypes=["trigger.manual"])

    def test_emptyNodesProducesWarning(self):
        # An empty graph is importable but should warn, not raise.
        envelope = {
            "$schemaVersion": WORKFLOW_FILE_SCHEMA_VERSION,
            "label": "Empty",
            "graph": {"nodes": [], "connections": []},
        }
        _, warnings = validateFileEnvelope(envelope)
        assert any("no nodes" in w.lower() for w in warnings)

    def test_danglingConnectionProducesWarning(self):
        # A connection pointing at a non-existent node id ("ghost") is a
        # soft issue: warn and continue.
        envelope = {
            "$schemaVersion": WORKFLOW_FILE_SCHEMA_VERSION,
            "label": "Bad",
            "graph": {
                "nodes": [{"id": "a", "type": "trigger.manual"}],
                "connections": [{"source": "a", "target": "ghost"}],
            },
        }
        _, warnings = validateFileEnvelope(envelope, knownNodeTypes=["trigger.manual"])
        assert any("ghost" in w for w in warnings)
|
||||
|
||||
|
||||
class TestRoundTrip:
    """Export -> validate -> import must preserve the portable workflow data."""

    def test_exportThenImportPreservesGraphStructure(self):
        original = _sampleWorkflowRow()
        envelope = buildFileFromWorkflow(original)
        normalized, _ = validateFileEnvelope(envelope, knownNodeTypes=_sampleNodeTypes())
        # Import targets a DIFFERENT mandate/instance than the export source.
        data = envelopeToWorkflowData(normalized, mandateId="mand-2", featureInstanceId="inst-2")

        assert data["mandateId"] == "mand-2"
        assert data["featureInstanceId"] == "inst-2"
        assert data["active"] is False, "imports must be inactive by default"
        assert data["label"] == original["label"]
        assert data["description"] == original["description"]
        assert len(data["graph"]["nodes"]) == len(original["graph"]["nodes"])
        assert len(data["graph"]["connections"]) == len(original["graph"]["connections"])
        # Persistence identifiers from the source row must not survive.
        for forbidden in ("id", "currentVersionId", "eventId"):
            assert forbidden not in data
|
||||
|
||||
|
||||
class TestHelpers:
    """Small helper functions of the workflow-file schema module."""

    def test_isWorkflowFileEnvelopeAcceptsValid(self):
        assert isWorkflowFileEnvelope(buildFileFromWorkflow(_sampleWorkflowRow())) is True

    def test_isWorkflowFileEnvelopeRejectsRandom(self):
        # Non-envelope dicts, non-dict values, and None are all rejected.
        assert isWorkflowFileEnvelope({"foo": "bar"}) is False
        assert isWorkflowFileEnvelope("not-a-dict") is False
        assert isWorkflowFileEnvelope(None) is False

    def test_buildFileNameProducesSafeSlug(self):
        # Labels are slugified; an empty label falls back to "workflow".
        assert buildFileName("PWG: Pilot Workflow!") == "pwg-pilot-workflow.workflow.json"
        assert buildFileName("") == "workflow.workflow.json"

    def test_normalizeGraphHandlesMissingFields(self):
        assert normalizeGraph(None) == {"nodes": [], "connections": []}
        assert normalizeGraph({}) == {"nodes": [], "connections": []}
|
||||
Loading…
Reference in a new issue