Tested workflow engine 3.1
This commit is contained in:
parent
472353fea0
commit
29c31e79bd
32 changed files with 1360 additions and 2063 deletions
app.py (6 changes)
@@ -208,9 +208,6 @@ async def lifespan(app: FastAPI):
    logger.info("Application has been shut down")


# START APP

app = FastAPI(
    title="PowerOn | Data Platform API",
@@ -248,6 +245,9 @@ app.add_middleware(TokenRefreshMiddleware, enabled=True)

# Proactive token refresh middleware (refresh tokens before they expire)
app.add_middleware(ProactiveTokenRefreshMiddleware, enabled=True, check_interval_minutes=5)

# Run triggered features
import modules.features.init

# Include all routers
from modules.routes.routeAdmin import router as generalRouter
app.include_router(generalRouter)
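For context, a middleware of this shape is usually a thin ASGI wrapper that checks token age on a timer rather than on every request. A minimal sketch of the idea, assuming Starlette's BaseHTTPMiddleware (the class name and check_interval_minutes parameter mirror the diff; the refresh helper is hypothetical):

    import time
    from starlette.middleware.base import BaseHTTPMiddleware

    class ProactiveTokenRefreshMiddleware(BaseHTTPMiddleware):
        """Sketch: refresh tokens on a fixed interval instead of per request."""

        def __init__(self, app, enabled: bool = True, check_interval_minutes: int = 5):
            super().__init__(app)
            self.enabled = enabled
            self.check_interval = check_interval_minutes * 60
            self._last_check = 0.0

        async def dispatch(self, request, call_next):
            if self.enabled and time.monotonic() - self._last_check > self.check_interval:
                self._last_check = time.monotonic()
                # hypothetical helper: scan stored tokens and refresh those near expiry
                # await refresh_expiring_tokens()
            return await call_next(request)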
@@ -5,6 +5,8 @@ APP_ENV_TYPE = dev
APP_ENV_LABEL = Development Instance Patrick
APP_API_URL = http://localhost:8000
APP_KEY_SYSVAR = D:/Athi/Local/Web/poweron/local/key.txt
APP_INIT_PASS_ADMIN_SECRET = DEV_ENC:Z0FBQUFBQm8xRjFXZXRwU0NnLTNhdFVUU3ZlcFU4emRMa2xLRno5c0xwSVhOcjgzNlBUWnZ2V2RmQ0RmRmE4a3BTQ3FRMHN2aWdsSTAxSDJrUGJ2UmQwME5Pa3RNTTgyVFh6NUl4YTJoZTVxdkExUVkyWnpac1k9
APP_INIT_PASS_EVENT_SECRET = DEV_ENC:Z0FBQUFBQm8xRjFXQWY5WWpQYXUzX2dTQllVNk1Vb1J1S2t2NG9PMEYzSWNLeVR6WlhvYjU4TDFmYjZva01oZll5QWI0MHU5cXJvT0lvZHdoNW01WWxqRG9pdEQyWTcxYWlJVE5SRXVIMkh2VTRlYk1kSGRVNnM9

# PostgreSQL Storage (new)
DB_APP_HOST=localhost
@@ -76,3 +78,6 @@ Connector_WebTavily_API_KEY_SECRET = DEV_ENC:Z0FBQUFBQm8wSFJNSEJ2YmVieFRaWk5yR1k

# Google Cloud Speech Services configuration
Connector_GoogleSpeech_API_KEY_SECRET = DEV_ENC:Z0FBQUFBQm8wSFJNSFFITGlUVzF3NE5Ldk10d3o5MS10Q2o4aEJGM250WF9CeWxFQVNaNHBhMk1hS3E5YXRrakh5dmx0VDJuZ3BsWGVMTC0tbU9wWFRWZWM1N25ibWpkeF84enJ1Y2ViMVd1V0plUWdxN3VId1VRUzBhN3MzLVBkSXEwM1BHT2Z2c3JBalh6eXVKMUNFX2pfbGdGYUg1ZUFfcXhSRnJyT0tzdWVVdG1HSHBZOUgwLUVPMVQ4YkZUc3dMcFlLWjRxQUM1X05OWm5ndmJGcjFETV9UM1FoLWt2RVVEem92UGhvZlRFXzNxOVRzQkhyV0hqeTRWQXdMdDVDbEMwOWFkTnV3UXpsYWZwRENaRzd4QjlwTjJUWHhHLVZPTzd1eXNhSWh5ajNwelgxSDRlNUx0N05yTlI1N1RjSzdIZGhFLXBOMjEwMkxsT0daSVhiWVpQZUtfNVdwdGVrazVMM2NkUGZPOHBuNjM3YXdFcGFPdlVtY01ReGhsVENwNnRvNGhJejNHd3hFOFA0bWgxalFFNDVoQ2xYTG5VN1dDZGhndEdWRlFjYzBRMUgwbzRfS2N3VVgyaXJpYmJfZzNadmx5cTFxS2Vja1I5Qm1UT0hDM1FuNk5JRmYtT2p3RWp2SWxTWGZuU1psOUN4NEJTOHkweWIzY2NjbTJRZG5oRjVxNGh4LTUwZE1zZi1zLU43Ulk4UGtmR0N6dU5RcVVvRF9DQlE5Sk1FR1YtOE84WnVuTDlOUHhQR1JLT2g0VkNIT2ctWTBuMXIwNHhSSjcxNnNWRFhQc18zSm1UR1M0Mm54TGxsRG5uX2tDSWhBNDRGaHFObkhuVmtnVVlQU1FhVWhTdnpGUDRfcDQ1OWpERklHMmN5Y0RVWC1JYlItTUozaWY1dmxZUW12NXAtUEtsQWpqUFk4NzFwWVNfSUNqeDNkc25wMnJHN3c5NTB1dmxmUFZfU0NWS1hQMTc1NmdOTmEyREZRVXB0cmlyaldkT3B0Q3FQMFdpdWQ3WU1RZDZKYlFneDdnQ2NWWHFHSXl1c2xRN21LbDdyUGFUcWFxeVVTOWoxSkVJaFZiUHI2VFBHWEdvM2Q1cXdIVGYyc3Y2cVdRd00ydHdrME8tcDVqSmNLV193R291VElTNWFNa2pMQi1zX21VdnZ1R0tTbEJndndvbWRrVE52eW1aTFFzRURtdGItc3FJeXJDenVTWTlIZ0E1eG1yX2N1SHJSUWIxdm8wakdzaDIyaDQ0cE9UdDlhclp2MzVVamQ2em0zbmdLUzBJa1ZaRFpQaTBnZGpTWnRhRGZxUVNZWDg5VDFndWFmZlZnVG5SUEhlWkpfQnREWS0xbEZfNXd5OUpEUkZHa1NZNWtPbnBadFFialgzazlyM0dTb3ctR2x5LUozT3VDc3F1Tk5TbGN2MnRRS1hTb1gzWUNVSlJuUl85azhxaGxCMzVNQUQzVGg1cDZHalRaOUFrM1JPSGJKaGlKRTAwbnV4TmxIZnhkMF9FODVKUk1GZGlWZk1ScnhmQnJXWmRxMTk3SWhIdnBjSVJJOElkalRUWXFRTFNvQXZpdFpFOUdDWkhHOTRLVmN2cEh0X2JpYjNvRjhvUHFVQVNQdXY4OWxQSWNvcUNfZW5HYy10dEFicldhRHZLS1ktY2RGczQta2lGWXkxb2RhNUZMNExabWx0dXdhR3BSWGpSYVUxRXJZVTNBYmdNVFd5NW1vY2s0T0RlV3hqZjNSMHhJakY1TDBackV5bmM2V1o2SEJlT3RSbnpPR0VXbmhQTUtPMzYyU1RjbFRmQUlWTUZjVGRheXBuekZJN3NNZVFFZ3JHenNnOFdQVWxsbFBoYTVvQUd1NGx2SDdYcGhrdUpSWlRIRWVVUkpxdjJSZV9zb0J3N3o4QnRpYXpTRHdkZ1pqSWswSjdJMjVEZDZUNzZuWDVXWkNxUDRtQ1p1dnk2ZEx0S0NKT2ZUc3B5eEdRdEpnTlZQMkt5OHFjQ3FfcHpzUFZEY3Z5WDdEQkt4cEN2MFg2eXF4bDZFeHZFWk5tMFpUR0xDZi1JVjN4eUtRaXlNXzBJUFV2N19MVTRhMWtxWnd6d0Y2bVNFQUJSdEU5Z01FTjEtZDJmWkpEYUlsTVJnTEJYdU1iVFoySEttd3libURrSUNJelVic2Mzb0t5ZzNDX0hjZUtfOFQ1QkxRWmx2dmhnbDhNZllla1dNa0Y5akVpNDRKdHRSUU9fTE9sYVUzdzZtTkJEYTBWdkxkRURSa01TOGxWcVZkUmxkWTA1QjJjS1pOUjJEQTZxeDdSVXhNWldXbnE1V1J2STVCNkt2VHRuNEdtaHUweWdEbUZyMlhWd09FWWI0UUFyQVpUeDE3QXdfQkMtcjdpUU5GUTQzUEczNWg1Wm5rVEgwRW11RFowVnFxYnpGNUYwYks1Y3JPbTdUc2ZXS1ZfYzdhcno3U1ZXZUVkblRoOVl5XzZpTUgwRXFZeFd6NXdqTGlvNm1QeXgxS2ZFTVJSV1JVejliWFBVRGU1MWVudEZzRDFwSW94YlU1Y3JmallsVldXcHdvTmFQdnU5UE0tNHNHMXhPWE1JQUxCNC1WVVRJNmNJcTM3a1dUWWwzSVptTFg3OXlWLWxITkdiR0MyTmRzRWFOeHBMZEVzbms3RC1MTFo1TVhKeURhUW9peHk1bHhJbHphVzR4RmxiUkJwcmkzcWZ3S3dWV0Jkb2VaZ3pMTXdUNUJmZjZfVEVXeDFNMnBvemM0TUJNeUQ2SE1aeWczc0V6M0NUMHFGdURMbTRka3AzZ1d1TUh2V1c5RzBKQVVlTEstWEthOTdaWUZHTlRHaVNmbEFJRFU3M0l2TWlBNF9kaFpJUXlxMHJYa2lxOGFRbDNqMTA1RDFFclFTcGxmb0g2WVI3Z0NrLWN4cUNzNWVuR2VMaE41dWRqMnR5eWNuM0gwUmIwcTFEQ09qbmJCUFIwbjM4MGF6TlhxQWpKOFZXWGNKdnl2Wi1zU1BsZU5NYWpsbzVKMGxTLUJKckd6enJnZWhXemstenN3NGNqUk9HeGlGaFNhSl83TlUzLTVZWW9zYVZZTTZzSjNfd3JkVDNaZVp4dk1GQVMxblJBRW1BWUZLU1VKUFkyQ1dPbndUNjYwdll2U0JxN1FQNk5OaGVYR3U5TXdGNGFVZGVXcS1tS2dwbVc1V3hEeXhVNkJ2cjdGX2FpY1NvOTJhcWFyOUVGOFpOdmd0R29Rb2RIaU01R05LeWRxUE00WlhOQVlMbkZxZDNyUFRXdUFGZ0lOUmp2RzIyaDlzMGxNQk40VzFzYjAwMEhjRVlrNWJ5cFhpVWYxQkxYQ25rUDJ3RTY1VlVFLThiNG1nY1hkdnZTMGoyVlN6dkJleFhndDNCODhlOVl1ZHBkci1hd3l0NGNXeWZ6aUp4S3pHS1c4aDM3WElBTjBwYlNSbmJoMk5SNF81VVNqd0dXY1JUejVsZnpGS1Z5dHFPNUVVM1I5eGhjblZjMV9idFJkc3NZaUdHRlIzQWJQdHhzT01qVW8xUUwxNHZmY3Q1aHBnNHhXTGRjb1BmTmM2X0NmdkpxNS1JMHNQNVg1N0xsd0pmdE8wNktkUGpuX0F3LURyaGhyajg3eWNDdkozUFZIYmpJTTZ3WWVCVFZUd1AtRklFUUxTNXkzalpfdlc4VE1tOHU1Q0MtUWdLbEdYRzdVU1RkM3gyeEY3eXBWLUhXVVo4VkZoUHVkakJPNk0tNTJKTU1JZjVISlR3SmJBQkVhRW51UHg3UjBOMVRPRnF2dzIwRkgxczBBUWZpemFFMzFTeDJfWHZhSkhsTzBhcFIzVmZRODEzRUl1b1ZDUGFqYUxjN2JsbkhYdHVPT00yYlUwbmpVbkU0RkJXbWx5UVFJdHNvNUdxQzMyQnQycDJpMjlnd2xwb3huRUJiZUg5dkhaMjhMV2R5T0NsU0N4WjdBX2ZfODhOdTZOZ0x6WlRIUGI3MzR1ZkJicHN6NzUzRzlsUmVkNlR6MjZjTTA3c290Qzh4ejRiWERHbmFtV1BQV2ZKb2pGU0F1OGsySG9hNHdtSkkxTWpwV2gyaVpWcFpsRWs5a0hSY3UzMk4wQ0dkZWtMbG4xOFZ6TXdEOXBob3I0NjNkT28tZk5IcW5FUkg4YnBtUVFLY1Q5M1lzYzhrRGZOaDF6SnpnejRuM1Y3SW1xMUJmLXpJdEM0UjNHU0t5OEhoamxxLXRmWmtyOS1ud09XeGFzc3VFXzNPWWNGcXFwdHN2cVFEZ0dWdUNKbF9Lc3d6dVhPb3NLMlNEaW1xd3JPLUViYV9GTnNRPT0=

# Feature SyncDelta JIRA configuration
Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = DEV_ENC:Z0FBQUFBQm8xRnZIVUdfQTg5dXRfenlxanFvdXBCSmltQmNhRFNkUnNYai0zWlZqV1NmS212SlhsRUdwYXRRcjVvRVdqaWN0RWNrMjNYWG1VazJOaU5xWnZDWFJoWGxvTXpTbHNzbk9nTGlxa0ZZQk5WZHNHaHVDdmNQYTBRbHBQaFlvY0FYM3NzU05MUHNZU1AxaWNCM1ZTQllLdFpZX2pWektnUTF1WGRiRGtsOGE0bVdPYXp4aEhlWDVTWmYzWlRuVl84Zk5pREZJay1zMWRpTWxKYnFtYzBXM01vMzdiN0JlQl9kYXN2Sy1ZZUdnT2dYNzFuajA1QUJVbTFKS1dfUjNUMS1MSC0wOGc1ZDd2NTJESTR6M2Y2SDhsUG40OWM0b2djeDdZTS1qOVl2YlhhbGx2dG1KWkd3andQZXJVVC1zWDdBMzdHOVdRMHRJN3RabzZNWWlEY0xqcUg4Nk9RPT0=
env_int.env (22 changes)
File diff suppressed because one or more lines are too long

env_prod.env (25 changes)
File diff suppressed because one or more lines are too long
@@ -6,19 +6,34 @@ import logging
from typing import List, Dict, Any, Optional, Union, get_origin, get_args
from datetime import datetime
import uuid
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
import threading
import time

-from modules.shared.attributeUtils import to_dict
+from modules.shared.attributeUtils import to_dict, ModelMixin
from modules.shared.timezoneUtils import get_utc_timestamp
from modules.shared.configuration import APP_CONFIG
from modules.interfaces.interfaceAppModel import SystemTable

logger = logging.getLogger(__name__)

# No mapping needed - table name = Pydantic model name exactly

class SystemTable(BaseModel, ModelMixin):
    """Data model for system table entries"""
    table_name: str = Field(
        description="Name of the table",
        frontend_type="text",
        frontend_readonly=True,
        frontend_required=True
    )
    initial_id: Optional[str] = Field(
        default=None,
        description="Initial ID for the table",
        frontend_type="text",
        frontend_readonly=True,
        frontend_required=False
    )

def _get_model_fields(model_class) -> Dict[str, str]:
    """Get all fields from Pydantic model and map to SQL types."""
    if not hasattr(model_class, '__fields__'):
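The truncated _get_model_fields helper maps Pydantic field annotations to SQL column types. A minimal sketch of how such a mapping can work (the SQL type names and the Pydantic v2 model_fields accessor are assumptions; the diff itself checks the older __fields__ attribute):

    from typing import Dict, Union, get_args, get_origin
    from datetime import datetime

    _SQL_TYPES = {str: "TEXT", int: "INTEGER", float: "DOUBLE PRECISION",
                  bool: "BOOLEAN", datetime: "TIMESTAMP"}

    def get_model_fields_sketch(model_class) -> Dict[str, str]:
        """Map each Pydantic field annotation to an SQL type name."""
        fields: Dict[str, str] = {}
        for name, info in model_class.model_fields.items():  # Pydantic v2 accessor
            annotation = info.annotation
            if get_origin(annotation) is Union:
                # Unwrap Optional[X] (Union[X, None]) to its inner type
                args = [a for a in get_args(annotation) if a is not type(None)]
                if args:
                    annotation = args[0]
            fields[name] = _SQL_TYPES.get(annotation, "TEXT")
        return fields

    # e.g. get_model_fields_sketch(SystemTable) -> {'table_name': 'TEXT', 'initial_id': 'TEXT'}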
modules/connectors/connectorTicketsClickup.py (145 lines, new file)

@@ -0,0 +1,145 @@
"""ClickUp connector for CRUD operations (compatible with TicketInterface)."""

from dataclasses import dataclass
from typing import Optional
import logging
import aiohttp

from modules.interfaces.interfaceTicketModel import (
    TicketBase,
    TicketFieldAttribute,
    Task,
)


logger = logging.getLogger(__name__)


class ConnectorTicketClickup(TicketBase):
    def __init__(
        self,
        *,
        apiToken: str,
        teamId: str,
        listId: Optional[str] = None,
        apiUrl: str = "https://api.clickup.com/api/v2",
    ) -> None:
        self.apiToken = apiToken
        self.teamId = teamId
        self.listId = listId
        self.apiUrl = apiUrl

    def _headers(self) -> dict:
        return {
            "Authorization": self.apiToken,
            "Content-Type": "application/json",
        }

    async def read_attributes(self) -> list[TicketFieldAttribute]:
        """Fetch field attributes. Uses list custom fields if listId provided; else basic fields."""
        attributes: list[TicketFieldAttribute] = []
        try:
            async with aiohttp.ClientSession() as session:
                if self.listId:
                    url = f"{self.apiUrl}/list/{self.listId}/field"
                    async with session.get(url, headers=self._headers()) as response:
                        if response.status != 200:
                            logger.warning(f"ClickUp fields fetch status: {response.status}")
                        else:
                            data = await response.json()
                            for field in data.get("fields", []):
                                fieldId = field.get("id")
                                fieldName = field.get("name", fieldId)
                                if fieldId:
                                    attributes.append(TicketFieldAttribute(fieldName=fieldName, field=fieldId))

                # Add common top-level fields
                core_fields = [
                    ("ID", "id"),
                    ("Name", "name"),
                    ("Status", "status.status"),
                    ("Assignees", "assignees"),
                    ("DateCreated", "date_created"),
                    ("DueDate", "due_date"),
                ]
                for name, fid in core_fields:
                    attributes.append(TicketFieldAttribute(fieldName=name, field=fid))
        except Exception as e:
            logger.error(f"ClickUp read_attributes error: {e}")
        return attributes

    async def read_tasks(self, *, limit: int = 0) -> list[Task]:
        """Read tasks from ClickUp, always returning full task records.

        If listId is set, read from that list; otherwise read from team.
        """
        tasks: list[Task] = []
        try:
            async with aiohttp.ClientSession() as session:
                page = 0
                pageSize = 100
                while True:
                    if self.listId:
                        url = f"{self.apiUrl}/list/{self.listId}/task?subtasks=true&page={page}&order_by=created&reverse=true"
                    else:
                        # Team-level search for open tasks
                        url = f"{self.apiUrl}/team/{self.teamId}/task?subtasks=true&page={page}&order_by=created&reverse=true"

                    # Request with parameters to include all fields where possible
                    async with session.get(url, headers=self._headers()) as response:
                        if response.status != 200:
                            errorText = await response.text()
                            logger.error(f"ClickUp read_tasks failed: {response.status} {errorText}")
                            break

                        data = await response.json()
                        items = data.get("tasks", [])
                        for item in items:
                            tasks.append(Task(data=item))
                            if limit and len(tasks) >= limit:
                                return tasks

                    if len(items) < pageSize:
                        break
                    page += 1
        except Exception as e:
            logger.error(f"ClickUp read_tasks error: {e}")
        return tasks

    async def write_tasks(self, tasklist: list[Task]) -> None:
        """Update tasks in ClickUp. Expects Task.data to contain {'ID' or 'id' or 'task_id', 'fields': {...}}"""
        try:
            async with aiohttp.ClientSession() as session:
                for task in tasklist:
                    data = task.data
                    taskId = data.get("ID") or data.get("id") or data.get("task_id")
                    fields = data.get("fields", {})
                    if not taskId or not isinstance(fields, dict) or not fields:
                        continue

                    # Map generic fields to ClickUp payload
                    payload: dict = {}
                    for fieldId, value in fields.items():
                        # Heuristics: map common field ids
                        if fieldId in ("name", "summary"):
                            payload["name"] = value
                        elif fieldId in ("status",):
                            payload["status"] = value
                        elif fieldId.startswith("customfield_") or fieldId.startswith("cf_"):
                            # ClickUp custom fields need separate endpoint; attempt inline update if supported
                            if "custom_fields" not in payload:
                                payload["custom_fields"] = []
                            payload["custom_fields"].append({"id": fieldId, "value": value})
                        else:
                            # Best-effort assign to description for unknown text fields
                            if isinstance(value, str) and value:
                                payload.setdefault("description", value)

                    url = f"{self.apiUrl}/task/{taskId}"
                    async with session.put(url, headers=self._headers(), json=payload) as response:
                        if response.status not in (200, 204):
                            err = await response.text()
                            logger.error(f"ClickUp update failed for {taskId}: {response.status} {err}")
        except Exception as e:
            logger.error(f"ClickUp write_tasks error: {e}")
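As a usage sketch for the new connector (the token and IDs below are placeholders):

    import asyncio
    from modules.connectors.connectorTicketsClickup import ConnectorTicketClickup

    async def main():
        connector = ConnectorTicketClickup(
            apiToken="pk_xxxxxxxx",   # placeholder personal API token
            teamId="9001234567",      # placeholder workspace (team) id
            listId="900987654321",    # optional: restrict reads to one list
        )
        attributes = await connector.read_attributes()
        tasks = await connector.read_tasks(limit=10)
        print(f"{len(attributes)} fields, {len(tasks)} tasks")

    asyncio.run(main())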
@@ -1,47 +1,32 @@
-"""Jira connector for CRUD operations."""
+"""Jira connector for CRUD operations (neutralized to generic ticket interface)."""

from dataclasses import dataclass
import os
import logging
import aiohttp
import asyncio
import json

-from modules.interfaces.interfaceTicketModel import (
-    TicketBase,
-    TicketFieldAttribute,
-    Task,
-)
+from modules.interfaces.interfaceTicketModel import (TicketBase, TicketFieldAttribute, Task, )

logger = logging.getLogger(__name__)


@dataclass
class ConnectorTicketJira(TicketBase):
-    jira_username: str
-    jira_api_token: str
-    jira_url: str
-    project_code: str
-    issue_type: str
-
-    @classmethod
-    async def create(
-        cls,
+    def __init__(
+        self,
+        *,
-        jira_username: str,
-        jira_api_token: str,
-        jira_url: str,
-        project_code: str,
-        issue_type: str,
-    ):
-        return ConnectorTicketJira(
-            jira_username=jira_username,
-            jira_api_token=jira_api_token,
-            jira_url=jira_url,
-            project_code=project_code,
-            issue_type=issue_type,
-        )
+        apiUsername: str,
+        apiToken: str,
+        apiUrl: str,
+        projectCode: str,
+        ticketType: str,
+    ) -> None:
+        self.apiUsername = apiUsername
+        self.apiToken = apiToken
+        self.apiUrl = apiUrl
+        self.projectCode = projectCode
+        self.ticketType = ticketType


    async def read_attributes(self) -> list[TicketFieldAttribute]:
        """

@@ -52,16 +37,16 @@ class ConnectorTicketJira(TicketBase):
            list[TicketFieldAttribute]: List of field attributes with names and IDs
        """
        # Build JQL dynamically; allow empty or '*' issue_type to mean "all types"
-        if self.issue_type and self.issue_type != "*":
-            jql_query = f"project={self.project_code} AND issuetype={self.issue_type}"
+        if self.ticketType and self.ticketType != "*":
+            jql_query = f"project={self.projectCode} AND issuetype={self.ticketType}"
        else:
-            jql_query = f"project={self.project_code}"
+            jql_query = f"project={self.projectCode}"

        # Prepare the request URL (use JQL search endpoint)
-        url = f"{self.jira_url}/rest/api/3/search/jql"
+        url = f"{self.apiUrl}/rest/api/3/search/jql"

        # Prepare authentication
-        auth = aiohttp.BasicAuth(self.jira_username, self.jira_api_token)
+        auth = aiohttp.BasicAuth(self.apiUsername, self.apiToken)

        try:
            async with aiohttp.ClientSession() as session:

@@ -100,9 +85,9 @@ class ConnectorTicketJira(TicketBase):
                    fields = issue.get("fields", {})

                    for field_id, value in fields.items():
-                        field_name = field_names.get(field_id, field_id)
+                        fieldName = field_names.get(field_id, field_id)
                        attributes.append(
-                            TicketFieldAttribute(field_name=field_name, field=field_id)
+                            TicketFieldAttribute(fieldName=fieldName, field=field_id)
                        )

                logger.info(

@@ -122,8 +107,8 @@ class ConnectorTicketJira(TicketBase):

    async def _read_all_fields_via_fields_api(self) -> list[TicketFieldAttribute]:
        """Fallback: use Jira fields API to list all fields with id->name mapping."""
-        auth = aiohttp.BasicAuth(self.jira_username, self.jira_api_token)
-        url = f"{self.jira_url}/rest/api/3/field"
+        auth = aiohttp.BasicAuth(self.apiUsername, self.apiToken)
+        url = f"{self.apiUrl}/rest/api/3/field"
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url, auth=auth) as response:

@@ -138,10 +123,10 @@ class ConnectorTicketJira(TicketBase):
                    attributes: list[TicketFieldAttribute] = []
                    for field in data:
                        field_id = field.get("id")
-                        field_name = field.get("name", field_id)
+                        fieldName = field.get("name", field_id)
                        if field_id:
                            attributes.append(
-                                TicketFieldAttribute(field_name=field_name, field=field_id)
+                                TicketFieldAttribute(fieldName=fieldName, field=field_id)
                            )
                    logger.info(
                        f"Successfully retrieved {len(attributes)} field attributes via fields API"

@@ -162,10 +147,10 @@ class ConnectorTicketJira(TicketBase):
            list[Task]: List of tasks with their data
        """
        # Build JQL dynamically; allow empty or '*' issue_type to mean "all types"
-        if self.issue_type and self.issue_type != "*":
-            jql_query = f"project={self.project_code} AND issuetype={self.issue_type}"
+        if self.ticketType and self.ticketType != "*":
+            jql_query = f"project={self.projectCode} AND issuetype={self.ticketType}"
        else:
-            jql_query = f"project={self.project_code}"
+            jql_query = f"project={self.projectCode}"

        # Initialize variables for pagination (cursor-based /search/jql)
        max_results = 100

@@ -176,8 +161,8 @@ class ConnectorTicketJira(TicketBase):
        seen_issue_ids: set[str] = set()

        # Prepare authentication
-        auth = aiohttp.BasicAuth(self.jira_username, self.jira_api_token)
-        url = f"{self.jira_url}/rest/api/3/search/jql"
+        auth = aiohttp.BasicAuth(self.apiUsername, self.apiToken)
+        url = f"{self.apiUrl}/rest/api/3/search/jql"

        try:
            async with aiohttp.ClientSession() as session:

@@ -286,7 +271,7 @@ class ConnectorTicketJira(TicketBase):
            tasklist: List of Task objects containing task data to update
        """
        headers = {"Accept": "application/json", "Content-Type": "application/json"}
-        auth = aiohttp.BasicAuth(self.jira_username, self.jira_api_token)
+        auth = aiohttp.BasicAuth(self.apiUsername, self.apiToken)

        try:
            async with aiohttp.ClientSession() as session:

@@ -346,7 +331,7 @@ class ConnectorTicketJira(TicketBase):
                    update_data = {"fields": processed_fields}

                    # Make the update request
-                    url = f"{self.jira_url}/rest/api/3/issue/{task_id}"
+                    url = f"{self.apiUrl}/rest/api/3/issue/{task_id}"

                    async with session.put(
                        url, json=update_data, headers=headers, auth=auth

@@ -365,3 +350,5 @@ class ConnectorTicketJira(TicketBase):
        except Exception as e:
            logger.error(f"Unexpected error while updating Jira tasks: {str(e)}")
            raise
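The read path pages through Jira Cloud's cursor-based /rest/api/3/search/jql endpoint. A minimal sketch of that pagination pattern (a simplified stand-in for the connector's loop; per Atlassian's documented API the response carries a nextPageToken while more pages remain, but treat the parameter names here as assumptions):

    import aiohttp

    async def fetch_all_issues(apiUrl: str, auth: aiohttp.BasicAuth, jql: str) -> list[dict]:
        """Collect all issues for a JQL query by following nextPageToken cursors."""
        issues: list[dict] = []
        next_token: str | None = None
        async with aiohttp.ClientSession() as session:
            while True:
                params = {"jql": jql, "maxResults": "100", "fields": "*all"}
                if next_token:
                    params["nextPageToken"] = next_token
                async with session.get(f"{apiUrl}/rest/api/3/search/jql",
                                       params=params, auth=auth) as resp:
                    data = await resp.json()
                issues.extend(data.get("issues", []))
                next_token = data.get("nextPageToken")
                if not next_token:
                    return issues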
@@ -35,20 +35,7 @@ from modules.shared.configuration import APP_CONFIG
logger = logging.getLogger(__name__)


-# Configuration loading functions
-def get_web_crawl_timeout() -> int:
-    """Get web crawl timeout from configuration"""
-    return int(APP_CONFIG.get("Web_Crawl_TIMEOUT", "30"))
-
-
-def get_web_crawl_max_retries() -> int:
-    """Get web crawl max retries from configuration"""
-    return int(APP_CONFIG.get("Web_Crawl_MAX_RETRIES", "3"))
-
-
-def get_web_crawl_retry_delay() -> int:
-    """Get web crawl retry delay from configuration"""
-    return int(APP_CONFIG.get("Web_Crawl_RETRY_DELAY", "2"))
+# Cached configuration values are loaded into the connector instance on creation


@dataclass

@@ -66,13 +53,26 @@ class TavilyCrawlResult:
@dataclass
class ConnectorTavily(WebSearchBase, WebCrawlBase, WebScrapeBase):
    client: AsyncTavilyClient = None
+    # Cached settings loaded at initialization time
+    crawl_timeout: int = 30
+    crawl_max_retries: int = 3
+    crawl_retry_delay: int = 2

    @classmethod
    async def create(cls):
        api_key = APP_CONFIG.get("Connector_WebTavily_API_KEY_SECRET")
        if not api_key:
            raise ValueError("Tavily API key not configured. Please set Connector_WebTavily_API_KEY_SECRET in config.ini")
-        return cls(client=AsyncTavilyClient(api_key=api_key))
+        # Load and cache web crawl related configuration
+        crawl_timeout = int(APP_CONFIG.get("Web_Crawl_TIMEOUT", "30"))
+        crawl_max_retries = int(APP_CONFIG.get("Web_Crawl_MAX_RETRIES", "3"))
+        crawl_retry_delay = int(APP_CONFIG.get("Web_Crawl_RETRY_DELAY", "2"))
+        return cls(
+            client=AsyncTavilyClient(api_key=api_key),
+            crawl_timeout=crawl_timeout,
+            crawl_max_retries=crawl_max_retries,
+            crawl_retry_delay=crawl_retry_delay,
+        )

    async def search_urls(self, request: WebSearchRequest) -> WebSearchActionResult:
        """Handles the web search request.

@@ -240,9 +240,9 @@ class ConnectorTavily(WebSearchBase, WebCrawlBase, WebScrapeBase):
        """Calls the Tavily API to extract text content from URLs with retry logic."""
        import asyncio

-        max_retries = get_web_crawl_max_retries()
-        retry_delay = get_web_crawl_retry_delay()
-        timeout = get_web_crawl_timeout()
+        max_retries = self.crawl_max_retries
+        retry_delay = self.crawl_retry_delay
+        timeout = self.crawl_timeout

        for attempt in range(max_retries + 1):
            try:
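For reference, the retry logic those cached values feed is the standard bounded-retry pattern. A minimal sketch, assuming a fixed delay between attempts (extract stands in for the Tavily client call):

    import asyncio

    async def call_with_retries(extract, urls, *, max_retries: int = 3,
                                retry_delay: int = 2, timeout: int = 30):
        """Retry an async call up to max_retries times, waiting retry_delay seconds."""
        last_error: Exception | None = None
        for attempt in range(max_retries + 1):
            try:
                return await asyncio.wait_for(extract(urls), timeout=timeout)
            except Exception as e:  # real code would narrow the exception types
                last_error = e
                if attempt < max_retries:
                    await asyncio.sleep(retry_delay)
        raise last_error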
modules/features/init.py (18 lines, new file)

@@ -0,0 +1,18 @@
# Launch features as events

import asyncio
import logging
from modules.interfaces.interfaceAppObjects import getRootInterface

# GET EVENT USER

logger = logging.getLogger(__name__)
eventUser = getRootInterface().getUserByUsername("event")
if not eventUser:
    logger.error("Event user not found")

# LAUNCH FEATURES

from modules.features.syncDelta.mainSyncDelta import ManagerSyncDelta, performSync
managerSyncDelta = ManagerSyncDelta(eventUser)
asyncio.create_task(performSync(eventUser))
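One subtlety worth noting: asyncio.create_task only succeeds while an event loop is running, so a module like this must be imported from code that is already inside the loop. A defensive variant of the launch call might look like this sketch (launch_background_sync is illustrative, not part of the codebase):

    import asyncio

    def launch_background_sync(coro_factory):
        """Schedule a background task if a loop is running; fail loudly otherwise."""
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            raise RuntimeError("features.init must be imported while the event loop is running")
        return loop.create_task(coro_factory())

    # usage sketch (performSync and eventUser as in the module above):
    # task = launch_background_sync(lambda: performSync(eventUser))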
@@ -1,8 +1,8 @@
import logging
from typing import Any, Dict, List, Optional

-from modules.interfaces.interfaceAppModel import User
-from modules.services.serviceNeutralization.mainNeutralization import NeutralizationService
+from modules.interfaces.interfaceAppModel import User, DataNeutralizerAttributes, DataNeutraliserConfig
+from modules.services.serviceNeutralization.mainServiceNeutralization import NeutralizationService

logger = logging.getLogger(__name__)
@@ -75,6 +75,53 @@ class NeutralizationPlayground:
                'error': str(e),
            }

    # Additional methods needed by the route
    def get_config(self) -> Optional[DataNeutraliserConfig]:
        """Get neutralization configuration"""
        return self.service.getConfig()

    def save_config(self, config_data: Dict[str, Any]) -> DataNeutraliserConfig:
        """Save neutralization configuration"""
        return self.service.saveConfig(config_data)

    def neutralize_text(self, text: str, file_id: str = None) -> Dict[str, Any]:
        """Neutralize text content"""
        return self.service.processText(text)

    def resolve_text(self, text: str) -> str:
        """Resolve UIDs in neutralized text back to original text"""
        return self.service.resolveText(text)

    def get_attributes(self, file_id: str = None) -> List[DataNeutralizerAttributes]:
        """Get neutralization attributes, optionally filtered by file ID"""
        if not self.service.app_interface:
            return []
        try:
            all_attributes = self.service._getAttributes()
            if file_id:
                return [attr for attr in all_attributes if attr.fileId == file_id]
            return all_attributes
        except Exception as e:
            logger.error(f"Error getting attributes: {str(e)}")
            return []

    async def process_sharepoint_files(self, source_path: str, target_path: str) -> Dict[str, Any]:
        """Process files from SharePoint source path and store neutralized files in target path"""
        return await self.processSharepointFiles(source_path, target_path)

    def batch_neutralize_files(self, files_data: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Process multiple files for neutralization"""
        file_ids = [file_data.get('fileId') for file_data in files_data if file_data.get('fileId')]
        return self.processFiles(file_ids)

    def get_processing_stats(self) -> Dict[str, Any]:
        """Get neutralization processing statistics"""
        return self.getStats()

    def cleanup_file_attributes(self, file_id: str) -> bool:
        """Clean up neutralization attributes for a specific file"""
        return self.cleanAttributes(file_id)


# Internal SharePoint helper module separated to keep feature logic tidy
class SharepointProcessor:
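As a usage sketch of this snake_case wrapper layer (construction elided; the result key is an assumption, since processText's return shape is not shown in this hunk):

    # playground = NeutralizationPlayground(...)  # constructed as elsewhere in this module
    result = playground.neutralize_text("Contact Jane Doe at jane@example.com")
    neutralized = result.get("text", "")          # assumed key in the processText result dict
    restored = playground.resolve_text(neutralized)  # maps UIDs back to the original strings
    print(neutralized, "->", restored)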
@@ -1,22 +1,17 @@
"""
-Delta Group JIRA-SharePoint Sync Manager
+Delta Group Sync Manager

-This module handles the synchronization of JIRA tickets to SharePoint using the new
+This module handles the synchronization of tickets to SharePoint using the new
Graph API-based connector architecture.
"""

import logging
import os
import csv
import io
from datetime import datetime, UTC
from typing import Dict, Any, List, Optional
from modules.services.serviceSharepoint.mainSharepoint import SharepointService
-from modules.connectors.connectorTicketJira import ConnectorTicketJira
from modules.interfaces.interfaceAppObjects import getRootInterface
from modules.interfaces.interfaceAppModel import UserInDB
+from modules.interfaces.interfaceTicketObjects import TicketSharepointSyncInterface
from modules.shared.timezoneUtils import get_utc_timestamp
+from modules.services import getInterface as getServices
+# Removed direct import - now using services.ticket
from modules.shared.configuration import APP_CONFIG

logger = logging.getLogger(__name__)
@@ -25,7 +20,497 @@ logger = logging.getLogger(__name__)
APP_ENV_TYPE = APP_CONFIG.get("APP_ENV_TYPE", "dev")


-def convert_adf_to_text(adf_data):
+class ManagerSyncDelta:
    """Manages Tickets to SharePoint synchronization for Delta Group.

    Supports two sync modes:
    - CSV mode: Uses CSV files for synchronization (default)
    - Excel mode: Uses Excel (.xlsx) files for synchronization

    To change sync mode, use the setSyncMode() method or modify SYNC_MODE class variable.
    """

    SHAREPOINT_SITE_NAME = "SteeringBPM"
    SHAREPOINT_SITE_PATH = "SteeringBPM"
    SHAREPOINT_HOSTNAME = "deltasecurityag.sharepoint.com"
    SHAREPOINT_MAIN_FOLDER = "/General/50 Docs hosted by SELISE"
    SHAREPOINT_BACKUP_FOLDER = "/General/50 Docs hosted by SELISE/SyncHistory"
    SHAREPOINT_AUDIT_FOLDER = "/General/50 Docs hosted by SELISE/SyncHistory"
    SHAREPOINT_USER_ID = "patrick.motsch@delta.ch"

    SYNC_MODE = "xlsx"  # Can be "csv" or "xlsx"
    # File names for different sync modes
    SYNC_FILE_CSV = "DELTAgroup x SELISE Ticket Exchange List.csv"
    SYNC_FILE_XLSX = "DELTAgroup x SELISE Ticket Exchange List.xlsx"

    # Tickets connection parameters
    JIRA_USERNAME = "p.motsch@valueon.ch"
    JIRA_API_TOKEN = APP_CONFIG.get("Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET", "")
    JIRA_URL = "https://deltasecurity.atlassian.net"
    JIRA_PROJECT_CODE = "DCS"
    JIRA_ISSUE_TYPE = "Task"

    # Task sync definition for field mapping
    TASK_SYNC_DEFINITION = {
        # key=excel-header, [get:ticket>excel | put: excel>ticket, tickets-xml-field-list]
        'ID': ['get', ['key']],
        'Module Category': ['get', ['fields', 'customfield_10058', 'value']],
        'Summary': ['get', ['fields', 'summary']],
        'Description': ['get', ['fields', 'description']],  # ADF format - needs conversion to text
        'References': ['get', ['fields', 'customfield_10066']],  # Field exists, may be None
        'Priority': ['get', ['fields', 'priority', 'name']],
        'Issue Status': ['get', ['fields', 'status', 'name']],
        'Assignee': ['get', ['fields', 'assignee', 'displayName']],
        'Issue Created': ['get', ['fields', 'created']],
        'Due Date': ['get', ['fields', 'duedate']],  # Field exists, may be None
        'DELTA Comments': ['get', ['fields', 'customfield_10167']],  # Field exists, may be None
        'SELISE Ticket References': ['put', ['fields', 'customfield_10067']],
        'SELISE Status Values': ['put', ['fields', 'customfield_10065']],
        'SELISE Comments': ['put', ['fields', 'customfield_10168']],
    }

    def __init__(self, eventUser=None):
        self.targetSite = None
        self.services = None
        self.sharepointConnection = None
        self.eventUser = eventUser
        self.sync_audit_log = []  # Store audit log entries in memory

        try:
            if not eventUser:
                logger.error("Event user not found - SharePoint connection required")
                self._log_audit_event("SYNC_INIT", "FAILED", "Event user not found")
            else:
                self.services = getServices(eventUser, None)
                # Resolve SharePoint connection for the configured user id
                self.sharepointConnection = self.services.workflowService.getUserConnectionByExternalUsername(
                    "msft", self.SHAREPOINT_USER_ID
                )
                if not self.sharepointConnection:
                    logger.error(
                        f"No SharePoint connection found for user: {self.SHAREPOINT_USER_ID}"
                    )
                    self._log_audit_event("SYNC_INIT", "FAILED", f"No SharePoint connection for user: {self.SHAREPOINT_USER_ID}")
                else:
                    # Configure SharePoint service token and set connector reference
                    if not self.services.sharepoint.setAccessToken(
                        self.sharepointConnection, self.services.interfaceApp
                    ):
                        logger.error("Failed to set SharePoint token from UserConnection")
                        self._log_audit_event("SYNC_INIT", "FAILED", "Failed to set SharePoint token")
                    else:
                        logger.info(
                            f"SharePoint token configured for connection: {self.sharepointConnection.id}"
                        )
                        self._log_audit_event("SYNC_INIT", "SUCCESS", f"SharePoint token configured for connection: {self.sharepointConnection.id}")
        except Exception as e:
            logger.error(f"Initialization error in ManagerSyncDelta.__init__: {e}")
            self._log_audit_event("SYNC_INIT", "ERROR", f"Initialization error: {str(e)}")

    def _log_audit_event(self, action: str, status: str, details: str):
        """Log audit events for sync operations to memory."""
        try:
            timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S UTC")
            user_id = str(self.eventUser.id) if self.eventUser else "system"
            log_entry = f"{timestamp} | {user_id} | {action} | {status} | {details}"
            self.sync_audit_log.append(log_entry)
            logger.info(f"Sync Audit: {log_entry}")
        except Exception as e:
            logger.warning(f"Failed to log audit event: {str(e)}")

    def _log_sync_changes(self, merge_details: dict, sync_mode: str):
        """Log detailed field changes for sync operations."""
        try:
            # Log summary statistics
            summary = f"Sync {sync_mode} - Updated: {merge_details['updated']}, Added: {merge_details['added']}, Unchanged: {merge_details['unchanged']}"
            self._log_audit_event("SYNC_CHANGES_SUMMARY", "INFO", summary)

            # Log individual field changes
            for change in merge_details['changes']:
                self._log_audit_event("SYNC_FIELD_CHANGE", "INFO", f"{sync_mode}: {change}")

        except Exception as e:
            logger.warning(f"Failed to log sync changes: {str(e)}")

    async def _save_audit_log_to_sharepoint(self):
        """Save the sync audit log to SharePoint."""
        try:
            if not self.sync_audit_log or not self.targetSite:
                return False

            # Generate log filename with current timestamp
            timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S")
            log_filename = f"log_{timestamp}.log"

            # Create log content
            log_content = "\n".join(self.sync_audit_log)
            log_bytes = log_content.encode('utf-8')

            # Upload to SharePoint audit folder
            await self.services.sharepoint.upload_file(
                site_id=self.targetSite['id'],
                folder_path=self.SHAREPOINT_AUDIT_FOLDER,
                file_name=log_filename,
                content=log_bytes
            )

            logger.info(f"Sync audit log saved to SharePoint: {log_filename}")
            self._log_audit_event("AUDIT_LOG_SAVE", "SUCCESS", f"Audit log saved to SharePoint: {log_filename}")
            return True

        except Exception as e:
            logger.error(f"Failed to save audit log to SharePoint: {str(e)}")
            self._log_audit_event("AUDIT_LOG_SAVE", "FAILED", f"Failed to save audit log: {str(e)}")
            return False

    def getSyncFileName(self) -> str:
        """Get the appropriate sync file name based on the sync mode."""
        if self.SYNC_MODE == "xlsx":
            return self.SYNC_FILE_XLSX
        else:  # Default to CSV
            return self.SYNC_FILE_CSV

    def setSyncMode(self, mode: str) -> bool:
        """Set the sync mode to either 'csv' or 'xlsx'.

        Args:
            mode: Either 'csv' or 'xlsx'

        Returns:
            bool: True if mode was set successfully, False if invalid mode
        """
        if mode.lower() in ["csv", "xlsx"]:
            self.SYNC_MODE = mode.lower()
            logger.info(f"Sync mode changed to: {self.SYNC_MODE}")
            return True
        else:
            logger.error(f"Invalid sync mode: {mode}. Must be 'csv' or 'xlsx'")
            return False

    async def initializeInterface(self) -> bool:
        """Initialize SharePoint connector; tickets connector is created by interface on demand."""
        try:
            # Validate init-prepared members
            if not self.services or not self.sharepointConnection or not self.services.sharepoint:
                logger.error("Service or SharePoint connection not initialized")
                return False

            # Resolve the site by hostname + site path to get the real site ID
            logger.info(
                f"Resolving site ID via hostname+path: {self.SHAREPOINT_HOSTNAME}:/sites/{self.SHAREPOINT_SITE_PATH}"
            )
            resolved = await self.services.sharepoint.find_site_by_url(
                hostname=self.SHAREPOINT_HOSTNAME,
                site_path=self.SHAREPOINT_SITE_PATH
            )

            if not resolved:
                logger.error(
                    f"Failed to resolve site. Hostname: {self.SHAREPOINT_HOSTNAME}, Path: {self.SHAREPOINT_SITE_PATH}"
                )
                return False

            self.targetSite = {
                "id": resolved.get("id"),
                "displayName": resolved.get("displayName", self.SHAREPOINT_SITE_NAME),
                "name": resolved.get("name", self.SHAREPOINT_SITE_NAME)
            }

            # Test site access by listing root of the drive
            logger.info("Testing site access using resolved site ID...")
            test_result = await self.services.sharepoint.list_folder_contents(
                site_id=self.targetSite["id"],
                folder_path=""
            )

            if test_result is not None:
                logger.info(
                    f"Site access confirmed: {self.targetSite['displayName']} (ID: {self.targetSite['id']})"
                )
            else:
                logger.error("Could not access site drive - check permissions")
                return False

            return True

        except Exception as e:
            logger.error(f"Error initializing connectors: {str(e)}")
            return False

    async def syncTicketsOverSharepoint(self) -> bool:
        """Perform Tickets to SharePoint synchronization using list-based interface and local CSV/XLSX handling."""
        try:
            logger.info(f"Starting JIRA to SharePoint synchronization (Mode: {self.SYNC_MODE})")
            self._log_audit_event("SYNC_START", "INFO", f"Starting JIRA to SharePoint sync (Mode: {self.SYNC_MODE})")

            # Initialize interface
            if not await self.initializeInterface():
                logger.error("Failed to initialize connectors")
                self._log_audit_event("SYNC_INTERFACE", "FAILED", "Failed to initialize connectors")
                return False

            # Dump current Jira fields to text file for reference
            try:
                pass  # await dump_jira_fields_to_file()
            except Exception as e:
                logger.warning(f"Failed to dump JIRA fields (non-blocking): {str(e)}")

            # Dump actual JIRA data for debugging
            try:
                pass  # await dump_jira_data_to_file()
            except Exception as e:
                logger.warning(f"Failed to dump JIRA data (non-blocking): {str(e)}")

            # Get the appropriate sync file name based on mode
            sync_file_name = self.getSyncFileName()
            logger.info(f"Using sync file: {sync_file_name}")

            # Create list-based ticket interface (initialize connector by type)
            sync_interface = await self.services.ticket.createTicketInterfaceByType(
                taskSyncDefinition=self.TASK_SYNC_DEFINITION,
                connectorType="Jira",
                connectorParams={
                    "apiUsername": self.JIRA_USERNAME,
                    "apiToken": self.JIRA_API_TOKEN,
                    "apiUrl": self.JIRA_URL,
                    "projectCode": self.JIRA_PROJECT_CODE,
                    "ticketType": self.JIRA_ISSUE_TYPE,
                },
            )

            # Perform the sophisticated sync based on mode
            if self.SYNC_MODE == "xlsx":
                # Export tickets to list
                data_list = await sync_interface.exportTicketsAsList()
                self._log_audit_event("SYNC_EXPORT", "INFO", f"Exported {len(data_list)} tickets from JIRA")
                # Read existing Excel headers/content
                existing_data = []
                existing_headers = {"header1": "Header 1", "header2": "Header 2"}
                try:
                    file_path = f"{self.SHAREPOINT_MAIN_FOLDER}/{sync_file_name}"
                    excel_content = await self.services.sharepoint.download_file_by_path(
                        site_id=self.targetSite['id'], file_path=file_path
                    )
                    existing_data, existing_headers = self.parseExcelContent(excel_content)
                except Exception:
                    pass
                # Merge and write
                merged_data, merge_details = self.mergeJiraWithExistingDetailed(data_list, existing_data)

                # Log detailed changes for Excel mode
                self._log_sync_changes(merge_details, "EXCEL")

                await self.backupSharepointFile(filename=sync_file_name)
                excel_bytes = self.createExcelContent(merged_data, existing_headers)
                await self.services.sharepoint.upload_file(
                    site_id=self.targetSite['id'],
                    folder_path=self.SHAREPOINT_MAIN_FOLDER,
                    file_name=sync_file_name,
                    content=excel_bytes,
                )
                # Import back to tickets
                try:
                    excel_content = await self.services.sharepoint.download_file_by_path(
                        site_id=self.targetSite['id'], file_path=file_path
                    )
                    excel_rows, _ = self.parseExcelContent(excel_content)
                    self._log_audit_event("SYNC_IMPORT", "INFO", f"Importing {len(excel_rows)} Excel rows back to tickets")
                except Exception as e:
                    excel_rows = []
                    self._log_audit_event("SYNC_IMPORT", "WARNING", f"Failed to download Excel for import: {str(e)}")
                await sync_interface.importListToTickets(excel_rows)
            else:  # CSV mode (default)
                # Export tickets to list
                data_list = await sync_interface.exportTicketsAsList()
                self._log_audit_event("SYNC_EXPORT", "INFO", f"Exported {len(data_list)} tickets from JIRA")
                # Prepare headers by reading existing CSV if present
                existing_headers = {"header1": "Header 1", "header2": "Header 2"}
                existing_data: list[dict] = []
                try:
                    file_path = f"{self.SHAREPOINT_MAIN_FOLDER}/{sync_file_name}"
                    csv_content = await self.services.sharepoint.download_file_by_path(
                        site_id=self.targetSite['id'], file_path=file_path
                    )
                    csv_lines = csv_content.decode('utf-8').split('\n')
                    if len(csv_lines) >= 2:
                        existing_headers["header1"] = csv_lines[0].rstrip('\r\n')
                        existing_headers["header2"] = csv_lines[1].rstrip('\r\n')
                    # Parse existing CSV rows after the two header lines
                    import pandas as pd
                    df_existing = pd.read_csv(io.BytesIO(csv_content), skiprows=2, quoting=1, escapechar='\\', on_bad_lines='skip', engine='python')
                    existing_data = df_existing.to_dict('records')
                except Exception:
                    pass
                await self.backupSharepointFile(filename=sync_file_name)
                merged_data, _ = self.mergeJiraWithExistingDetailed(data_list, existing_data)
                csv_bytes = self.createCsvContent(merged_data, existing_headers)
                await self.services.sharepoint.upload_file(
                    site_id=self.targetSite['id'],
                    folder_path=self.SHAREPOINT_MAIN_FOLDER,
                    file_name=sync_file_name,
                    content=csv_bytes,
                )
                # Import from CSV
                try:
                    csv_content = await self.services.sharepoint.download_file_by_path(
                        site_id=self.targetSite['id'], file_path=file_path
                    )
                    import pandas as pd
                    df = pd.read_csv(io.BytesIO(csv_content), skiprows=2, quoting=1, escapechar='\\', on_bad_lines='skip', engine='python')
                    csv_rows = df.to_dict('records')
                    self._log_audit_event("SYNC_IMPORT", "INFO", f"Importing {len(csv_rows)} CSV rows back to tickets")
                except Exception as e:
                    csv_rows = []
                    self._log_audit_event("SYNC_IMPORT", "WARNING", f"Failed to download CSV for import: {str(e)}")
                await sync_interface.importListToTickets(csv_rows)

            logger.info(f"JIRA to SharePoint synchronization completed successfully (Mode: {self.SYNC_MODE})")
            self._log_audit_event("SYNC_COMPLETE", "SUCCESS", f"JIRA to SharePoint sync completed successfully (Mode: {self.SYNC_MODE})")

            # Save audit log to SharePoint
            await self._save_audit_log_to_sharepoint()

            return True

        except Exception as e:
            logger.error(f"Error during JIRA to SharePoint synchronization: {str(e)}")
            self._log_audit_event("SYNC_ERROR", "FAILED", f"Error during sync: {str(e)}")

            # Save audit log to SharePoint even on error
            await self._save_audit_log_to_sharepoint()

            return False

    async def backupSharepointFile(self, *, filename: str) -> bool:
        try:
            timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S")
            backup_filename = f"backup_{timestamp}_{filename}"
            await self.services.sharepoint.copy_file_async(
                site_id=self.targetSite['id'],
                source_folder=self.SHAREPOINT_MAIN_FOLDER,
                source_file=filename,
                dest_folder=self.SHAREPOINT_BACKUP_FOLDER,
                dest_file=backup_filename,
            )
            self._log_audit_event("SYNC_BACKUP", "SUCCESS", f"Backed up file: {filename} -> {backup_filename}")
            return True
        except Exception as e:
            if "itemNotFound" in str(e) or "404" in str(e):
                self._log_audit_event("SYNC_BACKUP", "SKIPPED", f"File not found for backup: {filename}")
                return True
            logger.warning(f"Backup failed: {e}")
            self._log_audit_event("SYNC_BACKUP", "FAILED", f"Backup failed for {filename}: {str(e)}")
            return False

    def mergeJiraWithExistingDetailed(self, jira_data: list[dict], existing_data: list[dict]) -> tuple[list[dict], dict]:
        existing_lookup = {row.get("ID"): row for row in existing_data if row.get("ID")}
        merged_data: list[dict] = []
        changes: list[str] = []
        updated_count = added_count = unchanged_count = 0
        for jira_row in jira_data:
            jira_id = jira_row.get("ID")
            if jira_id and jira_id in existing_lookup:
                existing_row = existing_lookup[jira_id].copy()
                row_changes: list[str] = []
                for field_name, field_config in self.TASK_SYNC_DEFINITION.items():
                    if field_config[0] == 'get':
                        old_value = "" if existing_row.get(field_name) is None else str(existing_row.get(field_name))
                        new_value = "" if jira_row.get(field_name) is None else str(jira_row.get(field_name))
                        if old_value != new_value:
                            row_changes.append(f"{field_name}: '{old_value}' → '{new_value}'")
                            existing_row[field_name] = jira_row.get(field_name)
                merged_data.append(existing_row)
                if row_changes:
                    updated_count += 1
                    changes.append(f"Row ID {jira_id} updated: {', '.join(row_changes)}")
                else:
                    unchanged_count += 1
                del existing_lookup[jira_id]
            else:
                merged_data.append(jira_row)
                added_count += 1
                changes.append(f"Row ID {jira_id} added as new record")
        for remaining in existing_lookup.values():
            merged_data.append(remaining)
            unchanged_count += 1
        details = {"updated": updated_count, "added": added_count, "unchanged": unchanged_count, "changes": changes}
        return merged_data, details

    def createCsvContent(self, data: list[dict], existing_headers: dict | None = None) -> bytes:
        import pandas as pd
        from io import StringIO
        timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S UTC")
        if existing_headers is None:
            existing_headers = {"header1": "Header 1", "header2": "Header 2"}
        if not data:
            cols = list(self.TASK_SYNC_DEFINITION.keys())
            df = pd.DataFrame(columns=cols)
        else:
            df = pd.DataFrame(data)
        for column in df.columns:
            df[column] = df[column].astype("object").fillna("")
            df[column] = df[column].astype(str).str.replace('\n', '\\n', regex=False).str.replace('"', '""', regex=False)
        import csv as csv_module
        header1_row = next(csv_module.reader([existing_headers.get("header1", "Header 1")]), [])
        header2_row = next(csv_module.reader([existing_headers.get("header2", "Header 2")]), [])
        if len(header2_row) > 1:
            header2_row[1] = timestamp
        header_row1 = pd.DataFrame([header1_row + [""] * (len(df.columns) - len(header1_row))], columns=df.columns)
        header_row2 = pd.DataFrame([header2_row + [""] * (len(df.columns) - len(header2_row))], columns=df.columns)
        table_headers = pd.DataFrame([df.columns.tolist()], columns=df.columns)
        final_df = pd.concat([header_row1, header_row2, table_headers, df], ignore_index=True)
        out = StringIO()
        final_df.to_csv(out, index=False, header=False, quoting=1, escapechar='\\')
        return out.getvalue().encode('utf-8')

    def createExcelContent(self, data: list[dict], existing_headers: dict | None = None) -> bytes:
        import pandas as pd
        from io import BytesIO
        timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S UTC")
        if existing_headers is None:
            existing_headers = {"header1": "Header 1", "header2": "Header 2"}
        if not data:
            cols = list(self.TASK_SYNC_DEFINITION.keys())
            df = pd.DataFrame(columns=cols)
        else:
            df = pd.DataFrame(data)
        for column in df.columns:
            df[column] = df[column].astype("object").fillna("")
            df[column] = df[column].astype(str).str.replace('\n', '\\n', regex=False).str.replace('"', '""', regex=False)
        import csv as csv_module
        header1_row = next(csv_module.reader([existing_headers.get("header1", "Header 1")]), [])
        header2_row = next(csv_module.reader([existing_headers.get("header2", "Header 2")]), [])
        if len(header2_row) > 1:
            header2_row[1] = timestamp
        header_row1 = pd.DataFrame([header1_row + [""] * (len(df.columns) - len(header1_row))], columns=df.columns)
        header_row2 = pd.DataFrame([header2_row + [""] * (len(df.columns) - len(header2_row))], columns=df.columns)
        table_headers = pd.DataFrame([df.columns.tolist()], columns=df.columns)
        final_df = pd.concat([header_row1, header_row2, table_headers, df], ignore_index=True)
        buf = BytesIO()
        final_df.to_excel(buf, index=False, header=False, engine='openpyxl')
        return buf.getvalue()

    def parseExcelContent(self, excel_content: bytes) -> tuple[list[dict], dict]:
        import pandas as pd
        from io import BytesIO
        df = pd.read_excel(BytesIO(excel_content), engine='openpyxl', header=None)
        header_row1 = df.iloc[0:1].copy()
        header_row2 = df.iloc[1:2].copy()
        table_headers = df.iloc[2:3].copy()
        df_data = df.iloc[3:].copy()
        df_data.columns = table_headers.iloc[0]
        df_data = df_data.reset_index(drop=True)
        for column in df_data.columns:
            df_data[column] = df_data[column].astype('object').fillna('')
        data = df_data.to_dict(orient='records')
        headers = {
            "header1": ",".join([str(x) if pd.notna(x) else "" for x in header_row1.iloc[0].tolist()]),
            "header2": ",".join([str(x) if pd.notna(x) else "" for x in header_row2.iloc[0].tolist()]),
        }
        return data, headers

    def convertAdfToText(self, adf_data):
        """Convert Atlassian Document Format (ADF) to plain text.

        Based on Atlassian Document Format specification for JIRA fields.
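The field paths in TASK_SYNC_DEFINITION are nested lookups into a ticket's raw JSON. A minimal sketch of how such a path list can be resolved (resolve_field_path is illustrative, not a function from this module):

    def resolve_field_path(ticket: dict, path: list[str]):
        """Walk a nested dict along the given keys; return None if any level is missing."""
        value = ticket
        for key in path:
            if not isinstance(value, dict):
                return None
            value = value.get(key)
        return value

    # usage sketch with the mapping above:
    # priority = resolve_field_path(issue, ['fields', 'priority', 'name'])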
|
@ -173,405 +658,144 @@ def convert_adf_to_text(adf_data):
|
|||
result = extract_text_from_content(content)
|
||||
return result.strip()
|
||||
|
||||
|
||||
class ManagerSyncDelta:
|
||||
"""Manages JIRA to SharePoint synchronization for Delta Group.
|
||||
|
||||
Supports two sync modes:
|
||||
- CSV mode: Uses CSV files for synchronization (default)
|
||||
- Excel mode: Uses Excel (.xlsx) files for synchronization
|
||||
|
||||
To change sync mode, use the set_sync_mode() method or modify SYNC_MODE class variable.
|
||||
"""
|
||||
SHAREPOINT_SITE_ID = "02830618-4029-4dc8-8d3d-f5168f282249"
|
||||
SHAREPOINT_SITE_NAME = "SteeringBPM"
|
||||
SHAREPOINT_SITE_PATH = "SteeringBPM"
|
||||
SHAREPOINT_HOSTNAME = "deltasecurityag.sharepoint.com"
|
||||
SHAREPOINT_MAIN_FOLDER = "/General/50 Docs hosted by SELISE"
|
||||
SHAREPOINT_BACKUP_FOLDER = "/General/50 Docs hosted by SELISE/SyncHistory"
|
||||
SHAREPOINT_AUDIT_FOLDER = "/General/50 Docs hosted by SELISE/SyncHistory"
|
||||
SHAREPOINT_USER_ID = "patrick.motsch@delta.ch"
|
||||
|
||||
# Sync mode: "csv" or "xlsx"
|
||||
SYNC_MODE = "xlsx" # Can be "csv" or "xlsx"
|
||||
|
||||
# File names for different sync modes
|
||||
SYNC_FILE_CSV = "DELTAgroup x SELISE Ticket Exchange List.csv"
|
||||
SYNC_FILE_XLSX = "DELTAgroup x SELISE Ticket Exchange List.xlsx"
|
||||
|
||||
# JIRA connection parameters (hardcoded for Delta Group)
|
||||
JIRA_USERNAME = "p.motsch@valueon.ch"
|
||||
JIRA_API_TOKEN = "ATATT3xFfGF0d973nNb3R1wTDI4lesmJfJAmooS-4cYMJTyLfwYv4himrE6yyCxyX3aSMfl34NHcm2fAXeFXrLHUzJx0RQVUBonCFnlgexjLQTgS5BoCbSO7dwAVjlcHZZkArHbooCUaRwJ15n6AHkm-nwdjLQ3Z74TFnKKUZC4uhuh3Aj-MuX8=2D7124FA"
|
||||
JIRA_URL = "https://deltasecurity.atlassian.net"
|
||||
JIRA_PROJECT_CODE = "DCS"
|
||||
JIRA_ISSUE_TYPE = "Task"
|
||||
|
||||
# Task sync definition for field mapping (like original synchronizer)
|
||||
|
||||
TASK_SYNC_DEFINITION={
|
||||
#key=excel-header, [get:jira>excel | put: excel>jira, jira-xml-field-list]
|
||||
'ID': ['get', ['key']],
|
||||
'Module Category': ['get', ['fields', 'customfield_10058', 'value']],
|
||||
'Summary': ['get', ['fields', 'summary']],
|
||||
'Description': ['get', ['fields', 'description']], # ADF format - needs conversion to text
|
||||
'References': ['get', ['fields', 'customfield_10066']], # Field exists, may be None
|
||||
'Priority': ['get', ['fields', 'priority', 'name']],
|
||||
'Issue Status': ['get', ['fields', 'status', 'name']],
|
||||
'Assignee': ['get', ['fields', 'assignee', 'displayName']],
|
||||
'Issue Created': ['get', ['fields', 'created']],
|
||||
'Due Date': ['get', ['fields', 'duedate']], # Field exists, may be None
|
||||
'DELTA Comments': ['get', ['fields', 'customfield_10167']], # Field exists, may be None
|
||||
'SELISE Ticket References': ['put', ['fields', 'customfield_10067']],
|
||||
'SELISE Status Values': ['put', ['fields', 'customfield_10065']],
|
||||
'SELISE Comments': ['put', ['fields', 'customfield_10168']],
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the sync manager with hardcoded Delta Group credentials."""
|
||||
self.root_interface = getRootInterface()
|
||||
self.jira_connector = None
|
||||
self.sharepoint_connector = None
|
||||
self.target_site = None
|
||||
# Initialize centralized services with root user
|
||||
from modules.services import getInterface as getServices
|
||||
root_user = self.root_interface.getUserByUsername("admin")
|
||||
self.services = getServices(root_user, None)
|
||||
|
||||
def get_sync_file_name(self) -> str:
|
||||
"""Get the appropriate sync file name based on the sync mode."""
|
||||
if self.SYNC_MODE == "xlsx":
|
||||
return self.SYNC_FILE_XLSX
|
||||
else: # Default to CSV
|
||||
return self.SYNC_FILE_CSV
|
||||
|
||||
def set_sync_mode(self, mode: str) -> bool:
|
||||
"""Set the sync mode to either 'csv' or 'xlsx'.
|
||||
|
||||
Args:
|
||||
mode: Either 'csv' or 'xlsx'
|
||||
|
||||
Returns:
|
||||
bool: True if mode was set successfully, False if invalid mode
|
||||
"""
|
||||
if mode.lower() in ["csv", "xlsx"]:
|
||||
self.SYNC_MODE = mode.lower()
|
||||
logger.info(f"Sync mode changed to: {self.SYNC_MODE}")
|
||||
return True
|
||||
else:
|
||||
logger.error(f"Invalid sync mode: {mode}. Must be 'csv' or 'xlsx'")
|
||||
return False
|
||||
|
||||
async def initialize_connectors(self) -> bool:
"""Initialize JIRA and SharePoint connectors."""
# Utility: dump all ticket fields (name -> field id) to a text file (generic)
async def dumpTicketFieldsToFile(
*,
filepath: str = "ticket_sync_fields.txt",
connectorType: str = "Jira",
connectorParams: dict | None = None,
taskSyncDefinition: dict | None = None,
) -> bool:
"""Write available ticket fields (name -> field id) to a text file (generic)."""
try:
logger.info("Initializing JIRA connector with hardcoded credentials")

# Initialize JIRA connector using class constants
self.jira_connector = await ConnectorTicketJira.create(
jira_username=self.JIRA_USERNAME,
jira_api_token=self.JIRA_API_TOKEN,
jira_url=self.JIRA_URL,
project_code=self.JIRA_PROJECT_CODE,
issue_type=self.JIRA_ISSUE_TYPE
)
connectorParams = connectorParams or {}
taskSyncDefinition = taskSyncDefinition or ManagerSyncDelta.TASK_SYNC_DEFINITION
ticket_interface = await createTicketInterfaceByType(
taskSyncDefinition=taskSyncDefinition,
connectorType=connectorType,
connectorParams=connectorParams,
)

# Use the admin user for SharePoint connection
adminUser = self.root_interface.getUserByUsername("admin")
if not adminUser:
logger.error("Admin user not found - SharePoint connection required")
return False

logger.info(f"Using admin user for SharePoint: {adminUser.id}")

# Get SharePoint connection for admin user
user_connections = self.root_interface.getUserConnections(adminUser.id)
sharepoint_connection = None

for connection in user_connections:
if connection.authority == "msft" and connection.externalUsername == self.SHAREPOINT_USER_ID:
sharepoint_connection = connection
break

if not sharepoint_connection:
logger.error(f"No SharePoint connection found for user: {self.SHAREPOINT_USER_ID}")
return False

logger.info(f"Found SharePoint connection: {sharepoint_connection.id}")

# Get fresh SharePoint token for this connection
from modules.security.tokenManager import TokenManager
sharepoint_token = TokenManager().getFreshToken(self.root_interface, sharepoint_connection.id)
if not sharepoint_token:
logger.error("No SharePoint token found for Delta Group user connection")
return False

logger.info(f"Found SharePoint token: {sharepoint_token.id}")

# Initialize SharePoint connector with Graph API
self.sharepoint_connector = SharepointService(access_token=sharepoint_token.tokenAccess)

# Resolve the site by hostname + site path to get the real site ID
logger.info(
f"Resolving site ID via hostname+path: {self.SHAREPOINT_HOSTNAME}:/sites/{self.SHAREPOINT_SITE_PATH}"
)
resolved = await self.sharepoint_connector.find_site_by_url(
hostname=self.SHAREPOINT_HOSTNAME,
site_path=self.SHAREPOINT_SITE_PATH
)

if not resolved:
logger.error(
f"Failed to resolve site. Hostname: {self.SHAREPOINT_HOSTNAME}, Path: {self.SHAREPOINT_SITE_PATH}"
)
return False

self.target_site = {
"id": resolved.get("id"),
"displayName": resolved.get("displayName", self.SHAREPOINT_SITE_NAME),
"name": resolved.get("name", self.SHAREPOINT_SITE_NAME)
}

# Test site access by listing root of the drive
logger.info("Testing site access using resolved site ID...")
test_result = await self.sharepoint_connector.list_folder_contents(
site_id=self.target_site["id"],
folder_path=""
)

if test_result is not None:
logger.info(
f"Site access confirmed: {self.target_site['displayName']} (ID: {self.target_site['id']})"
)
else:
logger.error("Could not access site drive - check permissions")
return False

return True

except Exception as e:
logger.error(f"Error initializing connectors: {str(e)}")
return False
async def sync_jira_to_sharepoint(self) -> bool:
"""Perform the main JIRA to SharePoint synchronization using sophisticated sync logic."""
try:
logger.info(f"Starting JIRA to SharePoint synchronization (Mode: {self.SYNC_MODE})")

# Initialize connectors
if not await self.initialize_connectors():
logger.error("Failed to initialize connectors")
return False

# Dump current Jira fields to text file for reference
try:
pass # await dump_jira_fields_to_file()
except Exception as e:
logger.warning(f"Failed to dump JIRA fields (non-blocking): {str(e)}")

# Dump actual JIRA data for debugging
try:
pass # await dump_jira_data_to_file()
except Exception as e:
logger.warning(f"Failed to dump JIRA data (non-blocking): {str(e)}")

# Get the appropriate sync file name based on mode
sync_file_name = self.get_sync_file_name()
logger.info(f"Using sync file: {sync_file_name}")

# Create the sophisticated sync interface
sync_interface = await TicketSharepointSyncInterface.create(
connector_ticket=self.jira_connector,
connector_sharepoint=self.sharepoint_connector,
task_sync_definition=self.TASK_SYNC_DEFINITION,
sync_folder=self.SHAREPOINT_MAIN_FOLDER,
sync_file=sync_file_name,
backup_folder=self.SHAREPOINT_BACKUP_FOLDER,
audit_folder=self.SHAREPOINT_AUDIT_FOLDER,
site_id=self.target_site['id']
)

# Perform the sophisticated sync based on mode
if self.SYNC_MODE == "xlsx":
logger.info("Performing JIRA to Excel sync...")
await sync_interface.sync_from_jira_to_excel()
logger.info("Performing Excel to JIRA sync...")
await sync_interface.sync_from_excel_to_jira()
else: # CSV mode (default)
logger.info("Performing JIRA to CSV sync...")
await sync_interface.sync_from_jira_to_csv()
logger.info("Performing CSV to JIRA sync...")
await sync_interface.sync_from_csv_to_jira()

logger.info(f"JIRA to SharePoint synchronization completed successfully (Mode: {self.SYNC_MODE})")
return True

except Exception as e:
logger.error(f"Error during JIRA to SharePoint synchronization: {str(e)}")
return False

# Utility: dump all Jira fields (name -> field id) to a text file
async def dump_jira_fields_to_file(filepath: str = "delta_sync_fields.txt") -> bool:
"""Write all available JIRA fields for the configured project/issue type to a text file.

The output format matches the legacy fields.txt, e.g.:
'Summary': ['get', ['fields', 'summary']]

Args:
filepath: Target text file path to write.

Returns:
True on success, False otherwise.
"""
try:
# Initialize Jira connector with the hardcoded credentials/constants
jira = await ConnectorTicketJira.create(
jira_username=ManagerSyncDelta.JIRA_USERNAME,
jira_api_token=ManagerSyncDelta.JIRA_API_TOKEN,
jira_url=ManagerSyncDelta.JIRA_URL,
project_code=ManagerSyncDelta.JIRA_PROJECT_CODE,
issue_type=ManagerSyncDelta.JIRA_ISSUE_TYPE,
)

attributes = await jira.read_attributes()
attributes = await ticket_interface.connector_ticket.read_attributes()
if not attributes:
logger.warning("No JIRA attributes returned; nothing to write.")
logger.warning("No ticket attributes returned; nothing to write.")
return False

# Ensure directory exists if a directory part is provided
dir_name = os.path.dirname(filepath)
if dir_name:
os.makedirs(dir_name, exist_ok=True)

# Write in the expected mapping format
with open(filepath, "w", encoding="utf-8") as f:
for attr in attributes:
# attr.field_name (human name), attr.field (Jira field id)
f.write(f"'{attr.field_name}': ['get', ['fields', '{attr.field}']]\n")

logger.info(f"Wrote {len(attributes)} JIRA fields to {filepath}")
logger.info(f"Wrote {len(attributes)} ticket fields to {filepath}")
return True
except Exception as e:
logger.error(f"Failed to dump JIRA fields: {str(e)}")
logger.error(f"Failed to dump ticket fields: {str(e)}")
return False
# Utility: dump actual JIRA data for debugging
async def dump_jira_data_to_file(filepath: str = "delta_sync_data.txt") -> bool:
"""Write actual JIRA ticket data to a text file for debugging field mapping.

Args:
filepath: Target text file path to write.

Returns:
True on success, False otherwise.
"""
# Utility: dump actual ticket data for debugging (generic)
async def dumpTicketDataToFile(
*,
filepath: str = "ticket_sync_data.txt",
connectorType: str = "Jira",
connectorParams: dict | None = None,
taskSyncDefinition: dict | None = None,
sampleLimit: int = 5,
) -> bool:
"""Write actual ticket data to a text file for debugging field mapping (generic)."""
try:
# Initialize Jira connector with the hardcoded credentials/constants
jira = await ConnectorTicketJira.create(
jira_username=ManagerSyncDelta.JIRA_USERNAME,
jira_api_token=ManagerSyncDelta.JIRA_API_TOKEN,
jira_url=ManagerSyncDelta.JIRA_URL,
project_code=ManagerSyncDelta.JIRA_PROJECT_CODE,
issue_type=ManagerSyncDelta.JIRA_ISSUE_TYPE,
)
connectorParams = connectorParams or {}
taskSyncDefinition = taskSyncDefinition or ManagerSyncDelta.TASK_SYNC_DEFINITION
ticket_interface = await createTicketInterfaceByType(
taskSyncDefinition=taskSyncDefinition,
connectorType=connectorType,
connectorParams=connectorParams,
)

# Get a few sample tickets to see the actual data structure
tickets = await jira.read_tasks(limit=5)
tickets = await ticket_interface.connector_ticket.read_tasks(limit=sampleLimit)
if not tickets:
logger.warning("No JIRA tickets returned; nothing to write.")
logger.warning("No tickets returned; nothing to write.")
return False

# Ensure directory exists if a directory part is provided
dir_name = os.path.dirname(filepath)
if dir_name:
os.makedirs(dir_name, exist_ok=True)

# Write the actual ticket data
with open(filepath, "w", encoding="utf-8") as f:
f.write("=== JIRA TICKET DATA DEBUG ===\n\n")
f.write("=== TICKET DATA DEBUG ===\n\n")
for i, ticket in enumerate(tickets):
f.write(f"--- TICKET {i+1} ---\n")
f.write(f"Raw ticket data:\n")
f.write("Raw ticket data:\n")
f.write(f"{ticket.data}\n\n")

# Also show the specific fields we're trying to map
f.write("Field mapping analysis:\n")
for field_name, field_path in ManagerSyncDelta.TASK_SYNC_DEFINITION.items():
if field_path[0] == 'get': # Only analyze 'get' fields
for fieldName, fieldPath in taskSyncDefinition.items():
if fieldPath[0] == 'get':
try:
# Navigate through the field path
value = ticket.data
for key in field_path[1]:
for key in fieldPath[1]:
if isinstance(value, dict) and key in value:
value = value[key]
else:
value = f"KEY_NOT_FOUND: {key}"
break

# Convert ADF fields to text
if field_name in ['Description', 'References', 'DELTA Comments', 'SELISE Comments']:
if isinstance(value, dict) and value.get("type") == "doc":
value = convert_adf_to_text(value)
pass # value = self.convertAdfToText(value)
elif value is None:
value = ""

f.write(f" {field_name}: {value}\n")
f.write(f" {fieldName}: {value}\n")
except Exception as e:
f.write(f" {field_name}: ERROR - {str(e)}\n")
f.write(f" {fieldName}: ERROR - {str(e)}\n")
f.write("\n" + "="*50 + "\n\n")

logger.info(f"Wrote JIRA data for {len(tickets)} tickets to {filepath}")
logger.info(f"Wrote ticket data for {len(tickets)} tickets to {filepath}")
return True
except Exception as e:
logger.error(f"Failed to dump JIRA data: {str(e)}")
logger.error(f"Failed to dump ticket data: {str(e)}")
return False
# Global sync function for use in app.py
async def perform_sync_jira_delta_group() -> bool:
"""Perform JIRA to SharePoint synchronization for Delta Group.
# Main part of the module
async def performSync(eventUser=None) -> bool:
"""Perform tickets to SharePoint synchronization.

This function is called by the scheduler and can be used independently.

Args:
eventUser: Optional event user to use for synchronization

Returns:
bool: True if synchronization was successful, False otherwise
"""
try:
if APP_ENV_TYPE != "prod" and APP_ENV_TYPE != "tst":
logger.info("JIRA to SharePoint synchronization: task runs only in PROD/TST")
return True
logger.info("Starting DG tickets sync...")

logger.info("Starting Delta Group JIRA sync...")
# Sync audit logging is handled by the ManagerSyncDelta instance

sync_manager = ManagerSyncDelta()
success = await sync_manager.sync_jira_to_sharepoint()
syncManager = ManagerSyncDelta(eventUser)
success = await syncManager.syncTicketsOverSharepoint()

if success:
logger.info("Delta Group JIRA sync completed successfully")
logger.info("DG tickets sync completed successfully")
else:
logger.error("Delta Group JIRA sync failed")
logger.error("DG tickets sync failed")

return success

except Exception as e:
logger.error(f"Error in perform_sync_jira_delta_group: {str(e)}")
logger.error(f"Error in performing DG tickets sync: {str(e)}")
return False

# Register scheduled job on import using the shared event manager
try:
from modules.shared.eventManagement import eventManager

# Register the scheduler only in production (and dev, for testing)
if APP_ENV_TYPE == "prod" or APP_ENV_TYPE == "dev":
# Schedule sync every 20 minutes (at minutes 00, 20, 40)
eventManager.register_cron(
job_id="jira_delta_group_sync",
func=perform_sync_jira_delta_group,
job_id="dgsync",
func=performSync,
cron_kwargs={"minute": "0,20,40"},
replace_existing=True,
coalesce=True,
max_instances=1,
misfire_grace_time=1800,
)
logger.info("Registered jira_delta_group_sync via EventManagement (every 20 minutes)")
logger.info("Registered DG ticket sync via EventManagement (every 20 minutes)")
else:
logger.info(f"Skipping DG scheduler registration for ticket sync in env: {APP_ENV_TYPE}")
except Exception as e:
logger.error(f"Failed to register jira_delta_group_sync: {str(e)}")
logger.error(f"Failed to register DG ticket sync: {str(e)}")

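# Manual invocation sketch (outside the scheduler), assuming an asyncio entry point:
#
#     import asyncio
#     ok = asyncio.run(performSync())
#     print("sync succeeded" if ok else "sync failed")
#
# The cron_kwargs above follow APScheduler-style cron fields, so {"minute": "0,20,40"}
# fires at :00, :20 and :40 of every hour (an assumption about eventManager's backend).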
@@ -564,20 +564,3 @@ register_model_labels(
"patternType": {"en": "Pattern Type", "fr": "Type de modèle"}
}
)

class SystemTable(BaseModel, ModelMixin):
"""Data model for system table entries"""
table_name: str = Field(
description="Name of the table",
frontend_type="text",
frontend_readonly=True,
frontend_required=True
)
initial_id: Optional[str] = Field(
default=None,
description="Initial ID for the table",
frontend_type="text",
frontend_readonly=True,
frontend_required=False
)

@@ -121,6 +121,7 @@ class AppObjects:
"""Initialize standard records if they don't exist."""
self._initRootMandate()
self._initAdminUser()
self._initEventUser()

def _initRootMandate(self):
"""Creates the Root mandate if it doesn't exist."""

@@ -154,7 +155,7 @@ class AppObjects:
language="en",
privilege=UserPrivilege.SYSADMIN,
authenticationAuthority="local", # Using lowercase value directly
hashedPassword=self._getPasswordHash("The 1st Poweron Admin"), # Use a secure password in production!
hashedPassword=self._getPasswordHash(APP_CONFIG.get("APP_INIT_PASS_ADMIN_SECRET")),
connections=[]
)
createdUser = self.db.recordCreate(UserInDB, adminUser)

@@ -164,6 +165,27 @@ class AppObjects:
self.currentUser = createdUser
self.userId = createdUser.get("id")

def _initEventUser(self):
"""Creates the Event user if it doesn't exist."""
# Check if event user already exists
existingUsers = self.db.getRecordset(UserInDB, recordFilter={"username": "event"})
if not existingUsers:
logger.info("Creating Event user")
eventUser = UserInDB(
mandateId=self.getInitialId(Mandate),
username="event",
email="event@example.com",
fullName="Event",
enabled=True,
language="en",
privilege=UserPrivilege.SYSADMIN,
authenticationAuthority="local", # Using lowercase value directly
hashedPassword=self._getPasswordHash(APP_CONFIG.get("APP_INIT_PASS_EVENT_SECRET")),
connections=[]
)
createdUser = self.db.recordCreate(UserInDB, eventUser)
logger.info(f"Event user created with ID {createdUser['id']}")

def _uam(self, model_class: type, recordset: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Unified user access management function that filters data based on user privileges

@@ -1075,13 +1097,16 @@ def getInterface(currentUser: User) -> AppObjects:

return _gatewayInterfaces[contextKey]

def getRootUser() -> User:
def getRootInterface() -> AppObjects:
"""
Returns the root user from the database.
This is the user with the initial ID in the users table.
Returns an AppObjects instance with root privileges.
This is used for initial setup and user creation.
"""
global _rootAppObjects

if _rootAppObjects is None:
try:
# Create a temporary interface without user context
# Create a temporary interface without user context to get the root user
tempInterface = AppObjects()

# Get the initial user directly

@@ -1093,25 +1118,15 @@ def getRootUser() -> User:
if not users:
raise ValueError("Initial user not found in database")

# Convert to User model and return the model instance
# Convert to User model
user_data = users[0]
rootUser = User.parse_obj(user_data)

return User.parse_obj(user_data)
# Create root interface with the root user
_rootAppObjects = AppObjects(rootUser)

except Exception as e:
logger.error(f"Error getting root user: {str(e)}")
raise ValueError(f"Failed to get root user: {str(e)}")

def getRootInterface() -> AppObjects:
"""
Returns an AppObjects instance with root privileges.
This is used for initial setup and user creation.
"""
global _rootAppObjects

if _rootAppObjects is None:
rootUser = getRootUser()
_rootAppObjects = AppObjects(rootUser)

return _rootAppObjects

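# Usage sketch: the cached root interface backs maintenance jobs such as the
# sync manager above (method names as used elsewhere in this commit):
#
#     root = getRootInterface()          # lazily built, then cached in _rootAppObjects
#     admin = root.getUserByUsername("admin")
#     connections = root.getUserConnections(admin.id)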
@@ -6,8 +6,8 @@ from abc import ABC, abstractmethod

class TicketFieldAttribute(BaseModel):
field_name: str = Field(description="Human-readable field name")
field: str = Field(description="JIRA field ID/key")
fieldName: str = Field(description="Human-readable field name")
field: str = Field(description="Ticket field ID/key")

class Task(BaseModel):

File diff suppressed because it is too large
@@ -33,12 +33,9 @@ class WebInterface:
@classmethod
async def create(cls) -> "WebInterface":
connectorWebTavily = await ConnectorTavily.create()

return WebInterface(connectorWebTavily=connectorWebTavily)

async def search(
self, web_search_request: WebSearchRequest
) -> WebSearchActionResult:
async def search(self, web_search_request: WebSearchRequest) -> WebSearchActionResult:
# NOTE: Add connectors here
return await self.connectorWebTavily.search_urls(web_search_request)

@@ -46,9 +43,7 @@ class WebInterface:
# NOTE: Add connectors here
return await self.connectorWebTavily.crawl_urls(web_crawl_request)

async def scrape(
self, web_scrape_request: WebScrapeRequest
) -> WebScrapeActionResult:
async def scrape(self, web_scrape_request: WebScrapeRequest) -> WebScrapeActionResult:
# NOTE: Add connectors here
return await self.connectorWebTavily.scrape(web_scrape_request)

@@ -7,7 +7,7 @@ from modules.security.auth import limiter, getCurrentUser

# Import interfaces
from modules.interfaces.interfaceAppModel import User, DataNeutraliserConfig, DataNeutralizerAttributes
from modules.features.neutralization.mainNeutralizationPlayground import NeutralizationService
from modules.features.neutralizePlayground.mainNeutralizePlayground import NeutralizationPlayground

# Configure logger
logger = logging.getLogger(__name__)

@@ -33,7 +33,7 @@ async def get_neutralization_config(
) -> DataNeutraliserConfig:
"""Get data neutralization configuration"""
try:
service = NeutralizationService(currentUser)
service = NeutralizationPlayground(currentUser)
config = service.get_config()

if not config:

@@ -67,7 +67,7 @@ async def save_neutralization_config(
) -> DataNeutraliserConfig:
"""Save or update data neutralization configuration"""
try:
service = NeutralizationService(currentUser)
service = NeutralizationPlayground(currentUser)
config = service.save_config(config_data)

return config

@@ -97,7 +97,7 @@ async def neutralize_text(
detail="Text content is required"
)

service = NeutralizationService(currentUser)
service = NeutralizationPlayground(currentUser)
result = service.neutralize_text(text, file_id)

return result

@@ -128,7 +128,7 @@ async def resolve_text(
detail="Text content is required"
)

service = NeutralizationService(currentUser)
service = NeutralizationPlayground(currentUser)
resolved_text = service.resolve_text(text)

return {"resolved_text": resolved_text}

@@ -151,7 +151,7 @@ async def get_neutralization_attributes(
) -> List[DataNeutralizerAttributes]:
"""Get neutralization attributes, optionally filtered by file ID"""
try:
service = NeutralizationService(currentUser)
service = NeutralizationPlayground(currentUser)
attributes = service.get_attributes(fileId)

return attributes

@@ -181,7 +181,7 @@ async def process_sharepoint_files(
detail="Both source and target paths are required"
)

service = NeutralizationService(currentUser)
service = NeutralizationPlayground(currentUser)
result = await service.process_sharepoint_files(source_path, target_path)

return result

@@ -210,7 +210,7 @@ async def batch_process_files(
detail="Files data is required"
)

service = NeutralizationService(currentUser)
service = NeutralizationPlayground(currentUser)
result = service.batch_neutralize_files(files_data)

return result

@@ -232,7 +232,7 @@ async def get_neutralization_stats(
) -> Dict[str, Any]:
"""Get neutralization processing statistics"""
try:
service = NeutralizationService(currentUser)
service = NeutralizationPlayground(currentUser)
stats = service.get_processing_stats()

return stats

@@ -253,7 +253,7 @@ async def cleanup_file_attributes(
) -> Dict[str, str]:
"""Clean up neutralization attributes for a specific file"""
try:
service = NeutralizationService(currentUser)
service = NeutralizationPlayground(currentUser)
success = service.cleanup_file_attributes(fileId)

if success:

@@ -51,17 +51,19 @@ class CSRFMiddleware(BaseHTTPMiddleware):
csrf_token = request.headers.get("X-CSRF-Token")
if not csrf_token:
logger.warning(f"CSRF token missing for {request.method} {request.url.path}")
raise HTTPException(
from fastapi.responses import JSONResponse
return JSONResponse(
status_code=status.HTTP_403_FORBIDDEN,
detail="CSRF token missing"
content={"detail": "CSRF token missing"}
)

# Validate CSRF token format (basic validation)
if not self._is_valid_csrf_token(csrf_token):
logger.warning(f"Invalid CSRF token format for {request.method} {request.url.path}")
raise HTTPException(
from fastapi.responses import JSONResponse
return JSONResponse(
status_code=status.HTTP_403_FORBIDDEN,
detail="Invalid CSRF token format"
content={"detail": "Invalid CSRF token format"}
)

# Additional CSRF validation could be added here:

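# Client-side sketch of the contract this middleware enforces: mutating requests
# must carry an X-CSRF-Token header or they receive a 403 JSON body. The URL and
# token value below are placeholders:
#
#     import httpx
#     r = httpx.post("http://localhost:8000/api/some-route",
#                    headers={"X-CSRF-Token": "<token issued to this session>"},
#                    json={"key": "value"})
#     # Without the header: r.status_code == 403 and
#     # r.json() == {"detail": "CSRF token missing"}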
@@ -39,60 +39,44 @@ class PublicService:

class Services:

def __init__(self, user: User, workflow: ChatWorkflow):
def __init__(self, user: User, workflow: ChatWorkflow = None):
self.user: User = user
self.workflow: ChatWorkflow = workflow

# Directly expose existing service modules
# Initialize interfaces

from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface
self.interfaceChat = getChatInterface(user)

from modules.interfaces.interfaceAppObjects import getInterface as getAppInterface
self.interfaceApp = getAppInterface(user)

from modules.interfaces.interfaceComponentObjects import getInterface as getComponentInterface
self.interfaceComponent = getComponentInterface(user)

# Initialize service packages

from .serviceDocument.mainServiceDocumentExtraction import DocumentExtractionService
self.document = PublicService(DocumentExtractionService(self))
self.documentExtraction = PublicService(DocumentExtractionService(self))

from .serviceDocument.mainServiceDocumentGeneration import DocumentGenerationService
self.document = PublicService(DocumentGenerationService(self))
self.documentGeneration = PublicService(DocumentGenerationService(self))

from .serviceNeutralization.mainNeutralization import NeutralizationService
self.neutralization = PublicService(NeutralizationService())
from .serviceNeutralization.mainServiceNeutralization import NeutralizationService
self.neutralization = PublicService(NeutralizationService(self))

from .serviceSharepoint.mainSharepoint import SharePointService
self.sharepoint = PublicService(SharePointService(self))
from .serviceSharepoint.mainServiceSharepoint import SharepointService
self.sharepoint = PublicService(SharepointService(self))

from .serviceAi.mainServiceAi import AiService
self.ai = PublicService(AiService(self))

from .serviceWorkflows.mainServiceWorkflows import WorkflowService
from .serviceTicket.mainServiceTicket import TicketService
self.ticket = PublicService(TicketService(self))

from .serviceWorkflow.mainServiceWorkflow import WorkflowService
self.workflow = PublicService(WorkflowService(self))

# Initialize chat interface for workflow operations
from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface
self.chatInterface = getChatInterface(user)

# Chat interface wrapper methods
def getWorkflow(self, workflowId: str):
return self.chatInterface.getWorkflow(workflowId)

def createWorkflow(self, workflowData: dict):
return self.chatInterface.createWorkflow(workflowData)

def updateWorkflow(self, workflowId: str, workflowData: dict):
return self.chatInterface.updateWorkflow(workflowId, workflowData)

def createMessage(self, messageData: dict):
return self.chatInterface.createMessage(messageData)

def updateMessage(self, messageId: str, messageData: dict):
return self.chatInterface.updateMessage(messageId, messageData)

def createLog(self, logData: dict):
return self.chatInterface.createLog(logData)

def updateWorkflowStats(self, workflowId: str, bytesSent: int = 0, bytesReceived: int = 0, tokenCount: int = 0):
return self.chatInterface.updateWorkflowStats(workflowId, bytesSent, bytesReceived, tokenCount)

@property
def mandateId(self):
return self.chatInterface.mandateId

def getInterface(user: User, workflow: ChatWorkflow) -> Services:
return Services(user, workflow)

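# Usage sketch of the service center (assumes PublicService proxies attribute
# access to the wrapped service; user/workflow come from the caller's context):
#
#     services = getInterface(user, workflow)
#     summary = await services.ai.callAi(prompt="Summarize...", documents=None, options={})
#     ticket_if = await services.ticket.createTicketInterfaceByType(
#         taskSyncDefinition={}, connectorType="Jira")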
@@ -2,7 +2,7 @@ import logging
from typing import Dict, Any, List, Optional, Tuple

from modules.interfaces.interfaceChatModel import ChatDocument
from modules.services.serviceDocument.documentExtraction import DocumentExtractionService
from modules.services.serviceDocument.mainServiceDocumentExtraction import DocumentExtractionService
from modules.interfaces.interfaceAiModel import AiCallRequest, AiCallOptions
from modules.interfaces.interfaceAiObjects import AiObjects

@@ -19,9 +19,15 @@ class AiService:
The concrete connector instances (OpenAI/Anthropic) are injected by the interface layer.
"""

def __init__(self, aiObjects: AiObjects | None = None) -> None:
def __init__(self, serviceCenter=None) -> None:
"""Initialize AI service with service center access.

Args:
serviceCenter: Service center instance for accessing other services
"""
self.serviceCenter = serviceCenter
# Only depend on interfaces
self.aiObjects = aiObjects or AiObjects()
self.aiObjects = AiObjects()
self.documentExtractor = DocumentExtractionService()

async def callAi(

@@ -22,7 +22,7 @@ from modules.interfaces.interfaceChatModel import (
ContentItem,
ContentMetadata
)
from modules.services.serviceNeutralization.mainNeutralization import NeutralizationService
from modules.services.serviceNeutralization.mainServiceNeutralization import NeutralizationService
from modules.shared.configuration import APP_CONFIG

logger = logging.getLogger(__name__)

@@ -43,9 +43,8 @@ class DocumentExtractionService:
"""Initialize the document processor."""
self._neutralizer = NeutralizationService() if APP_CONFIG.get("ENABLE_CONTENT_NEUTRALIZATION", False) else None
self._serviceCenter = serviceCenter
# Centralized services interface (for AI)
from modules.services import getInterface as getServices
self.services = getServices(serviceCenter.user, serviceCenter.workflow)
# Store service center for access to user/workflow context when needed
self.services = None # Kept as None to avoid a circular dependency

self.supportedTypes: Dict[str, Callable[[bytes, str, str], Awaitable[List[ContentItem]]]] = {
# Text and data files

@@ -1427,7 +1426,11 @@ class DocumentExtractionService:
"""
from modules.interfaces.interfaceChatModel import ChatDocument
image_doc = ChatDocument(fileData=chunk, fileName="image", mimeType=mimeType)
processedContent = await self.services.ai.callAi(
# Use direct import to avoid circular dependency
from modules.services.serviceAi.mainServiceAi import AiService
from modules.interfaces.interfaceAiObjects import AiObjects
aiService = AiService(AiObjects())
processedContent = await aiService.callAi(
prompt=imagePrompt,
documents=[image_doc],
options={

@@ -1462,24 +1465,11 @@ class DocumentExtractionService:
# For code files, preserve the complete content without AI processing
processedContent = contentToProcess
else:
if self.services and hasattr(self.services, 'ai'):
processedContent = await self.services.ai.callAi(
prompt=aiPrompt,
documents=None,
options={
"process_type": "text",
"operation_type": "analyse_content",
"priority": "balanced",
"compress_prompt": True,
"compress_documents": False,
"processing_mode": "advanced",
"max_cost": 0.05,
"max_processing_time": 30
}
)
else:
# Fallback to basic AI processing with centralized service
processedContent = await self.services.ai.callAi(
# Use direct import to avoid circular dependency
from modules.services.serviceAi.mainServiceAi import AiService
from modules.interfaces.interfaceAiObjects import AiObjects
aiService = AiService(AiObjects())
processedContent = await aiService.callAi(
prompt=aiPrompt,
documents=None,
options={

@@ -8,59 +8,51 @@ Multilingual: DE, EN, FR, IT

import logging
import re
import os
import uuid
import json
from typing import Dict, List, Any, Optional, Tuple
from datetime import datetime
from pathlib import Path
import mimetypes
from typing import Dict, List, Any, Optional

from modules.interfaces.interfaceAppObjects import getInterface
from modules.interfaces.interfaceAppModel import User, DataNeutraliserConfig, DataNeutralizerAttributes
from modules.shared.timezoneUtils import get_utc_timestamp
from modules.interfaces.interfaceAppModel import DataNeutraliserConfig, DataNeutralizerAttributes

# Import all necessary classes and functions for neutralization
from modules.services.serviceNeutralization.subProcessCommon import ProcessResult, CommonUtils, NeutralizationResult, NeutralizationAttribute
from modules.services.serviceNeutralization.subProcessCommon import CommonUtils, NeutralizationResult, NeutralizationAttribute
from modules.services.serviceNeutralization.subProcessText import TextProcessor, PlainText
from modules.services.serviceNeutralization.subProcessList import ListProcessor, TableData
from modules.services.serviceNeutralization.subProcessBinary import BinaryProcessor, BinaryData
from modules.services.serviceNeutralization.subParseString import StringParser
from modules.services.serviceNeutralization.subPatterns import Pattern, HeaderPatterns, DataPatterns, TextTablePatterns
from modules.services.serviceNeutralization.subProcessBinary import BinaryProcessor
from modules.services.serviceNeutralization.subPatterns import HeaderPatterns, DataPatterns, TextTablePatterns

logger = logging.getLogger(__name__)

class NeutralizationService:
"""Service for handling data neutralization operations"""

def __init__(self, current_user: User = None, names_to_parse: List[str] = None):
def __init__(self, serviceCenter=None, NamesToParse: List[str] = None):
"""Initialize the service with user context and anonymization processors

Args:
current_user: User object for context (optional for basic neutralization)
names_to_parse: List of names to parse and replace (case-insensitive)
serviceCenter: Service center instance for accessing other services
NamesToParse: List of names to parse and replace (case-insensitive)
"""
self.current_user = current_user
self.app_interface = getInterface(current_user) if current_user else None
self.serviceCenter = serviceCenter
self.interfaceApp = serviceCenter.interfaceApp

# Initialize anonymization processors
self.names_to_parse = names_to_parse or []
self.textProcessor = TextProcessor(names_to_parse)
self.listProcessor = ListProcessor(names_to_parse)
self.NamesToParse = NamesToParse or []
self.textProcessor = TextProcessor(NamesToParse)
self.listProcessor = ListProcessor(NamesToParse)
self.binaryProcessor = BinaryProcessor()
self.commonUtils = CommonUtils()

def getConfig(self) -> Optional[DataNeutraliserConfig]:
"""Get the neutralization configuration for the current user's mandate"""
if not self.app_interface:
if not self.interfaceApp:
return None
return self.app_interface.getNeutralizationConfig()
return self.interfaceApp.getNeutralizationConfig()

def saveConfig(self, config_data: Dict[str, Any]) -> DataNeutraliserConfig:
"""Save or update the neutralization configuration"""
if not self.app_interface:
if not self.interfaceApp:
raise ValueError("User context required for saving configuration")
return self.app_interface.createOrUpdateNeutralizationConfig(config_data)
return self.interfaceApp.createOrUpdateNeutralizationConfig(config_data)

# Public API: process text or file

@@ -70,18 +62,18 @@ class NeutralizationService:

def processFile(self, fileId: str) -> Dict[str, Any]:
"""Neutralize a file referenced by its fileId using the app interface."""
if not self.app_interface:
if not self.interfaceApp:
raise ValueError("User context is required to process a file by fileId")
# Fetch file data and metadata
fileInfo = None
try:
# getFile returns an object; fall back to dict-like access
fileInfo = self.app_interface.getFile(fileId)
fileInfo = self.interfaceApp.getFile(fileId)
except Exception:
fileInfo = None
fileName = getattr(fileInfo, 'fileName', None) if fileInfo else None
mimeType = getattr(fileInfo, 'mimeType', None) if fileInfo else None
fileData = self.app_interface.getFileData(fileId)
fileData = self.interfaceApp.getFileData(fileId)
if not fileData:
raise ValueError(f"No file data found for fileId: {fileId}")

@@ -111,17 +103,17 @@ class NeutralizationService:
return result

def resolveText(self, text: str) -> str:
if not self.app_interface:
if not self.interfaceApp:
return text
try:
placeholder_pattern = r'\[([a-z]+)\.([a-f0-9-]{36})\]'
matches = re.findall(placeholder_pattern, text)
resolved_text = text
for placeholder_type, uid in matches:
attributes = self.app_interface.db.getRecordset(
attributes = self.interfaceApp.db.getRecordset(
DataNeutralizerAttributes,
recordFilter={
"mandateId": self.app_interface.mandateId,
"mandateId": self.interfaceApp.mandateId,
"id": uid
}
)

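# Placeholder format sketch: resolveText looks for tokens like "[email.<uuid4>]"
# (a lowercase type, then a 36-character hyphenated id) and swaps each one back
# to its original value from DataNeutralizerAttributes. Illustrative only:
#
#     import re
#     text = "Contact [email.123e4567-e89b-12d3-a456-426614174000] for details"
#     m = re.findall(r'\[([a-z]+)\.([a-f0-9-]{36})\]', text)
#     # m == [('email', '123e4567-e89b-12d3-a456-426614174000')]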
@@ -194,6 +186,19 @@ class NeutralizationService:
processed_info={'type': 'error', 'error': str(e)}
).model_dump()

def _getAttributes(self) -> List[DataNeutralizerAttributes]:
"""Get all neutralization attributes for the current user's mandate"""
if not self.interfaceApp:
return []
try:
return self.interfaceApp.db.getRecordset(
DataNeutralizerAttributes,
recordFilter={"mandateId": self.interfaceApp.mandateId}
)
except Exception as e:
logger.error(f"Error getting neutralization attributes: {str(e)}")
return []

def _getContentTypeFromMime(self, mime_type: str) -> str:
"""Determine content type from MIME type for neutralization processing"""
if mime_type.startswith('text/'):

@@ -11,15 +11,15 @@ from modules.services.serviceNeutralization.subPatterns import DataPatterns, fin
class StringParser:
"""Handles string parsing and replacement operations"""

def __init__(self, names_to_parse: List[str] = None):
def __init__(self, NamesToParse: List[str] = None):
"""
Initialize the string parser

Args:
names_to_parse: List of names to parse and replace (case-insensitive)
NamesToParse: List of names to parse and replace (case-insensitive)
"""
self.data_patterns = DataPatterns.patterns
self.names_to_parse = names_to_parse or []
self.NamesToParse = NamesToParse or []
self.mapping = {}

def is_placeholder(self, text: str) -> bool:

@@ -84,7 +84,7 @@ class StringParser:
Returns:
str: Text with custom names replaced
"""
for name in self.names_to_parse:
for name in self.NamesToParse:
if not name.strip():
continue

@@ -22,14 +22,14 @@ class TableData:
class ListProcessor:
"""Handles structured data processing with headers for anonymization"""

def __init__(self, names_to_parse: List[str] = None):
def __init__(self, NamesToParse: List[str] = None):
"""
Initialize the list processor

Args:
names_to_parse: List of names to parse and replace
NamesToParse: List of names to parse and replace
"""
self.string_parser = StringParser(names_to_parse)
self.string_parser = StringParser(NamesToParse)
self.header_patterns = HeaderPatterns.patterns

def anonymize_table(self, table: TableData) -> TableData:

@@ -215,7 +215,7 @@ class ListProcessor:
text = self.string_parser.mapping[text]
else:
# Check if text matches any custom names from the user list
for name in self.string_parser.names_to_parse:
for name in self.string_parser.NamesToParse:
if not name.strip():
continue
if text.lower().strip() == name.lower().strip():

@@ -16,14 +16,14 @@ class PlainText:
class TextProcessor:
"""Handles plain text processing for anonymization"""

def __init__(self, names_to_parse: List[str] = None):
def __init__(self, NamesToParse: List[str] = None):
"""
Initialize the text processor

Args:
names_to_parse: List of names to parse and replace
NamesToParse: List of names to parse and replace
"""
self.string_parser = StringParser(names_to_parse)
self.string_parser = StringParser(NamesToParse)

def extract_tables_from_text(self, content: str) -> tuple:
"""

@@ -13,18 +13,55 @@ logger = logging.getLogger(__name__)
class SharepointService:
"""SharePoint connector using Microsoft Graph API for reliable authentication."""

def __init__(self, access_token: str):
"""Initialize with access token.
def __init__(self, serviceCenter=None):
"""Initialize SharePoint service without an access token.

Args:
access_token: Microsoft Graph access token
serviceCenter: Service center instance for accessing other services

Use the setAccessToken() method to configure the access token before making API calls.
"""
self.access_token = access_token
self.serviceCenter = serviceCenter
self.access_token = None
self.base_url = "https://graph.microsoft.com/v1.0"

def setAccessToken(self, userConnection, interfaceApp) -> bool:
"""Set access token from a UserConnection.

Args:
userConnection: UserConnection object containing token information
interfaceApp: InterfaceApp instance used by TokenManager to resolve the token

Returns:
bool: True if the token was set successfully, False otherwise
"""
try:
if not userConnection:
logger.error("UserConnection is required to set access token")
return False

# Get a fresh token for this specific connection
from modules.security.tokenManager import TokenManager
token = TokenManager().getFreshToken(interfaceApp, userConnection.id)
if not token:
logger.error(f"No token found for connection {userConnection.id}")
return False

self.access_token = token.tokenAccess
logger.info(f"Access token set for connection {userConnection.id}")
return True

except Exception as e:
logger.error(f"Error setting access token: {str(e)}")
return False

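# Usage sketch of the two-step construction (connection objects come from the
# caller's context; method names as defined in this class):
#
#     sharepoint = SharepointService(serviceCenter)
#     if sharepoint.setAccessToken(userConnection, interfaceApp):
#         items = await sharepoint.list_folder_contents(site_id=site_id, folder_path="")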
async def _make_graph_api_call(self, endpoint: str, method: str = "GET", data: bytes = None) -> Dict[str, Any]:
"""Make a Microsoft Graph API call with proper error handling."""
try:
if self.access_token is None:
logger.error("Access token is not set. Please call setAccessToken() before using the SharePoint service.")
return {"error": "Access token is not set. Please call setAccessToken() before using the SharePoint service."}

headers = {
"Authorization": f"Bearer {self.access_token}",
"Content-Type": "application/json" if data and method != "PUT" else "application/octet-stream" if data else "application/json"

@@ -280,6 +317,10 @@ class SharepointService:
async def download_file(self, site_id: str, file_id: str) -> Optional[bytes]:
"""Download a file from SharePoint."""
try:
if self.access_token is None:
logger.error("Access token is not set. Please call setAccessToken() before using the SharePoint service.")
return None

endpoint = f"sites/{site_id}/drive/items/{file_id}/content"

headers = {"Authorization": f"Bearer {self.access_token}"}

@@ -416,6 +457,10 @@ class SharepointService:
async def download_file_by_path(self, site_id: str, file_path: str) -> Optional[bytes]:
"""Download a file by its path within a site."""
try:
if self.access_token is None:
logger.error("Access token is not set. Please call setAccessToken() before using the SharePoint service.")
return None

# Clean the path
clean_path = file_path.strip('/')
endpoint = f"sites/{site_id}/drive/root:/{clean_path}:/content"
41
modules/services/serviceTicket/mainServiceTicket.py
Normal file

@@ -0,0 +1,41 @@
"""Ticket service for creating ticket interfaces."""

import logging
from typing import Dict, Any, Optional
from modules.interfaces.interfaceTicketObjects import createTicketInterfaceByType

logger = logging.getLogger(__name__)

class TicketService:
"""Service class for ticket interface operations."""

def __init__(self, serviceCenter=None):
"""Initialize ticket service with service center access.

Args:
serviceCenter: Service center instance for accessing other services
"""
self.serviceCenter = serviceCenter

async def createTicketInterfaceByType(
self,
taskSyncDefinition: Dict[str, Any],
connectorType: str,
connectorParams: Optional[Dict[str, Any]] = None
):
"""Create a ticket interface by type with the given parameters.

Args:
taskSyncDefinition: Field mapping definition for ticket synchronization
connectorType: Type of connector (e.g., "Jira", "ServiceNow")
connectorParams: Optional parameters for the connector

Returns:
Ticket interface instance
"""
return await createTicketInterfaceByType(
taskSyncDefinition=taskSyncDefinition,
connectorType=connectorType,
connectorParams=connectorParams
)
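# Usage sketch via the service center (a thin pass-through; parameter values are
# illustrative, not documented connector options):
#
#     ticket_service = TicketService(serviceCenter)
#     interface = await ticket_service.createTicketInterfaceByType(
#         taskSyncDefinition=ManagerSyncDelta.TASK_SYNC_DEFINITION,
#         connectorType="Jira",
#         connectorParams={},
#     )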
@@ -3,7 +3,7 @@ import uuid
from typing import Dict, Any, List, Optional
from modules.interfaces.interfaceAppModel import User, UserConnection
from modules.interfaces.interfaceChatModel import ChatDocument, ChatMessage, ExtractedContent
from modules.services.serviceDocument.documentExtraction import DocumentExtractionService
from modules.services.serviceDocument.mainServiceDocumentExtraction import DocumentExtractionService
from modules.services.serviceDocument.documentUtility import getFileExtension, getMimeTypeFromExtension, detectContentTypeFromData
from modules.shared.timezoneUtils import get_utc_timestamp

@@ -12,17 +12,13 @@ logger = logging.getLogger(__name__)
class WorkflowService:
"""Service class containing methods for document processing, chat operations, and workflow management"""

def __init__(self, service_center):
self.service_center = service_center
self.user = service_center.user
self.workflow = service_center.workflow
self.interfaceChat = service_center.interfaceChat
self.interfaceComponent = service_center.interfaceComponent
self.interfaceApp = service_center.interfaceApp
self.documentProcessor = service_center.documentProcessor
# Centralized services interface (for AI)
from modules.services import getInterface as getServices
self.services = getServices(self.user, self.workflow)
def __init__(self, serviceCenter):
self.serviceCenter = serviceCenter
self.user = serviceCenter.user
self.workflow = serviceCenter.workflow
self.interfaceChat = serviceCenter.interfaceChat
self.interfaceComponent = serviceCenter.interfaceComponent
self.interfaceApp = serviceCenter.interfaceApp

async def summarizeChat(self, messages: List[ChatMessage]) -> str:
"""

@@ -57,8 +53,10 @@ class WorkflowService:

Please provide a comprehensive summary of this conversation."""

# Get summary using centralized AI (speed priority)
return await self.services.ai.callAi(
# Get summary using AI service directly (avoiding circular dependency)
from modules.services.serviceAi.mainServiceAi import AiService
ai_service = AiService(self)
return await ai_service.callAi(
prompt=prompt,
documents=None,
options={

@@ -252,6 +250,22 @@ class WorkflowService:
logger.debug(f"getConnectionReferenceFromUserConnection: Built reference: {base_ref + state_info}")
return base_ref + state_info

def getUserConnectionByExternalUsername(self, authority: str, externalUsername: str) -> Optional[UserConnection]:
"""Fetch the user's connection by authority and external username."""
try:
if not authority or not externalUsername:
return None
user_connections = self.interfaceApp.getUserConnections(self.user.id)
for connection in user_connections:
# Normalize authority for comparison (enum vs string)
connection_authority = connection.authority.value if hasattr(connection.authority, 'value') else str(connection.authority)
if connection_authority.lower() == authority.lower() and connection.externalUsername == externalUsername:
return connection
return None
except Exception as e:
logger.error(f"Error getting connection by external username: {str(e)}")
return None

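# Lookup sketch (the "msft" authority string follows the convention used
# elsewhere in this commit; the username is a placeholder):
#
#     conn = workflow_service.getUserConnectionByExternalUsername(
#         authority="msft", externalUsername="sync-bot@example.com")
#     if conn:
#         service.sharepoint.setAccessToken(conn, workflow_service.interfaceApp)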
def getUserConnectionFromConnectionReference(self, connectionReference: str) -> Optional[UserConnection]:
"""Get UserConnection from reference string (handles both old and enhanced formats)"""
try:

@@ -334,14 +348,34 @@ class WorkflowService:
# Recovery failed - don't continue with invalid data
raise RuntimeError(f"Document {document.id} properties are inaccessible and recovery failed. Diagnosis: {diagnosis}")

# Process with document processor directly
extractedContent = await self.documentProcessor.processFileData(
# Process with DocumentExtractionService directly (no circular dependency)
from modules.services.serviceDocument.mainServiceDocumentExtraction import DocumentExtractionService
docService = DocumentExtractionService(None) # Pass None to avoid circular dependency
content_items = await docService.processFileData(
fileData=fileData,
fileName=fileName,
mimeType=mimeType,
base64Encoded=False,
prompt=prompt,
documentId=document.id
enableAI=True
)

# Convert ContentItem list to ExtractedContent
contents = []
for item in content_items:
contents.append({
'label': item.label,
'data': item.data,
'metadata': {
'mimeType': item.metadata.mimeType if hasattr(item.metadata, 'mimeType') else mimeType,
'size': item.metadata.size if hasattr(item.metadata, 'size') else len(fileData),
'base64Encoded': item.metadata.base64Encoded if hasattr(item.metadata, 'base64Encoded') else False
}
})

extractedContent = ExtractedContent(
id=document.id,
contents=contents
)

# Note: ExtractedContent model only has 'id' and 'contents' fields

@@ -32,7 +32,7 @@ class MethodSharepoint(MethodBase):
return datetime.now(UTC).strftime("%Y%m%d-%H%M%S")

def _getMicrosoftConnection(self, connectionReference: str) -> Optional[Dict[str, Any]]:
"""Get Microsoft connection from connection reference"""
"""Get Microsoft connection from connection reference and configure SharePoint service"""
try:
userConnection = self.service.getUserConnectionFromConnectionReference(connectionReference)
if not userConnection:

@@ -48,47 +48,33 @@ class MethodSharepoint(MethodBase):
logger.warning(f"Connection {userConnection.id} status is not active/pending: {userConnection.status.value}")
return None

# Get a fresh token for this specific connection
from modules.security.tokenManager import TokenManager
token = TokenManager().getFreshToken(self.service.interfaceApp, userConnection.id)
if not token:
logger.warning(f"No token found for connection {userConnection.id}")
# Configure SharePoint service with the UserConnection
if not self.service.sharepoint.setAccessToken(userConnection, self.service.interfaceApp):
logger.warning(f"Failed to configure SharePoint service with connection {userConnection.id}")
return None

# Check if token is expired
if hasattr(token, 'expiresAt') and token.expiresAt:
current_time = get_utc_timestamp()
if current_time > token.expiresAt:
logger.warning(f"Token for connection {userConnection.id} is expired (expiresAt: {token.expiresAt}, current: {current_time})")
return None

logger.info(f"Successfully retrieved Microsoft connection: {userConnection.id}, status: {userConnection.status.value}, externalId: {userConnection.externalId}")
logger.info(f"Successfully configured SharePoint service with Microsoft connection: {userConnection.id}, status: {userConnection.status.value}, externalId: {userConnection.externalId}")

return {
"id": userConnection.id,
"userConnection": userConnection,
"accessToken": token.tokenAccess,
"refreshToken": token.tokenRefresh,
"scopes": ["Sites.ReadWrite.All", "Files.ReadWrite.All", "User.Read"] # SharePoint scopes
}
except Exception as e:
logger.error(f"Error getting Microsoft connection: {str(e)}")
return None

async def _discoverSharePointSites(self, access_token: str) -> List[Dict[str, Any]]:
async def _discoverSharePointSites(self) -> List[Dict[str, Any]]:
"""
Discover all SharePoint sites accessible to the user via Microsoft Graph API

Parameters:
access_token (str): Microsoft Graph access token

Returns:
List[Dict[str, Any]]: List of SharePoint site information
"""
try:
# Query Microsoft Graph to get all sites the user has access to
endpoint = "sites?search=*"
result = await self._makeGraphApiCall(access_token, endpoint)
result = await self._makeGraphApiCall(endpoint)

if "error" in result:
logger.error(f"Error discovering SharePoint sites: {result['error']}")

@@ -375,11 +361,14 @@ class MethodSharepoint(MethodBase):
             logger.error(f"Error parsing site URL {siteUrl}: {str(e)}")
             return {"hostname": "", "sitePath": ""}

-    async def _makeGraphApiCall(self, access_token: str, endpoint: str, method: str = "GET", data: bytes = None) -> Dict[str, Any]:
+    async def _makeGraphApiCall(self, endpoint: str, method: str = "GET", data: bytes = None) -> Dict[str, Any]:
         """Make a Microsoft Graph API call with timeout and detailed logging"""
         try:
+            if not hasattr(self.service, 'sharepoint') or not self.service.sharepoint._target.access_token:
+                return {"error": "SharePoint service not configured with access token"}
+
             headers = {
-                "Authorization": f"Bearer {access_token}",
+                "Authorization": f"Bearer {self.service.sharepoint._target.access_token}",
                 "Content-Type": "application/json" if data and method != "PUT" else "application/octet-stream" if data else "application/json"
             }

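Dropping the access_token parameter works because the token is read back from the configured target at call time, and the new guard turns a missing configuration into a structured error instead of an AttributeError. A self-contained sketch of the call pattern (the _Target shape and base URL are assumptions inferred from this hunk):

    import aiohttp

    GRAPH_BASE = "https://graph.microsoft.com/v1.0"

    class _Target:
        # Assumed shape of self.service.sharepoint._target.
        def __init__(self, access_token=None):
            self.access_token = access_token

    async def make_graph_api_call(target: _Target, endpoint: str, method: str = "GET", data: bytes = None) -> dict:
        # Fail fast when the service was never configured with a token.
        if target is None or not target.access_token:
            return {"error": "SharePoint service not configured with access token"}
        headers = {
            "Authorization": f"Bearer {target.access_token}",
            # Raw bytes only go out on PUT uploads; everything else is JSON.
            "Content-Type": "application/octet-stream" if data and method == "PUT" else "application/json",
        }
        async with aiohttp.ClientSession() as session:
            async with session.request(method, f"{GRAPH_BASE}/{endpoint}", headers=headers, data=data) as resp:
                return await resp.json()

    # Example (hits the live Graph API, so the token must be real):
    # asyncio.run(make_graph_api_call(_Target("eyJ..."), "sites?search=*"))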
@@ -436,11 +425,11 @@ class MethodSharepoint(MethodBase):
             logger.error(f"Error making Graph API call: {str(e)}")
             return {"error": f"Error making Graph API call: {str(e)}"}

-    async def _getSiteId(self, access_token: str, hostname: str, site_path: str) -> str:
+    async def _getSiteId(self, hostname: str, site_path: str) -> str:
         """Get SharePoint site ID from hostname and site path"""
         try:
             endpoint = f"sites/{hostname}:/{site_path}"
-            result = await self._makeGraphApiCall(access_token, endpoint)
+            result = await self._makeGraphApiCall(endpoint)

             if "error" in result:
                 logger.error(f"Error getting site ID: {result['error']}")
@@ -482,7 +471,7 @@ class MethodSharepoint(MethodBase):
             # Discover SharePoint sites - use targeted approach when site parameter is provided
             if site:
                 # When site parameter is provided, discover all sites first, then filter
-                all_sites = await self._discoverSharePointSites(connection["accessToken"])
+                all_sites = await self._discoverSharePointSites()
                 if not all_sites:
                     return ActionResult.isFailure(error="No SharePoint sites found or accessible")

@@ -492,7 +481,7 @@ class MethodSharepoint(MethodBase):
                     return ActionResult.isFailure(error=f"No SharePoint sites found matching '{site}'")
             else:
                 # No site parameter - discover all sites
-                sites = await self._discoverSharePointSites(connection["accessToken"])
+                sites = await self._discoverSharePointSites()
                 if not sites:
                     return ActionResult.isFailure(error="No SharePoint sites found or accessible")

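Both branches now call the parameterless discovery helper and only differ in whether the result is filtered. The filter itself sits outside this hunk; a plausible version, assuming Graph's usual site fields (displayName, webUrl), would be:

    from typing import Any, Dict, List

    def filter_sites(all_sites: List[Dict[str, Any]], site: str) -> List[Dict[str, Any]]:
        # Keep sites whose display name or URL mentions the requested site string.
        needle = site.lower()
        return [
            s for s in all_sites
            if needle in s.get("displayName", "").lower() or needle in s.get("webUrl", "").lower()
        ]

    sites = [
        {"displayName": "Finance", "webUrl": "https://contoso.sharepoint.com/sites/finance"},
        {"displayName": "HR", "webUrl": "https://contoso.sharepoint.com/sites/hr"},
    ]
    assert [s["displayName"] for s in filter_sites(sites, "finance")] == ["Finance"]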
@@ -535,7 +524,6 @@ class MethodSharepoint(MethodBase):

             # Use global search endpoint (site-specific search not available)
             unified_result = await self._makeGraphApiCall(
-                connection["accessToken"],
                 "search/query",
                 method="POST",
                 data=json.dumps(payload).encode("utf-8")
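For reference, `payload` here is a request for Graph's documented `search/query` endpoint; its body shape is roughly the following (the queryString value is illustrative):

    import json

    payload = {
        "requests": [
            {
                "entityTypes": ["driveItem"],
                "query": {"queryString": "quarterly report"},
            }
        ]
    }
    body = json.dumps(payload).encode("utf-8")  # matches the data= argument above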
@@ -707,7 +695,7 @@ class MethodSharepoint(MethodBase):
                 logger.info(f"Using search API for files with query: '{search_query}'")

                 # Make the search API call (files)
-                search_result = await self._makeGraphApiCall(connection["accessToken"], endpoint)
+                search_result = await self._makeGraphApiCall(endpoint)
                 if "error" in search_result:
                     logger.warning(f"Search failed for site {site_name}: {search_result['error']}")
                     continue
@@ -942,7 +930,7 @@ class MethodSharepoint(MethodBase):
                 return ActionResult.isFailure(error=f"Invalid pathQuery '{pathQuery}'. This appears to be search terms, not a valid SharePoint path. Use findDocumentPath action first to search for folders, then use the returned folder path as pathQuery.")

             # For pathQuery, we need to discover sites to find the specific one
-            sites = await self._discoverSharePointSites(connection["accessToken"])
+            sites = await self._discoverSharePointSites()
             if not sites:
                 return ActionResult.isFailure(error="No SharePoint sites found or accessible")
         else:
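The "Invalid pathQuery" guard implies a heuristic that separates navigable folder paths from loose search terms. That heuristic is not visible in this hunk; one hypothetical version, shown only to make the failure message concrete:

    def looks_like_sharepoint_path(path_query: str) -> bool:
        # Hypothetical heuristic, not the commit's code: real folder paths
        # contain a separator, while bare search terms usually do not.
        q = path_query.strip().strip("/")
        return bool(q) and "/" in q

    assert looks_like_sharepoint_path("Shared Documents/Reports")
    assert not looks_like_sharepoint_path("quarterly report 2024")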
@@ -975,7 +963,7 @@ class MethodSharepoint(MethodBase):
                 search_query = fileName.replace("'", "''") # Escape single quotes for OData
                 endpoint = f"sites/{site_id}/drive/root/search(q='{search_query}')"

-                search_result = await self._makeGraphApiCall(connection["accessToken"], endpoint)
+                search_result = await self._makeGraphApiCall(endpoint)

                 if "error" in search_result:
                     continue
@@ -988,7 +976,7 @@ class MethodSharepoint(MethodBase):
                     file_endpoint = f"sites/{site_id}/drive/items/{file_id}"

                     # Get file metadata
-                    file_info_result = await self._makeGraphApiCall(connection["accessToken"], file_endpoint)
+                    file_info_result = await self._makeGraphApiCall(file_endpoint)

                     if "error" in file_info_result:
                         continue
@@ -1027,7 +1015,7 @@ class MethodSharepoint(MethodBase):
                     # For content download, we need to handle binary data
                     try:
                         async with aiohttp.ClientSession() as session:
-                            headers = {"Authorization": f"Bearer {connection['accessToken']}"}
+                            headers = {"Authorization": f"Bearer {self.service.sharepoint._target.access_token}"}
                             async with session.get(f"https://graph.microsoft.com/v1.0/{content_endpoint}", headers=headers) as response:
                                 if response.status == 200:
                                     content = await response.text()
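The download branch bypasses `_makeGraphApiCall` because `/content` returns the file body rather than JSON. Extracted into a standalone coroutine, the pattern is (token and endpoint assumed; `response.text()` suits text formats, while `response.read()` would be needed for true binaries):

    from typing import Optional
    import aiohttp

    async def download_content(access_token: str, content_endpoint: str) -> Optional[str]:
        headers = {"Authorization": f"Bearer {access_token}"}
        async with aiohttp.ClientSession() as session:
            async with session.get(f"https://graph.microsoft.com/v1.0/{content_endpoint}", headers=headers) as response:
                if response.status == 200:
                    return await response.text()
                return None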
@@ -1280,7 +1268,7 @@ class MethodSharepoint(MethodBase):
                 return ActionResult.isFailure(error=f"Invalid pathQuery '{upload_path}'. This appears to be search terms, not a valid SharePoint path. Use findDocumentPath action first to search for folders, then use the returned folder path as pathQuery.")

             # For pathQuery, we need to discover sites to find the specific one
-            sites = await self._discoverSharePointSites(connection["accessToken"])
+            sites = await self._discoverSharePointSites()
             if not sites:
                 return ActionResult.isFailure(error="No SharePoint sites found or accessible")

@@ -1368,7 +1356,6 @@ class MethodSharepoint(MethodBase):

             # Upload the file
             upload_result = await self._makeGraphApiCall(
-                connection["accessToken"],
                 upload_endpoint,
                 method="PUT",
                 data=file_data
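With the token argument gone, an upload reduces to building the endpoint and passing raw bytes. A sketch of the calling convention (paths and names illustrative; note that Graph's simple PUT upload is limited to small files, and larger ones need an upload session, which this hunk does not show):

    from pathlib import Path

    def build_upload_call(site_id: str, folder: str, file_name: str, local_path: str):
        # Returns the arguments for _makeGraphApiCall(endpoint, method="PUT", data=...).
        file_data = Path(local_path).read_bytes()
        endpoint = f"sites/{site_id}/drive/root:/{folder}/{file_name}:/content"
        return endpoint, "PUT", file_data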
@@ -1633,7 +1620,7 @@ class MethodSharepoint(MethodBase):
                 return ActionResult.isFailure(error=f"Invalid pathQuery '{pathQuery}'. This appears to be search terms, not a valid SharePoint path. Use findDocumentPath action first to search for folders, then use the returned folder path as pathQuery.")

             # For pathQuery, we need to discover sites to find the specific one
-            sites = await self._discoverSharePointSites(connection["accessToken"])
+            sites = await self._discoverSharePointSites()
             if not sites:
                 return ActionResult.isFailure(error="No SharePoint sites found or accessible")
         else:
@@ -1680,7 +1667,7 @@ class MethodSharepoint(MethodBase):
                 endpoint = f"sites/{site_id}/drive/root:/{folder_path_clean}:/children"

                 # Make the API call to list folder contents
-                api_result = await self._makeGraphApiCall(connection["accessToken"], endpoint)
+                api_result = await self._makeGraphApiCall(endpoint)

                 if "error" in api_result:
                     logger.warning(f"Failed to list folder {folderPath} in site {site_name}: {api_result['error']}")
@@ -1745,7 +1732,7 @@ class MethodSharepoint(MethodBase):
                             subfolder_endpoint = f"sites/{site_id}/drive/items/{item['id']}/children"

                             logger.debug(f"Getting contents of subfolder: {item['name']}")
-                            subfolder_result = await self._makeGraphApiCall(connection["accessToken"], subfolder_endpoint)
+                            subfolder_result = await self._makeGraphApiCall(subfolder_endpoint)
                             if "error" not in subfolder_result:
                                 subfolder_items = subfolder_result.get("value", [])
                                 logger.debug(f"Found {len(subfolder_items)} items in subfolder {item['name']}")

@@ -17,7 +17,7 @@ logger = logging.getLogger(__name__)
 # Global methods catalog - moved from serviceCenter
 methods = {}

-def _discoverMethods(service_center):
+def _discoverMethods(serviceCenter):
     """Dynamically discover all method classes and their actions in modules methods package"""
     try:
         # Import the methods package
@@ -36,7 +36,7 @@ def _discoverMethods(service_center):
                 issubclass(item, MethodBase) and
                 item != MethodBase):
                 # Instantiate the method
-                methodInstance = item(service_center)
+                methodInstance = item(serviceCenter)

                 # Discover actions from public methods
                 actions = {}
@@ -83,11 +83,11 @@ def _discoverMethods(service_center):
     except Exception as e:
         logger.error(f"Error discovering methods: {str(e)}")

-def getMethodsList(service_center) -> List[str]:
+def getMethodsList(serviceCenter) -> List[str]:
     """Get list of available methods with their signatures in the required format"""
     # Initialize methods if not already done
     if not methods:
-        _discoverMethods(service_center)
+        _discoverMethods(serviceCenter)

     methodList = []
     for methodName, method in methods.items():
@@ -99,10 +99,10 @@ def getMethodsList(service_center) -> List[str]:
         methodList.append(signature)
     return methodList

-def getEnhancedDocumentContext(service_center) -> str:
+def getEnhancedDocumentContext(serviceCenter) -> str:
     """Get enhanced document context formatted for action planning prompts with proper docList and docItem references"""
     try:
-        document_list = service_center.getDocumentReferenceList()
+        document_list = serviceCenter.getDocumentReferenceList()

         # Build technical context string for AI action planning
         context = "AVAILABLE DOCUMENTS:\n\n"
@@ -114,7 +114,7 @@ def getEnhancedDocumentContext(service_center) -> str:
             # Generate docList reference for the exchange (using message ID and label)
             # Find the message that corresponds to this exchange
             message_id = None
-            for message in service_center.workflow.messages:
+            for message in serviceCenter.workflow.messages:
                 if hasattr(message, 'documentsLabel') and message.documentsLabel == exchange.documentsLabel:
                     message_id = message.id
                     break
@@ -143,7 +143,7 @@ def getEnhancedDocumentContext(service_center) -> str:
             # Generate docList reference for the exchange (using message ID and label)
             # Find the message that corresponds to this exchange
             message_id = None
-            for message in service_center.workflow.messages:
+            for message in serviceCenter.workflow.messages:
                 if hasattr(message, 'documentsLabel') and message.documentsLabel == exchange.documentsLabel:
                     message_id = message.id
                     break

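The hunks above only rename `service_center` to `serviceCenter`, but together they outline the discovery mechanism: walk the methods package, find MethodBase subclasses, and instantiate each with the service center. A runnable approximation (the package name and local MethodBase are assumptions; only the camelCase parameter is taken from the diff):

    import importlib
    import inspect
    import pkgutil

    class MethodBase:
        # Stand-in for the real base class imported by the catalog module.
        def __init__(self, serviceCenter):
            self.serviceCenter = serviceCenter

    def discoverMethods(serviceCenter, packageName: str = "modules.methods") -> dict:
        methods = {}
        package = importlib.import_module(packageName)
        for modinfo in pkgutil.iter_modules(package.__path__):
            module = importlib.import_module(f"{packageName}.{modinfo.name}")
            for _, item in inspect.getmembers(module, inspect.isclass):
                if issubclass(item, MethodBase) and item is not MethodBase:
                    # Instantiate the method with the shared service center
                    methods[item.__name__] = item(serviceCenter)
        return methods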
@@ -32,7 +32,7 @@ class WorkflowManager:
         currentTime = get_utc_timestamp()

         if workflowId:
-            workflow = self.services.getWorkflow(workflowId)
+            workflow = self.services.workflow.getWorkflow(workflowId)
             if not workflow:
                 raise ValueError(f"Workflow {workflowId} not found")

@@ -43,11 +43,11 @@ class WorkflowManager:
                 logger.info(f"Stopping running workflow {workflowId} before processing new prompt")
                 workflow.status = "stopped"
                 workflow.lastActivity = currentTime
-                self.services.updateWorkflow(workflowId, {
+                self.services.workflow.updateWorkflow(workflowId, {
                     "status": "stopped",
                     "lastActivity": currentTime
                 })
-                self.services.createLog({
+                self.services.workflow.createLog({
                     "workflowId": workflowId,
                     "message": "Workflow stopped for new prompt",
                     "type": "info",
@@ -57,17 +57,17 @@ class WorkflowManager:
                 await asyncio.sleep(0.1)

             newRound = workflow.currentRound + 1
-            self.services.updateWorkflow(workflowId, {
+            self.services.workflow.updateWorkflow(workflowId, {
                 "status": "running",
                 "lastActivity": currentTime,
                 "currentRound": newRound
             })

-            workflow = self.services.getWorkflow(workflowId)
+            workflow = self.services.workflow.getWorkflow(workflowId)
             if not workflow:
                 raise ValueError(f"Failed to reload workflow {workflowId} after update")

-            self.services.createLog({
+            self.services.workflow.createLog({
                 "workflowId": workflowId,
                 "message": f"Workflow resumed (round {workflow.currentRound})",
                 "type": "info",
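Every `self.services.*` persistence call in this file moves behind a sub-service, which is the core of the refactor: a flat service object becomes a namespaced facade. A toy version of that shape (all class and field names here are assumed, not taken from the commit):

    class WorkflowService:
        def __init__(self):
            self._store = {}

        def updateWorkflow(self, workflowId: str, fields: dict) -> None:
            self._store.setdefault(workflowId, {}).update(fields)

        def createLog(self, entry: dict) -> None:
            print("log:", entry)

    class UserService:
        def __init__(self, mandateId: str):
            self.mandateId = mandateId

    class Services:
        # Facade: workflow persistence and user context live in sub-services.
        def __init__(self, mandateId: str):
            self.workflow = WorkflowService()
            self.user = UserService(mandateId)

    services = Services(mandateId="m-42")
    services.workflow.updateWorkflow("wf-1", {"status": "running"})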
@@ -85,7 +85,7 @@ class WorkflowManager:
                 "currentAction": 0,
                 "totalTasks": 0,
                 "totalActions": 0,
-                "mandateId": self.services.mandateId,
+                "mandateId": self.services.user.mandateId,
                 "messageIds": [],
                 "workflowMode": workflowMode,
                 "maxSteps": 5 if workflowMode == "React" else 1, # Set maxSteps for React mode
@@ -99,12 +99,12 @@ class WorkflowManager:
                 }
             }

-            workflow = self.services.createWorkflow(workflowData)
+            workflow = self.services.workflow.createWorkflow(workflowData)
             logger.info(f"Created workflow with mode: {getattr(workflow, 'workflowMode', 'NOT_SET')}")
             logger.info(f"Workflow data passed: {workflowData.get('workflowMode', 'NOT_IN_DATA')}")
             workflow.currentRound = 1
-            self.services.updateWorkflow(workflow.id, {"currentRound": 1})
-            self.services.updateWorkflowStats(workflow.id, bytesSent=0, bytesReceived=0)
+            self.services.workflow.updateWorkflow(workflow.id, {"currentRound": 1})
+            self.services.workflow.updateWorkflowStats(workflow.id, bytesSent=0, bytesReceived=0)

             # Add workflow to services
             self.services.workflow = workflow
@@ -120,17 +120,17 @@ class WorkflowManager:
     async def workflowStop(self, workflowId: str) -> ChatWorkflow:
         """Stops a running workflow."""
         try:
-            workflow = self.services.getWorkflow(workflowId)
+            workflow = self.services.workflow.getWorkflow(workflowId)
             if not workflow:
                 raise ValueError(f"Workflow {workflowId} not found")

             workflow.status = "stopped"
             workflow.lastActivity = get_utc_timestamp()
-            self.services.updateWorkflow(workflowId, {
+            self.services.workflow.updateWorkflow(workflowId, {
                 "status": "stopped",
                 "lastActivity": workflow.lastActivity
             })
-            self.services.createLog({
+            self.services.workflow.createLog({
                 "workflowId": workflowId,
                 "message": "Workflow stopped",
                 "type": "warning",
@@ -192,7 +192,7 @@ class WorkflowManager:
             }

             # Create message first to get messageId
-            message = self.services.createMessage(messageData)
+            message = self.services.workflow.createMessage(messageData)
             if message:
                 workflow.messages.append(message)

@@ -205,7 +205,7 @@ class WorkflowManager:
                 documents = await self._processFileIds(userInput.listFileId, message.id)
                 message.documents = documents
                 # Update the message with documents in database
-                self.services.updateMessage(message.id, {"documents": [doc.to_dict() for doc in documents]})
+                self.services.workflow.updateMessage(message.id, {"documents": [doc.to_dict() for doc in documents]})

             return message
         else:
@@ -307,14 +307,14 @@ class WorkflowManager:
                     "taskProgress": "stopped",
                     "actionProgress": "stopped"
                 }
-                message = self.services.createMessage(stopped_message)
+                message = self.services.workflow.createMessage(stopped_message)
                 if message:
                     workflow.messages.append(message)

                 # Update workflow status to stopped
                 workflow.status = "stopped"
                 workflow.lastActivity = get_utc_timestamp()
-                self.services.updateWorkflow(workflow.id, {
+                self.services.workflow.updateWorkflow(workflow.id, {
                     "status": "stopped",
                     "lastActivity": workflow.lastActivity
                 })
@@ -339,14 +339,14 @@ class WorkflowManager:
                 "taskProgress": "stopped",
                 "actionProgress": "stopped"
             }
-            message = self.services.createMessage(stopped_message)
+            message = self.services.workflow.createMessage(stopped_message)
             if message:
                 workflow.messages.append(message)

             # Update workflow status to stopped
             workflow.status = "stopped"
             workflow.lastActivity = get_utc_timestamp()
-            self.services.updateWorkflow(workflow.id, {
+            self.services.workflow.updateWorkflow(workflow.id, {
                 "status": "stopped",
                 "lastActivity": workflow.lastActivity,
                 "totalTasks": workflow.totalTasks,
@@ -354,7 +354,7 @@ class WorkflowManager:
             })

             # Add stopped log entry
-            self.services.createLog({
+            self.services.workflow.createLog({
                 "workflowId": workflow.id,
                 "message": "Workflow stopped by user",
                 "type": "warning",
@@ -381,14 +381,14 @@ class WorkflowManager:
                 "taskProgress": "fail",
                 "actionProgress": "fail"
             }
-            message = self.services.createMessage(error_message)
+            message = self.services.workflow.createMessage(error_message)
             if message:
                 workflow.messages.append(message)

             # Update workflow status to failed
             workflow.status = "failed"
             workflow.lastActivity = get_utc_timestamp()
-            self.services.updateWorkflow(workflow.id, {
+            self.services.workflow.updateWorkflow(workflow.id, {
                 "status": "failed",
                 "lastActivity": workflow.lastActivity,
                 "totalTasks": workflow.totalTasks,
@@ -396,7 +396,7 @@ class WorkflowManager:
             })

             # Add failed log entry
-            self.services.createLog({
+            self.services.workflow.createLog({
                 "workflowId": workflow.id,
                 "message": f"Workflow failed: {workflow_result.error or 'Unknown error'}",
                 "type": "error",
@@ -428,14 +428,14 @@ class WorkflowManager:
                 "taskProgress": "fail",
                 "actionProgress": "fail"
             }
-            message = self.services.createMessage(error_message)
+            message = self.services.workflow.createMessage(error_message)
             if message:
                 workflow.messages.append(message)

             # Update workflow status to failed
             workflow.status = "failed"
             workflow.lastActivity = get_utc_timestamp()
-            self.services.updateWorkflow(workflow.id, {
+            self.services.workflow.updateWorkflow(workflow.id, {
                 "status": "failed",
                 "lastActivity": workflow.lastActivity,
                 "totalTasks": workflow.totalTasks,
@@ -473,7 +473,7 @@ class WorkflowManager:
             }

             # Create message using interface
-            message = self.services.createMessage(messageData)
+            message = self.services.workflow.createMessage(messageData)
             if message:
                 workflow.messages.append(message)

@@ -482,13 +482,13 @@ class WorkflowManager:
             workflow.lastActivity = get_utc_timestamp()

             # Update workflow in database
-            self.services.updateWorkflow(workflow.id, {
+            self.services.workflow.updateWorkflow(workflow.id, {
                 "status": "completed",
                 "lastActivity": workflow.lastActivity
             })

             # Add completion log entry
-            self.services.createLog({
+            self.services.workflow.createLog({
                 "workflowId": workflow.id,
                 "message": "Workflow completed",
                 "type": "success",
@@ -534,7 +534,7 @@ class WorkflowManager:
             # Update workflow status to stopped
             workflow.status = "stopped"
             workflow.lastActivity = get_utc_timestamp()
-            self.services.updateWorkflow(workflow.id, {
+            self.services.workflow.updateWorkflow(workflow.id, {
                 "status": "stopped",
                 "lastActivity": workflow.lastActivity,
                 "totalTasks": workflow.totalTasks,
@@ -559,12 +559,12 @@ class WorkflowManager:
                 "taskProgress": "pending",
                 "actionProgress": "pending"
             }
-            message = self.services.createMessage(stopped_message)
+            message = self.services.workflow.createMessage(stopped_message)
             if message:
                 workflow.messages.append(message)

             # Add log entry
-            self.services.createLog({
+            self.services.workflow.createLog({
                 "workflowId": workflow.id,
                 "message": "Workflow stopped by user",
                 "type": "warning",
@@ -579,7 +579,7 @@ class WorkflowManager:
             # Update workflow status to failed
             workflow.status = "failed"
             workflow.lastActivity = get_utc_timestamp()
-            self.services.updateWorkflow(workflow.id, {
+            self.services.workflow.updateWorkflow(workflow.id, {
                 "status": "failed",
                 "lastActivity": workflow.lastActivity,
                 "totalTasks": workflow.totalTasks,
@@ -604,12 +604,12 @@ class WorkflowManager:
                 "taskProgress": "fail",
                 "actionProgress": "fail"
             }
-            message = self.services.createMessage(error_message)
+            message = self.services.workflow.createMessage(error_message)
             if message:
                 workflow.messages.append(message)

             # Add error log entry
-            self.services.createLog({
+            self.services.workflow.createLog({
                 "workflowId": workflow.id,
                 "message": f"Workflow failed: {str(error)}",
                 "type": "error",
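The stopped/failed/completed branches all follow the same persist-then-log sequence against the workflow sub-service. Factored out, the failure path looks roughly like this (the names mirror the hunks above, but the helper itself is an illustration, not part of the commit):

    from datetime import datetime, timezone

    def get_utc_timestamp() -> float:
        return datetime.now(timezone.utc).timestamp()

    def markWorkflowFailed(services, workflow, error: Exception) -> None:
        # Mutate the in-memory object, persist the change, then log it.
        workflow.status = "failed"
        workflow.lastActivity = get_utc_timestamp()
        services.workflow.updateWorkflow(workflow.id, {
            "status": "failed",
            "lastActivity": workflow.lastActivity,
        })
        services.workflow.createLog({
            "workflowId": workflow.id,
            "message": f"Workflow failed: {error}",
            "type": "error",
        })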