""" Delta Group Sync Manager This module handles the synchronization of tickets to SharePoint using the new Graph API-based connector architecture. """ import logging import os import io import pandas as pd import csv as csv_module from io import StringIO, BytesIO from datetime import datetime, UTC from modules.services import getInterface as getServices logger = logging.getLogger(__name__) class ManagerSyncDelta: """Manages Tickets to SharePoint synchronization for Delta Group. Supports two sync modes: - CSV mode: Uses CSV files for synchronization (default) - Excel mode: Uses Excel (.xlsx) files for synchronization To change sync mode, use the setSyncMode() method or modify SYNC_MODE class variable. """ SHAREPOINT_SITE_NAME = "SteeringBPM" SHAREPOINT_SITE_PATH = "SteeringBPM" SHAREPOINT_HOSTNAME = "deltasecurityag.sharepoint.com" SHAREPOINT_MAIN_FOLDER = "/General/50 Docs hosted by SELISE" SHAREPOINT_BACKUP_FOLDER = "/General/50 Docs hosted by SELISE/SyncHistory" SHAREPOINT_AUDIT_FOLDER = "/General/50 Docs hosted by SELISE/SyncHistory" SHAREPOINT_USER_ID = "patrick.motsch@delta.ch" SYNC_MODE = "xlsx" # Can be "csv" or "xlsx" # File names for different sync modes SYNC_FILE_CSV = "DELTAgroup x SELISE Ticket Exchange List.csv" SYNC_FILE_XLSX = "DELTAgroup x SELISE Ticket Exchange List.xlsx" # Tickets connection parameters JIRA_USERNAME = "p.motsch@valueon.ch" JIRA_API_TOKEN = "" # Will be set in __init__ JIRA_URL = "https://deltasecurity.atlassian.net" JIRA_PROJECT_CODE = "DCS" JIRA_ISSUE_TYPE = "Task" # Task sync definition for field mapping TASK_SYNC_DEFINITION={ #key=excel-header, [get:ticket>excel | put: excel>ticket, tickets-xml-field-list] 'ID': ['get', ['key']], 'Module Category': ['get', ['fields', 'customfield_10058', 'value']], 'Summary': ['get', ['fields', 'summary']], 'Description': ['get', ['fields', 'description']], # ADF format - needs conversion to text 'References': ['get', ['fields', 'customfield_10066']], # Field exists, may be None 'Priority': ['get', ['fields', 'priority', 'name']], 'Issue Status': ['get', ['fields', 'status', 'name']], 'Assignee': ['get', ['fields', 'assignee', 'displayName']], 'Issue Created': ['get', ['fields', 'created']], 'Due Date': ['get', ['fields', 'duedate']], # Field exists, may be None 'DELTA Comments': ['get', ['fields', 'customfield_10167']], # Field exists, may be None 'SELISE Ticket References': ['put', ['fields', 'customfield_10067']], 'SELISE Status Values': ['put', ['fields', 'customfield_10065']], 'SELISE Comments': ['put', ['fields', 'customfield_10168']], } def __init__(self, eventUser=None): self.targetSite = None self.services = None self.sharepointConnection = None self.eventUser = eventUser self.sync_audit_log = [] # Store audit log entries in memory try: if not eventUser: logger.error("Event user not found - SharePoint connection required") self._logAuditEvent("SYNC_INIT", "FAILED", "Event user not found") else: self.services = getServices(eventUser, None) # Read config values using services self.APP_ENV_TYPE = self.services.utils.configGet("APP_ENV_TYPE", "dev") self.JIRA_API_TOKEN = self.services.utils.configGet("Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET", "") # Resolve SharePoint connection for the configured user id self.sharepointConnection = self.services.chat.getUserConnectionByExternalUsername("msft", self.SHAREPOINT_USER_ID) if not self.sharepointConnection: logger.error( f"No SharePoint connection found for user: {self.SHAREPOINT_USER_ID}" ) self._logAuditEvent("SYNC_INIT", "FAILED", f"No SharePoint connection for user: {self.SHAREPOINT_USER_ID}") else: # Configure SharePoint service token and set connector reference if not self.services.sharepoint.setAccessTokenFromConnection( self.sharepointConnection ): logger.error("Failed to set SharePoint token from UserConnection") self._logAuditEvent("SYNC_INIT", "FAILED", "Failed to set SharePoint token") else: logger.info( f"SharePoint token configured for connection: {self.sharepointConnection.id}" ) self._logAuditEvent("SYNC_INIT", "SUCCESS", f"SharePoint token configured for connection: {self.sharepointConnection.id}") except Exception as e: logger.error(f"Initialization error in ManagerSyncDelta.__init__: {e}") self._logAuditEvent("SYNC_INIT", "ERROR", f"Initialization error: {str(e)}") def _logAuditEvent(self, action: str, status: str, details: str): """Log audit events for sync operations to memory.""" try: timestamp = datetime.fromtimestamp(self.services.utils.timestampGetUtc(), UTC).strftime("%Y-%m-%d %H:%M:%S UTC") userId = str(self.eventUser.id) if self.eventUser else "system" logEntry = f"{timestamp} | {userId} | {action} | {status} | {details}" self.sync_audit_log.append(logEntry) logger.info(f"Sync Audit: {logEntry}") except Exception as e: logger.warning(f"Failed to log audit event: {str(e)}") def _logSyncChanges(self, mergeDetails: dict, syncMode: str): """Log detailed field changes for sync operations.""" try: # Log summary statistics summary = f"Sync {syncMode} - Updated: {mergeDetails['updated']}, Added: {mergeDetails['added']}, Unchanged: {mergeDetails['unchanged']}" self._logAuditEvent("SYNC_CHANGES_SUMMARY", "INFO", summary) # Log individual field changes (limit to first 10 to avoid spam) for change in mergeDetails['changes'][:10]: # Truncate very long changes to avoid logging issues if len(change) > 500: change = change[:500] + "... [truncated]" self._logAuditEvent("SYNC_FIELD_CHANGE", "INFO", f"{syncMode}: {change}") # Log count if there were more changes if len(mergeDetails['changes']) > 10: self._logAuditEvent("SYNC_FIELD_CHANGE", "INFO", f"{syncMode}: ... and {len(mergeDetails['changes']) - 10} more changes") except Exception as e: logger.warning(f"Failed to log sync changes: {str(e)}") async def _saveAuditLogToSharepoint(self): """Save the sync audit log to SharePoint.""" try: if not self.sync_audit_log or not self.targetSite: return False # Generate log filename with current timestamp timestamp = datetime.fromtimestamp(self.services.utils.timestampGetUtc(), UTC).strftime("%Y%m%d_%H%M%S") log_filename = f"log_{timestamp}.log" # Create log content log_content = "\n".join(self.sync_audit_log) log_bytes = log_content.encode('utf-8') # Upload to SharePoint audit folder await self.services.sharepoint.uploadFile( siteId=self.targetSite['id'], folderPath=self.SHAREPOINT_AUDIT_FOLDER, fileName=log_filename, content=log_bytes ) logger.info(f"Sync audit log saved to SharePoint: {log_filename}") self._logAuditEvent("AUDIT_LOG_SAVE", "SUCCESS", f"Audit log saved to SharePoint: {log_filename}") return True except Exception as e: logger.error(f"Failed to save audit log to SharePoint: {str(e)}") self._logAuditEvent("AUDIT_LOG_SAVE", "FAILED", f"Failed to save audit log: {str(e)}") return False def getSyncFileName(self) -> str: """Get the appropriate sync file name based on the sync mode.""" if self.SYNC_MODE == "xlsx": return self.SYNC_FILE_XLSX else: # Default to CSV return self.SYNC_FILE_CSV def setSyncMode(self, mode: str) -> bool: """Set the sync mode to either 'csv' or 'xlsx'. Args: mode: Either 'csv' or 'xlsx' Returns: bool: True if mode was set successfully, False if invalid mode """ if mode.lower() in ["csv", "xlsx"]: self.SYNC_MODE = mode.lower() logger.info(f"Sync mode changed to: {self.SYNC_MODE}") return True else: logger.error(f"Invalid sync mode: {mode}. Must be 'csv' or 'xlsx'") return False async def initializeInterface(self) -> bool: """Initialize SharePoint connector; tickets connector is created by interface on demand.""" try: # Validate init-prepared members if not self.services or not self.sharepointConnection or not self.services.sharepoint: logger.error("Service or SharePoint connection not initialized") return False # Resolve the site by hostname + site path to get the real site ID logger.info( f"Resolving site ID via hostname+path: {self.SHAREPOINT_HOSTNAME}:/sites/{self.SHAREPOINT_SITE_PATH}" ) resolved = await self.services.sharepoint.findSiteByUrl( hostname=self.SHAREPOINT_HOSTNAME, sitePath=self.SHAREPOINT_SITE_PATH ) if not resolved: logger.error( f"Failed to resolve site. Hostname: {self.SHAREPOINT_HOSTNAME}, Path: {self.SHAREPOINT_SITE_PATH}" ) return False self.targetSite = { "id": resolved.get("id"), "displayName": resolved.get("displayName", self.SHAREPOINT_SITE_NAME), "name": resolved.get("name", self.SHAREPOINT_SITE_NAME) } # Test site access by listing root of the drive logger.info("Testing site access using resolved site ID...") test_result = await self.services.sharepoint.listFolderContents( siteId=self.targetSite["id"], folderPath="" ) if test_result is not None: logger.info( f"Site access confirmed: {self.targetSite['displayName']} (ID: {self.targetSite['id']})" ) else: logger.error("Could not access site drive - check permissions") return False return True except Exception as e: logger.error(f"Error initializing connectors: {str(e)}") return False async def syncTicketsOverSharepoint(self) -> bool: """Perform Tickets to SharePoint synchronization using list-based interface and local CSV/XLSX handling.""" try: logger.info(f"Starting JIRA to SharePoint synchronization (Mode: {self.SYNC_MODE})") self._logAuditEvent("SYNC_START", "INFO", f"Starting JIRA to SharePoint sync (Mode: {self.SYNC_MODE})") # Initialize interface if not await self.initializeInterface(): logger.error("Failed to initialize connectors") self._logAuditEvent("SYNC_INTERFACE", "FAILED", "Failed to initialize connectors") return False # Dump current Jira fields to text file for reference try: pass # await dump_jira_fields_to_file() except Exception as e: logger.warning(f"Failed to dump JIRA fields (non-blocking): {str(e)}") # Dump actual JIRA data for debugging try: pass # await dump_jira_data_to_file() except Exception as e: logger.warning(f"Failed to dump JIRA data (non-blocking): {str(e)}") # Get the appropriate sync file name based on mode sync_file_name = self.getSyncFileName() logger.info(f"Using sync file: {sync_file_name}") # Create list-based ticket interface (initialize connector by type) sync_interface = await self.services.ticket.connectTicket( taskSyncDefinition=self.TASK_SYNC_DEFINITION, connectorType="Jira", connectorParams={ "apiUsername": self.JIRA_USERNAME, "apiToken": self.JIRA_API_TOKEN, "apiUrl": self.JIRA_URL, "projectCode": self.JIRA_PROJECT_CODE, "ticketType": self.JIRA_ISSUE_TYPE, }, ) # Perform the sophisticated sync based on mode if self.SYNC_MODE == "xlsx": # Export tickets to list data_list = await sync_interface.exportTicketsAsList() self._logAuditEvent("SYNC_EXPORT", "INFO", f"Exported {len(data_list)} tickets from JIRA") # Read existing Excel headers/content existing_data = [] existing_headers = {"header1": "Header 1", "header2": "Header 2"} try: file_path = f"{self.SHAREPOINT_MAIN_FOLDER}/{sync_file_name}" excel_content = await self.services.sharepoint.downloadFileByPath( siteId=self.targetSite['id'], filePath=file_path ) existing_data, existing_headers = self.parseExcelContent(excel_content) except Exception: pass # Merge and write merged_data, merge_details = self.mergeJiraWithExistingDetailed(data_list, existing_data) # Log detailed changes for Excel mode self._logSyncChanges(merge_details, "EXCEL") await self.backupSharepointFile(filename=sync_file_name) excel_bytes = self.createExcelContent(merged_data, existing_headers) await self.services.sharepoint.uploadFile( siteId=self.targetSite['id'], folderPath=self.SHAREPOINT_MAIN_FOLDER, fileName=sync_file_name, content=excel_bytes, ) # Import back to tickets try: excel_content = await self.services.sharepoint.downloadFileByPath( siteId=self.targetSite['id'], filePath=file_path ) excel_rows, _ = self.parseExcelContent(excel_content) self._logAuditEvent("SYNC_IMPORT", "INFO", f"Importing {len(excel_rows)} Excel rows back to tickets") except Exception as e: excel_rows = [] self._logAuditEvent("SYNC_IMPORT", "WARNING", f"Failed to download Excel for import: {str(e)}") await sync_interface.importListToTickets(excel_rows) else: # CSV mode (default) # Export tickets to list data_list = await sync_interface.exportTicketsAsList() self._logAuditEvent("SYNC_EXPORT", "INFO", f"Exported {len(data_list)} tickets from JIRA") # Prepare headers by reading existing CSV if present existing_headers = {"header1": "Header 1", "header2": "Header 2"} existing_data: list[dict] = [] try: file_path = f"{self.SHAREPOINT_MAIN_FOLDER}/{sync_file_name}" csv_content = await self.services.sharepoint.downloadFileByPath( siteId=self.targetSite['id'], filePath=file_path ) csv_lines = csv_content.decode('utf-8').split('\n') if len(csv_lines) >= 2: existing_headers["header1"] = csv_lines[0].rstrip('\r\n') existing_headers["header2"] = csv_lines[1].rstrip('\r\n') # Parse existing CSV rows after the two header lines df_existing = pd.read_csv(io.BytesIO(csv_content), skiprows=2, quoting=1, escapechar='\\', on_bad_lines='skip', engine='python') existing_data = df_existing.to_dict('records') except Exception: pass await self.backupSharepointFile(filename=sync_file_name) merged_data, _ = self.mergeJiraWithExistingDetailed(data_list, existing_data) csv_bytes = self.createCsvContent(merged_data, existing_headers) await self.services.sharepoint.uploadFile( siteId=self.targetSite['id'], folderPath=self.SHAREPOINT_MAIN_FOLDER, fileName=sync_file_name, content=csv_bytes, ) # Import from CSV try: csv_content = await self.services.sharepoint.downloadFileByPath( siteId=self.targetSite['id'], filePath=file_path ) df = pd.read_csv(io.BytesIO(csv_content), skiprows=2, quoting=1, escapechar='\\', on_bad_lines='skip', engine='python') csv_rows = df.to_dict('records') self._logAuditEvent("SYNC_IMPORT", "INFO", f"Importing {len(csv_rows)} CSV rows back to tickets") except Exception as e: csv_rows = [] self._logAuditEvent("SYNC_IMPORT", "WARNING", f"Failed to download CSV for import: {str(e)}") await sync_interface.importListToTickets(csv_rows) logger.info(f"JIRA to SharePoint synchronization completed successfully (Mode: {self.SYNC_MODE})") self._logAuditEvent("SYNC_COMPLETE", "SUCCESS", f"JIRA to SharePoint sync completed successfully (Mode: {self.SYNC_MODE})") # Save audit log to SharePoint await self._saveAuditLogToSharepoint() return True except Exception as e: logger.error(f"Error during JIRA to SharePoint synchronization: {str(e)}") self._logAuditEvent("SYNC_ERROR", "FAILED", f"Error during sync: {str(e)}") # Save audit log to SharePoint even on error await self._saveAuditLogToSharepoint() return False async def backupSharepointFile(self, *, filename: str) -> bool: try: timestamp = datetime.fromtimestamp(self.services.utils.timestampGetUtc(), UTC).strftime("%Y%m%d_%H%M%S") backup_filename = f"backup_{timestamp}_{filename}" await self.services.sharepoint.copyFileAsync( siteId=self.targetSite['id'], sourceFolder=self.SHAREPOINT_MAIN_FOLDER, sourceFile=filename, destFolder=self.SHAREPOINT_BACKUP_FOLDER, destFile=backup_filename, ) self._logAuditEvent("SYNC_BACKUP", "SUCCESS", f"Backed up file: {filename} -> {backup_filename}") return True except Exception as e: if "itemNotFound" in str(e) or "404" in str(e): self._logAuditEvent("SYNC_BACKUP", "SKIPPED", f"File not found for backup: {filename}") return True logger.warning(f"Backup failed: {e}") self._logAuditEvent("SYNC_BACKUP", "FAILED", f"Backup failed for {filename}: {str(e)}") return False def mergeJiraWithExistingDetailed(self, jira_data: list[dict], existing_data: list[dict]) -> tuple[list[dict], dict]: existing_lookup = {row.get("ID"): row for row in existing_data if row.get("ID")} merged_data: list[dict] = [] changes: list[str] = [] updated_count = added_count = unchanged_count = 0 for jira_row in jira_data: jira_id = jira_row.get("ID") if jira_id and jira_id in existing_lookup: existing_row = existing_lookup[jira_id].copy() row_changes: list[str] = [] for field_name, field_config in self.TASK_SYNC_DEFINITION.items(): if field_config[0] == 'get': old_value = "" if existing_row.get(field_name) is None else str(existing_row.get(field_name)) new_value = "" if jira_row.get(field_name) is None else str(jira_row.get(field_name)) # Convert ADF data to readable text for logging if isinstance(new_value, dict) and new_value.get("type") == "doc": new_value_readable = self.convertAdfToText(new_value) if old_value != new_value_readable: row_changes.append(f"{field_name}: '{old_value[:100]}...' -> '{new_value_readable[:100]}...'") elif old_value != new_value: # Truncate long values for logging old_truncated = old_value[:100] + "..." if len(old_value) > 100 else old_value new_truncated = new_value[:100] + "..." if len(new_value) > 100 else new_value row_changes.append(f"{field_name}: '{old_truncated}' -> '{new_truncated}'") existing_row[field_name] = jira_row.get(field_name) merged_data.append(existing_row) if row_changes: updated_count += 1 changes.append(f"Row ID {jira_id} updated: {', '.join(row_changes)}") else: unchanged_count += 1 del existing_lookup[jira_id] else: merged_data.append(jira_row) added_count += 1 changes.append(f"Row ID {jira_id} added as new record") for remaining in existing_lookup.values(): merged_data.append(remaining) unchanged_count += 1 details = {"updated": updated_count, "added": added_count, "unchanged": unchanged_count, "changes": changes} return merged_data, details def createCsvContent(self, data: list[dict], existing_headers: dict | None = None) -> bytes: timestamp = datetime.fromtimestamp(self.services.utils.timestampGetUtc(), UTC).strftime("%Y-%m-%d %H:%M:%S UTC") if existing_headers is None: existing_headers = {"header1": "Header 1", "header2": "Header 2"} if not data: cols = list(self.TASK_SYNC_DEFINITION.keys()) df = pd.DataFrame(columns=cols) else: df = pd.DataFrame(data) for column in df.columns: df[column] = df[column].astype("object").fillna("") df[column] = df[column].astype(str).str.replace('\n', '\\n', regex=False).str.replace('"', '""', regex=False) header1_row = next(csv_module.reader([existing_headers.get("header1", "Header 1")]), []) header2_row = next(csv_module.reader([existing_headers.get("header2", "Header 2")]), []) if len(header2_row) > 1: header2_row[1] = timestamp header_row1 = pd.DataFrame([header1_row + [""] * (len(df.columns) - len(header1_row))], columns=df.columns) header_row2 = pd.DataFrame([header2_row + [""] * (len(df.columns) - len(header2_row))], columns=df.columns) table_headers = pd.DataFrame([df.columns.tolist()], columns=df.columns) final_df = pd.concat([header_row1, header_row2, table_headers, df], ignore_index=True) out = StringIO() final_df.to_csv(out, index=False, header=False, quoting=1, escapechar='\\') return out.getvalue().encode('utf-8') def createExcelContent(self, data: list[dict], existing_headers: dict | None = None) -> bytes: timestamp = datetime.fromtimestamp(self.services.utils.timestampGetUtc(), UTC).strftime("%Y-%m-%d %H:%M:%S UTC") if existing_headers is None: existing_headers = {"header1": "Header 1", "header2": "Header 2"} if not data: cols = list(self.TASK_SYNC_DEFINITION.keys()) df = pd.DataFrame(columns=cols) else: df = pd.DataFrame(data) for column in df.columns: df[column] = df[column].astype("object").fillna("") df[column] = df[column].astype(str).str.replace('\n', '\\n', regex=False).str.replace('"', '""', regex=False) header1_row = next(csv_module.reader([existing_headers.get("header1", "Header 1")]), []) header2_row = next(csv_module.reader([existing_headers.get("header2", "Header 2")]), []) if len(header2_row) > 1: header2_row[1] = timestamp header_row1 = pd.DataFrame([header1_row + [""] * (len(df.columns) - len(header1_row))], columns=df.columns) header_row2 = pd.DataFrame([header2_row + [""] * (len(df.columns) - len(header2_row))], columns=df.columns) table_headers = pd.DataFrame([df.columns.tolist()], columns=df.columns) final_df = pd.concat([header_row1, header_row2, table_headers, df], ignore_index=True) buf = BytesIO() final_df.to_excel(buf, index=False, header=False, engine='openpyxl') return buf.getvalue() def parseExcelContent(self, excel_content: bytes) -> tuple[list[dict], dict]: df = pd.read_excel(BytesIO(excel_content), engine='openpyxl', header=None) header_row1 = df.iloc[0:1].copy() header_row2 = df.iloc[1:2].copy() table_headers = df.iloc[2:3].copy() df_data = df.iloc[3:].copy() df_data.columns = table_headers.iloc[0] df_data = df_data.reset_index(drop=True) for column in df_data.columns: df_data[column] = df_data[column].astype('object').fillna('') data = df_data.to_dict(orient='records') headers = { "header1": ",".join([str(x) if pd.notna(x) else "" for x in header_row1.iloc[0].tolist()]), "header2": ",".join([str(x) if pd.notna(x) else "" for x in header_row2.iloc[0].tolist()]), } return data, headers def convertAdfToText(self, adf_data): """Convert Atlassian Document Format (ADF) to plain text. Based on Atlassian Document Format specification for JIRA fields. Handles paragraphs, lists, text formatting, and other ADF node types. Args: adf_data: ADF object or None Returns: str: Plain text content, or empty string if None/invalid """ if not adf_data or not isinstance(adf_data, dict): return "" if adf_data.get("type") != "doc": return str(adf_data) if adf_data else "" content = adf_data.get("content", []) if not isinstance(content, list): return "" def extractTextFromContent(contentList, listLevel=0): """Recursively extract text from ADF content with proper formatting.""" textParts = [] listCounter = 1 for item in contentList: if not isinstance(item, dict): continue itemType = item.get("type", "") if itemType == "text": # Extract text content, preserving formatting text = item.get("text", "") marks = item.get("marks", []) # Handle text formatting (bold, italic, etc.) if marks: for mark in marks: if mark.get("type") == "strong": text = f"**{text}**" elif mark.get("type") == "em": text = f"*{text}*" elif mark.get("type") == "code": text = f"`{text}`" elif mark.get("type") == "link": attrs = mark.get("attrs", {}) href = attrs.get("href", "") if href: text = f"[{text}]({href})" textParts.append(text) elif itemType == "hardBreak": textParts.append("\n") elif itemType == "paragraph": paragraphContent = item.get("content", []) if paragraphContent: paragraphText = extractTextFromContent(paragraphContent, listLevel) if paragraphText.strip(): textParts.append(paragraphText) elif itemType == "bulletList": listContent = item.get("content", []) for listItem in listContent: if listItem.get("type") == "listItem": listItemContent = listItem.get("content", []) for listParagraph in listItemContent: if listParagraph.get("type") == "paragraph": listParagraphContent = listParagraph.get("content", []) if listParagraphContent: indent = " " * listLevel bulletText = extractTextFromContent(listParagraphContent, listLevel + 1) if bulletText.strip(): textParts.append(f"{indent}• {bulletText}") elif itemType == "orderedList": listContent = item.get("content", []) for listItem in listContent: if listItem.get("type") == "listItem": listItemContent = listItem.get("content", []) for listParagraph in listItemContent: if listParagraph.get("type") == "paragraph": listParagraphContent = listParagraph.get("content", []) if listParagraphContent: indent = " " * listLevel orderedText = extractTextFromContent(listParagraphContent, listLevel + 1) if orderedText.strip(): textParts.append(f"{indent}{listCounter}. {orderedText}") listCounter += 1 elif itemType == "listItem": # Handle nested list items listItemContent = item.get("content", []) if listItemContent: textParts.append(extractTextFromContent(listItemContent, listLevel)) elif itemType == "embedCard": # Handle embedded content (videos, etc.) attrs = item.get("attrs", {}) url = attrs.get("url", "") if url: textParts.append(f"[Embedded Content: {url}]") elif itemType == "codeBlock": # Handle code blocks codeContent = item.get("content", []) if codeContent: codeText = extractTextFromContent(codeContent, listLevel) if codeText.strip(): textParts.append(f"```\n{codeText}\n```") elif itemType == "blockquote": # Handle blockquotes quoteContent = item.get("content", []) if quoteContent: quoteText = extractTextFromContent(quoteContent, listLevel) if quoteText.strip(): textParts.append(f"> {quoteText}") elif itemType == "heading": # Handle headings headingContent = item.get("content", []) if headingContent: headingText = extractTextFromContent(headingContent, listLevel) if headingText.strip(): level = item.get("attrs", {}).get("level", 1) textParts.append(f"{'#' * level} {headingText}") elif itemType == "rule": # Handle horizontal rules textParts.append("---") else: # Handle unknown types by trying to extract content if "content" in item: contentText = extractTextFromContent(item.get("content", []), listLevel) if contentText.strip(): textParts.append(contentText) return "\n".join(textParts) result = extractTextFromContent(content) return result.strip() # Utility: dump all ticket fields (name -> field id) to a text file (generic) async def dumpTicketFieldsToFile(self, *, filepath: str = "ticket_sync_fields.txt", connectorType: str = "Jira", connectorParams: dict | None = None, taskSyncDefinition: dict | None = None, ) -> bool: """Write available ticket fields (name -> field id) to a text file (generic).""" try: connectorParams = connectorParams or {} taskSyncDefinition = taskSyncDefinition or self.TASK_SYNC_DEFINITION ticket_interface = await self.services.ticket.connectTicket( taskSyncDefinition=taskSyncDefinition, connectorType=connectorType, connectorParams=connectorParams, ) attributes = await ticket_interface.connector_ticket.readAttributes() if not attributes: logger.warning("No ticket attributes returned; nothing to write.") return False dir_name = os.path.dirname(filepath) if dir_name: os.makedirs(dir_name, exist_ok=True) with open(filepath, "w", encoding="utf-8") as f: for attr in attributes: f.write(f"'{attr.field_name}': ['get', ['fields', '{attr.field}']]\n") logger.info(f"Wrote {len(attributes)} ticket fields to {filepath}") return True except Exception as e: logger.error(f"Failed to dump ticket fields: {str(e)}") return False # Utility: dump actual ticket data for debugging (generic) async def dumpTicketDataToFile(self, *, filepath: str = "ticket_sync_data.txt", connectorType: str = "Jira", connectorParams: dict | None = None, taskSyncDefinition: dict | None = None, sampleLimit: int = 5, ) -> bool: """Write actual ticket data to a text file for debugging field mapping (generic).""" try: connectorParams = connectorParams or {} taskSyncDefinition = taskSyncDefinition or self.TASK_SYNC_DEFINITION ticket_interface = await self.services.ticket.connectTicket( taskSyncDefinition=taskSyncDefinition, connectorType=connectorType, connectorParams=connectorParams, ) tickets = await ticket_interface.connector_ticket.readTasks(limit=sampleLimit) if not tickets: logger.warning("No tickets returned; nothing to write.") return False dir_name = os.path.dirname(filepath) if dir_name: os.makedirs(dir_name, exist_ok=True) with open(filepath, "w", encoding="utf-8") as f: f.write("=== TICKET DATA DEBUG ===\n\n") for i, ticket in enumerate(tickets): f.write(f"--- TICKET {i+1} ---\n") f.write("Raw ticket data:\n") f.write(f"{ticket.data}\n\n") f.write("Field mapping analysis:\n") for fieldName, fieldPath in taskSyncDefinition.items(): if fieldPath[0] == 'get': try: value = ticket.data for key in fieldPath[1]: if isinstance(value, dict) and key in value: value = value[key] else: value = f"KEY_NOT_FOUND: {key}" break if isinstance(value, dict) and value.get("type") == "doc": pass # value = self.convertAdfToText(value) elif value is None: value = "" f.write(f" {fieldName}: {value}\n") except Exception as e: f.write(f" {fieldName}: ERROR - {str(e)}\n") f.write("\n" + "="*50 + "\n\n") logger.info(f"Wrote ticket data for {len(tickets)} tickets to {filepath}") return True except Exception as e: logger.error(f"Failed to dump ticket data: {str(e)}") return False # Main part of the module async def performSync(eventUser) -> bool: """Perform tickets to SharePoint synchronization This function is called by the scheduler and can be used independently. Args: eventUser: Event user to use for synchronization Returns: bool: True if synchronization was successful, False otherwise """ try: logger.info("Starting DG tickets sync...") if not eventUser: logger.error("Event user not provided - cannot perform sync") return False # Sync audit logging is handled by ManagerSyncDelta instance syncManager = ManagerSyncDelta(eventUser) success = await syncManager.syncTicketsOverSharepoint() if success: logger.info("DG tickets sync completed successfully") else: logger.error("DG tickets sync failed") return success except Exception as e: logger.error(f"Error in performing DG tickets sync: {str(e)}") return False # Create a global instance of ManagerSyncDelta to use for scheduled runs _sync_manager = None def startSyncManager(eventUser): """Initialize the global sync manager with the eventUser.""" global _sync_manager if _sync_manager is None: _sync_manager = ManagerSyncDelta(eventUser) logger.info("Global sync manager initialized with eventUser") try: # Register scheduled job based on environment using the manager's services if _sync_manager.APP_ENV_TYPE == "prod": _sync_manager.services.utils.eventRegisterCron( job_id="syncDelta.syncTicket", func=scheduledSync, cron_kwargs={"minute": "0,20,40"}, replace_existing=True, coalesce=True, max_instances=1, misfire_grace_time=1800, ) logger.info("Registered DG scheduler (every 20 minutes)") else: logger.info(f"Skipping DG scheduler registration for ticket sync in env: {_sync_manager.APP_ENV_TYPE}") except Exception as e: logger.error(f"Failed to register scheduler for DG sync: {str(e)}") return _sync_manager async def scheduledSync(): """Scheduled sync function that uses the global sync manager.""" try: global _sync_manager if _sync_manager and _sync_manager.eventUser: return await performSync(_sync_manager.eventUser) else: logger.error("Sync manager not properly initialized - no eventUser") return False except Exception as e: logger.error(f"Error in scheduled sync: {str(e)}") return False # Scheduler registration and initialization are triggered by startSyncManager(eventUser)