# Copyright (c) 2026 Patrick Motsch
# All rights reserved.
"""Incremental Redmine -> ``poweron_redmine`` mirror sync.

Strategy:
- **Full sync** when ``RedmineInstanceConfig.lastSyncAt`` is None or
  ``force=True`` is requested. Pulls every issue with ``status_id=*``
  (open + closed) for the configured project, paginated.
- **Incremental sync** otherwise. Pulls only issues whose ``updated_on``
  is greater than ``lastSyncAt - overlapSeconds`` (default 1h overlap to
  catch clock skew and missed updates).
- Each issue is upserted into ``RedmineTicketMirror`` (looked up by
  ``(featureInstanceId, redmineId)``).
- The full set of relations attached to each issue replaces any existing
  relation rows for that issue in ``RedmineRelationMirror``.

Concurrency: a per-instance ``asyncio.Lock`` prevents two concurrent
syncs for the same feature instance.

After every successful sync the in-memory stats cache is invalidated for
the instance.
"""
|
|
|
|
from __future__ import annotations

import asyncio
import logging
import time
from datetime import datetime
from typing import Any, Dict, List, Optional

from modules.connectors.connectorTicketsRedmine import RedmineApiError
from modules.datamodels.datamodelUam import User
from modules.features.redmine.datamodelRedmine import (
    RedmineInstanceConfig,
    RedmineRelationMirror,
    RedmineSyncResultDto,
    RedmineSyncStatusDto,
    RedmineTicketMirror,
)
from modules.features.redmine.interfaceFeatureRedmine import getInterface
from modules.features.redmine.serviceRedmineStatsCache import _getStatsCache
|
|
|
|
logger = logging.getLogger(__name__)


# Incremental syncs re-fetch a trailing window before ``lastSyncAt`` so that
# clock skew between app and Redmine, or updates landing mid-sync, are not
# silently skipped.
_INCREMENTAL_OVERLAP_SECONDS = 60 * 60  # 1h overlap on incremental syncs
_DEFAULT_PAGE_SIZE = 100
_MAX_PAGES_SAFETY = 5000  # 500k tickets safety cap

# One asyncio.Lock per feature instance (keyed by featureInstanceId) so two
# concurrent syncs for the same instance never interleave mirror writes.
# NOTE(review): entries are never evicted; the dict grows with the number of
# distinct instances seen — fine for a bounded instance set.
_locks: Dict[str, asyncio.Lock] = {}
|
|
|
|
|
|
def _lockFor(featureInstanceId: str) -> asyncio.Lock:
    """Return the per-instance sync lock, creating it on first use."""
    return _locks.setdefault(featureInstanceId, asyncio.Lock())
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Public API
|
|
# ---------------------------------------------------------------------------
|
|
|
|
async def runSync(
    currentUser: User,
    mandateId: Optional[str],
    featureInstanceId: str,
    *,
    force: bool = False,
    pageSize: int = _DEFAULT_PAGE_SIZE,
) -> RedmineSyncResultDto:
    """Run a (full or incremental) sync for the given feature instance.

    Raises:
        RuntimeError: when the instance has no connector or config.
        RedmineApiError: re-raised after recording the failure on the config.
    """
    iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
    connector = iface.resolveConnector(featureInstanceId)
    cfg = iface.getConfig(featureInstanceId)
    if not connector or not cfg:
        raise RuntimeError(
            f"Redmine instance {featureInstanceId} is not configured or inactive"
        )

    async with _lockFor(featureInstanceId):
        startedAt = time.monotonic()
        isFullSync = force or cfg.lastSyncAt is None

        # Incremental runs look back slightly before the last cursor so clock
        # skew and updates that landed mid-sync are still picked up.
        updatedFromIso: Optional[str] = None
        if not isFullSync and cfg.lastSyncAt is not None:
            cursorEpoch = max(0.0, cfg.lastSyncAt - _INCREMENTAL_OVERLAP_SECONDS)
            updatedFromIso = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(cursorEpoch))

        try:
            issues = await connector.listAllIssues(
                statusId="*",
                updatedOnFrom=updatedFromIso,
                pageSize=pageSize,
                maxPages=_MAX_PAGES_SAFETY,
                include=["relations"],
            )
        except RedmineApiError as err:
            # Persist the failure so getSyncStatus() can surface it, then let
            # the caller see the original error.
            iface.recordSyncFailure(featureInstanceId, str(err))
            raise

        # One timestamp for the whole batch: becomes both syncedAt on every
        # row and the next incremental cursor.
        syncEpoch = time.time()
        ticketCount = 0
        relationCount = 0
        for issue in issues:
            ticketCount += _upsertTicket(iface, featureInstanceId, mandateId, issue, syncEpoch)
            relationCount += _replaceRelations(iface, featureInstanceId, issue, syncEpoch)

        elapsedMs = int((time.monotonic() - startedAt) * 1000)
        iface.recordSyncSuccess(
            featureInstanceId,
            full=isFullSync,
            ticketsUpserted=ticketCount,
            durationMs=elapsedMs,
            lastSyncAt=syncEpoch,
        )
        # Mirror contents changed — cached aggregates are stale now.
        _getStatsCache().invalidateInstance(featureInstanceId)

        return RedmineSyncResultDto(
            instanceId=featureInstanceId,
            full=isFullSync,
            ticketsUpserted=ticketCount,
            relationsUpserted=relationCount,
            durationMs=elapsedMs,
            lastSyncAt=syncEpoch,
        )
|
|
|
|
|
|
def getSyncStatus(
    currentUser: User,
    mandateId: Optional[str],
    featureInstanceId: str,
) -> RedmineSyncStatusDto:
    """Summarize the mirror state: sync cursors, last error, and row counts."""
    iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
    cfg = iface.getConfig(featureInstanceId)

    def fromCfg(attr: str):
        # Tolerate a missing config row: every cfg-backed field becomes None.
        return getattr(cfg, attr) if cfg else None

    return RedmineSyncStatusDto(
        instanceId=featureInstanceId,
        lastSyncAt=fromCfg("lastSyncAt"),
        lastFullSyncAt=fromCfg("lastFullSyncAt"),
        lastSyncDurationMs=fromCfg("lastSyncDurationMs"),
        lastSyncTicketCount=fromCfg("lastSyncTicketCount"),
        lastSyncErrorAt=fromCfg("lastSyncErrorAt"),
        lastSyncErrorMessage=fromCfg("lastSyncErrorMessage"),
        mirroredTicketCount=iface.countMirroredTickets(featureInstanceId),
        mirroredRelationCount=iface.countMirroredRelations(featureInstanceId),
    )
|
|
|
|
|
|
async def upsertSingleTicket(
    currentUser: User,
    mandateId: Optional[str],
    featureInstanceId: str,
    issueId: int,
) -> int:
    """Re-fetch one issue from Redmine and upsert it into the mirror.

    Used by the write paths in ``serviceRedmine`` so the mirror stays
    consistent after every create / update without paying for a full sync.

    Returns:
        Number of relation rows replaced for the issue.
    """
    iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
    connector = iface.resolveConnector(featureInstanceId)
    if not connector:
        raise RuntimeError("Redmine instance not configured")

    issue = await connector.getIssue(int(issueId), includeRelations=True)
    syncEpoch = time.time()
    _upsertTicket(iface, featureInstanceId, mandateId, issue, syncEpoch)
    replacedRelations = _replaceRelations(iface, featureInstanceId, issue, syncEpoch)
    # The mirror changed, so cached aggregates for this instance are stale.
    _getStatsCache().invalidateInstance(featureInstanceId)
    return replacedRelations
|
|
|
|
|
|
def deleteMirroredTicket(
    currentUser: User,
    mandateId: Optional[str],
    featureInstanceId: str,
    issueId: int,
) -> bool:
    """Drop a ticket and its relations from the mirror after a successful Redmine DELETE.

    Returns:
        Whether a mirrored ticket row was actually deleted.
    """
    iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
    redmineId = int(issueId)
    ticketWasDeleted = iface.deleteMirroredTicket(featureInstanceId, redmineId)
    # Relations are removed unconditionally — stale relation rows may exist
    # even when the ticket row itself was already gone.
    iface.deleteMirroredRelationsForIssue(featureInstanceId, redmineId)
    _getStatsCache().invalidateInstance(featureInstanceId)
    return ticketWasDeleted
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Per-issue upsert helpers (sync, run inside the per-instance lock)
|
|
# ---------------------------------------------------------------------------
|
|
|
|
def _upsertTicket(
    iface,
    featureInstanceId: str,
    mandateId: Optional[str],
    issue: Dict[str, Any],
    nowEpoch: float,
) -> int:
    """Upsert one raw Redmine issue payload into ``RedmineTicketMirror``.

    Returns:
        1 when a row was upserted, 0 when the issue carries no ``id``.
    """
    redmine_id = issue.get("id")
    if redmine_id is None:
        return 0
    # Guard against a missing config row: upsertSingleTicket() validates only
    # the connector, so getConfig() can legitimately return None here — the
    # previous `.schemaCache` access would then raise AttributeError.
    cfg = iface.getConfig(featureInstanceId)
    schema_cache = (cfg.schemaCache if cfg else None) or {}
    statuses_lookup = schema_cache.get("statuses") or []
    is_closed = _statusIsClosed(issue.get("status") or {}, statuses_lookup)
    record = _ticketRecordFromIssue(issue, featureInstanceId, mandateId, is_closed, nowEpoch)
    iface.upsertMirroredTicket(featureInstanceId, int(redmine_id), record)
    return 1
|
|
|
|
|
|
def _replaceRelations(
|
|
iface,
|
|
featureInstanceId: str,
|
|
issue: Dict[str, Any],
|
|
nowEpoch: float,
|
|
) -> int:
|
|
issue_id = issue.get("id")
|
|
relations = issue.get("relations") or []
|
|
if issue_id is None:
|
|
return 0
|
|
iface.deleteMirroredRelationsForIssue(featureInstanceId, int(issue_id))
|
|
inserted = 0
|
|
for r in relations:
|
|
rid = r.get("id")
|
|
if rid is None:
|
|
continue
|
|
iface.insertMirroredRelation(
|
|
featureInstanceId,
|
|
{
|
|
"featureInstanceId": featureInstanceId,
|
|
"redmineRelationId": int(rid),
|
|
"issueId": int(r.get("issue_id") or 0),
|
|
"issueToId": int(r.get("issue_to_id") or 0),
|
|
"relationType": str(r.get("relation_type") or "relates"),
|
|
"delay": r.get("delay"),
|
|
"syncedAt": nowEpoch,
|
|
},
|
|
)
|
|
inserted += 1
|
|
return inserted
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Pure helpers
|
|
# ---------------------------------------------------------------------------
|
|
|
|
def _statusIsClosed(status: Dict[str, Any], statusesLookup: List[Dict[str, Any]]) -> bool:
|
|
"""Best-effort: prefer the schemaCache; fall back to inspecting the
|
|
raw issue (Redmine sets ``is_closed`` on the status object only when
|
|
explicitly requested)."""
|
|
sid = status.get("id")
|
|
if sid is None:
|
|
return False
|
|
for s in statusesLookup:
|
|
if s.get("id") == sid:
|
|
return bool(s.get("isClosed"))
|
|
return bool(status.get("is_closed"))
|
|
|
|
|
|
def _parseRedmineDateToEpoch(value: Optional[str]) -> Optional[float]:
|
|
if not value:
|
|
return None
|
|
try:
|
|
from datetime import datetime
|
|
s = value.replace("Z", "+00:00")
|
|
return datetime.fromisoformat(s).timestamp()
|
|
except Exception:
|
|
return None
|
|
|
|
|
|
def _ticketRecordFromIssue(
    issue: Dict[str, Any],
    featureInstanceId: str,
    mandateId: Optional[str],
    isClosed: bool,
    nowEpoch: float,
) -> Dict[str, Any]:
    """Flatten a raw Redmine issue payload into a mirror-row dict."""

    def ref(key: str) -> Dict[str, Any]:
        # Nested reference objects ({"id": ..., "name": ...}); missing -> {}.
        return issue.get(key) or {}

    tracker, status, priority = ref("tracker"), ref("status"), ref("priority")
    assigned, author, parent = ref("assigned_to"), ref("author"), ref("parent")
    fixed_version, category = ref("fixed_version"), ref("category")

    created_on = issue.get("created_on")
    updated_on = issue.get("updated_on")
    updated_ts = _parseRedmineDateToEpoch(updated_on)

    record: Dict[str, Any] = {
        "featureInstanceId": featureInstanceId,
        "mandateId": mandateId,
        "redmineId": int(issue.get("id")),
        "subject": str(issue.get("subject") or ""),
        "description": str(issue.get("description") or ""),
        "trackerId": tracker.get("id"),
        "trackerName": tracker.get("name"),
        "statusId": status.get("id"),
        "statusName": status.get("name"),
        "isClosed": bool(isClosed),
        "priorityId": priority.get("id"),
        "priorityName": priority.get("name"),
        "assignedToId": assigned.get("id"),
        "assignedToName": assigned.get("name"),
        "authorId": author.get("id"),
        "authorName": author.get("name"),
        "parentId": parent.get("id"),
        "fixedVersionId": fixed_version.get("id"),
        "fixedVersionName": fixed_version.get("name"),
        "categoryId": category.get("id"),
        "categoryName": category.get("name"),
        "createdOn": created_on,
        "updatedOn": updated_on,
        "createdOnTs": _parseRedmineDateToEpoch(created_on),
        "updatedOnTs": updated_ts,
        # Approximation: Redmine doesn't expose a dedicated "closed_on"
        # timestamp via the issue endpoint. For closed tickets the last
        # updatedOn is the best stable proxy without scanning journals.
        "closedOnTs": updated_ts if bool(isClosed) else None,
        "customFields": list(issue.get("custom_fields") or []),
        "raw": issue,
        "syncedAt": nowEpoch,
    }
    return record
|