# gateway/modules/features/redmine/serviceRedmineSync.py
# Copyright (c) 2026 Patrick Motsch
# All rights reserved.
"""Incremental Redmine -> ``poweron_redmine`` mirror sync.
Strategy:
- **Full sync** when ``RedmineInstanceConfig.lastSyncAt`` is None or
``force=True`` is requested. Pulls every issue with ``status_id=*``
(open + closed) for the configured project, paginated.
- **Incremental sync** otherwise. Pulls only issues whose ``updated_on``
is greater than ``lastSyncAt - overlapSeconds`` (default 1h overlap to
catch clock skew and missed updates).
- Each issue is upserted into ``RedmineTicketMirror`` (looked up by
``(featureInstanceId, redmineId)``).
- The full set of relations attached to each issue replaces any existing
relation rows for that issue in ``RedmineRelationMirror``.
Concurrency: a per-instance ``asyncio.Lock`` prevents two concurrent
syncs for the same feature instance.
After every successful sync the in-memory stats cache is invalidated for
the instance.
"""
from __future__ import annotations
import asyncio
import logging
import time
from typing import Any, Dict, List, Optional
from modules.connectors.connectorTicketsRedmine import RedmineApiError
from modules.datamodels.datamodelUam import User
from modules.features.redmine.datamodelRedmine import (
RedmineInstanceConfig,
RedmineRelationMirror,
RedmineSyncResultDto,
RedmineSyncStatusDto,
RedmineTicketMirror,
)
from modules.features.redmine.interfaceFeatureRedmine import getInterface
from modules.features.redmine.serviceRedmineStatsCache import getStatsCache
logger = logging.getLogger(__name__)
_INCREMENTAL_OVERLAP_SECONDS = 60 * 60 # 1h overlap on incremental syncs
_DEFAULT_PAGE_SIZE = 100
_MAX_PAGES_SAFETY = 5000 # 500k tickets safety cap
_locks: Dict[str, asyncio.Lock] = {}
def _lockFor(featureInstanceId: str) -> asyncio.Lock:
if featureInstanceId not in _locks:
_locks[featureInstanceId] = asyncio.Lock()
return _locks[featureInstanceId]
# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------
async def runSync(
    currentUser: User,
    mandateId: Optional[str],
    featureInstanceId: str,
    *,
    force: bool = False,
    pageSize: int = _DEFAULT_PAGE_SIZE,
) -> RedmineSyncResultDto:
    """Run a (full or incremental) sync for the given feature instance.

    A *full* sync runs when ``force=True`` or the instance has never synced
    (``cfg.lastSyncAt is None``); otherwise an *incremental* sync pulls only
    issues updated since ``lastSyncAt`` minus the 1h overlap window.

    The per-instance lock is held for the whole run, so concurrent callers
    for the same ``featureInstanceId`` serialize.

    :param force: request a full sync regardless of the stored cursor.
    :param pageSize: Redmine pagination size forwarded to the connector.
    :raises RuntimeError: if the instance is unconfigured or inactive.
    :raises RedmineApiError: re-raised after the failure is recorded on the
        instance config (so ``getSyncStatus`` can surface it).
    """
    iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
    connector = iface.resolveConnector(featureInstanceId)
    cfg = iface.getConfig(featureInstanceId)
    if not connector or not cfg:
        raise RuntimeError(
            f"Redmine instance {featureInstanceId} is not configured or inactive"
        )
    async with _lockFor(featureInstanceId):
        started = time.monotonic()
        # CRITICAL: ensure the schema cache (especially the per-status
        # ``isClosed`` map) is populated BEFORE we iterate issues. Redmine's
        # /issues.json endpoint only returns ``{id, name}`` for the status
        # object -- the closed/open flag lives in /issue_statuses.json. If
        # the cache is empty here, every freshly-synced ticket would land
        # with ``isClosed=False`` and the Stats page would be useless.
        await _ensureSchemaWarm(currentUser, mandateId, featureInstanceId)
        cfg = iface.getConfig(featureInstanceId)  # re-read to get warm cache
        full = force or cfg.lastSyncAt is None
        updated_from_iso: Optional[str] = None
        if not full and cfg.lastSyncAt is not None:
            # Pull the cursor back by the overlap window so clock skew or a
            # previously-missed update cannot create a permanent gap.
            cursor_epoch = max(0.0, cfg.lastSyncAt - _INCREMENTAL_OVERLAP_SECONDS)
            updated_from_iso = time.strftime(
                "%Y-%m-%dT%H:%M:%SZ", time.gmtime(cursor_epoch)
            )
        try:
            issues = await connector.listAllIssues(
                statusId="*",  # open AND closed issues
                updatedOnFrom=updated_from_iso,  # None => no filter (full sync)
                pageSize=pageSize,
                maxPages=_MAX_PAGES_SAFETY,
                include=["relations"],
            )
        except RedmineApiError as e:
            # Persist the failure on the config, then let the caller see the
            # original error unchanged.
            iface.recordSyncFailure(featureInstanceId, str(e))
            raise
        tickets_upserted = 0
        relations_upserted = 0
        # One timestamp for the whole batch: every row written below carries
        # the same syncedAt, and it becomes the new lastSyncAt cursor.
        now_epoch = time.time()
        for issue in issues:
            tickets_upserted += _upsertTicket(iface, featureInstanceId, mandateId, issue, now_epoch)
            relations_upserted += _replaceRelations(iface, featureInstanceId, issue, now_epoch)
        # Self-healing pass: re-apply ``isClosed`` to every mirrored ticket
        # using the now-warm schema cache. Fixes pre-existing rows that were
        # synced before the cache was populated (cheap; mirror-local only).
        flags_fixed = _rebuildIsClosedFromSchema(iface, featureInstanceId, now_epoch)
        if flags_fixed:
            logger.info(
                f"runSync({featureInstanceId}): corrected isClosed on {flags_fixed} mirror rows"
            )
        duration_ms = int((time.monotonic() - started) * 1000)
        iface.recordSyncSuccess(
            featureInstanceId,
            full=full,
            ticketsUpserted=tickets_upserted,
            durationMs=duration_ms,
            lastSyncAt=now_epoch,
        )
        # Stats are derived from the mirror -- drop cached aggregates now
        # that the mirror changed.
        getStatsCache().invalidateInstance(featureInstanceId)
        return RedmineSyncResultDto(
            instanceId=featureInstanceId,
            full=full,
            ticketsUpserted=tickets_upserted,
            relationsUpserted=relations_upserted,
            durationMs=duration_ms,
            lastSyncAt=now_epoch,
        )
def getSyncStatus(
    currentUser: User,
    mandateId: Optional[str],
    featureInstanceId: str,
) -> RedmineSyncStatusDto:
    """Return sync bookkeeping plus current mirror row counts for an instance.

    Tolerates an unconfigured instance: every config-derived field comes
    back as ``None`` when no config row exists.
    """
    iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
    cfg = iface.getConfig(featureInstanceId)

    def _fromCfg(attr: str):
        # Single place for the "no config yet" fallback.
        return getattr(cfg, attr) if cfg else None

    mirrored_tickets = iface.countMirroredTickets(featureInstanceId)
    mirrored_relations = iface.countMirroredRelations(featureInstanceId)
    return RedmineSyncStatusDto(
        instanceId=featureInstanceId,
        lastSyncAt=_fromCfg("lastSyncAt"),
        lastFullSyncAt=_fromCfg("lastFullSyncAt"),
        lastSyncDurationMs=_fromCfg("lastSyncDurationMs"),
        lastSyncTicketCount=_fromCfg("lastSyncTicketCount"),
        lastSyncErrorAt=_fromCfg("lastSyncErrorAt"),
        lastSyncErrorMessage=_fromCfg("lastSyncErrorMessage"),
        mirroredTicketCount=mirrored_tickets,
        mirroredRelationCount=mirrored_relations,
    )
async def upsertSingleTicket(
    currentUser: User,
    mandateId: Optional[str],
    featureInstanceId: str,
    issueId: int,
) -> int:
    """Re-fetch a single issue from Redmine and mirror it.

    Called from the write paths in ``serviceRedmine`` so every create /
    update is reflected in the mirror without waiting for a full sync.

    :returns: number of relation rows replaced for the issue.
    :raises RuntimeError: when no connector is configured for the instance.
    """
    iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
    connector = iface.resolveConnector(featureInstanceId)
    if not connector:
        raise RuntimeError("Redmine instance not configured")
    issue = await connector.getIssue(int(issueId), includeRelations=True)
    synced_at = time.time()
    _upsertTicket(iface, featureInstanceId, mandateId, issue, synced_at)
    replaced = _replaceRelations(iface, featureInstanceId, issue, synced_at)
    getStatsCache().invalidateInstance(featureInstanceId)
    return replaced
def deleteMirroredTicket(
    currentUser: User,
    mandateId: Optional[str],
    featureInstanceId: str,
    issueId: int,
) -> bool:
    """Drop a ticket and its relations from the mirror after a successful Redmine DELETE."""
    iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
    issue = int(issueId)
    removed = iface.deleteMirroredTicket(featureInstanceId, issue)
    # Relations are purged regardless of whether the ticket row existed.
    iface.deleteMirroredRelationsForIssue(featureInstanceId, issue)
    getStatsCache().invalidateInstance(featureInstanceId)
    return removed
# ---------------------------------------------------------------------------
# Per-issue upsert helpers (sync, run inside the per-instance lock)
# ---------------------------------------------------------------------------
def _upsertTicket(
    iface,
    featureInstanceId: str,
    mandateId: Optional[str],
    issue: Dict[str, Any],
    nowEpoch: float,
) -> int:
    """Upsert one raw issue payload into the ticket mirror.

    Returns 1 when a row was written, 0 when the payload carried no id.
    """
    redmine_id = issue.get("id")
    if redmine_id is None:
        return 0  # defensive: cannot key a mirror row without the Redmine id
    # Resolve the open/closed flag via the schema cache -- the inline status
    # object on the issue payload does not carry it reliably.
    schema = iface.getConfig(featureInstanceId).schemaCache or {}
    closed = _statusIsClosed(issue.get("status") or {}, schema.get("statuses") or [])
    row = _ticketRecordFromIssue(issue, featureInstanceId, mandateId, closed, nowEpoch)
    iface.upsertMirroredTicket(featureInstanceId, int(redmine_id), row)
    return 1
def _replaceRelations(
iface,
featureInstanceId: str,
issue: Dict[str, Any],
nowEpoch: float,
) -> int:
issue_id = issue.get("id")
relations = issue.get("relations") or []
if issue_id is None:
return 0
iface.deleteMirroredRelationsForIssue(featureInstanceId, int(issue_id))
inserted = 0
for r in relations:
rid = r.get("id")
if rid is None:
continue
iface.insertMirroredRelation(
featureInstanceId,
{
"featureInstanceId": featureInstanceId,
"redmineRelationId": int(rid),
"issueId": int(r.get("issue_id") or 0),
"issueToId": int(r.get("issue_to_id") or 0),
"relationType": str(r.get("relation_type") or "relates"),
"delay": r.get("delay"),
"syncedAt": nowEpoch,
},
)
inserted += 1
return inserted
# ---------------------------------------------------------------------------
# Schema cache warm-up + post-sync isClosed correction
# ---------------------------------------------------------------------------
async def _ensureSchemaWarm(
    currentUser: User,
    mandateId: Optional[str],
    featureInstanceId: str,
) -> None:
    """Populate ``cfg.schemaCache['statuses']`` (with per-status ``isClosed``)
    if it is missing.

    Redmine's ``/issues.json`` never exposes ``is_closed`` on the inline
    status object, so the schema cache is the only source for the flag and a
    warm cache is a prerequisite for every sync. Failures are logged and
    swallowed -- the sync proceeds with best-effort flags.
    """
    iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
    cfg = iface.getConfig(featureInstanceId)
    if cfg is None:
        return
    if (cfg.schemaCache or {}).get("statuses"):
        return  # already warm, nothing to do
    # Imported lazily: serviceRedmine imports this module, so a top-level
    # import would be circular.
    from modules.features.redmine.serviceRedmine import getProjectMeta
    try:
        await getProjectMeta(currentUser, mandateId, featureInstanceId, forceRefresh=True)
    except Exception as e:
        logger.warning(
            f"_ensureSchemaWarm({featureInstanceId}): could not warm schema cache: {e} "
            "-- isClosed flags may be inaccurate until next successful schema fetch."
        )
def _rebuildIsClosedFromSchema(iface, featureInstanceId: str, nowEpoch: float) -> int:
"""Walk the mirror once and fix ``isClosed`` (and ``closedOnTs``) for any
ticket whose stored value disagrees with the current schema cache.
Returns the number of rows that were actually corrected. A no-op when
the schema cache has no statuses (logged once, then the caller can
decide whether to retry).
"""
cfg = iface.getConfig(featureInstanceId)
if cfg is None:
return 0
statuses = (cfg.schemaCache or {}).get("statuses") or []
if not statuses:
return 0
closed_ids = {int(s.get("id")) for s in statuses if s.get("id") is not None and s.get("isClosed")}
rows = iface.listMirroredTickets(featureInstanceId)
corrections = 0
for row in rows:
sid = row.get("statusId")
if sid is None:
continue
should_be_closed = int(sid) in closed_ids
if bool(row.get("isClosed")) == should_be_closed:
continue
# Only the closed/open flag (and the derived closedOnTs) are
# touched here -- everything else came from Redmine and stays.
update = {
"isClosed": bool(should_be_closed),
"closedOnTs": float(row.get("updatedOnTs")) if (should_be_closed and row.get("updatedOnTs") is not None) else None,
"syncedAt": nowEpoch,
}
try:
iface.upsertMirroredTicket(featureInstanceId, int(row.get("redmineId")), {**row, **update})
corrections += 1
except Exception as e:
logger.warning(
f"_rebuildIsClosedFromSchema({featureInstanceId}): could not fix ticket "
f"#{row.get('redmineId')}: {e}"
)
return corrections
# ---------------------------------------------------------------------------
# Pure helpers
# ---------------------------------------------------------------------------
def _statusIsClosed(status: Dict[str, Any], statusesLookup: List[Dict[str, Any]]) -> bool:
"""Best-effort: prefer the schemaCache; fall back to inspecting the
raw issue (Redmine sets ``is_closed`` on the status object only when
explicitly requested)."""
sid = status.get("id")
if sid is None:
return False
for s in statusesLookup:
if s.get("id") == sid:
return bool(s.get("isClosed"))
return bool(status.get("is_closed"))
def _parseRedmineDateToEpoch(value: Optional[str]) -> Optional[float]:
if not value:
return None
try:
from datetime import datetime
s = value.replace("Z", "+00:00")
return datetime.fromisoformat(s).timestamp()
except Exception:
return None
def _ticketRecordFromIssue(
    issue: Dict[str, Any],
    featureInstanceId: str,
    mandateId: Optional[str],
    isClosed: bool,
    nowEpoch: float,
) -> Dict[str, Any]:
    """Flatten a raw Redmine issue payload into a mirror-row dict.

    Nested reference objects (tracker/status/priority/...) are reduced to
    their ``id``/``name`` pair; the untouched payload is preserved under
    ``raw`` for anything the flat columns don't cover.
    """
    def _obj(key: str) -> Dict[str, Any]:
        # Missing or null nested references collapse to {} so .get() is safe.
        return issue.get(key) or {}

    tracker = _obj("tracker")
    status = _obj("status")
    priority = _obj("priority")
    assigned = _obj("assigned_to")
    author = _obj("author")
    parent = _obj("parent")
    version = _obj("fixed_version")
    category = _obj("category")
    created_on = issue.get("created_on")
    updated_on = issue.get("updated_on")
    updated_ts = _parseRedmineDateToEpoch(updated_on)
    closed = bool(isClosed)
    return {
        "featureInstanceId": featureInstanceId,
        "mandateId": mandateId,
        "redmineId": int(issue.get("id")),
        "subject": str(issue.get("subject") or ""),
        "description": str(issue.get("description") or ""),
        "trackerId": tracker.get("id"),
        "trackerName": tracker.get("name"),
        "statusId": status.get("id"),
        "statusName": status.get("name"),
        "isClosed": closed,
        "priorityId": priority.get("id"),
        "priorityName": priority.get("name"),
        "assignedToId": assigned.get("id"),
        "assignedToName": assigned.get("name"),
        "authorId": author.get("id"),
        "authorName": author.get("name"),
        "parentId": parent.get("id"),
        "fixedVersionId": version.get("id"),
        "fixedVersionName": version.get("name"),
        "categoryId": category.get("id"),
        "categoryName": category.get("name"),
        "createdOn": created_on,
        "updatedOn": updated_on,
        "createdOnTs": _parseRedmineDateToEpoch(created_on),
        "updatedOnTs": updated_ts,
        # Redmine's issue endpoint exposes no dedicated closed_on value; for
        # closed tickets the last updatedOn is the best stable proxy without
        # scanning journals.
        "closedOnTs": updated_ts if closed else None,
        "customFields": list(issue.get("custom_fields") or []),
        "raw": issue,
        "syncedAt": nowEpoch,
    }