403 lines
13 KiB
Python
403 lines
13 KiB
Python
# Copyright (c) 2026 Patrick Motsch
# All rights reserved.
|
"""Redmine statistics aggregator.
|
|
|
|
Returns raw buckets in :class:`RedmineStatsDto`. The frontend
|
|
(``RedmineStatsPage.tsx``) maps these onto ``ReportSection`` for
|
|
``FormGeneratorReport``. Decision 2026-04-21.
|
|
|
|
Sections produced:
|
|
- KPIs: total / open / closed / closedInPeriod / createdInPeriod / orphans
|
|
- statusByTracker (stacked bar)
|
|
- throughput (line chart, created vs closed per bucket)
|
|
- topAssignees (top-10 horizontal bar)
|
|
- relationDistribution (pie)
|
|
- backlogAging (open issues by age since last update)
|
|
|
|
The whole result is cached in :mod:`serviceRedmineStatsCache` keyed by
|
|
``(instanceId, dateFrom, dateTo, bucket, trackerIds)`` with a 90 s TTL.
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
import datetime as _dt
|
|
import logging
|
|
from collections import Counter, defaultdict
|
|
from typing import Any, Dict, Iterable, List, Optional, Tuple
|
|
|
|
from modules.datamodels.datamodelUam import User
|
|
from modules.features.redmine.datamodelRedmine import (
|
|
RedmineAgingBucket,
|
|
RedmineAssigneeBucket,
|
|
RedmineFieldSchemaDto,
|
|
RedmineRelationDistributionEntry,
|
|
RedmineStatsDto,
|
|
RedmineStatsKpis,
|
|
RedmineStatusByTrackerEntry,
|
|
RedmineThroughputBucket,
|
|
RedmineTicketDto,
|
|
)
|
|
from modules.features.redmine.serviceRedmineStatsCache import _getStatsCache
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Public entry
|
|
# ---------------------------------------------------------------------------
|
|
|
|
async def getStats(
    currentUser: User,
    mandateId: Optional[str],
    featureInstanceId: str,
    *,
    dateFrom: Optional[str] = None,
    dateTo: Optional[str] = None,
    bucket: str = "week",
    trackerIds: Optional[List[int]] = None,
) -> RedmineStatsDto:
    """Compute (or fetch from cache) the full statistics payload."""
    # Normalise inputs first so the cache key is canonical.
    normalizedBucket = (bucket or "week").lower()
    if normalizedBucket not in {"day", "week", "month"}:
        normalizedBucket = "week"
    normalizedTrackerIds: List[int] = sorted({int(t) for t in trackerIds or []})

    statsCache = _getStatsCache()
    key = statsCache.buildKey(
        featureInstanceId, dateFrom, dateTo, normalizedBucket, normalizedTrackerIds
    )
    hit = statsCache.get(key)
    if hit is not None:
        return hit

    # Lazy import: keeps the pure aggregation helpers below importable
    # without dragging in aiohttp / DB connector at module load.
    from modules.features.redmine.serviceRedmine import (
        getProjectMeta,
        listTickets,
    )

    schema = await getProjectMeta(currentUser, mandateId, featureInstanceId)

    # NOTE(review): listTickets is called without ``await`` while
    # getProjectMeta is awaited -- confirm listTickets is synchronous.
    tickets = listTickets(
        currentUser,
        mandateId,
        featureInstanceId,
        trackerIds=normalizedTrackerIds or None,
        statusFilter="*",
    )

    result = _aggregate(
        tickets,
        schema=schema,
        rootTrackerId=schema.rootTrackerId,
        dateFrom=dateFrom,
        dateTo=dateTo,
        bucket=normalizedBucket,
        trackerIdsFilter=normalizedTrackerIds,
        instanceId=featureInstanceId,
    )

    statsCache.set(key, result)
    return result
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Pure aggregation (testable without I/O)
|
|
# ---------------------------------------------------------------------------
|
|
|
|
def _aggregate(
    tickets: List[RedmineTicketDto],
    *,
    schema: Optional[RedmineFieldSchemaDto],
    rootTrackerId: Optional[int],
    dateFrom: Optional[str],
    dateTo: Optional[str],
    bucket: str,
    trackerIdsFilter: List[int],
    instanceId: str,
) -> RedmineStatsDto:
    """Build the complete stats DTO from an in-memory ticket list (no I/O)."""
    windowStart = _parseIsoDate(dateFrom)
    windowEnd = _parseIsoDate(dateTo)

    # Each section builder is pure; they are evaluated in the same order
    # as the DTO fields below.
    return RedmineStatsDto(
        instanceId=instanceId,
        dateFrom=dateFrom,
        dateTo=dateTo,
        bucket=bucket,
        trackerIds=trackerIdsFilter,
        kpis=_kpis(tickets, rootTrackerId, windowStart, windowEnd),
        statusByTracker=_statusByTracker(tickets, schema),
        throughput=_throughput(tickets, windowStart, windowEnd, bucket),
        topAssignees=_topAssignees(tickets, limit=10),
        relationDistribution=_relationDistribution(tickets),
        backlogAging=_backlogAging(tickets, now=_utcNow()),
    )
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Section builders
|
|
# ---------------------------------------------------------------------------
|
|
|
|
def _kpis(
    tickets: List[RedmineTicketDto],
    rootTrackerId: Optional[int],
    periodFrom: Optional[_dt.datetime],
    periodTo: Optional[_dt.datetime],
) -> RedmineStatsKpis:
    """Headline counters: totals plus created/closed inside the period.

    ``closedInPeriod`` uses ``updatedOn`` as a proxy for the close date
    (no dedicated closed-on timestamp is available on the DTO).
    """
    total = len(tickets)
    closedCount = sum(1 for t in tickets if t.isClosed)
    openCount = total - closedCount

    createdInPeriod = 0
    closedInPeriod = 0
    for t in tickets:
        createdAt = _parseIsoDate(t.createdOn)
        updatedAt = _parseIsoDate(t.updatedOn)
        if createdAt is not None and _inPeriod(createdAt, periodFrom, periodTo):
            createdInPeriod += 1
        if t.isClosed and updatedAt is not None and _inPeriod(updatedAt, periodFrom, periodTo):
            closedInPeriod += 1

    return RedmineStatsKpis(
        total=total,
        open=openCount,
        closed=closedCount,
        closedInPeriod=closedInPeriod,
        createdInPeriod=createdInPeriod,
        orphans=_countOrphans(tickets, rootTrackerId),
    )
|
|
|
|
|
|
def _countOrphans(
|
|
tickets: List[RedmineTicketDto], rootTrackerId: Optional[int]
|
|
) -> int:
|
|
"""A ticket is an orphan if it is not a root user-story AND not
|
|
reachable (via parent or any relation, in either direction) to any
|
|
root user-story within the same loaded set."""
|
|
if not tickets:
|
|
return 0
|
|
by_id: Dict[int, RedmineTicketDto] = {t.id: t for t in tickets}
|
|
roots: set[int] = {
|
|
t.id for t in tickets if rootTrackerId and t.trackerId == rootTrackerId
|
|
}
|
|
if not roots:
|
|
return sum(1 for t in tickets if not (rootTrackerId and t.trackerId == rootTrackerId))
|
|
|
|
adjacency: Dict[int, set[int]] = defaultdict(set)
|
|
for t in tickets:
|
|
if t.parentId is not None and t.parentId in by_id:
|
|
adjacency[t.id].add(t.parentId)
|
|
adjacency[t.parentId].add(t.id)
|
|
for r in t.relations:
|
|
for a, b in ((r.issueId, r.issueToId), (r.issueToId, r.issueId)):
|
|
if a in by_id and b in by_id and a != b:
|
|
adjacency[a].add(b)
|
|
|
|
reached: set[int] = set(roots)
|
|
frontier: List[int] = list(roots)
|
|
while frontier:
|
|
nxt: List[int] = []
|
|
for tid in frontier:
|
|
for neighbour in adjacency.get(tid, ()): # type: ignore[arg-type]
|
|
if neighbour not in reached:
|
|
reached.add(neighbour)
|
|
nxt.append(neighbour)
|
|
frontier = nxt
|
|
return sum(1 for t in tickets if t.id not in reached)
|
|
|
|
|
|
def _statusByTracker(
    tickets: List[RedmineTicketDto], schema: Optional[RedmineFieldSchemaDto]
) -> List[RedmineStatusByTrackerEntry]:
    """Per-tracker status histogram, largest tracker first.

    ``schema`` is currently unused; the parameter is kept for interface
    stability (callers pass it) and possible future status ordering.
    """
    histogram: Dict[Tuple[Optional[int], str], Counter] = defaultdict(Counter)
    for ticket in tickets:
        trackerKey = (ticket.trackerId, ticket.trackerName or "(unbekannt)")
        histogram[trackerKey][ticket.statusName or "(unbekannt)"] += 1

    entries = [
        RedmineStatusByTrackerEntry(
            trackerId=trackerId,
            trackerName=trackerName,
            countsByStatus=dict(statusCounter),
            total=sum(statusCounter.values()),
        )
        for (trackerId, trackerName), statusCounter in histogram.items()
    ]
    return sorted(entries, key=lambda entry: entry.total, reverse=True)
|
|
|
|
|
|
def _throughput(
    tickets: List[RedmineTicketDto],
    periodFrom: Optional[_dt.datetime],
    periodTo: Optional[_dt.datetime],
    bucket: str,
) -> List[RedmineThroughputBucket]:
    """Created-vs-closed counts per time bucket.

    Close dates are approximated by ``updatedOn``. Missing period bounds
    fall back to the min/max timestamp found in the ticket set.
    """
    if not tickets:
        return []

    if periodFrom is None or periodTo is None:
        timestamps: List[_dt.datetime] = []
        for t in tickets:
            for raw in (t.createdOn, t.updatedOn):
                parsed = _parseIsoDate(raw)
                if parsed is not None:
                    timestamps.append(parsed)
        if not timestamps:
            return []
        if periodFrom is None:
            periodFrom = min(timestamps)
        if periodTo is None:
            periodTo = max(timestamps)

    createdPerBucket: Counter = Counter()
    closedPerBucket: Counter = Counter()
    for t in tickets:
        createdAt = _parseIsoDate(t.createdOn)
        if createdAt is not None and _inPeriod(createdAt, periodFrom, periodTo):
            createdPerBucket[_bucketKey(createdAt, bucket)] += 1
        if not t.isClosed:
            continue
        updatedAt = _parseIsoDate(t.updatedOn)
        if updatedAt is not None and _inPeriod(updatedAt, periodFrom, periodTo):
            closedPerBucket[_bucketKey(updatedAt, bucket)] += 1

    allKeys = sorted(set(createdPerBucket) | set(closedPerBucket))
    return [
        RedmineThroughputBucket(
            bucketKey=key,
            label=_bucketLabel(key, bucket),
            created=int(createdPerBucket.get(key, 0)),
            closed=int(closedPerBucket.get(key, 0)),
        )
        for key in allKeys
    ]
|
|
|
|
|
|
def _topAssignees(
    tickets: List[RedmineTicketDto], *, limit: int = 10
) -> List[RedmineAssigneeBucket]:
    """Open-ticket counts for the ``limit`` busiest assignees.

    Closed tickets are excluded; ties keep first-encountered order.
    """
    openCounts: Counter = Counter(
        (t.assignedToId, t.assignedToName or "(nicht zugewiesen)")
        for t in tickets
        if not t.isClosed
    )
    return [
        RedmineAssigneeBucket(assignedToId=assigneeId, name=assigneeName, open=count)
        for (assigneeId, assigneeName), count in openCounts.most_common(limit)
    ]
|
|
|
|
|
|
def _relationDistribution(
    tickets: List[RedmineTicketDto],
) -> List[RedmineRelationDistributionEntry]:
    """Histogram of relation types, largest first.

    Relations are deduplicated by their id because each relation appears
    on both endpoint tickets.
    """
    countedIds: set = set()
    typeCounts: Counter = Counter()
    for ticket in tickets:
        for relation in ticket.relations:
            if relation.id not in countedIds:
                countedIds.add(relation.id)
                typeCounts[relation.relationType or "relates"] += 1
    return [
        RedmineRelationDistributionEntry(relationType=relType, count=count)
        for relType, count in typeCounts.most_common()
    ]
|
|
|
|
|
|
def _backlogAging(
    tickets: List[RedmineTicketDto], *, now: Optional[_dt.datetime] = None
) -> List[RedmineAgingBucket]:
    """Open issues grouped by age in days since last update (falling back
    to the creation date). Always returns all five buckets, even empty ones."""
    reference = now if now is not None else _utcNow()
    buckets = [
        RedmineAgingBucket(bucketKey="lt7", label="< 7 Tage", minDays=0, maxDays=7),
        RedmineAgingBucket(bucketKey="7-30", label="7-30 Tage", minDays=7, maxDays=30),
        RedmineAgingBucket(bucketKey="30-90", label="30-90 Tage", minDays=30, maxDays=90),
        RedmineAgingBucket(bucketKey="90-180", label="90-180 Tage", minDays=90, maxDays=180),
        RedmineAgingBucket(bucketKey="gt180", label="> 180 Tage", minDays=180, maxDays=None),
    ]
    for ticket in tickets:
        if ticket.isClosed:
            continue
        lastTouched = _parseIsoDate(ticket.updatedOn) or _parseIsoDate(ticket.createdOn)
        if lastTouched is None:
            continue
        ageDays = max(0, (reference - lastTouched).days)
        for agingBucket in buckets:
            upper = agingBucket.maxDays
            # Bucket matches when age is within [minDays, maxDays); a None
            # upper bound means open-ended.
            if ageDays >= agingBucket.minDays and (upper is None or ageDays < upper):
                agingBucket.count += 1
                break
    return buckets
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Date helpers (no external deps)
|
|
# ---------------------------------------------------------------------------
|
|
|
|
def _utcNow() -> _dt.datetime:
|
|
"""Naive UTC ``datetime`` -- the rest of the helpers compare naive
|
|
objects, so we strip tz info on purpose."""
|
|
return _dt.datetime.now(_dt.timezone.utc).replace(tzinfo=None)
|
|
|
|
|
|
def _parseIsoDate(value: Optional[str]) -> Optional[_dt.datetime]:
|
|
if not value:
|
|
return None
|
|
try:
|
|
s = value.replace("Z", "+00:00") if isinstance(value, str) else value
|
|
if isinstance(s, str) and "T" not in s and len(s) == 10:
|
|
return _dt.datetime.strptime(s, "%Y-%m-%d")
|
|
return _dt.datetime.fromisoformat(s).replace(tzinfo=None)
|
|
except Exception:
|
|
try:
|
|
return _dt.datetime.strptime(str(value)[:10], "%Y-%m-%d")
|
|
except Exception:
|
|
return None
|
|
|
|
|
|
def _inPeriod(
|
|
when: _dt.datetime,
|
|
fromDate: Optional[_dt.datetime],
|
|
toDate: Optional[_dt.datetime],
|
|
) -> bool:
|
|
if fromDate and when < fromDate:
|
|
return False
|
|
if toDate and when > toDate + _dt.timedelta(days=1):
|
|
return False
|
|
return True
|
|
|
|
|
|
def _bucketKey(when: _dt.datetime, bucket: str) -> str:
|
|
if bucket == "day":
|
|
return when.strftime("%Y-%m-%d")
|
|
if bucket == "month":
|
|
return when.strftime("%Y-%m")
|
|
iso_year, iso_week, _ = when.isocalendar()
|
|
return f"{iso_year}-W{iso_week:02d}"
|
|
|
|
|
|
def _bucketLabel(key: str, bucket: str) -> str:
|
|
if bucket == "day":
|
|
return key
|
|
if bucket == "month":
|
|
try:
|
|
d = _dt.datetime.strptime(key, "%Y-%m")
|
|
return d.strftime("%b %Y")
|
|
except Exception:
|
|
return key
|
|
return key
|