fixed cursor feature

This commit is contained in:
patrick-motsch 2026-02-23 23:01:28 +01:00
parent f6f42d8db7
commit 2dff6cd0af
4 changed files with 38 additions and 9 deletions

View file

@@ -6,17 +6,39 @@ No complex rules needed - just filter by properties and sort by priority!
"""
import logging
from typing import List, Dict, Any, Optional
import time
from typing import List, Dict, Any, Optional, Tuple
from modules.datamodels.datamodelAi import AiModel, AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
# Configure logger
logger = logging.getLogger(__name__)
_COOLDOWN_DURATION = 60.0
class ModelSelector:
"""Simple model selector based on properties and priority-based sorting."""
"""Model selector with priority scoring and recent-failure cooldown."""
def __init__(self):
logger.info("ModelSelector initialized with simplified approach")
self._failureLog: Dict[str, float] = {}
logger.info("ModelSelector initialized with failure cooldown support")
def reportFailure(self, modelName: str):
    """Record that a model just failed (rate limit, error, etc.).

    The failure timestamp is stored in self._failureLog; _getCooldownPenalty
    then deprioritizes the model for _COOLDOWN_DURATION seconds.

    Args:
        modelName: Name of the model that failed.
    """
    self._failureLog[modelName] = time.time()
    # Lazy %-style args so the message is only formatted when INFO is enabled.
    logger.info("ModelSelector: Recorded failure for %s, cooldown %ss", modelName, _COOLDOWN_DURATION)
def _getCooldownPenalty(self, modelName: str) -> float:
    """Score penalty for a recently failed model.

    Returns 0.0 when the model has no recorded failure (or its cooldown has
    expired); otherwise a negative value that decays linearly from -5000.0
    (just failed) toward 0.0 as the cooldown window elapses.
    """
    lastFailure = self._failureLog.get(modelName)
    if lastFailure is None:
        return 0.0
    age = time.time() - lastFailure
    if age > _COOLDOWN_DURATION:
        # Cooldown is over — drop the stale entry so the log stays small.
        self._failureLog.pop(modelName, None)
        return 0.0
    # Linear decay over the cooldown window.
    return -((_COOLDOWN_DURATION - age) / _COOLDOWN_DURATION) * 5000.0
def selectModel(self,
prompt: str,
@@ -129,10 +151,14 @@ class ModelSelector:
maxAllowed = model.contextLength * 0.8 / 4 if model.contextLength > 0 else "unlimited"
logger.warning(f" - {model.name}: contextLength={model.contextLength} tokens, maxAllowed={maxAllowed} tokens")
# Step 3: Calculate scores for each model
# Step 3: Calculate scores for each model (including cooldown penalties)
scoredModels = []
for model in promptFiltered:
score = self._calculateModelScore(model, promptSize, contextSize, totalSize, options)
penalty = self._getCooldownPenalty(model.name)
if penalty < 0:
logger.debug(f"Model {model.name}: base_score={score:.3f}, cooldown_penalty={penalty:.0f}")
score += penalty
scoredModels.append((model, score))
logger.debug(f"Model {model.name}: score={score:.3f}")

View file

@@ -29,6 +29,7 @@ class FileContext(BaseModel):
content: Optional[str] = None
mimeType: str
sizeBytes: int = 0
modifiedAt: Optional[float] = None
tags: List[str] = Field(default_factory=list)

View file

@@ -62,12 +62,14 @@ def listTextFiles(dbManagement) -> List[FileContext]:
for fileItem in allFiles:
if isTextFile(fileItem.mimeType, fileItem.fileName):
modifiedAt = getattr(fileItem, "_modifiedAt", None) or getattr(fileItem, "creationDate", None)
textFiles.append(FileContext(
fileId=fileItem.id,
fileName=fileItem.fileName,
content=None,
mimeType=fileItem.mimeType,
sizeBytes=fileItem.fileSize
sizeBytes=fileItem.fileSize,
modifiedAt=modifiedAt
))
return textFiles

View file

@@ -135,11 +135,11 @@ class AiObjects:
except Exception as e:
lastError = e
logger.warning(f"❌ AI call failed with model {model.name}: {str(e)}")
logger.warning(f"AI call failed with model {model.name}: {str(e)}")
modelSelector.reportFailure(model.name)
# If this is not the last model, try the next one
if attempt < len(failoverModelList) - 1:
logger.info(f"🔄 Trying next failover model...")
logger.info(f"Trying next failover model...")
continue
else:
# All models failed