- Add async Redis client singleton (redis_client.py) for caching and arq pool
- Add arq job functions (jobs.py) for background LLM processing
- Cache ask/explain LLM responses with TTL (1h ask, 24h explain)
- Add async mode to /api/ask: enqueue job, return job_id, poll /api/jobs/{id}
- Add GET /api/jobs/{job_id} endpoint for job status polling
- Add arq worker service to docker-compose (dev + prod)
- Switch from Redis to Valkey (BSD fork) in Docker Compose
- Add REDIS_URL config setting
- Add tests for cache hit, async mode, and job status
106 lines
2.4 KiB
Python
106 lines
2.4 KiB
Python
from pydantic import BaseModel, ConfigDict
|
|
|
|
|
|
class EventItem(BaseModel):
    """One normalized audit-log event.

    Every declared field is optional because events come from
    heterogeneous sources that may omit any of them; keys not declared
    here are kept on the model via ``extra="allow"``.
    """

    id: str | None = None
    timestamp: str | None = None
    service: str | None = None
    operation: str | None = None
    result: str | None = None
    actor_display: str | None = None
    target_displays: list[str] | None = None
    display_summary: str | None = None
    display_category: str | None = None
    dedupe_key: str | None = None
    actor: dict | None = None
    targets: list[dict] | None = None
    raw: dict | None = None
    raw_text: str | None = None
    tags: list[str] | None = None
    comments: list[dict] | None = None

    # Retain source-specific keys that have no declared field above.
    model_config = ConfigDict(extra="allow")
|
|
|
|
|
|
class PaginatedEventResponse(BaseModel):
    """Cursor-paginated page of event dicts.

    ``next_cursor`` is ``None`` when there are no further pages.
    """

    items: list[dict]
    total: int
    page_size: int
    next_cursor: str | None = None
|
|
|
|
|
|
class FilterOptionsResponse(BaseModel):
    """Distinct values available for each event filter facet."""

    services: list[str]
    operations: list[str]
    results: list[str]
    actors: list[str]
    actor_upns: list[str]
    devices: list[str]
|
|
|
|
|
|
class FetchAuditLogsResponse(BaseModel):
    """Outcome of an audit-log fetch: count stored plus any error messages."""

    stored_events: int
    errors: list[str]
|
|
|
|
|
|
class SourceHealthResponse(BaseModel):
    """Health snapshot for one log source.

    Timestamps are ``None`` when the source has never been fetched /
    attempted.
    """

    source: str
    last_fetch_time: str | None = None
    last_attempt_time: str | None = None
    status: str
|
|
|
|
|
|
class TagsUpdateRequest(BaseModel):
    """Request body to set the tags of a single event."""

    tags: list[str]
|
|
|
|
|
|
class BulkTagsRequest(BaseModel):
    """Request body to tag many events at once."""

    tags: list[str]
    # Accepted values: "append" (add to existing tags) or "replace".
    mode: str = "append"
|
|
|
|
|
|
class CommentAddRequest(BaseModel):
    """Request body to attach a comment to an event."""

    text: str
|
|
|
|
|
|
class AlertRuleResponse(BaseModel):
    """Serialized alert rule.

    ``id`` may be ``None`` for a rule that has not been persisted yet.
    """

    id: str | None = None
    name: str
    enabled: bool
    severity: str
    conditions: list[dict]
    message: str
|
|
|
|
|
|
class AskRequest(BaseModel):
    """Natural-language question over the event store, plus optional filters.

    All filter fields are optional; ``start``/``end`` bound the time range.
    """

    question: str
    services: list[str] | None = None
    actor: str | None = None
    operation: str | None = None
    result: str | None = None
    start: str | None = None
    end: str | None = None
    include_tags: list[str] | None = None
    exclude_tags: list[str] | None = None
    # When True, the server enqueues a background job and returns a job_id
    # instead of blocking on the LLM call.
    async_mode: bool = False
|
|
|
|
|
|
class AskEventRef(BaseModel):
    """Lightweight reference to an event cited in an ask answer."""

    id: str | None = None
    timestamp: str | None = None
    operation: str | None = None
    actor_display: str | None = None
    target_displays: list[str] | None = None
    display_summary: str | None = None
    service: str | None = None
    result: str | None = None
|
|
|
|
|
|
class AskResponse(BaseModel):
    """Answer to an ask request.

    ``llm_error`` is set when the LLM call failed; ``job_id`` is set only
    for async requests, so the caller can poll for the result.
    """

    answer: str
    events: list[AskEventRef]
    query_info: dict
    llm_used: bool
    llm_error: str | None = None
    job_id: str | None = None
|