Compare commits
54 commits: v1.0.0...67f3c28e82
Commits:
67f3c28e82, 04c41ee740, cbd46adaa6, e4bafbc4b0, f75f165911, 47e0dfc2ca, 2fffe3aec2, b2f4cabef4, e069869a94, fb2386e190, 05f5f07e7b, 681f7d468a, fb5d45dfb3, 658ddd0aac, a5db0d363d, 43582692ba, 5122739c01, 6cf5c0a28b, 6aa47e9b1e, 60b6ad15c4, b4e504a87b, b728abb5ee, d100388c7d, 11fd87411d, 6a80bf4eb9, 5e02f5a402, 0c3e5ec57b, a255be93fe, cfe9397cc5, cf0283b20b, 28542f7b80, 4303b8f02c, 9ec193ea13, be319688f6, 22d237fbfb, 0ef50c91f7, b0eba09f0f, 91a4c6dccf, 196e1b7781, 30dc75d0e5, b45d9bb8a3, 52f565b647, 9774277bd0, 4713b43afe, b86539399b, 86966bb57f, 3761aa6d74, 6d00d7cf32, de9ea45e1e, bade860fd4, 9f4601c4d9, 194858490d, 153ae05114, 9dba33aa9f
.env.example (29)
@@ -33,3 +33,32 @@ SIEM_WEBHOOK_URL=

# Optional: enable rule-based alerting during ingestion
ALERTS_ENABLED=false

# Optional: enable AI/natural-language features (/api/ask, MCP server)
# Set to false to completely disable AI endpoints and UI elements
AI_FEATURES_ENABLED=true

# Optional: LLM configuration for natural language querying (/api/ask)
# Supports any OpenAI-compatible API (OpenAI, Azure OpenAI, Ollama, etc.)
# For Azure OpenAI / MS Foundry, set BASE_URL to your deployment endpoint
# (e.g. https://your-resource.openai.azure.com/openai/deployments/your-deployment)
# and set API_VERSION to something like 2025-01-01-preview
LLM_API_KEY=
LLM_BASE_URL=https://api.openai.com/v1
LLM_MODEL=gpt-4o-mini
LLM_MAX_EVENTS=200
LLM_TIMEOUT_SECONDS=30
LLM_API_VERSION=

# Valkey (caching + async job queue for LLM calls)
# In Docker Compose, this is set automatically to redis://redis:6379/0
# For local dev, start Valkey with: docker run -d -p 6379:6379 valkey/valkey:8-alpine
REDIS_URL=redis://localhost:6379/0

# Optional: privacy / access control
# Hide entire services from users without PRIVACY_SERVICE_ROLES
# PRIVACY_SERVICES=Exchange,Teams
# Hide specific operations across all services from users without PRIVACY_SERVICE_ROLES
# PRIVACY_SENSITIVE_OPERATIONS=MailItemsAccessed,Search-Mailbox,Send,ChatMessageRead
# Comma-separated list of Entra roles that can access privacy-sensitive data
# PRIVACY_SERVICE_ROLES=SecurityAdministrator,ComplianceAdministrator
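As a rough illustration of how the `LLM_*` settings above can drive an OpenAI-compatible chat call, here is a minimal sketch. It is not the project's actual client code; the `ask_llm` function and the header-selection logic are illustrative only (the Azure `api-key` header and `api-version` query parameter are standard Azure OpenAI conventions).

```python
# Minimal sketch, assuming the LLM_* variables from .env are exported.
# Illustrative only — not the project's actual LLM client.
import os
import requests

def ask_llm(prompt: str) -> str:
    base_url = os.getenv("LLM_BASE_URL", "https://api.openai.com/v1")
    api_key = os.getenv("LLM_API_KEY", "")
    api_version = os.getenv("LLM_API_VERSION", "")  # set only for Azure OpenAI / MS Foundry
    timeout = int(os.getenv("LLM_TIMEOUT_SECONDS", "30"))

    # Azure OpenAI expects an `api-key` header plus an api-version query
    # parameter; vanilla OpenAI-compatible APIs use a Bearer token.
    if api_version:
        url = f"{base_url}/chat/completions?api-version={api_version}"
        headers = {"api-key": api_key}
    else:
        url = f"{base_url}/chat/completions"
        headers = {"Authorization": f"Bearer {api_key}"}

    resp = requests.post(
        url,
        headers=headers,
        json={
            "model": os.getenv("LLM_MODEL", "gpt-4o-mini"),
            "messages": [{"role": "user", "content": prompt}],
        },
        timeout=timeout,
    )
    resp.raise_for_status()
    return resp.json()["choices"][0]["message"]["content"]
```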
.gitea/workflows/release.yml (new file, 28)
@@ -0,0 +1,28 @@
name: Release

on:
  push:
    tags:
      - "v*"

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Log in to Gitea Container Registry
        run: echo "${{ secrets.REGISTRY_TOKEN }}" | docker login git.cqre.net -u ${{ github.actor }} --password-stdin 2>&1 | grep -v "WARNING! Your credentials are stored unencrypted"

      - name: Build Docker image
        run: docker build ./backend --build-arg VERSION=${{ gitea.ref_name }} --tag git.cqre.net/cqrenet/aoc-backend:${{ gitea.ref_name }}

      - name: Tag as latest
        run: docker tag git.cqre.net/cqrenet/aoc-backend:${{ gitea.ref_name }} git.cqre.net/cqrenet/aoc-backend:latest

      - name: Push version tag
        run: docker push git.cqre.net/cqrenet/aoc-backend:${{ gitea.ref_name }}

      - name: Push latest tag
        run: docker push git.cqre.net/cqrenet/aoc-backend:latest
AGENTS.md (99)
@@ -6,28 +6,34 @@ AOC is a FastAPI microservice that ingests Microsoft Entra (Azure AD) audit logs

## Technology Stack

- **Runtime**: Python 3.11
- **Web Framework**: FastAPI + Uvicorn
- **Runtime**: Python 3.11 (3.14 for tests)
- **Web Framework**: FastAPI + Uvicorn (Gunicorn in production)
- **Database**: MongoDB (PyMongo)
- **Frontend**: Vanilla HTML/CSS/JS (served as static files from `backend/frontend/`)
- **Frontend**: Alpine.js + HTML/CSS (served as static files from `backend/frontend/`)
- **Authentication**: Optional OIDC Bearer token validation against Microsoft Entra (using `python-jose` and MSAL.js on the frontend)
- **External APIs**: Microsoft Graph API, Office 365 Management Activity API
- **Deployment**: Docker Compose
- **External APIs**: Microsoft Graph API, Office 365 Management Activity API, Azure OpenAI / MS Foundry
- **Deployment**: Docker Compose (dev), Docker Compose + nginx (prod)
- **CI/CD**: Gitea Actions (lint + test + Docker build + release)

## Project Structure

```
backend/
  main.py           # FastAPI app, router registration, background periodic fetch
  config.py         # Environment-based configuration (loads .env)
  config.py         # Pydantic Settings configuration (loads .env)
  database.py       # MongoClient setup (db = micro_soc, collection = events)
  auth.py           # OIDC Bearer token validation, JWKS caching, role/group checks
  requirements.txt  # Python dependencies
  Dockerfile        # python:3.11-slim image
  Dockerfile        # python:3.11-slim image, non-root user, version baked at build
  mcp_server.py     # Standalone MCP server for Claude Desktop / Cursor integration
  routes/
    fetch.py        # GET /api/fetch-audit-logs, run_fetch()
    events.py       # GET /api/events, GET /api/filter-options
    config.py       # GET /api/config/auth
    events.py       # GET /api/events, GET /api/filter-options, PATCH tags, POST comments
    config.py       # GET /api/config/auth, GET /api/config/features
    ask.py          # POST /api/ask — natural language query with LLM
    health.py       # GET /health, GET /metrics
    rules.py        # Rule-based alerting endpoints
    webhooks.py     # Microsoft Graph change notification webhooks
  graph/
    auth.py         # Client credentials token acquisition for Graph
    audit_logs.py   # Fetch and enrich directory audit logs from Graph
@@ -41,7 +47,7 @@ backend/
  mappings.yml      # User-editable category labels and summary templates
  maintenance.py    # CLI for re-normalization and deduplication of stored events
  frontend/
    index.html      # Single-page UI with filters, pagination, raw-event modal
    index.html      # Single-page UI with filters, pagination, ask panel, raw-event modal
    style.css       # Dark-themed stylesheet
```

@@ -60,6 +66,9 @@ Key variables:

- `AUTH_ALLOWED_ROLES`, `AUTH_ALLOWED_GROUPS` — comma-separated access control lists
- `ENABLE_PERIODIC_FETCH`, `FETCH_INTERVAL_MINUTES` — background ingestion scheduler
- `MONGO_ROOT_USERNAME`, `MONGO_ROOT_PASSWORD`, `MONGO_PORT` — used by Docker Compose for MongoDB
- `AI_FEATURES_ENABLED` — set `false` to completely disable AI endpoints and UI (default `true`)
- `LLM_API_KEY`, `LLM_BASE_URL`, `LLM_MODEL`, `LLM_MAX_EVENTS`, `LLM_TIMEOUT_SECONDS` — LLM provider settings
- `LLM_API_VERSION` — required for Azure OpenAI / MS Foundry endpoints

## Build and Run Commands

@@ -87,35 +96,81 @@ uvicorn main:app --reload --host 0.0.0.0 --port 8000

## API Endpoints

- `GET /api/fetch-audit-logs?hours=168` — pulls last N hours (capped at 720 / 30 days) from all sources, normalizes, dedupes, and upserts into MongoDB
- `GET /api/events` — list stored events with filters (`service`, `actor`, `operation`, `result`, `start`, `end`, `search`) and pagination (`page`, `page_size`)
- `GET /api/events` — list stored events with filters (`service`, `actor`, `operation`, `result`, `start`, `end`, `search`) and cursor-based pagination
- `GET /api/filter-options` — best-effort distinct values for UI dropdowns
- `GET /api/config/auth` — auth configuration exposed to the frontend
- `GET /api/config/features` — feature flags (`ai_features_enabled`)
- `POST /api/ask` — natural language query; returns LLM narrative + referenced events (only when `AI_FEATURES_ENABLED=true`)
- `GET /health` — liveness probe with DB connectivity
- `GET /metrics` — Prometheus metrics

## MCP Server

A standalone MCP server (`backend/mcp_server.py`) exposes audit log tools for Claude Desktop, Cursor, and other MCP clients.

Available tools:
- `search_events` — Search by entity, service, operation, result, time range
- `get_event` — Retrieve a single event by ID (raw JSON)
- `get_summary` — Aggregated counts by service, operation, result, actor
- `ask` — Natural language question (returns recent events + guidance)

**Claude Desktop config** (`~/.config/claude/claude_desktop_config.json`):
```json
{
  "mcpServers": {
    "aoc": {
      "command": "python",
      "args": ["/path/to/aoc/backend/mcp_server.py"],
      "env": {"MONGO_URI": "mongodb://root:example@localhost:27017/"}
    }
  }
}
```

The MCP server imports `database.py` directly and does not go through the FastAPI layer, so it shares the same MongoDB connection but bypasses auth.
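To make the tool surface concrete, here is a hypothetical shape for one tool in `backend/mcp_server.py`, assuming the official `mcp` Python SDK's `FastMCP` helper. The real file may be structured differently; only the tool names and the direct `database.py` import are confirmed above.

```python
# Hypothetical sketch of a search_events tool — not the repo's actual code.
from mcp.server.fastmcp import FastMCP

from database import events_collection  # shared MongoDB collection; no FastAPI auth layer

mcp = FastMCP("aoc")

@mcp.tool()
def search_events(service: str = "", operation: str = "", limit: int = 20) -> list[dict]:
    """Search stored audit events by service and operation, newest first."""
    query = {}
    if service:
        query["service"] = service
    if operation:
        query["operation"] = operation
    cursor = events_collection.find(query, {"_id": 0}).sort("timestamp", -1).limit(limit)
    return list(cursor)

if __name__ == "__main__":
    mcp.run()  # stdio transport, as used by Claude Desktop / Cursor
```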
## AI Feature Flag

Set `AI_FEATURES_ENABLED=false` in `.env` to:
- Prevent the `ask` router from being registered in FastAPI (sketched below)
- Hide the "Ask a question" panel in the frontend
- Return `ai_features_enabled: false` from `/api/config/features`

This is intended for the open-core monetization split: core features (ingestion, filtering, search, export) are always available; premium AI features (NLQ, MCP) can be disabled.
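A minimal sketch of the router gating described above, with module names assumed to mirror the repo layout (the actual `main.py` may register more routers):

```python
# Sketch of conditional router registration — names assumed from the
# project structure above, not copied from main.py.
from fastapi import FastAPI

from config import AI_FEATURES_ENABLED
from routes import ask, events, health

app = FastAPI()
app.include_router(events.router)
app.include_router(health.router)

# When the flag is off, the ask router is never registered, so
# /api/ask simply 404s instead of returning a disabled-feature error.
if AI_FEATURES_ENABLED:
    app.include_router(ask.router)
```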
## Code Conventions

- Python modules use absolute imports within the `backend/` package (e.g., `from graph.auth import get_access_token`). When running locally, ensure the working directory is `backend/` so these resolve correctly.
- No formal formatter or linter is configured. Keep changes consistent with the existing style: simple functions, explicit exception handling, and informative docstrings.
- The frontend is a single HTML file with inline JavaScript. It relies on the MSAL.js CDN (`https://alcdn.msauth.net/browser/2.37.0/js/msal-browser.min.js`).
- The project uses `ruff` for linting and formatting. Run `ruff check . && ruff format .` before committing.
- Keep changes consistent with the existing style: simple functions, explicit exception handling, and informative docstrings.
- The frontend is a single HTML file with inline JavaScript and Alpine.js.

## Testing

There are currently **no automated tests** in this repository. When adding new features or bug fixes, verify behavior manually:
Tests run with pytest and mongomock (no real MongoDB required):

1. Start the server (Docker Compose or local uvicorn).
2. Run a smoke test:
```bash
curl http://localhost:8000/api/events
curl http://localhost:8000/api/fetch-audit-logs
```
3. Open http://localhost:8000 in a browser, apply filters, paginate, and click "View raw event".
```bash
cd backend
python -m venv .venv_test
source .venv_test/bin/activate
pip install -r requirements.txt
pytest tests/ -q
```

When adding new features or bug fixes, add or update tests in `backend/tests/`. The test suite covers:
- Event normalization and deduplication
- Auth middleware and token validation
- API endpoints (`/api/events`, `/api/fetch-audit-logs`, `/api/ask`)
- NLQ time range extraction, entity extraction, query building
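A minimal sketch of the mongomock pattern, assuming a fixture-based setup (test names and fixture shape are illustrative; see `backend/tests/` for the real suite):

```python
# Illustrative mongomock-backed test — not copied from backend/tests/.
import mongomock
import pytest

@pytest.fixture
def events_collection():
    client = mongomock.MongoClient()
    return client["micro_soc"]["events"]

def test_event_upsert_is_idempotent(events_collection):
    event = {"id": "evt-1", "service": "Entra", "operation": "Add user"}
    # Upserting the same event twice must not create a duplicate.
    for _ in range(2):
        events_collection.update_one({"id": event["id"]}, {"$set": event}, upsert=True)
    assert events_collection.count_documents({"id": "evt-1"}) == 1
```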
## Security Considerations

- **Secrets**: `CLIENT_SECRET` and other credentials come from `.env`. Never commit `.env`.
- **Secrets**: `CLIENT_SECRET`, `LLM_API_KEY`, and other credentials come from `.env`. Never commit `.env`.
- **Auth validation**: When `AUTH_ENABLED=true`, the backend fetches JWKS from `https://login.microsoftonline.com/{AUTH_TENANT_ID}/v2.0/.well-known/openid-configuration`, caches keys for 1 hour, and validates tenant/issuer claims. Tokens are decoded without strict signature verification (`jwt.get_unverified_claims`), so the tenant and issuer checks are the primary gate.
- **Role/Group gating**: Access is allowed if the token's `roles` intersect `AUTH_ALLOWED_ROLES` or `groups` intersect `AUTH_ALLOWED_GROUPS`. If neither list is configured, all authenticated users are allowed.
- **Pagination limits**: `page_size` is clamped to a maximum of 500 to prevent large queries.
- **Fetch window cap**: `hours` is clamped to 720 (30 days) to avoid runaway API calls.
- **MCP server**: The MCP server bypasses auth entirely. Only run it in trusted environments or behind a VPN.
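The role/group gate reduces to a set intersection. A sketch (the helper name is illustrative; the real check lives in `backend/auth.py`):

```python
# Sketch of the gating rule described above — illustrative helper only.
def is_allowed(claims: dict, allowed_roles: set[str], allowed_groups: set[str]) -> bool:
    # With neither list configured, any authenticated user passes.
    if not allowed_roles and not allowed_groups:
        return True
    roles = set(claims.get("roles", []))
    groups = set(claims.get("groups", []))
    return bool(roles & allowed_roles or groups & allowed_groups)
```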
## Maintenance and Operations
DEPLOY.md (new file, 103)
@@ -0,0 +1,103 @@
# Production Deployment Guide

## Overview

AOC runs as a set of Docker containers orchestrated by Docker Compose:

- **nginx** — reverse proxy, TLS termination, static file serving
- **backend** — FastAPI application (Gunicorn + Uvicorn workers)
- **mongo** — MongoDB data store (not exposed externally)

## Prerequisites

- Docker Engine 24+ and Docker Compose plugin
- A server with ports 80/443 reachable from your users
- TLS certificates (place in `nginx/ssl/` or use Let's Encrypt)
- A valid `.env` file at the repo root (see `.env.example`)

## Quick start

1. **Clone / pull the latest release**

   ```bash
   git checkout v1.1.0
   ```

2. **Copy and edit environment variables**

   ```bash
   cp .env.example .env
   # Edit .env and fill in real credentials
   ```

3. **Set the release version**

   ```bash
   export AOC_VERSION=v1.1.0
   ```

4. **Deploy**

   ```bash
   docker compose -f docker-compose.prod.yml pull
   docker compose -f docker-compose.prod.yml up -d
   ```

5. **Verify**

   ```bash
   curl http://localhost/health
   curl http://localhost/api/events
   ```

## Updating to a new release

```bash
export AOC_VERSION=v1.2.0
docker compose -f docker-compose.prod.yml pull
docker compose -f docker-compose.prod.yml up -d
```

## Enabling HTTPS

### Option A: Use your own certificates

1. Place `cert.pem` and `key.pem` in `nginx/ssl/`
2. Uncomment the HTTPS server block in `nginx/nginx.conf`
3. Uncomment the HTTP → HTTPS redirect server block
4. Reload nginx:

   ```bash
   docker compose -f docker-compose.prod.yml exec nginx nginx -s reload
   ```

### Option B: Let's Encrypt with Certbot

Replace the `nginx` service in `docker-compose.prod.yml` with a Certbot-friendly setup (e.g., use the `nginx-proxy` + `acme-companion` stack) or mount the Certbot certificates into `nginx/ssl/`.

## Security hardening

- MongoDB is **not exposed** to the host — only the backend container can reach it.
- The backend runs as a non-root (`aoc`) user inside the container.
- nginx adds security headers (`X-Frame-Options`, `X-Content-Type-Options`, etc.).
- Keep `.env` out of version control — it is listed in `.gitignore`.

## Rollback

```bash
export AOC_VERSION=v1.0.3
docker compose -f docker-compose.prod.yml pull
docker compose -f docker-compose.prod.yml up -d
```

## Monitoring

- Prometheus metrics: `http://your-host/metrics`
- Health check: `http://your-host/health`
- Container logs:

```bash
docker compose -f docker-compose.prod.yml logs -f backend
docker compose -f docker-compose.prod.yml logs -f nginx
docker compose -f docker-compose.prod.yml logs -f mongo
```
README.md (35)
@@ -9,6 +9,8 @@ FastAPI microservice that ingests Microsoft Entra (Azure AD) and other admin aud

- Office 365 Management Activity API client for Exchange/SharePoint/Teams admin audit logs.
- Frontend served from the backend for filtering/searching events and viewing raw entries.
- Optional OIDC bearer auth (Entra) to protect the API/UI and gate access by roles/groups.
- Natural language query (`/api/ask`) powered by LLM (OpenAI, Azure OpenAI, or any compatible API).
- MCP server for Claude Desktop / Cursor integration.

## Prerequisites (macOS)
- Python 3.11

@@ -38,6 +40,15 @@ cp .env.example .env

# Optional: CORS origins if the frontend is served separately
# CORS_ORIGINS=http://localhost:3000,https://app.example.com

# Optional: enable AI/natural-language features (/api/ask, MCP server)
# AI_FEATURES_ENABLED=true

# Optional: LLM configuration for natural language querying
# LLM_API_KEY=...
# LLM_BASE_URL=https://api.openai.com/v1
# LLM_MODEL=gpt-4o-mini
# LLM_TIMEOUT_SECONDS=30
```

## Run with Docker Compose (recommended)

@@ -66,6 +77,7 @@ uvicorn main:app --reload --host 0.0.0.0 --port 8000

## API
- `GET /health` — health check with MongoDB connectivity status.
- `GET /metrics` — Prometheus metrics for request latency, fetch volume, and errors.
- `GET /api/version` — running version (baked into the Docker image at build time).
- `GET /api/fetch-audit-logs` — pulls the last 7 days by default (override with `?hours=N`, capped to 30 days) of:
  - Entra directory audit logs (`/auditLogs/directoryAudits`)
  - Exchange/SharePoint/Teams admin audits (via Office 365 Management Activity API)

@@ -82,11 +94,34 @@ uvicorn main:app --reload --host 0.0.0.0 --port 8000

- `GET /api/source-health` — last fetch status for each ingestion source (`directory`, `unified`, `intune`).
- `PATCH /api/events/{id}/tags` — update tags on an event (e.g., `investigating`, `false_positive`).
- `POST /api/events/{id}/comments` — add a comment to an event.
- `POST /api/events/{id}/explain` — AI explanation of a single audit event with security context (requires `LLM_API_KEY`).
- `POST /api/ask` — natural language query. Returns a narrative answer + referenced events. Supports time ranges, entity names, and respects active UI filters. Only available when `AI_FEATURES_ENABLED=true`.
- `GET /api/config/features` — feature flags (`ai_features_enabled`).
- `GET /api/rules` — list alert rules.
- `POST /api/rules` — create an alert rule.
- `PUT /api/rules/{id}` — update an alert rule.
- `DELETE /api/rules/{id}` — delete an alert rule.
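The events feed is cursor-paginated. A minimal sketch of walking it with `requests` — note the `cursor` query parameter name and the `events`/`next_cursor` response keys are assumptions inferred from the UI code, so check `/docs` on a running instance for the exact contract:

```python
# Sketch of cursor-based paging through /api/events — key names assumed.
import requests

BASE = "http://localhost:8000"

def iter_events():
    cursor = None
    while True:
        params = {"service": "Entra"}
        if cursor:
            params["cursor"] = cursor  # assumed parameter name
        body = requests.get(f"{BASE}/api/events", params=params, timeout=30).json()
        yield from body.get("events", [])
        cursor = body.get("next_cursor")
        if not cursor:  # no further page
            break

for evt in iter_events():
    print(evt.get("timestamp"), evt.get("operation"))
```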
### MCP Server
AOC exposes an MCP interface in two forms:

**1. HTTP/SSE (production)** — mounted at `/mcp` inside the FastAPI app, behind OIDC auth:
- `GET /mcp/sse` — establish SSE stream (requires Bearer token if `AUTH_ENABLED=true`)
- `POST /mcp/messages/?session_id=...` — send tool calls

This is the recommended way to use MCP against a remote deployment like `aoc.cqre.net`. Any MCP client that supports SSE transport (e.g. Cursor, Claude Desktop with an SSE bridge, or custom scripts) can connect using the same Entra token as the web UI.

**2. stdio (local development)** — `python backend/mcp_server.py`:
- Runs as a local subprocess for Claude Desktop
- Connects directly to MongoDB (bypasses FastAPI auth)
- Useful for local development when you have the repo cloned and MongoDB running locally

Available tools (both transports):
- `search_events` — filter by entity, service, operation, result, time range.
- `get_event` — retrieve raw event JSON by ID.
- `get_summary` — aggregated summary (service, operation, result, actor counts) for the last N days.
- `ask` — natural language query returning recent events.

Stored document shape (collection `micro_soc.events`):
```json
{
RELEASE_NOTES_v1.1.0.md (new file, 56)
@@ -0,0 +1,56 @@
# AOC v1.1.0 Release Notes

**Release date:** 2026-04-20

## What's new

### Natural language query (`/api/ask`)
Ask questions in plain English and get AI-generated answers backed by your audit logs.

- **Regex-based parsing** extracts time ranges (`last 3 days`, `yesterday`, `today`) and entities (`device ABC123`, `user bob@example.com`) without calling an LLM — fast and deterministic (see the sketch after this list).
- **AI narrative summarisation** via any OpenAI-compatible API (OpenAI, Azure OpenAI, MS Foundry, Ollama). The LLM reads the matching events and writes a concise story for non-expert admins.
- **Graceful fallback** when no LLM is configured — returns a structured bullet list instead of a narrative.
- **Cited evidence** — every answer includes the raw events that back it up, so admins can verify claims.
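An illustrative regex-only time-range extractor in the spirit described above — this is a sketch of the idea, not the shipped parser:

```python
# Minimal sketch of deterministic time-range extraction — illustrative only.
import re
from datetime import datetime, timedelta, timezone

def extract_time_range(question: str):
    now = datetime.now(timezone.utc)
    q = question.lower()
    m = re.search(r"last\s+(\d+)\s+day", q)  # "last 3 days"
    if m:
        return now - timedelta(days=int(m.group(1))), now
    if "yesterday" in q:
        start = (now - timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
        return start, start + timedelta(days=1)
    if "today" in q:
        return now.replace(hour=0, minute=0, second=0, microsecond=0), now
    return None  # caller falls back to a default window

print(extract_time_range("What happened to device ABC123 in the last 3 days?"))
```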
### Azure OpenAI / MS Foundry support
- Automatic `api-key` header detection for Azure endpoints.
- `LLM_API_VERSION` config for Azure `api-version` query parameters.
- `max_completion_tokens` support for newer model deployments.

### Production hardening
- **Dockerfile:** runs as non-root user, uses Gunicorn + Uvicorn workers.
- **docker-compose.prod.yml:** MongoDB is internal-only (no host port exposure), health checks on all services, nginx reverse proxy with security headers.
- **nginx config:** gzip, security headers (`X-Frame-Options`, `X-Content-Type-Options`), ready for TLS.

### Frontend
- New **"Ask a question"** panel at the top of the page.
- Markdown rendering for LLM answers (bold, italic, code).
- Orange warning banner when LLM is not configured or fails.

### Tests
- 29 new tests covering ask parsing, query building, and endpoint behaviour.
- 62 tests total, all passing.

## Configuration

Add to your `.env`:

```bash
# Required for AI narrative summarisation
LLM_API_KEY=your-key
LLM_BASE_URL=https://api.openai.com/v1
LLM_MODEL=gpt-4o-mini
LLM_MAX_EVENTS=50
LLM_TIMEOUT_SECONDS=30
LLM_API_VERSION=  # set for Azure OpenAI, e.g. 2024-12-01-preview
```

## Upgrade notes

No breaking changes. Existing `/api/events`, filters, pagination, tags, and comments work unchanged.

## Docker image

```
git.cqre.net/cqrenet/aoc-backend:v1.1.0
```
RELEASE_NOTES_v1.2.5.md (new file, 78)
@@ -0,0 +1,78 @@
# AOC v1.2.5 Release Notes

**Release date:** 2026-04-20

---

## What's new

### Natural language query (`/api/ask`)
Ask questions in plain English and get AI-generated answers backed by your audit logs.

- **Regex-based parsing** extracts time ranges (`last 3 days`, `yesterday`, `today`) and entities (`device ABC123`, `user bob@example.com`) without calling an LLM.
- **AI narrative summarisation** via any OpenAI-compatible API (OpenAI, Azure OpenAI, MS Foundry, Ollama).
- **Graceful fallback** when no LLM is configured — returns a structured bullet list with a clear error banner.
- **Cited evidence** — every answer includes the raw events that back it up.

### Filter-aware queries
The ask endpoint now respects the filter panel. When you set **Service = Exchange**, **Result = failure** and ask *"What happened to device X?"*, the LLM only sees failed Exchange events for that device.

### Scales to thousands of events
For large result sets (>50 events), the LLM receives an **aggregated overview** instead of a raw dump (sketched after this list):
- Counts by service, action, result, and actor
- Failure highlights
- The 50 most recent raw events as samples

This keeps token usage low while preserving accuracy.
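One plausible shape for that aggregated overview is a MongoDB group pipeline plus a recency-sorted sample. A sketch, assuming field names from the stored document shape (the real implementation may differ):

```python
# Sketch of the aggregated-overview idea — field names assumed, not the
# shipped implementation.
from database import events_collection

def build_overview(query: dict, sample_size: int = 50) -> dict:
    counts = {
        field: list(events_collection.aggregate([
            {"$match": query},
            {"$group": {"_id": f"${field}", "n": {"$sum": 1}}},
            {"$sort": {"n": -1}},
            {"$limit": 10},
        ]))
        for field in ("service", "operation", "result", "actor_display")
    }
    # Newest raw events serve as concrete samples for the LLM.
    samples = list(
        events_collection.find(query, {"_id": 0}).sort("timestamp", -1).limit(sample_size)
    )
    return {"counts": counts, "samples": samples}
```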
### Azure OpenAI / MS Foundry support
- Automatic `api-key` header detection for Azure endpoints.
- `LLM_API_VERSION` config for Azure `api-version` query parameters.
- `max_completion_tokens` support for newer model deployments.

### Version display
- `GET /api/version` endpoint reads the `VERSION` file.
- Frontend shows a version badge in the header (e.g., **1.2.5**).

### Production hardening (from v1.1.0)
- Dockerfile runs as non-root user with Gunicorn + Uvicorn workers.
- `docker-compose.prod.yml` with internal-only MongoDB, health checks, and nginx reverse proxy.
- Security headers (`X-Frame-Options`, `X-Content-Type-Options`, etc.).

---

## Configuration

Add to your `.env`:

```bash
# Required for AI narrative summarisation
LLM_API_KEY=your-key
LLM_BASE_URL=https://api.openai.com/v1
LLM_MODEL=gpt-4o-mini
LLM_MAX_EVENTS=200
LLM_TIMEOUT_SECONDS=30
LLM_API_VERSION=  # set for Azure OpenAI, e.g. 2024-12-01-preview
```

For Azure OpenAI / MS Foundry:
```bash
LLM_BASE_URL=https://your-resource.openai.azure.com/openai/deployments/your-deployment
LLM_API_KEY=your-azure-key
LLM_API_VERSION=2024-12-01-preview
LLM_MODEL=your-deployment-name
```

---

## Upgrade notes

No breaking changes. Existing `/api/events`, filters, pagination, tags, and comments work unchanged.

---

## Docker image

```
git.cqre.net/cqrenet/aoc-backend:v1.2.5
```
ROADMAP.md (13)
@@ -59,5 +59,16 @@ Goal: evolve from a polling dashboard into a full security operations tool.

---

## Phase 5: Intelligence
Goal: add AI-powered analysis and external tool integration.

- [x] AI feature flag (`AI_FEATURES_ENABLED`) to gate LLM-dependent features
- [x] Natural language query endpoint (`/api/ask`) with intent extraction and smart sampling
- [x] MCP (Model Context Protocol) server for Claude Desktop / Cursor integration
- [x] Valkey caching for LLM responses and frequent queries
- [x] Async queue (arq) for LLM requests to prevent timeout/cost explosions at scale (see the sketch after this list)
- [ ] Advanced analytics dashboard (trending operations, anomaly detection)
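A minimal sketch of the arq pattern named above — job and settings names are illustrative, not the repo's actual worker module. The worker consumes LLM jobs from Valkey/Redis at `REDIS_URL`:

```python
# Illustrative arq producer/worker pair — not the shipped code.
from arq import create_pool
from arq.connections import RedisSettings

REDIS = RedisSettings(host="localhost", port=6379)

async def ask_llm(ctx, question: str) -> str:
    # The expensive LLM call runs here, outside the HTTP request cycle.
    return f"answer to: {question}"

class WorkerSettings:
    # Started with: arq worker_module.WorkerSettings
    functions = [ask_llm]
    redis_settings = REDIS

async def enqueue(question: str) -> str:
    pool = await create_pool(REDIS)
    job = await pool.enqueue_job("ask_llm", question)
    return job.job_id  # poll this id for the result
```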
## Completed in this PR
All Phase 1 items were implemented in the latest changes.
All Phase 5 items marked done were implemented in v1.3.0–v1.5.0.
Redis caching + async queue implemented in v1.6.0, switched to Valkey.
backend/Dockerfile
@@ -1,6 +1,31 @@
FROM python:3.11-slim

# Bake the version into the image at build time
ARG VERSION=unknown
ENV VERSION=${VERSION}

# Security: run as non-root
RUN groupadd -r aoc && useradd -r -g aoc aoc

WORKDIR /app

# Install dependencies first for layer caching
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]

# Create directories for potential volume mounts and fix permissions
RUN mkdir -p /app/data && chown -R aoc:aoc /app

USER aoc

# Production: use gunicorn with uvicorn workers
# Workers = 2-4 x $NUM_CORES; keep it conservative for containerised workloads
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1

EXPOSE 8000

CMD ["gunicorn", "main:app", "-k", "uvicorn.workers.UvicornWorker", "--bind", "0.0.0.0:8000", "--workers", "2", "--timeout", "120", "--access-logfile", "-", "--error-logfile", "-"]
backend/auth.py
@@ -8,6 +8,8 @@ from config import (
    AUTH_CLIENT_ID,
    AUTH_ENABLED,
    AUTH_TENANT_ID,
    PRIVACY_SERVICE_ROLES,
    PRIVACY_SERVICES,
)
from fastapi import Header, HTTPException
from jwt import ExpiredSignatureError, InvalidTokenError, decode

@@ -82,6 +84,14 @@ def _decode_token(token: str, jwks):
        raise HTTPException(status_code=401, detail=f"Invalid token ({type(exc).__name__})") from None


def user_can_access_privacy_services(claims: dict) -> bool:
    """Check if the user has roles that grant access to privacy-sensitive services."""
    if not PRIVACY_SERVICES or not PRIVACY_SERVICE_ROLES:
        return True
    user_roles = set(claims.get("roles", []) or claims.get("role", []) or [])
    return bool(user_roles.intersection(PRIVACY_SERVICE_ROLES))


def require_auth(authorization: str | None = Header(None)):
    if not AUTH_ENABLED:
        return {"sub": "anonymous"}
backend/config.py
@@ -42,6 +42,24 @@ class Settings(BaseSettings):
    # Alerting
    ALERTS_ENABLED: bool = False

    # AI / Natural Language Query
    AI_FEATURES_ENABLED: bool = True
    LLM_API_KEY: str = ""
    LLM_BASE_URL: str = "https://api.openai.com/v1"
    LLM_MODEL: str = "gpt-4o-mini"
    LLM_MAX_EVENTS: int = 200
    LLM_TIMEOUT_SECONDS: int = 30
    LLM_API_VERSION: str = ""  # e.g. 2025-01-01-preview for Azure OpenAI

    # Privacy / access control
    # Entire services can be hidden, or specific operations can be gated.
    PRIVACY_SERVICES: str = ""  # comma-separated, e.g. "Exchange,Teams"
    PRIVACY_SENSITIVE_OPERATIONS: str = ""  # comma-separated, e.g. "MailItemsAccessed,Search-Mailbox,Send"
    PRIVACY_SERVICE_ROLES: str = ""  # comma-separated, e.g. "SecurityAdministrator,ComplianceAdministrator"

    # Redis (caching + async job queue)
    REDIS_URL: str = "redis://localhost:6379/0"


_settings = Settings()

@@ -68,3 +86,17 @@ CORS_ORIGINS = [o.strip() for o in _settings.CORS_ORIGINS.split(",") if o.strip(
SIEM_ENABLED = _settings.SIEM_ENABLED
SIEM_WEBHOOK_URL = _settings.SIEM_WEBHOOK_URL
ALERTS_ENABLED = _settings.ALERTS_ENABLED

AI_FEATURES_ENABLED = _settings.AI_FEATURES_ENABLED
LLM_API_KEY = _settings.LLM_API_KEY
LLM_BASE_URL = _settings.LLM_BASE_URL
LLM_MODEL = _settings.LLM_MODEL
LLM_MAX_EVENTS = _settings.LLM_MAX_EVENTS
LLM_TIMEOUT_SECONDS = _settings.LLM_TIMEOUT_SECONDS
LLM_API_VERSION = _settings.LLM_API_VERSION

PRIVACY_SERVICES = {s.strip() for s in _settings.PRIVACY_SERVICES.split(",") if s.strip()}
PRIVACY_SENSITIVE_OPERATIONS = {o.strip() for o in _settings.PRIVACY_SENSITIVE_OPERATIONS.split(",") if o.strip()}
PRIVACY_SERVICE_ROLES = {r.strip() for r in _settings.PRIVACY_SERVICE_ROLES.split(",") if r.strip()}

REDIS_URL = _settings.REDIS_URL
backend/database.py
@@ -4,9 +4,10 @@ import structlog
from config import DB_NAME, MONGO_URI, RETENTION_DAYS
from pymongo import ASCENDING, DESCENDING, TEXT, MongoClient

client = MongoClient(MONGO_URI)
client = MongoClient(MONGO_URI or "mongodb://localhost:27017")
db = client[DB_NAME]
events_collection = db["events"]
saved_searches_collection = db["saved_searches"]
logger = structlog.get_logger("aoc.database")

@@ -20,6 +21,7 @@ def setup_indexes(max_retries: int = 5, delay: float = 2.0):
    events_collection.create_index([("timestamp", DESCENDING)])
    events_collection.create_index([("service", ASCENDING), ("timestamp", DESCENDING)])
    events_collection.create_index("id")
    saved_searches_collection.create_index([("created_by", ASCENDING), ("created_at", DESCENDING)])
    events_collection.create_index(
        [("actor_display", TEXT), ("raw_text", TEXT), ("operation", TEXT)],
        name="text_search_index",
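For reference, a short example of querying the text index defined above with PyMongo's `$text` operator, scored and sorted by relevance (a usage sketch, not code from the repo):

```python
# Example query against the text_search_index defined above.
from database import events_collection

hits = events_collection.find(
    {"$text": {"$search": "password reset"}},
    {"score": {"$meta": "textScore"}, "_id": 0},
).sort([("score", {"$meta": "textScore"})]).limit(20)

for evt in hits:
    print(evt.get("operation"), evt.get("actor_display"))
```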
backend/frontend/index.html
@@ -3,23 +3,49 @@
<head>
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>AOC Events</title>
  <link rel="stylesheet" href="/style.css?v=7" />
  <title>Admin Operations Center</title>
  <link rel="stylesheet" href="/style.css?v=9" />
  <script defer src="https://cdn.jsdelivr.net/npm/alpinejs@3.x.x/dist/cdn.min.js"></script>
  <script src="https://alcdn.msauth.net/browser/2.37.0/js/msal-browser.min.js" crossorigin="anonymous"></script>
</head>
<body>
  <div class="page" x-data="aocApp()" x-init="initApp()">
    <nav class="topbar">
      <div class="topbar__brand">
        <span class="topbar__logo">🔍</span>
        <span class="topbar__name">AOC</span>
        <span class="version-badge" x-text="appVersion"></span>
      </div>
      <div class="topbar__links">
        <a :href="repoUrl" target="_blank" rel="noopener">Repository</a>
        <a :href="docsUrl" target="_blank" rel="noopener">Docs</a>
      </div>
      <div class="topbar__meta">
        <template x-if="account">
          <div class="user-chip">
            <div class="user-avatar" x-text="(account.name || account.username || '?').charAt(0).toUpperCase()"></div>
            <div class="user-details">
              <span class="user-name" x-text="account.name || account.username || ''"></span>
              <span class="user-email" x-text="account.username || ''"></span>
            </div>
          </div>
        </template>
        <template x-if="!account && authConfig?.auth_enabled">
          <span class="login-hint">Not signed in</span>
        </template>
      </div>
      <div class="topbar__actions">
        <button id="fetchBtn" class="ghost btn--compact" aria-label="Fetch latest audit logs" @click="fetchLogs()">Fetch</button>
        <button id="refreshBtn" class="ghost btn--compact" aria-label="Refresh events" @click="loadEvents(currentCursor)">Refresh</button>
        <button id="authBtn" class="ghost btn--compact" aria-label="Login" x-text="authBtnText" @click="toggleAuth()"></button>
      </div>
    </nav>

    <header class="hero">
      <div>
        <p class="eyebrow">Admin Operations Center</p>
        <h1>Directory Audit Explorer</h1>
        <p class="lede">Filter Microsoft Entra audit events by user, app, time, action, and action type.</p>
      </div>
      <div class="cta">
        <button id="authBtn" class="ghost" aria-label="Login" x-text="authBtnText" @click="toggleAuth()"></button>
        <button id="fetchBtn" aria-label="Fetch latest audit logs" @click="fetchLogs()">Fetch new</button>
        <button id="refreshBtn" aria-label="Refresh events" @click="loadEvents(currentCursor)">Refresh</button>
        <h1>Audit Log Explorer</h1>
        <p class="lede">Search and review Microsoft audit events from Entra, Intune, Exchange, SharePoint, and Teams.</p>
      </div>
    </header>

@@ -76,12 +102,20 @@
        To
        <input name="end" type="datetime-local" x-model="filters.end" />
      </label>
      <label>
        Include tags
        <input name="includeTags" type="text" placeholder="backup, critical" x-model="filters.includeTags" />
      </label>
      <label>
        Exclude tags
        <input name="excludeTags" type="text" placeholder="noise, auto" x-model="filters.excludeTags" />
      </label>
    </div>
    <div class="filter-row">
      <label class="span-2">
        Search (raw/full-text)
        <input name="search" type="text" placeholder="Any text to search in raw/summary" x-model="filters.search" />
      </label>
    </div>
    <div class="filter-row filter-row--tall">
      <div class="filter-group span-2">
        <span>App / Service</span>
        <div class="multi-select">

@@ -104,13 +138,69 @@
      <div class="actions">
        <button type="submit">Apply filters</button>
        <button type="button" id="clearBtn" class="ghost" @click="clearFilters()">Clear</button>
        <button type="button" class="ghost" @click="saveCurrentFilters()">Save filters</button>
        <button type="button" class="ghost" @click="bulkTagMatching()">Bulk tag matching</button>
        <button type="button" class="ghost" @click="exportJSON()">Export JSON</button>
        <button type="button" class="ghost" @click="exportCSV()">Export CSV</button>
      </div>
    </div>
    <div class="filter-row" x-show="savedSearches.length">
      <div class="saved-searches">
        <span>Saved:</span>
        <template x-for="ss in savedSearches" :key="ss.id">
          <span class="pill pill--tag" style="cursor:pointer;" @click="applySavedSearch(ss)">
            <span x-text="ss.name"></span>
            <button type="button" class="link" style="margin-left:4px;" @click.stop="deleteSavedSearch(ss.id)">×</button>
          </span>
        </template>
      </div>
    </div>
    </form>
    </section>

    <section class="panel" x-show="aiFeaturesEnabled">
      <h3>Ask a question</h3>
      <form class="ask-form" @submit.prevent="askQuestion()">
        <div class="ask-row">
          <input
            type="text"
            placeholder="What happened to device ABC123 in the last 3 days?"
            x-model="askQuestionText"
            class="ask-input"
          />
          <button type="submit" :disabled="askLoading" x-text="askLoading ? 'Thinking…' : 'Ask'">Ask</button>
        </div>
        <div x-show="hasActiveFilters()" class="ask-filter-hint">
          <small>Respecting active filters: <span x-text="activeFilterSummary()"></span></small>
        </div>
      </form>
      <template x-if="askAnswer">
        <div class="ask-result">
          <div x-show="askLlmError" class="ask-error" x-text="askLlmError"></div>
          <div class="ask-answer" x-html="askAnswerHtml"></div>
          <template x-if="askEvents.length">
            <div class="ask-events">
              <h4>Referenced events</h4>
              <template x-for="(evt, idx) in askEvents" :key="evt.id || idx">
                <article class="event event--compact">
                  <div class="event__meta">
                    <span class="pill" x-text="evt.display_category || evt.service || '—'"></span>
                    <span class="pill" :class="['success','succeeded','ok','passed','true'].includes((evt.result || '').toLowerCase()) ? 'pill--ok' : 'pill--warn'" x-text="evt.result || '—'"></span>
                  </div>
                  <h3 x-text="evt.operation || '—'"></h3>
                  <p class="event__detail" x-show="evt.display_summary"><strong>Summary:</strong> <span x-text="evt.display_summary"></span></p>
                  <p class="event__detail"><strong>Actor:</strong> <span x-text="evt.actor_display || '—'"></span></p>
                  <p class="event__detail"><strong>Target:</strong> <span x-text="Array.isArray(evt.target_displays) ? evt.target_displays.join(', ') : '—'"></span></p>
                  <p class="event__detail"><strong>When:</strong> <span x-text="evt.timestamp ? new Date(evt.timestamp).toLocaleString() : '—'"></span></p>
                </article>
              </template>
            </div>
          </template>
          <button type="button" class="ghost" @click="clearAsk()">Clear</button>
        </div>
      </template>
    </section>

    <section class="panel">
      <div class="panel-header">
        <h2>Events</h2>

@@ -122,7 +212,7 @@
      <article class="event">
        <div class="event__meta">
          <span class="pill" x-text="evt.display_category || evt.service || '—'"></span>
          <span class="pill" :class="['success','succeeded','ok','passed'].includes((evt.result || '').toLowerCase()) ? 'pill--ok' : 'pill--warn'" x-text="evt.result || '—'"></span>
          <span class="pill" :class="['success','succeeded','ok','passed','true'].includes((evt.result || '').toLowerCase()) ? 'pill--ok' : 'pill--warn'" x-text="evt.result || '—'"></span>
        </div>
        <h3 x-text="evt.operation || '—'"></h3>
        <p class="event__detail" x-show="evt.display_summary"><strong>Summary:</strong> <span x-text="evt.display_summary"></span></p>

@@ -162,11 +252,34 @@
      <div class="modal__content">
        <div class="modal__header">
          <h3 id="modalTitle">Raw Event</h3>
          <button type="button" id="closeModal" class="ghost" @click="modalOpen = false">Close</button>
          <div class="modal__actions">
            <button type="button" class="ghost" @click="copyRawEvent()">Copy</button>
            <button type="button" class="ghost" x-show="aiFeaturesEnabled" :disabled="modalExplainLoading" @click="explainEvent()" x-text="modalExplainLoading ? 'Explaining…' : 'Explain'">Explain</button>
            <button type="button" id="closeModal" class="ghost" @click="modalOpen = false">Close</button>
          </div>
        </div>
        <div x-show="modalExplanation || modalExplainError" class="modal__explanation">
          <div x-show="modalExplainError" class="ask-error" x-text="modalExplainError"></div>
          <div x-show="modalExplanation" class="ask-answer" x-html="_mdToHtml(modalExplanation)"></div>
        </div>
        <pre id="modalBody" x-text="modalBody"></pre>
      </div>
    </div>

    <footer class="footer">
      <div class="footer__left">
        <span class="footer__brand">Admin Operations Center</span>
        <span class="footer__version" x-text="'v' + appVersion"></span>
      </div>
      <div class="footer__center">
        <a :href="repoUrl + '/issues/new'" target="_blank" rel="noopener">🐛 Report an issue</a>
        <a :href="repoUrl" target="_blank" rel="noopener">💻 Source code</a>
        <a :href="docsUrl" target="_blank" rel="noopener">📖 Documentation</a>
      </div>
      <div class="footer__right">
        <span>Built with ❤️ by CQRE</span>
      </div>
    </footer>
  </div>

  <script>
@@ -181,6 +294,10 @@
      currentCursor: null,
      modalOpen: false,
      modalBody: '',
      modalEventId: '',
      modalExplanation: '',
      modalExplainLoading: false,
      modalExplainError: '',
      authBtnText: 'Login',
      authConfig: null,
      msalInstance: null,

@@ -188,19 +305,62 @@
      accessToken: null,
      authScopes: [],
      filters: {
        actor: '', selectedServices: [], search: '', operation: '', result: '', start: '', end: '', limit: 100,
        actor: '', selectedServices: [], search: '', operation: '', result: '', start: '', end: '', limit: 100, includeTags: '', excludeTags: '',
      },
      options: { actors: [], services: [], operations: [], results: [] },
      savedSearches: [],
      appVersion: '',
      repoUrl: 'https://git.cqre.net/cqrenet/aoc',
      docsUrl: 'https://git.cqre.net/cqrenet/aoc/src/branch/main/README.md',
      aiFeaturesEnabled: true,
      askQuestionText: '',
      askLoading: false,
      askAnswer: '',
      askAnswerHtml: '',
      askEvents: [],
      askLlmUsed: false,
      askLlmError: '',

      async initApp() {
        await this.loadVersion();
        await this.initAuth();
        this.loadSavedFilters();
        if (!this.authConfig?.auth_enabled || this.accessToken) {
          await this.loadFilterOptions();
          await this.loadSavedSearches();
          await this.loadSourceHealth();
          await this.loadEvents();
        }
      },

      loadSavedFilters() {
        try {
          const saved = localStorage.getItem('aoc_filters');
          if (!saved) return;
          const parsed = JSON.parse(saved);
          const fields = ['actor', 'selectedServices', 'search', 'operation', 'result', 'start', 'end', 'limit', 'includeTags', 'excludeTags'];
          fields.forEach((f) => {
            if (parsed[f] !== undefined) this.filters[f] = parsed[f];
          });
        } catch {}
      },

      saveFilters() {
        try {
          localStorage.setItem('aoc_filters', JSON.stringify(this.filters));
        } catch {}
      },

      async loadVersion() {
        try {
          const res = await fetch('/api/version');
          if (res.ok) {
            const body = await res.json();
            this.appVersion = body.version || '';
          }
        } catch {}
      },

      authHeader() {
        return this.accessToken ? { Authorization: `Bearer ${this.accessToken}` } : {};
      },

@@ -231,6 +391,18 @@
        this.authConfig = { auth_enabled: false };
        }

        try {
          const featRes = await fetch('/api/config/features');
          if (featRes.ok) {
            const featBody = await featRes.json();
            this.aiFeaturesEnabled = featBody.ai_features_enabled !== false;
          } else {
            this.aiFeaturesEnabled = true;
          }
        } catch {
          this.aiFeaturesEnabled = true;
        }

        if (!this.authConfig?.auth_enabled) {
          this.authBtnText = '';
          return;

@@ -320,6 +492,12 @@
        if (this.filters.selectedServices && this.filters.selectedServices.length) {
          this.filters.selectedServices.forEach((s) => params.append('services', s));
        }
        if (this.filters.includeTags) {
          this.filters.includeTags.split(/[,;]+/).map((t) => t.trim()).filter(Boolean).forEach((t) => params.append('include_tags', t));
        }
        if (this.filters.excludeTags) {
          this.filters.excludeTags.split(/[,;]+/).map((t) => t.trim()).filter(Boolean).forEach((t) => params.append('exclude_tags', t));
        }
        if (this.filters.start) {
          const d = new Date(this.filters.start);
          if (!isNaN(d.getTime())) params.append('start', d.toISOString());

@@ -347,6 +525,7 @@
          this.nextCursor = body.next_cursor || null;
          this.countText = body.total >= 0 ? `${body.total} event${body.total === 1 ? '' : 's'}` : '';
          this.statusText = this.events.length ? '' : 'No events found for these filters.';
          this.saveFilters();
        } catch (err) {
          this.statusText = err.message || 'Failed to load events.';
        }

@@ -382,8 +561,19 @@
          this.options.services = (opts.services || []).slice(0, 200);
          this.options.operations = (opts.operations || []).slice(0, 200);
          this.options.results = (opts.results || []).slice(0, 200);
          if (!this.filters.selectedServices.length && this.options.services.length) {
            this.filters.selectedServices = [...this.options.services];

          const saved = localStorage.getItem('aoc_filters');
          if (!saved && this.options.services.length) {
            // Default: exclude noisy high-volume services
            const noisy = ['Exchange', 'SharePoint', 'Teams'];
            this.filters.selectedServices = this.options.services.filter((s) => !noisy.includes(s));
          } else if (saved) {
            try {
              const parsed = JSON.parse(saved);
              if (parsed.selectedServices) {
                this.filters.selectedServices = parsed.selectedServices.filter((s) => this.options.services.includes(s));
              }
            } catch {}
          }
        } catch {}
      },

@@ -396,6 +586,59 @@
        } catch {}
      },

      async loadSavedSearches() {
        try {
          const res = await fetch('/api/saved-searches', { headers: this.authHeader() });
          if (!res.ok) return;
          this.savedSearches = await res.json();
        } catch {}
      },

      async saveCurrentFilters() {
        const name = prompt('Name this saved filter:');
        if (!name || !name.trim()) return;
        try {
          const res = await fetch('/api/saved-searches', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json', ...this.authHeader() },
            body: JSON.stringify({ name: name.trim(), filters: { ...this.filters } }),
          });
          if (!res.ok) throw new Error(await res.text());
          const created = await res.json();
          this.savedSearches.unshift(created);
          this.statusText = 'Filters saved.';
          setTimeout(() => { if (this.statusText === 'Filters saved.') this.statusText = ''; }, 2000);
        } catch (err) {
          this.statusText = err.message || 'Failed to save filters.';
        }
      },

      applySavedSearch(ss) {
        if (!ss || !ss.filters) return;
        const fields = ['actor', 'selectedServices', 'search', 'operation', 'result', 'start', 'end', 'limit', 'includeTags', 'excludeTags'];
        fields.forEach((f) => {
          if (ss.filters[f] !== undefined) this.filters[f] = ss.filters[f];
        });
        // Validate selectedServices against current options
        this.filters.selectedServices = this.filters.selectedServices.filter((s) => this.options.services.includes(s));
        this.resetPagination();
        this.loadEvents();
      },

      async deleteSavedSearch(id) {
        if (!confirm('Delete this saved search?')) return;
        try {
          const res = await fetch(`/api/saved-searches/${id}`, {
            method: 'DELETE',
            headers: this.authHeader(),
          });
          if (!res.ok) throw new Error(await res.text());
          this.savedSearches = this.savedSearches.filter((s) => s.id !== id);
        } catch (err) {
          this.statusText = err.message || 'Failed to delete saved search.';
        }
      },

      resetPagination() {
        this.cursorStack = [];
        this.nextCursor = null;

@@ -417,11 +660,142 @@
      },

      clearFilters() {
        this.filters = { actor: '', selectedServices: [...this.options.services], search: '', operation: '', result: '', start: '', end: '', limit: 100 };
        const noisy = ['Exchange', 'SharePoint', 'Teams'];
        this.filters = { actor: '', selectedServices: this.options.services.filter((s) => !noisy.includes(s)), search: '', operation: '', result: '', start: '', end: '', limit: 100, includeTags: '', excludeTags: '' };
        this.saveFilters();
        this.resetPagination();
        this.loadEvents();
      },

      async askQuestion() {
        const q = this.askQuestionText.trim();
        if (!q) return;
        this.askLoading = true;
        this.askAnswer = '';
        this.askAnswerHtml = '';
        this.askEvents = [];
        this.askLlmError = '';

        const payload = { question: q };
        if (this.filters.selectedServices && this.filters.selectedServices.length) {
          payload.services = this.filters.selectedServices;
        }
        if (this.filters.actor) payload.actor = this.filters.actor;
        if (this.filters.operation) payload.operation = this.filters.operation;
        if (this.filters.result) payload.result = this.filters.result;
        if (this.filters.start) payload.start = new Date(this.filters.start).toISOString();
        if (this.filters.end) payload.end = new Date(this.filters.end).toISOString();
        if (this.filters.includeTags) {
          payload.include_tags = this.filters.includeTags.split(/[,;]+/).map(t => t.trim()).filter(Boolean);
        }
        if (this.filters.excludeTags) {
          payload.exclude_tags = this.filters.excludeTags.split(/[,;]+/).map(t => t.trim()).filter(Boolean);
        }

        try {
          const res = await fetch('/api/ask', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json', ...this.authHeader() },
            body: JSON.stringify(payload),
          });
          if (!res.ok) throw new Error(await res.text());
          const body = await res.json();
          this.askAnswer = body.answer;
          this.askAnswerHtml = this._mdToHtml(body.answer);
          this.askEvents = body.events || [];
          this.askLlmUsed = body.llm_used;
          this.askLlmError = body.llm_error || '';
        } catch (err) {
          this.askAnswer = 'Sorry, something went wrong: ' + (err.message || 'Unknown error');
          this.askAnswerHtml = this.askAnswer;
        } finally {
          this.askLoading = false;
        }
      },

      clearAsk() {
        this.askQuestionText = '';
        this.askAnswer = '';
        this.askAnswerHtml = '';
        this.askEvents = [];
        this.askLlmUsed = false;
        this.askLlmError = '';
      },

      _mdToHtml(text) {
        // Very lightweight markdown-to-HTML for LLM answers
        return text
          .replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;')
          .replace(/\*\*(.+?)\*\*/g, '<strong>$1</strong>')
          .replace(/\*(.+?)\*/g, '<em>$1</em>')
          .replace(/`([^`]+)`/g, '<code>$1</code>')
          .replace(/Event #(\d+)/g, '<strong>Event #$1</strong>')
          .replace(/\n/g, '<br>');
      },

      hasActiveFilters() {
        return this.filters.actor || this.filters.operation || this.filters.result ||
          this.filters.start || this.filters.end || this.filters.includeTags ||
          this.filters.excludeTags ||
          (this.filters.selectedServices && this.filters.selectedServices.length &&
            this.filters.selectedServices.length < this.options.services.length);
      },

      activeFilterSummary() {
        const parts = [];
        if (this.filters.actor) parts.push('actor');
        if (this.filters.operation) parts.push('action');
        if (this.filters.result) parts.push('result');
        if (this.filters.start || this.filters.end) parts.push('time');
        if (this.filters.includeTags || this.filters.excludeTags) parts.push('tags');
        const svcCount = this.filters.selectedServices?.length || 0;
        const allCount = this.options.services?.length || 0;
        if (svcCount && svcCount < allCount) parts.push(`${svcCount} service${svcCount === 1 ? '' : 's'}`);
        return parts.join(', ') || 'none';
      },

      async bulkTagMatching() {
        const tag = prompt('Enter tag to apply to all matching events:');
        if (!tag || !tag.trim()) return;
        const mode = confirm('Click OK to REPLACE existing tags.\nClick Cancel to APPEND the new tag.') ? 'replace' : 'append';
        const params = new URLSearchParams();
        ['actor', 'operation', 'result', 'search'].forEach((key) => {
          const val = this.filters[key];
          if (val) params.append(key, val);
        });
        if (this.filters.selectedServices && this.filters.selectedServices.length) {
          this.filters.selectedServices.forEach((s) => params.append('services', s));
        }
        if (this.filters.includeTags) {
          this.filters.includeTags.split(/[,;]+/).map((t) => t.trim()).filter(Boolean).forEach((t) => params.append('include_tags', t));
        }
        if (this.filters.excludeTags) {
          this.filters.excludeTags.split(/[,;]+/).map((t) => t.trim()).filter(Boolean).forEach((t) => params.append('exclude_tags', t));
        }
        if (this.filters.start) {
          const d = new Date(this.filters.start);
          if (!isNaN(d.getTime())) params.append('start', d.toISOString());
        }
        if (this.filters.end) {
          const d = new Date(this.filters.end);
          if (!isNaN(d.getTime())) params.append('end', d.toISOString());
        }
        this.statusText = 'Applying bulk tag…';
        try {
          const res = await fetch(`/api/events/bulk-tags?${params.toString()}`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json', ...this.authHeader() },
            body: JSON.stringify({ tags: [tag.trim()], mode }),
          });
          if (!res.ok) throw new Error(await res.text());
          const body = await res.json();
          this.statusText = `Tagged ${body.matched} events (${body.modified} modified).`;
          await this.loadEvents();
        } catch (err) {
          this.statusText = err.message || 'Failed to apply bulk tag.';
        }
      },

      displayActor(e) {
        const app = e.actor?.application || e.actor?.app;
        if (app?.displayName) return app.displayName;

@@ -453,9 +827,44 @@
        } catch (err) {
          this.modalBody = `Error serializing event:\n${err.message}\n\nEvent ID: ${e.id || 'N/A'}`;
        }
        this.modalEventId = e.id || '';
        this.modalExplanation = '';
        this.modalExplainError = '';
        this.modalOpen = true;
      },

      async copyRawEvent() {
        if (!this.modalBody) return;
        try {
          await navigator.clipboard.writeText(this.modalBody);
          this.statusText = 'Raw event copied to clipboard.';
          setTimeout(() => { if (this.statusText === 'Raw event copied to clipboard.') this.statusText = ''; }, 2000);
        } catch (err) {
          this.statusText = 'Failed to copy to clipboard.';
        }
      },

      async explainEvent() {
        if (!this.modalEventId) return;
        this.modalExplainLoading = true;
        this.modalExplanation = '';
        this.modalExplainError = '';
        try {
          const res = await fetch(`/api/events/${this.modalEventId}/explain`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json', ...this.authHeader() },
          });
          if (!res.ok) throw new Error(await res.text());
          const body = await res.json();
          this.modalExplanation = body.explanation;
          this.modalExplainError = body.llm_error || '';
        } catch (err) {
          this.modalExplainError = err.message || 'Failed to explain event.';
        } finally {
          this.modalExplainLoading = false;
        }
      },

      async addTag(e, tag) {
        if (!tag.trim()) return;
        const tags = [...(e.tags || []), tag.trim()];
@@ -28,7 +28,115 @@ body {
.page {
  max-width: 1100px;
  margin: 0 auto;
-  padding: 32px 20px 60px;
+  padding: 0 20px 40px;
  display: flex;
  flex-direction: column;
  min-height: 100vh;
}

.topbar {
  display: flex;
  align-items: center;
  gap: 16px;
  padding: 12px 0;
  margin-bottom: 8px;
  border-bottom: 1px solid var(--border);
  flex-wrap: wrap;
}

.topbar__brand {
  display: flex;
  align-items: center;
  gap: 8px;
  font-weight: 700;
  font-size: 16px;
}

.topbar__logo {
  font-size: 20px;
}

.topbar__links {
  display: flex;
  gap: 16px;
  margin-right: auto;
}

.topbar__links a {
  color: var(--muted);
  font-size: 13px;
  text-decoration: none;
  font-weight: 500;
  transition: color 0.15s ease;
}

.topbar__links a:hover {
  color: var(--accent-strong);
}

.topbar__meta {
  display: flex;
  align-items: center;
  gap: 10px;
}

.user-chip {
  display: flex;
  align-items: center;
  gap: 8px;
  background: rgba(255, 255, 255, 0.04);
  border: 1px solid var(--border);
  border-radius: 999px;
  padding: 4px 12px 4px 4px;
}

.user-avatar {
  width: 26px;
  height: 26px;
  border-radius: 50%;
  background: linear-gradient(135deg, var(--accent), var(--accent-strong));
  color: #0b1220;
  font-size: 12px;
  font-weight: 700;
  display: flex;
  align-items: center;
  justify-content: center;
  flex-shrink: 0;
}

.user-details {
  display: flex;
  flex-direction: column;
  line-height: 1.2;
}

.user-name {
  font-size: 12px;
  font-weight: 600;
  color: var(--text);
}

.user-email {
  font-size: 11px;
  color: var(--muted);
}

.login-hint {
  font-size: 12px;
  color: var(--muted);
  font-style: italic;
}

.topbar__actions {
  display: flex;
  gap: 8px;
  align-items: center;
}

.btn--compact {
  padding: 8px 14px;
  font-size: 13px;
  border-radius: 8px;
}

.hero {
@@ -37,6 +145,7 @@ body {
  justify-content: space-between;
  gap: 16px;
  margin-bottom: 20px;
  padding-top: 16px;
}

.eyebrow {
@@ -364,6 +473,30 @@ input {
  margin-bottom: 10px;
}

.modal__actions {
  display: flex;
  gap: 8px;
  align-items: center;
}

.saved-searches {
  display: flex;
  flex-wrap: wrap;
  gap: 8px;
  align-items: center;
  font-size: 13px;
}

.modal__explanation {
  background: rgba(255, 255, 255, 0.03);
  border: 1px solid var(--border);
  border-radius: 10px;
  padding: 12px;
  margin-bottom: 10px;
  font-size: 14px;
  line-height: 1.6;
}

.modal pre {
  background: rgba(255, 255, 255, 0.02);
  color: var(--text);
@@ -377,7 +510,177 @@ input {
  margin: 0;
}

/* Ask / Natural Language Query */
.ask-form {
  margin-top: 10px;
}

.ask-row {
  display: flex;
  gap: 10px;
  align-items: center;
}

.ask-input {
  flex: 1;
  padding: 12px 14px;
  border-radius: 10px;
  border: 1px solid var(--border);
  background: rgba(255, 255, 255, 0.02);
  color: var(--text);
  font-size: 15px;
}

.ask-result {
  margin-top: 16px;
}

.ask-answer {
  background: rgba(125, 211, 252, 0.06);
  border: 1px solid rgba(125, 211, 252, 0.2);
  border-radius: 12px;
  padding: 16px;
  line-height: 1.55;
  margin-bottom: 14px;
}

.ask-answer code {
  background: rgba(255,255,255,0.06);
  padding: 2px 6px;
  border-radius: 6px;
  font-size: 13px;
}

.ask-error {
  background: rgba(249, 115, 22, 0.1);
  border: 1px solid rgba(249, 115, 22, 0.3);
  border-radius: 8px;
  padding: 10px 14px;
  color: #fdba74;
  font-size: 14px;
  margin-bottom: 10px;
}

.ask-filter-hint {
  margin-top: 6px;
  color: var(--muted);
}

.version-badge {
  display: inline-block;
  margin-left: 8px;
  padding: 2px 8px;
  border-radius: 999px;
  background: rgba(125, 211, 252, 0.15);
  border: 1px solid rgba(125, 211, 252, 0.3);
  color: var(--accent-strong);
  font-size: 11px;
  font-weight: 600;
  letter-spacing: 0.05em;
  vertical-align: middle;
}

.ask-events {
  margin-bottom: 14px;
}

.ask-events h4 {
  margin: 0 0 10px;
  color: var(--muted);
  font-size: 14px;
  text-transform: uppercase;
  letter-spacing: 0.06em;
}

.event--compact {
  padding: 12px;
  margin-bottom: 10px;
}

.event--compact h3 {
  font-size: 15px;
}

.source-health {
  display: grid;
  grid-template-columns: repeat(auto-fit, minmax(min(200px, 100%), 1fr));
  gap: 10px;
}

.health-card {
  border: 1px solid var(--border);
  border-radius: 10px;
  padding: 10px 12px;
  background: rgba(255, 255, 255, 0.02);
  display: flex;
  flex-direction: column;
  gap: 4px;
}

.footer {
  margin-top: auto;
  padding: 20px 0;
  border-top: 1px solid var(--border);
  display: flex;
  align-items: center;
  justify-content: space-between;
  gap: 16px;
  flex-wrap: wrap;
  font-size: 13px;
  color: var(--muted);
}

.footer__left {
  display: flex;
  align-items: center;
  gap: 10px;
}

.footer__brand {
  font-weight: 600;
  color: var(--text);
}

.footer__version {
  font-size: 11px;
  padding: 2px 8px;
  border-radius: 999px;
  background: rgba(125, 211, 252, 0.1);
  border: 1px solid rgba(125, 211, 252, 0.2);
  color: var(--accent-strong);
}

.footer__center {
  display: flex;
  gap: 16px;
  align-items: center;
}

.footer__center a {
  color: var(--muted);
  text-decoration: none;
  transition: color 0.15s ease;
}

.footer__center a:hover {
  color: var(--accent-strong);
}

.footer__right {
  font-size: 12px;
}

@media (max-width: 640px) {
  .topbar {
    flex-direction: column;
    align-items: flex-start;
    gap: 10px;
  }

  .topbar__links {
    margin-right: 0;
  }

  .hero {
    flex-direction: column;
  }
@@ -386,4 +689,15 @@ input {
    flex-direction: column;
    align-items: stretch;
  }

  .ask-row {
    flex-direction: column;
    align-items: stretch;
  }

  .footer {
    flex-direction: column;
    text-align: center;
    gap: 10px;
  }
}
@@ -9,10 +9,7 @@ def fetch_audit_logs(hours: int = 24, since: str | None = None, max_pages: int =
    """Fetch paginated directory audit logs from Microsoft Graph and enrich with resolved names."""
    token = get_access_token()
    start_time = since or (datetime.utcnow() - timedelta(hours=hours)).isoformat() + "Z"
-    next_url = (
-        "https://graph.microsoft.com/v1.0/"
-        f"auditLogs/directoryAudits?$filter=activityDateTime ge {start_time}"
-    )
+    next_url = f"https://graph.microsoft.com/v1.0/auditLogs/directoryAudits?$filter=activityDateTime ge {start_time}"
    headers = {"Authorization": f"Bearer {token}"}

    events = []

@@ -1,4 +1,3 @@

from utils.http import get_with_retry

@@ -48,7 +47,10 @@ def resolve_directory_object(object_id: str, token: str, cache: dict[str, dict])

    probes = [
        ("user", f"https://graph.microsoft.com/v1.0/users/{object_id}?$select=id,displayName,userPrincipalName,mail"),
-        ("servicePrincipal", f"https://graph.microsoft.com/v1.0/servicePrincipals/{object_id}?$select=id,displayName,appId,appDisplayName"),
+        (
+            "servicePrincipal",
+            f"https://graph.microsoft.com/v1.0/servicePrincipals/{object_id}?$select=id,displayName,appId,appDisplayName",
+        ),
        ("group", f"https://graph.microsoft.com/v1.0/groups/{object_id}?$select=id,displayName,mail"),
        ("device", f"https://graph.microsoft.com/v1.0/devices/{object_id}?$select=id,displayName"),
    ]
@@ -82,12 +84,7 @@ def resolve_service_principal_owners(sp_id: str, token: str, cache: dict[str, li
    )
    payload = _request_json(url, token)
    for owner in (payload or {}).get("value", []):
-        name = (
-            owner.get("displayName")
-            or owner.get("userPrincipalName")
-            or owner.get("mail")
-            or owner.get("id")
-        )
+        name = owner.get("displayName") or owner.get("userPrincipalName") or owner.get("mail") or owner.get("id")
        if name:
            owners.append(name)

117 backend/jobs.py Normal file
@@ -0,0 +1,117 @@
"""arq job functions for async LLM processing."""

import hashlib
import json

import structlog
from arq.connections import RedisSettings
from config import REDIS_URL

logger = structlog.get_logger("aoc.jobs")

# ---------------------------------------------------------------------------
# Cache helpers
# ---------------------------------------------------------------------------

CACHE_TTL_ASK = 3600  # 1 hour
CACHE_TTL_EXPLAIN = 86400  # 24 hours


def _ask_cache_key(question: str, filters: dict, events: list) -> str:
    payload = json.dumps({"q": question, "f": filters, "e": [e.get("id") for e in events]}, sort_keys=True)
    return f"aoc:cache:ask:{hashlib.md5(payload.encode()).hexdigest()}"


def _explain_cache_key(event_id: str) -> str:
    return f"aoc:cache:explain:{event_id}"


async def get_cached_ask(redis, question: str, filters: dict, events: list) -> dict | None:
    key = _ask_cache_key(question, filters, events)
    raw = await redis.get(key)
    if raw:
        return json.loads(raw)
    return None


async def set_cached_ask(redis, question: str, filters: dict, events: list, result: dict):
    key = _ask_cache_key(question, filters, events)
    await redis.setex(key, CACHE_TTL_ASK, json.dumps(result, default=str))


async def get_cached_explain(redis, event_id: str) -> dict | None:
    key = _explain_cache_key(event_id)
    raw = await redis.get(key)
    if raw:
        return json.loads(raw)
    return None


async def set_cached_explain(redis, event_id: str, result: dict):
    key = _explain_cache_key(event_id)
    await redis.setex(key, CACHE_TTL_EXPLAIN, json.dumps(result, default=str))


# ---------------------------------------------------------------------------
# arq job functions
# ---------------------------------------------------------------------------


async def process_ask_question(
    ctx, question: str, filters: dict, events: list, total: int, excluded_services: list | None
):
    """Background job: call LLM for /api/ask and cache result."""
    from routes.ask import _call_llm

    redis = ctx["redis"]
    try:
        answer = await _call_llm(question, events, total=total, excluded_services=excluded_services)
        result = {"status": "completed", "answer": answer, "llm_used": True, "llm_error": None}
    except Exception as exc:
        logger.warning("Async ask LLM failed", error=str(exc))
        result = {"status": "failed", "answer": "", "llm_used": False, "llm_error": str(exc)}

    await set_cached_ask(redis, question, filters, events, result)
    return result


async def process_explain_event(ctx, event_id: str, event: dict, related: list):
    """Background job: call LLM for /api/events/{id}/explain and cache result."""
    from routes.ask import _explain_event

    redis = ctx["redis"]
    try:
        explanation = await _explain_event(event, related)
        result = {"status": "completed", "explanation": explanation, "llm_used": True, "llm_error": None}
    except Exception as exc:
        logger.warning("Async explain LLM failed", error=str(exc))
        result = {"status": "failed", "explanation": "", "llm_used": False, "llm_error": str(exc)}

    await set_cached_explain(redis, event_id, result)
    return result


# ---------------------------------------------------------------------------
# arq worker configuration
# ---------------------------------------------------------------------------


async def startup(ctx):
    from redis.asyncio import Redis

    ctx["redis"] = Redis.from_url(REDIS_URL, decode_responses=True)


async def shutdown(ctx):
    await ctx["redis"].close()


class WorkerSettings:
    functions = [process_ask_question, process_explain_event]
    redis_settings = RedisSettings.from_dsn(REDIS_URL)
    on_startup = startup
    on_shutdown = shutdown
    max_jobs = 10
    job_timeout = 120
    keep_result = 3600
    keep_result_forever = False
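These functions are executed by a separate arq worker process (started with `arq jobs.WorkerSettings`). A minimal sketch of how the API side could enqueue a job and read the cached result, assuming the pool helper defined in backend/redis_client.py — the five-second wait is purely illustrative:

# Sketch only: enqueue an async explain job, then read its cached result.
import asyncio

from jobs import get_cached_explain
from redis_client import get_arq_pool


async def demo(event_id: str, event: dict):
    pool = await get_arq_pool()
    # arq serialises the arguments and runs process_explain_event in the worker
    await pool.enqueue_job("process_explain_event", event_id, event, [])
    await asyncio.sleep(5)  # crude wait; a real caller would poll
    print(await get_cached_explain(pool, event_id))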
@@ -6,7 +6,7 @@ from pathlib import Path

import structlog
from audit_trail import log_action
-from config import CORS_ORIGINS, ENABLE_PERIODIC_FETCH, FETCH_INTERVAL_MINUTES
+from config import AI_FEATURES_ENABLED, CORS_ORIGINS, ENABLE_PERIODIC_FETCH, FETCH_INTERVAL_MINUTES
from database import setup_indexes
from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
@@ -19,7 +19,9 @@ from routes.events import router as events_router
from routes.fetch import router as fetch_router
from routes.fetch import run_fetch
from routes.health import router as health_router
from routes.jobs import router as jobs_router
from routes.rules import router as rules_router
from routes.saved_searches import router as saved_searches_router
from routes.webhooks import router as webhooks_router


@@ -85,12 +87,14 @@ async def audit_middleware(request: Request, call_next):
    response = await call_next(request)
    if request.url.path.startswith("/api/") and request.method in ("POST", "PATCH", "PUT", "DELETE"):
        from auth import AUTH_ENABLED

        user = "anonymous"
        if AUTH_ENABLED:
            auth_header = request.headers.get("authorization", "")
            if auth_header.lower().startswith("bearer "):
                try:
                    from jose import jwt

                    token = auth_header.split(" ", 1)[1]
                    claims = jwt.get_unverified_claims(token)
                    user = claims.get("sub", "unknown")
@@ -110,12 +114,22 @@ app.include_router(events_router, prefix="/api")
app.include_router(config_router, prefix="/api")
app.include_router(webhooks_router, prefix="/api")
app.include_router(health_router, prefix="/api")
if AI_FEATURES_ENABLED:
    from routes.ask import router as ask_router

    app.include_router(ask_router, prefix="/api")
    from routes.mcp import mcp_asgi

    app.mount("/mcp", mcp_asgi)
app.include_router(saved_searches_router, prefix="/api")
app.include_router(rules_router, prefix="/api")
app.include_router(jobs_router, prefix="/api")


@app.get("/health")
async def health_check():
    from database import db

    try:
        db.command("ping")
        return {"status": "ok", "database": "connected"}
@@ -129,6 +143,13 @@ async def metrics():
    return Response(content=prometheus_metrics(), media_type="text/plain")


@app.get("/api/version")
async def version():
    import os

    return {"version": os.environ.get("VERSION", "unknown")}


frontend_dir = Path(__file__).parent / "frontend"
app.mount("/", StaticFiles(directory=frontend_dir, html=True), name="frontend")

@@ -157,3 +178,6 @@ async def stop_periodic_fetch():
    task.cancel()
    with suppress(Exception):
        await task
    from redis_client import close_redis_connections

    await close_redis_connections()

@@ -6,6 +6,7 @@ new display fields. Example:

    python maintenance.py renormalize --limit 500
"""

import argparse

from database import events_collection
@@ -53,7 +54,9 @@ def dedupe(limit: int = None, batch_size: int = 500) -> int:
    """
    Remove duplicate events based on dedupe_key. Keeps the first occurrence encountered.
    """
-    cursor = events_collection.find({}, projection={"_id": 1, "dedupe_key": 1, "raw": 1, "id": 1, "timestamp": 1}).sort("timestamp", 1)
+    cursor = events_collection.find({}, projection={"_id": 1, "dedupe_key": 1, "raw": 1, "id": 1, "timestamp": 1}).sort(
+        "timestamp", 1
+    )
    if limit:
        cursor = cursor.limit(int(limit))

187 backend/mcp_common.py Normal file
@@ -0,0 +1,187 @@
"""Shared MCP tool handlers used by both stdio and SSE transports."""

import json
import re
from datetime import UTC, datetime, timedelta

from database import events_collection
from mcp.types import TextContent


async def handle_search_events(arguments: dict) -> list[TextContent]:
    days = arguments.get("days", 7)
    limit = min(arguments.get("limit", 20), 100)
    since = (datetime.now(UTC) - timedelta(days=days)).isoformat().replace("+00:00", "Z")

    filters = [{"timestamp": {"$gte": since}}]

    services = arguments.get("services")
    if services:
        filters.append({"service": {"$in": services}})

    operation = arguments.get("operation")
    if operation:
        filters.append({"operation": {"$regex": operation, "$options": "i"}})

    result = arguments.get("result")
    if result:
        filters.append({"result": {"$regex": result, "$options": "i"}})

    entity = arguments.get("entity")
    if entity:
        # re.escape covers every regex metacharacter (escaping only . ( ) would miss [, *, +, ?)
        entity_safe = re.escape(entity)
        filters.append(
            {
                "$or": [
                    {"target_displays": {"$elemMatch": {"$regex": entity_safe, "$options": "i"}}},
                    {"actor_display": {"$regex": entity_safe, "$options": "i"}},
                    {"actor_upn": {"$regex": entity_safe, "$options": "i"}},
                    {"raw_text": {"$regex": entity_safe, "$options": "i"}},
                ]
            }
        )

    query = {"$and": filters}
    cursor = events_collection.find(query).sort("timestamp", -1).limit(limit)
    events = list(cursor)

    if not events:
        return [TextContent(type="text", text="No matching events found.")]

    lines = [f"Found {len(events)} event(s):\n"]
    for e in events:
        ts = e.get("timestamp", "?")[:16].replace("T", " ")
        svc = e.get("service", "?")
        op = e.get("operation", "?")
        actor = e.get("actor_display", "?")
        result_str = e.get("result", "?")
        lines.append(f"{ts} | {svc} | {op} | {actor} | {result_str}")

    return [TextContent(type="text", text="\n".join(lines))]


async def handle_get_event(arguments: dict) -> list[TextContent]:
    event_id = arguments["event_id"]
    event = events_collection.find_one({"id": event_id})
    if not event:
        return [TextContent(type="text", text=f"Event {event_id} not found.")]
    event.pop("_id", None)
    return [TextContent(type="text", text=json.dumps(event, indent=2, default=str))]


async def handle_get_summary(arguments: dict) -> list[TextContent]:
    days = arguments.get("days", 7)
    since = (datetime.now(UTC) - timedelta(days=days)).isoformat().replace("+00:00", "Z")
    query = {"timestamp": {"$gte": since}}

    total = events_collection.count_documents(query)
    if total == 0:
        return [TextContent(type="text", text="No events in the specified period.")]

    svc_pipeline = [
        {"$match": query},
        {"$group": {"_id": "$service", "count": {"$sum": 1}}},
        {"$sort": {"count": -1}},
        {"$limit": 10},
    ]
    op_pipeline = [
        {"$match": query},
        {"$group": {"_id": "$operation", "count": {"$sum": 1}}},
        {"$sort": {"count": -1}},
        {"$limit": 10},
    ]
    result_pipeline = [
        {"$match": query},
        {"$group": {"_id": "$result", "count": {"$sum": 1}}},
        {"$sort": {"count": -1}},
    ]
    actor_pipeline = [
        {"$match": query},
        {"$group": {"_id": "$actor_display", "count": {"$sum": 1}}},
        {"$sort": {"count": -1}},
        {"$limit": 10},
    ]

    svc_counts = list(events_collection.aggregate(svc_pipeline))
    op_counts = list(events_collection.aggregate(op_pipeline))
    result_counts = list(events_collection.aggregate(result_pipeline))
    actor_counts = list(events_collection.aggregate(actor_pipeline))

    lines = [f"Summary for the last {days} days ({total} total events)\n"]

    lines.append("By service:")
    for row in svc_counts:
        lines.append(f"  {row['_id'] or 'Unknown'}: {row['count']}")

    lines.append("\nBy action:")
    for row in op_counts:
        lines.append(f"  {row['_id'] or 'Unknown'}: {row['count']}")

    lines.append("\nBy result:")
    for row in result_counts:
        lines.append(f"  {row['_id'] or 'Unknown'}: {row['count']}")

    lines.append("\nTop actors:")
    for row in actor_counts:
        lines.append(f"  {row['_id'] or 'Unknown'}: {row['count']}")

    return [TextContent(type="text", text="\n".join(lines))]


async def handle_ask(arguments: dict) -> list[TextContent]:
    """For now, returns recent events + guidance. In the future this could call the LLM backend."""
    question = arguments["question"]
    days = arguments.get("days", 7)

    result = await handle_search_events({"entity": "", "days": days, "limit": 50})
    base_text = result[0].text if result else ""

    text = (
        f"You asked: '{question}'\n\n"
        f"Here are the most recent events from the last {days} days:\n\n"
        f"{base_text}\n\n"
        f"Tip: Use the 'search_events' tool with specific filters "
        f"to narrow down the dataset before asking follow-up questions."
    )
    return [TextContent(type="text", text=text)]


# JSON schemas for tool definitions
SEARCH_EVENTS_SCHEMA = {
    "type": "object",
    "properties": {
        "entity": {"type": "string", "description": "Device name, user UPN, or email to search for"},
        "services": {
            "type": "array",
            "items": {"type": "string"},
            "description": "Filter by service (e.g. Intune, Directory, Exchange)",
        },
        "operation": {"type": "string", "description": "Filter by operation name"},
        "result": {"type": "string", "description": "Filter by result (success, failure)"},
        "days": {"type": "integer", "description": "Number of days to look back (default 7)"},
        "limit": {"type": "integer", "description": "Max events to return (default 20)"},
    },
}

GET_EVENT_SCHEMA = {
    "type": "object",
    "properties": {
        "event_id": {"type": "string", "description": "The event ID to retrieve"},
    },
    "required": ["event_id"],
}

GET_SUMMARY_SCHEMA = {
    "type": "object",
    "properties": {
        "days": {"type": "integer", "description": "Number of days to summarise (default 7)"},
    },
}

ASK_SCHEMA = {
    "type": "object",
    "properties": {
        "question": {"type": "string", "description": "Natural language question about audit logs"},
        "days": {"type": "integer", "description": "Number of days to look back (default 7)"},
    },
    "required": ["question"],
}
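Since these handlers are plain async functions over the events collection, they can be exercised without any MCP transport. A minimal sketch (assumes a reachable MongoDB configured for the backend):

# Sketch: call a tool handler directly, bypassing the MCP transport.
import asyncio

from mcp_common import handle_get_summary

print(asyncio.run(handle_get_summary({"days": 3}))[0].text)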
88 backend/mcp_server.py Normal file
@@ -0,0 +1,88 @@
#!/usr/bin/env python3
"""
AOC MCP Server — stdio transport

Standalone MCP server for local use (Claude Desktop, Cursor, etc.).
For the HTTP/SSE version (production, behind auth), see routes/mcp.py.

Usage:
    python mcp_server.py

Claude Desktop config (~/.config/claude/claude_desktop_config.json):
    {
      "mcpServers": {
        "aoc": {
          "command": "python",
          "args": ["/path/to/aoc/backend/mcp_server.py"],
          "env": {"MONGO_URI": "mongodb://..."}
        }
      }
    }
"""

import asyncio
import os
import sys

# Ensure backend modules are importable when run standalone
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import TextContent, Tool
from mcp_common import (
    ASK_SCHEMA,
    GET_EVENT_SCHEMA,
    GET_SUMMARY_SCHEMA,
    SEARCH_EVENTS_SCHEMA,
    handle_ask,
    handle_get_event,
    handle_get_summary,
    handle_search_events,
)

app = Server("aoc")


@app.list_tools()
async def list_tools() -> list[Tool]:
    return [
        Tool(
            name="search_events",
            description="Search audit events by entity, service, operation, or result.",
            inputSchema=SEARCH_EVENTS_SCHEMA,
        ),
        Tool(name="get_event", description="Retrieve a single audit event by its ID.", inputSchema=GET_EVENT_SCHEMA),
        Tool(
            name="get_summary",
            description="Get an aggregated summary of audit activity for the last N days.",
            inputSchema=GET_SUMMARY_SCHEMA,
        ),
        Tool(
            name="ask",
            description="Ask a natural language question about audit logs. Returns a narrative answer.",
            inputSchema=ASK_SCHEMA,
        ),
    ]


@app.call_tool()
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
    if name == "search_events":
        return await handle_search_events(arguments)
    if name == "get_event":
        return await handle_get_event(arguments)
    if name == "get_summary":
        return await handle_get_summary(arguments)
    if name == "ask":
        return await handle_ask(arguments)
    raise ValueError(f"Unknown tool: {name}")


async def main():
    async with stdio_server() as (read_stream, write_stream):
        await app.run(read_stream, write_stream, app.create_initialization_options())


if __name__ == "__main__":
    asyncio.run(main())
@@ -1,4 +1,3 @@

from prometheus_client import Counter, Histogram, generate_latest

REQUEST_DURATION = Histogram(

@@ -54,6 +54,11 @@ class TagsUpdateRequest(BaseModel):
    tags: list[str]


class BulkTagsRequest(BaseModel):
    tags: list[str]
    mode: str = "append"  # "append" or "replace"


class CommentAddRequest(BaseModel):
    text: str

@@ -65,3 +70,36 @@ class AlertRuleResponse(BaseModel):
    severity: str
    conditions: list[dict]
    message: str


class AskRequest(BaseModel):
    question: str
    services: list[str] | None = None
    actor: str | None = None
    operation: str | None = None
    result: str | None = None
    start: str | None = None
    end: str | None = None
    include_tags: list[str] | None = None
    exclude_tags: list[str] | None = None
    async_mode: bool = False  # enqueue async job instead of waiting


class AskEventRef(BaseModel):
    id: str | None = None
    timestamp: str | None = None
    operation: str | None = None
    actor_display: str | None = None
    target_displays: list[str] | None = None
    display_summary: str | None = None
    service: str | None = None
    result: str | None = None


class AskResponse(BaseModel):
    answer: str
    events: list[AskEventRef]
    query_info: dict
    llm_used: bool
    llm_error: str | None = None
    job_id: str | None = None
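For orientation, a request body matching AskRequest might look like the following (all values hypothetical):

# Hypothetical /api/ask payload shaped by the AskRequest model above.
payload = {
    "question": "Which devices were wiped last week?",
    "services": ["Intune", "Device"],
    "result": "success",
    "async_mode": False,  # set True to enqueue a background job instead
}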
@@ -75,10 +75,7 @@ def _target_types(targets: list) -> list:
    types = []
    for t in targets or []:
        resolved = t.get("_resolved") or {}
-        t_type = (
-            resolved.get("type")
-            or t.get("type")
-        )
+        t_type = resolved.get("type") or t.get("type")
        if t_type:
            types.append(t_type)
    return types
@@ -101,7 +98,9 @@ def _display_summary(operation: str, target_labels: list, actor_label: str, targ
    return " | ".join(pieces)


-def _render_summary(template: str, operation: str, actor: str, target: str, category: str, result: str, service: str) -> str:
+def _render_summary(
+    template: str, operation: str, actor: str, target: str, category: str, result: str, service: str
+) -> str:
    try:
        return template.format(
            operation=operation or category or "Event",
@@ -177,13 +176,16 @@ def normalize_event(e):
    else:
        display_actor_value = actor_label

-    dedupe_key = _make_dedupe_key(e, {
-        "id": e.get("id"),
-        "timestamp": e.get("activityDateTime"),
-        "service": e.get("category"),
-        "operation": e.get("activityDisplayName"),
-        "target_displays": target_labels,
-    })
+    dedupe_key = _make_dedupe_key(
+        e,
+        {
+            "id": e.get("id"),
+            "timestamp": e.get("activityDateTime"),
+            "service": e.get("category"),
+            "operation": e.get("activityDisplayName"),
+            "target_displays": target_labels,
+        },
+    )

    return {
        "id": e.get("id"),

36 backend/redis_client.py Normal file
@@ -0,0 +1,36 @@
"""Async Redis client singleton for caching and job queue."""

import redis.asyncio as aioredis
from arq import create_pool
from arq.connections import ArqRedis, RedisSettings
from config import REDIS_URL

_arq_pool: ArqRedis | None = None
_plain_redis: aioredis.Redis | None = None


async def get_arq_pool() -> ArqRedis:
    """Return a shared arq pool (ArqRedis extends redis.asyncio.Redis)."""
    global _arq_pool
    if _arq_pool is None:
        _arq_pool = await create_pool(RedisSettings.from_dsn(REDIS_URL))
    return _arq_pool


async def get_redis() -> aioredis.Redis:
    """Return a shared plain async Redis client."""
    global _plain_redis
    if _plain_redis is None:
        _plain_redis = aioredis.from_url(REDIS_URL, decode_responses=True)
    return _plain_redis


async def close_redis_connections():
    """Close all Redis connections (call on shutdown)."""
    global _arq_pool, _plain_redis
    if _arq_pool:
        await _arq_pool.close()
        _arq_pool = None
    if _plain_redis:
        await _plain_redis.close()
        _plain_redis = None
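The plain client is also convenient for ad-hoc cache inspection. A sketch — the key scheme is the one defined in backend/jobs.py, the event ID is hypothetical:

# Sketch: peek at a cached explanation directly in Valkey/Redis.
import asyncio

from redis_client import get_redis


async def peek(event_id: str):
    redis = await get_redis()
    return await redis.get(f"aoc:cache:explain:{event_id}")

print(asyncio.run(peek("example-event-id")))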
@@ -11,3 +11,8 @@ pydantic-settings
structlog
tenacity
prometheus-client
httpx
gunicorn
mcp
redis
arq

871 backend/routes/ask.py Normal file
@@ -0,0 +1,871 @@
import asyncio
import json
import re
from datetime import UTC, datetime, timedelta

import httpx
import structlog
from auth import require_auth, user_can_access_privacy_services
from config import (
    LLM_API_KEY,
    LLM_API_VERSION,
    LLM_BASE_URL,
    LLM_MAX_EVENTS,
    LLM_MODEL,
    LLM_TIMEOUT_SECONDS,
    PRIVACY_SENSITIVE_OPERATIONS,
    PRIVACY_SERVICES,
)
from database import events_collection
from fastapi import APIRouter, Depends, HTTPException
from jobs import get_cached_ask, get_cached_explain, set_cached_ask, set_cached_explain
from models.api import AskRequest, AskResponse
from redis_client import get_arq_pool

router = APIRouter(dependencies=[Depends(require_auth)])
logger = structlog.get_logger("aoc.ask")

# ---------------------------------------------------------------------------
# Intent extraction — map question keywords to relevant audit services
# ---------------------------------------------------------------------------

_SERVICE_INTENTS = {
    "intune": ["Intune"],
    "device": ["Intune", "Device"],
    "laptop": ["Intune", "Device"],
    "mobile": ["Intune", "Device"],
    "phone": ["Intune", "Device"],
    "ipad": ["Intune", "Device"],
    "app": ["Intune", "ApplicationManagement"],
    "application": ["Intune", "ApplicationManagement"],
    "policy": ["Intune", "Policy"],
    "compliance": ["Intune", "Policy"],
    "user": ["Directory", "UserManagement"],
    "group": ["Directory", "GroupManagement"],
    "role": ["Directory", "RoleManagement"],
    "permission": ["Directory", "RoleManagement"],
    "license": ["Directory", "License"],
    "email": ["Exchange"],
    "mailbox": ["Exchange"],
    "mail": ["Exchange"],
    "message": ["Exchange", "Teams"],
    "file": ["SharePoint"],
    "sharepoint": ["SharePoint"],
    "site": ["SharePoint"],
    "document": ["SharePoint"],
    "team": ["Teams"],
    "channel": ["Teams"],
    "meeting": ["Teams"],
    "call": ["Teams"],
}

# Services that are extremely noisy for typical admin questions.
# We exclude them by default on broad questions unless the user explicitly mentions them.
_NOISY_SERVICES = {"Exchange", "SharePoint", "Teams"}

# Services that are generally admin-relevant and kept by default.
_DEFAULT_ADMIN_SERVICES = {
    "Directory",
    "UserManagement",
    "GroupManagement",
    "RoleManagement",
    "ApplicationManagement",
    "Intune",
    "Device",
    "Policy",
    "Teams",
    "License",
}


def _extract_intent_services(question: str) -> tuple[list[str] | None, bool]:
    """
    Extract relevant services from the question.

    Returns:
        (services, is_explicit):
        - services: list of service names to query, or None for default admin set
        - is_explicit: True if the user explicitly mentioned a noisy service
    """
    q_lower = question.lower()
    tokens = set(re.findall(r"\b[a-z]+\b", q_lower))

    matched_services = set()
    for token, services in _SERVICE_INTENTS.items():
        if token in tokens:
            matched_services.update(services)

    if matched_services:
        # User asked something specific — return exactly what they asked for
        is_explicit = not matched_services.isdisjoint(_NOISY_SERVICES)
        return sorted(matched_services), is_explicit

    # Broad question with no clear intent — default to admin-relevant services only
    return None, False
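A few hypothetical questions and what the extractor above returns for them:

# _extract_intent_services("Did anyone wipe a device yesterday?")
#     -> (['Device', 'Intune'], False)
# _extract_intent_services("Who accessed the CEO mailbox?")
#     -> (['Exchange'], True)   # noisy service named explicitly, so it is kept
# _extract_intent_services("What changed recently?")
#     -> (None, False)          # broad question: fall back to the default admin set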

# ---------------------------------------------------------------------------
# Smart sampling — stratified by importance so the LLM sees signal, not noise
# ---------------------------------------------------------------------------


def _smart_sample(events: list[dict], max_events: int = 200) -> list[dict]:
    """
    Return a curated subset that preserves diversity and prioritises signal.

    Tiers:
        1. Failures (always valuable)
        2. High-admin-value services (Intune, Device, Directory, etc.)
        3. Everything else
    """
    if len(events) <= max_events:
        return events

    high_value = {
        "Directory",
        "UserManagement",
        "GroupManagement",
        "RoleManagement",
        "Intune",
        "Device",
        "Policy",
        "ApplicationManagement",
    }

    failures = [e for e in events if str(e.get("result") or "").lower() in ("failure", "failed")]
    high_val = [e for e in events if e.get("service") in high_value and e not in failures]
    rest = [e for e in events if e not in failures and e not in high_val]

    # Allocate slots: half to failures+high-value, half to rest (but never let rest dominate)
    slots = max_events
    failure_cap = min(len(failures), max(10, slots // 4))
    high_cap = min(len(high_val), max(20, slots // 4))
    rest_cap = slots - failure_cap - high_cap

    sampled = failures[:failure_cap] + high_val[:high_cap] + rest[:rest_cap]
    # Sort back to reverse-chronological order (newest first)
    sampled.sort(key=lambda e: e.get("timestamp") or "", reverse=True)
    return sampled
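To illustrate the allocation arithmetic: with the default max_events of 200 and an over-full result set in every tier, the caps work out as follows:

# failure_cap = max(10, 200 // 4) = 50
# high_cap    = max(20, 200 // 4) = 50
# rest_cap    = 200 - 50 - 50     = 100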

# ---------------------------------------------------------------------------
# Time-range extraction
# ---------------------------------------------------------------------------

_TIME_PATTERNS = [
    (r"\blast\s+(\d+)\s+days?\b", "days"),
    (r"\blast\s+(\d+)\s+hours?\b", "hours"),
    (r"\blast\s+(\d+)\s+minutes?\b", "minutes"),
    (r"\blast\s+week\b", "week"),
    (r"\byesterday\b", "yesterday"),
    (r"\btoday\b", "today"),
    (r"\bin\s+the\s+last\s+(\d+)\s+days?\b", "days"),
    (r"\bin\s+the\s+last\s+(\d+)\s+hours?\b", "hours"),
]


def _extract_time_range(question: str) -> tuple[str | None, str | None]:
    """Return (start_iso, end_iso) or (None, None) if no time detected."""
    now = datetime.now(UTC)
    q_lower = question.lower()

    for pattern, unit in _TIME_PATTERNS:
        m = re.search(pattern, q_lower)
        if not m:
            continue
        if unit == "week":
            start = now - timedelta(days=7)
        elif unit == "yesterday":
            start = now - timedelta(days=1)
        elif unit == "today":
            start = now.replace(hour=0, minute=0, second=0, microsecond=0)
        else:
            num = int(m.group(1))
            delta = {"days": timedelta(days=num), "hours": timedelta(hours=num), "minutes": timedelta(minutes=num)}[
                unit
            ]
            start = now - delta
        return start.isoformat().replace("+00:00", "Z"), now.isoformat().replace("+00:00", "Z")

    return None, None


# ---------------------------------------------------------------------------
# Entity extraction
# ---------------------------------------------------------------------------

_ENTITY_HINTS = [
    r"device\s+['\"]?([^'\"\s]+)['\"]?",
    r"user\s+['\"]?([^'\"\s]+)['\"]?",
    r"laptop\s+['\"]?([^'\"\s]+)['\"]?",
    r"vm\s+['\"]?([^'\"\s]+)['\"]?",
    r"server\s+['\"]?([^'\"\s]+)['\"]?",
    r"computer\s+['\"]?([^'\"\s]+)['\"]?",
]

_EMAIL_RE = re.compile(r"[\w.+-]+@[\w-]+\.[\w.-]+")


def _extract_entity(question: str) -> str | None:
    """Best-effort extraction of the device / user / entity name."""
    q_lower = question.lower()

    # Look for explicit hints: "device ABC123"
    for pattern in _ENTITY_HINTS:
        m = re.search(pattern, q_lower)
        if m:
            # Extract from the original question to preserve case
            start, end = m.span(1)
            return question[start:end].strip().rstrip("?.!,;:")

    # Look for quoted strings
    m = re.search(r'"([^"]{2,50})"', question)
    if m:
        return m.group(1).strip()
    m = re.search(r"'([^']{2,50})'", question)
    if m:
        return m.group(1).strip()

    # Look for email addresses
    m = _EMAIL_RE.search(question)
    if m:
        return m.group(0)

    return None


# ---------------------------------------------------------------------------
# MongoDB query builder
# ---------------------------------------------------------------------------


def _build_event_query(
    entity: str | None,
    start: str | None,
    end: str | None,
    services: list[str] | None = None,
    actor: str | None = None,
    operation: str | None = None,
    result: str | None = None,
    include_tags: list[str] | None = None,
    exclude_tags: list[str] | None = None,
) -> dict:
    filters = []

    if start or end:
        time_filter = {}
        if start:
            time_filter["$gte"] = start
        if end:
            time_filter["$lte"] = end
        filters.append({"timestamp": time_filter})

    if entity:
        entity_safe = re.escape(entity)
        filters.append(
            {
                "$or": [
                    {"target_displays": {"$elemMatch": {"$regex": entity_safe, "$options": "i"}}},
                    {"actor_display": {"$regex": entity_safe, "$options": "i"}},
                    {"actor_upn": {"$regex": entity_safe, "$options": "i"}},
                    {"raw_text": {"$regex": entity_safe, "$options": "i"}},
                ]
            }
        )

    if services:
        filters.append({"service": {"$in": services}})
    if actor:
        actor_safe = re.escape(actor)
        filters.append(
            {
                "$or": [
                    {"actor_display": {"$regex": actor_safe, "$options": "i"}},
                    {"actor_upn": {"$regex": actor_safe, "$options": "i"}},
                    {"actor.user.userPrincipalName": {"$regex": actor_safe, "$options": "i"}},
                ]
            }
        )
    if operation:
        filters.append({"operation": {"$regex": re.escape(operation), "$options": "i"}})
    if result:
        filters.append({"result": {"$regex": re.escape(result), "$options": "i"}})
    if include_tags:
        filters.append({"tags": {"$all": include_tags}})
    if exclude_tags:
        filters.append({"tags": {"$not": {"$all": exclude_tags}}})

    return {"$and": filters} if filters else {}
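To make the output shape concrete, a sketch of the query this builder returns for a device question (field values hypothetical):

# Example: query produced for a device question (values hypothetical).
q = _build_event_query(entity="LAPTOP-42", start="2025-01-01T00:00:00Z", end=None, services=["Intune"])
# q == {"$and": [
#     {"timestamp": {"$gte": "2025-01-01T00:00:00Z"}},
#     {"$or": [...case-insensitive regex matches on target_displays, actor fields, raw_text...]},
#     {"service": {"$in": ["Intune"]}},
# ]}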
# ---------------------------------------------------------------------------
# LLM summarisation
# ---------------------------------------------------------------------------

_SYSTEM_PROMPT = """You are an IT operations assistant. An administrator has asked a question about audit logs.
Your job is to read the data below and write a concise, plain-language answer.

The input may be either:
- A small list of individual audit events (numbered Event #1, #2, etc.), or
- An aggregated overview with counts by service, action, result, and actor, plus sample events.

Rules:
- Assume the reader is a non-expert admin.
- For aggregated overviews: summarise the scale, top patterns, and highlight anomalies or failures.
- For small event lists: group related events together and tell a coherent story.
- Highlight anything unusual, failed actions, or privilege escalations.
- Reference specific event numbers (e.g., "Event #3") when making claims so the user can verify.
- If the data is an aggregated subset of a larger result set, acknowledge the scale (e.g., "847 events occurred — the top pattern was...").
- If there are no events, say so clearly.
- Keep the answer under 300 words.
- Do not invent events or patterns that are not supported by the data.
"""


def _aggregate_counts(events: list[dict]) -> dict:
    """Build lightweight aggregation tables for large result sets."""
    from collections import Counter

    svc_counts = Counter(e.get("service") or "Unknown" for e in events)
    op_counts = Counter(e.get("operation") or "Unknown" for e in events)
    result_counts = Counter(e.get("result") or "Unknown" for e in events)
    actor_counts = Counter(e.get("actor_display") or "Unknown" for e in events)
    return {
        "services": svc_counts.most_common(10),
        "operations": op_counts.most_common(10),
        "results": result_counts.most_common(5),
        "actors": actor_counts.most_common(10),
    }


def _format_events_for_llm(
    events: list[dict], total: int | None = None, excluded_services: list[str] | None = None
) -> str:
    lines = []

    # If we have a large result set, send aggregation + samples instead of a raw dump
    if total is not None and total > len(events) and len(events) >= 50:
        lines.append(f"Result set overview: {total} total events (showing a curated sample of {len(events)}).\n")
        if excluded_services:
            lines.append(f"Note: high-volume services excluded by default: {', '.join(excluded_services)}.\n")
        agg = _aggregate_counts(events)
        lines.append("Breakdown by service:")
        for svc, cnt in agg["services"]:
            lines.append(f"  {svc}: {cnt}")
        lines.append("\nBreakdown by action:")
        for op, cnt in agg["operations"]:
            lines.append(f"  {op}: {cnt}")
        lines.append("\nBreakdown by result:")
        for res, cnt in agg["results"]:
            lines.append(f"  {res}: {cnt}")
        lines.append("\nTop actors:")
        for actor, cnt in agg["actors"]:
            lines.append(f"  {actor}: {cnt}")
        # Include failures and a few recent samples
        failures = [e for e in events if str(e.get("result") or "").lower() in ("failure", "failed")]
        if failures:
            lines.append(f"\nFailures ({len(failures)}):")
            for e in failures[:10]:
                ts = e.get("timestamp", "?")[:16].replace("T", " ")
                op = e.get("operation", "unknown")
                actor = e.get("actor_display", "unknown")
                lines.append(f"  {ts} — {op} by {actor}")
        lines.append("\nMost recent sample events:")
    else:
        if total is not None and total > len(events):
            lines.append(f"Showing {len(events)} of {total} total matching events (most recent first):\n")

    # Always include the first N raw events as detail (up to 50)
    for i, e in enumerate(events[:50], 1):
        ts = e.get("timestamp") or "unknown time"
        op = e.get("operation") or "unknown action"
        actor = e.get("actor_display") or "unknown actor"
        targets = ", ".join(e.get("target_displays") or []) or "unknown target"
        svc = e.get("service") or "unknown service"
        result = e.get("result") or "unknown result"
        summary = e.get("display_summary") or ""
        lines.append(
            f"Event #{i}\n"
            f"  Time: {ts}\n"
            f"  Service: {svc}\n"
            f"  Action: {op}\n"
            f"  Actor: {actor}\n"
            f"  Target: {targets}\n"
            f"  Result: {result}\n"
            f"  Summary: {summary}\n"
        )
    return "\n".join(lines)


def _build_chat_url(base_url: str, api_version: str) -> str:
    """Construct the chat completions URL, handling Azure OpenAI endpoints."""
    base = base_url.rstrip("/")
    url = base if base.endswith("/chat/completions") else f"{base}/chat/completions"
    if api_version:
        url = f"{url}?api-version={api_version}"
    return url
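A couple of illustrative inputs and outputs for this helper (the endpoints are placeholders, not real deployments):

# _build_chat_url("https://api.openai.com/v1", "")
#     -> "https://api.openai.com/v1/chat/completions"
# _build_chat_url("https://myres.openai.azure.com/openai/deployments/mydep", "2025-01-01-preview")
#     -> "https://myres.openai.azure.com/openai/deployments/mydep/chat/completions?api-version=2025-01-01-preview"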
async def _call_llm(
|
||||
question: str,
|
||||
events: list[dict],
|
||||
total: int | None = None,
|
||||
excluded_services: list[str] | None = None,
|
||||
) -> str:
|
||||
if not LLM_API_KEY:
|
||||
raise RuntimeError("LLM_API_KEY not configured")
|
||||
|
||||
context = _format_events_for_llm(events, total=total, excluded_services=excluded_services)
|
||||
messages = [
|
||||
{"role": "system", "content": _SYSTEM_PROMPT},
|
||||
{
|
||||
"role": "user",
|
||||
"content": f"Question: {question}\n\nAudit events:\n{context}\n\nPlease answer the question based only on the events above.",
|
||||
},
|
||||
]
|
||||
|
||||
url = _build_chat_url(LLM_BASE_URL, LLM_API_VERSION)
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
# Azure OpenAI uses api-key header; standard OpenAI uses Bearer token
|
||||
if "azure" in LLM_BASE_URL.lower() or "cognitiveservices" in LLM_BASE_URL.lower():
|
||||
headers["api-key"] = LLM_API_KEY
|
||||
else:
|
||||
headers["Authorization"] = f"Bearer {LLM_API_KEY}"
|
||||
|
||||
payload = {
|
||||
"model": LLM_MODEL,
|
||||
"messages": messages,
|
||||
"max_completion_tokens": 800,
|
||||
}
|
||||
|
||||
async with httpx.AsyncClient(timeout=LLM_TIMEOUT_SECONDS) as client:
|
||||
resp = await client.post(url, headers=headers, json=payload)
|
||||
if resp.status_code >= 400:
|
||||
body = resp.text
|
||||
logger.error("LLM API error", status_code=resp.status_code, url=url, response_body=body)
|
||||
raise RuntimeError(f"LLM API error {resp.status_code}: {body[:500]}")
|
||||
data = resp.json()
|
||||
return data["choices"][0]["message"]["content"].strip()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# API endpoint
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _to_event_ref(e: dict) -> dict:
|
||||
return {
|
||||
"id": e.get("id"),
|
||||
"timestamp": e.get("timestamp"),
|
||||
"operation": e.get("operation"),
|
||||
"actor_display": e.get("actor_display"),
|
||||
"target_displays": e.get("target_displays"),
|
||||
"display_summary": e.get("display_summary"),
|
||||
"service": e.get("service"),
|
||||
"result": e.get("result"),
|
||||
}
|
||||
|
||||
|
||||
_EXPLAIN_SYSTEM_PROMPT = """You are a Microsoft 365 security and compliance expert.
|
||||
An administrator needs help understanding an audit event.
|
||||
|
||||
Your task:
|
||||
1. Explain what happened in plain language (1-2 sentences).
|
||||
2. Identify who performed the action and what was the target.
|
||||
3. Assess whether this is typical admin activity or something to investigate.
|
||||
4. Highlight any security implications (privilege escalation, unusual actor, after-hours activity, etc.).
|
||||
5. Suggest what the admin should do next, if anything.
|
||||
|
||||
Keep the answer under 200 words. Use bullet points for readability.
|
||||
Do not invent facts that are not in the data.
|
||||
"""
|
||||
|
||||
|
||||
_GUID_RE = re.compile(r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
|
||||
|
||||
|
||||
def _extract_guids(obj: dict | list | str) -> set[str]:
|
||||
"""Recursively extract UUID-like strings from a JSON structure."""
|
||||
guids = set()
|
||||
if isinstance(obj, dict):
|
||||
for k, v in obj.items():
|
||||
if k.lower() in ("id", "groupid", "userid", "targetid") and isinstance(v, str) and _GUID_RE.match(v):
|
||||
guids.add(v)
|
||||
guids.update(_extract_guids(v))
|
||||
elif isinstance(obj, list):
|
||||
for item in obj:
|
||||
guids.update(_extract_guids(item))
|
||||
elif isinstance(obj, str) and _GUID_RE.match(obj):
|
||||
guids.add(obj)
|
||||
return guids
|
||||
|
||||
|
||||
async def _resolve_guids_for_event(event: dict) -> dict[str, str]:
|
||||
"""Try to resolve GUIDs in an event to human-readable names via Graph API."""
|
||||
raw = event.get("raw") or {}
|
||||
guids = _extract_guids(raw)
|
||||
# Also include any GUIDs in targetResources that might not have displayName
|
||||
for tr in raw.get("targetResources") or []:
|
||||
tid = tr.get("id")
|
||||
if tid and _GUID_RE.match(tid):
|
||||
guids.add(tid)
|
||||
for tr in raw.get("modifiedProperties") or []:
|
||||
for key in ("oldValue", "newValue"):
|
||||
val = tr.get(key)
|
||||
if val and _GUID_RE.match(val):
|
||||
guids.add(val)
|
||||
|
||||
if not guids:
|
||||
return {}
|
||||
|
||||
try:
|
||||
from graph.auth import get_access_token
|
||||
from graph.resolve import resolve_directory_object
|
||||
|
||||
token = await asyncio.to_thread(get_access_token)
|
||||
cache: dict[str, dict] = {}
|
||||
resolved = {}
|
||||
for gid in guids:
|
||||
result = await asyncio.to_thread(resolve_directory_object, gid, token, cache)
|
||||
if result:
|
||||
resolved[gid] = result["name"]
|
||||
return resolved
|
||||
except Exception as exc:
|
||||
logger.warning("GUID resolution failed", error=str(exc))
|
||||
return {}
|
||||
|
||||
|
||||
async def _explain_event(event: dict, related: list[dict]) -> str:
|
||||
if not LLM_API_KEY:
|
||||
raise RuntimeError("LLM_API_KEY not configured")
|
||||
|
||||
# Resolve GUIDs to names before sending to LLM
|
||||
resolved = await _resolve_guids_for_event(event)
|
||||
|
||||
event_text = json.dumps(event, indent=2, default=str)
|
||||
resolution_text = ""
|
||||
if resolved:
|
||||
resolution_text = "\nResolved GUIDs:\n"
|
||||
for gid, name in resolved.items():
|
||||
resolution_text += f" {gid} → {name}\n"
|
||||
|
||||
related_text = ""
|
||||
if related:
|
||||
related_text = "\n\nRelated events in the last 24 hours:\n"
|
||||
for i, e in enumerate(related[:10], 1):
|
||||
ts = e.get("timestamp", "?")[:16].replace("T", " ")
|
||||
op = e.get("operation", "unknown")
|
||||
actor = e.get("actor_display", "unknown")
|
||||
targets = ", ".join(e.get("target_displays") or []) or "—"
|
||||
result = e.get("result", "—")
|
||||
related_text += f"{i}. {ts} — {op} by {actor} on {targets} ({result})\n"
|
||||
|
||||
messages = [
|
||||
{"role": "system", "content": _EXPLAIN_SYSTEM_PROMPT},
|
||||
{
|
||||
"role": "user",
|
||||
"content": f"Audit event:\n{event_text}{resolution_text}{related_text}\n\nPlease explain this event.",
|
||||
},
|
||||
]
|
||||
|
||||
url = _build_chat_url(LLM_BASE_URL, LLM_API_VERSION)
|
||||
headers = {"Content-Type": "application/json"}
|
||||
if "azure" in LLM_BASE_URL.lower() or "cognitiveservices" in LLM_BASE_URL.lower():
|
||||
headers["api-key"] = LLM_API_KEY
|
||||
else:
|
||||
headers["Authorization"] = f"Bearer {LLM_API_KEY}"
|
||||
|
||||
payload = {
|
||||
"model": LLM_MODEL,
|
||||
"messages": messages,
|
||||
"max_completion_tokens": 600,
|
||||
}
|
||||
|
||||
async with httpx.AsyncClient(timeout=LLM_TIMEOUT_SECONDS) as client:
|
||||
resp = await client.post(url, headers=headers, json=payload)
|
||||
if resp.status_code >= 400:
|
||||
body = resp.text
|
||||
logger.error("LLM API error", status_code=resp.status_code, url=url, response_body=body)
|
||||
raise RuntimeError(f"LLM API error {resp.status_code}: {body[:500]}")
|
||||
data = resp.json()
|
||||
return data["choices"][0]["message"]["content"].strip()
|
||||
|
||||
|
||||
@router.post("/events/{event_id}/explain")
|
||||
async def explain_event(event_id: str, user: dict = Depends(require_auth)):
|
||||
event = events_collection.find_one({"id": event_id})
|
||||
if not event:
|
||||
raise HTTPException(status_code=404, detail="Event not found")
|
||||
|
||||
if (
|
||||
event.get("service") in PRIVACY_SERVICES or event.get("operation") in PRIVACY_SENSITIVE_OPERATIONS
|
||||
) and not user_can_access_privacy_services(user):
|
||||
raise HTTPException(status_code=403, detail="Access to this event is restricted")
|
||||
|
||||
event.pop("_id", None)
|
||||
|
||||
# Fetch related events for context (same actor or target in last 24h)
|
||||
related = []
|
||||
since = (datetime.now(UTC) - timedelta(hours=24)).isoformat().replace("+00:00", "Z")
|
||||
actor = event.get("actor_upn") or event.get("actor_display")
|
||||
target = event.get("target_displays", [None])[0] if event.get("target_displays") else None
|
||||
|
||||
or_filters = [{"timestamp": {"$gte": since}}, {"id": {"$ne": event_id}}]
|
||||
if actor:
|
||||
or_filters.append(
|
||||
{
|
||||
"$or": [
|
||||
{"actor_upn": actor},
|
||||
{"actor_display": actor},
|
||||
]
|
||||
}
|
||||
)
|
||||
if target:
|
||||
or_filters.append({"target_displays": target})
|
||||
|
||||
if len(or_filters) > 2:
|
||||
try:
|
||||
rel_cursor = events_collection.find({"$and": or_filters}).sort("timestamp", -1).limit(10)
|
||||
related = list(rel_cursor)
|
||||
for r in related:
|
||||
r.pop("_id", None)
|
||||
r.pop("raw", None)
|
||||
except Exception as exc:
|
||||
logger.warning("Failed to fetch related events", error=str(exc))
|
||||
|
||||
if not LLM_API_KEY:
|
||||
return {
|
||||
"explanation": "LLM is not configured. Set LLM_API_KEY in your environment to enable event explanations.",
|
||||
"llm_used": False,
|
||||
"llm_error": "LLM_API_KEY not configured",
|
||||
}
|
||||
|
||||
# Check cache first
|
||||
redis = await get_arq_pool()
|
||||
cached = await get_cached_explain(redis, event_id)
|
||||
if cached:
|
||||
cached["related_count"] = len(related)
|
||||
return cached
|
||||
|
||||
try:
|
||||
explanation = await _explain_event(event, related)
|
||||
result = {
|
||||
"explanation": explanation,
|
||||
"llm_used": True,
|
||||
"llm_error": None,
|
||||
"related_count": len(related),
|
||||
}
|
||||
await set_cached_explain(redis, event_id, result)
|
||||
return result
|
||||
except Exception as exc:
|
||||
logger.warning("Event explanation failed", error=str(exc))
|
||||
return {
|
||||
"explanation": "Unable to generate an explanation at this time. Please check the raw event details.",
|
||||
"llm_used": False,
|
||||
"llm_error": str(exc),
|
||||
"related_count": len(related),
|
||||
}
|
||||
|
||||
|
||||
@router.post("/ask", response_model=AskResponse)
|
||||
async def ask_question(body: AskRequest, user: dict = Depends(require_auth)):
|
||||
question = body.question.strip()
|
||||
if not question:
|
||||
raise HTTPException(status_code=400, detail="Question is required")
|
||||
|
||||
start, end = _extract_time_range(question)
|
||||
entity = _extract_entity(question)
|
||||
intent_services, explicit_noisy = _extract_intent_services(question)
|
||||
|
||||
# Default to last 7 days if no time range detected
|
||||
if not start:
|
||||
now = datetime.now(UTC)
|
||||
start = (now - timedelta(days=7)).isoformat().replace("+00:00", "Z")
|
||||
end = now.isoformat().replace("+00:00", "Z")
|
||||
|
||||
# -----------------------------------------------------------------------
|
||||
# Decide which services to query
|
||||
# -----------------------------------------------------------------------
|
||||
excluded_services: list[str] = []
|
||||
if body.services:
|
||||
# User explicitly filtered via UI — respect that exactly
|
||||
query_services = body.services
|
||||
elif intent_services is not None:
|
||||
# NL question implies specific services
|
||||
query_services = intent_services
|
||||
else:
|
||||
# Broad question with no intent — exclude noisy services by default
|
||||
query_services = sorted(_DEFAULT_ADMIN_SERVICES)
|
||||
excluded_services = sorted(_NOISY_SERVICES)
|
||||
|
||||
# -----------------------------------------------------------------------
|
||||
# Build and run query
|
||||
# -----------------------------------------------------------------------
|
||||
privacy_excluded_services = [] if user_can_access_privacy_services(user) else list(PRIVACY_SERVICES)
|
||||
privacy_excluded_ops = [] if user_can_access_privacy_services(user) else list(PRIVACY_SENSITIVE_OPERATIONS)
|
||||
query = _build_event_query(
|
||||
entity,
|
||||
start,
|
||||
end,
|
||||
services=query_services,
|
||||
actor=body.actor,
|
||||
operation=body.operation,
|
||||
result=body.result,
|
||||
include_tags=body.include_tags,
|
||||
exclude_tags=body.exclude_tags,
|
||||
)
|
||||
extra_filters = []
|
||||
if privacy_excluded_services:
|
||||
extra_filters.append({"service": {"$nin": privacy_excluded_services}})
|
||||
if privacy_excluded_ops:
|
||||
extra_filters.append({"operation": {"$nin": privacy_excluded_ops}})
|
||||
if extra_filters:
|
||||
query["$and"] = query.get("$and", []) + extra_filters
|
||||
|
||||
try:
|
||||
total = events_collection.count_documents(query)
|
||||
# Fetch a generous window so we can apply smart sampling in Python
|
||||
cursor = events_collection.find(query).sort([("timestamp", -1)]).limit(1000)
|
||||
raw_events = list(cursor)
|
||||
except Exception as exc:
|
||||
logger.error("Failed to query events for ask", error=str(exc))
|
||||
raise HTTPException(status_code=500, detail=f"Database query failed: {exc}") from exc
|
||||
|
||||
for e in raw_events:
|
||||
e["_id"] = str(e.get("_id", ""))
|
||||
|
||||
# Apply smart sampling (preserves failures, prioritises admin-relevant services)
|
||||
events = _smart_sample(raw_events, max_events=LLM_MAX_EVENTS)
|
||||
|
||||
# If no events, return early
|
||||
if not events:
|
||||
return AskResponse(
|
||||
answer="I couldn't find any audit events matching your question. Try broadening the time range or checking the spelling of the device/user name.",
|
||||
events=[],
|
||||
query_info={
|
||||
"entity": entity,
|
||||
"start": start,
|
||||
"end": end,
|
||||
"event_count": 0,
|
||||
"total_matched": total,
|
||||
"services_queried": query_services,
|
||||
"excluded_services": excluded_services,
|
||||
},
|
||||
llm_used=False,
|
||||
llm_error="LLM not used — no events found." if not LLM_API_KEY else None,
|
||||
)
|
||||
|
||||
# Try LLM summarisation (with caching + optional async)
|
||||
answer = ""
|
||||
llm_used = False
|
||||
llm_error = None
|
||||
job_id = None
|
||||
|
||||
filters_snapshot = {
|
||||
"services": body.services,
|
||||
"actor": body.actor,
|
||||
"operation": body.operation,
|
||||
"result": body.result,
|
||||
"start": body.start,
|
||||
"end": body.end,
|
||||
"include_tags": body.include_tags,
|
||||
"exclude_tags": body.exclude_tags,
|
||||
}
|
||||
|
||||
if LLM_API_KEY:
|
||||
redis = await get_arq_pool()
|
||||
cached = await get_cached_ask(redis, question, filters_snapshot, events)
|
||||
if cached:
|
||||
answer = cached.get("answer", "")
|
||||
llm_used = cached.get("llm_used", False)
|
||||
llm_error = cached.get("llm_error")
|
||||
elif body.async_mode:
|
||||
pool = await get_arq_pool()
|
||||
job = await pool.enqueue_job(
|
||||
"process_ask_question",
|
||||
question,
|
||||
filters_snapshot,
|
||||
events,
|
||||
total,
|
||||
excluded_services,
|
||||
)
|
||||
job_id = job.job_id if job else None
|
||||
return AskResponse(
|
||||
answer="Your question is being processed. Poll /api/jobs/{job_id} for the result.",
|
||||
events=[_to_event_ref(e) for e in events],
|
||||
query_info={
|
||||
"entity": entity,
|
||||
"start": start,
|
||||
"end": end,
|
||||
"event_count": len(events),
|
||||
"total_matched": total,
|
||||
"services_queried": query_services,
|
||||
"excluded_services": excluded_services,
|
||||
"mongo_query": json.dumps(query, default=str),
|
||||
},
|
||||
llm_used=False,
|
||||
llm_error=None,
|
||||
job_id=job_id,
|
||||
)
|
||||
else:
|
||||
try:
|
||||
answer = await _call_llm(question, events, total=total, excluded_services=excluded_services)
|
||||
llm_used = True
|
||||
await set_cached_ask(
|
||||
redis,
|
||||
question,
|
||||
filters_snapshot,
|
||||
events,
|
||||
{
|
||||
"answer": answer,
|
||||
"llm_used": True,
|
||||
"llm_error": None,
|
||||
},
|
||||
)
|
||||
except Exception as exc:
|
||||
llm_error = f"LLM call failed: {exc}"
|
||||
logger.warning("LLM call failed, falling back to structured summary", error=str(exc))
|
||||
else:
|
||||
llm_error = "LLM_API_KEY is not configured. Set it in your .env to enable AI narrative summarisation."
|
||||
|
||||
# Fallback: structured summary if LLM unavailable or failed
|
||||
if not answer:
|
||||
parts = [f"Found {total} event(s)"]
|
||||
if entity:
|
||||
parts.append(f"related to **{entity}**")
|
||||
if excluded_services:
|
||||
parts.append(f"(excluding {', '.join(excluded_services)})")
|
||||
parts.append(f"between {start[:10]} and {end[:10]}.\n")
|
||||
|
||||
for i, e in enumerate(events[:10], 1):
|
||||
ts = e.get("timestamp", "?")[:16].replace("T", " ")
|
||||
op = e.get("operation", "unknown action")
|
||||
actor = e.get("actor_display", "unknown")
|
||||
targets = ", ".join(e.get("target_displays") or []) or "—"
|
||||
result = e.get("result", "—")
|
||||
parts.append(f"{i}. **{ts}** — {op} by {actor} on {targets} ({result})")
|
||||
|
||||
if len(events) > 10:
|
||||
parts.append(f"\n...and {len(events) - 10} more events.")
|
||||
|
||||
answer = "\n".join(parts)
|
||||
|
||||
return AskResponse(
|
||||
answer=answer,
|
||||
events=[_to_event_ref(e) for e in events],
|
||||
query_info={
|
||||
"entity": entity,
|
||||
"start": start,
|
||||
"end": end,
|
||||
"event_count": len(events),
|
||||
"total_matched": total,
|
||||
"services_queried": query_services,
|
||||
"excluded_services": excluded_services,
|
||||
"mongo_query": json.dumps(query, default=str),
|
||||
},
|
||||
llm_used=llm_used,
|
||||
llm_error=llm_error,
|
||||
job_id=job_id,
|
||||
)
|
||||
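
Reviewer note: a minimal usage sketch for the explain endpoint above; the base URL is an assumption (local dev with AUTH_ENABLED=false), not part of this diff.

# Hypothetical client sketch, not part of the changeset.
import httpx

resp = httpx.post("http://localhost:8000/api/events/evt-123/explain")  # evt-123 is a placeholder ID
resp.raise_for_status()
body = resp.json()
if body["llm_used"]:
    print(body["explanation"])  # narrative produced by _explain_event
else:
    print("LLM unavailable:", body["llm_error"])  # graceful fallback path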

@@ -1,4 +1,5 @@
from config import (
+    AI_FEATURES_ENABLED,
    AUTH_CLIENT_ID,
    AUTH_ENABLED,
    AUTH_SCOPE,
@@ -18,3 +19,10 @@ def auth_config():
        "scope": AUTH_SCOPE,
        "redirect_uri": None,  # frontend uses window.location.origin by default
    }
+
+
+@router.get("/config/features")
+def features_config():
+    return {
+        "ai_features_enabled": AI_FEATURES_ENABLED,
+    }
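
Reviewer note: the new /api/config/features endpoint lets clients gate AI UI elements. A minimal sketch (localhost URL assumed):

# Hypothetical client sketch, not part of the changeset.
import httpx

features = httpx.get("http://localhost:8000/api/config/features").json()
if features["ai_features_enabled"]:
    print("AI features enabled: show the ask box and explain buttons")
else:
    print("AI features disabled: hide all AI UI elements")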

@@ -3,11 +3,13 @@ import re
from datetime import UTC, datetime

from audit_trail import log_action
-from auth import require_auth
+from auth import require_auth, user_can_access_privacy_services
from bson import ObjectId
+from config import PRIVACY_SENSITIVE_OPERATIONS, PRIVACY_SERVICES
from database import events_collection
from fastapi import APIRouter, Depends, HTTPException, Query
from models.api import (
+    BulkTagsRequest,
    CommentAddRequest,
    FilterOptionsResponse,
    PaginatedEventResponse,
@@ -31,10 +33,9 @@ def _decode_cursor(cursor: str) -> tuple[str, str]:
        raise HTTPException(status_code=400, detail="Invalid cursor") from exc


-@router.get("/events", response_model=PaginatedEventResponse)
-def list_events(
+def _build_query(
    service: str | None = None,
-    services: list[str] | None = Query(default=None),
+    services: list[str] | None = None,
    actor: str | None = None,
    operation: str | None = None,
    result: str | None = None,
@@ -42,15 +43,18 @@ def list_events(
    end: str | None = None,
    search: str | None = None,
    cursor: str | None = None,
-    page_size: int = Query(default=50, ge=1, le=500),
-    user: dict = Depends(require_auth),
-):
+    include_tags: list[str] | None = None,
+    exclude_tags: list[str] | None = None,
+    exclude_operations: list[str] | None = None,
+) -> dict:
    filters = []

    if service:
        filters.append({"service": service})
    if services:
        filters.append({"service": {"$in": services}})
+    if exclude_operations:
+        filters.append({"operation": {"$nin": exclude_operations}})
    if actor:
        actor_safe = re.escape(actor)
        filters.append(
@@ -87,6 +91,10 @@ def list_events(
            ]
        }
    )
+    if include_tags:
+        filters.append({"tags": {"$all": include_tags}})
+    if exclude_tags:
+        filters.append({"tags": {"$not": {"$all": exclude_tags}}})

    if cursor:
        try:
@@ -102,17 +110,52 @@ def list_events(
            }
        )

-    query = {"$and": filters} if filters else {}
+    return {"$and": filters} if filters else {}
+
+
+@router.get("/events", response_model=PaginatedEventResponse)
+def list_events(
+    service: str | None = None,
+    services: list[str] | None = Query(default=None),
+    actor: str | None = None,
+    operation: str | None = None,
+    result: str | None = None,
+    start: str | None = None,
+    end: str | None = None,
+    search: str | None = None,
+    cursor: str | None = None,
+    page_size: int = Query(default=50, ge=1, le=500),
+    include_tags: list[str] | None = Query(default=None),
+    exclude_tags: list[str] | None = Query(default=None),
+    user: dict = Depends(require_auth),
+):
+    privacy_excluded_services = [] if user_can_access_privacy_services(user) else list(PRIVACY_SERVICES)
+    privacy_excluded_ops = [] if user_can_access_privacy_services(user) else list(PRIVACY_SENSITIVE_OPERATIONS)
+    query = _build_query(
+        service=service,
+        services=services,
+        actor=actor,
+        operation=operation,
+        result=result,
+        start=start,
+        end=end,
+        search=search,
+        cursor=cursor,
+        include_tags=include_tags,
+        exclude_tags=exclude_tags,
+        exclude_operations=privacy_excluded_ops,
+    )
+    if privacy_excluded_services:
+        query = query if query else {}
+        if "$and" not in query:
+            query = {"$and": [query]} if query else {"$and": []}
+        query["$and"].append({"service": {"$nin": privacy_excluded_services}})

    safe_page_size = max(1, min(page_size, 500))

    try:
        total = events_collection.count_documents(query) if not cursor else -1
-        cursor_query = (
-            events_collection.find(query)
-            .sort([("timestamp", -1), ("_id", -1)])
-            .limit(safe_page_size)
-        )
+        cursor_query = events_collection.find(query).sort([("timestamp", -1), ("_id", -1)]).limit(safe_page_size)
        events = list(cursor_query)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"Failed to query events: {exc}") from exc
@@ -125,10 +168,28 @@ def list_events(
    for e in events:
        e["_id"] = str(e["_id"])

-    log_action("list_events", "/api/events", {"filters": {k: v for k, v in {
-        "service": service, "actor": actor, "operation": operation, "result": result,
-        "start": start, "end": end, "search": search, "cursor": cursor, "page_size": page_size,
-    }.items() if v is not None}}, user.get("sub", "anonymous"))
+    log_action(
+        "list_events",
+        "/api/events",
+        {
+            "filters": {
+                k: v
+                for k, v in {
+                    "service": service,
+                    "actor": actor,
+                    "operation": operation,
+                    "result": result,
+                    "start": start,
+                    "end": end,
+                    "search": search,
+                    "cursor": cursor,
+                    "page_size": page_size,
+                }.items()
+                if v is not None
+            }
+        },
+        user.get("sub", "anonymous"),
+    )

    return {
        "items": events,
@@ -138,8 +199,66 @@ def list_events(
    }


+@router.post("/events/bulk-tags")
+def bulk_tags(
+    body: BulkTagsRequest,
+    service: str | None = None,
+    services: list[str] | None = Query(default=None),
+    actor: str | None = None,
+    operation: str | None = None,
+    result: str | None = None,
+    start: str | None = None,
+    end: str | None = None,
+    search: str | None = None,
+    include_tags: list[str] | None = Query(default=None),
+    exclude_tags: list[str] | None = Query(default=None),
+    user: dict = Depends(require_auth),
+):
+    privacy_excluded_services = [] if user_can_access_privacy_services(user) else list(PRIVACY_SERVICES)
+    privacy_excluded_ops = [] if user_can_access_privacy_services(user) else list(PRIVACY_SENSITIVE_OPERATIONS)
+    query = _build_query(
+        service=service,
+        services=services,
+        actor=actor,
+        operation=operation,
+        result=result,
+        start=start,
+        end=end,
+        search=search,
+        include_tags=include_tags,
+        exclude_tags=exclude_tags,
+        exclude_operations=privacy_excluded_ops,
+    )
+    if privacy_excluded_services:
+        query = query if query else {}
+        if "$and" not in query:
+            query = {"$and": [query]} if query else {"$and": []}
+        query["$and"].append({"service": {"$nin": privacy_excluded_services}})
+    tags = [t.strip() for t in body.tags if t.strip()]
+    if not tags:
+        raise HTTPException(status_code=400, detail="No tags provided")
+
+    update = {"$set": {"tags": tags}} if body.mode == "replace" else {"$addToSet": {"tags": {"$each": tags}}}
+
+    try:
+        result_obj = events_collection.update_many(query, update)
+    except Exception as exc:
+        raise HTTPException(status_code=500, detail=f"Failed to update tags: {exc}") from exc
+
+    log_action(
+        "bulk_tags",
+        "/api/events/bulk-tags",
+        {"tags": tags, "mode": body.mode, "matched": result_obj.matched_count},
+        user.get("sub", "anonymous"),
+    )
+    return {"matched": result_obj.matched_count, "modified": result_obj.modified_count}
+
+
@router.get("/filter-options", response_model=FilterOptionsResponse)
-def filter_options(limit: int = Query(default=200, ge=1, le=1000)):
+def filter_options(
+    limit: int = Query(default=200, ge=1, le=1000),
+    user: dict = Depends(require_auth),
+):
    safe_limit = max(1, min(limit, 1000))
    try:
        services = sorted(events_collection.distinct("service"))[:safe_limit]
@@ -151,6 +270,10 @@ def filter_options(limit: int = Query(default=200, ge=1, le=1000)):
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"Failed to load filter options: {exc}") from exc

+    if not user_can_access_privacy_services(user):
+        services = [s for s in services if s not in PRIVACY_SERVICES]
+        operations = [o for o in operations if o not in PRIVACY_SENSITIVE_OPERATIONS]
+
    return {
        "services": services,
        "operations": operations,
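
Reviewer note: to make the _build_query refactor concrete, this is the shape of the Mongo filter the new list_events assembles for a privacy-restricted user; the concrete service, operation, and tag values below are illustrative, not taken from the diff.

# Illustrative only: filter for services=["Exchange"], include_tags=["backup"],
# with privacy-sensitive operations stripped and PRIVACY_SERVICES appended last.
query = {
    "$and": [
        {"service": {"$in": ["Exchange"]}},
        {"operation": {"$nin": ["MailItemsAccessed", "Send"]}},  # exclude_operations
        {"tags": {"$all": ["backup"]}},  # include_tags
        {"service": {"$nin": ["Teams"]}},  # appended by list_events, not _build_query
    ]
}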

@@ -54,11 +54,14 @@ def run_fetch(hours: int = 168):
        if key:
            ops.append(UpdateOne({"dedupe_key": key}, {"$set": doc}, upsert=True))
        else:
-            ops.append(UpdateOne({"id": doc.get("id"), "timestamp": doc.get("timestamp")}, {"$set": doc}, upsert=True))
+            ops.append(
+                UpdateOne({"id": doc.get("id"), "timestamp": doc.get("timestamp")}, {"$set": doc}, upsert=True)
+            )
    events_collection.bulk_write(ops, ordered=False)

    if ALERTS_ENABLED:
        from rules import evaluate_event

        for doc in normalized:
            evaluate_event(doc)
@@ -75,7 +78,12 @@ def fetch_logs(
):
    try:
        result = run_fetch(hours=hours)
-        log_action("fetch_audit_logs", "/api/fetch-audit-logs", {"hours": hours, "stored": result["stored_events"]}, user.get("sub", "anonymous"))
+        log_action(
+            "fetch_audit_logs",
+            "/api/fetch-audit-logs",
+            {"hours": hours, "stored": result["stored_events"]},
+            user.get("sub", "anonymous"),
+        )
        return result
    except Exception as exc:
        raise HTTPException(status_code=502, detail=str(exc)) from exc

@@ -1,4 +1,3 @@
-
from auth import require_auth
from fastapi import APIRouter, Depends
from models.api import SourceHealthResponse
@@ -19,17 +18,21 @@ def source_health():
            status = doc.get("status")
            if not status:
                status = "healthy" if doc.get("last_fetch_time") else "unknown"
-            results.append({
-                "source": source,
-                "last_fetch_time": doc.get("last_fetch_time"),
-                "last_attempt_time": doc.get("last_attempt_time"),
-                "status": status,
-            })
+            results.append(
+                {
+                    "source": source,
+                    "last_fetch_time": doc.get("last_fetch_time"),
+                    "last_attempt_time": doc.get("last_attempt_time"),
+                    "status": status,
+                }
+            )
        else:
-            results.append({
-                "source": source,
-                "last_fetch_time": None,
-                "last_attempt_time": None,
-                "status": "unknown",
-            })
+            results.append(
+                {
+                    "source": source,
+                    "last_fetch_time": None,
+                    "last_attempt_time": None,
+                    "status": "unknown",
+                }
+            )
    return results

43
backend/routes/jobs.py
Normal file
@@ -0,0 +1,43 @@
"""Job status endpoints for async LLM operations."""

from arq.jobs import Job, JobStatus
from auth import require_auth
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel
from redis_client import get_redis

router = APIRouter(dependencies=[Depends(require_auth)])


class JobStatusResponse(BaseModel):
    job_id: str
    status: str  # queued, in_progress, complete, not_found, deferred
    result: dict | None = None
    error: str | None = None


@router.get("/jobs/{job_id}", response_model=JobStatusResponse)
async def get_job_status(job_id: str, user: dict = Depends(require_auth)):
    """Poll for the result of an async LLM job."""
    redis = await get_redis()
    job = Job(job_id, redis)
    status = await job.status()

    if status == JobStatus.not_found:
        raise HTTPException(status_code=404, detail="Job not found")

    result = None
    error = None
    if status == JobStatus.complete:
        try:
            result_data = await job.result(timeout=0)
            result = result_data if isinstance(result_data, dict) else {"data": str(result_data)}
        except Exception as exc:
            error = str(exc)

    return JobStatusResponse(
        job_id=job_id,
        status=status.value,
        result=result,
        error=error,
    )
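
Reviewer note: end-to-end, the async path is POST /api/ask with async_mode, then poll the jobs endpoint above. A sketch of the flow (the base URL and the shape of the worker's result payload are assumptions):

# Hypothetical client sketch, not part of the changeset.
import time

import httpx

base = "http://localhost:8000/api"
ask = httpx.post(f"{base}/ask", json={"question": "What happened yesterday?", "async_mode": True}).json()
job_id = ask["job_id"]

status = "queued"
while status != "complete":
    time.sleep(1)  # the arq worker is processing in the background
    poll = httpx.get(f"{base}/jobs/{job_id}")
    poll.raise_for_status()  # raises on 404 if the job expired or never existed
    status = poll.json()["status"]

print(poll.json()["result"])  # dict returned by process_ask_question (shape assumed)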

124
backend/routes/mcp.py
Normal file
@@ -0,0 +1,124 @@
"""MCP server over SSE (HTTP) transport, mounted inside FastAPI with OIDC auth."""

import structlog
from auth import (
    AUTH_ALLOWED_GROUPS,
    AUTH_ALLOWED_ROLES,
    AUTH_ENABLED,
    _allowed,
    _decode_token,
    _get_jwks,
)
from mcp.server import Server
from mcp.server.sse import SseServerTransport
from mcp.types import TextContent, Tool
from mcp_common import (
    ASK_SCHEMA,
    GET_EVENT_SCHEMA,
    GET_SUMMARY_SCHEMA,
    SEARCH_EVENTS_SCHEMA,
    handle_ask,
    handle_get_event,
    handle_get_summary,
    handle_search_events,
)
from starlette.requests import Request
from starlette.responses import Response

logger = structlog.get_logger("aoc.mcp")

mcp_app = Server("aoc")
transport = SseServerTransport("/messages/")


@mcp_app.list_tools()
async def list_tools() -> list[Tool]:
    return [
        Tool(
            name="search_events",
            description="Search audit events by entity, service, operation, or result.",
            inputSchema=SEARCH_EVENTS_SCHEMA,
        ),
        Tool(name="get_event", description="Retrieve a single audit event by its ID.", inputSchema=GET_EVENT_SCHEMA),
        Tool(
            name="get_summary",
            description="Get an aggregated summary of audit activity for the last N days.",
            inputSchema=GET_SUMMARY_SCHEMA,
        ),
        Tool(
            name="ask",
            description="Ask a natural language question about audit logs. Returns a narrative answer.",
            inputSchema=ASK_SCHEMA,
        ),
    ]


@mcp_app.call_tool()
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
    if name == "search_events":
        return await handle_search_events(arguments)
    if name == "get_event":
        return await handle_get_event(arguments)
    if name == "get_summary":
        return await handle_get_summary(arguments)
    if name == "ask":
        return await handle_ask(arguments)
    raise ValueError(f"Unknown tool: {name}")


async def _validate_auth(request: Request) -> dict | None:
    """Validate Bearer token. Returns claims dict or None on failure."""
    if not AUTH_ENABLED:
        return {"sub": "anonymous"}

    auth_header = request.headers.get("authorization", "")
    if not auth_header or not auth_header.lower().startswith("bearer "):
        return None

    token = auth_header.split(" ", 1)[1]
    try:
        jwks = _get_jwks()
        claims = _decode_token(token, jwks)
    except Exception as exc:
        logger.warning("MCP auth failed", error=str(exc))
        return None

    if not _allowed(claims, AUTH_ALLOWED_ROLES, AUTH_ALLOWED_GROUPS):
        logger.warning("MCP auth forbidden", sub=claims.get("sub"))
        return None

    return claims


async def mcp_asgi(scope: dict, receive, send):
    """ASGI application for MCP over SSE, mounted under /mcp in FastAPI."""
    if scope["type"] != "http":
        return

    request = Request(scope, receive)

    # Auth check
    claims = await _validate_auth(request)
    if claims is None:
        response = Response("Unauthorized", status_code=401)
        await response(scope, receive, send)
        return

    path = scope.get("path", "")
    root_path = scope.get("root_path", "")
    relative_path = path[len(root_path) :] if path.startswith(root_path) else path
    method = scope.get("method", "")

    if relative_path == "/sse" and method == "GET":
        logger.info("MCP SSE connection established", sub=claims.get("sub", "unknown"))
        async with transport.connect_sse(scope, receive, send) as (read_stream, write_stream):
            await mcp_app.run(
                read_stream,
                write_stream,
                mcp_app.create_initialization_options(),
            )
    elif relative_path == "/messages/" and method == "POST":
        await transport.handle_post_message(scope, receive, send)
    else:
        response = Response("Not found", status_code=404)
        await response(scope, receive, send)
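
Reviewer note: a sketch of connecting to this mount with the MCP Python SDK's SSE client; the URL, the bearer token, and the get_summary argument name are assumptions, and the exact client API may differ between versions of the mcp package.

# Hypothetical client sketch, not part of the changeset.
import asyncio

from mcp import ClientSession
from mcp.client.sse import sse_client


async def main() -> None:
    headers = {"Authorization": "Bearer <token>"}  # must satisfy _validate_auth when AUTH_ENABLED
    async with sse_client("http://localhost:8000/mcp/sse", headers=headers) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await session.list_tools()
            print([t.name for t in tools.tools])  # search_events, get_event, get_summary, ask
            result = await session.call_tool("get_summary", {"days": 7})  # "days" is assumed
            print(result.content[0].text)


asyncio.run(main())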

60
backend/routes/saved_searches.py
Normal file
@@ -0,0 +1,60 @@
"""CRUD for saved filter searches (bookmarks)."""

import uuid
from datetime import UTC, datetime

import structlog
from auth import require_auth
from database import saved_searches_collection
from fastapi import APIRouter, Depends, HTTPException

router = APIRouter(dependencies=[Depends(require_auth)])
logger = structlog.get_logger("aoc.saved_searches")


def _user_sub(user: dict) -> str:
    return user.get("sub", "anonymous")


@router.get("/saved-searches")
async def list_saved_searches(user: dict = Depends(require_auth)):
    """Return saved searches for the current user."""
    sub = _user_sub(user)
    cursor = saved_searches_collection.find({"created_by": sub}).sort("created_at", -1)
    items = []
    for doc in cursor:
        doc["id"] = doc.pop("_id")
        items.append(doc)
    return items


@router.post("/saved-searches")
async def create_saved_search(body: dict, user: dict = Depends(require_auth)):
    """Save the current filter set."""
    name = (body.get("name") or "").strip()
    if not name:
        raise HTTPException(status_code=400, detail="Name is required")

    filters = body.get("filters") or {}
    doc = {
        "_id": str(uuid.uuid4()),
        "name": name,
        "filters": filters,
        "created_at": datetime.now(UTC).isoformat().replace("+00:00", "Z"),
        "created_by": _user_sub(user),
    }
    saved_searches_collection.insert_one(doc)
    logger.info("Saved search created", name=name, user=doc["created_by"])
    doc["id"] = doc.pop("_id")
    return doc


@router.delete("/saved-searches/{search_id}")
async def delete_saved_search(search_id: str, user: dict = Depends(require_auth)):
    """Delete a saved search (only if owned by current user)."""
    sub = _user_sub(user)
    result = saved_searches_collection.delete_one({"_id": search_id, "created_by": sub})
    if result.deleted_count == 0:
        raise HTTPException(status_code=404, detail="Saved search not found")
    logger.info("Saved search deleted", search_id=search_id, user=sub)
    return {"status": "deleted"}
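
Reviewer note: a quick round-trip through the saved-searches endpoints above (localhost URL assumed, auth disabled for brevity):

# Hypothetical client sketch, not part of the changeset.
import httpx

base = "http://localhost:8000/api"
created = httpx.post(
    f"{base}/saved-searches",
    json={"name": "Failed Intune ops", "filters": {"service": "Intune", "result": "failure"}},
).json()

print([s["name"] for s in httpx.get(f"{base}/saved-searches").json()])  # includes the new bookmark
httpx.delete(f"{base}/saved-searches/{created['id']}").raise_for_status()  # owner-only delete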

@@ -11,10 +11,7 @@ def fetch_intune_audit(hours: int = 24, since: str | None = None, max_pages: int
    """
    token = get_access_token()
    start_time = since or (datetime.utcnow() - timedelta(hours=hours)).isoformat() + "Z"
-    url = (
-        "https://graph.microsoft.com/v1.0/deviceManagement/auditEvents"
-        f"?$filter=activityDateTime ge {start_time}"
-    )
+    url = f"https://graph.microsoft.com/v1.0/deviceManagement/auditEvents?$filter=activityDateTime ge {start_time}"
    headers = {"Authorization": f"Bearer {token}"}

    events = []
@@ -69,7 +66,8 @@ def _normalize_intune(e: dict) -> dict:
        "targetResources": [
            {
                "id": target.get("id"),
-                "displayName": target.get("displayName") or target.get("modifiedProperties", [{}])[0].get("displayName"),
+                "displayName": target.get("displayName")
+                or target.get("modifiedProperties", [{}])[0].get("displayName"),
                "type": target.get("type"),
            }
        ]

@@ -22,13 +22,23 @@ def mock_watermarks_collection():
@pytest.fixture(scope="function")
def client(mock_events_collection, mock_watermarks_collection, monkeypatch):
    monkeypatch.setattr("database.events_collection", mock_events_collection)
+    monkeypatch.setattr("database.saved_searches_collection", mock_events_collection)
    monkeypatch.setattr("routes.fetch.events_collection", mock_events_collection)
    monkeypatch.setattr("routes.events.events_collection", mock_events_collection)
+    monkeypatch.setattr("routes.ask.events_collection", mock_events_collection)
+    monkeypatch.setattr("routes.saved_searches.saved_searches_collection", mock_events_collection)
    monkeypatch.setattr("watermark.watermarks_collection", mock_watermarks_collection)
    monkeypatch.setattr("routes.health.watermarks_collection", mock_watermarks_collection)
    monkeypatch.setattr("routes.fetch.get_watermark", lambda source: None)
    monkeypatch.setattr("routes.fetch.set_watermark", lambda source, ts: None)
    monkeypatch.setattr("auth.AUTH_ENABLED", False)
+    monkeypatch.setattr("routes.mcp.AUTH_ENABLED", False)
+    monkeypatch.setattr("config.PRIVACY_SERVICES", set())
+    monkeypatch.setattr("config.PRIVACY_SENSITIVE_OPERATIONS", set())
+    monkeypatch.setattr("routes.events.PRIVACY_SERVICES", set())
+    monkeypatch.setattr("routes.events.PRIVACY_SENSITIVE_OPERATIONS", set())
+    monkeypatch.setattr("routes.ask.PRIVACY_SERVICES", set())
+    monkeypatch.setattr("routes.ask.PRIVACY_SENSITIVE_OPERATIONS", set())
    monkeypatch.setattr("database.db.command", lambda cmd: {"ok": 1} if cmd == "ping" else {})

    # Mock audit trail and rules collections so tests don't wait on real MongoDB
@@ -39,6 +49,21 @@ def client(mock_events_collection, mock_watermarks_collection, monkeypatch):
    monkeypatch.setattr("rules.rules_collection", audit_db["alert_rules"])
    monkeypatch.setattr("routes.rules.rules_collection", audit_db["alert_rules"])

+    # Mock Redis so tests don't require a running Redis server
+    class FakeRedis:
+        async def get(self, key):
+            return None
+
+        async def setex(self, key, ttl, value):
+            pass
+
+    async def fake_get_arq_pool():
+        return FakeRedis()
+
+    monkeypatch.setattr("redis_client.get_arq_pool", fake_get_arq_pool)
+    monkeypatch.setattr("routes.ask.get_arq_pool", fake_get_arq_pool)
+    monkeypatch.setattr("routes.jobs.get_redis", fake_get_arq_pool)
+
    from main import app

    return TestClient(app)

@@ -1,6 +1,264 @@
from datetime import UTC, datetime


+def test_config_features(client):
+    response = client.get("/api/config/features")
+    assert response.status_code == 200
+    data = response.json()
+    assert "ai_features_enabled" in data
+    assert isinstance(data["ai_features_enabled"], bool)
+
+
+def test_ask_disabled_when_ai_features_off():
+    import subprocess
+    import sys
+
+    code = """
+import sys
+sys.path.insert(0, '.')
+import os
+os.environ['AI_FEATURES_ENABLED'] = 'false'
+
+# Re-import config with the env override
+import importlib
+import config
+importlib.reload(config)
+
+# Now import main; it will pick up the new AI_FEATURES_ENABLED
+import main
+ask_paths = [r.path for r in main.app.routes if hasattr(r, 'path') and 'ask' in r.path]
+print('ASK_PATHS:', ask_paths)
+assert len(ask_paths) == 0, f"Expected no ask routes, found: {ask_paths}"
+print('OK')
+"""
+    result = subprocess.run([sys.executable, "-c", code], capture_output=True, text=True, cwd=".")
+    assert result.returncode == 0, f"Subprocess failed: {result.stdout}\n{result.stderr}"
+    assert "OK" in result.stdout
+
+
+def test_mcp_sse_mount_exists():
+    from main import app
+
+    mcp_mounts = [r for r in app.routes if getattr(r, "path", "") == "/mcp"]
+    assert len(mcp_mounts) == 1, "MCP mount not found in app routes"
+
+
+def test_mcp_messages_no_session(client):
+    response = client.post("/mcp/messages/")
+    # MCP transport returns 400 when session_id is missing, 404 when session not found
+    assert response.status_code in (400, 404)
+
+
+def test_mcp_sse_auth_required_when_enabled(client, monkeypatch):
+    monkeypatch.setattr("routes.mcp.AUTH_ENABLED", True)
+    response = client.get("/mcp/sse")
+    assert response.status_code == 401
+
+
+def test_explain_event_not_found(client):
+    response = client.post("/api/events/nonexistent/explain")
+    assert response.status_code == 404
+
+
+def test_explain_event_no_llm_key(client, mock_events_collection, monkeypatch):
+    monkeypatch.setattr("routes.ask.LLM_API_KEY", "")
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-explain",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Directory",
+            "operation": "Add user",
+            "result": "success",
+            "actor_display": "Alice",
+            "raw_text": "",
+        }
+    )
+    response = client.post("/api/events/evt-explain/explain")
+    assert response.status_code == 200
+    data = response.json()
+    assert "explanation" in data
+    assert data["llm_used"] is False
+    assert "LLM_API_KEY" in (data.get("llm_error") or "")
+
+
+def test_explain_event_with_llm_mock(client, mock_events_collection, monkeypatch):
+    monkeypatch.setattr("routes.ask.LLM_API_KEY", "test-key")
+
+    async def fake_explain(event, related):
+        return "This is a test explanation."
+
+    monkeypatch.setattr("routes.ask._explain_event", fake_explain)
+
+    class FakeRedis:
+        async def get(self, key):
+            return None
+
+        async def setex(self, key, ttl, value):
+            pass
+
+    async def fake_get_arq_pool():
+        return FakeRedis()
+
+    monkeypatch.setattr("routes.ask.get_arq_pool", fake_get_arq_pool)
+
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-explain2",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Directory",
+            "operation": "Add user",
+            "result": "success",
+            "actor_display": "Alice",
+            "raw_text": "",
+        }
+    )
+    response = client.post("/api/events/evt-explain2/explain")
+    assert response.status_code == 200
+    data = response.json()
+    assert data["explanation"] == "This is a test explanation."
+    assert data["llm_used"] is True
+
+
+def test_saved_searches_crud(client, monkeypatch):
+    monkeypatch.setattr("auth.AUTH_ENABLED", False)
+
+    # Create
+    response = client.post(
+        "/api/saved-searches", json={"name": "Test search", "filters": {"actor": "alice", "result": "success"}}
+    )
+    assert response.status_code == 200
+    created = response.json()
+    assert created["name"] == "Test search"
+    assert created["filters"]["actor"] == "alice"
+    search_id = created["id"]
+
+    # List
+    response2 = client.get("/api/saved-searches")
+    assert response2.status_code == 200
+    items = response2.json()
+    assert len(items) == 1
+    assert items[0]["name"] == "Test search"
+
+    # Delete
+    response3 = client.delete(f"/api/saved-searches/{search_id}")
+    assert response3.status_code == 200
+
+    # List empty
+    response4 = client.get("/api/saved-searches")
+    assert response4.status_code == 200
+    assert len(response4.json()) == 0
+
+
+def test_saved_searches_delete_not_found(client, monkeypatch):
+    monkeypatch.setattr("auth.AUTH_ENABLED", False)
+    response = client.delete("/api/saved-searches/nonexistent")
+    assert response.status_code == 404
+
+
+def test_saved_searches_create_validation(client, monkeypatch):
+    monkeypatch.setattr("auth.AUTH_ENABLED", False)
+    response = client.post("/api/saved-searches", json={"name": " ", "filters": {}})
+    assert response.status_code == 400
+
+
+def test_privacy_filtering_events_by_operation(client, mock_events_collection, monkeypatch):
+    monkeypatch.setattr("config.PRIVACY_SENSITIVE_OPERATIONS", {"MailItemsAccessed", "Send"})
+    monkeypatch.setattr("routes.events.PRIVACY_SENSITIVE_OPERATIONS", {"MailItemsAccessed", "Send"})
+    monkeypatch.setattr("auth.PRIVACY_SERVICE_ROLES", {"SecurityAdmin"})
+    monkeypatch.setattr("auth.user_can_access_privacy_services", lambda claims: False)
+    monkeypatch.setattr("routes.events.user_can_access_privacy_services", lambda claims: False)
+
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-safe",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Exchange",
+            "operation": "Add-MailboxPermission",
+            "result": "success",
+            "actor_display": "Alice",
+            "raw_text": "",
+        }
+    )
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-priv",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Exchange",
+            "operation": "Send",
+            "result": "success",
+            "actor_display": "Bob",
+            "raw_text": "",
+        }
+    )
+
+    response = client.get("/api/events")
+    assert response.status_code == 200
+    data = response.json()
+    ids = [e["id"] for e in data["items"]]
+    assert "evt-safe" in ids
+    assert "evt-priv" not in ids
+
+
+def test_privacy_filter_options_shows_service_hides_ops(client, mock_events_collection, monkeypatch):
+    monkeypatch.setattr("config.PRIVACY_SENSITIVE_OPERATIONS", {"MailItemsAccessed"})
+    monkeypatch.setattr("routes.events.PRIVACY_SENSITIVE_OPERATIONS", {"MailItemsAccessed"})
+    monkeypatch.setattr("auth.PRIVACY_SERVICE_ROLES", {"SecurityAdmin"})
+    monkeypatch.setattr("auth.user_can_access_privacy_services", lambda claims: False)
+    monkeypatch.setattr("routes.events.user_can_access_privacy_services", lambda claims: False)
+
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-1",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Exchange",
+            "operation": "MailItemsAccessed",
+            "result": "success",
+            "actor_display": "Alice",
+            "raw_text": "",
+        }
+    )
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-2",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Exchange",
+            "operation": "Add-MailboxPermission",
+            "result": "success",
+            "actor_display": "Bob",
+            "raw_text": "",
+        }
+    )
+
+    response = client.get("/api/filter-options")
+    assert response.status_code == 200
+    data = response.json()
+    assert "Exchange" in data["services"]
+    assert "MailItemsAccessed" not in data["operations"]
+    assert "Add-MailboxPermission" in data["operations"]
+
+
+def test_privacy_explain_forbidden_by_operation(client, mock_events_collection, monkeypatch):
+    monkeypatch.setattr("config.PRIVACY_SENSITIVE_OPERATIONS", {"Send"})
+    monkeypatch.setattr("routes.ask.PRIVACY_SENSITIVE_OPERATIONS", {"Send"})
+    monkeypatch.setattr("auth.PRIVACY_SERVICE_ROLES", {"SecurityAdmin"})
+    monkeypatch.setattr("auth.user_can_access_privacy_services", lambda claims: False)
+    monkeypatch.setattr("routes.ask.user_can_access_privacy_services", lambda claims: False)
+
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-send",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Exchange",
+            "operation": "Send",
+            "result": "success",
+            "actor_display": "Bob",
+            "raw_text": "",
+        }
+    )
+    response = client.post("/api/events/evt-send/explain")
+    assert response.status_code == 403
+
+
def test_health(client):
    response = client.get("/health")
    assert response.status_code == 200
@@ -25,15 +283,17 @@ def test_list_events_empty(client):

def test_list_events_cursor_pagination(client, mock_events_collection):
    for i in range(5):
-        mock_events_collection.insert_one({
-            "id": f"evt-{i}",
-            "timestamp": datetime.now(UTC).isoformat(),
-            "service": "Directory",
-            "operation": "Add user",
-            "result": "success",
-            "actor_display": f"Actor {i}",
-            "raw_text": "",
-        })
+        mock_events_collection.insert_one(
+            {
+                "id": f"evt-{i}",
+                "timestamp": datetime.now(UTC).isoformat(),
+                "service": "Directory",
+                "operation": "Add user",
+                "result": "success",
+                "actor_display": f"Actor {i}",
+                "raw_text": "",
+            }
+        )
    response = client.get("/api/events?page_size=2")
    assert response.status_code == 200
    data = response.json()
@@ -48,24 +308,28 @@ def test_list_events_cursor_pagination(client, mock_events_collection):


def test_list_events_filter_by_service(client, mock_events_collection):
-    mock_events_collection.insert_one({
-        "id": "evt-1",
-        "timestamp": datetime.now(UTC).isoformat(),
-        "service": "Exchange",
-        "operation": "Update",
-        "result": "success",
-        "actor_display": "Alice",
-        "raw_text": "",
-    })
-    mock_events_collection.insert_one({
-        "id": "evt-2",
-        "timestamp": datetime.now(UTC).isoformat(),
-        "service": "Directory",
-        "operation": "Add",
-        "result": "success",
-        "actor_display": "Bob",
-        "raw_text": "",
-    })
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-1",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Exchange",
+            "operation": "Update",
+            "result": "success",
+            "actor_display": "Alice",
+            "raw_text": "",
+        }
+    )
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-2",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Directory",
+            "operation": "Add",
+            "result": "success",
+            "actor_display": "Bob",
+            "raw_text": "",
+        }
+    )
    response = client.get("/api/events?service=Exchange")
    assert response.status_code == 200
    data = response.json()
@@ -74,34 +338,40 @@ def test_list_events_filter_by_service(client, mock_events_collection):


def test_list_events_filter_by_services(client, mock_events_collection):
-    mock_events_collection.insert_one({
-        "id": "evt-1",
-        "timestamp": datetime.now(UTC).isoformat(),
-        "service": "Exchange",
-        "operation": "Update",
-        "result": "success",
-        "actor_display": "Alice",
-        "raw_text": "",
-    })
-    mock_events_collection.insert_one({
-        "id": "evt-2",
-        "timestamp": datetime.now(UTC).isoformat(),
-        "service": "Directory",
-        "operation": "Add",
-        "result": "success",
-        "actor_display": "Bob",
-        "raw_text": "",
-    })
-    mock_events_collection.insert_one({
-        "id": "evt-3",
-        "timestamp": datetime.now(UTC).isoformat(),
-        "service": "Teams",
-        "operation": "Delete",
-        "result": "success",
-        "actor_display": "Charlie",
-        "raw_text": "",
-    })
-    response = client.get("/api/events?service=Exchange&service=Directory")
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-1",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Exchange",
+            "operation": "Update",
+            "result": "success",
+            "actor_display": "Alice",
+            "raw_text": "",
+        }
+    )
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-2",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Directory",
+            "operation": "Add",
+            "result": "success",
+            "actor_display": "Bob",
+            "raw_text": "",
+        }
+    )
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-3",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Teams",
+            "operation": "Delete",
+            "result": "success",
+            "actor_display": "Charlie",
+            "raw_text": "",
+        }
+    )
+    response = client.get("/api/events?services=Exchange&services=Directory")
    assert response.status_code == 200
    data = response.json()
    assert len(data["items"]) == 2
@@ -117,16 +387,18 @@ def test_list_events_page_size_validation(client):


def test_filter_options(client, mock_events_collection):
-    mock_events_collection.insert_one({
-        "id": "evt-1",
-        "timestamp": datetime.now(UTC).isoformat(),
-        "service": "Intune",
-        "operation": "Assign",
-        "result": "failure",
-        "actor_display": "Charlie",
-        "actor_upn": "charlie@example.com",
-        "raw_text": "",
-    })
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-1",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Intune",
+            "operation": "Assign",
+            "result": "failure",
+            "actor_display": "Charlie",
+            "actor_upn": "charlie@example.com",
+            "raw_text": "",
+        }
+    )
    response = client.get("/api/filter-options")
    assert response.status_code == 200
    data = response.json()
@@ -168,15 +440,17 @@ def test_graph_webhook_notification(client):


def test_update_tags(client, mock_events_collection):
-    mock_events_collection.insert_one({
-        "id": "evt-tags",
-        "timestamp": datetime.now(UTC).isoformat(),
-        "service": "Directory",
-        "operation": "Add user",
-        "result": "success",
-        "actor_display": "Alice",
-        "raw_text": "",
-    })
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-tags",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Directory",
+            "operation": "Add user",
+            "result": "success",
+            "actor_display": "Alice",
+            "raw_text": "",
+        }
+    )
    response = client.patch("/api/events/evt-tags/tags", json={"tags": ["investigating", "urgent"]})
    assert response.status_code == 200
    assert response.json()["tags"] == ["investigating", "urgent"]
@@ -185,15 +459,17 @@ def test_update_tags(client, mock_events_collection):


def test_add_comment(client, mock_events_collection):
-    mock_events_collection.insert_one({
-        "id": "evt-comment",
-        "timestamp": datetime.now(UTC).isoformat(),
-        "service": "Directory",
-        "operation": "Add user",
-        "result": "success",
-        "actor_display": "Alice",
-        "raw_text": "",
-    })
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-comment",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Directory",
+            "operation": "Add user",
+            "result": "success",
+            "actor_display": "Alice",
+            "raw_text": "",
+        }
+    )
    response = client.post("/api/events/evt-comment/comments", json={"text": "Looks suspicious"})
    assert response.status_code == 200
    data = response.json()
@@ -241,3 +517,76 @@ def test_rules_crud(client):
    res5 = client.get("/api/rules")
    assert res5.status_code == 200
    assert len(res5.json()) == 0
+
+
+def test_list_events_filter_by_include_tags(client, mock_events_collection):
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-tagged",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Directory",
+            "operation": "Add user",
+            "result": "success",
+            "actor_display": "Alice",
+            "raw_text": "",
+            "tags": ["backup", "auto"],
+        }
+    )
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-untagged",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Directory",
+            "operation": "Remove user",
+            "result": "success",
+            "actor_display": "Bob",
+            "raw_text": "",
+            "tags": [],
+        }
+    )
+    response = client.get("/api/events?include_tags=backup")
+    assert response.status_code == 200
+    data = response.json()
+    assert len(data["items"]) == 1
+    assert data["items"][0]["id"] == "evt-tagged"
+
+
+def test_bulk_tags_append(client, mock_events_collection):
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-bulk",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Exchange",
+            "operation": "Update",
+            "result": "success",
+            "actor_display": "Alice",
+            "raw_text": "",
+            "tags": ["existing"],
+        }
+    )
+    response = client.post("/api/events/bulk-tags?service=Exchange", json={"tags": ["backup"], "mode": "append"})
+    assert response.status_code == 200
+    data = response.json()
+    assert data["matched"] == 1
+    doc = mock_events_collection.find_one({"id": "evt-bulk"})
+    assert "backup" in doc["tags"]
+    assert "existing" in doc["tags"]
+
+
+def test_bulk_tags_replace(client, mock_events_collection):
+    mock_events_collection.insert_one(
+        {
+            "id": "evt-bulk2",
+            "timestamp": datetime.now(UTC).isoformat(),
+            "service": "Exchange",
+            "operation": "Update",
+            "result": "success",
+            "actor_display": "Alice",
+            "raw_text": "",
+            "tags": ["old"],
+        }
+    )
+    response = client.post("/api/events/bulk-tags?service=Exchange", json={"tags": ["backup"], "mode": "replace"})
+    assert response.status_code == 200
+    doc = mock_events_collection.find_one({"id": "evt-bulk2"})
+    assert doc["tags"] == ["backup"]
482
backend/tests/test_ask.py
Normal file
482
backend/tests/test_ask.py
Normal file
@@ -0,0 +1,482 @@
|
||||
import asyncio
|
||||
from datetime import UTC, datetime, timedelta
|
||||
|
||||
from jobs import set_cached_ask
|
||||
from routes.ask import _build_event_query, _extract_entity, _extract_time_range
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Unit tests: time-range extraction
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestExtractTimeRange:
|
||||
def test_last_n_days(self):
|
||||
start, end = _extract_time_range("What happened in the last 3 days?")
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
# Start should be roughly 3 days before end
|
||||
start_dt = datetime.fromisoformat(start.replace("Z", "+00:00"))
|
||||
end_dt = datetime.fromisoformat(end.replace("Z", "+00:00"))
|
||||
delta = end_dt - start_dt
|
||||
assert delta.days == 3
|
||||
|
||||
def test_last_n_hours(self):
|
||||
start, end = _extract_time_range("Show me events in the last 24 hours")
|
||||
start_dt = datetime.fromisoformat(start.replace("Z", "+00:00"))
|
||||
end_dt = datetime.fromisoformat(end.replace("Z", "+00:00"))
|
||||
delta = end_dt - start_dt
|
||||
assert delta.total_seconds() == 24 * 3600
|
||||
|
||||
def test_last_week(self):
|
||||
start, end = _extract_time_range("What happened last week?")
|
||||
start_dt = datetime.fromisoformat(start.replace("Z", "+00:00"))
|
||||
end_dt = datetime.fromisoformat(end.replace("Z", "+00:00"))
|
||||
assert (end_dt - start_dt).days == 7
|
||||
|
||||
def test_yesterday(self):
|
||||
start, end = _extract_time_range("Show me yesterday's events")
|
||||
start_dt = datetime.fromisoformat(start.replace("Z", "+00:00"))
|
||||
end_dt = datetime.fromisoformat(end.replace("Z", "+00:00"))
|
||||
assert (end_dt - start_dt).days == 1
|
||||
|
||||
def test_today(self):
|
||||
start, end = _extract_time_range("What happened today?")
|
||||
start_dt = datetime.fromisoformat(start.replace("Z", "+00:00"))
|
||||
# end_dt is not needed for this assertion
|
||||
# Should be from midnight today to now
|
||||
assert start_dt.hour == 0
|
||||
assert start_dt.minute == 0
|
||||
assert start_dt.second == 0
|
||||
|
||||
def test_no_time_pattern_returns_none(self):
|
||||
start, end = _extract_time_range("What happened to device ABC?")
|
||||
assert start is None
|
||||
assert end is None
|
||||
|
||||
def test_last_n_minutes(self):
|
||||
start, end = _extract_time_range("Show me events in the last 15 minutes")
|
||||
start_dt = datetime.fromisoformat(start.replace("Z", "+00:00"))
|
||||
end_dt = datetime.fromisoformat(end.replace("Z", "+00:00"))
|
||||
assert (end_dt - start_dt).total_seconds() == 15 * 60
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# Unit tests: entity extraction
# ---------------------------------------------------------------------------


class TestExtractEntity:
    def test_device_hint(self):
        assert _extract_entity("What happened to device LAPTOP-001?") == "LAPTOP-001"

    def test_user_hint(self):
        assert _extract_entity("Show me user alice@example.com") == "alice@example.com"

    def test_laptop_hint(self):
        assert _extract_entity("What did laptop HR-Desk-04 do?") == "HR-Desk-04"

    def test_server_hint(self):
        assert _extract_entity("Check server WEB-01") == "WEB-01"

    def test_quoted_string(self):
        assert _extract_entity('What happened to "Surface-Pro-7"?') == "Surface-Pro-7"

    def test_single_quoted_string(self):
        assert _extract_entity("What happened to 'VM-WEB-01' today?") == "VM-WEB-01"

    def test_email_address(self):
        assert _extract_entity("What did tomas.svensson@contoso.com do?") == "tomas.svensson@contoso.com"

    def test_no_entity_returns_none(self):
        assert _extract_entity("What happened in the last 3 days?") is None

    def test_vm_hint(self):
        assert _extract_entity("Show me vm APP-SERVER-02") == "APP-SERVER-02"

    def test_computer_hint(self):
        assert _extract_entity("What happened to computer DESK-123?") == "DESK-123"

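These tests imply three extraction strategies: quoted names win, then a name following a type hint word (device, user, laptop, server, vm, computer), then a bare email address. A sketch consistent with all ten assertions, again an assumption rather than the real `_extract_entity`:

import re

_HINTS = r"(?:device|computer|laptop|server|vm|user)"


def _extract_entity(question: str) -> str | None:
    # Quoted names take priority (double or single quotes)
    m = re.search(r"""["']([^"']+)["']""", question)
    if m:
        return m.group(1)
    # "<hint> <name>": capture the token after the hint word; trailing "?"
    # is excluded because it is not in the character class
    m = re.search(rf"\b{_HINTS}\s+([\w.@-]+\w)", question, re.IGNORECASE)
    if m:
        return m.group(1)
    # Bare email address anywhere in the question
    m = re.search(r"[\w.+-]+@[\w-]+\.[\w.-]*\w", question)
    if m:
        return m.group(0)
    return None
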
# ---------------------------------------------------------------------------
# Unit tests: query builder
# ---------------------------------------------------------------------------


class TestBuildEventQuery:
    def test_entity_only(self):
        q = _build_event_query("ABC123", None, None)
        assert "$and" in q
        or_clause = q["$and"][0]["$or"]
        assert any("target_displays" in c for c in or_clause)
        assert any("actor_display" in c for c in or_clause)
        assert any("raw_text" in c for c in or_clause)

    def test_time_only(self):
        q = _build_event_query(None, "2024-01-01T00:00:00Z", "2024-01-02T00:00:00Z")
        assert q["$and"][0]["timestamp"]["$gte"] == "2024-01-01T00:00:00Z"
        assert q["$and"][0]["timestamp"]["$lte"] == "2024-01-02T00:00:00Z"

    def test_entity_and_time(self):
        q = _build_event_query("DEV-01", "2024-01-01T00:00:00Z", "2024-01-02T00:00:00Z")
        assert len(q["$and"]) == 2
        assert "timestamp" in q["$and"][0] or "timestamp" in q["$and"][1]

    def test_empty_returns_empty(self):
        q = _build_event_query(None, None, None)
        assert q == {}

    def test_entity_is_escaped_for_regex(self):
        q = _build_event_query("DEV.01", None, None)
        # The dot should be escaped in the regex
        or_clause = q["$and"][0]["$or"]
        raw_regex = or_clause[-1]["raw_text"]["$regex"]
        assert raw_regex == "DEV\\.01"

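The shape pinned down here is a Mongo-style filter: an $and of an entity $or-clause (with raw_text last, regex-escaped) and an optional timestamp range. A sketch that passes these five tests; the $options flag and exact field list are assumptions (the real version may also match actor_upn, which the any() assertions would tolerate):

import re


def _build_event_query(entity: str | None, start: str | None, end: str | None) -> dict:
    # Mongo-style query: entity matched case-insensitively across display
    # fields and raw_text, all clauses combined under $and
    clauses: list[dict] = []
    if entity:
        pattern = re.escape(entity)  # "DEV.01" -> "DEV\\.01"
        clauses.append(
            {
                "$or": [
                    {"target_displays": {"$regex": pattern, "$options": "i"}},
                    {"actor_display": {"$regex": pattern, "$options": "i"}},
                    {"raw_text": {"$regex": pattern, "$options": "i"}},
                ]
            }
        )
    if start and end:
        clauses.append({"timestamp": {"$gte": start, "$lte": end}})
    return {"$and": clauses} if clauses else {}
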
# ---------------------------------------------------------------------------
# Integration tests: /api/ask endpoint
# ---------------------------------------------------------------------------


class TestAskEndpoint:
    def test_ask_empty_question(self, client):
        response = client.post("/api/ask", json={"question": ""})
        assert response.status_code == 400

    def test_ask_no_events(self, client):
        response = client.post("/api/ask", json={"question": "What happened to device NONEXISTENT in the last 3 days?"})
        assert response.status_code == 200
        data = response.json()
        assert data["answer"] != ""
        assert data["events"] == []
        assert data["llm_used"] is False
        assert data["query_info"]["entity"] == "NONEXISTENT"

    def test_ask_with_events_fallback(self, client, mock_events_collection):
        now = datetime.now(UTC)
        mock_events_collection.insert_one(
            {
                "id": "evt-ask-1",
                "timestamp": now.isoformat(),
                "service": "Device",
                "operation": "Update device",
                "result": "success",
                "actor_display": "Admin Bob",
                "actor_upn": "bob@example.com",
                "target_displays": ["LAPTOP-001"],
                "display_summary": "Update device | device: LAPTOP-001 by Admin Bob",
                "raw_text": "LAPTOP-001 something",
            }
        )
        response = client.post("/api/ask", json={"question": "What happened to device LAPTOP-001 in the last 3 days?"})
        assert response.status_code == 200
        data = response.json()
        assert data["llm_used"] is False
        assert len(data["events"]) == 1
        assert data["events"][0]["id"] == "evt-ask-1"
        assert "LAPTOP-001" in data["answer"]
        assert data["query_info"]["entity"] == "LAPTOP-001"
        assert data["query_info"]["event_count"] == 1

    def test_ask_defaults_to_7_days_when_no_time(self, client, mock_events_collection):
        # Insert an event from 5 days ago
        five_days_ago = datetime.now(UTC) - timedelta(days=5)
        mock_events_collection.insert_one(
            {
                "id": "evt-ask-old",
                "timestamp": five_days_ago.isoformat(),
                "service": "Directory",
                "operation": "Add user",
                "result": "success",
                "actor_display": "Alice",
                "target_displays": ["DESKTOP-999"],
                "display_summary": "summary",
                "raw_text": "raw",
            }
        )
        response = client.post("/api/ask", json={"question": "What happened to DESKTOP-999?"})
        assert response.status_code == 200
        data = response.json()
        assert data["query_info"]["event_count"] == 1
        assert data["events"][0]["id"] == "evt-ask-old"

    def test_ask_event_outside_time_window(self, client, mock_events_collection):
        # Event from 10 days ago — outside default 7-day window
        old = datetime.now(UTC) - timedelta(days=10)
        mock_events_collection.insert_one(
            {
                "id": "evt-too-old",
                "timestamp": old.isoformat(),
                "service": "Directory",
                "operation": "Add user",
                "result": "success",
                "actor_display": "Alice",
                "target_displays": ["OLD-DEVICE"],
                "display_summary": "summary",
                "raw_text": "raw",
            }
        )
        response = client.post("/api/ask", json={"question": "What happened to OLD-DEVICE?"})
        assert response.status_code == 200
        data = response.json()
        # Default is 7 days, so 10-day-old event should not match
        assert data["query_info"]["event_count"] == 0

    def test_ask_with_llm(self, client, mock_events_collection, monkeypatch):
        now = datetime.now(UTC)
        mock_events_collection.insert_one(
            {
                "id": "evt-llm",
                "timestamp": now.isoformat(),
                "service": "Device",
                "operation": "Wipe device",
                "result": "failure",
                "actor_display": "System",
                "target_displays": ["PHONE-001"],
                "display_summary": "Wipe device | device: PHONE-001 by System",
                "raw_text": "PHONE-001 wipe failed",
            }
        )

        async def fake_llm(question, events, total=None, excluded_services=None):
            return "The device had a failed wipe attempt."

        monkeypatch.setattr("routes.ask.LLM_API_KEY", "fake-key")
        monkeypatch.setattr("routes.ask._call_llm", fake_llm)

        response = client.post("/api/ask", json={"question": "What happened to PHONE-001 in the last day?"})
        assert response.status_code == 200
        data = response.json()
        assert data["llm_used"] is True
        assert data["answer"] == "The device had a failed wipe attempt."
        assert len(data["events"]) == 1

    def test_ask_falls_back_when_llm_errors(self, client, mock_events_collection, monkeypatch):
        now = datetime.now(UTC)
        mock_events_collection.insert_one(
            {
                "id": "evt-fallback",
                "timestamp": now.isoformat(),
                "service": "Directory",
                "operation": "Add user",
                "result": "success",
                "actor_display": "Alice",
                "target_displays": ["USER-001"],
                "display_summary": "summary",
                "raw_text": "raw",
            }
        )

        async def failing_llm(question, events, total=None):
            raise RuntimeError("LLM service down")

        monkeypatch.setattr("routes.ask.LLM_API_KEY", "fake-key")
        monkeypatch.setattr("routes.ask._call_llm", failing_llm)

        response = client.post("/api/ask", json={"question": "What happened to USER-001?"})
        assert response.status_code == 200
        data = response.json()
        assert data["llm_used"] is False  # Falls back
        assert len(data["events"]) == 1
        assert "Found 1 event" in data["answer"]

    def test_ask_with_explicit_filters(self, client, mock_events_collection):
        now = datetime.now(UTC)
        mock_events_collection.insert_one(
            {
                "id": "evt-exchange",
                "timestamp": now.isoformat(),
                "service": "Exchange",
                "operation": "Update",
                "result": "failure",
                "actor_display": "Alice",
                "target_displays": ["LAPTOP-001"],
                "display_summary": "summary",
                "raw_text": "raw",
            }
        )
        mock_events_collection.insert_one(
            {
                "id": "evt-directory",
                "timestamp": now.isoformat(),
                "service": "Directory",
                "operation": "Add user",
                "result": "success",
                "actor_display": "Alice",
                "target_displays": ["LAPTOP-001"],
                "display_summary": "summary",
                "raw_text": "raw",
            }
        )
        response = client.post(
            "/api/ask",
            json={"question": "What happened to LAPTOP-001?", "services": ["Exchange"], "result": "failure"},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["query_info"]["event_count"] == 1
        assert data["events"][0]["id"] == "evt-exchange"

    def test_ask_with_explicit_actor_filter(self, client, mock_events_collection):
        now = datetime.now(UTC)
        mock_events_collection.insert_one(
            {
                "id": "evt-bob",
                "timestamp": now.isoformat(),
                "service": "Directory",
                "operation": "Add user",
                "result": "success",
                "actor_display": "Bob",
                "actor_upn": "bob@example.com",
                "target_displays": ["USER-001"],
                "display_summary": "summary",
                "raw_text": "raw",
            }
        )
        mock_events_collection.insert_one(
            {
                "id": "evt-alice",
                "timestamp": now.isoformat(),
                "service": "Directory",
                "operation": "Remove user",
                "result": "success",
                "actor_display": "Alice",
                "actor_upn": "alice@example.com",
                "target_displays": ["USER-001"],
                "display_summary": "summary",
                "raw_text": "raw",
            }
        )
        response = client.post("/api/ask", json={"question": "What happened to USER-001?", "actor": "bob"})
        assert response.status_code == 200
        data = response.json()
        assert data["query_info"]["event_count"] == 1
        assert data["events"][0]["id"] == "evt-bob"

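Taken together, these tests document the /api/ask request and response contract. A quick manual smoke test against a running instance could look like this; the localhost:8000 address is an assumption based on the port mapping in docker-compose.yml:

import requests

# Assumes the backend from docker-compose is listening on localhost:8000
resp = requests.post(
    "http://localhost:8000/api/ask",
    json={"question": "What happened to device LAPTOP-001 in the last 3 days?"},
    timeout=30,
)
resp.raise_for_status()
data = resp.json()
print(data["answer"])                     # natural-language summary
print(data["llm_used"])                   # False when no LLM key is configured
print(data["query_info"]["event_count"])  # number of matching events
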
class TestAskCaching:
    def test_ask_cache_hit_returns_cached_answer(self, client, mock_events_collection, monkeypatch):
        """If the answer is cached, the LLM should not be called."""
        now = datetime.now(UTC)
        mock_events_collection.insert_one(
            {
                "id": "evt-cache",
                "timestamp": now.isoformat(),
                "service": "Directory",
                "operation": "Add user",
                "result": "success",
                "actor_display": "Alice",
                "target_displays": ["USER-001"],
                "display_summary": "summary",
                "raw_text": "raw",
            }
        )

        llm_called = False

        async def fake_llm(question, events, total=None, excluded_services=None):
            nonlocal llm_called
            llm_called = True
            return "This should NOT appear."

        monkeypatch.setattr("routes.ask.LLM_API_KEY", "fake-key")
        monkeypatch.setattr("routes.ask._call_llm", fake_llm)

        # Pre-populate cache with a specific answer
        class CachingFakeRedis:
            def __init__(self):
                self.store = {}

            async def get(self, key):
                return self.store.get(key)

            async def setex(self, key, ttl, value):
                self.store[key] = value

        redis = CachingFakeRedis()
        # Seed cache with the exact filters the endpoint will generate
        filters_snapshot = {
            "services": None,
            "actor": None,
            "operation": None,
            "result": None,
            "start": None,
            "end": None,
            "include_tags": None,
            "exclude_tags": None,
        }
        asyncio.run(
            set_cached_ask(
                redis,
                "What happened to USER-001?",
                filters_snapshot,
                [{"id": "evt-cache"}],
                {"answer": "Cached answer!", "llm_used": True, "llm_error": None},
            )
        )

        async def fake_get_arq_pool():
            return redis

        monkeypatch.setattr("routes.ask.get_arq_pool", fake_get_arq_pool)

        response = client.post("/api/ask", json={"question": "What happened to USER-001?"})
        assert response.status_code == 200
        data = response.json()
        assert data["answer"] == "Cached answer!"
        assert data["llm_used"] is True
        assert llm_called is False

    def test_ask_async_mode_returns_job_id(self, client, mock_events_collection, monkeypatch):
        """Async mode should return immediately with a job_id."""
        now = datetime.now(UTC)
        mock_events_collection.insert_one(
            {
                "id": "evt-async",
                "timestamp": now.isoformat(),
                "service": "Directory",
                "operation": "Add user",
                "result": "success",
                "actor_display": "Alice",
                "target_displays": ["USER-001"],
                "display_summary": "summary",
                "raw_text": "raw",
            }
        )

        monkeypatch.setattr("routes.ask.LLM_API_KEY", "fake-key")

        # Mock arq pool to capture enqueue_job call
        class FakeArqPool:
            def __init__(self):
                self.enqueued = []

            async def get(self, key):
                return None

            async def setex(self, key, ttl, value):
                pass

            async def enqueue_job(self, func, *args, **kwargs):
                from unittest.mock import MagicMock

                job = MagicMock()
                job.job_id = "job-12345"
                self.enqueued.append((func, args, kwargs))
                return job

        pool = FakeArqPool()

        async def fake_get_arq_pool():
            return pool

        monkeypatch.setattr("routes.ask.get_arq_pool", fake_get_arq_pool)

        response = client.post("/api/ask", json={"question": "What happened to USER-001?", "async_mode": True})
        assert response.status_code == 200
        data = response.json()
        assert data["job_id"] == "job-12345"
        assert data["llm_used"] is False
        assert "being processed" in data["answer"]
        assert len(pool.enqueued) == 1
        assert pool.enqueued[0][0] == "process_ask_question"
@@ -30,9 +30,7 @@ def test_normalize_event_basic():
                "userPrincipalName": "alice@example.com",
            }
        },
        "targetResources": [
            {"id": "t1", "displayName": "Bob", "type": "User"}
        ],
        "targetResources": [{"id": "t1", "displayName": "Bob", "type": "User"}],
    }
    out = normalize_event(e)
    assert out["id"] == "abc"
@@ -1,29 +1,35 @@
from datetime import UTC, datetime

from rules import _matches, evaluate_event


def test_matches_equals():
    rule = {"conditions": [{"field": "operation", "op": "eq", "value": "Add user"}]}
    event = {"operation": "Add user", "timestamp": datetime.now(UTC).isoformat()}
    from rules import _matches

    assert _matches(rule, event) is True


def test_matches_not_equals():
    rule = {"conditions": [{"field": "operation", "op": "neq", "value": "Delete user"}]}
    event = {"operation": "Add user", "timestamp": datetime.now(UTC).isoformat()}
    from rules import _matches

    assert _matches(rule, event) is True


def test_matches_contains():
    rule = {"conditions": [{"field": "actor_display", "op": "contains", "value": "Admin"}]}
    event = {"actor_display": "Admin (admin@example.com)", "timestamp": datetime.now(UTC).isoformat()}
    from rules import _matches

    assert _matches(rule, event) is True


def test_matches_after_hours():
    rule = {"conditions": [{"field": "timestamp", "op": "after_hours", "value": None}]}
    event = {"timestamp": "2024-01-01T22:00:00Z"}
    from rules import _matches

    assert _matches(rule, event) is True

    event2 = {"timestamp": "2024-01-01T10:00:00Z"}
@@ -31,13 +37,24 @@ def test_matches_after_hours():


def test_evaluate_event_creates_alert(monkeypatch):
    from rules import alerts_collection
    from rules import alerts_collection, evaluate_event

    monkeypatch.setattr("rules.load_rules", lambda: [
        {"_id": "r1", "name": "Test rule", "enabled": True, "severity": "high", "conditions": [{"field": "operation", "op": "eq", "value": "Add user"}], "message": "Alert!"}
    ])
    monkeypatch.setattr(
        "rules.load_rules",
        lambda: [
            {
                "_id": "r1",
                "name": "Test rule",
                "enabled": True,
                "severity": "high",
                "conditions": [{"field": "operation", "op": "eq", "value": "Add user"}],
                "message": "Alert!",
            }
        ],
    )

    inserted = {}

    def mock_insert(doc):
        inserted["doc"] = doc

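The fixtures pin down the semantics of the after_hours operator: 22:00 matches, 10:00 does not. A sketch of a check consistent with those fixtures follows; the 08:00-18:00 business-hours window is an assumption, since the actual cutoffs in rules._matches are not shown in this diff:

from datetime import datetime


def _is_after_hours(timestamp: str, start_hour: int = 18, end_hour: int = 8) -> bool:
    # Assumed business hours of 08:00-18:00 UTC:
    # 22:00 is after hours, 10:00 is not (matching the test fixtures)
    hour = datetime.fromisoformat(timestamp.replace("Z", "+00:00")).hour
    return hour >= start_hour or hour < end_hour
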
@@ -1,4 +1,3 @@

import requests
import structlog
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential
@@ -18,12 +17,16 @@ RETRY_CONFIG = {


@retry(**RETRY_CONFIG)
def get_with_retry(url: str, headers: dict | None = None, params: dict | None = None, timeout: float = 20) -> requests.Response:
def get_with_retry(
    url: str, headers: dict | None = None, params: dict | None = None, timeout: float = 20
) -> requests.Response:
    res = requests.get(url, headers=headers, params=params, timeout=timeout)
    return res


@retry(**RETRY_CONFIG)
def post_with_retry(url: str, headers: dict | None = None, data: dict | None = None, params: dict | None = None, timeout: float = 15) -> requests.Response:
def post_with_retry(
    url: str, headers: dict | None = None, data: dict | None = None, params: dict | None = None, timeout: float = 15
) -> requests.Response:
    res = requests.post(url, headers=headers, data=data, params=params, timeout=timeout)
    return res

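The hunk above elides the body of RETRY_CONFIG. Given the tenacity imports, a plausible shape is sketched below; the attempt count, backoff bounds, and retried exception type are assumptions, not the file's actual values:

RETRY_CONFIG = {
    # Hypothetical values: retry transient connection failures with
    # exponential backoff, then give up and re-raise the last exception
    "retry": retry_if_exception_type(requests.exceptions.ConnectionError),
    "stop": stop_after_attempt(3),
    "wait": wait_exponential(multiplier=1, min=1, max=10),
    "reraise": True,
}
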
102
docker-compose.prod.yml
Normal file
@@ -0,0 +1,102 @@
services:
  redis:
    image: valkey/valkey:8-alpine
    container_name: aoc-redis
    restart: always
    volumes:
      - redis_data:/data
    networks:
      - aoc-internal
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 5
      start_period: 5s

  mongo:
    image: mongo:7
    container_name: aoc-mongo
    restart: always
    # Do NOT expose MongoDB port to the host in production
    # Only backend can reach it via the internal Docker network
    environment:
      MONGO_INITDB_ROOT_USERNAME: ${MONGO_ROOT_USERNAME}
      MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ROOT_PASSWORD}
    volumes:
      - mongo_data:/data/db
    networks:
      - aoc-internal
    healthcheck:
      test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s

  backend:
    image: git.cqre.net/cqrenet/aoc-backend:${AOC_VERSION:-latest}
    container_name: aoc-backend
    restart: always
    env_file:
      - .env
    environment:
      MONGO_URI: mongodb://${MONGO_ROOT_USERNAME}:${MONGO_ROOT_PASSWORD}@mongo:27017/
      REDIS_URL: redis://redis:6379/0
    depends_on:
      mongo:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - aoc-internal
    healthcheck:
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"]
      interval: 15s
      timeout: 5s
      retries: 3
      start_period: 10s

  worker:
    image: git.cqre.net/cqrenet/aoc-backend:${AOC_VERSION:-latest}
    container_name: aoc-worker
    restart: always
    env_file:
      - .env
    environment:
      MONGO_URI: mongodb://${MONGO_ROOT_USERNAME}:${MONGO_ROOT_PASSWORD}@mongo:27017/
      REDIS_URL: redis://redis:6379/0
    command: ["arq", "jobs.WorkerSettings"]
    depends_on:
      redis:
        condition: service_healthy
      mongo:
        condition: service_healthy
    networks:
      - aoc-internal

  nginx:
    image: nginx:alpine
    container_name: aoc-nginx
    restart: always
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/ssl:/etc/nginx/ssl:ro
    depends_on:
      backend:
        condition: service_healthy
    networks:
      - aoc-internal
      - aoc-public

volumes:
  mongo_data:
  redis_data:

networks:
  aoc-internal:
    internal: true
  aoc-public:
@@ -1,4 +1,13 @@
services:
  redis:
    image: valkey/valkey:8-alpine
    container_name: aoc-redis
    restart: always
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data

  mongo:
    image: mongo:7
    container_name: aoc-mongo
@@ -13,16 +22,35 @@ services:

  backend:
    build: ./backend
    # For production, use the pre-built image instead:
    # image: git.cqre.net/cqrenet/aoc-backend:v1.2.5
    container_name: aoc-backend
    restart: always
    env_file:
      - .env
    environment:
      MONGO_URI: mongodb://${MONGO_ROOT_USERNAME}:${MONGO_ROOT_PASSWORD}@mongo:${MONGO_PORT}/
      REDIS_URL: redis://redis:6379/0
    depends_on:
      - mongo
      - redis
    ports:
      - "8000:8000"

  worker:
    build: ./backend
    container_name: aoc-worker
    restart: always
    env_file:
      - .env
    environment:
      MONGO_URI: mongodb://${MONGO_ROOT_USERNAME}:${MONGO_ROOT_PASSWORD}@mongo:${MONGO_PORT}/
      REDIS_URL: redis://redis:6379/0
    command: ["arq", "jobs.WorkerSettings"]
    depends_on:
      - redis
      - mongo

volumes:
  mongo_data:
  redis_data:

94
nginx/nginx.conf
Normal file
@@ -0,0 +1,94 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;

    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_types text/plain text/css text/xml application/json application/javascript application/rss+xml application/atom+xml image/svg+xml;

    # Security headers
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;
    add_header Referrer-Policy "strict-origin-when-cross-origin" always;

    # Upstream backend
    upstream aoc_backend {
        server backend:8000;
    }

    # HTTP → HTTPS redirect (optional; enable once TLS is configured)
    # server {
    #     listen 80;
    #     server_name _;
    #     return 301 https://$host$request_uri;
    # }

    server {
        listen 80;
        server_name _;

        client_max_body_size 50M;
        proxy_connect_timeout 60s;
        proxy_send_timeout 60s;
        proxy_read_timeout 60s;

        location / {
            proxy_pass http://aoc_backend;
            proxy_http_version 1.1;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_buffering off;
        }
    }

    # HTTPS server (uncomment and configure once you have certificates)
    # server {
    #     listen 443 ssl http2;
    #     server_name _;
    #
    #     ssl_certificate /etc/nginx/ssl/cert.pem;
    #     ssl_certificate_key /etc/nginx/ssl/key.pem;
    #     ssl_protocols TLSv1.2 TLSv1.3;
    #     ssl_ciphers HIGH:!aNULL:!MD5;
    #     ssl_prefer_server_ciphers on;
    #
    #     client_max_body_size 50M;
    #
    #     location / {
    #         proxy_pass http://aoc_backend;
    #         proxy_http_version 1.1;
    #         proxy_set_header Host $host;
    #         proxy_set_header X-Real-IP $remote_addr;
    #         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    #         proxy_set_header X-Forwarded-Proto $scheme;
    #         proxy_buffering off;
    #     }
    # }
}