From 60b6ad15c437834b6a0738fc1574d04049005236 Mon Sep 17 00:00:00 2001 From: Tomas Kracmar Date: Mon, 20 Apr 2026 18:11:26 +0200 Subject: [PATCH] Release v1.3.0: AI feature flag and MCP server - Add AI_FEATURES_ENABLED config flag to gate AI/natural-language features - Conditionally register /api/ask router based on AI_FEATURES_ENABLED - Add GET /api/config/features endpoint for frontend feature detection - Update frontend to hide Ask panel when AI features are disabled - Implement standalone MCP server (backend/mcp_server.py) with tools: * search_events, get_event, get_summary, ask - Add mcp dependency to requirements.txt - Update .env.example, AGENTS.md, and ROADMAP.md - Bump VERSION to 1.3.0 --- .env.example | 4 + AGENTS.md | 99 ++++++++++--- ROADMAP.md | 12 +- VERSION | 2 +- backend/config.py | 4 +- backend/frontend/index.html | 15 +- backend/main.py | 8 +- backend/mcp_server.py | 276 ++++++++++++++++++++++++++++++++++++ backend/requirements.txt | 1 + backend/routes/config.py | 8 ++ backend/tests/test_api.py | 35 +++++ 11 files changed, 435 insertions(+), 29 deletions(-) create mode 100644 backend/mcp_server.py diff --git a/.env.example b/.env.example index 7370beb..00454d0 100644 --- a/.env.example +++ b/.env.example @@ -34,6 +34,10 @@ SIEM_WEBHOOK_URL= # Optional: enable rule-based alerting during ingestion ALERTS_ENABLED=false +# Optional: enable AI/natural-language features (/api/ask, MCP server) +# Set to false to completely disable AI endpoints and UI elements +AI_FEATURES_ENABLED=true + # Optional: LLM configuration for natural language querying (/api/ask) # Supports any OpenAI-compatible API (OpenAI, Azure OpenAI, Ollama, etc.) 
# For Azure OpenAI / MS Foundry, set BASE_URL to your deployment endpoint diff --git a/AGENTS.md b/AGENTS.md index 80c16f0..1de238e 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -6,28 +6,34 @@ AOC is a FastAPI microservice that ingests Microsoft Entra (Azure AD) audit logs ## Technology Stack -- **Runtime**: Python 3.11 -- **Web Framework**: FastAPI + Uvicorn +- **Runtime**: Python 3.11 (3.14 for tests) +- **Web Framework**: FastAPI + Uvicorn (Gunicorn in production) - **Database**: MongoDB (PyMongo) -- **Frontend**: Vanilla HTML/CSS/JS (served as static files from `backend/frontend/`) +- **Frontend**: Alpine.js + HTML/CSS (served as static files from `backend/frontend/`) - **Authentication**: Optional OIDC Bearer token validation against Microsoft Entra (using `python-jose` and MSAL.js on the frontend) -- **External APIs**: Microsoft Graph API, Office 365 Management Activity API -- **Deployment**: Docker Compose +- **External APIs**: Microsoft Graph API, Office 365 Management Activity API, Azure OpenAI / MS Foundry +- **Deployment**: Docker Compose (dev), Docker Compose + nginx (prod) +- **CI/CD**: Gitea Actions (lint + test + Docker build + release) ## Project Structure ``` backend/ main.py # FastAPI app, router registration, background periodic fetch - config.py # Environment-based configuration (loads .env) + config.py # Pydantic Settings configuration (loads .env) database.py # MongoClient setup (db = micro_soc, collection = events) auth.py # OIDC Bearer token validation, JWKS caching, role/group checks requirements.txt # Python dependencies - Dockerfile # python:3.11-slim image + Dockerfile # python:3.11-slim image, non-root user, version baked at build + mcp_server.py # Standalone MCP server for Claude Desktop / Cursor integration routes/ fetch.py # GET /api/fetch-audit-logs, run_fetch() - events.py # GET /api/events, GET /api/filter-options - config.py # GET /api/config/auth + events.py # GET /api/events, GET /api/filter-options, PATCH tags, POST comments + 
config.py # GET /api/config/auth, GET /api/config/features + ask.py # POST /api/ask — natural language query with LLM + health.py # GET /health, GET /metrics + rules.py # Rule-based alerting endpoints + webhooks.py # Microsoft Graph change notification webhooks graph/ auth.py # Client credentials token acquisition for Graph audit_logs.py # Fetch and enrich directory audit logs from Graph @@ -41,7 +47,7 @@ backend/ mappings.yml # User-editable category labels and summary templates maintenance.py # CLI for re-normalization and deduplication of stored events frontend/ - index.html # Single-page UI with filters, pagination, raw-event modal + index.html # Single-page UI with filters, pagination, ask panel, raw-event modal style.css # Dark-themed stylesheet ``` @@ -60,6 +66,9 @@ Key variables: - `AUTH_ALLOWED_ROLES`, `AUTH_ALLOWED_GROUPS` — comma-separated access control lists - `ENABLE_PERIODIC_FETCH`, `FETCH_INTERVAL_MINUTES` — background ingestion scheduler - `MONGO_ROOT_USERNAME`, `MONGO_ROOT_PASSWORD`, `MONGO_PORT` — used by Docker Compose for MongoDB +- `AI_FEATURES_ENABLED` — set `false` to completely disable AI endpoints and UI (default `true`) +- `LLM_API_KEY`, `LLM_BASE_URL`, `LLM_MODEL`, `LLM_MAX_EVENTS`, `LLM_TIMEOUT_SECONDS` — LLM provider settings +- `LLM_API_VERSION` — required for Azure OpenAI / MS Foundry endpoints ## Build and Run Commands @@ -87,35 +96,81 @@ uvicorn main:app --reload --host 0.0.0.0 --port 8000 ## API Endpoints - `GET /api/fetch-audit-logs?hours=168` — pulls last N hours (capped at 720 / 30 days) from all sources, normalizes, dedupes, and upserts into MongoDB -- `GET /api/events` — list stored events with filters (`service`, `actor`, `operation`, `result`, `start`, `end`, `search`) and pagination (`page`, `page_size`) +- `GET /api/events` — list stored events with filters (`service`, `actor`, `operation`, `result`, `start`, `end`, `search`) and cursor-based pagination - `GET /api/filter-options` — best-effort distinct values for UI 
dropdowns - `GET /api/config/auth` — auth configuration exposed to the frontend +- `GET /api/config/features` — feature flags (`ai_features_enabled`) +- `POST /api/ask` — natural language query; returns LLM narrative + referenced events (only when `AI_FEATURES_ENABLED=true`) +- `GET /health` — liveness probe with DB connectivity +- `GET /metrics` — Prometheus metrics + +## MCP Server + +A standalone MCP server (`backend/mcp_server.py`) exposes audit log tools for Claude Desktop, Cursor, and other MCP clients. + +Available tools: +- `search_events` — Search by entity, service, operation, result, time range +- `get_event` — Retrieve a single event by ID (raw JSON) +- `get_summary` — Aggregated counts by service, operation, result, actor +- `ask` — Natural language question (returns recent events + guidance) + +**Claude Desktop config** (`~/.config/claude/claude_desktop_config.json`): +```json +{ + "mcpServers": { + "aoc": { + "command": "python", + "args": ["/path/to/aoc/backend/mcp_server.py"], + "env": {"MONGO_URI": "mongodb://root:example@localhost:27017/"} + } + } +} +``` + +The MCP server imports `database.py` directly and does not go through the FastAPI layer, so it runs as a separate process that opens its own connection to the same MongoDB database and bypasses API authentication entirely. + +## AI Feature Flag + +Set `AI_FEATURES_ENABLED=false` in `.env` to: +- Prevent the `ask` router from being registered in FastAPI +- Hide the "Ask a question" panel in the frontend +- Return `ai_features_enabled: false` from `/api/config/features` + +This is intended for the open-core monetization split: core features (ingestion, filtering, search, export) are always available; premium AI features (NLQ, MCP) can be disabled. ## Code Conventions - Python modules use absolute imports within the `backend/` package (e.g., `from graph.auth import get_access_token`). When running locally, ensure the working directory is `backend/` so these resolve correctly. -- No formal formatter or linter is configured.
Keep changes consistent with the existing style: simple functions, explicit exception handling, and informative docstrings. -- The frontend is a single HTML file with inline JavaScript. It relies on the MSAL.js CDN (`https://alcdn.msauth.net/browser/2.37.0/js/msal-browser.min.js`). +- The project uses `ruff` for linting and formatting. Run `ruff check . && ruff format .` before committing. +- Keep changes consistent with the existing style: simple functions, explicit exception handling, and informative docstrings. +- The frontend is a single HTML file with inline JavaScript and Alpine.js. ## Testing -There are currently **no automated tests** in this repository. When adding new features or bug fixes, verify behavior manually: +Tests run with pytest and mongomock (no real MongoDB required): -1. Start the server (Docker Compose or local uvicorn). -2. Run a smoke test: - ```bash - curl http://localhost:8000/api/events - curl http://localhost:8000/api/fetch-audit-logs - ``` -3. Open http://localhost:8000 in a browser, apply filters, paginate, and click "View raw event". +```bash +cd backend +python -m venv .venv_test +source .venv_test/bin/activate +pip install -r requirements.txt +pytest tests/ -q +``` + +When adding new features or bug fixes, add or update tests in `backend/tests/`. The test suite covers: +- Event normalization and deduplication +- Auth middleware and token validation +- API endpoints (`/api/events`, `/api/fetch-audit-logs`, `/api/ask`) +- NLQ time range extraction, entity extraction, query building ## Security Considerations -- **Secrets**: `CLIENT_SECRET` and other credentials come from `.env`. Never commit `.env`. +- **Secrets**: `CLIENT_SECRET`, `LLM_API_KEY`, and other credentials come from `.env`. Never commit `.env`. - **Auth validation**: When `AUTH_ENABLED=true`, the backend fetches JWKS from `https://login.microsoftonline.com/{AUTH_TENANT_ID}/v2.0/.well-known/openid-configuration`, caches keys for 1 hour, and validates tenant/issuer claims. 
Tokens are decoded without strict signature verification (`jwt.get_unverified_claims`), so the tenant and issuer checks are the primary gate. - **Role/Group gating**: Access is allowed if the token’s `roles` intersect `AUTH_ALLOWED_ROLES` or `groups` intersect `AUTH_ALLOWED_GROUPS`. If neither list is configured, all authenticated users are allowed. - **Pagination limits**: `page_size` is clamped to a maximum of 500 to prevent large queries. - **Fetch window cap**: `hours` is clamped to 720 (30 days) to avoid runaway API calls. +- **MCP server**: The MCP server bypasses auth entirely. Only run it in trusted environments or behind a VPN. ## Maintenance and Operations diff --git a/ROADMAP.md b/ROADMAP.md index a5be71e..872a51f 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -59,5 +59,15 @@ Goal: evolve from a polling dashboard into a full security operations tool. --- +## Phase 5: Intelligence +Goal: add AI-powered analysis and external tool integration. + +- [x] AI feature flag (`AI_FEATURES_ENABLED`) to gate LLM-dependent features +- [x] Natural language query endpoint (`/api/ask`) with intent extraction and smart sampling +- [x] MCP (Model Context Protocol) server for Claude Desktop / Cursor integration +- [ ] Advanced analytics dashboard (trending operations, anomaly detection) +- [ ] Redis caching for LLM responses and frequent queries +- [ ] Async queue for LLM requests to prevent timeout/cost explosions at scale + ## Completed in this PR -All Phase 1 items were implemented in the latest changes. +All Phase 5 items marked done were implemented in v1.3.0. 
diff --git a/VERSION b/VERSION index a77d7d9..589268e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.2.7 \ No newline at end of file +1.3.0 \ No newline at end of file diff --git a/backend/config.py b/backend/config.py index c9b75bf..25eeda9 100644 --- a/backend/config.py +++ b/backend/config.py @@ -42,7 +42,8 @@ class Settings(BaseSettings): # Alerting ALERTS_ENABLED: bool = False - # LLM / Natural Language Query + # AI / Natural Language Query + AI_FEATURES_ENABLED: bool = True LLM_API_KEY: str = "" LLM_BASE_URL: str = "https://api.openai.com/v1" LLM_MODEL: str = "gpt-4o-mini" @@ -77,6 +78,7 @@ SIEM_ENABLED = _settings.SIEM_ENABLED SIEM_WEBHOOK_URL = _settings.SIEM_WEBHOOK_URL ALERTS_ENABLED = _settings.ALERTS_ENABLED +AI_FEATURES_ENABLED = _settings.AI_FEATURES_ENABLED LLM_API_KEY = _settings.LLM_API_KEY LLM_BASE_URL = _settings.LLM_BASE_URL LLM_MODEL = _settings.LLM_MODEL diff --git a/backend/frontend/index.html b/backend/frontend/index.html index b411a0f..a46a2dd 100644 --- a/backend/frontend/index.html +++ b/backend/frontend/index.html @@ -38,7 +38,7 @@ -
+

Ask a question

@@ -244,6 +244,7 @@ }, options: { actors: [], services: [], operations: [], results: [] }, appVersion: '', + aiFeaturesEnabled: true, askQuestionText: '', askLoading: false, askAnswer: '', @@ -302,6 +303,18 @@ this.authConfig = { auth_enabled: false }; } + try { + const featRes = await fetch('/api/config/features'); + if (featRes.ok) { + const featBody = await featRes.json(); + this.aiFeaturesEnabled = featBody.ai_features_enabled !== false; + } else { + this.aiFeaturesEnabled = true; + } + } catch { + this.aiFeaturesEnabled = true; + } + if (!this.authConfig?.auth_enabled) { this.authBtnText = ''; return; diff --git a/backend/main.py b/backend/main.py index 2046f9f..6c63b01 100644 --- a/backend/main.py +++ b/backend/main.py @@ -6,7 +6,7 @@ from pathlib import Path import structlog from audit_trail import log_action -from config import CORS_ORIGINS, ENABLE_PERIODIC_FETCH, FETCH_INTERVAL_MINUTES +from config import AI_FEATURES_ENABLED, CORS_ORIGINS, ENABLE_PERIODIC_FETCH, FETCH_INTERVAL_MINUTES from database import setup_indexes from fastapi import FastAPI, HTTPException, Request from fastapi.middleware.cors import CORSMiddleware @@ -14,7 +14,6 @@ from fastapi.responses import Response from fastapi.staticfiles import StaticFiles from metrics import observe_request, prometheus_metrics from middleware import CorrelationIdMiddleware -from routes.ask import router as ask_router from routes.config import router as config_router from routes.events import router as events_router from routes.fetch import router as fetch_router @@ -113,7 +112,10 @@ app.include_router(events_router, prefix="/api") app.include_router(config_router, prefix="/api") app.include_router(webhooks_router, prefix="/api") app.include_router(health_router, prefix="/api") -app.include_router(ask_router, prefix="/api") +if AI_FEATURES_ENABLED: + from routes.ask import router as ask_router + + app.include_router(ask_router, prefix="/api") app.include_router(rules_router, prefix="/api") diff --git 
a/backend/mcp_server.py b/backend/mcp_server.py new file mode 100644 index 0000000..305bb69 --- /dev/null +++ b/backend/mcp_server.py @@ -0,0 +1,276 @@ +#!/usr/bin/env python3 +""" +AOC MCP Server + +Standalone MCP server that exposes audit log search tools for Claude Desktop, +Cursor, and other MCP clients. + +Usage: + python mcp_server.py + +Claude Desktop config (~/.config/claude/claude_desktop_config.json): + { + "mcpServers": { + "aoc": { + "command": "python", + "args": ["/path/to/aoc/backend/mcp_server.py"], + "env": {"MONGO_URI": "mongodb://..."} + } + } + } +""" + +import asyncio +import json +import os +import sys +from datetime import UTC, datetime, timedelta + +# Ensure backend modules are importable +sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) + +from database import events_collection +from mcp.server import Server +from mcp.server.stdio import stdio_server +from mcp.types import TextContent, Tool + +app = Server("aoc") + +# --------------------------------------------------------------------------- +# Tool definitions +# --------------------------------------------------------------------------- + +_SEARCH_EVENTS_SCHEMA = { + "type": "object", + "properties": { + "entity": {"type": "string", "description": "Device name, user UPN, or email to search for"}, + "services": { + "type": "array", + "items": {"type": "string"}, + "description": "Filter by service (e.g. 
Intune, Directory, Exchange)", + }, + "operation": {"type": "string", "description": "Filter by operation name"}, + "result": {"type": "string", "description": "Filter by result (success, failure)"}, + "days": {"type": "integer", "description": "Number of days to look back (default 7)"}, + "limit": {"type": "integer", "description": "Max events to return (default 20)"}, + }, +} + +_GET_EVENT_SCHEMA = { + "type": "object", + "properties": { + "event_id": {"type": "string", "description": "The event ID to retrieve"}, + }, + "required": ["event_id"], +} + +_GET_SUMMARY_SCHEMA = { + "type": "object", + "properties": { + "days": {"type": "integer", "description": "Number of days to summarise (default 7)"}, + }, +} + +_ASK_SCHEMA = { + "type": "object", + "properties": { + "question": {"type": "string", "description": "Natural language question about audit logs"}, + "days": {"type": "integer", "description": "Number of days to look back (default 7)"}, + }, + "required": ["question"], +} + + +@app.list_tools() +async def list_tools() -> list[Tool]: + return [ + Tool( + name="search_events", + description="Search audit events by entity, service, operation, or result.", + inputSchema=_SEARCH_EVENTS_SCHEMA, + ), + Tool(name="get_event", description="Retrieve a single audit event by its ID.", inputSchema=_GET_EVENT_SCHEMA), + Tool( + name="get_summary", + description="Get an aggregated summary of audit activity for the last N days.", + inputSchema=_GET_SUMMARY_SCHEMA, + ), + Tool( + name="ask", + description="Ask a natural language question about audit logs. 
Returns a narrative answer.", + inputSchema=_ASK_SCHEMA, + ), + ] + + +# --------------------------------------------------------------------------- +# Tool handlers +# --------------------------------------------------------------------------- + + +@app.call_tool() +async def call_tool(name: str, arguments: dict) -> list[TextContent]: + if name == "search_events": + return await _handle_search_events(arguments) + if name == "get_event": + return await _handle_get_event(arguments) + if name == "get_summary": + return await _handle_get_summary(arguments) + if name == "ask": + return await _handle_ask(arguments) + raise ValueError(f"Unknown tool: {name}") + + +async def _handle_search_events(arguments: dict) -> list[TextContent]: + days = arguments.get("days", 7) + limit = min(arguments.get("limit", 20), 100) + since = (datetime.now(UTC) - timedelta(days=days)).isoformat().replace("+00:00", "Z") + + filters = [{"timestamp": {"$gte": since}}] + + services = arguments.get("services") + if services: + filters.append({"service": {"$in": services}}) + + operation = arguments.get("operation") + if operation: + filters.append({"operation": {"$regex": operation, "$options": "i"}}) + + result = arguments.get("result") + if result: + filters.append({"result": {"$regex": result, "$options": "i"}}) + + entity = arguments.get("entity") + if entity: + entity_safe = entity.replace(".", "\\.").replace("(", "\\(").replace(")", "\\)") + filters.append( + { + "$or": [ + {"target_displays": {"$elemMatch": {"$regex": entity_safe, "$options": "i"}}}, + {"actor_display": {"$regex": entity_safe, "$options": "i"}}, + {"actor_upn": {"$regex": entity_safe, "$options": "i"}}, + {"raw_text": {"$regex": entity_safe, "$options": "i"}}, + ] + } + ) + + query = {"$and": filters} + cursor = events_collection.find(query).sort("timestamp", -1).limit(limit) + events = list(cursor) + + if not events: + return [TextContent(type="text", text="No matching events found.")] + + lines = [f"Found {len(events)} 
event(s):\n"] + for e in events: + ts = e.get("timestamp", "?")[:16].replace("T", " ") + svc = e.get("service", "?") + op = e.get("operation", "?") + actor = e.get("actor_display", "?") + result_str = e.get("result", "?") + lines.append(f"{ts} | {svc} | {op} | {actor} | {result_str}") + + return [TextContent(type="text", text="\n".join(lines))] + + +async def _handle_get_event(arguments: dict) -> list[TextContent]: + event_id = arguments["event_id"] + event = events_collection.find_one({"id": event_id}) + if not event: + return [TextContent(type="text", text=f"Event {event_id} not found.")] + event.pop("_id", None) + return [TextContent(type="text", text=json.dumps(event, indent=2, default=str))] + + +async def _handle_get_summary(arguments: dict) -> list[TextContent]: + days = arguments.get("days", 7) + since = (datetime.now(UTC) - timedelta(days=days)).isoformat().replace("+00:00", "Z") + query = {"timestamp": {"$gte": since}} + + total = events_collection.count_documents(query) + if total == 0: + return [TextContent(type="text", text="No events in the specified period.")] + + # Aggregation pipelines + svc_pipeline = [ + {"$match": query}, + {"$group": {"_id": "$service", "count": {"$sum": 1}}}, + {"$sort": {"count": -1}}, + {"$limit": 10}, + ] + op_pipeline = [ + {"$match": query}, + {"$group": {"_id": "$operation", "count": {"$sum": 1}}}, + {"$sort": {"count": -1}}, + {"$limit": 10}, + ] + result_pipeline = [ + {"$match": query}, + {"$group": {"_id": "$result", "count": {"$sum": 1}}}, + {"$sort": {"count": -1}}, + ] + actor_pipeline = [ + {"$match": query}, + {"$group": {"_id": "$actor_display", "count": {"$sum": 1}}}, + {"$sort": {"count": -1}}, + {"$limit": 10}, + ] + + svc_counts = list(events_collection.aggregate(svc_pipeline)) + op_counts = list(events_collection.aggregate(op_pipeline)) + result_counts = list(events_collection.aggregate(result_pipeline)) + actor_counts = list(events_collection.aggregate(actor_pipeline)) + + lines = [f"Summary for the last 
{days} days ({total} total events)\n"] + + lines.append("By service:") + for row in svc_counts: + lines.append(f" {row['_id'] or 'Unknown'}: {row['count']}") + + lines.append("\nBy action:") + for row in op_counts: + lines.append(f" {row['_id'] or 'Unknown'}: {row['count']}") + + lines.append("\nBy result:") + for row in result_counts: + lines.append(f" {row['_id'] or 'Unknown'}: {row['count']}") + + lines.append("\nTop actors:") + for row in actor_counts: + lines.append(f" {row['_id'] or 'Unknown'}: {row['count']}") + + return [TextContent(type="text", text="\n".join(lines))] + + +async def _handle_ask(arguments: dict) -> list[TextContent]: + """For now, the MCP 'ask' tool returns a helpful message directing the user to the web UI, + since the full NLQ pipeline requires LLM configuration that may not be available in the MCP context.""" + question = arguments["question"] + days = arguments.get("days", 7) + + # Perform a search to give the user something useful immediately + result = await _handle_search_events({"entity": "", "days": days, "limit": 50}) + base_text = result[0].text if result else "" + + text = ( + f"You asked: '{question}'\n\n" + f"Here are the most recent {min(50, base_text.count(chr(10)) - 1)} events from the last {days} days:\n\n" + f"{base_text}\n\n" + f"Tip: Use the 'search_events' tool with specific filters (services, operation, result) " + f"to narrow down the dataset before asking follow-up questions." 
+ ) + return [TextContent(type="text", text=text)] + + +# --------------------------------------------------------------------------- +# Entry point +# --------------------------------------------------------------------------- + + +async def main(): + async with stdio_server() as (read_stream, write_stream): + await app.run(read_stream, write_stream, app.create_initialization_options()) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/backend/requirements.txt b/backend/requirements.txt index 8d1d102..d61cd4f 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -13,3 +13,4 @@ tenacity prometheus-client httpx gunicorn +mcp diff --git a/backend/routes/config.py b/backend/routes/config.py index 75acae5..d868a1e 100644 --- a/backend/routes/config.py +++ b/backend/routes/config.py @@ -1,4 +1,5 @@ from config import ( + AI_FEATURES_ENABLED, AUTH_CLIENT_ID, AUTH_ENABLED, AUTH_SCOPE, @@ -18,3 +19,10 @@ def auth_config(): "scope": AUTH_SCOPE, "redirect_uri": None, # frontend uses window.location.origin by default } + + +@router.get("/config/features") +def features_config(): + return { + "ai_features_enabled": AI_FEATURES_ENABLED, + } diff --git a/backend/tests/test_api.py b/backend/tests/test_api.py index df015fb..8e7e049 100644 --- a/backend/tests/test_api.py +++ b/backend/tests/test_api.py @@ -1,6 +1,41 @@ from datetime import UTC, datetime +def test_config_features(client): + response = client.get("/api/config/features") + assert response.status_code == 200 + data = response.json() + assert "ai_features_enabled" in data + assert isinstance(data["ai_features_enabled"], bool) + + +def test_ask_disabled_when_ai_features_off(): + import subprocess + import sys + + code = """ +import sys +sys.path.insert(0, '.') +import os +os.environ['AI_FEATURES_ENABLED'] = 'false' + +# Re-import config with the env override +import importlib +import config +importlib.reload(config) + +# Now import main; it will pick up the new AI_FEATURES_ENABLED 
+import main +ask_paths = [r.path for r in main.app.routes if hasattr(r, 'path') and 'ask' in r.path] +print('ASK_PATHS:', ask_paths) +assert len(ask_paths) == 0, f"Expected no ask routes, found: {ask_paths}" +print('OK') +""" + result = subprocess.run([sys.executable, "-c", code], capture_output=True, text=True, cwd=".") + assert result.returncode == 0, f"Subprocess failed: {result.stdout}\n{result.stderr}" + assert "OK" in result.stdout + + def test_health(client): response = client.get("/health") assert response.status_code == 200