- Add AI_FEATURES_ENABLED config flag to gate AI/natural-language features
- Conditionally register /api/ask router based on AI_FEATURES_ENABLED
- Add GET /api/config/features endpoint for frontend feature detection
- Update frontend to hide Ask panel when AI features are disabled
- Implement standalone MCP server (backend/mcp_server.py) with tools:
  * search_events, get_event, get_summary, ask
- Add mcp dependency to requirements.txt
- Update .env.example, AGENTS.md, and ROADMAP.md
- Bump VERSION to 1.3.0
52 lines
1.8 KiB
Plaintext
TENANT_ID=your-tenant-id
CLIENT_ID=your-client-id
CLIENT_SECRET=your-client-secret
ENABLE_PERIODIC_FETCH=false
FETCH_INTERVAL_MINUTES=60
AUTH_ENABLED=false
AUTH_TENANT_ID=your-tenant-id
AUTH_CLIENT_ID=your-api-client-id
# API scope the SPA should request at login.
# When set, the frontend acquires an access token for this scope (aud = AUTH_CLIENT_ID).
# When empty, the frontend falls back to the idToken, which is also valid for the backend.
# Example: api://cc31fd45-1eca-431f-a2c6-ba81cd4c5d50/.default
AUTH_SCOPE=
# Comma-separated lists (optional):
AUTH_ALLOWED_ROLES=
AUTH_ALLOWED_GROUPS=
MONGO_ROOT_USERNAME=root
MONGO_ROOT_PASSWORD=example
MONGO_PORT=27017

# MongoDB connection string (takes precedence over root credentials in Docker Compose)
MONGO_URI=mongodb://root:example@localhost:27017

# Optional: number of days to retain events in MongoDB (0 = disabled)
RETENTION_DAYS=0

# Optional: comma-separated CORS origins (e.g., http://localhost:3000,https://app.example.com)
CORS_ORIGINS=*

# Optional: SIEM export webhook (e.g., Splunk HEC, Sentinel, or generic syslog webhook)
SIEM_ENABLED=false
SIEM_WEBHOOK_URL=

# Optional: enable rule-based alerting during ingestion
ALERTS_ENABLED=false

# Optional: enable AI/natural-language features (/api/ask, MCP server)
# Set to false to completely disable AI endpoints and UI elements
AI_FEATURES_ENABLED=true

# Optional: LLM configuration for natural language querying (/api/ask)
# Supports any OpenAI-compatible API (OpenAI, Azure OpenAI, Ollama, etc.)
# For Azure OpenAI / MS Foundry, set BASE_URL to your deployment endpoint
# (e.g. https://your-resource.openai.azure.com/openai/deployments/your-deployment)
# and set API_VERSION to something like 2025-01-01-preview
LLM_API_KEY=
LLM_BASE_URL=https://api.openai.com/v1
LLM_MODEL=gpt-4o-mini
LLM_MAX_EVENTS=200
LLM_TIMEOUT_SECONDS=30
LLM_API_VERSION=