# Copy this file to .env and fill in secrets

# Meilisearch keys
MEILI_MASTER_KEY=change_me_to_strong_random
MEILI_KEY=${MEILI_MASTER_KEY}

# OpenWebUI integration
OPENWEBUI_URL=http://openwebui:3000
OPENWEBUI_API_KEY=put_your_openwebui_api_key_here
OPENWEBUI_KB_NAME=Homelab Library
OPENWEBUI_KB_ID=your_kb_uuid_here
OPENWEBUI_AUTO_FIX_METADATA=1
# Optional: JSON string to enforce as metadata template when auto-fix runs
# OPENWEBUI_METADATA_TEMPLATE_JSON={}

# Media normalization
MEDIA_NORMALIZE=1
MEDIA_NORMALIZE_KEEP_ORIGINAL=0
VIDEO_NORMALIZE_CODEC=hevc
VIDEO_NORMALIZE_EXTENSION=.mp4
VIDEO_NORMALIZE_CRF=28
VIDEO_NORMALIZE_PRESET=medium
VIDEO_NORMALIZE_AUDIO_CODEC=aac
VIDEO_NORMALIZE_AUDIO_BITRATE=160k
AUDIO_NORMALIZE_CODEC=libmp3lame
AUDIO_NORMALIZE_EXTENSION=.mp3
AUDIO_NORMALIZE_BITRATE=192k
AUDIO_NORMALIZE_CHANNELS=2

# Transcription backend (local Whisper by default)
TRANSCRIBE_BACKEND=local
OPENAI_API_KEY=
# Uncomment to customize OpenAI settings when offloading transcription
# OPENAI_BASE_URL=https://api.openai.com/v1
# OPENAI_TRANSCRIBE_MODEL=whisper-1
# OPENAI_TRANSCRIBE_TIMEOUT=600

# Local Whisper settings
# Choose CPU explicitly unless you have a working GPU runtime in Docker
WHISPER_DEVICE=cpu
# Model and precision (large-v3 int8 is accurate but heavy; consider medium/small for speed)
WHISPER_MODEL=large-v3
WHISPER_PRECISION=int8
# Threads for CPU inference
WHISPER_CPU_THREADS=4

# --- GPU (CUDA) optional setup ---
# To enable NVIDIA GPU acceleration:
# 1) Install NVIDIA driver on the host and the NVIDIA Container Toolkit
# 2) Set the Docker runtime to NVIDIA for the worker containers
# DOCKER_GPU_RUNTIME=nvidia
# 3) Ensure GPU visibility (default is all)
# NVIDIA_VISIBLE_DEVICES=all
# 4) Use GPU-friendly precision and device
# WHISPER_DEVICE=cuda
# WHISPER_PRECISION=float16

# Docker volumes paths
LIBRARY_HOST_DIR=/mnt/nfs/library
TRANSCRIPTS_HOST_DIR=/mnt/nfs/transcripts
# leave others as-is or customize:
# TMP_HOST_DIR=./tmp
# MODELS_HOST_DIR=./models
# MEILI_DATA_HOST_DIR=./data/meili
# REDIS_DATA_HOST_DIR=./data/redis