# Copy this file to .env and fill in secrets

# Meilisearch keys
MEILI_MASTER_KEY=change_me_to_strong_random
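# One way to generate a strong value (assumes OpenSSL is available): openssl rand -hex 32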
MEILI_KEY=${MEILI_MASTER_KEY}

# OpenWebUI integration
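# The hostname below assumes OpenWebUI runs as a service named "openwebui" on the same
# Docker network; point this at a host IP/port instead if it runs elsewhere.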
OPENWEBUI_URL=http://openwebui:3000
OPENWEBUI_API_KEY=put_your_openwebui_api_key_here
OPENWEBUI_KB_NAME=Homelab Library
OPENWEBUI_KB_ID=your_kb_uuid_here
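# Typically the UUID of an existing knowledge base, e.g. 123e4567-e89b-12d3-a456-426614174000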
OPENWEBUI_AUTO_FIX_METADATA=1
# Optional: JSON string to enforce as metadata template when auto-fix runs
# OPENWEBUI_METADATA_TEMPLATE_JSON={}
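# Hypothetical example shape (field names are illustrative, not required by the app):
# OPENWEBUI_METADATA_TEMPLATE_JSON={"source": "", "author": "", "tags": []}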

# Media normalisation
MEDIA_NORMALIZE=1
MEDIA_NORMALIZE_KEEP_ORIGINAL=0
VIDEO_NORMALIZE_CODEC=hevc
VIDEO_NORMALIZE_EXTENSION=.mp4
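# Lower CRF means higher quality and larger files; 28 is a common HEVC starting point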
VIDEO_NORMALIZE_CRF=28
VIDEO_NORMALIZE_PRESET=medium
VIDEO_NORMALIZE_AUDIO_CODEC=aac
VIDEO_NORMALIZE_AUDIO_BITRATE=160k
AUDIO_NORMALIZE_CODEC=libmp3lame
AUDIO_NORMALIZE_EXTENSION=.mp3
AUDIO_NORMALIZE_BITRATE=192k
AUDIO_NORMALIZE_CHANNELS=2

# Transcription backend (local Whisper by default)
TRANSCRIBE_BACKEND=local
OPENAI_API_KEY=
# Uncomment to customize OpenAI settings when offloading transcription
# OPENAI_BASE_URL=https://api.openai.com/v1
# OPENAI_TRANSCRIBE_MODEL=whisper-1
# OPENAI_TRANSCRIBE_TIMEOUT=600

# Local Whisper settings
# Choose CPU explicitly unless you have a working GPU runtime in Docker
WHISPER_DEVICE=cpu
# Model and precision (large-v3 int8 is accurate but heavy; consider medium/small for speed)
WHISPER_MODEL=large-v3
WHISPER_PRECISION=int8
# Threads for CPU inference
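# A value near the number of physical cores available to the container is a reasonable default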
WHISPER_CPU_THREADS=4

# --- GPU (CUDA) optional setup ---
# To enable NVIDIA GPU acceleration:
# 1) Install NVIDIA driver on the host and the NVIDIA Container Toolkit
# 2) Set the Docker runtime to NVIDIA for the worker containers
# DOCKER_GPU_RUNTIME=nvidia
# 3) Ensure GPU visibility (default is all)
# NVIDIA_VISIBLE_DEVICES=all
# 4) Use GPU-friendly precision and device
# WHISPER_DEVICE=cuda
# WHISPER_PRECISION=float16
# 5) (Build-time) use an NVIDIA CUDA runtime base image for the app containers.
# Set an image tag that exists for your architecture (most CUDA images are amd64):
# GPU_BASE_IMAGE=nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04
#
# If you are on ARM64 without discrete NVIDIA GPU, leave GPU_BASE_IMAGE unset and run CPU-only.
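# After steps 1-2 you can sanity-check GPU access from Docker with, for example:
# docker run --rm --gpus all nvidia/cuda:11.8.0-base-ubuntu22.04 nvidia-smi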

# Docker volume paths
LIBRARY_HOST_DIR=/mnt/nfs/library
TRANSCRIPTS_HOST_DIR=/mnt/nfs/transcripts
# Leave others as-is or customize:
# TMP_HOST_DIR=./tmp
# MODELS_HOST_DIR=./models
# MEILI_DATA_HOST_DIR=./data/meili
# REDIS_DATA_HOST_DIR=./data/redis

# RSS / Podcast downloads
# Where to save downloaded podcast audio (inside the container mount)
PODCASTS_ROOT=/library
# Organize under per-show subfolders (true/false)
PODCASTS_PER_SHOW=true
# Scan interval (minutes) for rss_ingest; set RSS_ONCE=1 for one-shot
# RSS_SCAN_MINUTES=120
# RSS_ONCE=0