diff --git a/README.md b/README.md
index 7d8b0b1..29c5843 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,43 @@
-# podx
+# PodX - Offline Podcast + Docs Unified Search (Docker)
+
+## Quick start
+```bash
+unzip homelab-podx.zip && cd homelab-podx
+docker compose up -d --build
+# Open the UI:
+# http://<host>:8088
+```
+
+Paste links to podcasts or videos (YouTube, Rumble, direct MP3). The worker downloads them with yt-dlp, transcribes locally with faster-whisper large-v3, stores media under library/, writes subtitles and transcripts under transcripts/, and indexes everything (plus your PDFs/EPUBs/Kiwix) in a single Meilisearch index: library.
+
+### Ingest PDFs
+```bash
+MEILI_URL=http://localhost:7700 MEILI_KEY=devkey ./ingest/ingest_pdfs.sh /path/to/*.pdf
+```
+
+### Ingest EPUBs
+```bash
+pip install ebooklib beautifulsoup4 lxml requests
+MEILI_URL=http://localhost:7700 MEILI_KEY=devkey ./ingest/ingest_epub.py /path/to/*.epub
+```
+
+### Ingest Kiwix ZIM
+Install zim-tools first (`apt-get install zim-tools` or `brew install zimtools`), then:
+```bash
+MEILI_URL=http://localhost:7700 MEILI_KEY=devkey ./ingest/ingest_kiwix.sh /path/to/wikipedia_en.zim
+```
+
+### Optional: set Meilisearch settings
+```bash
+curl -X PATCH "http://localhost:7700/indexes/library/settings" \
+  -H "Authorization: Bearer devkey" -H "Content-Type: application/json" \
+  -d '{"searchableAttributes":["title","text","meta.book_author","meta.uploader","meta.chapter"],
+       "displayedAttributes":["type","title","source","date","_formatted","meta"],
+       "sortableAttributes":["date","type"],
+       "filterableAttributes":["type"]}'
+```
+
+### Plex
+Point Plex libraries at library/. Video podcasts pick up their .srt/.vtt subtitles automatically as long as the basenames match.
+
+### Notes
+- Accuracy-first: the default model is large-v3 (English + Czech). Change it via the WHISPER_MODEL env var in docker-compose.yml if desired.
+- Everything runs offline; no cloud calls.
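Once a few items are indexed, the index can also be queried directly over Meilisearch's search API. A quick smoke test from the shell, assuming the compose defaults used throughout this PR (port 7700, master key devkey); note the `filter` clause only works after the optional settings step above marks `type` as filterable:

```bash
# Query the unified "library" index directly; jq is just for readability.
curl -s -X POST "http://localhost:7700/indexes/library/search" \
  -H "Authorization: Bearer devkey" \
  -H "Content-Type: application/json" \
  -d '{"q":"entropy","limit":3,"filter":"type = podcast"}' | jq -r '.hits[].title'
```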

diff --git a/app/Dockerfile b/app/Dockerfile
new file mode 100644
index 0000000..bff1346
--- /dev/null
+++ b/app/Dockerfile
@@ -0,0 +1,14 @@
+FROM python:3.11-slim
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    ffmpeg curl jq poppler-utils \
+    && rm -rf /var/lib/apt/lists/*
+
+WORKDIR /app
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY app.py worker.py ./
+RUN pip install --no-cache-dir gunicorn==22.0.0
+
+EXPOSE 8080
+CMD ["gunicorn", "-b", "0.0.0.0:8080", "app:app", "--workers", "2", "--threads", "4"]
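Compose builds the image above for both the web and worker services, but it can also be built and run standalone when debugging. A sketch, assuming Meilisearch and Redis are reachable from the container (`host.docker.internal` works on Docker Desktop; substitute the host's LAN IP elsewhere):

```bash
docker build -t podx-app ./app
docker run --rm -p 8088:8080 \
  -e MEILI_URL=http://host.docker.internal:7700 \
  -e MEILI_KEY=devkey \
  -e REDIS_URL=redis://host.docker.internal:6379/0 \
  podx-app
```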
diff --git a/app/app.py b/app/app.py
new file mode 100644
index 0000000..070a206
--- /dev/null
+++ b/app/app.py
@@ -0,0 +1,144 @@
+from flask import Flask, request, redirect
+import os, json, requests
+from redis import Redis
+from rq import Queue
+
+MEILI_URL = os.getenv("MEILI_URL", "http://meili:7700")
+MEILI_KEY = os.getenv("MEILI_KEY", "")
+REDIS_URL = os.getenv("REDIS_URL", "redis://redis:6379/0")
+
+app = Flask(__name__)
+q = Queue(connection=Redis.from_url(REDIS_URL))
+
+PAGE = """<!doctype html>
+<html>
+<head><meta charset="utf-8"><title>PodX - unified search</title></head>
+<body>
+<h1>PodX</h1>
+
+<form method="post" action="/enqueue">
+  <input name="url" size="80" placeholder="Paste a podcast/video URL">
+  <button type="submit">Add</button>
+</form>
+
+<h2>Batch</h2>
+<form method="post" action="/enqueue_batch">
+  <textarea name="urls" rows="6" cols="80" placeholder="One URL per line"></textarea>
+  <button type="submit">Add all</button>
+</form>
+
+<h2>Unified search (podcasts + PDFs + EPUB + Kiwix)</h2>
+<input id="q" size="60" placeholder="Search everything...">
+<button onclick="doSearch()">Search</button>
+<div id="results"></div>
+
+<h2>Recent jobs</h2>
+<div id="recent"></div>
+
+<script>
+async function doSearch() {
+  const q = document.getElementById('q').value;
+  const r = await fetch('/search?q=' + encodeURIComponent(q));
+  document.getElementById('results').innerHTML = await r.text();
+}
+async function loadRecent() {
+  const r = await fetch('/recent');
+  document.getElementById('recent').innerHTML = await r.text();
+}
+loadRecent();
+</script>
+</body>
+</html>
+"""
+
+def meili_search(qstr, limit=30):
+    if not qstr.strip():
+        return []
+    r = requests.post(f"{MEILI_URL}/indexes/library/search",
+                      headers={"Authorization": f"Bearer {MEILI_KEY}", "Content-Type": "application/json"},
+                      data=json.dumps({"q": qstr, "limit": limit}))
+    return r.json().get("hits", [])
+
+@app.get("/")
+def index():
+    return PAGE
+
+@app.post("/enqueue")
+def enqueue():
+    url = request.form["url"].strip()
+    q.enqueue("worker.handle_url", url)
+    return redirect("/")
+
+@app.post("/enqueue_batch")
+def enqueue_batch():
+    urls = [u.strip() for u in request.form["urls"].splitlines() if u.strip()]
+    for u in urls:
+        q.enqueue("worker.handle_url", u)
+    return redirect("/")
+
+@app.get("/recent")
+def recent():
+    try:
+        with open("/transcripts/_feed.log", "r", encoding="utf-8") as f:
+            tail = f.readlines()[-40:]
+    except FileNotFoundError:
+        tail = []
+    html = []
+    for line in reversed(tail):
+        try:
+            item = json.loads(line)
+        except json.JSONDecodeError:
+            continue
+        html.append(f"<div class='job'><b>{item.get('title','')}</b><br>"
+                    f"{item.get('uploader','')} — {item.get('date','')} — {item.get('status','')}<br>"
+                    f"<small>{item.get('path','')}</small></div>")
+    return "\n".join(html)
+
+@app.get("/search")
+def search():
+    qstr = request.args.get("q", "")
+    hits = meili_search(qstr)
+    out = []
+    for h in hits:
+        t = h.get("title", "")
+        src = h.get("source", "")
+        typ = h.get("type", "")
+        ctx = h.get("_formatted", {}).get("text", h.get("text", "")[:300])
+        segs = h.get("segments", [])
+        ts = int(segs[0]["start"]) if segs else 0
+        quoted = requests.utils.quote(src)
+        open_link = f"/open?file={quoted}&t={ts}" if typ == 'podcast' else f"/open?file={quoted}"
+        sub_link = f" | <a href='/subtitle?file={quoted}'>Transcript</a>" if typ == 'podcast' else ""
+        badge = f"<span class='badge'>{typ}</span>"
+        out.append(f"<div class='hit'>{badge} <b>{t}</b><br><small>{src}</small>"
+                   f"<p>{ctx}</p>"
+                   f"<a href='{open_link}'>Open</a>{sub_link}"
+                   f"</div>")
+    return "\n".join(out) or "No results yet."
+
+@app.get("/open")
+def open_local():
+    file = request.args.get("file", "")
+    t = int(request.args.get("t", "0"))
+    return f"<pre>{file}\nStart at: {t} sec</pre>"
+
+@app.get("/subtitle")
+def subtitle():
+    file = request.args.get("file", "")
+    base = os.path.splitext(os.path.basename(file))[0]
+    p = f"/transcripts/{base}.vtt"
+    if os.path.exists(p):
+        with open(p, "r", encoding="utf-8") as f:
+            return f"<pre>{f.read()}</pre>"
+    return "No VTT found."
diff --git a/app/requirements.txt b/app/requirements.txt
new file mode 100644
index 0000000..bfa0a24
--- /dev/null
+++ b/app/requirements.txt
@@ -0,0 +1,8 @@
+flask==3.0.3
+redis==5.0.7
+rq==1.16.2
+yt-dlp==2024.08.06
+faster-whisper==1.0.3
+ffmpeg-python==0.2.0
+requests==2.32.3
+orjson==3.10.7
diff --git a/app/worker.py b/app/worker.py
new file mode 100644
index 0000000..7a50366
--- /dev/null
+++ b/app/worker.py
@@ -0,0 +1,109 @@
+import os, subprocess, shutil, json, re, orjson, requests
+from pathlib import Path
+from faster_whisper import WhisperModel
+
+MEILI_URL = os.getenv("MEILI_URL", "http://meili:7700")
+MEILI_KEY = os.getenv("MEILI_KEY", "")
+LIB = Path(os.getenv("LIBRARY_ROOT", "/library"))
+TRN = Path(os.getenv("TRANSCRIPT_ROOT", "/transcripts"))
+TMP = Path(os.getenv("TMP_ROOT", "/tmpdl"))
+MODEL_NAME = os.getenv("WHISPER_MODEL", "large-v3")
+COMPUTE = os.getenv("WHISPER_PRECISION", "int8")
+
+TRN.mkdir(parents=True, exist_ok=True)
+LIB.mkdir(parents=True, exist_ok=True)
+TMP.mkdir(parents=True, exist_ok=True)
+
+model = WhisperModel(MODEL_NAME, compute_type=COMPUTE)
+
+def log(feed):
+    # Append one JSON line per status update; the web UI tails this file.
+    with open(TRN / "_feed.log", "a", encoding="utf-8") as f:
+        f.write(orjson.dumps(feed).decode() + "\n")
+
+def sanitize(name):
+    return re.sub(r'[\\/:"*?<>|]+', ' ', name).strip()
+
+def meili_id(name):
+    # Meilisearch primary keys may only contain [a-zA-Z0-9_-].
+    return re.sub(r'[^A-Za-z0-9_-]+', '-', name).strip('-')
+
+def yt_dlp(url, outdir):
+    outtmpl = str(outdir / "%(uploader)s/%(upload_date)s - %(title)s.%(ext)s")
+    # Keep video as video so Plex can pair subtitles; audio-only sources
+    # fall back to best audio via the format selector.
+    cmd = [
+        "yt-dlp", "-o", outtmpl,
+        "-f", "bv*+ba/best",
+        "--write-thumbnail",
+        "--no-playlist", "--no-warnings", "--restrict-filenames",
+        url
+    ]
+    subprocess.check_call(cmd)
+    media = (list(outdir.rglob("*.[mM][pP]4")) + list(outdir.rglob("*.mkv"))
+             + list(outdir.rglob("*.webm")) + list(outdir.rglob("*.m4a"))
+             + list(outdir.rglob("*.mp3")))
+    return sorted(media, key=lambda p: p.stat().st_mtime)[-1:]
+
+def transcribe(media_path: Path):
+    # language=None lets faster-whisper auto-detect (it has no "auto" code).
+    segments, info = model.transcribe(str(media_path), vad_filter=True, language=None)
+    title = media_path.stem
+    base = TRN / title
+    segs = []
+    text_parts = []
+    for s in segments:
+        segs.append({"start": s.start, "end": s.end, "text": s.text})
+        text_parts.append(s.text)
+    txt = " ".join(text_parts).strip()
+
+    base.with_suffix(".json").write_bytes(
+        orjson.dumps({"file": str(media_path), "language": info.language, "segments": segs}))
+    base.with_suffix(".txt").write_text(txt, encoding="utf-8")
+
+    def fmt_ts(t):
+        h = int(t // 3600); m = int((t % 3600) // 60); s = t - (h * 3600 + m * 60)
+        return f"{h:02}:{m:02}:{s:06.3f}".replace('.', ',')
+
+    with open(base.with_suffix(".srt"), "w", encoding="utf-8") as srt:
+        for i, s in enumerate(segs, 1):
+            srt.write(f"{i}\n{fmt_ts(s['start'])} --> {fmt_ts(s['end'])}\n{s['text'].strip()}\n\n")
+    with open(base.with_suffix(".vtt"), "w", encoding="utf-8") as vtt:
+        vtt.write("WEBVTT\n\n")
+        for s in segs:
+            vtt.write(f"{fmt_ts(s['start']).replace(',', '.')} --> {fmt_ts(s['end']).replace(',', '.')}\n{s['text'].strip()}\n\n")
+    return base
+
+def index_meili(json_path: Path):
+    doc = json.loads(json_path.read_text(encoding="utf-8"))
+    title = Path(doc["file"]).stem
+    date = re.findall(r"\b(\d{8})\b", title)
+    payload = {
+        "id": meili_id(title),
+        "type": "podcast",
+        "title": title,
+        "date": date[0] if date else "",
+        "source": doc["file"],  # full path under /library, including the uploader dir
+        "text": " ".join(s["text"] for s in doc.get("segments", [])),
+        "segments": doc.get("segments", []),
+        "meta": {"language": doc.get("language", "")}
+    }
+    r = requests.post(f"{MEILI_URL}/indexes/library/documents",
+                      headers={"Authorization": f"Bearer {MEILI_KEY}", "Content-Type": "application/json"},
+                      data=orjson.dumps(payload))
+    r.raise_for_status()
+
+def handle_url(url: str):
+    try:
+        info = {"url": url, "status": "queued", "title": "", "uploader": "", "date": "", "path": ""}
+        log({**info, "status": "downloading"})
+        files = yt_dlp(url, TMP)
+        for f in files:
+            parts = f.relative_to(TMP).parts
+            uploader = sanitize(parts[0]) if len(parts) > 1 else "Unknown"
+            dest_dir = LIB / uploader
+            dest_dir.mkdir(parents=True, exist_ok=True)
+            dest = dest_dir / sanitize(f.name)
+            shutil.move(str(f), dest)
+            dates = re.findall(r"\b(\d{8})\b", dest.stem)
+            info.update({"title": dest.stem, "uploader": uploader,
+                         "date": dates[0] if dates else "",
+                         "path": str(dest)})
+            log({**info, "status": "transcribing"})
+            base = transcribe(dest)
+            index_meili(base.with_suffix(".json"))
+        log({**info, "status": "done"})
+    except Exception as e:
+        log({"url": url, "status": "error", "error": str(e)})
+        raise
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..27b0614
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,60 @@
+services:
+  web:
+    build: ./app
+    container_name: podx-web
+    environment:
+      - MEILI_URL=http://meili:7700
+      - MEILI_KEY=devkey
+      - REDIS_URL=redis://redis:6379/0
+      - LIBRARY_ROOT=/library
+      - TRANSCRIPT_ROOT=/transcripts
+      - TMP_ROOT=/tmpdl
+      - WHISPER_MODEL=large-v3
+      - WHISPER_PRECISION=int8
+    volumes:
+      - ./library:/library
+      - ./transcripts:/transcripts
+      - ./tmp:/tmpdl
+      - ./models:/root/.cache/huggingface
+    ports: ["8088:8080"]
+    depends_on: [worker, meili, redis]
+    restart: unless-stopped
+
+  worker:
+    build: ./app
+    container_name: podx-worker
+    # RQ worker loop; worker.py itself has no main entry point.
+    command: ["rq", "worker", "--url", "redis://redis:6379/0", "default"]
+    environment:
+      - MEILI_URL=http://meili:7700
+      - MEILI_KEY=devkey
+      - REDIS_URL=redis://redis:6379/0
+      - LIBRARY_ROOT=/library
+      - TRANSCRIPT_ROOT=/transcripts
+      - TMP_ROOT=/tmpdl
+      - WHISPER_MODEL=large-v3
+      - WHISPER_PRECISION=int8
+    volumes:
+      - ./library:/library
+      - ./transcripts:/transcripts
+      - ./tmp:/tmpdl
+      - ./models:/root/.cache/huggingface
+    depends_on: [meili, redis]
+    restart: unless-stopped
+
+  meili:
+    image: getmeili/meilisearch:v1.8
+    container_name: meili
+    environment:
+      - MEILI_MASTER_KEY=devkey
+      - MEILI_NO_ANALYTICS=true
+    ports: ["7700:7700"]
+    volumes:
+      - ./data/meili:/meili_data
+    restart: unless-stopped
+
+  redis:
+    image: redis:7-alpine
+    container_name: redis
+    volumes:
+      - ./data/redis:/data
+    restart: unless-stopped
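Once the stack is up and a job has finished, Meilisearch's stats endpoint is a quick way to confirm documents are actually landing in the index (same devkey/7700 defaults as above):

```bash
curl -s "http://localhost:7700/indexes/library/stats" \
  -H "Authorization: Bearer devkey" | jq '.numberOfDocuments'
```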
diff --git a/ingest/ingest_epub.py b/ingest/ingest_epub.py
new file mode 100755
index 0000000..2e6fce9
--- /dev/null
+++ b/ingest/ingest_epub.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+import sys, os, hashlib, json
+import ebooklib
+from ebooklib import epub
+from bs4 import BeautifulSoup
+import requests
+
+MEILI_URL = os.getenv("MEILI_URL", "http://localhost:7700")
+MEILI_KEY = os.getenv("MEILI_KEY", "devkey")
+
+def post(doc):
+    r = requests.post(f"{MEILI_URL}/indexes/library/documents",
+                      headers={"Authorization": f"Bearer {MEILI_KEY}", "Content-Type": "application/json"},
+                      data=json.dumps(doc))
+    r.raise_for_status()
+
+for path in sys.argv[1:]:
+    book = epub.read_epub(path)
+    title = book.get_metadata('DC', 'title')[0][0] if book.get_metadata('DC', 'title') else os.path.basename(path)
+    author = "; ".join(a[0] for a in book.get_metadata('DC', 'creator')) if book.get_metadata('DC', 'creator') else ""
+    n = 0
+    for item in book.get_items_of_type(ebooklib.ITEM_DOCUMENT):
+        soup = BeautifulSoup(item.get_body_content(), "lxml")
+        text = soup.get_text(separator=" ", strip=True)
+        if not text.strip():
+            continue
+        n += 1
+        doc = {
+            "id": hashlib.sha1((path + item.get_name()).encode()).hexdigest(),
+            "type": "epub",
+            "title": f"{title} — {item.get_name()}",
+            "source": f"file://{os.path.abspath(path)}",
+            "date": "",
+            "text": text,
+            "meta": {"book_title": title, "book_author": author, "chapter": item.get_name()}
+        }
+        post(doc)
+    print(f"Indexed {title} ({n} sections)")
diff --git a/ingest/ingest_kiwix.sh b/ingest/ingest_kiwix.sh
new file mode 100755
index 0000000..cd0b529
--- /dev/null
+++ b/ingest/ingest_kiwix.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+set -euo pipefail
+ZIM="$1"
+BASE_URL=${MEILI_URL:-http://localhost:7700}
+KEY=${MEILI_KEY:-devkey}
+
+# zimdump has no JSON output; list entry paths and sniff for HTML content instead.
+zimdump list "$ZIM" | while read -r path; do
+  html="$(zimdump show --url="$path" "$ZIM" 2>/dev/null || true)"
+  case "$html" in *"<html"*|*"<HTML"*) ;; *) continue ;; esac
+  text="$(printf '%s' "$html" | sed -e 's/<[^>]*>/ /g' | tr -s '[:space:]' ' ' | sed 's/^ *//')"
+  title="$(basename "$path" | sed 's/_/ /g')"
+  id="$(printf '%s' "${ZIM}:${path}" | sha1sum | awk '{print $1}')"
+  doc=$(jq -nc --arg id "$id" --arg t "$title" --arg src "zim://$ZIM/$path" --arg txt "$text" \
+        '{id:$id, type:"kiwix", title:$t, source:$src, date:"", text:$txt, meta:{path:$src}}')
+  curl -sS -X POST "$BASE_URL/indexes/library/documents" \
+       -H "Authorization: Bearer $KEY" -H 'Content-Type: application/json' \
+       --data-binary "$doc" >/dev/null
+done
+
+echo "Indexed ZIM: $ZIM"
diff --git a/ingest/ingest_pdfs.sh b/ingest/ingest_pdfs.sh
new file mode 100755
index 0000000..216b5b4
--- /dev/null
+++ b/ingest/ingest_pdfs.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+set -euo pipefail
+BASE_URL=${MEILI_URL:-http://localhost:7700}
+KEY=${MEILI_KEY:-devkey}
+
+for pdf in "$@"; do
+  title="$(basename "$pdf")"
+  pages=$(pdfinfo "$pdf" | awk '/Pages:/ {print $2}')
+  for p in $(seq 1 "$pages"); do
+    text="$(pdftotext -f "$p" -l "$p" -layout "$pdf" - | sed 's/^[[:space:]]*$//')"
+    [ -z "$text" ] && continue
+    # Meilisearch ids must be [a-zA-Z0-9_-], so hash title+page rather than embedding the filename.
+    id="$(printf '%s' "${title}-p${p}" | sha1sum | awk '{print $1}')"
+    doc=$(jq -nc --arg id "$id" --arg t "$title" --arg src "file://$pdf" --arg page "$p" --arg txt "$text" \
+          '{id:$id, type:"pdf", title:$t, source:$src, date:"", text:$txt, meta:{page:$page}}')
+    curl -sS -X POST "$BASE_URL/indexes/library/documents" \
+         -H "Authorization: Bearer $KEY" -H 'Content-Type: application/json' \
+         --data-binary "$doc" >/dev/null
+  done
+  echo "Indexed $title ($pages pages)"
+done
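Document additions in Meilisearch are asynchronous: the curl calls in these ingest scripts get back a task id, not a result, so ingest failures (for example a rejected document) surface in the tasks API rather than on the command line. A quick post-ingest check, with the same compose defaults as above:

```bash
curl -s "http://localhost:7700/tasks?statuses=failed&limit=5" \
  -H "Authorization: Bearer devkey" | jq '.results[].error'
```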