Initial commit
README.md
@@ -1,2 +1,43 @@
# PodX - Offline Podcast + Docs Unified Search (Docker)

## Quick start

```bash
unzip homelab-podx.zip && cd homelab-podx
docker compose up -d --build
# Open the UI:
# http://<your-host>:8088
```

Paste links to podcasts or videos (YouTube, Rumble, direct MP3 URLs). The worker downloads with yt-dlp, transcribes locally with faster-whisper large-v3, stores media under library/, writes subtitles and transcripts under transcripts/, and indexes everything (plus your PDFs/EPUBs/Kiwix) in a single Meilisearch index: library.
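To verify that documents are landing in the index, you can query Meilisearch directly; a minimal sketch, assuming the default `devkey` master key from docker-compose.yml and a search term that actually appears in your library:

```bash
curl -s "http://localhost:7700/indexes/library/search" \
  -H "Authorization: Bearer devkey" \
  -H "Content-Type: application/json" \
  -d '{"q": "retrieval", "limit": 5}' | jq '.hits[] | {type, title}'
```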
### Ingest PDFs

```bash
MEILI_URL=http://localhost:7700 MEILI_KEY=devkey ./ingest/ingest_pdfs.sh /path/to/*.pdf
```
### Ingest EPUBs

```bash
pip install ebooklib beautifulsoup4 lxml requests
MEILI_URL=http://localhost:7700 MEILI_KEY=devkey ./ingest/ingest_epub.py /path/to/*.epub
```
### Ingest Kiwix ZIM

Install zim-tools first: `apt-get install zim-tools` (Debian/Ubuntu) or `brew install zimtools` (macOS).

```bash
MEILI_URL=http://localhost:7700 MEILI_KEY=devkey ./ingest/ingest_kiwix.sh /path/to/wikipedia_en.zim
```
### Optional: set Meilisearch settings

```bash
curl -X PATCH "http://localhost:7700/indexes/library/settings" \
  -H "Authorization: Bearer devkey" \
  -H "Content-Type: application/json" \
  -d '{
    "searchableAttributes": ["title", "text", "meta.book_author", "meta.uploader", "meta.chapter"],
    "displayedAttributes": ["type", "title", "source", "date", "_formatted", "meta"],
    "sortableAttributes": ["date", "type"],
    "filterableAttributes": ["type"]
  }'
```
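Once `type` is filterable and `date` sortable, searches can be narrowed to one source kind; for example (query term is illustrative, same `devkey` as above):

```bash
curl -s "http://localhost:7700/indexes/library/search" \
  -H "Authorization: Bearer devkey" \
  -H "Content-Type: application/json" \
  -d '{"q": "linux", "filter": "type = pdf", "sort": ["date:desc"], "limit": 5}'
```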
### Plex

Point Plex libraries at library/. Video podcasts will show .srt/.vtt subtitles automatically when the subtitle basename matches the media file's.
### Notes

- Accuracy-first: the default model is large-v3 (English + Czech). Change it via the WHISPER_MODEL env var in docker-compose.yml if desired; see the snippet below.
- Everything runs offline; no cloud calls.
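For example, trading some accuracy for speed might look like this in docker-compose.yml (the model name here is illustrative; any faster-whisper model size works):

```yaml
  worker:
    environment:
      - WHISPER_MODEL=medium        # default: large-v3
      - WHISPER_PRECISION=int8
```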
app/Dockerfile (new file)
@@ -0,0 +1,14 @@
FROM python:3.11-slim

# ffmpeg for media handling, poppler-utils for pdfinfo/pdftotext
RUN apt-get update && apt-get install -y --no-install-recommends \
        ffmpeg curl jq poppler-utils \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY app.py worker.py ./
RUN pip install --no-cache-dir gunicorn==22.0.0

EXPOSE 8080
CMD ["gunicorn", "-b", "0.0.0.0:8080", "app:app", "--workers", "2", "--threads", "4"]
app/app.py (new file)
@@ -0,0 +1,144 @@
from flask import Flask, request, redirect
import os, json, requests
from redis import Redis
from rq import Queue

MEILI_URL = os.getenv("MEILI_URL", "http://meili:7700")
MEILI_KEY = os.getenv("MEILI_KEY", "")
REDIS_URL = os.getenv("REDIS_URL", "redis://redis:6379/0")

app = Flask(__name__)
q = Queue(connection=Redis.from_url(REDIS_URL))
PAGE = """
|
||||
<!doctype html><html><head><meta charset="utf-8">
|
||||
<title>PodX - unified search</title>
|
||||
<style>
|
||||
body{font-family:system-ui, sans-serif;max-width:880px;margin:2rem auto;padding:0 1rem}
|
||||
form{display:flex;gap:.5rem;margin-bottom:1rem}
|
||||
input[type=url]{flex:1;padding:.7rem}
|
||||
button{padding:.7rem 1rem}
|
||||
.card{border:1px solid #ddd;padding:1rem;border-radius:8px;margin:.5rem 0}
|
||||
small{color:#666}
|
||||
input[type=search]{width:100%;padding:.6rem;margin:.5rem 0 1rem}
|
||||
mark{background: #fff2a8}
|
||||
.badge{display:inline-block;font-size:.75rem;border:1px solid #999;padding:.1rem .4rem;border-radius:999px;margin-right:.4rem}
|
||||
</style></head><body>
|
||||
<h1>PodX</h1>
|
||||
<form action="/enqueue" method="post">
|
||||
<input type="url" name="url" placeholder="Paste podcast/video URL…" required>
|
||||
<button type="submit">Fetch & Transcribe</button>
|
||||
</form>
|
||||
<details><summary>Batch</summary>
|
||||
<form action="/enqueue_batch" method="post">
|
||||
<textarea name="urls" rows="4" style="width:100%" placeholder="One URL per line"></textarea>
|
||||
<button type="submit">Queue All</button>
|
||||
</form>
|
||||
</details>
|
||||
|
||||
<h2>Unified search (podcasts + PDFs + EPUB + Kiwix)</h2>
|
||||
<form id="sform">
|
||||
<input type="search" name="q" placeholder='e.g., "vector database" OR retrieval augmented generation' autofocus />
|
||||
</form>
|
||||
<div id="results"></div>
|
||||
|
||||
<script>
|
||||
const form = document.getElementById('sform');
|
||||
async function doSearch(){
|
||||
const q = new URLSearchParams(new FormData(form)).toString();
|
||||
const r = await fetch('/search?'+q);
|
||||
document.getElementById('results').innerHTML = await r.text();
|
||||
}
|
||||
form.addEventListener('input', doSearch);
|
||||
doSearch();
|
||||
</script>
|
||||
|
||||
<h2>Recent jobs</h2>
|
||||
<div id="feed"></div>
|
||||
<script>
|
||||
(async function poll(){
|
||||
try{
|
||||
const r = await fetch('/recent');
|
||||
document.getElementById('feed').innerHTML = await r.text();
|
||||
}catch(e){}
|
||||
setTimeout(poll, 4000);
|
||||
})();
|
||||
</script>
|
||||
</body></html>
|
||||
"""
|
||||
|
||||
def meili_search(qstr, limit=30):
    if not qstr.strip():
        return []
    r = requests.post(f"{MEILI_URL}/indexes/library/search",
        headers={"Authorization": f"Bearer {MEILI_KEY}", "Content-Type": "application/json"},
        data=json.dumps({"q": qstr, "limit": limit}))
    return r.json().get("hits", [])
@app.get("/")
|
||||
def index():
|
||||
return PAGE
|
||||
|
||||
@app.post("/enqueue")
|
||||
def enqueue():
|
||||
url = request.form["url"].strip()
|
||||
q.enqueue("worker.handle_url", url)
|
||||
return redirect("/")
|
||||
|
||||
@app.post("/enqueue_batch")
|
||||
def enqueue_batch():
|
||||
urls = [u.strip() for u in request.form["urls"].splitlines() if u.strip()]
|
||||
for u in urls: q.enqueue("worker.handle_url", u)
|
||||
return redirect("/")
|
||||
|
||||
@app.get("/recent")
|
||||
def recent():
|
||||
try:
|
||||
with open("/transcripts/_feed.log", "r", encoding="utf-8") as f:
|
||||
tail = f.readlines()[-40:]
|
||||
except FileNotFoundError:
|
||||
tail=[]
|
||||
html = []
|
||||
for line in reversed(tail):
|
||||
try: item = json.loads(line)
|
||||
except: continue
|
||||
html.append(f"<div class='card'><b>{item.get('title','')}</b><br><small>{item.get('uploader','')} — {item.get('date','')} — {item.get('status','')}</small><br><small>{item.get('path','')}</small></div>")
|
||||
return "\n".join(html)
|
||||
|
||||
@app.get("/search")
|
||||
def search():
|
||||
qstr = request.args.get("q","")
|
||||
hits = meili_search(qstr)
|
||||
out=[]
|
||||
for h in hits:
|
||||
t = h.get("title","")
|
||||
src = h.get("source","")
|
||||
typ = h.get("type","")
|
||||
ctx = h.get("_formatted",{}).get("text", h.get("text","")[:300])
|
||||
segs = h.get("segments",[])
|
||||
ts = int(segs[0]["start"]) if segs else 0
|
||||
open_link = f"/open?file={{requests.utils.quote(src)}}&t={ts}" if typ=='podcast' else f"/open?file={{requests.utils.quote(src)}}"
|
||||
badge = f"<span class='badge'>{typ}</span>"
|
||||
out.append(
|
||||
f"<div class='card'><b>{badge}{t}</b><br><small>{src}</small>"
|
||||
f"<p>{ctx}</p>"
|
||||
f"<a href='{open_link}'>Open</a>"
|
||||
f"{' | <a href=\"/subtitle?file='+requests.utils.quote(src)+'\">Transcript</a>' if typ=='podcast' else ''}"
|
||||
f"</div>"
|
||||
)
|
||||
return "\n".join(out) or "<small>No results yet.</small>"
|
||||
|
||||
@app.get("/open")
|
||||
def open_local():
|
||||
file = request.args.get("file","")
|
||||
t = int(request.args.get("t","0"))
|
||||
return f"<pre>{file}\nStart at: {t} sec</pre>"
|
||||
|
||||
@app.get("/subtitle")
|
||||
def subtitle():
|
||||
file = request.args.get("file","")
|
||||
base = os.path.splitext(os.path.basename(file))[0]
|
||||
p = f"/transcripts/{base}.vtt"
|
||||
if os.path.exists(p):
|
||||
with open(p,"r",encoding="utf-8") as f:
|
||||
return f"<pre>{f.read()}</pre>"
|
||||
return "<small>No VTT found.</small>"
|
app/requirements.txt (new file)
@@ -0,0 +1,8 @@
flask==3.0.3
redis==5.0.7
rq==1.16.2
yt-dlp==2024.08.06
faster-whisper==1.0.3
ffmpeg-python==0.2.0
requests==2.32.3
orjson==3.10.7
app/worker.py (new file)
@@ -0,0 +1,109 @@
import os, subprocess, shutil, json, re, orjson, requests
from pathlib import Path
from faster_whisper import WhisperModel

MEILI_URL = os.getenv("MEILI_URL", "http://meili:7700")
MEILI_KEY = os.getenv("MEILI_KEY", "")
LIB = Path(os.getenv("LIBRARY_ROOT", "/library"))
TRN = Path(os.getenv("TRANSCRIPT_ROOT", "/transcripts"))
TMP = Path(os.getenv("TMP_ROOT", "/tmpdl"))
MODEL_NAME = os.getenv("WHISPER_MODEL", "large-v3")
COMPUTE = os.getenv("WHISPER_PRECISION", "int8")

TRN.mkdir(parents=True, exist_ok=True)
LIB.mkdir(parents=True, exist_ok=True)
TMP.mkdir(parents=True, exist_ok=True)

model = WhisperModel(MODEL_NAME, compute_type=COMPUTE)
def log(feed):
    # append one JSON line per status update; the web UI tails this file
    with open(TRN / "_feed.log", "a", encoding="utf-8") as f:
        f.write(orjson.dumps(feed).decode() + "\n")

def sanitize(name):
    return re.sub(r'[\\/:"*?<>|]+', ' ', name).strip()
def yt_dlp(url, outdir):
    outtmpl = str(outdir / "%(uploader)s/%(upload_date)s - %(title)s.%(ext)s")
    cmd = [
        "yt-dlp", "-o", outtmpl,
        "-f", "bv*+ba/best",
        "-x", "--audio-format", "m4a",
        "--write-thumbnail",
        "--no-playlist", "--no-warnings", "--restrict-filenames",
        url
    ]
    subprocess.check_call(cmd)
    media = list(outdir.rglob("*.[mM][pP]4")) + list(outdir.rglob("*.mkv")) + list(outdir.rglob("*.m4a")) + list(outdir.rglob("*.mp3"))
    return sorted(media, key=lambda p: p.stat().st_mtime)[-1:]
def transcribe(media_path: Path):
    # language=None lets faster-whisper auto-detect the language
    segments, info = model.transcribe(str(media_path), vad_filter=True, language=None)
    title = media_path.stem
    base = TRN / title
    segs = []
    text_parts = []
    for s in segments:
        segs.append({"start": s.start, "end": s.end, "text": s.text})
        text_parts.append(s.text)
    txt = " ".join(text_parts).strip()

    with open(base.with_suffix(".json"), "wb") as f:
        f.write(orjson.dumps({"file": str(media_path), "language": info.language, "segments": segs}))
    with open(base.with_suffix(".txt"), "w", encoding="utf-8") as f:
        f.write(txt)

    def fmt_ts(t):
        h = int(t // 3600); m = int((t % 3600) // 60); s = t - (h * 3600 + m * 60)
        return f"{h:02}:{m:02}:{s:06.3f}".replace('.', ',')

    with open(base.with_suffix(".srt"), "w", encoding="utf-8") as srt:
        for i, s in enumerate(segs, 1):
            srt.write(f"{i}\n{fmt_ts(s['start'])} --> {fmt_ts(s['end'])}\n{s['text'].strip()}\n\n")
    with open(base.with_suffix(".vtt"), "w", encoding="utf-8") as vtt:
        vtt.write("WEBVTT\n\n")
        for s in segs:
            vtt.write(f"{fmt_ts(s['start']).replace(',', '.')} --> {fmt_ts(s['end']).replace(',', '.')}\n{s['text'].strip()}\n\n")
    return base
def index_meili(json_path: Path):
    with open(json_path, "r", encoding="utf-8") as f:
        doc = json.loads(f.read())
    title = Path(doc["file"]).stem
    date = re.findall(r"\b(\d{8})\b", title)
    payload = {
        "id": title,
        "type": "podcast",
        "title": title,
        "date": date[0] if date else "",
        "source": str(Path(LIB, Path(doc["file"]).name)),
        "text": " ".join(s["text"] for s in doc.get("segments", [])),
        "segments": doc.get("segments", []),
        "meta": {"language": doc.get("language", "")}
    }
    r = requests.post(f"{MEILI_URL}/indexes/library/documents",
        headers={"Authorization": f"Bearer {MEILI_KEY}", "Content-Type": "application/json"},
        data=orjson.dumps(payload))
    r.raise_for_status()
def handle_url(url: str):
    try:
        info = {"url": url, "status": "queued", "title": "", "uploader": "", "date": "", "path": ""}
        log({**info, "status": "downloading"})
        files = yt_dlp(url, TMP)
        for f in files:
            parts = f.relative_to(TMP).parts
            uploader = sanitize(parts[0]) if len(parts) > 1 else "Unknown"
            dest_dir = LIB / uploader
            dest_dir.mkdir(parents=True, exist_ok=True)
            dest = dest_dir / sanitize(f.name)
            shutil.move(str(f), dest)
            dates = re.findall(r"\b(\d{8})\b", dest.stem)
            info.update({"title": dest.stem, "uploader": uploader,
                         "date": dates[0] if dates else "",
                         "path": str(dest)})
            log({**info, "status": "transcribing"})
            base = transcribe(dest)
            index_meili(base.with_suffix(".json"))
            log({**info, "status": "done"})
    except Exception as e:
        log({"url": url, "status": "error", "error": str(e)})
        raise
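
# Note (assumption): docker-compose runs this file with `python worker.py`, but the
# module above only defines job functions, so the process would exit immediately.
# A minimal RQ consumer loop, sketched here using the same REDIS_URL convention as
# app.py, keeps the worker alive and processing enqueued jobs:
if __name__ == "__main__":
    from redis import Redis
    from rq import Queue, Worker
    conn = Redis.from_url(os.getenv("REDIS_URL", "redis://redis:6379/0"))
    Worker([Queue(connection=conn)], connection=conn).work()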
docker-compose.yml (new file)
@@ -0,0 +1,60 @@
services:
  web:
    build: ./app
    container_name: podx-web
    environment:
      - MEILI_URL=http://meili:7700
      - MEILI_KEY=devkey
      - REDIS_URL=redis://redis:6379/0
      - LIBRARY_ROOT=/library
      - TRANSCRIPT_ROOT=/transcripts
      - TMP_ROOT=/tmpdl
      - WHISPER_MODEL=large-v3
      - WHISPER_PRECISION=int8
    volumes:
      - ./library:/library
      - ./transcripts:/transcripts
      - ./tmp:/tmpdl
      - ./models:/root/.cache/huggingface
    ports: ["8088:8080"]
    depends_on: [worker, meili, redis]
    restart: unless-stopped

  worker:
    build: ./app
    container_name: podx-worker
    command: ["python", "worker.py"]
    environment:
      - MEILI_URL=http://meili:7700
      - MEILI_KEY=devkey
      - REDIS_URL=redis://redis:6379/0
      - LIBRARY_ROOT=/library
      - TRANSCRIPT_ROOT=/transcripts
      - TMP_ROOT=/tmpdl
      - WHISPER_MODEL=large-v3
      - WHISPER_PRECISION=int8
    volumes:
      - ./library:/library
      - ./transcripts:/transcripts
      - ./tmp:/tmpdl
      - ./models:/root/.cache/huggingface
    depends_on: [meili, redis]
    restart: unless-stopped

  meili:
    image: getmeili/meilisearch:v1.8
    container_name: meili
    environment:
      - MEILI_MASTER_KEY=devkey
      - MEILI_NO_ANALYTICS=true
    ports: ["7700:7700"]
    volumes:
      - ./data/meili:/meili_data
    restart: unless-stopped

  redis:
    image: redis:7-alpine
    container_name: redis
    volumes:
      - ./data/redis:/data
    restart: unless-stopped
ingest/ingest_epub.py (new executable file)
@@ -0,0 +1,36 @@
#!/usr/bin/env python3
import sys, os, hashlib, json
from ebooklib import epub, ITEM_DOCUMENT
from bs4 import BeautifulSoup
import requests

MEILI_URL = os.getenv("MEILI_URL", "http://localhost:7700")
MEILI_KEY = os.getenv("MEILI_KEY", "devkey")

def post(doc):
    r = requests.post(f"{MEILI_URL}/indexes/library/documents",
        headers={"Authorization": f"Bearer {MEILI_KEY}", "Content-Type": "application/json"},
        data=json.dumps(doc))
    r.raise_for_status()

for path in sys.argv[1:]:
    book = epub.read_epub(path)
    title = book.get_metadata('DC', 'title')[0][0] if book.get_metadata('DC', 'title') else os.path.basename(path)
    author = "; ".join([a[0] for a in book.get_metadata('DC', 'creator')]) if book.get_metadata('DC', 'creator') else ""
    n = 0
    for item in book.get_items_of_type(ITEM_DOCUMENT):  # ITEM_DOCUMENT replaces the magic constant 9
        soup = BeautifulSoup(item.get_body_content(), "lxml")
        text = soup.get_text(separator=" ", strip=True)
        if not text.strip():
            continue
        n += 1
        doc = {
            "id": hashlib.sha1((path + item.get_name()).encode()).hexdigest(),
            "type": "epub",
            "title": f"{title} — {item.get_name()}",
            "source": f"file://{os.path.abspath(path)}",
            "date": "",
            "text": text,
            "meta": {"book_title": title, "book_author": author, "chapter": item.get_name()}
        }
        post(doc)
    print(f"Indexed {title} ({n} sections)")
ingest/ingest_kiwix.sh (new executable file)
@@ -0,0 +1,17 @@
#!/usr/bin/env bash
set -euo pipefail
ZIM="$1"
BASE_URL=${MEILI_URL:-http://localhost:7700}
KEY=${MEILI_KEY:-devkey}

zimdump list "$ZIM" --json | jq -rc '.[] | select(.mimetype=="text/html") | .path' | while read -r path; do
  html="$(zimdump dump "$ZIM" "$path" 2>/dev/null || true)"
  [ -z "$html" ] && continue
  text="$(echo "$html" | sed -e 's/<[^>]*>/ /g' | tr -s ' ' ' ' | sed 's/^[[:space:]]*//')"
  title="$(basename "$path" | sed 's/_/ /g')"
  id="$(echo -n "${ZIM}:${path}" | sha1sum | awk '{print $1}')"
  doc=$(jq -nc --arg id "$id" --arg t "$title" --arg src "zim://$ZIM$path" --arg txt "$text" '{id:$id, type:"kiwix", title:$t, source:$src, date:"", text:$txt, meta:{path:$src}}')
  curl -sS -X POST "$BASE_URL/indexes/library/documents" -H "Authorization: Bearer $KEY" -H 'Content-Type: application/json' --data-binary "$doc" >/dev/null
done

echo "Indexed ZIM: $ZIM"
ingest/ingest_pdfs.sh (new executable file)
@@ -0,0 +1,16 @@
#!/usr/bin/env bash
set -euo pipefail
BASE_URL=${MEILI_URL:-http://localhost:7700}
KEY=${MEILI_KEY:-devkey}

for pdf in "$@"; do
  title="$(basename "$pdf")"
  pages=$(pdfinfo "$pdf" | awk '/Pages:/ {print $2}')
  for p in $(seq 1 "$pages"); do
    text="$(pdftotext -f "$p" -l "$p" -layout "$pdf" - | sed 's/^[[:space:]]*$//')"
    [ -z "$text" ] && continue
    # Meilisearch ids only allow a-z A-Z 0-9 - _, so hash instead of "title-pN"
    id="$(echo -n "${pdf}:p${p}" | sha1sum | awk '{print $1}')"
    doc=$(jq -nc --arg id "$id" --arg t "$title" --arg src "file://$pdf" --arg p "$p" --arg txt "$text" '{id:$id, type:"pdf", title:$t, source:$src, date:"", text:$txt, meta:{page:$p}}')
    curl -sS -X POST "$BASE_URL/indexes/library/documents" -H "Authorization: Bearer $KEY" -H 'Content-Type: application/json' --data-binary "$doc" >/dev/null
  done
  echo "Indexed $title ($pages pages)"
done