Initial commit

2025-09-04 15:21:07 +01:00
parent d87b02a7ac
commit bb0c5cc8ff
9 changed files with 446 additions and 1 deletion

app/Dockerfile Normal file (+14)

@@ -0,0 +1,14 @@
FROM python:3.11-slim
RUN apt-get update && apt-get install -y --no-install-recommends \
        ffmpeg curl jq poppler-utils \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY app.py worker.py ./
RUN pip install --no-cache-dir gunicorn==22.0.0
EXPOSE 8080
CMD ["gunicorn", "-b", "0.0.0.0:8080", "app:app", "--workers", "2", "--threads", "4"]

app/app.py Normal file (+144)

@@ -0,0 +1,144 @@
from flask import Flask, request, redirect
from markupsafe import escape
import os, json, requests
from redis import Redis
from rq import Queue
MEILI_URL = os.getenv("MEILI_URL", "http://meili:7700")
MEILI_KEY = os.getenv("MEILI_KEY", "")
REDIS_URL = os.getenv("REDIS_URL", "redis://redis:6379/0")
app = Flask(__name__)
q = Queue(connection=Redis.from_url(REDIS_URL))
PAGE = """
<!doctype html><html><head><meta charset="utf-8">
<title>PodX - unified search</title>
<style>
body{font-family:system-ui, sans-serif;max-width:880px;margin:2rem auto;padding:0 1rem}
form{display:flex;gap:.5rem;margin-bottom:1rem}
input[type=url]{flex:1;padding:.7rem}
button{padding:.7rem 1rem}
.card{border:1px solid #ddd;padding:1rem;border-radius:8px;margin:.5rem 0}
small{color:#666}
input[type=search]{width:100%;padding:.6rem;margin:.5rem 0 1rem}
mark{background: #fff2a8}
.badge{display:inline-block;font-size:.75rem;border:1px solid #999;padding:.1rem .4rem;border-radius:999px;margin-right:.4rem}
</style></head><body>
<h1>PodX</h1>
<form action="/enqueue" method="post">
<input type="url" name="url" placeholder="Paste podcast/video URL…" required>
<button type="submit">Fetch & Transcribe</button>
</form>
<details><summary>Batch</summary>
<form action="/enqueue_batch" method="post">
<textarea name="urls" rows="4" style="width:100%" placeholder="One URL per line"></textarea>
<button type="submit">Queue All</button>
</form>
</details>
<h2>Unified search (podcasts + PDFs + EPUB + Kiwix)</h2>
<form id="sform">
<input type="search" name="q" placeholder='e.g., "vector database" OR retrieval augmented generation' autofocus />
</form>
<div id="results"></div>
<script>
const form = document.getElementById('sform');
async function doSearch(){
const q = new URLSearchParams(new FormData(form)).toString();
const r = await fetch('/search?'+q);
document.getElementById('results').innerHTML = await r.text();
}
form.addEventListener('input', doSearch);
doSearch();
</script>
<h2>Recent jobs</h2>
<div id="feed"></div>
<script>
(async function poll(){
try{
const r = await fetch('/recent');
document.getElementById('feed').innerHTML = await r.text();
}catch(e){}
setTimeout(poll, 4000);
})();
</script>
</body></html>
"""
def meili_search(qstr, limit=30):
    if not qstr.strip():
        return []
    # request cropped, highlighted snippets so the "_formatted" field read in
    # /search below is actually present; <mark> tags match the stylesheet above
    body = {"q": qstr, "limit": limit,
            "attributesToCrop": ["text"], "cropLength": 60,
            "attributesToHighlight": ["text"],
            "highlightPreTag": "<mark>", "highlightPostTag": "</mark>"}
    r = requests.post(f"{MEILI_URL}/indexes/library/search",
        headers={"Authorization": f"Bearer {MEILI_KEY}", "Content-Type": "application/json"},
        data=json.dumps(body), timeout=10)
    return r.json().get("hits", [])
@app.get("/")
def index():
return PAGE
@app.post("/enqueue")
def enqueue():
url = request.form["url"].strip()
q.enqueue("worker.handle_url", url)
return redirect("/")
@app.post("/enqueue_batch")
def enqueue_batch():
urls = [u.strip() for u in request.form["urls"].splitlines() if u.strip()]
for u in urls: q.enqueue("worker.handle_url", u)
return redirect("/")
@app.get("/recent")
def recent():
try:
with open("/transcripts/_feed.log", "r", encoding="utf-8") as f:
tail = f.readlines()[-40:]
except FileNotFoundError:
tail=[]
html = []
    for line in reversed(tail):
        try:
            item = json.loads(line)
        except json.JSONDecodeError:  # skip truncated/corrupt feed lines
            continue
        meta = " · ".join(x for x in (item.get("uploader"), item.get("date"), item.get("status")) if x)
        html.append(f"<div class='card'><b>{escape(item.get('title',''))}</b><br>"
                    f"<small>{escape(meta)}</small><br><small>{escape(item.get('path',''))}</small></div>")
    return "\n".join(html)
@app.get("/search")
def search():
qstr = request.args.get("q","")
hits = meili_search(qstr)
out=[]
for h in hits:
t = h.get("title","")
src = h.get("source","")
typ = h.get("type","")
ctx = h.get("_formatted",{}).get("text", h.get("text","")[:300])
segs = h.get("segments",[])
ts = int(segs[0]["start"]) if segs else 0
open_link = f"/open?file={{requests.utils.quote(src)}}&t={ts}" if typ=='podcast' else f"/open?file={{requests.utils.quote(src)}}"
badge = f"<span class='badge'>{typ}</span>"
out.append(
f"<div class='card'><b>{badge}{t}</b><br><small>{src}</small>"
f"<p>{ctx}</p>"
f"<a href='{open_link}'>Open</a>"
f"{' | <a href=\"/subtitle?file='+requests.utils.quote(src)+'\">Transcript</a>' if typ=='podcast' else ''}"
f"</div>"
)
return "\n".join(out) or "<small>No results yet.</small>"
@app.get("/open")
def open_local():
    file = request.args.get("file", "")
    t = int(request.args.get("t", "0"))
    return f"<pre>{escape(file)}\nStart at: {t} sec</pre>"
@app.get("/subtitle")
def subtitle():
file = request.args.get("file","")
base = os.path.splitext(os.path.basename(file))[0]
p = f"/transcripts/{base}.vtt"
if os.path.exists(p):
with open(p,"r",encoding="utf-8") as f:
return f"<pre>{f.read()}</pre>"
return "<small>No VTT found.</small>"

app/requirements.txt Normal file (+8)

@@ -0,0 +1,8 @@
flask==3.0.3
redis==5.0.7
rq==1.16.2
yt-dlp==2024.08.06
faster-whisper==1.0.3
ffmpeg-python==0.2.0
requests==2.32.3
orjson==3.10.7

app/worker.py Normal file (+109)

@@ -0,0 +1,109 @@
import os, subprocess, shutil, json, re, orjson, requests
from pathlib import Path
from faster_whisper import WhisperModel
MEILI_URL = os.getenv("MEILI_URL", "http://meili:7700")
MEILI_KEY = os.getenv("MEILI_KEY", "")
LIB = Path(os.getenv("LIBRARY_ROOT", "/library"))
TRN = Path(os.getenv("TRANSCRIPT_ROOT", "/transcripts"))
TMP = Path(os.getenv("TMP_ROOT", "/tmpdl"))
MODEL_NAME = os.getenv("WHISPER_MODEL","large-v3")
COMPUTE = os.getenv("WHISPER_PRECISION","int8")
TRN.mkdir(parents=True, exist_ok=True)
LIB.mkdir(parents=True, exist_ok=True)
TMP.mkdir(parents=True, exist_ok=True)
model = WhisperModel(MODEL_NAME, compute_type=COMPUTE)
def log(feed):
    with open(TRN / "_feed.log", "a", encoding="utf-8") as f:
        f.write(orjson.dumps(feed).decode() + "\n")
def sanitize(name):
return re.sub(r'[\\/:"*?<>|]+', ' ', name).strip()
def yt_dlp(url, outdir):
outtmpl = str(outdir / "%(uploader)s/%(upload_date)s - %(title)s.%(ext)s")
cmd = [
"yt-dlp", "-o", outtmpl,
"-f", "bv*+ba/best",
"-x", "--audio-format", "m4a",
"--write-thumbnail",
"--no-playlist", "--no-warnings", "--restrict-filenames",
url
]
subprocess.check_call(cmd)
media = list(outdir.rglob("*.[mM][pP]4")) + list(outdir.rglob("*.mkv")) + list(outdir.rglob("*.m4a")) + list(outdir.rglob("*.mp3"))
return sorted(media, key=lambda p: p.stat().st_mtime)[-1:]
def transcribe(media_path: Path):
    # faster-whisper auto-detects the language when language=None; "auto" is not a valid code
    segments, info = model.transcribe(str(media_path), vad_filter=True, language=None)
title = media_path.stem
base = TRN / title
segs = []
text_parts = []
for s in segments:
segs.append({"start": s.start, "end": s.end, "text": s.text})
text_parts.append(s.text)
txt = " ".join(text_parts).strip()
    with open(base.with_suffix(".json"), "wb") as jf:
        jf.write(orjson.dumps({"file": str(media_path), "language": info.language, "segments": segs}))
    with open(base.with_suffix(".txt"), "w", encoding="utf-8") as tf:
        tf.write(txt)
def fmt_ts(t):
h=int(t//3600); m=int((t%3600)//60); s=t-(h*3600+m*60)
return f"{h:02}:{m:02}:{s:06.3f}".replace('.',',')
with open(base.with_suffix(".srt"), "w", encoding="utf-8") as srt:
for i,s in enumerate(segs,1):
srt.write(f"{i}\n{fmt_ts(s['start'])} --> {fmt_ts(s['end'])}\n{s['text'].strip()}\n\n")
with open(base.with_suffix(".vtt"), "w", encoding="utf-8") as vtt:
vtt.write("WEBVTT\n\n")
for s in segs:
vtt.write(f"{fmt_ts(s['start']).replace(',', '.')} --> {fmt_ts(s['end']).replace(',', '.')} \n{s['text'].strip()}\n\n")
return base
def index_meili(json_path: Path):
    with open(json_path, "r", encoding="utf-8") as f:
        doc = json.load(f)
    title = Path(doc["file"]).stem
    # --restrict-filenames turns "YYYYMMDD - Title" into "YYYYMMDD_-_Title", and \b
    # does not match between a digit and an underscore, so use lookarounds instead
    date = re.findall(r"(?<!\d)(\d{8})(?!\d)", title)
    payload = {
        # Meilisearch primary-key values may only contain [a-zA-Z0-9_-], so slug the title
        "id": re.sub(r"[^a-zA-Z0-9_-]", "-", title),
        "type": "podcast",
        "title": title,
        "date": date[0] if date else "",
        "source": doc["file"],  # already the final library path set by handle_url
        "text": " ".join(s["text"] for s in doc.get("segments", [])),
        "segments": doc.get("segments", []),
        "meta": {"language": doc.get("language", "")}
    }
    r = requests.post(f"{MEILI_URL}/indexes/library/documents",
        headers={"Authorization": f"Bearer {MEILI_KEY}", "Content-Type": "application/json"},
        data=orjson.dumps(payload), timeout=30)
    r.raise_for_status()
def handle_url(url: str):
try:
info = {"url": url, "status":"queued", "title":"", "uploader":"", "date":"", "path":""}
log({**info, **{"status":"downloading"}})
files = yt_dlp(url, TMP)
for f in files:
parts = f.relative_to(TMP).parts
uploader = sanitize(parts[0]) if len(parts)>1 else "Unknown"
dest_dir = LIB / uploader
dest_dir.mkdir(parents=True, exist_ok=True)
dest = dest_dir / sanitize(f.name)
shutil.move(str(f), dest)
            m = re.search(r"(?<!\d)(\d{8})(?!\d)", dest.stem)
            info.update({"title": dest.stem, "uploader": uploader,
                         "date": m.group(1) if m else "",
                         "path": str(dest)})
log({**info, **{"status":"transcribing"}})
base = transcribe(dest)
index_meili(base.with_suffix(".json"))
log({**info, **{"status":"done"}})
except Exception as e:
log({"url": url, "status":"error", "error": str(e)})
raise
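
worker.py only defines handle_url; RQ still needs a worker process consuming the "default" queue that app.py enqueues to. The CLI form is `rq worker --url redis://redis:6379/0` (as in the compose sketch above); an equivalent Python entrypoint would look like this sketch (file name assumed, not part of this commit):

# run_worker.py (sketch, not in this commit): RQ worker entrypoint
import os
from redis import Redis
from rq import Queue, Worker

conn = Redis.from_url(os.getenv("REDIS_URL", "redis://redis:6379/0"))
# listen on the "default" queue, the one app.py's Queue(...) enqueues to
Worker([Queue(connection=conn)], connection=conn).work()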