OpenWebUI integration

2025-09-07 10:42:27 +02:00
parent bb0c5cc8ff
commit e3478fb77e
12 changed files with 362 additions and 71 deletions

View File

@@ -4,7 +4,7 @@ from redis import Redis
 from rq import Queue
 MEILI_URL = os.getenv("MEILI_URL", "http://meili:7700")
-MEILI_KEY = os.getenv("MEILI_KEY", "")
+MEILI_KEY = os.getenv("MEILI_KEY", "")  # from .env
 REDIS_URL = os.getenv("REDIS_URL", "redis://redis:6379/0")
 app = Flask(__name__)
@@ -26,8 +26,8 @@ mark{background: #fff2a8}
 </style></head><body>
 <h1>PodX</h1>
 <form action="/enqueue" method="post">
-<input type="url" name="url" placeholder="Paste podcast/video URL…" required>
-<button type="submit">Fetch & Transcribe</button>
+<input type="url" name="url" placeholder="Paste podcast/video/article URL…" required>
+<button type="submit">Fetch</button>
 </form>
 <details><summary>Batch</summary>
 <form action="/enqueue_batch" method="post">
@@ -36,7 +36,7 @@ mark{background: #fff2a8}
 </form>
 </details>
-<h2>Unified search (podcasts + PDFs + EPUB + Kiwix)</h2>
+<h2>Unified search (podcasts + PDFs + EPUB + Kiwix + Web)</h2>
 <form id="sform">
 <input type="search" name="q" placeholder='e.g., "vector database" OR retrieval augmented generation' autofocus />
 </form>

View File

@@ -6,3 +6,7 @@ faster-whisper==1.0.3
 ffmpeg-python==0.2.0
 requests==2.32.3
 orjson==3.10.7
+trafilatura==1.6.3
+lxml==5.3.0
+html5lib==1.1
+tldextract==5.1.2

View File

@@ -10,6 +10,10 @@ TMP = Path(os.getenv("TMP_ROOT", "/tmpdl"))
 MODEL_NAME = os.getenv("WHISPER_MODEL","large-v3")
 COMPUTE = os.getenv("WHISPER_PRECISION","int8")
+OWUI_URL = os.getenv("OPENWEBUI_URL", "").rstrip("/")
+OWUI_KEY = os.getenv("OPENWEBUI_API_KEY", "")
+OWUI_KB = os.getenv("OPENWEBUI_KB_NAME", "Homelab Library")
 TRN.mkdir(parents=True, exist_ok=True)
 LIB.mkdir(parents=True, exist_ok=True)
 TMP.mkdir(parents=True, exist_ok=True)
@@ -17,9 +21,11 @@ TMP.mkdir(parents=True, exist_ok=True)
 model = WhisperModel(MODEL_NAME, compute_type=COMPUTE)
 def log(feed):
-    with open(TRN / "_feed.log", "a", encoding="utf-8") as f:
-        import orjson as _oj
-        f.write(_oj.dumps(feed).decode()+"\n")
+    try:
+        with open(TRN / "_feed.log", "a", encoding="utf-8") as f:
+            f.write(orjson.dumps(feed).decode()+"\n")
+    except Exception:
+        pass
 def sanitize(name):
     return re.sub(r'[\\/:"*?<>|]+', ' ', name).strip()
@@ -42,15 +48,13 @@ def transcribe(media_path: Path):
     segments, info = model.transcribe(str(media_path), vad_filter=True, language=None)  # None = auto-detect; faster-whisper rejects "auto"
     title = media_path.stem
     base = TRN / title
-    segs = []
-    text_parts = []
+    segs, text_parts = [], []
     for s in segments:
         segs.append({"start": s.start, "end": s.end, "text": s.text})
         text_parts.append(s.text)
     txt = " ".join(text_parts).strip()
-    import orjson as _oj
-    open(base.with_suffix(".json"), "wb").write(_oj.dumps({"file": str(media_path), "language": info.language, "segments": segs}))
+    open(base.with_suffix(".json"), "wb").write(orjson.dumps({"file": str(media_path), "language": info.language, "segments": segs}))
     open(base.with_suffix(".txt"), "w", encoding="utf-8").write(txt)
     return base
 def fmt_ts(t):
@@ -81,11 +85,124 @@ def index_meili(json_path: Path):
     }
     r = requests.post(f"{MEILI_URL}/indexes/library/documents",
                       headers={"Authorization": f"Bearer {MEILI_KEY}", "Content-Type":"application/json"},
-                      data=__import__('orjson').dumps(payload))
+                      data=orjson.dumps(payload))
     r.raise_for_status()
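+# --- Web article support: fetch a page, extract the readable text, snapshot it, index it ---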
+import tldextract, trafilatura, requests as _requests
+def slugify(text):
+    text = re.sub(r'[^A-Za-z0-9\-._ ]+', '', text).strip().replace(' ', '_')
+    return text[:120] or 'page'
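+# Download a page, pull the main text with trafilatura, and save .html/.txt snapshots under LIB/web/<domain>/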
+def save_web_snapshot(url: str):
+    r = _requests.get(url, timeout=30, headers={"User-Agent":"Mozilla/5.0"})
+    r.raise_for_status()
+    html = r.text
+    downloaded = trafilatura.load_html(html)  # load_html takes only the HTML; the URL is passed to extract()
+    text = trafilatura.extract(downloaded, url=url, include_comments=False, include_images=False, with_metadata=True) or ""
+    meta = trafilatura.metadata.extract_metadata(downloaded)
+    m = re.search(r'<title[^>]*>(.*?)</title>', html, re.I | re.S)
+    title = (meta.title if meta and getattr(meta, 'title', None) else None) or (m.group(1).strip() if m else url)
+    date = meta.date if meta and getattr(meta, 'date', None) else ""
+    parts = tldextract.extract(url)
+    domain = ".".join([p for p in [parts.domain, parts.suffix] if p])
+    slug = slugify(title)
+    outdir = LIB / "web" / domain
+    outdir.mkdir(parents=True, exist_ok=True)
+    base = outdir / slug
+    open(base.with_suffix(".html"), "w", encoding="utf-8", errors="ignore").write(html)
+    open(base.with_suffix(".txt"), "w", encoding="utf-8", errors="ignore").write(text)
+    return base, title, domain, date, text
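+# Index the extracted article into the same Meilisearch "library" index used for podcasts, PDFs, and EPUBs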
+def index_web(base: Path, title: str, domain: str, date: str, text: str, url: str):
+    payload = {
+        "id": f"web:{domain}:{base.stem}",
+        "type": "web",
+        "title": title,
+        "date": re.sub(r'[^0-9]', '', date)[:8] if date else "",
+        "source": f"file://{str(base.with_suffix('.html'))}",
+        "text": text,
+        "segments": [],
+        "meta": {"url": url, "domain": domain}
+    }
+    r = requests.post(f"{MEILI_URL}/indexes/library/documents",
+                      headers={"Authorization": f"Bearer {MEILI_KEY}", "Content-Type":"application/json"},
+                      data=orjson.dumps(payload))
+    r.raise_for_status()
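+# Crude host allowlist: these URLs go through yt-dlp + Whisper; anything else is treated as a web article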
+def is_media_url(url: str):
+    lowered = url.lower()
+    media_hosts = ["youtube.com","youtu.be","rumble.com","vimeo.com","soundcloud.com","spotify.com","podbean.com","buzzsprout.com"]
+    return any(h in lowered for h in media_hosts)
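+# --- Open WebUI publishing: mirror finished text into a knowledge base ---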
+def owui_headers():
+    return {"Authorization": f"Bearer {OWUI_KEY}"} if OWUI_KEY else {}
+def owui_get_or_create_kb():
+    if not OWUI_URL or not OWUI_KEY:
+        return None
+    try:
+        r = requests.get(f"{OWUI_URL}/api/v1/knowledge/list", headers=owui_headers(), timeout=15)
+        r.raise_for_status()
+        for kb in r.json().get("data", []):
+            if kb.get("name") == OWUI_KB:
+                return kb["id"]
+    except Exception:
+        pass
+    r = requests.post(
+        f"{OWUI_URL}/api/v1/knowledge/create",
+        headers={**owui_headers(), "Content-Type": "application/json"},
+        data=orjson.dumps({"name": OWUI_KB, "description": "All local content indexed by podx"}),
+        timeout=15,
+    )
+    r.raise_for_status()
+    return r.json()["data"]["id"]
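+# Upload a single file to Open WebUI, then attach it to the knowledge base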
+def owui_upload_and_attach(path: Path, kb_id: str):
+    with open(path, "rb") as f:
+        r = requests.post(f"{OWUI_URL}/api/v1/files/", headers=owui_headers(), files={"file": (path.name, f)}, timeout=60*10)
+    r.raise_for_status()
+    file_id = r.json()["data"]["id"]
+    r = requests.post(
+        f"{OWUI_URL}/api/v1/knowledge/{kb_id}/file/add",
+        headers={**owui_headers(), "Content-Type": "application/json"},
+        data=orjson.dumps({"file_id": file_id}),
+        timeout=60,
+    )
+    r.raise_for_status()
+    return True
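+# Best-effort fan-out: any Open WebUI failure is logged and swallowed so indexing still succeeds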
+def publish_to_openwebui(paths):
+    if not OWUI_URL or not OWUI_KEY:
+        return
+    try:
+        kb_id = owui_get_or_create_kb()
+        for p in paths:
+            p = Path(p)
+            if not p.exists():
+                continue
+            try:
+                owui_upload_and_attach(p, kb_id)
+            except Exception as e:
+                log({"url": str(p), "status": "owui_error", "error": str(e)})
+    except Exception as e:
+        log({"status": "owui_error", "error": str(e)})
+def handle_web(url: str):
+    info = {"url": url, "status":"web-downloading", "title":"", "uploader":"", "date":"", "path":""}
+    log(info)
+    base, title, domain, date, text = save_web_snapshot(url)
+    info.update({"title": title, "uploader": domain, "date": date, "path": str(base.with_suffix('.html'))})
+    log({**info, **{"status":"web-indexing"}})
+    index_web(base, title, domain, date, text, url)
+    push = [p for p in [base.with_suffix('.txt'), base.with_suffix('.html')] if p.exists()]
+    publish_to_openwebui(push)
+    log({**info, **{"status":"done"}})
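+# Dispatcher: article URLs short-circuit to handle_web; media URLs continue to yt-dlp + Whisper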
 def handle_url(url: str):
     try:
+        if not is_media_url(url):
+            handle_web(url)
+            return
         info = {"url": url, "status":"queued", "title":"", "uploader":"", "date":"", "path":""}
         log({**info, **{"status":"downloading"}})
         files = yt_dlp(url, TMP)
@@ -96,13 +213,13 @@ def handle_url(url: str):
         dest_dir.mkdir(parents=True, exist_ok=True)
         dest = dest_dir / sanitize(f.name)
         shutil.move(str(f), dest)
-        import re as _re
         info.update({"title": dest.stem, "uploader": uploader,
-                     "date": _re.findall(r"\b(\d{8})\b", dest.stem)[0] if _re.findall(r"\b(\d{8})\b", dest.stem) else "",
+                     "date": (re.findall(r"\b(\d{8})\b", dest.stem)[0] if re.findall(r"\b(\d{8})\b", dest.stem) else ""),
                      "path": str(dest)})
         log({**info, **{"status":"transcribing"}})
         base = transcribe(dest)
         index_meili(base.with_suffix(".json"))
+        publish_to_openwebui([base.with_suffix(".txt")])
         log({**info, **{"status":"done"}})
     except Exception as e:
         log({"url": url, "status":"error", "error": str(e)})