# app.py Optimised AF
from flask import Flask, render_template, request, redirect, url_for, jsonify
from funcs import (
    process_videos, group_videos, match_data_to_video_fast,
    get_all_videos, get_all_data
)
from config import connect_redis
from concurrent.futures import ThreadPoolExecutor
import hashlib, json, math, os, subprocess, time, zlib
# ───────── CONFIG ───────── #
app = Flask(__name__)
redis = connect_redis()
CACHE_KEY = "video_cache_v2" # bump key so we don't fight old data
META_HASH = "video_meta_v2" # per-file meta cache
THUMB_DIR = "static/thumbnails"
VIDEOS_PER_PAGE = 20
THUMB_WIDTH = 320 # px
FF_QUALITY = "80" # 0-100 for WebP
SCAN_DIRS = [
    r"E:/streamaster/downloaded/",
    r"U:/encoded",
    r"U:/count_sorted"
]
DATA_DIRS = [
    r"E:/streamaster/data",
    r"E:/streamaster/downloaded",
]
os.makedirs(THUMB_DIR, exist_ok=True)
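# Assumption: connect_redis() (from config) returns a redis-py style client;
# only get/set/hget/hset are used below. SCAN_DIRS/DATA_DIRS are local Windows
# mounts specific to this box.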
# ───────── UTILS ───────── #
def _hashed_thumb_path(video_id: str) -> str:
    """
    static/thumbnails/ab/cd/<video_id>.webp
    keeps any subdir under ~256 files.
    """
    h = hashlib.md5(video_id.encode()).hexdigest()
    sub1, sub2 = h[:2], h[2:4]
    path = os.path.join(THUMB_DIR, sub1, sub2)
    os.makedirs(path, exist_ok=True)
    return os.path.join(path, f"{video_id}.webp")
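# ffmpeg invocation used below: start at 0s, let the "thumbnail" filter pick a
# representative frame, scale it to THUMB_WIDTH px wide (-1 keeps aspect ratio),
# and write a single WebP at FF_QUALITY.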
def _gen_thumb_cmd(src: str, dest: str):
    return [
        "ffmpeg", "-y", "-loglevel", "error",
        "-ss", "0", "-i", src,
        "-vframes", "1",
        "-vf", f"thumbnail,scale={THUMB_WIDTH}:-1",
        "-q:v", FF_QUALITY,
        dest
    ]
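# Worker for the thread pool in build_cache(): the heavy lifting happens in an
# ffmpeg subprocess, so plain Python threads are enough here.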
def generate_thumbnail(task):
    """Run in threadpool. task = (video_path, dest_path)"""
    src, dest = task
    if os.path.exists(dest):
        return
    subprocess.run(_gen_thumb_cmd(src, dest), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
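# Assumption: get_all_videos()/get_all_data() (from funcs) return lists of dicts
# carrying at least "filepath" and "size"; build_cache() below treats "size" as
# megabytes when converting to GB.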
def load_video_lists():
    videos, data = [], []
    for d in SCAN_DIRS:
        videos += get_all_videos(d)
    for d in DATA_DIRS:
        data += get_all_data(d)
    parsed, _ = match_data_to_video_fast(videos, data)
    return process_videos(parsed)
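# Full rebuild pipeline: scan dirs -> group by (username, platform) -> queue any
# missing/stale thumbnails -> render them in parallel -> compress the result and
# store it in Redis plus a compressed copy on disk.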
def build_cache():
    parsed_videos = load_video_lists()
    grouped = group_videos(parsed_videos, sort_by="count", order="desc")
    storage_usage = {}
    avg_sizes = {}
    video_map = {}
    # Threaded thumb generation queue
    thumb_tasks = []
    for (username, platform), vids in grouped.items():
        key = f"{username}::{platform}"
        total_gb = sum(v["size"] for v in vids) / 1024
        storage_usage[key] = {"total_size": total_gb, "video_count": len(vids)}
        avg_sizes[key] = total_gb / len(vids) if vids else 0
        for v in vids:
            video_id = os.path.basename(v["filepath"]).rsplit(".", 1)[0]
            thumb_path = _hashed_thumb_path(video_id)
            # Meta-cache (skip thumb regen if unchanged)
            mtime = os.path.getmtime(v["filepath"])
            meta = redis.hget(META_HASH, v["filepath"])
            if not meta or json.loads(meta)["mtime"] != mtime:
                thumb_tasks.append((v["filepath"], thumb_path))
                redis.hset(META_HASH, v["filepath"],
                           json.dumps({"mtime": mtime, "thumb": thumb_path}))
            v["thumbnail"] = thumb_path
        video_map[key] = vids
    # Smash thumbnails in parallel
    if thumb_tasks:
        # cpu_count() can return None, so fall back to a small default
        with ThreadPoolExecutor(max_workers=(os.cpu_count() or 4) * 2) as exe:
            list(exe.map(generate_thumbnail, thumb_tasks))
    cache = {
        "timestamp"     : time.time(),
        "videos"        : video_map,
        "storage_usage" : storage_usage,
        "avg_sizes"     : avg_sizes
    }
    # Compress JSON → binary before Redis (compress once, reuse for the disk copy)
    blob = zlib.compress(json.dumps(cache).encode())
    redis.set(CACHE_KEY, blob)
    # also drop to disk in case Redis is wiped
    with open("video_cache.json.gz", "wb") as f:
        f.write(blob)
    return cache
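# Read path: Redis blob first, then the on-disk compressed copy, and only
# rebuild from scratch if both are missing.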
def get_cached_data():
    # try Redis first
    blob = redis.get(CACHE_KEY)
    if blob:
        return json.loads(zlib.decompress(blob).decode())
    # fallback to disk
    if os.path.exists("video_cache.json.gz"):
        with open("video_cache.json.gz", "rb") as f:
            return json.loads(zlib.decompress(f.read()).decode())
    # last resort: full rebuild
    return build_cache()
# ───────── ROUTES ───────── #
@app.route("/")
def dashboard():
cache = get_cached_data()
sorted_usage = sorted(
cache["storage_usage"].items(),
key=lambda x: x[1]["total_size"],
reverse=True
)
return render_template(
"analytics.html",
storage_usage=sorted_usage,
avg_sizes=cache["avg_sizes"]
)
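# Manual rebuild endpoint; hit it after adding files, e.g.
#   curl http://127.0.0.1:5000/refresh   (default Flask dev host/port)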
@app.route("/refresh")
def refresh():
cache = build_cache()
return jsonify({
"status" : "ok",
"videos" : sum(x["video_count"] for x in cache["storage_usage"].values()),
"updated" : time.ctime(cache["timestamp"])
})
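# Per-user listing. Note: the dict union (v | {...}) below needs Python 3.9+,
# and ?page=N is clamped to at least 1 (no upper clamp, out-of-range pages just
# render an empty list).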
@app.route("/user/<username>")
def user_page(username):
cache = get_cached_data()
videos = [v | {"platform": key.split("::")[1]}
for key, vids in cache["videos"].items()
if key.split("::")[0] == username
for v in vids]
page = max(1, int(request.args.get("page", 1)))
total_pages = max(1, math.ceil(len(videos) / VIDEOS_PER_PAGE))
start = (page - 1) * VIDEOS_PER_PAGE
return render_template(
"user_page.html",
username=username,
videos=videos[start:start + VIDEOS_PER_PAGE],
page=page, total_pages=total_pages
)
if __name__ == "__main__":
    app.run(debug=True)