init
parent a93d13d19d
commit 350eea60a3
@@ -0,0 +1,100 @@
from moviepy.editor import VideoFileClip, concatenate_videoclips
import os, cv2


def add_intro_to_video(input_video, intro_video='intro.mp4', output_video='output.mp4'):
    clip_main = VideoFileClip(input_video)

    # Match the intro to the main clip's resolution and frame rate.
    clip_intro = VideoFileClip(intro_video).resize(clip_main.size).set_fps(clip_main.fps)

    # If the main clip has audio but the intro does not, give the intro a silent
    # track so the concatenated result keeps its audio in sync.
    if clip_main.audio is not None and clip_intro.audio is None:
        from moviepy.editor import AudioArrayClip
        import numpy as np
        # AudioArrayClip expects an (n_samples, n_channels) array.
        n_samples = int(clip_intro.duration * clip_main.audio.fps)
        silent_audio = AudioArrayClip(
            np.zeros((n_samples, clip_main.audio.nchannels)),
            fps=clip_main.audio.fps,
        )
        clip_intro = clip_intro.set_audio(silent_audio)

    final_clip = concatenate_videoclips([clip_intro, clip_main])

    final_clip.write_videofile(output_video, codec='libx264')


def get_duration(input_file):
    if not os.path.isfile(input_file):
        print('Input file does not exist')
        return 0

    try:
        video = cv2.VideoCapture(input_file)
        frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
        fps = video.get(cv2.CAP_PROP_FPS)
        duration = frames / fps
        video.release()

        return int(duration)
    except Exception as e:
        print(e)
        return 0


def generate_thumbnails(input_file, filename):
    output_folder = 'temp/'
    if not os.path.isfile(input_file):
        raise ValueError('Input file does not exist')
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    posterPath = os.path.join(output_folder, f'{filename}.jpg')
    previewPath = os.path.join(output_folder, f'{filename}.mp4')

    clip = VideoFileClip(input_file)
    duration = clip.duration

    interval = duration / 11.0

    # Poster frame from the start of the first interval.
    start_time_first_clip = 0 * interval
    try:
        clip.save_frame(posterPath, t=start_time_first_clip)
    except Exception:
        pass

    # Build a preview reel of ten 1-second samples spread evenly across the video.
    clips = []
    for i in range(10):
        start_time = i * interval
        end_time = start_time + 1
        clips.append(clip.subclip(start_time, end_time))

    final_clip = concatenate_videoclips(clips).resize(newsize=(384, 216)).without_audio()
    final_clip.write_videofile(previewPath, fps=24, codec="libx264")

    for subclip in clips:
        subclip.close()

    clip.close()
    final_clip.close()

    return posterPath, previewPath


def split_video(file_path, segment_size_gb=8):
    import subprocess

    # Convert GB to bytes
    segment_size_bytes = segment_size_gb * 1024 * 1024 * 1024

    # Get the total size of the video file
    total_size_bytes = os.path.getsize(file_path)

    # Calculate the number of segments needed
    num_segments = total_size_bytes // segment_size_bytes + 1

    # Get the duration of the video file
    duration = get_duration(file_path)

    # Calculate the duration of each segment
    segment_duration = duration / num_segments

    # Generate output file pattern
    file_name, file_extension = os.path.splitext(file_path)
    output_pattern = f"{file_name}_segment_%03d{file_extension}"

    # Run FFmpeg command to split the video
    command = [
        "ffmpeg", "-i", file_path, "-c", "copy", "-map", "0",
        "-segment_time", str(segment_duration), "-f", "segment", output_pattern
    ]
    subprocess.run(command)
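
A minimal usage sketch for the helpers above (illustrative only, not part of the commit; the module name MP4Manager is inferred from the import in funcs.py, and the sample paths are placeholders):

# Illustrative usage; module name and paths are assumptions.
from MP4Manager import get_duration, generate_thumbnails, split_video

length_seconds = get_duration("clips/stream_0001.mp4")            # 0 if the file is missing or unreadable
poster, preview = generate_thumbnails("clips/stream_0001.mp4", "stream_0001")  # writes temp/stream_0001.jpg and .mp4
split_video("clips/stream_0001.mp4", segment_size_gb=8)           # writes clips/stream_0001_segment_000.mp4, ...
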
@@ -0,0 +1,62 @@
from funcs import process_videos, group_videos, match_data_to_video_fast, get_all_videos, get_all_data
from flask import Flask, render_template
import os
from collections import defaultdict

app = Flask(__name__)

SCAN_DIRS = [
    "E:/streamaster/downloaded/",
    "U:/encoded",
    "U:/count_sorted"
]

DATA_DIRS = [
    "E:/streamaster/data",
    "E:/streamaster/downloaded",
]


# ----------- Data Processing -----------
def load_video_data():
    videos = []
    for d in SCAN_DIRS:
        videos += get_all_videos(d)

    data = []
    for d in DATA_DIRS:
        data += get_all_data(d)

    parsed_videos, unmatched = match_data_to_video_fast(videos, data)
    parsed_videos = process_videos(parsed_videos)
    video_data = group_videos(parsed_videos, sort_by="count", order="desc")
    return video_data


def compute_analytics(video_data):
    storage_usage = defaultdict(lambda: {"total_size": 0, "video_count": 0})
    per_video_sizes = {}

    for (username, platform), vids in video_data.items():
        total_size_gb = sum(v['size'] for v in vids) / 1024  # Convert MB to GB
        avg_size_gb = (total_size_gb / len(vids)) if vids else 0

        storage_usage[(username, platform)]["total_size"] += total_size_gb
        storage_usage[(username, platform)]["video_count"] += len(vids)
        per_video_sizes[(username, platform)] = avg_size_gb

    return storage_usage, per_video_sizes


# ----------- Flask Routes -----------
@app.route("/")
def analytics_dashboard():
    video_data = load_video_data()
    storage_usage, per_video_sizes = compute_analytics(video_data)

    # Sort by total storage used
    sorted_usage = sorted(storage_usage.items(), key=lambda x: x[1]["total_size"], reverse=True)

    return render_template("analytics.html",
                           storage_usage=sorted_usage,
                           avg_sizes=per_video_sizes)


if __name__ == "__main__":
    app.run(debug=True)
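
For clarity, a small hand-built example of the shape compute_analytics expects (sizes in MB, as produced by the funcs.py helpers) and what it returns; the names and numbers below are made up:

# Illustrative input/output for compute_analytics (values are made up).
sample = {
    ("alice", "twitch"): [{"size": 2048}, {"size": 1024}],  # 3 GB total
    ("bob", "youtube"): [{"size": 512}],                     # 0.5 GB total
}
usage, avg = compute_analytics(sample)
# usage[("alice", "twitch")] -> {"total_size": 3.0, "video_count": 2}
# avg[("alice", "twitch")]   -> 1.5  (GB per video)
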
@@ -0,0 +1,127 @@
from flask import Flask, render_template, request, redirect, url_for
from funcs import process_videos, group_videos, match_data_to_video_fast, get_all_videos, get_all_data
from config import connect_redis
import json, os, time, math, subprocess

# -------------------- CONFIG -------------------- #
app = Flask(__name__)
redis = connect_redis()

CACHE_KEY = "video_cache"
THUMB_DIR = "static/thumbnails"
VIDEOS_PER_PAGE = 20

SCAN_DIRS = [
    "E:/streamaster/downloaded/",
    "U:/encoded",
    "U:/count_sorted"
]

DATA_DIRS = [
    "E:/streamaster/data",
    "E:/streamaster/downloaded",
]

os.makedirs(THUMB_DIR, exist_ok=True)


# -------------------- UTILS -------------------- #
def generate_thumbnail(video_path, thumb_path):
    if not os.path.exists(thumb_path):
        cmd = [
            "ffmpeg", "-y", "-i", video_path, "-ss", "00:00:05.000",
            "-vframes", "1", thumb_path
        ]
        subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)


def load_video_data():
    videos = []
    for d in SCAN_DIRS:
        videos += get_all_videos(d)

    data = []
    for d in DATA_DIRS:
        data += get_all_data(d)

    parsed_videos, unmatched = match_data_to_video_fast(videos, data)
    parsed_videos = process_videos(parsed_videos)
    video_data = group_videos(parsed_videos, sort_by="count", order="desc")
    return video_data


def compute_analytics(video_data):
    storage_usage = {}
    avg_sizes = {}
    video_map = {}

    for (username, platform), vids in video_data.items():
        total_size_gb = sum(v['size'] for v in vids) / 1024
        avg_size_gb = (total_size_gb / len(vids)) if vids else 0
        key = f"{username}::{platform}"
        storage_usage[key] = {
            "total_size": total_size_gb,
            "video_count": len(vids)
        }
        avg_sizes[key] = avg_size_gb

        for v in vids:
            video_id = os.path.basename(v['filepath'])
            thumb_path = os.path.join(THUMB_DIR, f"{video_id}.jpg")
            generate_thumbnail(v['filepath'], thumb_path)
            v['thumbnail'] = thumb_path
        video_map[key] = vids
    return storage_usage, avg_sizes, video_map


def refresh_data():
    video_data = load_video_data()
    storage_usage, avg_sizes, video_map = compute_analytics(video_data)
    cache = {
        "timestamp": time.time(),
        "videos": video_map,
        "storage_usage": storage_usage,
        "avg_sizes": avg_sizes
    }
    redis.set(CACHE_KEY, json.dumps(cache))
    return cache


def get_cached_data():
    try:
        cached = redis.get(CACHE_KEY)
        if cached:
            return json.loads(cached)  # Use the cache if it exists
    except Exception:
        pass
    return refresh_data()  # Generate and store fresh data if the cache is empty or unreadable


# -------------------- ROUTES -------------------- #
@app.route("/")
def dashboard():
    cache = get_cached_data()
    sorted_usage = sorted(cache["storage_usage"].items(), key=lambda x: x[1]["total_size"], reverse=True)
    return render_template("analytics.html", storage_usage=sorted_usage, avg_sizes=cache["avg_sizes"])


@app.route("/refresh")
def refresh():
    refresh_data()
    return redirect(url_for("dashboard"))


@app.route("/user/<username>")
def user_page(username):
    cache = get_cached_data()
    videos = []
    for key, vid_list in cache["videos"].items():
        user, platform = key.split("::")
        if user == username:
            for v in vid_list:
                v['platform'] = platform
            videos.extend(vid_list)

    page = int(request.args.get("page", 1))
    total_pages = math.ceil(len(videos) / VIDEOS_PER_PAGE)
    start = (page - 1) * VIDEOS_PER_PAGE
    paginated = videos[start:start + VIDEOS_PER_PAGE]

    return render_template("user_page.html",
                           username=username,
                           videos=paginated,
                           page=page,
                           total_pages=total_pages)


if __name__ == "__main__":
    app.run(debug=True)
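
The cache written by refresh_data is a single JSON document stored under CACHE_KEY; a rough sketch of its layout (field values are illustrative):

# Approximate shape of the JSON stored under "video_cache" (illustrative values).
{
    "timestamp": 1739900000.0,
    "storage_usage": {"alice::twitch": {"total_size": 3.0, "video_count": 2}},
    "avg_sizes": {"alice::twitch": 1.5},
    "videos": {"alice::twitch": [{"filepath": "U:/encoded/abc123.mp4",
                                  "size": 2048,
                                  "thumbnail": "static/thumbnails/abc123.mp4.jpg"}]}
}
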
@@ -0,0 +1,47 @@
from redis import Redis
import json

redisCred = {"host": "192.168.0.27", "port": 30036, "password": "********"}


def redis_gen_connection():
    return Redis(host=redisCred["host"], port=redisCred["port"], password=redisCred["password"])


def connect_redis():
    REDIS_HOST = redisCred["host"]
    REDIS_PORT = redisCred["port"]
    REDIS_PASSWORD = redisCred["password"]

    try:
        client = Redis(
            host=REDIS_HOST,
            port=REDIS_PORT,
            password=REDIS_PASSWORD,
            decode_responses=True
        )

        response = client.ping()
        if response:
            print("Connected to Redis successfully!")
            return client
        else:
            print("Failed to connect to Redis!")
            return None
    except Exception as e:
        print(f"An error occurred: {e}")
        return None


def get_streamer_data(username):
    try:
        redis_client = redis_gen_connection()
        streamer_data = redis_client.hget("streamers", username)

        if streamer_data is None:
            return None

        streamer_data = json.loads(streamer_data)
        return streamer_data
    except Exception as e:
        print(f"Unexpected error: {e}")

    return None
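
A quick, illustrative example of how these helpers are used by the dashboard app (the streamer name is a placeholder):

# Illustrative only; "some_streamer" is a placeholder.
client = connect_redis()                      # decoded-string client, or None on failure
if client is not None:
    client.set("video_cache", "{}")           # the dashboard stores its JSON cache like this

profile = get_streamer_data("some_streamer")  # dict parsed from the "streamers" hash, or None
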
@@ -0,0 +1,400 @@
from datetime import datetime, timedelta, timezone
from MP4Manager import get_duration
import os, json, subprocess, shutil


def is_file_empty(filepath):
    return os.stat(filepath).st_size == 0


def format_datetime(datetime_str):
    """Parse a 'YYYY-MM-DD HH:MM:SS' string into a datetime object."""
    return datetime.strptime(datetime_str, "%Y-%m-%d %H:%M:%S")


def get_file_size_in_mb(file_path):
    return os.path.getsize(file_path) / (1024 ** 2)


def get_file_size_gb(file_path):
    return os.path.getsize(file_path) / 1024 / 1024 / 1024


def get_data(data_path):
    try:
        with open(data_path, 'r') as file:
            data = json.load(file)
        return data
    except Exception as e:
        print(f"Error loading {data_path}: {e}")
        return None


def update_video_data(dataPath, data):
    """Update or create a JSON file for the video metadata."""
    if os.path.exists(dataPath):
        with open(dataPath, "r") as f:
            existing_data = json.load(f)

        if existing_data == data:
            return  # No update needed if data hasn't changed.

    data["updatedAt"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    with open(dataPath, "w") as f:
        json.dump(data, f)  # Write to file if new or if data has changed.


def is_recent(updated_at_str, minutes=30):
    updated_at = format_datetime(updated_at_str)
    updated_at = updated_at.replace(tzinfo=timezone.utc)
    now = datetime.now(timezone.utc)
    return now - updated_at < timedelta(minutes=minutes)


def is_file_size_bigger_than(file_size_in_mb, max_size_gb):
    """Check if the file size is bigger than the specified max size in GB."""
    max_size_megabytes = max_size_gb * 1024  # Convert GB to MB
    return file_size_in_mb > max_size_megabytes


def cleanup_data_files(folder_path):
    videos = [video for video in os.listdir(folder_path) if video.endswith(".json")]
    for filename in videos:
        json_path = os.path.join(folder_path, filename)
        video_path = json_path.replace(".json", ".mp4")
        if not os.path.exists(video_path):
            os.remove(json_path)


def get_video_data(videoPath):
    with open(videoPath, "r") as f:
        data = json.load(f)
    return data


def get_videos(folder_path):
    """Retrieve video metadata from the JSON files in a specified folder."""
    video_list = []

    # List all .mp4 files and their corresponding .json metadata files
    videos = [f for f in os.listdir(folder_path) if f.endswith(".mp4")]

    for video_filename in videos:
        video_path = os.path.join(folder_path, video_filename)
        json_path = video_path.replace(".mp4", ".json")

        if not os.path.exists(json_path):
            continue

        data = get_video_data(json_path)
        data['size'] = get_file_size_in_mb(video_path)  # Include size in MB for further processing
        data['filepath'] = video_path

        video_list.append(data)

    return video_list


def group_videos(video_list, sort_by="count", order="desc"):
    """Group video data by username and site, and sort the groups by video creation time."""
    video_data = {}
    is_desc = order == "desc"

    for video in video_list:
        key = (video["username"], video["site"])
        if key not in video_data:
            video_data[key] = []
        video_data[key].append(video)

    # Ensure videos for each user and site are sorted by creation date
    for key in video_data:
        video_data[key].sort(key=lambda x: format_datetime(x["createdAt"]))

    # Further sort groups if required based on size or count
    if sort_by == "size":
        video_data = dict(sorted(video_data.items(), key=lambda x: sum(item['size'] for item in x[1]), reverse=is_desc))
    elif sort_by == "count":
        video_data = dict(sorted(video_data.items(), key=lambda x: len(x[1]), reverse=is_desc))

    return video_data


def process_videos(video_data):
    processed_videos = []
    failed_directory = "failed"
    os.makedirs(failed_directory, exist_ok=True)  # Quarantine folder must exist before files are moved into it.

    for video in video_data:
        is_updated = False
        video_path = video["filepath"]
        data_path = video["jsonpath"]

        if 'size' not in video:
            filesize = get_file_size_in_mb(video_path)
            video['size'] = filesize
            is_updated = True

        if 'duration' not in video:
            video['duration'] = get_duration(video_path)
            is_updated = True

        # Move corrupted videos to the failed folder
        if video['duration'] == 0:
            print(f"{video['videoID']} is corrupted, moving to failed folder")
            failed_video_path = os.path.join(failed_directory, video["videoID"] + ".mp4")
            failed_data_path = failed_video_path.replace(".mp4", ".json")

            shutil.move(video_path, failed_video_path)
            shutil.move(data_path, failed_data_path)

            continue  # Skip further processing for this video

        if is_updated:
            update_video_data(data_path, video)

        processed_videos.append(video)

    return processed_videos


def group_for_concatenation(videos, time_limit=30):
    """
    Groups videos into lists where:
      - total group size <= 9GB (9216 MB),
      - time gap between consecutive videos <= time_limit minutes,
      - AND all have the same resolution/fps/codecs for no-reencode concat.
    """
    concatenated_video_groups = []
    current_group = []
    current_size_mb = 0
    last_video_end = None
    reference_params = None  # ffprobe params of the first video in each group

    for video in videos:
        video_start = format_datetime(video['createdAt'])
        video_end = video_start + timedelta(seconds=video['duration'])

        # Probe the video to get parameters
        video_path = video['filepath']
        params = get_video_params(video_path)
        if params is None:
            # If ffprobe fails, skip this video
            print(f"Skipping {video_path}, failed to get ffprobe info.")
            continue

        if current_group:
            # Check whether adding this video breaks the size or time-gap limit
            time_difference = (video_start - last_video_end).total_seconds() / 60
            size_exceeded = (current_size_mb + video['size'] > 9216)
            time_exceeded = (time_difference > time_limit)

            # Check if the video parameters match the group's reference
            param_mismatch = False
            if reference_params:
                # Compare relevant fields
                for field in ['video_codec', 'width', 'height', 'pix_fmt', 'fps',
                              'audio_codec', 'audio_sample_rate', 'audio_channels', 'audio_channel_layout']:
                    if params[field] != reference_params[field]:
                        param_mismatch = True
                        break

            # If we exceed size, exceed time gap, or mismatch in parameters => start new group
            if size_exceeded or time_exceeded or param_mismatch:
                concatenated_video_groups.append(current_group)
                current_group = []
                current_size_mb = 0
                reference_params = None  # reset for new group

        # If we're starting a new group, set reference parameters
        if not current_group:
            reference_params = params

        # Add the current video to the group
        current_group.append(video)
        current_size_mb += video['size']
        last_video_end = video_end

    # Add the last group if not empty
    if current_group:
        concatenated_video_groups.append(current_group)

    # Ensure the last group is "ready" for upload: if its newest video was created
    # less than time_limit minutes ago, drop the group for now.
    if concatenated_video_groups:
        last_group = concatenated_video_groups[-1]
        last_video = last_group[-1]
        last_updated_at = datetime.strptime(last_video['createdAt'], "%Y-%m-%d %H:%M:%S")
        if datetime.now() - last_updated_at <= timedelta(minutes=time_limit):
            print("Last group is not ready for upload. Removing from final groups.")
            concatenated_video_groups.pop()

    # Only keep groups that actually need concatenation
    concatenated_video_groups = [group for group in concatenated_video_groups if len(group) > 1]

    return concatenated_video_groups


def get_video_params(video_path):
    """
    Run ffprobe on a given video path to extract:
      - codec_name (video + audio)
      - width, height
      - pix_fmt
      - r_frame_rate (frame rate)
      - sample_rate, channel_layout (audio)
    Returns a dict with these parameters or None if there's an error.
    """
    cmd = [
        'ffprobe', '-v', 'error',
        '-print_format', 'json',
        '-show_streams',
        '-show_format',
        video_path
    ]

    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
        info = json.loads(result.stdout)

        # Parse out the first video & audio streams we find.
        video_stream = next((s for s in info['streams'] if s['codec_type'] == 'video'), None)
        audio_stream = next((s for s in info['streams'] if s['codec_type'] == 'audio'), None)

        if not video_stream:
            raise ValueError(f"No video stream found in {video_path}")

        # Frame rate can be something like "30000/1001" - convert to float
        r_frame_rate = video_stream.get('r_frame_rate', '0/0')
        try:
            num, den = r_frame_rate.split('/')
            fps = float(num) / float(den) if float(den) != 0 else 0.0
        except (ValueError, ZeroDivisionError):
            fps = 0.0

        # Gather the key parameters
        params = {
            'video_codec': video_stream.get('codec_name', 'unknown'),
            'width': video_stream.get('width', 0),
            'height': video_stream.get('height', 0),
            'pix_fmt': video_stream.get('pix_fmt', 'unknown'),
            'fps': fps,
            'audio_codec': audio_stream.get('codec_name', 'none') if audio_stream else 'none',
            'audio_sample_rate': audio_stream.get('sample_rate', '0') if audio_stream else '0',
            'audio_channels': audio_stream.get('channels', 0) if audio_stream else 0,
            'audio_channel_layout': audio_stream.get('channel_layout', 'none') if audio_stream else 'none'
        }

        return params

    except (subprocess.CalledProcessError, ValueError, KeyError, json.JSONDecodeError) as e:
        print(f"Failed to run ffprobe on {video_path}: {e}")
        return None
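
For reference, the dict returned by get_video_params for a typical 1080p H.264/AAC file looks roughly like this (values illustrative); these are exactly the fields compared in group_for_concatenation:

# Example return value of get_video_params (illustrative values).
{
    'video_codec': 'h264', 'width': 1920, 'height': 1080,
    'pix_fmt': 'yuv420p', 'fps': 30.0,
    'audio_codec': 'aac', 'audio_sample_rate': '48000',
    'audio_channels': 2, 'audio_channel_layout': 'stereo'
}
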


def generate_list_file(videos):
    """Write an ffmpeg concat-demuxer list file next to the first video in the group."""
    directory = os.path.dirname(videos[0]["filepath"])
    list_filename = os.path.join(directory, f"{videos[0]['videoID']}.txt")
    with open(list_filename, "w") as list_file:
        for video in videos:
            list_file.write(f"file '{video['videoID']}.mp4'\n")
    return list_filename


def concatenate_videos(grouped_videos, directory):
    """Concatenate pre-grouped videos, updating metadata and managing file operations."""
    processed_videos = []

    for group in grouped_videos:
        if len(group) == 1:
            processed_videos.append(group[0])
            continue

        # Set up paths based on the first video in the group
        first_video = group[0]
        video_path = first_video["filepath"]
        data_path = video_path.replace(".mp4", ".json")
        temp_path = video_path.replace(".mp4", "_temp.mp4")

        # Generate a list file for ffmpeg concatenation
        list_filename = generate_list_file(group)

        # Run ffmpeg to concatenate videos
        subprocess.run(["ffmpeg", "-f", "concat", "-safe", "0", "-i", list_filename, "-c", "copy", temp_path])

        # Remove individual video files and their metadata
        for v in group:
            os.remove(v["filepath"])
            os.remove(v["filepath"].replace(".mp4", ".json"))
        os.remove(list_filename)

        os.rename(temp_path, video_path)

        # Update the metadata for the concatenated video
        first_video["filepath"] = video_path
        first_video["size"] = get_file_size_in_mb(video_path)
        first_video["duration"] = get_duration(video_path)
        update_video_data(data_path, first_video)  # Reflect the result of the concatenation in the JSON metadata
        processed_videos.append(first_video)

    return processed_videos


def get_all_videos(directory):
    # Find all .mp4 files in the directory and its subdirectories
    videos = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith(".mp4"):
                videos.append(os.path.join(root, file))
    return videos


def get_all_data(directory):
    # Find all .json files in the directory and its subdirectories
    data = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith(".json"):
                data.append(os.path.join(root, file))
    return data


def match_data_to_video_fast(videos, data):
    data_dict = {os.path.splitext(os.path.basename(d))[0]: d for d in data}
    matched, unmatched = [], []
    for v in videos:
        video_id = os.path.splitext(os.path.basename(v))[0]
        if video_id in data_dict:
            matched.append((v, data_dict[video_id]))
        else:
            unmatched.append(v)
    return parse_video_data(matched), unmatched


def parse_video_data(matched_videos):
    """Load the JSON metadata for each matched (video, json) pair and attach file paths."""
    import tqdm
    video_list = []

    with tqdm.tqdm(total=len(matched_videos), desc="Parsing video data") as pbar:
        for video in matched_videos:
            pbar.update(1)
            video_path, json_path = video

            data = get_video_data(json_path)
            data['filepath'] = video_path
            data['jsonpath'] = json_path

            video_list.append(data)

    return video_list


def get_videos_matched(video_dirs, data_dirs):
    # Get all videos
    videos = []
    for d in video_dirs:
        videos += get_all_videos(d)

    # Get all data
    data = []
    for d in data_dirs:
        data += get_all_data(d)

    # Match the data to the videos
    parsed_videos, unmatched = match_data_to_video_fast(videos, data)

    return parsed_videos, unmatched
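
Taken together, the intended flow through this module appears to be roughly the following (directory names are placeholders, and running the concatenation per user/site group is an assumption based on how group_videos sorts its output):

# Illustrative end-to-end pipeline (paths are placeholders).
videos, unmatched = get_videos_matched(["E:/streamaster/downloaded/"], ["E:/streamaster/data"])
videos = process_videos(videos)                      # fill in size/duration, quarantine corrupt files
grouped = group_videos(videos, sort_by="count")      # {(username, site): [video, ...]} sorted by createdAt

for (username, site), vids in grouped.items():
    groups = group_for_concatenation(vids, time_limit=30)
    concatenate_videos(groups, "E:/streamaster/downloaded/")
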
@@ -0,0 +1,114 @@
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <title>Video Storage Analytics</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            background: #111;
            color: #eee;
            text-align: center;
        }

        table {
            margin: auto;
            border-collapse: collapse;
            width: 90%;
            margin-top: 20px;
        }

        th,
        td {
            border: 1px solid #444;
            padding: 10px;
            cursor: pointer;
        }

        th {
            background: #333;
        }

        tr:nth-child(even) {
            background: #222;
        }

        th.sort-asc::after {
            content: " ▲";
        }

        th.sort-desc::after {
            content: " ▼";
        }

        #search {
            margin: 10px;
            padding: 8px;
            width: 300px;
        }
    </style>
</head>

<body>
    <h1>📊 Video Storage Analytics</h1>
    <button onclick="window.location.href='/refresh'">🔄 Refresh Data</button>
    <input type="text" id="search" placeholder="Search users...">
    <table id="analytics-table">
        <thead>
            <tr>
                <th>User</th>
                <th>Platform</th>
                <th>Total Storage (GB)</th>
                <th>Video Count</th>
                <th>Avg Size per Video (GB)</th>
            </tr>
        </thead>
        <tbody>
            {% for key, stats in storage_usage %}
            {% set user, platform = key.split("::") %}
            <tr>
                <td><a href="/user/{{ user }}">{{ user }}</a></td>
                <td>{{ platform }}</td>
                <td>{{ "%.2f"|format(stats.total_size) }}</td>
                <td>{{ stats.video_count }}</td>
                <td>{{ "%.2f"|format(avg_sizes[key]) }}</td>
            </tr>
            {% endfor %}
        </tbody>
    </table>
    <script>
        const table = document.getElementById('analytics-table');
        const headers = table.querySelectorAll('th');
        const searchInput = document.getElementById('search');
        let sortDirection = {};

        headers.forEach((header, index) => {
            header.addEventListener('click', () => {
                const rows = Array.from(table.querySelector('tbody').rows);
                const isNumeric = index >= 2;
                const dir = sortDirection[index] === 'asc' ? 'desc' : 'asc';
                sortDirection[index] = dir;
                headers.forEach(h => h.classList.remove('sort-asc', 'sort-desc'));
                header.classList.add(dir === 'asc' ? 'sort-asc' : 'sort-desc');

                rows.sort((a, b) => {
                    const aVal = isNumeric ? parseFloat(a.cells[index].innerText) : a.cells[index].innerText.toLowerCase();
                    const bVal = isNumeric ? parseFloat(b.cells[index].innerText) : b.cells[index].innerText.toLowerCase();
                    return dir === 'asc' ? (aVal > bVal ? 1 : -1) : (aVal < bVal ? 1 : -1);
                });
                rows.forEach(row => table.querySelector('tbody').appendChild(row));
            });
        });

        searchInput.addEventListener('keyup', () => {
            const term = searchInput.value.toLowerCase();
            Array.from(table.querySelector('tbody').rows).forEach(row => {
                const text = row.cells[0].innerText.toLowerCase();
                row.style.display = text.includes(term) ? '' : 'none';
            });
        });
    </script>
</body>

</html>
@@ -0,0 +1,67 @@
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <title>{{ username }}'s Videos</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            background: #111;
            color: #eee;
            text-align: center;
        }

        .grid {
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
            gap: 15px;
            margin: 20px;
        }

        .video {
            background: #222;
            padding: 10px;
            border-radius: 5px;
        }

        img {
            width: 100%;
            border-radius: 5px;
        }

        .pagination {
            margin-top: 20px;
        }

        a {
            color: #0af;
            text-decoration: none;
        }
    </style>
</head>

<body>
    <h1>🎥 Videos for {{ username }}</h1>
    <a href="/">⬅ Back to Dashboard</a>
    <div class="grid">
        {% for video in videos %}
        <div class="video">
            <img src="/{{ video.thumbnail }}" alt="Thumbnail">
            <p><b>{{ video['filepath'].split('/')[-1] }}</b></p>
            <p>Platform: {{ video.platform }}</p>
            <p>Size: {{ "%.2f"|format(video.size/1024) }} GB</p>
        </div>
        {% endfor %}
    </div>
    <div class="pagination">
        {% if page > 1 %}
        <a href="?page={{ page-1 }}">⬅ Prev</a>
        {% endif %}
        <span>Page {{ page }} / {{ total_pages }}</span>
        {% if page < total_pages %}
        <a href="?page={{ page+1 }}">Next ➡</a>
        {% endif %}
    </div>
</body>

</html>