main
oscar 11 months ago
parent d4fb23f798
commit 72bd5f9ba7

@ -0,0 +1,53 @@
import os
import shutil
import hashlib
def clean_empty_folders(directory):
    """Delete every empty sub-directory beneath *directory*."""
    # Bottom-up walk: children are pruned before their parents are examined,
    # so a parent emptied by a child's removal is also removed this pass.
    for parent, child_dirs, _files in os.walk(directory, topdown=False):
        for child in child_dirs:
            folder_path = os.path.join(parent, child)
            if not os.listdir(folder_path):
                os.rmdir(folder_path)
                print(f"Removed empty folder: {folder_path}")
def calculate_file_hash(file_path, hash_func='sha256'):
    """Return the hex digest of *file_path*'s contents.

    :param file_path: path of the file to hash
    :param hash_func: any algorithm name accepted by hashlib.new
    :return: hexadecimal digest string
    """
    h = hashlib.new(hash_func)
    with open(file_path, 'rb') as file:
        # 8 KiB chunks keep memory flat for arbitrarily large files.
        while chunk := file.read(8192):
            h.update(chunk)
    return h.hexdigest()
def get_media_type(filename):
    """Classify *filename* as 'image' or 'video' by extension; None otherwise."""
    media_by_ext = {
        '.jpg': 'image', '.jpeg': 'image', '.webp': 'image', '.png': 'image', '.gif': 'image',
        '.mp4': 'video', '.mov': 'video'
    }
    lowered = filename.lower()
    matches = (kind for ext, kind in media_by_ext.items() if lowered.endswith(ext))
    return next(matches, None)
def move_files(source_root, destination_root):
    # Relocate files whose names match known StorySave/Facebook/Instagram
    # naming patterns into per-user folders under destination_root,
    # preserving the layout relative to source_root.
    for root, dirs, files in os.walk(source_root):
        for file in files:
            if "~" in file or 'FB_IMG' in file or 's instagram' in file:
                # Assumes the username is the text before the first
                # apostrophe (e.g. "alice's instagram story"); for names
                # with no apostrophe this is the whole filename —
                # TODO confirm that is intended.
                username = file.split("'")[0]
                source_path = os.path.join(root, file)
                rel_path = os.path.relpath(root, source_root)
                destination_path = os.path.join(destination_root, username, rel_path)
                if not os.path.exists(destination_path):
                    os.makedirs(destination_path)
                shutil.move(source_path, os.path.join(destination_path, file))
                print(f"Moved {file} to {destination_path}")
def _main():
    """Sort downloaded StorySave media, then prune emptied folders."""
    print('Starting processing...')
    source_directory = 'StorySave_Sort/Sort/StorySave'
    destination_directory = 'StorySave_Sort/Final/Stories'
    move_files(source_directory, destination_directory)
    clean_empty_folders(source_directory)
    print("Processing completed.")

if __name__ == '__main__':
    _main()

@ -0,0 +1,94 @@
from BunnyCDN.Storage import Storage
from moviepy.editor import VideoFileClip
import config
import hashlib
import requests
import os
def file_hash_from_url(url, hash_algo='sha256'):
    """Download *url* (streamed) and return the hex digest of its body.

    :param url: file URL to fetch
    :param hash_algo: any algorithm name accepted by hashlib.new
    :raises Exception: when the server does not answer with HTTP 200
    """
    h = hashlib.new(hash_algo)
    # Context manager ensures the streamed connection is always released
    # (the original leaked the response on the error path).
    with requests.get(url, stream=True) as response:
        if response.status_code == 200:
            for chunk in response.iter_content(8192):
                h.update(chunk)
            return h.hexdigest()
        raise Exception(f"Failed to download file: Status code {response.status_code}")
def get_video_duration(file_path):
    """
    Returns the duration of the video file in seconds.

    :param file_path: Path to the video file
    :return: Duration in seconds (0 when the file cannot be read as video)
    """
    try:
        with VideoFileClip(file_path) as video:
            return video.duration
    except Exception:
        # Bad/partial downloads are common; treat unreadable files as 0s.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        return 0
def file_hash(filename, hash_algo='sha256'):
    """
    Compute the hash of a file.

    :param filename: Path to the file.
    :param hash_algo: Hashing algorithm to use (e.g., 'sha256', 'md5').
    :return: Hexadecimal hash string.
    """
    digest = hashlib.new(hash_algo)
    with open(filename, 'rb') as stream:
        # iter() with a b'' sentinel reads until end-of-file.
        for block in iter(lambda: stream.read(8192), b''):
            digest.update(block)
    return digest.hexdigest()
# the hash of the images are different due to optimizer
#obj_storage = Storage('577cb82d-8176-4ccf-935ce0a574bf-fe4c-4012', 'altpins')
# NOTE(review): storage API key is committed in source — should live in
# config/environment, not the repository.
obj_storage = Storage('345697f9-d9aa-4a6b-a5ec8bffc16d-ceaf-453e', 'storysave')

db, cursor = config.gen_connection()

# Back-fill the `duration` column for videos that never had it computed.
cursor.execute("SELECT id, media_id, media_url FROM media WHERE duration = 0 AND media_type = 'video' AND status != 'deleted';")
results = cursor.fetchall()

count = 0
print(f"Found {len(results)} files to process.")

cacheDir = 'cache'

for result in results:
    count += 1
    videoID = result[0]
    mediaID = result[1]
    mediaURL = result[2]
    extension = mediaURL.split('.')[-1]
    # Derive the object-storage path from the public CDN URL.
    serverPath = result[2].replace("https://storysave.b-cdn.net/", '').replace('//', '/').replace('\\', '/')
    localFilePath = os.path.join(cacheDir, os.path.basename(serverPath))
    if os.path.exists(localFilePath):
        print(f"File already exists: {localFilePath}")
    else:
        obj_storage.DownloadFile(storage_path=serverPath, download_path=cacheDir)
    duration = get_video_duration(localFilePath)
    if duration == 0:
        print(f"Failed to get duration for {localFilePath}")
        continue
    # Sub-second clips are rounded up so they don't look unprocessed (0).
    if duration < 1:
        duration = 1
    cursor.execute("UPDATE media SET duration = %s WHERE id = %s;", (duration, result[0]))
    db.commit()
    print(f"[{count}/{len(results)}] {result[1]}: {duration}, {cursor.rowcount}")

@ -0,0 +1,54 @@
from moviepy.editor import VideoFileClip
import os, cv2, hashlib
from PIL import Image
def get_video_dimensions(video_path):
    """Return (width, height) of *video_path* in pixels via OpenCV."""
    capture = cv2.VideoCapture(video_path)
    dimensions = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    capture.release()
    return dimensions
def determine_post_type(filepath, mediatype):
    """Guess whether media is a story or a feed post from its aspect ratio.

    Instagram stories are 9:16 (ratio 0.5625), so anything with a
    width/height ratio strictly between 0.5 and 0.6 is treated as a story.

    :param filepath: path to the media file
    :param mediatype: 'image' or 'video'
    :return: 'stories' or 'posts'
    :raises ValueError: if *mediatype* is neither 'image' nor 'video'
    """
    if mediatype == 'image':
        with Image.open(filepath) as img:
            width, height = img.size
    elif mediatype == 'video':
        width, height = get_video_dimensions(filepath)
    else:
        # Previously fell through and crashed with NameError on `width`.
        raise ValueError(f"unsupported media type: {mediatype!r}")
    aspect_ratio = width / height
    return 'stories' if 0.5 < aspect_ratio < 0.6 else 'posts'
def get_media_type(filename):
    """Return 'image', 'video' or 'unknown' based on the file extension."""
    image_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp", ".tiff", ".tif", ".svg", ".eps", ".raw", ".cr2", ".nef", ".orf", ".sr2", ".heic", ".indd", ".ai", ".psd", ".svg"}
    video_extensions = {".mp4", ".mov"}
    # splitext keeps the leading dot, matching the sets above.
    _, extension = os.path.splitext(filename.lower())
    if extension in video_extensions:
        return 'video'
    if extension in image_extensions:
        return 'image'
    return 'unknown'
def get_video_duration(file_path):
    """Return the clip length of *file_path* in seconds, or 0 on failure."""
    try:
        with VideoFileClip(file_path) as video:
            return video.duration
    except Exception as e:
        # Unreadable/corrupt files are logged and counted as zero seconds.
        print(f"Error getting duration for {file_path}: {e}")
        return 0
def calculate_file_hash(file_path, hash_func='sha256'):
    """Hash *file_path* with *hash_func* and return the hex digest."""
    hasher = hashlib.new(hash_func)
    with open(file_path, 'rb') as stream:
        for piece in iter(lambda: stream.read(8192), b''):
            hasher.update(piece)
    return hasher.hexdigest()

@ -0,0 +1,19 @@
# skit idea for movie avigail and the hackers at 05:58
import subprocess
import tkinter as tk

# Prop window for the skit: an exaggerated "hacker" PIN entry box.
window = tk.Tk()
window.title("ENTER PIN BOOM BOOM HURUMPH HACKER OOOOHHHH")

label = tk.Label(window, text="Enter PIN to hack:")
label.pack()

pin_entry = tk.Entry(window, show=".")
pin_entry.pack()

# On Enter, hand the typed PIN to the prop "hack" script.
pin_entry.bind("<Return>", lambda event: subprocess.run(["python", "hack.py", pin_entry.get()]))

# mainloop() blocks and dispatches events; the previous
# `while True: window.update()` spun one CPU core at 100%.
window.mainloop()

@ -0,0 +1 @@
DH3ucOuYLbJ2Va3lfJPEYQq_6mk_v3R9dnrAYSQHr-Q=

@ -0,0 +1,137 @@
from BunnyCDN.Storage import Storage
from PIL import Image
import os, uuid, cv2, config
import hashlib
def clean_empty_folders(directory):
    """Recursively delete empty directories under *directory*."""
    # Bottom-up so children are pruned before their parents are checked.
    for base, dirnames, _ in os.walk(directory, topdown=False):
        for name in dirnames:
            folder_path = os.path.join(base, name)
            if not os.listdir(folder_path):
                os.rmdir(folder_path)
                print(f"Removed empty folder: {folder_path}")
def calculate_file_hash(file_path, hash_func='sha256'):
    """Return the hex digest of *file_path*'s contents.

    :param file_path: file to hash
    :param hash_func: algorithm name accepted by hashlib.new
    """
    h = hashlib.new(hash_func)
    with open(file_path, 'rb') as file:
        # Walrus loop replaces the old `chunk = 0` sentinel, which compared
        # an int to bytes and hashed a trailing empty chunk.
        while chunk := file.read(8192):
            h.update(chunk)
    return h.hexdigest()
def extract_file_info(filename):
    """Parse a "user~timestamp~mediaid_userid.ext" StorySave filename.

    :param filename: e.g. "alice~1700~123_456.jpg"
    :return: (username, media_id, user_id, timestamp); all None on mismatch
    """
    try:
        parts = filename.split("~")
        username = parts[0]
        timestamp = parts[1]
        # The third segment is "<media_id>_<user_id>.<ext>".
        media_id, rest = parts[2].split("_")
        user_id = rest.split(".")[0]
        return username, media_id, user_id, timestamp
    except (IndexError, ValueError):
        # Narrowed from a bare `except:` so unrelated bugs still surface.
        return None, None, None, None
def extract_file_info2(filename):
    """Parse the older "user~mediaid_userid.ext" StorySave filename form.

    :return: (username, media_id, user_id); all None if parsing fails
    """
    try:
        username = filename.split("~")[0]
        elements = filename.split("~")[1].split("_")
        media_id = elements[0]
        user_id = elements[1].split(".")[0]
        return username, media_id, user_id
    except IndexError:
        # Narrowed from a bare `except:`; only missing segments can fail.
        return None, None, None
def upload_file(filepath, username, media_id = None, media_type='image', post_type = 'story', user_id = None, date = None):
    """Upload one media file to BunnyCDN and record it in the DB.

    Relies on module-level globals `newCursor`/`newDB` (database),
    `obj_storage` (BunnyCDN client) and `existing_files` (seen media_ids).
    Deletes the local file after a successful upload.
    """
    filename = os.path.basename(filepath)
    file_extension = filename.split('.')[-1]
    dirtype = 'stories' if post_type == 'story' else 'posts'
    # Fall back to a random hex name when no Instagram media_id is known.
    server_path = f'users/{dirtype}/{username}/{media_id if media_id else uuid.uuid4().hex}.{file_extension}'
    file_url = f"https://storysave.b-cdn.net/{server_path}"
    fileHash = calculate_file_hash(filepath)
    if media_type == 'image':
        with Image.open(filepath) as img:
            width, height = img.size
    else:
        width, height = get_video_dimensions(filepath)
    # INSERT IGNORE: rows violating a unique constraint are skipped silently.
    query = "INSERT IGNORE INTO media (username, media_type, media_url, width, height, media_id, post_type, user_id, hash, date) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    values = (username, media_type, file_url, width, height, media_id, post_type, user_id, fileHash, date)
    newCursor.execute(query, values)
    newDB.commit()
    existing_files.append(media_id)
    if newCursor.rowcount == 0:
        print('What the fuck just happend?')
    # Upload only after the DB row exists, then drop the local copy.
    obj_storage.PutFile(filepath, server_path)
    os.remove(filepath)
    print(f'[{newCursor.rowcount}]{os.path.basename(filepath)} {file_url}')
def get_video_dimensions(video_path):
    """Return the (width, height) of a video using OpenCV metadata."""
    reader = cv2.VideoCapture(video_path)
    size = (int(reader.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    reader.release()
    return size
def get_media_type(filename):
    """Return 'image' or 'video' from the extension, or None if neither.

    :param filename: name or path; matching is case-insensitive
    """
    lowered = filename.lower()
    # str.endswith accepts a tuple — one call instead of a chained `or`.
    if lowered.endswith(('.jpg', '.webp', '.jpeg', '.png', '.gif')):
        return 'image'
    if lowered.endswith(('.mp4', '.mov')):
        return 'video'
    return None
def dump_instagram(folder_path):
    """Walk per-user folders and upload every parsable StorySave file.

    Uses module-level `existing_files` for dedupe. Filenames that cannot
    be parsed are skipped; duplicates are deleted locally.
    """
    for root, dirs, files in os.walk(folder_path):
        for folder in dirs:
            username = folder
            # NOTE(review): rebinds the `folder_path` parameter inside the
            # loop — works, but shadows the argument name.
            folder_path = os.path.join(root, folder)
            for filename in os.listdir(folder_path):
                if "~" not in filename:
                    continue
                username, media_id, user_id, timestamp = extract_file_info(filename)
                if None in [username, media_id, user_id, timestamp]:
                    # Fall back to the older two-segment filename format.
                    username, media_id, user_id = extract_file_info2(filename)
                    if None in [username, media_id, user_id]:
                        print(f"Failed to extract info from (unknown)")
                        continue
                media_id = int(media_id) if media_id else None
                if media_id in existing_files:
                    print(f'Duplicate, (unknown)')
                    os.remove(os.path.join(folder_path, filename))
                    continue
                filepath = os.path.join(folder_path, filename)
                mediatype = get_media_type(filename)
                upload_file(username=username, media_type=mediatype, filepath=filepath, media_id=media_id, user_id = user_id,)
if __name__ == '__main__':
    print('Starting processing...')
    newDB, newCursor = config.gen_connection()
    # NOTE(review): storage credentials are hard-coded — move to config.
    obj_storage = Storage('345697f9-d9aa-4a6b-a5ec8bffc16d-ceaf-453e', 'storysave')
    # Preload every known media_id so uploads can be skipped client-side.
    newCursor.execute("SELECT media_id FROM media")
    existing_files = [image[0] for image in newCursor.fetchall()]
    dump_instagram('StorySave/')
    print("Processing completed.")

@ -0,0 +1,110 @@
from BunnyCDN.Storage import Storage
from PIL import Image
import os, uuid, cv2, config
def scan_dupes(folder_path):
    """Delete files whose numeric media_id is already in `existing_files`.

    Only the ".mp4"/".jpg" suffixes are stripped before parsing; ids that
    are not pure integers are reported as errors and left in place.
    """
    for root, dirs, files in os.walk(folder_path):
        for folder in dirs:
            folder_path = os.path.join(root, folder)
            for filename in os.listdir(folder_path):
                media_id = filename.replace('.mp4', '').replace('.jpg', '')
                filepath = os.path.join(folder_path, filename)
                if media_id:
                    try:
                        if int(media_id) in existing_files:
                            print(f'Duplicate')
                            os.remove(filepath)
                    except:
                        # NOTE(review): bare except also hides OS errors
                        # from os.remove, not just bad int() parses.
                        print(f'Error: {filepath}')
def clean_empty_folders(directory):
    """Prune all empty sub-directories beneath *directory*."""
    for current, children, _unused in os.walk(directory, topdown=False):
        for entry in children:
            folder_path = os.path.join(current, entry)
            # An empty listing means nothing (files or dirs) remains inside.
            if not os.listdir(folder_path):
                os.rmdir(folder_path)
                print(f"Removed empty folder: {folder_path}")
def upload_file(filepath, username, media_id = None, media_type='image', post_type = 'story'):
    """Upload a media file to BunnyCDN and insert a row into `media`.

    Uses module-level globals `existing_files`, `obj_storage`, `newCursor`
    and `newDB`. Duplicate numeric media_ids are deleted locally and the
    call returns True; non-numeric/None ids get a random uuid4 hex.
    """
    filename = os.path.basename(filepath)
    file_extension = filename.split('.')[-1]
    # NOTE(review): the bare except below also fires if os.remove fails,
    # silently replacing the media_id with a uuid — confirm intended.
    try:
        if int(media_id) in existing_files:
            print(f'Duplicate')
            os.remove(filepath)
            return True
    except: media_id = uuid.uuid4().hex
    dirtype = 'stories' if post_type == 'story' else 'posts'
    server_path = f'users/{dirtype}/{username}/{media_id}.{file_extension}'
    obj_storage.PutFile(filepath, server_path)
    file_url = f"https://storysave.b-cdn.net/{server_path}"
    if media_type == 'image':
        with Image.open(filepath) as img:
            width, height = img.size
    else:
        width, height = get_video_dimensions(filepath)
    # INSERT IGNORE: duplicate keys are skipped without raising.
    query = "INSERT IGNORE INTO media (username, media_type, media_url, width, height, media_id, post_type) VALUES (%s, %s, %s, %s, %s, %s, %s)"
    values = (username, media_type, file_url, width, height, media_id, post_type)
    newCursor.execute(query, values)
    newDB.commit()
    os.remove(filepath)
    print(f'[{newCursor.rowcount}]{os.path.basename(filepath)} {file_url}')
def get_video_dimensions(video_path):
    """Read a video's frame width and height via OpenCV."""
    probe = cv2.VideoCapture(video_path)
    w = int(probe.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(probe.get(cv2.CAP_PROP_FRAME_HEIGHT))
    probe.release()
    return w, h
def get_media_type(filename):
    """Classify by extension: 'image', 'video', or None when unrecognised."""
    name = filename.lower()
    for ext in ('.jpg', '.webp', '.jpeg', '.png', '.gif'):
        if name.endswith(ext):
            return 'image'
    for ext in ('.mp4', '.mov'):
        if name.endswith(ext):
            return 'video'
def dump_instagram(folder_path):
    """Upload every file in per-user subfolders of *folder_path*.

    The post type is derived from the first path component.
    """
    for root, dirs, files in os.walk(folder_path):
        for folder in dirs:
            username = folder
            folder_path = os.path.join(root, folder)
            # NOTE(review): splitting on '\\' assumes Windows separators;
            # on POSIX this never matches and everything becomes 'post'.
            post_type = 'story' if folder_path.split('\\')[0] == 'stories' else 'post'
            for filename in os.listdir(folder_path):
                media_id = filename.replace('.mp4', '').replace('.jpg', '')
                filepath = os.path.join(folder_path, filename)
                mediatype = get_media_type(filename)
                upload_file(username=username, media_type=mediatype, filepath=filepath, media_id=media_id, post_type=post_type)
if __name__ == '__main__':
    print('Starting processing...')
    newDB, newCursor = config.gen_connection()
    # NOTE(review): hard-coded storage credentials — move to config.
    obj_storage = Storage('345697f9-d9aa-4a6b-a5ec8bffc16d-ceaf-453e', 'storysave')
    newCursor.execute("SELECT media_id FROM media")
    existing_files = [image[0] for image in newCursor.fetchall()]
    # Upload first, then re-scan for dupes, then drop emptied folders.
    dump_instagram('media/posts')
    dump_instagram('media/stories')
    scan_dupes('media/posts')
    scan_dupes('media/stories')
    clean_empty_folders('media/posts')
    clean_empty_folders('media/stories')
    print("Processing completed.")

@ -0,0 +1,56 @@
from BunnyCDN.Storage import Storage
import os, config, requests
from moviepy.editor import VideoFileClip
def get_media_type(filename):
    """Map a filename extension to 'image', 'video' or 'unknown'."""
    kinds = {
        '.jpg': 'image', '.jpeg': 'image', '.png': 'image', '.gif': 'image', '.webp': 'image',
        '.mp4': 'video', '.mov': 'video',
    }
    extension = os.path.splitext(filename.lower())[1]
    return kinds.get(extension, 'unknown')
def determine_post_type(media_type):
    """Return the post type; currently it mirrors the media type directly."""
    return media_type
def get_video_dimensions(filepath):
    """Return (width, height) of the clip at *filepath* using moviepy."""
    with VideoFileClip(filepath) as clip:
        width, height = clip.size
    return width, height
def download_file(url):
    """Stream *url* into a local file named after the URL's last segment.

    :return: the local filename written
    """
    target = url.split('/')[-1]
    # Note: Stream=True to avoid loading the whole file into memory
    with requests.get(url, stream=True) as resp:
        resp.raise_for_status()
        with open(target, 'wb') as sink:
            for piece in resp.iter_content(chunk_size=8192):
                sink.write(piece)
    return target
if __name__ == '__main__':
    newDB, newCursor = config.gen_connection()
    # NOTE(review): storage client is created but never used below.
    obj_storage = Storage('345697f9-d9aa-4a6b-a5ec8bffc16d-ceaf-453e', 'storysave')

    # Each line of the recovery list is "username~url".
    # `with` guarantees the handle closes even if a line fails to parse
    # (the original open()/close() pair leaked on any exception).
    with open('fucked', 'r') as posts:
        for item in posts:
            username, url = item.strip().split('~')
            media_id = url.split('/')[-1].split('.')[0]
            media_type = get_media_type(url)
            query = "INSERT IGNORE INTO media (username, media_type, platform, media_url) VALUES (%s, %s, %s, %s)"
            values = (username, media_type, 'facebook', url)
            try:
                newCursor.execute(query, values)
                newDB.commit()
                print(f'[{newCursor.rowcount}] records updated.{url}')
            except Exception as e:
                print(f"Database error: {e}")

@ -0,0 +1,47 @@
from BunnyCDN.Storage import Storage
import config
import hashlib
import os
def file_hash(filename, hash_algo='sha256'):
    """
    Compute the hash of a file.

    :param filename: Path to the file.
    :param hash_algo: Hashing algorithm to use (e.g., 'sha256', 'md5').
    :return: Hexadecimal hash string.
    """
    hasher = hashlib.new(hash_algo)
    with open(filename, 'rb') as handle:
        for part in iter(lambda: handle.read(8192), b''):
            hasher.update(part)
    return hasher.hexdigest()
#obj_storage = Storage('577cb82d-8176-4ccf-935ce0a574bf-fe4c-4012', 'altpins')
# NOTE(review): storage API key committed in source — move to config.
obj_storage = Storage('345697f9-d9aa-4a6b-a5ec8bffc16d-ceaf-453e', 'storysave')

db, cursor = config.gen_connection()

# Back-fill the `hash` column for rows that never had one computed.
cursor.execute("SELECT id, media_id, media_url FROM media WHERE hash IS NULL;")
results = cursor.fetchall()

count = 0
print(f"Found {len(results)} files to process.")

for result in results:
    count += 1
    # Convert the public CDN URL back into an object-storage path.
    serverPath = result[2].replace("https://storysave.b-cdn.net/", '').replace('//', '/').replace('\\', '/')
    localFilePath = os.path.join(os.getcwd(), 'temp', os.path.basename(serverPath))
    if not os.path.exists(localFilePath):
        obj_storage.DownloadFile(storage_path=serverPath, download_path=os.path.join(os.getcwd(), 'temp'))
    filehash = file_hash(localFilePath)
    cursor.execute("UPDATE media SET hash = %s WHERE id = %s;", (filehash, result[0]))
    db.commit()
    print(f"[{count}/{len(results)}] {result[1]}: {filehash}, {cursor.rowcount}")

@ -0,0 +1,112 @@
from BunnyCDN.Storage import Storage
from PIL import Image
import os, uuid, cv2, config
def scan_dupes(folder_path):
    """Remove files whose integer media_id already exists in `existing_files`.

    Filenames that do not parse as integers are silently ignored.
    """
    for root, dirs, files in os.walk(folder_path):
        for folder in dirs:
            folder_path = os.path.join(root, folder)
            for filename in os.listdir(folder_path):
                media_id = filename.replace('.mp4', '').replace('.jpg', '')
                filepath = os.path.join(folder_path, filename)
                if media_id:
                    try:
                        if int(media_id) in existing_files:
                            print(f'Duplicate')
                            os.remove(filepath)
                    except:
                        # NOTE(review): bare except; swallows os.remove
                        # failures too, not only int() parse errors.
                        pass
def clean_empty_folders(directory):
    """Walk bottom-up and delete every directory that has no contents."""
    for where, subdirs, _files in os.walk(directory, topdown=False):
        for sub in subdirs:
            folder_path = os.path.join(where, sub)
            if os.listdir(folder_path):
                continue  # still holds files or folders
            os.rmdir(folder_path)
            print(f"Removed empty folder: {folder_path}")
def upload_file(filepath, username, media_id = None, media_type='image', post_type = 'story'):
    """Upload a media file to BunnyCDN and insert a row into `media`.

    Uses module-level globals `existing_files`, `obj_storage`, `newCursor`
    and `newDB`. Duplicate numeric media_ids are deleted locally and the
    call returns True; non-numeric/None ids get a random uuid4 hex.
    """
    filename = os.path.basename(filepath)
    file_extension = filename.split('.')[-1]
    # NOTE(review): the bare except below also fires if os.remove fails,
    # silently replacing the media_id with a uuid — confirm intended.
    try:
        if int(media_id) in existing_files:
            print(f'Duplicate')
            os.remove(filepath)
            return True
    except: media_id = uuid.uuid4().hex
    dirtype = 'stories' if post_type == 'story' else 'posts'
    server_path = f'users/{dirtype}/{username}/{media_id}.{file_extension}'
    obj_storage.PutFile(filepath, server_path)
    file_url = f"https://storysave.b-cdn.net/{server_path}"
    if media_type == 'image':
        with Image.open(filepath) as img:
            width, height = img.size
    else:
        width, height = get_video_dimensions(filepath)
    # INSERT IGNORE: duplicate keys are skipped without raising.
    query = "INSERT IGNORE INTO media (username, media_type, media_url, width, height, media_id, post_type) VALUES (%s, %s, %s, %s, %s, %s, %s)"
    values = (username, media_type, file_url, width, height, media_id, post_type)
    newCursor.execute(query, values)
    newDB.commit()
    os.remove(filepath)
    print(f'[{newCursor.rowcount}]{os.path.basename(filepath)} {file_url}')
def get_video_dimensions(video_path):
    """Return (width, height) reported by OpenCV for *video_path*."""
    vc = cv2.VideoCapture(video_path)
    dims = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
    vc.release()
    return dims
def get_media_type(filename):
    """Return 'image'/'video' for known extensions, else None."""
    lowered = filename.lower()
    if lowered.endswith(('.jpg', '.webp', '.jpeg', '.png', '.gif')):
        return 'image'
    if lowered.endswith(('.mp4', '.mov')):
        return 'video'
def dump_instagram(folder_path):
    """Upload every file in per-user subfolders of *folder_path*.

    The post type is derived from the first path component.
    """
    for root, dirs, files in os.walk(folder_path):
        for folder in dirs:
            username = folder
            folder_path = os.path.join(root, folder)
            # NOTE(review): splitting on '\\' assumes Windows separators;
            # on POSIX this never matches and everything becomes 'post'.
            post_type = 'story' if folder_path.split('\\')[0] == 'stories' else 'post'
            for filename in os.listdir(folder_path):
                media_id = filename.replace('.mp4', '').replace('.jpg', '')
                filepath = os.path.join(folder_path, filename)
                mediatype = get_media_type(filename)
                upload_file(username=username, media_type=mediatype, filepath=filepath, media_id=media_id, post_type=post_type)
if __name__ == '__main__':
    print('Starting processing...')
    newDB, newCursor = config.gen_connection()
    # NOTE(review): hard-coded storage credentials — move to config.
    obj_storage = Storage('345697f9-d9aa-4a6b-a5ec8bffc16d-ceaf-453e', 'storysave')
    newCursor.execute("SELECT media_id FROM media")
    existing_files = [image[0] for image in newCursor.fetchall()]
    # Dedupe first, then upload remaining media, then drop empty folders.
    scan_dupes('media/posts')
    scan_dupes('media/stories')
    scan_dupes('StorySave/')
    dump_instagram('media/posts')
    dump_instagram('media/stories')
    dump_instagram('StorySave/')
    clean_empty_folders('media/posts')
    clean_empty_folders('media/stories')
    clean_empty_folders('StorySave/')
    print("Processing completed.")

@ -0,0 +1,257 @@
from concurrent.futures import ThreadPoolExecutor
from BunnyCDN.Storage import Storage
from instagrapi import Client
import requests
import config
import json
import os
from PIL import Image
import cv2
import getpass
import time
import hashlib
# Desktop-Chrome User-Agent so media CDNs serve their standard responses.
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"}
# Rotating proxy used for all Instagram/CDN traffic below.
# NOTE(review): proxy credentials are committed in source — move to config.
proxies={
    "http": "http://yehyuxsl-rotate:4tl5bvrwkz5e@p.webshare.io:80/",
    "https": "http://yehyuxsl-rotate:4tl5bvrwkz5e@p.webshare.io:80/"
}
def login():
    """Return an authenticated instagrapi Client.

    Reuses a cached session from session_data.json when present;
    otherwise prompts interactively (with optional 2FA) and saves the
    session for next time.
    """
    client = Client()
    try:
        client.load_settings("session_data.json")
    except (FileNotFoundError, json.JSONDecodeError):
        username = input("Enter your Instagram username: ")
        password = getpass.getpass("Enter your Instagram password: ")
        auth = input("Enter your 2FA code (leave blank if not enabled): ")
        if auth:
            client.login(username=username, password=password, verification_code=auth)
        else:
            client.login(username, password)
        # Persist the session so the next run skips the interactive login.
        client.dump_settings("session_data.json")
    return client
def get_media_details(media_item):
    """Normalise an instagrapi media object into a plain dict.

    :param media_item: object with .media_type 1 (image) or 2 (video);
        anything else is reported and returns None.
    :return: dict with taken_at/post_type/media_type/media_id/media_url/
        filename (+ duration for videos), or None when unsupported.
    """
    mediaTypes = {1: 'image', 2: 'video', 8: 'album'}
    # getattr with defaults replaces the old bare try/excepts, which could
    # also mask unrelated errors (and swallowed KeyboardInterrupt).
    taken_at = getattr(media_item, 'taken_at', None)
    post_type = getattr(media_item, 'product_type', None)
    mediaInfo = {'taken_at': taken_at, 'post_type' : post_type, 'media_type': mediaTypes[media_item.media_type]}
    if media_item.media_type == 1:  # Image
        mediaInfo['media_id'] = int(media_item.pk)
        mediaInfo['media_url'] = media_item.thumbnail_url
        mediaInfo['filename'] = f"{media_item.pk}.jpg"
    elif media_item.media_type == 2:  # Video
        mediaInfo['media_id'] = int(media_item.pk)
        mediaInfo['media_url'] = media_item.video_url
        mediaInfo['duration'] = getattr(media_item, 'video_duration', 0)
        mediaInfo['filename'] = f"{media_item.pk}.mp4"
    else:
        print(f"Unsupported media type with ID {media_item.pk}")
        return None
    return mediaInfo
def download_media(mediaInfo, save_dir, attempts=5):
    """Download one media item, upload it to storage and record it in the DB.

    Retries up to *attempts* times on any error, then logs and gives up.
    Deletes the local copy after a successful upload + DB insert.

    :param mediaInfo: dict from get_media_details (mutated: width/height/
        server_url are added)
    :param save_dir: local directory to download into
    :param attempts: remaining retry budget
    """
    try:
        save_path = os.path.join(save_dir, mediaInfo['filename'])
        if not os.path.exists(os.path.dirname(save_path)):
            os.makedirs(os.path.dirname(save_path))
        response = requests.get(mediaInfo['media_url'], stream=True, proxies=proxies)
        response.raise_for_status()
        with open(save_path, 'wb') as out_file:
            for chunk in response.iter_content(chunk_size=8192):
                out_file.write(chunk)
        print(f"Downloaded {save_path}")
        if mediaInfo['media_type'] == 'image':
            with Image.open(save_path) as img:
                mediaInfo['width'], mediaInfo['height'] = img.size
        else:
            mediaInfo['width'], mediaInfo['height'] = get_video_dimensions(save_path)
        server_path = os.path.join('users', save_dir, mediaInfo['filename'])
        upload_to_storage(save_path, server_path)
        mediaInfo['server_url'] = f"https://storysave.b-cdn.net/{server_path}"
        add_media_to_db(mediaInfo)
        os.remove(save_path)
    except Exception as e:
        if attempts > 0:
            print(f"Error when processing {mediaInfo['media_url']}. Error: {e}. Retrying...")
            # BUG FIX: the retry previously called download_media with the
            # wrong arguments (url, save_dir, filename, attempts-1), so
            # every retry crashed on the url string instead of the dict.
            download_media(mediaInfo, save_dir, attempts - 1)
        else:
            print(f"Unexpected error when processing {mediaInfo['media_url']}. Error: {e}")
def upload_to_storage(local_path, server_path):
    """Upload *local_path* to BunnyCDN at *server_path*; failures are logged.

    A fresh Storage client is built per call — presumably to avoid sharing
    one client across executor threads; confirm with the library's docs.
    """
    try:
        obj_storage = Storage('345697f9-d9aa-4a6b-a5ec8bffc16d-ceaf-453e', 'storysave')
        obj_storage.PutFile(local_path, server_path)
        print(f"Uploaded {local_path} to https://storysave.b-cdn.net/{server_path}")
    except Exception as e:
        print(f"Failed to upload {local_path} to {server_path}. Error: {e}")
def add_media_to_db(mediaInfo):
    """Insert one media row; opens its own DB connection per call.

    Expects the keys produced by get_media_details plus user_id, username,
    server_url, width and height. Errors are logged, not raised.
    """
    media_id = mediaInfo['media_id']
    user_id = mediaInfo['user_id']
    username = mediaInfo['username']
    date = mediaInfo['taken_at']
    media_type = mediaInfo['media_type']
    post_type = mediaInfo['post_type']
    # Images carry no duration key, so default to 0.
    duration = mediaInfo.get('duration', 0)
    media_url = mediaInfo['server_url']
    width = mediaInfo['width']
    height = mediaInfo['height']
    try:
        db, cursor = config.gen_connection()
        query = """
        INSERT INTO media (user_id, username, date, media_type, post_type, media_url, duration, width, height, media_id)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        cursor.execute(query, (user_id, username, date, media_type, post_type, media_url, duration, width, height, media_id))
        db.commit()
        print(f"Added media for {username} to the database.")
    except Exception as e:
        print(f"Failed to add media for {username} to the database. Error: {e}")
def insert_highlight_items(media_ids, highlight_id, title, user_id):
    """Bulk-insert highlight membership rows (one per media_id).

    Opens its own DB connection (safe for executor.submit). INSERT IGNORE
    skips rows that already exist; errors are logged, not raised.
    """
    try:
        db, cursor = config.gen_connection()
        query = "INSERT IGNORE INTO highlights (media_id, highlight_id, title, user_id) VALUES (%s, %s, %s, %s)"
        values = [(media_id, highlight_id, title, user_id) for media_id in media_ids]
        cursor.executemany(query, values)
        db.commit()
        if cursor.rowcount > 0:
            print(f"Added {cursor.rowcount} highlight items to the database.")
    except Exception as e:
        print(f"Failed to add highlight items to the database. Error: {e}")
def get_video_dimensions(video_path):
    """Probe *video_path* with OpenCV and return (width, height)."""
    probe = cv2.VideoCapture(video_path)
    frame_w = int(probe.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_h = int(probe.get(cv2.CAP_PROP_FRAME_HEIGHT))
    probe.release()
    return frame_w, frame_h
if __name__ == '__main__':
    client = login()
    client.set_proxy(proxies['https'])

    db, cursor = config.gen_connection()
    cursor.execute("SELECT instagram_username, instagram_user_id, last_checked FROM following ORDER BY id DESC;")
    following = cursor.fetchall()

    # Preload every known media_id so already-saved items are skipped.
    cursor.execute("SELECT media_id FROM media;")
    existing_files = [media[0] for media in cursor.fetchall()]

    with ThreadPoolExecutor(max_workers=10) as executor:
        for user in following:
            try:
                firstImport = False
                username, user_id, lastchecked = user
                # Seconds elapsed since the account was last checked.
                # NOTE(review): when last_checked is NULL this leaves
                # lastchecked = None and the subtraction below raises
                # TypeError, which the outer except swallows — confirm
                # this is intended.
                lastchecked = int(lastchecked.timestamp()) if lastchecked else None
                lastchecked = time.time() - lastchecked
                if not user_id:
                    # First time this account is seen: resolve + store id.
                    firstImport = True
                    user_id = client.user_id_from_username(username)
                    cursor.execute("UPDATE following SET instagram_user_id = %s WHERE instagram_username = %s;", (user_id, username))
                    db.commit()
                    print(f"Updated user ID for {username} to {user_id}")
                    # NOTE(review): this dict lacks the 'media_type' key
                    # download_media reads, so the profile-picture download
                    # always errors into its retry path — verify.
                    profile_pic = client.user_info(user_id).profile_pic_url_hd
                    download_media({'media_url': profile_pic, 'filename': f"{user_id}.jpg"}, os.path.join('profile_pics', username))
                print(f"[{username}]\nChecking: Stories")
                # Only poll accounts not checked within the last hour.
                if lastchecked > 3600:
                    stories = client.user_stories(user_id)
                else:
                    stories = []
                if firstImport:
                    # Initial import also pulls all highlight reels.
                    highlights = client.user_highlights(user_id) # API request
                    for highlight in highlights:
                        try:
                            highlight_items = client.highlight_info_v1(highlight.pk).items # API request
                        except:
                            # NOTE(review): on failure `highlight_items`
                            # keeps the previous highlight's items (or is
                            # unbound on the first iteration) and the lines
                            # below still run — confirm intended.
                            print(f"Failed to get highlight items for {highlight.pk}")
                            time.sleep(5)
                        media_ids = [item.pk for item in highlight_items]
                        executor.submit(insert_highlight_items, media_ids, highlight.pk, highlight.title, user_id)
                        stories.extend(highlight_items)
                newStoryCount = 0
                for story in stories:
                    mediaInfo = get_media_details(story)
                    if mediaInfo['media_id'] in existing_files:
                        continue
                    newStoryCount += 1
                    mediaInfo['user_id'] = user_id
                    mediaInfo['username'] = username
                    mediaInfo['post_type'] = 'story'
                    if mediaInfo['media_url'] and mediaInfo['filename']:
                        filePath = os.path.join('media', 'stories', username)
                        # NOTE(review): download_media's third parameter is
                        # `attempts`; the filename passed here lands in it —
                        # confirm against the function signature.
                        download_media(mediaInfo, filePath, mediaInfo['filename'])
                print("Checking: Posts")
                if lastchecked > 3600:
                    medias = client.user_medias(user_id, 9) # API request
                else:
                    medias = []
                posts = []
                for post in medias:
                    # Flatten albums (type 8) into their child resources.
                    if post.media_type == 8:
                        for item in post.resources:
                            posts.append(item)
                        continue
                    posts.append(post)
                newPostsCount = 0
                for post in posts:
                    mediaInfo = get_media_details(post)
                    if mediaInfo['media_id'] in existing_files:
                        continue
                    newPostsCount += 1
                    mediaInfo['user_id'] = user_id
                    mediaInfo['username'] = username
                    mediaInfo['post_type'] = 'post'
                    if mediaInfo['media_url'] and mediaInfo['filename']:
                        filePath = os.path.join('media', 'posts', username)
                        download_media(mediaInfo, filePath, mediaInfo['filename'])
                if newStoryCount > 0 or newPostsCount > 0:
                    cursor.execute("UPDATE following SET last_checked = NOW() WHERE instagram_username = %s;", (username,))
                    db.commit()
                print(f"New stories: {newStoryCount}\tNew Posts: {newPostsCount}")
                print("=====================================")
                time.sleep(5)
            except:
                # NOTE(review): bare except hides all errors per account.
                print(f"Failed to get stories for {username}")
                time.sleep(5)

@ -0,0 +1,133 @@
from BunnyCDN.Storage import Storage
from PIL import Image
import os, uuid, cv2, config
import hashlib
from moviepy.editor import VideoFileClip
def scan_dupes(folder_path):
    """Delete local files whose content hash already exists in the DB.

    Queries the `media.hash` column fresh on every call; relies on the
    module-level `newCursor` created in the __main__ block.
    """
    newCursor.execute("SELECT hash FROM media")
    existing_files = [image[0] for image in newCursor.fetchall()]
    for root, dirs, files in os.walk(folder_path):
        for folder in dirs:
            folder_path = os.path.join(root, folder)
            for filename in os.listdir(folder_path):
                media_id = filename.replace('.mp4', '').replace('.jpg', '')
                filepath = os.path.join(folder_path, filename)
                if media_id:
                    fileHash = calculate_file_hash(filepath)
                    if fileHash in existing_files:
                        print(f'Duplicate')
                        os.remove(filepath)
def clean_empty_folders(directory):
    """Remove every directory under *directory* that contains nothing."""
    # topdown=False ensures children are visited (and removed) first.
    for top, names, _ in os.walk(directory, topdown=False):
        for candidate in names:
            folder_path = os.path.join(top, candidate)
            if not os.listdir(folder_path):
                os.rmdir(folder_path)
                print(f"Removed empty folder: {folder_path}")
def upload_file(filepath, username, media_type='image', post_type = 'story'):
    """Upload a file to BunnyCDN and insert its row into `media`.

    Relies on module-level `obj_storage`, `newCursor` and `newDB`.
    Removes the local file afterwards.
    """
    filename = os.path.basename(filepath)
    file_extension = filename.split('.')[-1]
    dirtype = 'stories' if post_type == 'story' else 'posts'
    #dirtype = 'profile'
    fileHash = calculate_file_hash(filepath)
    # NOTE(review): neither `media_id` nor `existing_files` is defined at
    # this point, so the check below always raises NameError and the
    # except branch always assigns a fresh uuid — the id-based dedupe
    # never runs. Confirm whether a hash-based check was intended here.
    try:
        if int(media_id) in existing_files:
            print(f'Duplicate')
            os.remove(filepath)
            return True
    except: media_id = uuid.uuid4().hex
    server_path = f'users/{dirtype}/{username}/{media_id}.{file_extension}'
    obj_storage.PutFile(filepath, server_path)
    file_url = f"https://storysave.b-cdn.net/{server_path}"
    duration = 0
    if media_type == 'image':
        try:
            with Image.open(filepath) as img:
                width, height = img.size
        except:
            # Unreadable image: discard it and skip the DB insert.
            os.remove(filepath)
            return
    else:
        width, height = get_video_dimensions(filepath)
        duration = get_video_duration(filepath)
    query = "INSERT IGNORE INTO media (username, media_type, media_url, width, height, post_type, hash, filename, media_id, duration) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    values = (username, media_type, file_url, width, height, post_type, fileHash, filename, media_id, duration)
    newCursor.execute(query, values)
    newDB.commit()
    os.remove(filepath)
    print(f'[{newCursor.rowcount}]{os.path.basename(filepath)} {file_url}')
def get_video_dimensions(video_path):
    """Return (width, height) as reported by OpenCV."""
    stream = cv2.VideoCapture(video_path)
    width = int(stream.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
    stream.release()
    return (width, height)
def get_video_duration(file_path):
    """
    Returns the duration of the video file in seconds.

    :param file_path: Path to the video file
    :return: Duration in seconds (0 if the file cannot be opened as video)
    """
    try:
        with VideoFileClip(file_path) as video:
            return video.duration
    except Exception as e:
        # Consistent with the other scripts in this repo: unreadable files
        # count as 0s instead of aborting the whole upload batch.
        print(f"Error getting duration for {file_path}: {e}")
        return 0
def get_media_type(filename):
    """Classify a filename as 'image' or 'video'; None if unrecognised."""
    suffix = filename.lower()
    if suffix.endswith(('.jpg', '.webp', '.jpeg', '.png', '.gif')):
        return 'image'
    if suffix.endswith(('.mp4', '.mov')):
        return 'video'
def dump_instagram(folder_path):
    """Upload every file found in per-user subfolders of *folder_path*.

    The post type is inferred from the path: any path containing "post"
    is a post, everything else is treated as a story.
    """
    for root, dirs, files in os.walk(folder_path):
        for folder in dirs:
            username = folder
            folder_path = os.path.join(root, folder)
            post_type = 'post' if 'post' in folder_path.lower() else 'story'
            for filename in os.listdir(folder_path):
                filepath = os.path.join(folder_path, filename)
                mediatype = get_media_type(filename)
                upload_file(username=username, media_type=mediatype, filepath=filepath, post_type=post_type)
def calculate_file_hash(file_path, hash_func='sha256'):
    """Return the hex digest of *file_path*'s contents.

    :param file_path: file to hash
    :param hash_func: algorithm name accepted by hashlib.new
    """
    h = hashlib.new(hash_func)
    with open(file_path, 'rb') as file:
        # Walrus loop replaces the old `chunk = 0` sentinel, which compared
        # an int to bytes and hashed a trailing empty chunk.
        while chunk := file.read(8192):
            h.update(chunk)
    return h.hexdigest()
if __name__ == '__main__':
    print('Starting processing...')
    newDB, newCursor = config.gen_connection()
    # NOTE(review): hard-coded storage credentials — move to config.
    obj_storage = Storage('345697f9-d9aa-4a6b-a5ec8bffc16d-ceaf-453e', 'storysave')
    storiesPath = 'StorySave/'
    dump_instagram(storiesPath)
    print("Processing completed.")

@ -0,0 +1 @@
gAAAAABmRUff7c9t9gngWj_2cwvaTBrUDJ_JUyYVUfG-p3SvDV7qOSHddJ4eHADiJeRtJNtY9UxkohSB5I1MmLahAb_hxxwIVA==

@ -0,0 +1,19 @@
import re
def process_func(input_hex):
    """Resolve an obfuscated processFunc(0x...) token to its keyword.

    :param input_hex: Hex string (without the 0x prefix) taken from a
        processFunc(0x...) call site in the scraped HTML.
    :return: The deobfuscated keyword string.
    """
    keywords = ['set', 'b64d', 'href', 'domain', 'decode', '5', '.com/', 'document', 'prototype', '?id=', 'giabk', 'innerHeight', 'ver', 'gdd', '2000226', 'gcu', 'oSu', 'gdn', 'memory', 'instantiate', '37420168dpUfmN', 'isy', 'oCu', 'head', 'oDlu', '=([a-z.]+)&?', 'ast', 'then', '1155005PQhArT', 'from', '4896414PJJfCB', 'location', 'length', 'createElement', 'ghrde', '7127624hswjPR', 'navigator', 'ins', '2', 'buffer', '1482980WeuWEm', 'AGFzbQEAAAABHAVgAAF/YAN/f38Bf2ADf39/AX5gAX8AYAF/AX8DCQgAAQIBAAMEAAQFAXABAQEFBgEBgAKAAgYJAX8BQcCIwAILB2cHBm1lbW9yeQIAA3VybAADGV9faW5kaXJlY3RfZnVuY3Rpb25fdGFibGUBABBfX2Vycm5vX2xvY2F0aW9uAAcJc3RhY2tTYXZlAAQMc3RhY2tSZXN0b3JlAAUKc3RhY2tBbGxvYwAGCroFCCEBAX9BuAhBuAgoAgBBE2xBoRxqQYfC1y9wIgA2AgAgAAuTAQEFfxAAIAEgAGtBAWpwIABqIgQEQEEAIQBBAyEBA0AgAUEDIABBA3AiBxshARAAIgZBFHBBkAhqLQAAIQMCfyAFQQAgBxtFBEBBACAGIAFwDQEaIAZBBnBBgAhqLQAAIQMLQQELIQUgACACaiADQawILQAAazoAACABQQFrIQEgAEEBaiIAIARJDQALCyACIARqC3ECA38CfgJAIAFBAEwNAANAIARBAWohAyACIAUgACAEai0AAEEsRmoiBUYEQCABIANMDQIDQCAAIANqMAAAIgdCLFENAyAGQgp+IAd8QjB9IQYgA0EBaiIDIAFHDQALDAILIAMhBCABIANKDQALCyAGC+sCAgl/An5BuAggACABQQMQAiIMQbAIKQMAIg0gDCANVBtBqAgoAgAiA0EyaiIEIARsQegHbK2AIg0gA0EOaiIJIANBBGsgDEKAgPHtxzBUIgobrYA+AgAQABoQABogAkLo6NGDt87Oly83AABBB0EKIAxCgJaineUwVCIFG0ELQQwgBRsgAkEIahABIQMQABojAEEQayIEJAAgA0EuOgAAIARB4961AzYCDCADQQFqIQZBACEDIARBDGoiCy0AACIHBEADQCADIAZqIAc6AAAgCyADQQFqIgNqLQAAIgcNAAsLIARBEGokACADIAZqIQNBuAggDSAJrYBCAEKAgIAgQoCAgDBCgICAGCAMQoCYxq7PMVQbIAUbIAobhCAAIAFBBRACQhuGhD4CABAAGkECQQQQAEEDcCIAGyEBA0AgA0EvOgAAIAAgCEYhBCABQQUgA0EBahABIQMgCEEBaiEIIARFDQALIAMgAmsLBAAjAAsGACAAJAALEAAjACAAa0FwcSIAJAAgAAsFAEG8CAsLOwMAQYAICwaeoqassrYAQZAICxSfoKGjpKWnqKmqq62ur7Cxs7S1twBBqAgLDgoAAAA9AAAAAKzMX48B', 'src', 'match', '=(\d+)', 'userAgent', '__ab', 'oRu', '4936011fRStfE', 'type', 'gru', 'appendChild', 'oAu', '2zLdXaM', 'join', 'gfu', 'url', 'resolve', '__cngfg', 'concat', 'win', 'gfco', 'gau', 'hostname', 'time', 'script', 'gdlu', 'exports', 'sessionStorage', 'gcuk', '7461560KheCri']
    # The obfuscator offsets every table index by 0x154; undo it modulo the
    # table length so any hex token maps back into range.
    index = (int(input_hex, 16) - 0x154) % len(keywords)
    return keywords[index]
# Rewrite TEST.HTML in place, replacing each obfuscated processFunc(0x..)
# call with the quoted string literal it resolves to.
with open("TEST.HTML", "r", encoding='utf-8') as file:
    content = file.read()
pattern = r'processFunc\(0x([0-9a-fA-F]+)\)'
matches = re.findall(pattern, content)
for hex_val in set(matches):
    replacement = process_func(hex_val)
    # str.replace instead of re.sub: the replacement target is a literal
    # token, and the original rebuilt a regex from a non-raw f-string whose
    # `\(` escapes trigger SyntaxWarning on modern Python.
    content = content.replace(f'processFunc(0x{hex_val})', f"'{replacement}'")
with open("TEST.HTML", "w", encoding='utf-8') as file:
    file.write(content)

@ -0,0 +1,42 @@
import os
from PIL import Image
def resize_image(image_path, max_width, max_height):
    """Shrink an image in place so it fits within max_width x max_height.

    Aspect ratio is preserved; images already within bounds are left as-is.
    Errors are reported but not raised (best-effort batch behavior).

    :param image_path: Path of the image file (overwritten when resized).
    :param max_width: Maximum allowed width in pixels.
    :param max_height: Maximum allowed height in pixels.
    """
    try:
        image = Image.open(image_path)
        width, height = image.size
        if width > max_width or height > max_height:
            aspect_ratio = width / height
            if width > max_width:
                new_width = max_width
                new_height = int(new_width / aspect_ratio)
            else:
                new_height = max_height
                new_width = int(new_height * aspect_ratio)
            # BUG FIX: Image.resize takes a single (width, height) tuple; the
            # original passed two positional ints, which raises TypeError and
            # sent every oversized image into the except branch unresized.
            resized_image = image.resize((new_width, new_height))
            resized_image.save(image_path)
            print("Image resized successfully:", image_path)
        else:
            print("Image dimensions are within the desired limits:", image_path)
    except Exception as e:
        print('failed', e)
def process_images_in_folder(folder_path, max_width, max_height):
    """Walk *folder_path* recursively and resize every image file found."""
    image_extensions = (".jpg", ".jpeg", ".png", ".bmp", ".gif")
    for current_dir, _, names in os.walk(folder_path):
        for name in names:
            if name.lower().endswith(image_extensions):
                resize_image(os.path.join(current_dir, name), max_width, max_height)
# Interactive entry point: ask for a directory and cap every image inside
# it to at most 720x1280.
folder_path = input('Path to folder:')
max_width = 720
max_height = 1280
process_images_in_folder(folder_path, max_width, max_height)

@ -0,0 +1,52 @@
https://rule34.xxx/index.php?page=post&s=view&id=8829721
https://rule34.xxx/index.php?page=post&s=view&id=9416031
https://rule34.xxx/index.php?page=post&s=view&id=10105236
https://rule34.xxx/index.php?page=post&s=list&tags=dzooworks+animated
https://rule34.xxx/index.php?page=post&s=list&tags=sageofosiris+animated
https://rule34.xxx/index.php?page=post&s=list&tags=shirami_%28artist%29+animated+
https://rule34.xxx/index.php?page=post&s=view&id=9885293
https://rule34.xxx/index.php?page=post&s=view&id=10034199
https://rule34.xxx/index.php?page=post&s=view&id=10102882
https://rule34.xxx/index.php?page=post&s=view&id=10125394
https://rule34.xxx/index.php?page=post&s=view&id=7225351
https://rule34.xxx/index.php?page=post&s=view&id=8648800
https://rule34.xxx/index.php?page=post&s=view&id=8805292
https://rule34.xxx/index.php?page=post&s=view&id=9279505
https://rule34.xxx/index.php?page=post&s=view&id=9443010
https://rule34.xxx/index.php?page=post&s=view&id=9609049
https://rule34.xxx/index.php?page=post&s=list&tags=ivan_e_recshun+animated+
https://rule34.xxx/index.php?page=post&s=list&tags=chloeangelva+animated+
https://rule34.xxx/index.php?page=post&s=list&tags=zmsfm+animated+
https://rule34.xxx/index.php?page=post&s=list&tags=d.va+animated
https://rule34.xxx/index.php?page=post&s=list&tags=youngiesed
https://rule34.xxx/index.php?page=post&s=list&tags=lerico213+animated+
https://rule34.xxx/index.php?page=post&s=view&id=9955496
https://rule34.xxx/index.php?page=post&s=list&tags=lerico213+animated+
https://rule34.xxx/index.php?page=post&s=view&id=9745604
https://rule34.xxx/index.php?page=post&s=view&id=9669668
https://rule34.xxx/index.php?page=post&s=list&tags=speedosausage
https://rule34.xxx/index.php?page=post&s=view&id=9670073
https://rule34.xxx/index.php?page=post&s=list&tags=animated+cute
https://rule34.xxx/index.php?page=post&s=view&id=9900309
https://rule34.xxx/index.php?page=post&s=view&id=10114922
https://rule34.xxx/index.php?page=post&s=list&tags=realistic+animated+
https://rule34.xxx/index.php?page=post&s=view&id=9900309
https://rule34.xxx/index.php?page=post&s=list&tags=checkpik+animated+
https://rule34.xxx/index.php?page=post&s=view&id=9530599
https://rule34.xxx/index.php?page=post&s=list&tags=pewposterous+animated+
https://rule34.xxx/index.php?page=post&s=view&id=7983487
https://rule34.xxx/index.php?page=post&s=view&id=9664965
https://rule34.xxx/index.php?page=post&s=view&id=10025400
https://rule34.xxx/index.php?page=post&s=view&id=4710252
https://rule34.xxx/index.php?page=post&s=view&id=8858439
https://rule34.xxx/index.php?page=post&s=view&id=9423465

@ -0,0 +1,383 @@
import cv2, os, json, config, time, hashlib, requests
from concurrent.futures import ThreadPoolExecutor
from moviepy.editor import VideoFileClip
from cryptography.fernet import Fernet
from BunnyCDN.Storage import Storage
from instagrapi import Client
from PIL import Image
# Browser-like User-Agent so Instagram's CDN serves media downloads.
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"}
# Rotating proxy applied to every outbound request.
# NOTE(review): proxy credentials are hard-coded — move to config/secrets.
proxies={
    "http": "http://yehyuxsl-rotate:4tl5bvrwkz5e@p.webshare.io:80/",
    "https": "http://yehyuxsl-rotate:4tl5bvrwkz5e@p.webshare.io:80/"
}
def file_hash(filename, hash_algo='sha256'):
    """
    Compute the hash of a file.

    :param filename: Path to the file.
    :param hash_algo: Hashing algorithm to use (e.g., 'sha256', 'md5').
    :return: Hexadecimal hash string.
    """
    digest = hashlib.new(hash_algo)
    with open(filename, 'rb') as stream:
        # iter() with a b'' sentinel reads 8 KiB blocks until EOF.
        for block in iter(lambda: stream.read(8192), b''):
            digest.update(block)
    return digest.hexdigest()
def get_video_duration(file_path):
    """
    Returns the duration of the video file in seconds.

    :param file_path: Path to the video file
    :return: Duration in seconds (0 when the file cannot be opened/parsed)
    """
    try:
        with VideoFileClip(file_path) as video:
            return video.duration
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; the best-effort "0 on failure" contract is
        # unchanged.
        return 0
def login(force=False):
    """Return an authenticated instagrapi Client.

    Reuses the cached session from session_data.json when available;
    otherwise decrypts the stored password (p.enc with key.enc) and performs
    a fresh interactive login, prompting for a 2FA code.

    :param force: When True, skip the cached session and log in fresh.
    :return: Logged-in instagrapi Client instance.
    """
    client = Client()
    try:
        if not force:
            client.load_settings("session_data.json")
        else:
            # Deliberately jump to the fresh-login path below.
            raise FileNotFoundError
    except (FileNotFoundError, json.JSONDecodeError):
        #username = input("Enter your Instagram username: ")
        #password = getpass.getpass("Enter your Instagram password: ")
        # Password is stored Fernet-encrypted on disk; key.enc holds the key.
        with open('p.enc', 'rb') as encrypted_file:
            encrypted_data = encrypted_file.read()
        fernet = Fernet(open('key.enc', 'r').read())
        password = str(fernet.decrypt(encrypted_data), 'utf-8')
        username = 'olivercury'
        auth = input("Enter your 2FA code (leave blank if not enabled): ")
        if auth:
            client.login(username=username, password=password, verification_code=auth)
        else:
            client.login(username, password)
        # Cache the session so future runs skip the interactive login.
        client.dump_settings("session_data.json")
    print("Logged in successfully.")
    return client
def parse_media_data(media_item):
    """Extract a flat dict of download info from an instagrapi media item.

    :param media_item: instagrapi media object (story, post, or album child).
    :return: dict with taken_at, post_type, media_type, media_id, fileURL,
        filename (and duration for videos), or None for unsupported types.
    """
    mediaTypes = {1: 'image', 2: 'video', 8: 'album'}
    # getattr with a default replaces the original bare try/except probes,
    # which also hid real errors unrelated to a missing attribute.
    taken_at = getattr(media_item, 'taken_at', None)
    post_type = getattr(media_item, 'product_type', None)
    mediaInfo = {'taken_at': taken_at, 'post_type' : post_type, 'media_type': mediaTypes[media_item.media_type]}
    if media_item.media_type == 1: # Image
        mediaInfo['media_id'] = int(media_item.pk)
        mediaInfo['fileURL'] = media_item.thumbnail_url
        mediaInfo['filename'] = f"{media_item.pk}.jpg"
    elif media_item.media_type == 2: # Video
        mediaInfo['media_id'] = int(media_item.pk)
        mediaInfo['fileURL'] = media_item.video_url
        mediaInfo['duration'] = getattr(media_item, 'video_duration', 0)
        mediaInfo['filename'] = f"{media_item.pk}.mp4"
    else:
        # Albums (type 8) are expanded by the caller; anything else is skipped.
        print(f"Unsupported media type with ID {media_item.pk}")
        return None
    return mediaInfo
def download_file(url, filePath):
    """Stream *url* to *filePath*, creating parent directories as needed.

    Failures are logged rather than raised (best-effort batch download).

    :param url: Remote file URL.
    :param filePath: Local destination path.
    """
    try:
        response = requests.get(url, stream=True, headers=headers, proxies=proxies)
        response.raise_for_status()
        directory = os.path.dirname(filePath)
        # exist_ok avoids the check-then-create race of the original; the
        # truthiness guard covers bare filenames, where dirname is '' and
        # os.makedirs('') raises.
        if directory:
            os.makedirs(directory, exist_ok=True)
        with open(filePath, 'wb') as out_file:
            for chunk in response.iter_content(chunk_size=8192):
                out_file.write(chunk)
        print(f"Downloaded {filePath}")
    except Exception as e:
        print(f"Failed to download {url}. Error: {e}")
def process_media(mediaInfo, filePath):
    """Fill in width/height (plus duration for videos, and hash if missing)
    for the downloaded file backing *mediaInfo*."""
    if mediaInfo['media_type'] == 'image':
        with Image.open(filePath) as img:
            mediaInfo['width'], mediaInfo['height'] = img.size
    else:
        dimensions = get_video_dimensions(filePath)
        mediaInfo['width'], mediaInfo['height'] = dimensions
        mediaInfo['duration'] = get_video_duration(filePath)
    # Only compute the hash when the caller hasn't supplied one already.
    if 'hash' not in mediaInfo:
        mediaInfo['hash'] = file_hash(filePath)
def upload_to_storage(local_path, server_path):
    """Upload a local file to the BunnyCDN 'storysave' storage zone.

    Failures are printed, not raised (best-effort).

    :param local_path: Path of the file on disk.
    :param server_path: Destination path within the storage zone.
    """
    try:
        # NOTE(review): storage API key is hard-coded — move to config/secrets.
        obj_storage = Storage('345697f9-d9aa-4a6b-a5ec8bffc16d-ceaf-453e', 'storysave')
        obj_storage.PutFile(local_path, server_path)
        print(f"Uploaded to https://storysave.b-cdn.net/{server_path}")
    except Exception as e:
        print(f"Failed to upload {local_path} to {server_path}. Error: {e}")
def add_media_to_db(mediaInfo):
    """Insert one media row built from *mediaInfo* into the media table.

    Errors are reported to stdout instead of raised so one bad record does
    not stop a batch run.

    :param mediaInfo: dict produced by parse_media_data()/process_media().
    """
    username = mediaInfo['username']
    # Assemble the row up front; a missing key raises here, exactly as the
    # original's pre-try local extraction did.
    row = (
        mediaInfo['media_id'],
        mediaInfo['user_id'],
        mediaInfo['taken_at'] if 'taken_at' in mediaInfo else None,
        mediaInfo['media_type'],
        mediaInfo['post_type'],
        mediaInfo.get('duration', 0),
        mediaInfo['media_url'],
        mediaInfo['width'],
        mediaInfo['height'],
        mediaInfo['hash'],
    )
    try:
        db, cursor = config.gen_connection()
        query = """
        INSERT INTO media (user_id, username, date, media_type, post_type, media_url, duration, width, height, media_id, hash)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        data = (row[1], username, row[2], row[3], row[4], row[6], row[5], row[7], row[8], row[0], row[9])
        cursor.execute(query, data)
        db.commit()
        print(f"Added media for {username} to the database.")
    except Exception as e:
        print(f"Failed to add media for {username} to the database. Error: {e}")
def insert_highlight_items(media_ids, highlight_id, title, user_id):
    """Link story items to a highlight reel in the highlights table.

    Duplicates are skipped by INSERT IGNORE; errors are printed, not raised.

    :param media_ids: Iterable of story media primary keys.
    :param highlight_id: Highlight reel primary key.
    :param title: Highlight title.
    :param user_id: Owning Instagram user ID.
    """
    rows = [(mid, highlight_id, title, user_id) for mid in media_ids]
    try:
        db, cursor = config.gen_connection()
        query = "INSERT IGNORE INTO highlights (media_id, highlight_id, title, user_id) VALUES (%s, %s, %s, %s)"
        cursor.executemany(query, rows)
        db.commit()
        if cursor.rowcount > 0:
            print(f"Added {cursor.rowcount} highlight items to the database.")
    except Exception as e:
        print(f"Failed to add highlight items to the database. Error: {e}")
def get_video_dimensions(video_path):
    """Return (width, height) in pixels for a video file, read via OpenCV."""
    capture = cv2.VideoCapture(video_path)
    size = (
        int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)),
    )
    capture.release()
    return size
if __name__ == '__main__':
    # Archive stories, highlights, posts and profile pictures for every
    # account in the `following` table: download, upload to BunnyCDN, then
    # record each item in the `media` table.
    client = login()
    client.set_proxy(proxies['https'])
    db, cursor = config.gen_connection()
    cursor.execute("SELECT instagram_username, instagram_user_id, favorite FROM following ORDER BY id DESC;")
    following = cursor.fetchall()
    # Move favorited accounts to the front so they are scraped first.
    new_following = []
    for user in following:
        username, user_id, favorite = user
        if bool(favorite):
            new_following.insert(0, user)
        else:
            new_following.append(user)
    following = new_following
    # Media IDs already archived; used to skip duplicates below.
    cursor.execute("SELECT media_id FROM media WHERE media_id IS NOT NULL;")
    existing_files = [media[0] for media in cursor.fetchall()]
    # Optional resume: drop users from the front until we reach the user of
    # the most recently inserted media row.
    # NOTE(review): this loops forever if that user is no longer present in
    # `following` — confirm that cannot happen.
    continueFromLast = input("Continue from the last user? (y/n): ").lower() == 'y'
    if continueFromLast:
        cursor.execute("SELECT username FROM media ORDER BY id DESC LIMIT 1;")
        lastUser = cursor.fetchone()
        if lastUser:
            lastUser = lastUser[0]
            while True:
                if lastUser != following[0][0]:
                    following.pop(0)
                else:
                    break
    # Rough count of Instagram API requests made, printed per user.
    actionsTaken = 0
    # The executor only runs insert_highlight_items asynchronously; all
    # scraping below is sequential.
    with ThreadPoolExecutor(max_workers=10) as executor:
        for user in following:
            # Retry loop: a user is reattempted after login/rate-limit
            # errors and left via `break` on success or unexpected error.
            while True:
                try:
                    firstImport = False
                    username, user_id, isFavorite = user
                    # Resolve and persist the user ID the first time we see
                    # this account.
                    if not user_id:
                        firstImport = True
                        user_id = client.user_id_from_username(username)
                        actionsTaken += 1
                        cursor.execute("UPDATE following SET instagram_user_id = %s WHERE instagram_username = %s;", (user_id, username))
                        db.commit()
                        print(f"Updated user ID for {username} to {user_id}")
                    #################### profile picture ####################
                    profilePath = os.path.join('media', 'profile', username, 'profile.jpg')
                    profileURL = client.user_info(user_id).profile_pic_url_hd
                    download_file(profileURL, profilePath)
                    # Profile pictures are stored under their content hash so
                    # unchanged avatars dedupe on the CDN.
                    fileHash = file_hash(profilePath)
                    serverPath = os.path.join(os.path.dirname(profilePath), f"{fileHash}.jpg")
                    upload_to_storage(profilePath, serverPath)
                    mediaInfo = {
                        'username': username,
                        'user_id': user_id,
                        'media_id': None,
                        'media_type': 'image',
                        'post_type': 'profile',
                        'media_url': f"https://storysave.b-cdn.net/{serverPath}",
                        'duration': 0,
                        'hash': fileHash
                    }
                    process_media(mediaInfo, profilePath)
                    add_media_to_db(mediaInfo)
                    #################### profile picture ####################
                    #################### stories ####################
                    print(f"[{username}]\nChecking: Stories")
                    # fetch user stories
                    stories = client.user_stories(user_id)
                    actionsTaken += 1
                    # fetch user's highlights and add to stories
                    if firstImport or isFavorite:
                        highlights = client.user_highlights(user_id) # API request
                        actionsTaken += 1
                        for highlight in highlights:
                            try:
                                highlight_items = client.highlight_info_v1(highlight.pk).items # API request
                                actionsTaken += 1
                            except:
                                print(f"Failed to get highlight items for {highlight.pk}")
                                time.sleep(5)
                            # NOTE(review): when the fetch above fails,
                            # highlight_items still holds the PREVIOUS
                            # highlight's items (or is unbound on the first
                            # iteration) — this likely needs a `continue` in
                            # the except block.
                            media_ids = [item.pk for item in highlight_items]
                            executor.submit(insert_highlight_items, media_ids, highlight.pk, highlight.title, user_id)
                            stories.extend(highlight_items)
                    # process stories and highlight stories
                    newStoryCount = 0
                    for story in stories:
                        try:
                            mediaInfo = parse_media_data(story)
                            # skip duplicates
                            if mediaInfo['media_id'] in existing_files:
                                continue
                            newStoryCount += 1
                            mediaInfo['user_id'] = user_id
                            mediaInfo['username'] = username
                            mediaInfo['post_type'] = 'story'
                            if mediaInfo['fileURL'] and mediaInfo['filename']:
                                filePath = os.path.join('media', 'stories', username, mediaInfo['filename'])
                                mediaInfo['media_url'] = f"https://storysave.b-cdn.net/{filePath}"
                                download_file(mediaInfo['fileURL'], filePath)
                                process_media(mediaInfo, filePath)
                                # Mirror the local relative path on the CDN.
                                upload_to_storage(filePath, filePath)
                                add_media_to_db(mediaInfo)
                                os.remove(filePath)
                                existing_files.append(mediaInfo['media_id'])
                        except Exception as e:
                            print(f"Failed to process story for {username}. Error: {e}")
                    #################### stories ####################
                    #################### posts ####################
                    print("Checking: Posts")
                    medias = client.user_medias(user_id, 36) # API request
                    actionsTaken += 1
                    # Flatten albums (type 8) into their child resources.
                    posts = []
                    for post in medias:
                        if post.media_type == 8:
                            for item in post.resources:
                                posts.append(item)
                            continue
                        posts.append(post)
                    newPostsCount = 0
                    for post in posts:
                        # NOTE(review): parse_media_data may return None for
                        # unsupported types; the subscript below would then
                        # raise and be caught by the outer except.
                        mediaInfo = parse_media_data(post)
                        if mediaInfo['media_id'] in existing_files:
                            continue
                        newPostsCount += 1
                        mediaInfo['user_id'] = user_id
                        mediaInfo['username'] = username
                        mediaInfo['post_type'] = 'post'
                        if mediaInfo['fileURL'] and mediaInfo['filename']:
                            filePath = os.path.join('media', 'posts', username, mediaInfo['filename'])
                            mediaInfo['media_url'] = f"https://storysave.b-cdn.net/{filePath}"
                            download_file(mediaInfo['fileURL'], filePath)
                            process_media(mediaInfo, filePath)
                            upload_to_storage(filePath, filePath)
                            add_media_to_db(mediaInfo)
                            os.remove(filePath)
                            existing_files.append(mediaInfo['media_id'])
                    #################### posts ####################
                    print(f"New stories: {newStoryCount}\tNew Posts: {newPostsCount}")
                    print(f"Actions taken: {actionsTaken}")
                    print("=====================================")
                    break
                except Exception as e:
                    if "login_required" in str(e):
                        print("Please log in to your account again.")
                        client = login(force=True)
                    elif "Please wait a few minutes before you try again." in str(e):
                        # NOTE(review): despite the message, no sleep happens
                        # here — it only re-logins and retries immediately.
                        print("Rate limited. Waiting for 5 minutes...")
                        client = login(force=True)
                    else:
                        print("An unexpected error occurred:", e)
                        break
    # TO DO
    # ADD DATE TO POSTS / STORIES
    # FETCH ONLY THE NEW STORIES
    # MINIMIZE DATABASE CONNECTIONS

@ -0,0 +1,422 @@
import cv2, os, json, config, time, hashlib, requests
from concurrent.futures import ThreadPoolExecutor
from moviepy.editor import VideoFileClip
from cryptography.fernet import Fernet
from BunnyCDN.Storage import Storage
from instagrapi import Client
from PIL import Image
# Browser-like User-Agent so Instagram's CDN serves media downloads.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
}
# Rotating proxy; only applied to the instagrapi client in this variant
# (download_file below has its proxies argument commented out).
# NOTE(review): proxy credentials are hard-coded — move to config/secrets.
proxies = {
    "http": "http://yehyuxsl-rotate:4tl5bvrwkz5e@p.webshare.io:80/",
    "https": "http://yehyuxsl-rotate:4tl5bvrwkz5e@p.webshare.io:80/",
}
def file_hash(filename, hash_algo="sha256"):
    """
    Compute the hash of a file.

    :param filename: Path to the file.
    :param hash_algo: Hashing algorithm to use (e.g., 'sha256', 'md5').
    :return: Hexadecimal hash string.
    """
    digest = hashlib.new(hash_algo)
    with open(filename, "rb") as stream:
        # iter() with a b"" sentinel reads 8 KiB blocks until EOF.
        for block in iter(lambda: stream.read(8192), b""):
            digest.update(block)
    return digest.hexdigest()
def get_video_duration(file_path):
    """
    Returns the duration of the video file in seconds.

    :param file_path: Path to the video file
    :return: Duration in seconds (0 when the file cannot be opened/parsed)
    """
    try:
        with VideoFileClip(file_path) as video:
            return video.duration
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; the "0 on failure" contract is unchanged.
        return 0
def login(force=False):
    """Return an authenticated instagrapi Client.

    Reuses the cached session from session_data.json when available;
    otherwise decrypts the stored password (p.enc with key.enc) and performs
    a fresh interactive login, prompting for a 2FA code.

    :param force: When True, skip the cached session and log in fresh.
    :return: Logged-in instagrapi Client instance.
    """
    client = Client()
    try:
        if not force:
            client.load_settings("session_data.json")
        else:
            # Deliberately jump to the fresh-login path below.
            raise FileNotFoundError
    except (FileNotFoundError, json.JSONDecodeError):
        # username = input("Enter your Instagram username: ")
        # password = getpass.getpass("Enter your Instagram password: ")
        # Password is stored Fernet-encrypted on disk; key.enc holds the key.
        with open("p.enc", "rb") as encrypted_file:
            encrypted_data = encrypted_file.read()
        fernet = Fernet(open("key.enc", "r").read())
        password = str(fernet.decrypt(encrypted_data), "utf-8")
        username = "olivercury"
        auth = input("Enter your 2FA code (leave blank if not enabled): ")
        if auth:
            client.login(username=username, password=password, verification_code=auth)
        else:
            client.login(username, password)
        # Cache the session so future runs skip the interactive login.
        client.dump_settings("session_data.json")
    print("Logged in successfully.")
    return client
def parse_media_data(media_item):
    """Extract a flat dict of download info from an instagrapi media item.

    :param media_item: instagrapi media object (story, post, or album child).
    :return: dict with taken_at, post_type, media_type, media_id, fileURL,
        filename (and duration for videos), or None for unsupported types.
    """
    mediaTypes = {1: "image", 2: "video", 8: "album"}
    # getattr with a default replaces the original bare try/except probes,
    # which also hid real errors unrelated to a missing attribute.
    taken_at = getattr(media_item, "taken_at", None)
    post_type = getattr(media_item, "product_type", None)
    mediaInfo = {
        "taken_at": taken_at,
        "post_type": post_type,
        "media_type": mediaTypes[media_item.media_type],
    }
    if media_item.media_type == 1:  # Image
        mediaInfo["media_id"] = int(media_item.pk)
        mediaInfo["fileURL"] = media_item.thumbnail_url
        mediaInfo["filename"] = f"{media_item.pk}.jpg"
    elif media_item.media_type == 2:  # Video
        mediaInfo["media_id"] = int(media_item.pk)
        mediaInfo["fileURL"] = media_item.video_url
        mediaInfo["duration"] = getattr(media_item, "video_duration", 0)
        mediaInfo["filename"] = f"{media_item.pk}.mp4"
    else:
        # Albums (type 8) are expanded by the caller; anything else is skipped.
        print(f"Unsupported media type with ID {media_item.pk}")
        return None
    return mediaInfo
def download_file(url, filePath):
    """Stream *url* to *filePath*, creating parent directories as needed.

    Failures are logged rather than raised (best-effort batch download).

    :param url: Remote file URL.
    :param filePath: Local destination path.
    """
    try:
        response = requests.get(url, stream=True, headers=headers)  # , proxies=proxies
        response.raise_for_status()
        directory = os.path.dirname(filePath)
        # exist_ok avoids the check-then-create race of the original; the
        # truthiness guard covers bare filenames, where dirname is "" and
        # os.makedirs("") raises.
        if directory:
            os.makedirs(directory, exist_ok=True)
        with open(filePath, "wb") as out_file:
            for chunk in response.iter_content(chunk_size=8192):
                out_file.write(chunk)
        print(f"Downloaded {filePath}")
    except Exception as e:
        print(f"Failed to download {url}. Error: {e}")
def process_media(mediaInfo, filePath):
    """Fill in width/height (plus duration for videos, and hash if missing)
    for the downloaded file backing *mediaInfo*."""
    if mediaInfo["media_type"] == "image":
        with Image.open(filePath) as img:
            mediaInfo["width"], mediaInfo["height"] = img.size
    else:
        dimensions = get_video_dimensions(filePath)
        mediaInfo["width"], mediaInfo["height"] = dimensions
        mediaInfo["duration"] = get_video_duration(filePath)
    # Only compute the hash when the caller hasn't supplied one already.
    if "hash" not in mediaInfo:
        mediaInfo["hash"] = file_hash(filePath)
def upload_to_storage(local_path, server_path):
    """Upload a local file to the BunnyCDN 'storysave' storage zone.

    Failures are printed, not raised (best-effort).

    :param local_path: Path of the file on disk.
    :param server_path: Destination path within the storage zone.
    """
    try:
        # NOTE(review): storage API key is hard-coded — move to config/secrets.
        obj_storage = Storage("345697f9-d9aa-4a6b-a5ec8bffc16d-ceaf-453e", "storysave")
        obj_storage.PutFile(local_path, server_path)
        print(f"Uploaded to https://storysave.b-cdn.net/{server_path}")
    except Exception as e:
        print(f"Failed to upload {local_path} to {server_path}. Error: {e}")
def add_media_to_db(mediaInfo):
    """Insert one media row built from *mediaInfo* into the media table.

    Errors are reported to stdout instead of raised so one bad record does
    not stop a batch run.

    :param mediaInfo: dict produced by parse_media_data()/process_media().
    """
    username = mediaInfo["username"]
    # Assemble the row up front; a missing key raises here, exactly as the
    # original's pre-try local extraction did.
    data = (
        mediaInfo["user_id"],
        username,
        mediaInfo["taken_at"] if "taken_at" in mediaInfo else None,
        mediaInfo["media_type"],
        mediaInfo["post_type"],
        mediaInfo["media_url"],
        mediaInfo.get("duration", 0),
        mediaInfo["width"],
        mediaInfo["height"],
        mediaInfo["media_id"],
        mediaInfo["hash"],
    )
    try:
        db, cursor = config.gen_connection()
        query = """
        INSERT INTO media (user_id, username, date, media_type, post_type, media_url, duration, width, height, media_id, hash)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        cursor.execute(query, data)
        db.commit()
        print(f"Added media for {username} to the database.")
    except Exception as e:
        print(f"Failed to add media for {username} to the database. Error: {e}")
def insert_highlight_items(media_ids, highlight_id, title, user_id):
    """Link story items to a highlight reel in the highlights table.

    Duplicates are skipped by INSERT IGNORE; errors are printed, not raised.

    :param media_ids: Iterable of story media primary keys.
    :param highlight_id: Highlight reel primary key.
    :param title: Highlight title.
    :param user_id: Owning Instagram user ID.
    """
    rows = [(mid, highlight_id, title, user_id) for mid in media_ids]
    try:
        db, cursor = config.gen_connection()
        query = "INSERT IGNORE INTO highlights (media_id, highlight_id, title, user_id) VALUES (%s, %s, %s, %s)"
        cursor.executemany(query, rows)
        db.commit()
        if cursor.rowcount > 0:
            print(f"Added {cursor.rowcount} highlight items to the database.")
    except Exception as e:
        print(f"Failed to add highlight items to the database. Error: {e}")
def get_video_dimensions(video_path):
    """Return (width, height) in pixels for a video file, read via OpenCV."""
    capture = cv2.VideoCapture(video_path)
    size = (
        int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)),
    )
    capture.release()
    return size
if __name__ == "__main__":
    # Archive stories, highlights, posts and profile pictures for every
    # account in the `following` table: download, upload to BunnyCDN, then
    # record each item in the `media` table.
    client = login()
    client.set_proxy(proxies["https"])
    db, cursor = config.gen_connection()
    # Favorites first via ORDER BY (replaces the manual reorder of the
    # earlier variant of this script).
    cursor.execute(
        "SELECT instagram_username, instagram_user_id, favorite FROM following ORDER BY favorite DESC, id DESC;"
    )
    following = cursor.fetchall()
    # Media IDs already archived; used to skip duplicates below.
    cursor.execute("SELECT media_id FROM media WHERE media_id IS NOT NULL;")
    existing_files = [media[0] for media in cursor.fetchall()]
    # Optional resume: slice the list from the user of the most recently
    # inserted media row onward.
    continueFromLast = input("Continue from the last user? (y/N): ").lower() == "y"
    if continueFromLast:
        cursor.execute("SELECT username FROM media ORDER BY id DESC LIMIT 1;")
        lastUser = cursor.fetchone()
        if lastUser:
            lastUser = lastUser[0]
            for idx, user in enumerate(following):
                if user[0] == lastUser:
                    following = following[idx:]
                    break
    # Rough count of Instagram API requests made, printed per user.
    actionsTaken = 0
    # The executor only runs insert_highlight_items asynchronously; all
    # scraping below is sequential.
    with ThreadPoolExecutor(max_workers=10) as executor:
        for user in following:
            # Retry loop: a user is reattempted after login/rate-limit
            # errors and left via `break` on success or unexpected error.
            while True:
                try:
                    firstImport = False
                    username, user_id, isFavorite = user
                    # Resolve and persist the user ID the first time we see
                    # this account.
                    if not user_id:
                        firstImport = True
                        user_id = client.user_id_from_username(username)
                        actionsTaken += 1
                        cursor.execute(
                            "UPDATE following SET instagram_user_id = %s WHERE instagram_username = %s;",
                            (user_id, username),
                        )
                        db.commit()
                        print(f"Updated user ID for {username} to {user_id}")
                    #################### profile picture ####################
                    profilePath = os.path.join(
                        "media", "profile", username, "profile.jpg"
                    )
                    profileURL = client.user_info(user_id).profile_pic_url_hd
                    download_file(profileURL, profilePath)
                    # Profile pictures are stored under their content hash so
                    # unchanged avatars dedupe on the CDN.
                    fileHash = file_hash(profilePath)
                    serverPath = os.path.join(
                        os.path.dirname(profilePath), f"{fileHash}.jpg"
                    )
                    upload_to_storage(profilePath, serverPath)
                    mediaInfo = {
                        "username": username,
                        "user_id": user_id,
                        "media_id": None,
                        "media_type": "image",
                        "post_type": "profile",
                        "media_url": f"https://storysave.b-cdn.net/{serverPath}",
                        "duration": 0,
                        "hash": fileHash,
                    }
                    process_media(mediaInfo, profilePath)
                    add_media_to_db(mediaInfo)
                    #################### profile picture ####################
                    #################### stories ####################
                    print(f"[{username}]\nChecking: Stories")
                    # fetch user stories
                    stories = client.user_stories(user_id)
                    actionsTaken += 1
                    # fetch user's highlights and add to stories
                    if firstImport or isFavorite:
                        highlights = client.user_highlights(user_id)  # API request
                        actionsTaken += 1
                        for highlight in highlights:
                            try:
                                highlight_items = client.highlight_info_v1(
                                    highlight.pk
                                ).items  # API request
                                actionsTaken += 1
                            except:
                                print(
                                    f"Failed to get highlight items for {highlight.pk}"
                                )
                                time.sleep(5)
                            # NOTE(review): when the fetch above fails,
                            # highlight_items still holds the PREVIOUS
                            # highlight's items (or is unbound on the first
                            # iteration) — this likely needs a `continue` in
                            # the except block.
                            media_ids = [item.pk for item in highlight_items]
                            executor.submit(
                                insert_highlight_items,
                                media_ids,
                                highlight.pk,
                                highlight.title,
                                user_id,
                            )
                            stories.extend(highlight_items)
                    # process stories and highlight stories
                    newStoryCount = 0
                    for story in stories:
                        try:
                            mediaInfo = parse_media_data(story)
                            # skip duplicates
                            if mediaInfo["media_id"] in existing_files:
                                continue
                            newStoryCount += 1
                            mediaInfo["user_id"] = user_id
                            mediaInfo["username"] = username
                            mediaInfo["post_type"] = "story"
                            if mediaInfo["fileURL"] and mediaInfo["filename"]:
                                filePath = os.path.join(
                                    "media", "stories", username, mediaInfo["filename"]
                                )
                                mediaInfo["media_url"] = (
                                    f"https://storysave.b-cdn.net/{filePath}"
                                )
                                download_file(mediaInfo["fileURL"], filePath)
                                process_media(mediaInfo, filePath)
                                # Mirror the local relative path on the CDN.
                                upload_to_storage(filePath, filePath)
                                add_media_to_db(mediaInfo)
                                os.remove(filePath)
                                existing_files.append(mediaInfo["media_id"])
                        except Exception as e:
                            print(f"Failed to process story for {username}. Error: {e}")
                    #################### stories ####################
                    #################### posts ####################
                    print("Checking: Posts")
                    medias = client.user_medias(user_id, 36)  # API request
                    actionsTaken += 1
                    # Flatten albums (type 8) into their child resources.
                    posts = []
                    for post in medias:
                        if post.media_type == 8:
                            for item in post.resources:
                                posts.append(item)
                            continue
                        posts.append(post)
                    newPostsCount = 0
                    for post in posts:
                        # NOTE(review): parse_media_data may return None for
                        # unsupported types; the subscript below would then
                        # raise and be caught by the outer except.
                        mediaInfo = parse_media_data(post)
                        if mediaInfo["media_id"] in existing_files:
                            continue
                        newPostsCount += 1
                        mediaInfo["user_id"] = user_id
                        mediaInfo["username"] = username
                        mediaInfo["post_type"] = "post"
                        if mediaInfo["fileURL"] and mediaInfo["filename"]:
                            filePath = os.path.join(
                                "media", "posts", username, mediaInfo["filename"]
                            )
                            mediaInfo["media_url"] = (
                                f"https://storysave.b-cdn.net/{filePath}"
                            )
                            download_file(mediaInfo["fileURL"], filePath)
                            process_media(mediaInfo, filePath)
                            upload_to_storage(filePath, filePath)
                            add_media_to_db(mediaInfo)
                            os.remove(filePath)
                            existing_files.append(mediaInfo["media_id"])
                    #################### posts ####################
                    print(f"New stories: {newStoryCount}\tNew Posts: {newPostsCount}")
                    print(f"Actions taken: {actionsTaken}")
                    print("=====================================")
                    break
                except Exception as e:
                    if "login_required" in str(e):
                        print("Please log in to your account again.")
                        client = login(force=True)
                    elif "Please wait a few minutes before you try again." in str(e):
                        # NOTE(review): despite the message, no sleep happens
                        # here — it only re-logins and retries immediately.
                        print("Rate limited. Waiting for 5 minutes...")
                        client = login(force=True)
                    else:
                        print("An unexpected error occurred:", e)
                        break
    # TO DO
    # ADD DATE TO POSTS / STORIES
    # FETCH ONLY THE NEW STORIES
    # MINIMIZE DATABASE CONNECTIONS

@ -0,0 +1,32 @@
import requests
# Fetch an Instagram profile picture via the save-free.com downloader.
url = 'https://www.save-free.com/process'
# Payload for the standard "save" (normal resolution) request.
data = {
    'instagram_url': 'natahalieeee',
    'type': 'profile',
    'resource': 'save'
}
# Payload for the "zoom" (full resolution) request.
zoom_data = {
    'instagram_url': 'natahalieeee',
    'type': 'profile',
    'resource': 'zoom'
}
headers = {
    'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36',
    'Referer' : 'https://www.save-free.com/profile-downloader/',
}
# NOTE(review): defined but never passed to the requests.post calls below —
# confirm whether the proxy should actually be used here.
proxies={
    "http": "http://yehyuxsl-rotate:4tl5bvrwkz5e@p.webshare.io:80/",
    "https": "http://yehyuxsl-rotate:4tl5bvrwkz5e@p.webshare.io:80/"
}
# NOTE(review): the first response is discarded and immediately overwritten
# by the zoom request — confirm the 'save' call is needed (it may only prime
# a server-side session).
response = requests.post(url, data=data, headers=headers)
response = requests.post(url, data=zoom_data, headers=headers)
# The zoom endpoint responds with raw image bytes; write them to disk.
with open('image.jpg', 'wb') as f:
    f.write(response.content)

@ -0,0 +1,37 @@
import requests
from bs4 import BeautifulSoup
import cloudscraper
from zenrows import ZenRowsClient
# Desktop-browser User-Agent so urlebird serves the full page markup.
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36"}
def get_tiktok_video(url):
    """Resolve the direct video src URL for a urlebird.com video page.

    :param url: urlebird.com /video/... page URL.
    :return: The src attribute of the page's HTML5 video element.
    """
    zenrows = ZenRowsClient("39cf41d4c1ffcb944fca23a95fee8a2722bf4f28")
    page = zenrows.get(url, headers=headers)
    markup = BeautifulSoup(page.text, 'html.parser')
    return markup.find('div', class_='video_html5').find('video').get('src')
def get_user_videos(username):
    """List the video page URLs shown on a urlebird.com user profile.

    :param username: TikTok username as used in urlebird profile URLs.
    :return: List of video page URLs (last anchor of each thumbnail).
    """
    profile_url = f'https://urlebird.com/user/{username}/'
    zenrows = ZenRowsClient("39cf41d4c1ffcb944fca23a95fee8a2722bf4f28")
    page = zenrows.get(profile_url)
    markup = BeautifulSoup(page.text, 'html.parser')
    return [thumb.find_all('a')[-1].get('href') for thumb in markup.find_all('div', class_='thumb')]
# Smoke test: resolve one known video page (result unused), then print the
# direct video URL for every video on a user's profile.
get_tiktok_video('https://urlebird.com/video/7295074788165373190/')
videos = get_user_videos('liliashaked')
for video in videos:
    print(get_tiktok_video(video))

@ -0,0 +1,2 @@
https://www.redgifs.com/watch/terrificexhaustedgannet#rel=tag%3Anaomi-soraya%2Ca;order=trending
https://www.sex.com/pins

@ -0,0 +1,6 @@
https://www.instagram.com/neomi_hanukayev/
https://www.instagram.com/osher_yakir/
https://www.instagram.com/m1ry2m_/
https://www.instagram.com/4m1t_f1shpot/
https://www.instagram.com/yarden.bengigi/
https://www.instagram.com/a.roniiiiii/
Loading…
Cancel
Save