Scripts That ChatGPT Made Me

This thread will be a dumping ground for the nifty scripts ChatGPT made me, so I don’t lose them, and also to share them with anyone who might want to do the same or a similar task.

This script lists all the droplets in Digital Ocean, along with their tags. It then takes each tag (which holds the Tailscale device ID; see the creation script below) and passes it to a cURL command that uses the Tailscale API to remove that device from Tailscale. As the last step, all the droplets in Digital Ocean are deleted. The script pauses for confirmation before each destructive step.

import requests
import subprocess

# Your DigitalOcean API token
DO_API_TOKEN = "YOUR_DIGITALOCEAN_API_TOKEN"
DO_BASE_URL = "https://api.digitalocean.com/v2"

# Your Tailscale API key
TAILSCALE_API_KEY = "tskey-api-xxxxx"
TAILSCALE_BASE_URL = "https://api.tailscale.com/api/v2/device/"

headers_do = {
    "Authorization": f"Bearer {DO_API_TOKEN}",
    "Content-Type": "application/json"
}

def get_all_droplets():
    """Retrieve all droplets."""
    response = requests.get(f"{DO_BASE_URL}/droplets", headers=headers_do)
    
    if response.status_code != 200:
        print("Error fetching droplets:", response.json())
        return []

    return response.json().get('droplets', [])

def print_droplet_info_and_delete(droplets):
    """Print names and tags of the droplets, execute cURL commands, and then delete the droplets."""
    for droplet in droplets:
        name = droplet.get('name', 'N/A')
        tags = droplet.get('tags', [])
        
        print(f"Name: {name} | Tags: {', '.join(tags)}")
        
        for tag in tags:
            cmd = f"curl -X DELETE '{TAILSCALE_BASE_URL}{tag}' -u '{TAILSCALE_API_KEY}:'"
            print(f"Next command to be executed: {cmd}")
            input("Press Enter to execute the above command...")
            execute_curl_command(cmd)
    
    input("Press Enter to start deleting droplets...")
    for droplet in droplets:
        delete_droplet(droplet['id'])

def execute_curl_command(cmd):
    """Execute the given cURL command."""
    try:
        result = subprocess.run(cmd, shell=True, check=True, capture_output=True, text=True)
        print(result.stdout)
    except subprocess.CalledProcessError as e:
        # With capture_output=True, curl's error message lands in e.stderr
        print(f"Error executing cURL: {e.stderr}")

def delete_droplet(droplet_id):
    """Delete a droplet using its ID."""
    response = requests.delete(f"{DO_BASE_URL}/droplets/{droplet_id}", headers=headers_do)
    if response.status_code == 204:  # 204 No Content indicates success without returning data
        print(f"Droplet with ID {droplet_id} deleted successfully.")
    else:
        print(f"Failed to delete droplet with ID {droplet_id}. Error: {response.json()}")

if __name__ == "__main__":
    droplets = get_all_droplets()
    print_droplet_info_and_delete(droplets)
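
Since the script already pulls in requests, the curl-in-a-subprocess step could be swapped for a plain HTTP call. A minimal sketch of that alternative, reusing the same TAILSCALE_API_KEY and TAILSCALE_BASE_URL constants from above:

import requests

def delete_tailscale_device(device_id):
    """Remove a device from Tailscale without shelling out to curl."""
    response = requests.delete(
        f"{TAILSCALE_BASE_URL}{device_id}",
        auth=(TAILSCALE_API_KEY, ""),  # same basic auth as curl's -u 'key:'
    )
    if response.ok:
        print(f"Removed device {device_id} from Tailscale.")
    else:
        print(f"Tailscale API returned {response.status_code}: {response.text}")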

This script creates a bunch of Digital Ocean droplets: up to 10 of the cheapest available size, one per region.

It will also add each machine to Tailscale and tag the droplet on Digital Ocean with the Tailscale device ID, so that when it’s time to delete the droplet, we can use that tag to remove it from Tailscale as well.
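
The user_data script below is handed to Digital Ocean at creation time and run by cloud-init as root on the droplet’s first boot, which is what lets each machine enroll itself in Tailscale and tag itself without any further interaction.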

import requests
import time

TOKEN = 'xxxx'
HEADERS = {
    'Content-Type': 'application/json',
    'Authorization': f'Bearer {TOKEN}',
}

USER_DATA_SCRIPT = r"""#!/bin/bash

# Define TAILSCALE_AUTH_KEY and DIGITALOCEAN_TOKEN at the beginning of the script
TAILSCALE_AUTH_KEY="xxxx"
DIGITALOCEAN_TOKEN="xxxx"

# Update the system and install jq
apt-get update
apt-get install -y jq

# Install tailscale
curl -fsSL https://tailscale.com/install.sh | sh

# Start tailscale with the provided auth key
tailscale up --ssh --authkey "${TAILSCALE_AUTH_KEY}"

# Wait 5 seconds
sleep 5

# Get the ID of the Self object from tailscale status
SELF_ID=$(tailscale status --json | jq -r '.Self.ID')

# Check if we successfully retrieved the ID (jq prints "null" when the field is missing)
if [ -z "$SELF_ID" ] || [ "$SELF_ID" = "null" ]; then
    echo "Error: Failed to retrieve the ID from tailscale status."
    exit 1
fi

echo "Retrieved ID: $SELF_ID"

# Create a new tag on DigitalOcean using the API
curl -X POST "https://api.digitalocean.com/v2/tags" \
     -H "Content-Type: application/json" \
     -H "Authorization: Bearer ${DIGITALOCEAN_TOKEN}" \
     -d "{\"name\":\"${SELF_ID}\"}"

# Assuming you want to tag the droplet this script runs on, you can obtain its droplet ID from the metadata service
DROPLET_ID=$(curl -s http://169.254.169.254/metadata/v1/id)

# Add the droplet to the tag
curl -X POST "https://api.digitalocean.com/v2/tags/${SELF_ID}/resources" \
     -H "Content-Type: application/json" \
     -H "Authorization: Bearer ${DIGITALOCEAN_TOKEN}" \
     -d "{\"resources\":[{\"resource_id\":\"${DROPLET_ID}\",\"resource_type\":\"droplet\"}]}"
"""

def list_regions():
    response = requests.get("https://api.digitalocean.com/v2/regions", headers=HEADERS)
    response.raise_for_status()
    return response.json()['regions']

def list_sizes():
    response = requests.get("https://api.digitalocean.com/v2/sizes", headers=HEADERS)
    response.raise_for_status()
    return response.json()['sizes']

def cheapest_droplet_per_region(regions, sizes):
    cheapest_droplets = {}
    for region in regions:
        # Skip regions DigitalOcean reports as unavailable for new droplets
        if not region.get('available', True):
            continue
        available_sizes = [size for size in sizes if region['slug'] in size['regions']]
        if not available_sizes:
            continue
        min_size = min(available_sizes, key=lambda x: x['price_monthly'])
        cheapest_droplets[region['slug']] = min_size['slug']
    return cheapest_droplets

def create_droplet(region, size_slug):
    data = {
        "name": f"cheapest-{region}",
        "region": region,
        "size": size_slug,
        "image": "debian-12-x64",
        "ssh_keys": ["xxxx"],
        "firewalls": ["xxxx"],
        "user_data": USER_DATA_SCRIPT
    }
    response = requests.post("https://api.digitalocean.com/v2/droplets", headers=HEADERS, json=data)
    response.raise_for_status()
    return response.json()['droplet']

def main():
    regions = list_regions()
    sizes = list_sizes()
    
    droplets_to_create = cheapest_droplet_per_region(regions, sizes)
    
    count = 0
    for region, size in droplets_to_create.items():
        if count >= 10:
            break
        print(f"Creating droplet in {region} with size {size}")
        create_droplet(region, size)
        count += 1
        if count < 10:  # Pause between creations; skipped once the cap is hit
            time.sleep(10)

if __name__ == "__main__":
    main()
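
To confirm the droplets actually joined the tailnet once their user_data scripts finish, you can list devices through the Tailscale API. A minimal sketch, assuming an API key for your tailnet ("-" is Tailscale’s alias for the key’s default tailnet; the key here is a placeholder):

import requests

TAILSCALE_API_KEY = "tskey-api-xxxx"

def list_tailnet_devices():
    """Print the hostname and device ID of every device on the tailnet."""
    response = requests.get(
        "https://api.tailscale.com/api/v2/tailnet/-/devices",
        auth=(TAILSCALE_API_KEY, ""),
    )
    response.raise_for_status()
    for device in response.json().get("devices", []):
        print(device.get("hostname"), device.get("id"))

if __name__ == "__main__":
    list_tailnet_devices()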

This is a Windows batch file that goes through a directory of ripped Blu-ray discs and scans each disc with BDInfoCLI, placing the output in a text file.

@echo off
setlocal

:: Set the path to the BDInfoCLI executable
set "BDInfoPath=C:\Users\antho\Downloads\BDInfoCLI-ng_v0.7.5.5\BDInfo.exe"

:: Set the root folder to check and the output directory
set "RootFolder=Z:\backup"
set "OutputFolder=Z:\reports"

:: Loop through each directory in the root folder
for /D %%G in ("%RootFolder%\*") do (
    echo Processing "%%~nxG" ...
    
    :: Run the BDInfoCLI command with the current directory name
    "%BDInfoPath%" -w "%%G" "%OutputFolder%"
)

:: Pause to allow the user to see the results
pause
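
If batch isn’t your thing, the same loop is only a few lines of Python; a rough equivalent sketch using the same paths as the batch file:

import os
import subprocess

# Same paths as the batch file above
BDINFO_PATH = r"C:\Users\antho\Downloads\BDInfoCLI-ng_v0.7.5.5\BDInfo.exe"
ROOT_FOLDER = r"Z:\backup"
OUTPUT_FOLDER = r"Z:\reports"

for entry in os.listdir(ROOT_FOLDER):
    disc_path = os.path.join(ROOT_FOLDER, entry)
    if os.path.isdir(disc_path):
        print(f"Processing {entry} ...")
        # Same invocation as the batch file: scan the disc, write the report
        subprocess.run([BDINFO_PATH, "-w", disc_path, OUTPUT_FOLDER])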

This Python script goes through a directory of ripped Blu-ray discs and looks at each disc to find the main feature (usually the largest .m2ts file), then uses ffprobe to find the length of that feature.

Once it has the biggest file and the length, it passes that info on to ffmpeg to take 9 random screenshots of the video and saves them in a directory with the same name as the disc.
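
(One detail worth keeping: the script puts -ss before -i, so ffmpeg seeks on the input side instead of decoding up to each timestamp, which makes pulling screenshots out of a huge m2ts file much faster.)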

import os
import argparse
import subprocess
import random

def find_main_feature(directory):
    """
    Find the main feature m2ts file in a given Blu-ray directory.
    Assumes the main feature is the largest file in the STREAM directory.
    """
    stream_directory = os.path.join(directory, 'BDMV', 'STREAM')
    if not os.path.exists(stream_directory):
        print(f"STREAM directory not found in {directory}.")
        return None

    # List all m2ts files in the STREAM directory
    m2ts_files = [f for f in os.listdir(stream_directory) if f.endswith('.m2ts')]

    # Find the largest m2ts file
    largest_file = None
    largest_size = 0
    for m2ts_file in m2ts_files:
        file_path = os.path.join(stream_directory, m2ts_file)
        file_size = os.path.getsize(file_path)
        if file_size > largest_size:
            largest_size = file_size
            largest_file = file_path

    return largest_file

def capture_screenshots(video_file, output_directory, num_screenshots=9):
    # Get the duration of the video in seconds
    cmd = ["ffprobe", "-v", "error", "-show_entries", "format=duration", "-of",
           "default=noprint_wrappers=1:nokey=1", video_file]
    duration = float(subprocess.check_output(cmd).decode('utf-8').strip())

    # Pick sorted random timestamps; cap the count so short videos don't break random.sample
    num_screenshots = min(num_screenshots, int(duration))
    random_times = sorted(random.sample(range(int(duration)), num_screenshots))

    for i, screenshot_time in enumerate(random_times, 1):
        output_file = os.path.join(output_directory, f"screenshot_{i}.png")
        cmd = ["ffmpeg", "-ss", str(screenshot_time), "-i", video_file,
               "-vframes", "1", output_file]
        subprocess.call(cmd)

def main():
    parser = argparse.ArgumentParser(description="Find the main feature .m2ts file for each ripped Blu-ray in the specified directory and capture screenshots.")
    parser.add_argument("path", help="Path to the directory containing ripped Blu-rays.")
    args = parser.parse_args()

    root_directory = args.path
    screenshots_root = "/storage/backups/screenshots"

    # Go through each Blu-ray directory
    for directory in os.listdir(root_directory):
        directory_path = os.path.join(root_directory, directory)
        if os.path.isdir(directory_path):
            main_feature = find_main_feature(directory_path)
            if main_feature:
                print(f"Main feature for {directory}: {main_feature}")

                # Create a directory for the disc's screenshots
                disc_screenshot_dir = os.path.join(screenshots_root, directory)
                os.makedirs(disc_screenshot_dir, exist_ok=True)

                # Capture screenshots
                capture_screenshots(main_feature, disc_screenshot_dir)
            else:
                print(f"Could not determine main feature for {directory}.")

if __name__ == "__main__":
    main()
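
The helper can also be called on its own if you only want shots from a single file rather than a whole directory of rips; for example (the paths and count here are just placeholders):

# Grab four screenshots from one specific file
capture_screenshots("/storage/backups/blurays/MOVIE/BDMV/STREAM/00000.m2ts",
                    "/tmp/movie-shots", num_screenshots=4)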

This bash script goes through a directory and removes those annoying “.DS_Store” metadata files from the entire directory structure, then uses the Linux tool mktorrent to make a private torrent of each sub-directory.

#!/bin/bash

# Delete all ".DS_Store" files recursively from the current directory down.
find . -name ".DS_Store" -type f -delete
echo "All .DS_Store files have been deleted."

# Define the announce URL and the output directory for torrents
ANNOUNCE_URL="https://tracker.nothere.com/announce"
OUTPUT_DIR="/storage/backups/torrents"

# Ensure the output directory exists, if not, create it
mkdir -p "$OUTPUT_DIR"

# Ensure mktorrent is available before doing any work
if ! command -v mktorrent > /dev/null; then
    echo "mktorrent command not found. Please install mktorrent."
    exit 1
fi

# For each first-level directory, create a private torrent file using mktorrent.
for dir in */; do
    mktorrent -p -a "$ANNOUNCE_URL" -o "$OUTPUT_DIR/${dir%/}.torrent" "$dir"
    echo "Created private torrent for $dir with announce URL: $ANNOUNCE_URL. Saved to $OUTPUT_DIR."
done
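
For reference: -p sets the private flag in the torrent (so clients won’t use DHT or peer exchange for it), -a sets the announce URL, and -o sets the output file name. mktorrent picks a piece size automatically unless you override it with -l.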