code.delx.au - monosys/commitdiff
Merge package-lists
author James Bunton <jamesbunton@delx.au>
Sat, 27 Jan 2024 10:16:02 +0000 (21:16 +1100)
committer James Bunton <jamesbunton@delx.au>
Sat, 27 Jan 2024 10:16:02 +0000 (21:16 +1100)
55 files changed:
archpkg/aur-build [new file with mode: 0755]
archpkg/aur-buildx [new file with mode: 0755]
archpkg/aur-check-updates [new file with mode: 0755]
archpkg/makechrootpkgx [new file with mode: 0755]
archpkg/repo-ls [new file with mode: 0755]
archpkg/repo-sign [new file with mode: 0755]
bin/aptorphan [new symlink]
bin/check-local-updates [new file with mode: 0755]
bin/csv2txt [new file with mode: 0755]
bin/dates [new file with mode: 0755]
bin/docker-cleanup [new file with mode: 0755]
bin/find-services-to-restart [new file with mode: 0755]
bin/git-cleanup [new file with mode: 0755]
bin/hexhost [new file with mode: 0755]
bin/java-decompile-recursive [new file with mode: 0755]
bin/mfree [new file with mode: 0755]
bin/pacorphan [new file with mode: 0755]
bin/passphrasegen [new file with mode: 0755]
bin/passwdgen [new file with mode: 0755]
bin/update-grub [new file with mode: 0755]
bin/wifi-scan [new file with mode: 0755]
bin/xmlpp [new file with mode: 0755]
hacks/apple-time-machine-symlink.py [new file with mode: 0755]
hacks/backup-mysql [new file with mode: 0755]
hacks/backup-openwrt [new file with mode: 0755]
hacks/bashttpd [new file with mode: 0755]
hacks/bin2ascii [new file with mode: 0755]
hacks/bt-dun-connect [new file with mode: 0755]
hacks/dnsctl [new file with mode: 0755]
hacks/find-in-file [new file with mode: 0755]
hacks/fix-openwrt-hairpin [new file with mode: 0755]
hacks/git-no-husky [new file with mode: 0755]
hacks/gnome-shell-raise-window [new file with mode: 0755]
hacks/lib-ext-backup [new file with mode: 0644]
hacks/magnet [new file with mode: 0755]
hacks/make-persistent-journal [new file with mode: 0755]
hacks/multiboot-setup [new file with mode: 0755]
hacks/opal-card-tool [new file with mode: 0755]
hacks/rename-by-date [new file with mode: 0755]
hacks/rsync-ssh-backup [new file with mode: 0755]
hacks/shaper [new file with mode: 0755]
hacks/smart-stats [new file with mode: 0755]
hacks/split-mvimg [new file with mode: 0755]
hacks/ssh-screen-wrapper [new file with mode: 0755]
hacks/tcp-proxy [new file with mode: 0755]
hacks/terminal-color-table [new file with mode: 0755]
hacks/usb-reset [new file with mode: 0755]
hacks/wordpress-salt-gen [new file with mode: 0755]
healthcheck/disk-usage [new file with mode: 0755]
healthcheck/packages [new file with mode: 0755]
healthcheck/run-all [new file with mode: 0755]
healthcheck/systemd-units [new file with mode: 0755]
healthcheck/systemd-user-timers [new file with mode: 0755]
healthcheck/systemd-user-units [new file with mode: 0755]
healthcheck/zpool-health [new file with mode: 0755]

diff --git a/archpkg/aur-build b/archpkg/aur-build
new file mode 100755 (executable)
index 0000000..bcb3b61
--- /dev/null
@@ -0,0 +1,51 @@
+#!/bin/bash
+
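+# Clone or update an AUR package checkout under /var/abs, show what
+# changed upstream for review, then build it with makepkg (or a custom
+# build command passed after the package name).
+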
+set -eu
+
+PKGNAME="$1"
+if [ -z "$PKGNAME" ]; then
+    echo "Usage: $0 pkgname"
+    exit 1
+fi
+
+shift
+MAKEPKG_CMD=("$@")
+if [ ${#MAKEPKG_CMD[@]} -eq 0 ]; then
+    MAKEPKG_CMD=("makepkg" "-sr")
+fi
+
+function enter_directory {
+    mkdir -p "$1"
+    cd "$1"
+}
+
+function fetch_latest_changes {
+    if [ ! -d .git ]; then
+        git init
+        git remote add origin "https://aur.archlinux.org/${PKGNAME}"
+    fi
+    git fetch
+    git reset origin/master
+}
+
+function show_diff {
+    git diff -R
+}
+
+function ask_user_to_continue {
+    read -r -p "Ok? (y/n) " ok
+    if [ "$ok" != "y" ]; then
+        return 1
+    fi
+}
+
+function build_and_install {
+    git checkout .
+    "${MAKEPKG_CMD[@]}"
+}
+
+enter_directory "/var/abs/${PKGNAME}"
+fetch_latest_changes
+show_diff
+ask_user_to_continue
+build_and_install
diff --git a/archpkg/aur-buildx b/archpkg/aur-buildx
new file mode 100755 (executable)
index 0000000..090f3f7
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/bash
+
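+# Wrapper around aur-build that builds in a clean chroot via
+# makechrootpkgx.
+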
+PKGNAME="$1"
+shift
+
+exec aur-build "$PKGNAME" makechrootpkgx "$@"
diff --git a/archpkg/aur-check-updates b/archpkg/aur-check-updates
new file mode 100755 (executable)
index 0000000..c3402da
--- /dev/null
@@ -0,0 +1,18 @@
+#!/bin/bash
+
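+# Compare installed foreign packages (pacman -Qm, or a "name version"
+# list on stdin) against the AUR RPC and print available updates.
+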
+set -eu
+
+declare -A pkg_versions
+query_url='https://aur.archlinux.org/rpc/?v=5&type=info'
+
+while read -r pkg installed_version; do
+    pkg_versions[$pkg]="$installed_version"
+    query_url="${query_url}&arg[]=${pkg}"
+done < <(if [ -t 0 ]; then pacman -Qm; else cat; fi)
+
+curl -gsSf "$query_url" | jq -r '.results[] | .Name, .Version, "\u0000"' | while read -r -d $'\0' pkg aur_version; do
+    installed_version="${pkg_versions[$pkg]}"
+    if ! echo -e "${installed_version}\n${aur_version}" | pacsort | tail -n1 | grep -qxF "${installed_version}"; then
+        echo "${pkg} $installed_version -> $aur_version"
+    fi
+done
diff --git a/archpkg/makechrootpkgx b/archpkg/makechrootpkgx
new file mode 100755 (executable)
index 0000000..1f00cd9
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/bash
+
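+# Update the base build chroot, then build the PKGBUILD in the current
+# directory inside a clean copy of it.
+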
+set -xeu
+sudo arch-nspawn /var/cache/pacman/chroot/root pacman -Syu
+makechrootpkg -c -l "$(basename "$PWD")" -T -r /var/cache/pacman/chroot
diff --git a/archpkg/repo-ls b/archpkg/repo-ls
new file mode 100755 (executable)
index 0000000..44f3b2b
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/bash
+
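+# A pacman repo database is a tar of per-package "desc" files; print
+# "name version" for each package by scanning the %NAME% and %VERSION%
+# stanzas.
+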
+set -eu
+
+tar -Oxf "$1" | \
+    awk 'BEGIN {RS=""} $1 == "%NAME%" {printf "%s ", $2} $1 == "%VERSION%" {print $2}'
diff --git a/archpkg/repo-sign b/archpkg/repo-sign
new file mode 100755 (executable)
index 0000000..e555c43
--- /dev/null
@@ -0,0 +1,33 @@
+#!/bin/bash
+
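+# Detach-sign any unsigned package files in the local repo, then add
+# them to the delx.db.tar.xz repo database.
+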
+set -eu
+
+cd /var/cache/pacman/abs
+
+tosign=()
+for pkg in *.pkg.*; do
+    if [[ "$pkg" =~ .*\.sig ]]; then
+        continue
+    fi
+    if ! [ -f "${pkg}.sig" ]; then
+        tosign+=("$pkg")
+    fi
+done
+
+if [ "${#tosign[@]}" -gt 0 ]; then
+    echo "Signing:"
+    echo "${tosign[@]}" | xargs -n1 echo "  "
+    echo
+    for i in $(seq 5 -1 1); do
+        echo -n "$i "
+        sleep 1
+    done
+    echo
+    set -x
+    # Preload the agent
+    gpg --output /dev/null --detach-sign /dev/null
+    echo "${tosign[@]}" | xargs -n1 gpg --detach-sign
+    echo "${tosign[@]}" | xargs -n1 repo-add delx.db.tar.xz -R
+else
+    echo "Nothing to do"
+fi
diff --git a/bin/aptorphan b/bin/aptorphan
new file mode 120000 (symlink)
index 0000000..2384a1e
--- /dev/null
@@ -0,0 +1 @@
+pacorphan
\ No newline at end of file
diff --git a/bin/check-local-updates b/bin/check-local-updates
new file mode 100755 (executable)
index 0000000..806c58f
--- /dev/null
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -eu
+
+. /etc/os-release
+
+function is_debian {
+    [ "$ID" = debian ] || [ "${ID_LIKE:-}" = debian ]
+}
+
+function is_arch {
+    [ "$ID" = arch ]
+}
+
+if is_debian; then
+    aptitude search ~U || true
+fi
+
+if is_arch; then
+    checkupdates || true
+fi
diff --git a/bin/csv2txt b/bin/csv2txt
new file mode 100755 (executable)
index 0000000..dfbf3a9
--- /dev/null
@@ -0,0 +1,9 @@
+#!/usr/bin/env python3
+
+import csv
+import sys
+
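+# Read CSV on stdin and print fixed-width columns, sizing each column
+# to its widest cell.
+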
+rows = list(csv.reader(sys.stdin))
+column_widths = list(max((len(str(cell))) for cell in column) for column in zip(*rows))
+for row in rows:
+    print("".join(str(cell).ljust(width+1) for cell, width in zip(row, column_widths)))
diff --git a/bin/dates b/bin/dates
new file mode 100755 (executable)
index 0000000..f4708d3
--- /dev/null
+++ b/bin/dates
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+list="
+America/Los_Angeles
+America/Chicago
+America/New_York
+Europe/London
+UTC
+Asia/Kolkata
+Asia/Ho_Chi_Minh
+Australia/Perth
+Australia/Brisbane
+Australia/Sydney
+"
+
+for tz in $list; do
+    printf "%-25s" "$tz"
+    TZ="$tz" date
+    echo
+done
+
diff --git a/bin/docker-cleanup b/bin/docker-cleanup
new file mode 100755 (executable)
index 0000000..b7c3c63
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -x
+
+docker container prune -f
+
+docker volume prune -f
+
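+# Also remove images created months or years ago; `docker image prune`
+# without -a only removes dangling images.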
+docker images --no-trunc --format '{{.ID}} {{.CreatedSince}}' \
+    | awk '/ months/ || / years/ { print $1 }' \
+    | xargs --no-run-if-empty docker rmi -f
+
+docker image prune -f
diff --git a/bin/find-services-to-restart b/bin/find-services-to-restart
new file mode 100755 (executable)
index 0000000..ce497f8
--- /dev/null
@@ -0,0 +1,50 @@
+#!/bin/bash
+
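+# Print commands to restart services (or kill processes) that still
+# hold deleted files open, e.g. libraries replaced by an upgrade.
+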
+function get_pids_to_restart {
+    sudo lsof +c 0 / | \
+        awk '/DEL|(deleted)/ { print $2 }' | \
+        sort -u
+}
+
+function find_service_for_pid {
+    systemctl status "$1" | \
+        awk '$2 ~ /\.service$/ && NR == 1 { print $2 }'
+}
+
+function is_cron_child {
+    if [ "$1" != "cronie.service" ]; then
+        return 1
+    fi
+    if systemctl show cronie -p MainPID | grep -q "$2"; then
+        return 1
+    fi
+    return 0
+}
+
+function echo_kill_pid {
+    echo "sudo kill $1 # $(ps -p"$1" -o user=,cmd=)"
+}
+
+function echo_restart_service {
+    echo "sudo systemctl restart $1"
+}
+
+for pid in $(get_pids_to_restart); do
+    if [ "$pid" = 1 ]; then
+        echo "sudo systemctl daemon-reexec"
+        exit 0
+    fi
+
+    service="$(find_service_for_pid "$pid")"
+    if is_cron_child "$service" "$pid"; then
+        echo_kill_pid "$pid"
+
+    elif [ -n "$service" ]; then
+        echo_restart_service "$service"
+
+    else
+        echo_kill_pid "$pid"
+    fi
+
+done | sort -u
+
diff --git a/bin/git-cleanup b/bin/git-cleanup
new file mode 100755 (executable)
index 0000000..2337a71
--- /dev/null
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+set -eu
+
+function usage {
+    echo "Usage: $0 [--remote origin] [--age ndays]"
+    echo
+    echo "This tool will not change your repository, the output is a list of git push commands you can use to delete old branches."
+    echo
+    echo "Note that the arguments must be passed in the order listed above."
+    exit 1
+}
+
+if [ "${1:-}" = "--help" ]; then
+    usage
+fi
+
+remote="origin"
+if [ "${1:-}" = "--remote" ]; then
+    remote="$2"
+    shift
+    shift
+fi
+
+age_days=30
+if [ "${1:-}" = "--age" ]; then
+    age_days="$2"
+    shift
+    shift
+fi
+age_seconds=$((age_days*24*3600))
+
+if [ -n "${1:-}" ]; then
+    usage
+fi
+
+
+echo "## Fetching latest changes from $remote"
+git fetch -p "${remote}"
+
+
+echo "## Constructing list of revisions in master and tags"
+safe_revs_file="$(mktemp -t gitcleanup.XXXXXX)"
+git rev-list "${remote}/master" --tags > "$safe_revs_file"
+
+
+echo "## Checking for branches to delete"
+now="$(date +%s)"
+git ls-remote --heads "$remote" | while read -r rev branch; do
+    timestamp="$(git rev-list --format=format:'%ct' --max-count=1 "$rev"|tail -n1)"
+    age=$((now-timestamp))
+
+    if [ "$branch" = "refs/heads/master" ]; then
+        continue;
+    fi
+
+    if grep -q "$rev" "$safe_revs_file"; then
+        echo git push "$remote" ":$branch" "# remove merged into master or tag"
+        continue
+    fi
+
+    if [ "$age" -gt "$age_seconds" ]; then
+        branch_name="${branch##refs/heads/}"
+        echo git tag "archived/$branch_name" "$rev" "# create tag for older than $age_days days branch"
+        echo git push "$remote" tag "archived/$branch_name" "# push tag for older than $age_days days branch"
+        echo git push "$remote" ":$branch" "# remove older than $age_days days"
+        continue
+    fi
+done
+
+rm -f "$safe_revs_file"
+
diff --git a/bin/hexhost b/bin/hexhost
new file mode 100755 (executable)
index 0000000..ef6240f
--- /dev/null
@@ -0,0 +1,73 @@
+#!/usr/bin/env node
+
+'use strict';
+
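+// Encode a short hostname into the hex digits of an IPv6 address
+// suffix, or decode it back. Each 3-character block over the
+// 39-character alphabet below packs into 16 bits (4 hex digits).
+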
+const BLOCKSIZE = 3;
+const CHARS1 = [
+    ' ',
+    ...'0123456789'.split(''),
+    ...'abcdefghijklmnopqrstuvwxyz'.split(''),
+    '_',
+    '-',
+];
+const SHIFT = BigInt(Math.ceil(Math.log2(CHARS1.length ** BLOCKSIZE)));
+const MASK = 2n**SHIFT - 1n;
+
+const CHARSN = [...Array(BLOCKSIZE - 1)].reduce((acc) => acc.map((v1) => CHARS1.map((v2) => ''+v1+v2)).flat(), CHARS1);
+const FMAP = new Map(CHARSN.map((v, i) => [''+v, BigInt(i)]));
+const RMAP = new Map(CHARSN.map((v, i) => [BigInt(i), ''+v]));
+
+function main(arg1, arg2) {
+    if (!arg1) {
+        console.error('Usage: hexhost fdxx::4a59954e');
+        console.error('Usage: hexhost fdxx:: hostname');
+        process.exit(1);
+    }
+
+    if (arg2) {
+        const prefix = arg1;
+        const suffix = encode(arg2).replaceAll(/(.{4})/g, '$1:').replace(/:$/, '');
+        console.log(prefix + suffix);
+    } else {
+        const [, suffix] = arg1.split(/::|:0:/);
+        console.log(decode(suffix));
+    }
+}
+
+function decode(input) {
+    input = input && input.replaceAll(':', '');
+    if (!input) {
+        throw new Error('No suffix found');
+    }
+    input = BigInt('0x' + input);
+    let output = [];
+    while (input > 0) {
+        const encodedBlock = input & MASK;
+        input >>= SHIFT;
+        const block = RMAP.get(encodedBlock);
+        if (block !== undefined) {
+            output.push(block);
+        }
+    }
+    return output.reverse().join('').trim();
+}
+
+function encode(input) {
+    if (input.length / BLOCKSIZE > (64n / SHIFT)) {
+        throw new Error('Input is too long to fit in a /64!');
+    }
+
+    input = input.toLowerCase();
+
+    let out = BigInt(0);
+    for (let i = 0; i < input.length; i += BLOCKSIZE) {
+        const block = input.substring(i, i + BLOCKSIZE).padEnd(BLOCKSIZE);
+        const encodedBlock = FMAP.get(block);
+        if (encodedBlock !== undefined) {
+            out = (out << SHIFT) + encodedBlock;
+        }
+    }
+    return out.toString(16);
+}
+
+main(process.argv[2], process.argv[3]);
diff --git a/bin/java-decompile-recursive b/bin/java-decompile-recursive
new file mode 100755 (executable)
index 0000000..afc6220
--- /dev/null
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+if [ -z "$1" -o -z "$2" ]; then
+    echo "Usage: $0 src dest"
+    exit 1
+fi
+
+src="$1"
+dest="$2"
+mkdir -p "$dest"
+dest="$(cd "$dest" && pwd)"
+
+cd "$src"
+find . -name '*.class' | while read -r line; do
+    class="$(echo "$line"| sed -e 's|^\./||' -e 's|\.class$||' -e 's|/|.|g')"
+    javap -private -c "$class" > "${dest}/${class}.txt"
+done
+
diff --git a/bin/mfree b/bin/mfree
new file mode 100755 (executable)
index 0000000..a2f7508
--- /dev/null
+++ b/bin/mfree
@@ -0,0 +1,86 @@
+#!/usr/bin/env python3
+
+import sys
+
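+# Like free(1), but counts the ZFS ARC as cache rather than used memory.
+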
+def read_meminfo():
+    f = open("/proc/meminfo")
+    result = {}
+    for line in f:
+        key, value, *_ = line.split()
+        key = key.strip(":")
+        result[key] = int(value) * 1024
+    return result
+
+def read_zfs_arcstats():
+    try:
+        f = open("/proc/spl/kstat/zfs/arcstats")
+    except FileNotFoundError:
+        return {}
+
+    # skip first two lines
+    f.readline()
+    f.readline()
+
+    result = {}
+    for line in f:
+        key, _, value = line.split()
+        result[key] = int(value)
+    return result
+
+def print_template(first, *rest):
+    sys.stdout.write(first.ljust(7))
+    for x in rest:
+        sys.stdout.write(x.rjust(12))
+    sys.stdout.write("\n")
+
+def main():
+    meminfo = read_meminfo()
+    arcstats = read_zfs_arcstats()
+
+    mem_total = meminfo["MemTotal"]
+    mem_free = meminfo["MemFree"]
+    mem_cached = meminfo["Cached"] + meminfo["SReclaimable"]
+    mem_buffers = meminfo["Buffers"]
+    mem_shared = meminfo["Shmem"]
+    mem_available = meminfo["MemAvailable"]
+
+    swap_total = meminfo["SwapTotal"]
+    swap_free = meminfo["SwapFree"]
+
+    arc_total = arcstats.get("c_max", 0)
+    arc_used = arcstats.get("size", 0)
+
+    calc_used = mem_total - mem_free - mem_cached - mem_buffers - arc_used
+    calc_cache = mem_buffers + mem_cached + arc_used
+    calc_available = mem_available + arc_used
+
+    def fmt(x):
+        return str(round(x/1024/1024))
+
+    print_template("", "total", "used", "free", "shared", "buff/cache", "available")
+
+    print_template(
+        "Mem:",
+        fmt(mem_total),
+        fmt(calc_used),
+        fmt(mem_free),
+        fmt(mem_shared),
+        fmt(calc_cache),
+        fmt(calc_available),
+    )
+    print_template(
+        "Swap:",
+        fmt(swap_total),
+        fmt(swap_total - swap_free),
+        fmt(swap_free),
+    )
+    if arcstats:
+        print_template(
+            "ZFS:",
+            fmt(arc_total),
+            fmt(arc_used),
+        )
+
+if __name__ == "__main__":
+    main()
+
diff --git a/bin/pacorphan b/bin/pacorphan
new file mode 100755 (executable)
index 0000000..e34e6da
--- /dev/null
@@ -0,0 +1,170 @@
+#!/usr/bin/env python3
+
+import codecs
+import subprocess
+import os
+import sys
+
+def main():
+    name = os.path.basename(sys.argv[0])
+    if name == "pacorphan":
+        pacorphan_main()
+    elif name == "aptorphan":
+        aptorphan_main()
+    else:
+        print("This script must be named pacorphan or aptorphan!", file=sys.stderr)
+        sys.exit(1)
+
+def pacorphan_main():
+    keep_pkg_list = load_keep_pkg_list("pacorphan")
+    unneeded_pkg_list = list(run(["pacman", "-Qttq"]))
+    installed_pkg_list = list(run(["pacman", "-Qq"]))
+    explicit_pkg_list = list(run(["pacman", "-Qeq"]))
+    mark_explicit_list = []
+    need_install_list = []
+
+    for pkg in keep_pkg_list:
+        if pkg in unneeded_pkg_list:
+            unneeded_pkg_list.remove(pkg)
+
+        if pkg in explicit_pkg_list:
+            explicit_pkg_list.remove(pkg)
+        else:
+            if pkg in installed_pkg_list:
+                mark_explicit_list.append(pkg)
+            else:
+                need_install_list.append(pkg)
+
+    if unneeded_pkg_list:
+        print("# Found packages to remove")
+        print("sudo pacman -R " + " ".join(unneeded_pkg_list))
+        print()
+
+    if explicit_pkg_list:
+        print("# Found explicitly installed packages to keep or remove")
+        print("echo " + " ".join(explicit_pkg_list) + " | tr ' ' '\\n' >> ~/.pacorphan/keep")
+        print("sudo pacman -D --asdeps " + " ".join(explicit_pkg_list))
+        print()
+
+    if mark_explicit_list:
+        print("# Found packages which should be marked as explicitly installed")
+        print("sudo pacman -D --asexplicit " + " ".join(mark_explicit_list))
+        print()
+
+    if need_install_list:
+        print("# Found packages which should be installed")
+        print("sudo pacman -S " + " ".join(need_install_list))
+        print()
+
+def aptorphan_main():
+    ensure_apt_config_is_sane()
+
+    keep_pkg_list = load_keep_pkg_list("aptorphan")
+    mark_explicit_list = []
+    need_install_list = []
+    installed_pkg_list = list(run(["aptitude", "search", "?or(~i!~aremove,~ainstall)", "-F", "%p"]))
+    explicit_pkg_list = list(run(["aptitude", "search", "?or(~i!~M!~aremove,~ainstall!~M)", "-F", "%p"]))
+
+    for pkg in keep_pkg_list:
+        if pkg in explicit_pkg_list:
+            explicit_pkg_list.remove(pkg)
+        else:
+            if pkg in installed_pkg_list:
+                mark_explicit_list.append(pkg)
+            else:
+                need_install_list.append(pkg)
+
+
+    if mark_explicit_list:
+        print("# Found packages which should be marked as explicitly installed")
+        print("sudo aptitude --schedule-only install " + " ".join([("'"+x+"&m'") for x in mark_explicit_list]))
+        print()
+
+    if need_install_list:
+        print("# Found packages which should be installed")
+        print("sudo aptitude --schedule-only install " + " ".join(need_install_list))
+        print()
+
+    if explicit_pkg_list:
+        print("# Found explicitly installed packages to keep or remove")
+        print("echo " + " ".join(explicit_pkg_list) + " | tr ' ' '\\n' >> ~/.aptorphan/keep")
+        print("sudo aptitude --schedule-only install " + " ".join([(x+"+M") for x in explicit_pkg_list]))
+        print()
+
+def ensure_apt_config_is_sane():
+    required_config = """
+APT::Install-Recommends "false";
+APT::Install-Suggests "false";
+APT::AutoRemove::RecommendsImportant "false";
+APT::AutoRemove::SuggestsImportant "false";
+""".strip().split("\n")
+
+    actual_config = run(["apt-config", "dump"])
+
+    missing_lines = []
+    for required_line in required_config:
+        for line in actual_config:
+            if line == required_line:
+                break
+        else:
+            missing_lines.append(required_line)
+    if missing_lines:
+        print("Missing apt-config, add these lines to /etc/apt/apt.conf.d/99recommends-disable")
+        print("\n".join(missing_lines))
+        sys.exit(1)
+
+
+def load_keep_pkg_list(name):
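+    # Read package names from every file in the config directory. A
+    # leading "~" (on the entry, or on the filename for a whole file)
+    # removes a previously added name instead of adding it.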
+    config_path = find_config_path(name)
+    if not os.path.isdir(config_path):
+        print("# WARNING, you should create a directory at %s" % config_path)
+        return []
+
+    result = []
+
+    for filename in sorted(os.listdir(config_path)):
+        if filename.startswith("."):
+            continue
+        full_filename = os.path.join(config_path, filename)
+        for pkg in codecs.open(full_filename, "r", "utf-8"):
+            pkg = strip_comment(pkg).strip()
+            if not pkg:
+                continue
+            if filename[0] != "~" and pkg[0] != "~":
+                if pkg in result:
+                    print("# Duplicate entry:", pkg, "in file", filename)
+                    continue
+                result.append(pkg)
+            else:
+                pkg = pkg.strip("~")
+                if pkg not in result:
+                    print("# Redundant removal:", pkg, "in file", filename)
+                    continue
+                result.remove(pkg)
+
+    return result
+
+def find_config_path(name):
+    homedir_path = os.path.expanduser("~/.%s" % name)
+    if os.path.isdir(homedir_path):
+        return homedir_path
+
+    if "XDG_CONFIG_HOME" not in os.environ:
+        return os.path.expanduser("~/.config/%s" % name)
+    else:
+        return os.path.join(os.environ["XDG_CONFIG_HOME"], name)
+
+def strip_comment(line):
+    pos = line.find("#")
+    if pos >= 0:
+        line = line[:pos]
+    return line.strip()
+
+def run(cmd):
+    for line in subprocess.check_output(cmd).decode("utf-8").split("\n"):
+        line = line.strip()
+        if line:
+            yield line
+
+if __name__ == "__main__":
+    main()
diff --git a/bin/passphrasegen b/bin/passphrasegen
new file mode 100755 (executable)
index 0000000..0469ce3
--- /dev/null
@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+
+import math
+import random
+import os
+
+words_filename = os.environ.get("WORDS", "/usr/share/dict/words")
+num_words = 5
+max_word_len = 8
+min_word_len = 3
+
+words = open(words_filename)
+words = [word.strip().lower() for word in words]
+words = [word for word in words if word.isalpha() and min_word_len <= len(word) <= max_word_len]
+random_words = [random.choice(words) for _ in range(num_words)]
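+# Each word contributes log2(len(words)) bits of entropy.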
+entropy_words = num_words * math.log(len(words)) / math.log(2)
+
+max_number = 100
+min_number = 10
+random_number = random.randint(min_number, max_number)
+entropy_number = math.log(max_number - min_number + 1) / math.log(2)
+
+print("Entropy:", math.floor(entropy_words + entropy_number))
+print("Passphrase:", " ".join(random_words + [str(random_number)]))
diff --git a/bin/passwdgen b/bin/passwdgen
new file mode 100755 (executable)
index 0000000..668771e
--- /dev/null
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+
+import random, sys
+
+alnum_chars = list(filter(lambda c: c.isalnum(), map(chr, range(128))))
+full_chars = alnum_chars * 2 + list("!@#%^&*(){}[]/=?+_-;:,.<>")
+
+def generate(chars, length):
+    return "".join([random.choice(chars) for i in range(length)])
+
+def print_usage():
+    print("Usage: %s [length] [alnum|full]" % sys.argv[0])
+    sys.exit(1)
+
+if __name__ == "__main__":
+    if len(sys.argv) <= 1:
+        n = 20
+    elif sys.argv[1].isdigit():
+        n = int(sys.argv[1])
+    else:
+        print_usage()
+
+    if len(sys.argv) <= 2:
+        chars = alnum_chars
+    elif sys.argv[2] == "alnum":
+        chars = alnum_chars
+    elif sys.argv[2] == "full":
+        chars = full_chars
+    else:
+        print_usage()
+
+    print(generate(chars, n))
+
diff --git a/bin/update-grub b/bin/update-grub
new file mode 100755 (executable)
index 0000000..54363ab
--- /dev/null
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -e
+export ZPOOL_VDEV_NAME_PATH=1
+exec grub-mkconfig -o /boot/grub/grub.cfg "$@"
diff --git a/bin/wifi-scan b/bin/wifi-scan
new file mode 100755 (executable)
index 0000000..12c3240
--- /dev/null
@@ -0,0 +1,116 @@
+#!/usr/bin/env node
+'use strict'
+
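+// Scan for wifi networks with `iw` and print a table sorted by signal
+// strength; pass "-" as the interface to format scan output from stdin.
+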
+const {exec} = require('child_process');
+const fs = require('fs').promises;
+
+function execAsync(command, opts) {
+    return new Promise((resolve, reject) => {
+        exec(command, opts, (error, stdout, stderr) => {
+            if (error) {
+                reject(error);
+            } else {
+                resolve({stdout, stderr});
+            }
+        });
+    });
+}
+
+function sleep(n) {
+    return new Promise((resolve) => setTimeout(resolve, n));
+}
+
+async function findInterface() {
+    const {stdout} = await execAsync('iw dev');
+    const lines = stdout.split('\n')
+        .map((line) => line.trim())
+        .filter((line) => line.startsWith('Interface '))
+        .map((line) => line.split(' ')[1]);
+    return lines[0];
+}
+
+async function scanInterface(iface) {
+    const {stdout} = await execAsync(`sudo iw dev ${iface} scan`);
+    return stdout;
+}
+
+function formatScanResult(scanResult) {
+    const results = [];
+    let partial = null;
+
+    for (let line of scanResult.split('\n')) {
+        if (line.startsWith('BSS ')) {
+            finishPartial();
+            partial = {};
+            partial.bssid = line.match(/[a-z0-9:]+/)[0];
+            partial.associated = line.indexOf('associated') >= 0 ? '**' : '';
+        }
+
+        line = line.trim()
+        if (line.startsWith('SSID: ')) {
+            partial.ssid = line.slice('SSID: '.length).trim();
+        }
+        if (line.startsWith('signal: ')) {
+            partial.signal = line.split(':')[1].trim();
+        }
+        if (line.startsWith('DS Parameter set: channel')) {
+            partial.channel = line.split(':')[1].trim();
+        }
+        if (line.startsWith('* primary channel:')) {
+            partial.channel = 'channel ' + line.split(':')[1].trim();
+        }
+        if (line.startsWith('freq: ')) {
+            partial.freq = 'freq ' + line.split(':')[1].trim();
+        }
+    }
+
+    finishPartial();
+
+    function finishPartial() {
+        if (!partial) {
+            return;
+        }
+
+        partial.ssid = partial.ssid || '';
+        partial.channel = partial.channel || partial.freq || '';
+
+        const sortKey = [
+            parseFloat(partial.signal),
+            parseInt(partial.channel.split(' ')[1])
+        ];
+
+        results.push([sortKey, partial]);
+    }
+
+    return results
+        .sort()
+        .map(([, {bssid, ssid, signal, channel, associated}]) => {
+            ssid = ssid.padStart(40, ' ').substr(0, 40);
+            channel = channel.padEnd(12, ' ');
+            return `${signal}  ${channel}  ${ssid}  ${bssid}  ${associated}`;
+        })
+        .join('\n') + '\n';
+}
+
+async function main() {
+    const iface = process.argv[2] || await findInterface();
+
+    if (iface === '-') {
+        const scanResult = await fs.readFile('/dev/stdin', 'utf-8');
+        const prettyScanResult = formatScanResult(scanResult);
+        process.stdout.write(prettyScanResult);
+    } else {
+        for (;;) {
+            const scanResult = await scanInterface(iface).catch((err) => err.toString());
+            const prettyScanResult = formatScanResult(scanResult);
+            process.stdout.write('\x1b[2J\x1b[0f');
+            process.stdout.write(prettyScanResult);
+            await sleep(3000);
+        }
+    }
+}
+
+main().catch((err) => {
+    console.log('Unhandled error!', err);
+    process.exit(1);
+});
diff --git a/bin/xmlpp b/bin/xmlpp
new file mode 100755 (executable)
index 0000000..60472c9
--- /dev/null
+++ b/bin/xmlpp
@@ -0,0 +1,8 @@
+#!/usr/bin/env python3
+
+import xml.dom.minidom
+import sys
+
+doc = xml.dom.minidom.parse(sys.stdin)
+doc.writexml(sys.stdout, addindent="    ", newl="\n")
+
diff --git a/hacks/apple-time-machine-symlink.py b/hacks/apple-time-machine-symlink.py
new file mode 100755 (executable)
index 0000000..1a8b92d
--- /dev/null
@@ -0,0 +1,63 @@
+#!/usr/bin/env python2
+
+# This tool tries to parse the weird hardlink format Apple uses for Time Machine
+# The goal is to recover data from a Time Machine backup without a Mac
+
+import math
+import stat
+import os
+import sys
+
+def find_lookup_dir(path):
+    while path != "/":
+        lookup_dir = os.path.join(path, ".HFS+ Private Directory Data\r")
+        if os.path.isdir(lookup_dir):
+            return lookup_dir
+        path = os.path.split(path)[0]
+    raise Exception("Could not find HFS+ link dir")
+
+def resolve_path(lookup_dir, path):
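+    # Time Machine represents a directory hard link as an empty file
+    # whose link count is the directory's number in the private lookup
+    # directory.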
+    st = os.lstat(path)
+    if stat.S_ISREG(st.st_mode) and st.st_size == 0 and st.st_nlink > 1000:
+        return os.path.join(lookup_dir, "dir_%d" % st.st_nlink)
+    else:
+        return path
+
+
+def process_directory(lookup_dir, dest, path):
+    if os.path.islink(dest):
+        os.unlink(dest)
+    if not os.path.isdir(dest):
+        os.mkdir(dest)
+    path = resolve_path(lookup_dir, path)
+
+    for filename in os.listdir(path):
+        full_filename = os.path.join(path, filename)
+        full_filename = resolve_path(lookup_dir, full_filename)
+        dest_filename = os.path.join(dest, filename)
+
+        if os.path.isdir(full_filename):
+            process_directory(lookup_dir, dest_filename, full_filename)
+        else:
+            if os.path.islink(dest_filename):
+                os.unlink(dest_filename)
+            if not os.path.isdir(dest_filename):
+                os.symlink(full_filename, dest_filename)
+
+def main(dest, path):
+    lookup_dir = find_lookup_dir(path)
+    process_directory(lookup_dir, dest, path)
+
+def print_usage_exit():
+    print >>sys.stderr, "Usage: %s dest path" % sys.argv[0]
+    sys.exit(1)
+
+if __name__ == "__main__":
+    if len(sys.argv) != 3:
+        print_usage_exit()
+
+    dest = sys.argv[1]
+    path = sys.argv[2]
+
+    main(dest, path)
+
diff --git a/hacks/backup-mysql b/hacks/backup-mysql
new file mode 100755 (executable)
index 0000000..2747ca1
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -eu
+
+cd ~/backup-mysql/
+
+DATABASES="$(echo 'show databases' | mysql | grep -vE '^(Database|mysql|information_schema|performance_schema)$')"
+for db in ${DATABASES}; do
+    mysqldump --add-drop-table --single-transaction "${db}" | \
+        gzip -9 > "${db}-$(date '+%Y%m%d').sql.gz"
+
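+    # delete all but the 9 newest dumps for this database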
+    rm -f $(ls -1 "${db}-"* | sort -r | tail -n +10)
+done
diff --git a/hacks/backup-openwrt b/hacks/backup-openwrt
new file mode 100755 (executable)
index 0000000..59b8640
--- /dev/null
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+BACKUP_HOSTS="$(awk '/^Host.*openwrt/ {print $2}' < ~/.ssh/config)"
+
+cd ~/backup-openwrt/
+
+for host in $BACKUP_HOSTS; do
+    file="${host}.backup.tar.gz"
+    ssh "$host" sysupgrade -b - > "${file}.new" && mv "${file}.new" "${file}"
+done
+
diff --git a/hacks/bashttpd b/hacks/bashttpd
new file mode 100755 (executable)
index 0000000..7d8955c
--- /dev/null
@@ -0,0 +1,181 @@
+#!/bin/bash
+
+##########################################################################
+# Bash HTTP Server                                                       #
+#                                                                        #
+# This is just a bit of fun. Please don't trust it for anything serious! #
+##########################################################################
+
+# static configuration
+PORT="${PORT:-8080}"
+SERVER_ROOT="${SERVER_ROOT:-$PWD}"
+
+# per-request globals
+STATE_FUNC=""
+REQUEST_LINE=""
+REQUEST_FILE=""
+
+
+function main {
+    set -e
+
+    cd "$SERVER_ROOT"
+    print_log "Server running on port $PORT in $SERVER_ROOT"
+
+    while true; do
+        handle_connection
+    done
+}
+
+function print_log {
+    echo "$(date -Iseconds) $@"
+}
+
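+# Each request runs as a small state machine: STATE_FUNC names the next
+# handler, and handlers advance or clear it themselves.
+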
+function handle_connection {
+    start_netcat
+
+    STATE_FUNC="handle_read_request_status_line"
+    while [ -n "$STATE_FUNC" ]; do
+        $STATE_FUNC || break
+    done <&"${COPROC[0]}"
+
+    close_request
+}
+
+function start_netcat {
+    coproc nc -l "$PORT"
+}
+
+function close_request {
+    STATE_FUNC=""
+    kill "$COPROC_PID" &> /dev/null || true
+    wait "$COPROC_PID" &> /dev/null || true
+}
+
+function handle_read_request_status_line {
+    read_request_line
+    print_log "$REQUEST_LINE"
+
+    read method path ignored < <(echo "$REQUEST_LINE")
+    REQUEST_FILE=".$path"
+
+    check_valid_method || return 0
+    check_path_is_under_server_root || return 0
+    find_index_in_request_file
+    check_request_file_exists || return 0
+
+    STATE_FUNC="handle_read_request_to_end"
+}
+
+function check_valid_method {
+    if [ "$method" != "GET" ]; then
+        STATE_FUNC="write_error_response_405"
+        return 1
+    fi
+}
+
+function check_path_is_under_server_root {
+    abspath="$(cd "$(dirname "$REQUEST_FILE")" && pwd)"
+    if ! echo "$abspath" | grep -q "^$PWD"; then
+        STATE_FUNC="write_error_response_400"
+        return 1
+    fi
+}
+
+function find_index_in_request_file {
+    if [ ! -d "$REQUEST_FILE" ]; then
+        return 0
+    fi
+
+    for filename in index.html index.txt; do
+        if [ -f "${REQUEST_FILE}/${filename}" ]; then
+            REQUEST_FILE="${REQUEST_FILE}/${filename}"
+            return 0
+        fi
+    done
+}
+
+function check_request_file_exists {
+    if [ ! -f "$REQUEST_FILE" ]; then
+        STATE_FUNC="write_error_response_404"
+        return 1
+    fi
+}
+
+function handle_read_request_to_end {
+    read_request_line
+
+    if [ -z "$REQUEST_LINE" ]; then
+        STATE_FUNC="write_response"
+    fi
+}
+
+function read_request_line {
+    read REQUEST_LINE
+    REQUEST_LINE="$(echo "$REQUEST_LINE" | tr -d '\r')"
+}
+
+function write_response {
+    STATE_FUNC=""
+    cat <<EOT >&"${COPROC[1]}"
+HTTP/1.0 200 OK
+Server: bashttpd
+Content-Type: $(file -bi "$REQUEST_FILE")
+Connection: close
+
+$(cat "$REQUEST_FILE")
+EOT
+}
+
+function write_error_response {
+    STATE_FUNC=""
+    local code="$1"
+    local message="$2"
+    cat <<EOT >&"${COPROC[1]}"
+HTTP/1.0 $code $message
+
+<!DOCTYPE html>
+<html>
+<head>
+<title>$code $message</title>
+</head>
+<body>
+<h1>$code $message</h1>
+</body>
+</html>
+EOT
+}
+
+function write_error_response_400 {
+    write_error_response 400 "Bad Request"
+}
+
+function write_error_response_404 {
+    write_error_response 404 "File Not Found"
+}
+
+function write_error_response_405 {
+    write_error_response 405 "Unsupported method"
+}
+
+function check_dependencies {
+    check_bash
+    check_netcat
+}
+
+function check_bash {
+    if [ "${BASH_VERSINFO[0]}" -lt 4 ]; then
+        echo "ERROR! Requires Bash 4+ for coproc support"
+        exit 1
+    fi
+}
+
+function check_netcat {
+    if ! nc -h 2>&1 | head -n1 | grep -q '^OpenBSD netcat'; then
+        echo "ERROR! Requires OpenBSD netcat to be installed"
+        exit 1
+    fi
+}
+
+check_dependencies
+main
diff --git a/hacks/bin2ascii b/hacks/bin2ascii
new file mode 100755 (executable)
index 0000000..6e09573
--- /dev/null
@@ -0,0 +1,10 @@
+#!/usr/bin/env python3
+
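+# Convert a string of binary digits (e.g. "0100100001101001") to text.
+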
+import binascii, sys
+
+if len(sys.argv) == 2:
+    input = sys.argv[1]
+else:
+    input = sys.stdin.read()
+
+print(binascii.unhexlify(hex(int(input, 2))[2:]).decode('utf-8'))
diff --git a/hacks/bt-dun-connect b/hacks/bt-dun-connect
new file mode 100755 (executable)
index 0000000..7c38ebe
--- /dev/null
@@ -0,0 +1,199 @@
+#!/usr/bin/env python3
+
+import dbus
+import json
+import os
+import subprocess
+import sys
+import tempfile
+import time
+
+"""
+Instructions!
+1. Pair your bluetooth device, use bluetoothctl
+2. Use 'sdptool search DUN' to find the bluetooth channel
+3. Save your configuration to ~/.config/bt-dun-connect.json
+4. Run bt-dun-connect
+
+
+Example configuration:
+{
+    "apn": "internet",
+    "bluetooth_addr": "DE:AD:BE:EE:EE:EF",
+    "bluetooth_channel": "22"
+}
+
+"""
+
+
+class DiallerException(Exception):
+    pass
+
+class BluetoothDialler(object):
+    def __init__(self, rfcomm_id, bt_addr, bt_channel, apn):
+        self.rfcomm_id = rfcomm_id
+        self.bt_addr = bt_addr
+        self.bt_channel = bt_channel
+        self.apn = apn
+
+        self.rfcomm = None
+        self.wvdial = None
+        self.wvdial_conf_name = None
+        self.dbus_system = None
+
+    def release(self):
+        if self.wvdial:
+            try:
+                self.wvdial.terminate()
+                self.wvdial.wait()
+            except Exception as e:
+                print(e)
+
+        if self.rfcomm:
+            try:
+                self.rfcomm.terminate()
+                self.rfcomm.wait()
+            except Exception as e:
+                print(e)
+
+        if self.wvdial_conf_name:
+            try:
+                os.unlink(self.wvdial_conf_name)
+            except Exception as e:
+                print(e)
+
+        try:
+            reset_rfcomm(self.rfcomm_id)
+        except Exception as e:
+            print(e)
+
+        if self.dbus_system:
+            try:
+                self.disconnect_bluetooth()
+            except Exception as e:
+                print(e)
+
+
+    def setup_dbus(self):
+        self.dbus_system = dbus.SystemBus()
+
+    def enable_bluetooth(self):
+        bluez = self.dbus_system.get_object("org.bluez", "/org/bluez/hci0")
+        iprops = dbus.Interface(bluez, "org.freedesktop.DBus.Properties")
+        iprops.Set("org.bluez.Adapter1", "Powered", True)
+
+    def disconnect_bluetooth(self):
+        path = self.bt_addr.upper().replace(":", "_")
+        bluez_dev = self.dbus_system.get_object("org.bluez", "/org/bluez/hci0/dev_" + path)
+        idev = dbus.Interface(bluez_dev, "org.bluez.Device1")
+        idev.Disconnect()
+
+    def connect_rfcomm(self):
+        self.rfcomm = subprocess.Popen([
+            "rfcomm",
+            "connect",
+            self.rfcomm_id,
+            self.bt_addr,
+            self.bt_channel,
+        ])
+
+        # poll until connected
+        start = time.time()
+        while time.time() - start < 10:
+            if self.is_rfcomm_connected():
+                return
+            time.sleep(0.1)
+        raise DiallerException("Timeout connecting rfcomm")
+
+    def is_rfcomm_connected(self):
+        output = subprocess.check_output(["rfcomm", "-a"])
+        for line in output.decode("ascii").split("\n"):
+            if not line.startswith("rfcomm%s:" % self.rfcomm_id):
+                continue
+            if line.find(" connected ") >= 0:
+                return True
+        return False
+
+    def write_wvdial_conf(self):
+        fd, self.wvdial_conf_name = tempfile.mkstemp()
+        f = os.fdopen(fd, "w")
+        f.write("""
+[Dialer Defaults]
+Modem = /dev/rfcomm0
+Baud = 115200
+Init = ATZ
+Init2 = AT+CGDCONT=1,"IP","%s"
+Phone = *99#
+Username = dummy
+Password = dummy
+""" % self.apn)
+        f.close()
+
+    def connect_wvdial(self):
+        self.wvdial = subprocess.Popen([
+            "wvdial", "-C", self.wvdial_conf_name
+        ])
+        self.wvdial.wait()
+
+
+    def run(self):
+        try:
+            self.setup_dbus()
+
+            print("Enabling bluetooth...")
+            self.enable_bluetooth()
+
+            print("Connecting rfcomm...")
+            self.connect_rfcomm()
+            self.write_wvdial_conf()
+
+            print("Dialling...")
+            self.connect_wvdial()
+
+        except KeyboardInterrupt as e:
+            print("Exiting...")
+        except DiallerException as e:
+            print(e)
+        finally:
+            self.release()
+
+
+def get_next_rfcomm_id():
+    # for now always use rfcomm0
+    reset_rfcomm("all")
+    return "0"
+
+def reset_rfcomm(rfcomm_id):
+    subprocess.call(["rfcomm", "release", rfcomm_id], stderr=open("/dev/null"))
+
+def read_config(filename):
+    try:
+        config = open(os.path.expanduser(filename))
+    except OSError as e:
+        print("Failed to open config file: %s" % e)
+        sys.exit(1)
+
+    try:
+        return json.load(config)
+    except ValueError as e:
+        print("Failed to parse config file %s: %s" % (filename, e))
+        sys.exit(1)
+
+
+def main():
+    rfcomm_id = get_next_rfcomm_id()
+    config = read_config("~/.config/bt-dun-connect.json")
+    dialler = BluetoothDialler(
+        rfcomm_id=rfcomm_id,
+        bt_addr=config["bluetooth_addr"],
+        bt_channel=config["bluetooth_channel"],
+        apn=config["apn"],
+    )
+    dialler.run()
+
+
+if __name__ == "__main__":
+    main()
+
+
+
diff --git a/hacks/dnsctl b/hacks/dnsctl
new file mode 100755 (executable)
index 0000000..368107b
--- /dev/null
@@ -0,0 +1,154 @@
+#!/usr/bin/env python3
+
+import argparse
+import datetime
+import os
+import subprocess
+import sys
+import re
+
+
+def increment_serial(line):
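+    # Zone serials follow the YYYYMMDDnn convention: bump nn for another
+    # change on the same day, otherwise restart at 00 for today's date.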
+    current_serial = re.search(r"\b\d+\b", line).group(0)
+
+    current = int(current_serial)
+    current_num = current % 100
+    current_date = (current - current_num) // 100
+    new_date = int(datetime.datetime.now().strftime("%Y%m%d"))
+    if current_date == new_date:
+        next_num = current_num + 1
+    else:
+        next_num = 0
+
+    if next_num >= 100:
+        raise ValueError("Too many serial changes today!")
+    new_serial = str(new_date) + str(next_num).zfill(2)
+    line = line.replace(current_serial, new_serial)
+
+    return line
+
+def replace_ip(line):
+    source_ip, source_port, dest_ip, dest_port = os.environ["SSH_CONNECTION"].split()
+    line = re.sub(R"\b\d\d?\d?\.\d\d?\d?\.\d\d?\d?\.\d\d?\d?\b", source_ip, line)
+    return line
+
+def update_dyndns(zonefile, dnslabel):
+    out = []
+    with open(zonefile, encoding="utf-8") as f:
+        for line in f:
+            if line.find("Serial") >= 0:
+                line = increment_serial(line)
+            elif line.find("DYNDNS") >= 0 and line.find(dnslabel) >= 0:
+                line = replace_ip(line)
+            out.append(line)
+
+    with open(zonefile, "w", encoding="utf-8") as f:
+        f.writelines(out)
+
+    reload_bind()
+
+
+def read_zonefile(zonefile):
+    with open(zonefile, encoding="utf-8") as f:
+        sys.stdout.write(f.read())
+
+
+def write_zonefile(zonefile):
+    data = sys.stdin.read()
+    if not data.strip().endswith("; END"):
+        print("Missing end of file marker -- ; END")
+        sys.exit(1)
+
+    with open(zonefile, "w", encoding="utf-8") as f:
+        f.write(data)
+
+    reload_bind()
+
+def tail_logs():
+    subprocess.check_call(["sudo", "journalctl", "-u", "nsd", "-f"])
+
+def reload_bind():
+    subprocess.check_call(["sudo", "systemctl", "reload", "nsd"])
+    print("nsd reloaded")
+
+
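+# Intended to run as an SSH forced command: the real arguments arrive in
+# SSH_ORIGINAL_COMMAND and are checked against the allow-list options
+# given on the command line (typically in authorized_keys).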
+def parse_ssh_args():
+    parser = argparse.ArgumentParser(description="Edit zone files")
+
+    parser.add_argument("--zonefile",
+        help="the zone file to operate on")
+
+
+    action_group = parser.add_mutually_exclusive_group(required=True)
+
+    action_group.add_argument("--logs", action="store_true",
+        help="show bind logs")
+
+    action_group.add_argument("--dyndns",
+        help="update the specified dnslabel with the SSH origin IP")
+
+    action_group.add_argument("--read", action="store_true",
+        help="print the zone file to stdout")
+
+    action_group.add_argument("--write", action="store_true",
+        help="save the contents of stdin to the zone file")
+
+    ssh_args = os.environ.get("SSH_ORIGINAL_COMMAND", "--help").split()[1:]
+    args = parser.parse_args(ssh_args)
+
+    if not args.logs and not args.zonefile:
+        print("Required parameter: --zonefile\n")
+        parser.print_help()
+        sys.exit(0)
+
+    return args
+
+def parse_cmdline_args():
+    parser = argparse.ArgumentParser(description="Edit zone files")
+
+    parser.add_argument("--allow-zonefile", required=True, action="append",
+        help="specify allowed zone files")
+
+    parser.add_argument("--allow-write", action="store_true",
+        help="allow --write option")
+
+    parser.add_argument("--allow-dyndns",
+        help="allow --dyndns option")
+
+    return parser.parse_args()
+
+def parse_args():
+    cmdline_args = parse_cmdline_args()
+    ssh_args = parse_ssh_args()
+
+    if ssh_args.zonefile and ssh_args.zonefile not in cmdline_args.allow_zonefile:
+        print("The specified zonefile is not on the allowed list:", cmdline_args.allow_zonefile)
+        sys.exit(1)
+
+    if ssh_args.dyndns and ssh_args.dyndns != cmdline_args.allow_dyndns:
+        print("Dynamic DNS is only allowed for:", cmdline_args.allow_dyndns)
+        sys.exit(1)
+
+    if ssh_args.write and not cmdline_args.allow_write:
+        print("Write to zonefile is not allowed")
+        sys.exit(1)
+
+    return ssh_args
+
+
+def main():
+    args = parse_args()
+
+    if args.logs:
+        tail_logs()
+    elif args.dyndns:
+        update_dyndns(args.zonefile, args.dyndns)
+    elif args.read:
+        read_zonefile(args.zonefile)
+    elif args.write:
+        write_zonefile(args.zonefile)
+    else:
+        assert False, "Bad action"
+
+if __name__ == "__main__":
+    main()
diff --git a/hacks/find-in-file b/hacks/find-in-file
new file mode 100755 (executable)
index 0000000..12ee05e
--- /dev/null
@@ -0,0 +1,32 @@
+#!/usr/bin/env python2
+
+import sys
+
+try:
+    needle = sys.argv[1]
+    haystack = sys.argv[2]
+except IndexError:
+    print >>sys.stderr, "Usage: %s needle haystack" % sys.argv[0]
+    sys.exit(1)
+
+
+f = open(needle)
+magic = f.read(1024)
+f.close()
+
+chunk_size = 32768
+f = open(haystack)
+count = 0
+buf = ""
+while True:
+    newbuf = f.read(chunk_size)
+    if not newbuf:
+        break
+    buf += newbuf
+    pos = buf.find(magic)
+    if pos >= 0:
+        print "found", count + pos
+    count += len(buf) - len(magic)
+    buf = buf[-len(magic):]
+f.close()
+
diff --git a/hacks/fix-openwrt-hairpin b/hacks/fix-openwrt-hairpin
new file mode 100755 (executable)
index 0000000..9403ffe
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+AP_HOSTS="$(awk '/^Host.*openwrt/ {print $2}' < ~/.ssh/config)"
+
+for host in $AP_HOSTS; do
+    ssh "$host" 'for f in /sys/devices/virtual/net/*/*wlan*/brport/hairpin_mode; do [ -f $f ] && echo 1 > "$f"; done'
+done
+
diff --git a/hacks/git-no-husky b/hacks/git-no-husky
new file mode 100755 (executable)
index 0000000..7ed9144
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+if [ "$1" = "config" ] && [ -z "${GIT_CONFIG_ENABLE}" ] && ! [[ "$2" =~ --get ]]; then
+    echo "Ignoring git $*"
+    exit 0
+fi
+
+/usr/bin/git "$@"
diff --git a/hacks/gnome-shell-raise-window b/hacks/gnome-shell-raise-window
new file mode 100755 (executable)
index 0000000..5c5c166
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -eu
+
+if [ -z "${1:-}" ]; then
+    echo "Usage: $0 title.endsWith('Page Title - Mozilla Firefox')"
+    exit 1
+fi
+
+MATCHER="$1"
+
+dbus-send --session --type=method_call --dest=org.gnome.Shell /org/gnome/Shell org.gnome.Shell.Eval string:"
+const windows = global.display.get_tab_list(0, null);
+const matching = windows.filter((win) => {
+  const title = win.get_title();
+  return $MATCHER;
+});
+
+if (matching.length > 0) {
+  const win = matching[0];
+  win.get_workspace().activate_with_focus(win, true);
+  win.activate(0);
+}
+"
diff --git a/hacks/lib-ext-backup b/hacks/lib-ext-backup
new file mode 100644 (file)
index 0000000..4a1d119
--- /dev/null
@@ -0,0 +1,130 @@
+#!/bin/bash
+
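+# Helpers for backing up ZFS pools to LUKS-encrypted external disks.
+# Note: main calls a snapshot_sync function that is not defined here; it
+# is presumably provided by the script that sources this library.
+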
+function cryptsetup_open {
+    for DEVICE in /dev/disk/by-partlabel/ext*backup*; do
+        if ! [ -L "$DEVICE" ]; then
+            continue
+        fi
+        DISKNAME="$(basename "$DEVICE")"
+        CRYPTNAME="crypt-$DISKNAME"
+        if [ -L "/run/ext-backup-crypt/$CRYPTNAME" ]; then
+            continue
+        fi
+        echo "> cryptsetup luksOpen $DEVICE $CRYPTNAME"
+        cryptsetup luksOpen "$DEVICE" "$CRYPTNAME" --key-file "/etc/lukskeys/${DISKNAME}"
+        mkdir -p /run/ext-backup-crypt/
+        ln -sf "/dev/mapper/$CRYPTNAME" /run/ext-backup-crypt/
+    done
+}
+
+function cryptsetup_close {
+    for CRYPTDEVICE in /dev/mapper/crypt-ext*backup*; do
+        if ! [ -L "$CRYPTDEVICE" ]; then
+            continue
+        fi
+        CRYPTNAME="$(basename "$CRYPTDEVICE")"
+        echo "> cryptsetup luksClose $CRYPTNAME"
+        cryptsetup luksClose "$CRYPTNAME"
+        rm -f "/run/ext-backup-crypt/$CRYPTNAME"
+    done
+}
+
+function pool_import {
+    echo "> zpool import -d /run/ext-backup-crypt -a"
+    zpool import -d /run/ext-backup-crypt -a
+
+    ZPOOLNAME="$(zpool list -H -o name|grep 'ext.*backup'|head -n1)"
+    if [ -z "$ZPOOLNAME" ]; then
+        echo "Error! Could not find pool!"
+        return 1
+    fi
+
+    echo "> Found: $ZPOOLNAME"
+}
+
+function pool_export {
+    echo "> zpool export $ZPOOLNAME"
+    for _ in $(seq 60); do
+        zpool export "$ZPOOLNAME" && break
+        sleep 1
+    done
+}
+
+function pool_setup {
+    zpool set failmode=wait "$ZPOOLNAME"
+    zfs set mountpoint="/mnt/$ZPOOLNAME" "$ZPOOLNAME"
+    chmod 0700 "/mnt/$ZPOOLNAME"
+    zfs set compression=lz4 "$ZPOOLNAME"
+    zfs set devices=off "$ZPOOLNAME"
+    zfs set exec=off "$ZPOOLNAME"
+    zfs set setuid=off "$ZPOOLNAME"
+    zfs set xattr=sa "$ZPOOLNAME"
+    zfs set acltype=posixacl "$ZPOOLNAME"
+}
+
+function pool_maybe_scrub {
+    local now
+    local last_scrub
+    local last_scrub_days_ago
+    now="$(date +%s)"
+    last_scrub="$(zpool status "$ZPOOLNAME"|sed -nE 's/scan: scrub repaired.* on (.*)/\1/p')"
+    last_scrub_days_ago=$(((now - $(date --date="${last_scrub:-1970-01-01}" +%s)) / 86400))
+
+    if [ $last_scrub_days_ago -lt 7 ]; then
+        return
+    fi
+
+    echo "> zpool scrub $ZPOOLNAME"
+    zpool scrub "$ZPOOLNAME"
+
+    while zpool status "$ZPOOLNAME" | awk '/state: ONLINE|scan: scrub in progress/ {x++} END {exit x-2}'; do
+        echo -n .
+        sleep 60
+    done
+    echo " done"
+
+    if zpool list -H -o health "$ZPOOLNAME" | grep -qv ONLINE; then
+        zpool status -v "$ZPOOLNAME"
+        return 1
+    fi
+}
+
+function syncoidw {
+    echo "> syncoid $1"
+    syncoid "$@" --identifier "$ZPOOLNAME"
+}
+
+function snapshot_cleanup {
+    echo "> Cleaning snapshots"
+    sanoid --configdir=/etc/sanoid/ext-backup --verbose --prune-snapshots
+
+    zfs list -t snapshot -H -o name -r "$ZPOOLNAME" \
+        | grep -F @syncoid_ \
+        | grep -Fv "@syncoid_$ZPOOLNAME" \
+        | xargs -rn1 zfs destroy -v
+}
+
+function snapshot_convert_to_bookmarks {
+    local fs
+    local snap
+
+    for fs in "$@"; do
+        for snap in $(zfs list -H -o name -t snapshot -r "$fs"); do
+            echo "> zfs bookmark $snap"
+            zfs bookmark "$snap" "${snap/@/#}"
+            zfs destroy "$snap"
+        done
+    done
+}
+
+function main {
+    zfs get all -s local -H > /root/zfs-props.txt
+    cryptsetup_open
+    pool_import
+    pool_setup
+    snapshot_sync
+    snapshot_cleanup
+    pool_maybe_scrub
+    pool_export
+    cryptsetup_close
+}
diff --git a/hacks/magnet b/hacks/magnet
new file mode 100755 (executable)
index 0000000..62b8903
--- /dev/null
@@ -0,0 +1,12 @@
+#!/bin/bash
+
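+# Wrap a magnet URI in a minimal bencoded dictionary so that torrent
+# clients supporting the "magnet-uri" key (e.g. rtorrent) load it as a
+# .torrent file.
+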
+if ! [[ "$1" =~ xt=urn:btih:([^&/]+) ]]; then
+    echo "Invalid magnet link"
+    exit 1
+fi
+
+filename="$(echo "$1" | sed -e 's/.*dn=//' -e 's/&.*//').torrent"
+echo "d10:magnet-uri${#1}:${1}e" > "$filename"
+chmod 0644 "$filename"
+
+echo "created $filename"
diff --git a/hacks/make-persistent-journal b/hacks/make-persistent-journal
new file mode 100755 (executable)
index 0000000..5f25b62
--- /dev/null
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -eux
+
+mkdir /var/log/journal
+chmod 2755 /var/log/journal
+chgrp systemd-journal /var/log/journal
+setfacl -m g:adm:rx /var/log/journal
+setfacl -d -m g:adm:rx /var/log/journal
+chattr -R +C /var/log/journal
+systemctl restart systemd-journald
diff --git a/hacks/multiboot-setup b/hacks/multiboot-setup
new file mode 100755 (executable)
index 0000000..30bbe58
--- /dev/null
@@ -0,0 +1,273 @@
+#!/bin/bash
+
+set -eu
+
+PARTITION_LABEL_ESP="multibt-esp"
+PARTITION_LABEL_DATA="multiboot"
+MULTIBOOT_MNT="${MULTIBOOT_MNT:-/mnt/multiboot}"
+
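+# Build a multiboot USB stick: "format" partitions the disk, "grub"
+# installs BIOS and EFI bootloaders, and the generated grub.cfg
+# loopback-boots any supported ISOs copied to / or /isos on the data
+# partition.
+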
+function cmd_format {
+    if [ ! -b "${1:-}" ]; then
+        echo "Usage: $0 format /dev/sdX"
+        exit 1
+    fi
+    set -x
+
+    sudo -k
+    DISK_DEVICE="$1"
+    print_sfdisk_command | sudo sfdisk --wipe always --wipe-partitions always "$DISK_DEVICE"
+    udevadm settle
+    sudo mkfs.vfat -n "${PARTITION_LABEL_ESP}" "/dev/disk/by-partlabel/${PARTITION_LABEL_ESP}"
+    sudo mkfs.ext4 -L "${PARTITION_LABEL_DATA}" -E "root_owner=$(id -u):$(id -g)" "/dev/disk/by-partlabel/${PARTITION_LABEL_DATA}"
+}
+
+function print_sfdisk_command {
+    cat <<EOT
+label: gpt
+unit: sectors
+
+size=10M, type=uefi, name="$PARTITION_LABEL_ESP"
+size=1M, type=21686148-6449-6E6F-744E-656564454649
+type=linux, name="$PARTITION_LABEL_DATA"
+EOT
+}
+
+function cmd_grub {
+    DISK_DEVICE="$(findmnt -n -o source "$MULTIBOOT_MNT" | sed 's/[0-9]*$//')"
+    if [ ! -b "$DISK_DEVICE" ]; then
+        echo "ERROR! Could not find disk to install bootloader. Try using mount."
+        exit 1
+    fi
+    set -x
+
+    sudo -k
+    install_grub_bios
+    install_grub_efi
+    install_grub_cfg
+}
+
+function cmd_grubcfg {
+    set -x
+    install_grub_cfg
+}
+
+function install_grub_bios {
+    sudo grub-install \
+        --target=i386-pc \
+        --boot-directory="$MULTIBOOT_MNT" \
+        "$DISK_DEVICE"
+}
+
+function install_grub_efi {
+    for arch in i386-efi x86_64-efi; do
+        sudo grub-install \
+            --target="$arch" \
+            --no-nvram \
+            --removable \
+            --efi-directory="${MULTIBOOT_MNT}/EFI" \
+            --boot-directory="$MULTIBOOT_MNT" \
+            "$DISK_DEVICE"
+    done
+}
+
+function install_grub_cfg {
+    if [[ -w "${MULTIBOOT_MNT}/grub/" ]]; then
+        # We already have write access, no need to use sudo
+        print_grub_cfg > "${MULTIBOOT_MNT}/grub/grub.cfg"
+    else
+        print_grub_cfg | sudo tee "${MULTIBOOT_MNT}/grub/grub.cfg" > /dev/null
+    fi
+}
+
+function cmd_mount {
+    set -x
+
+    while sudo umount "/dev/disk/by-partlabel/${PARTITION_LABEL_ESP}" &> /dev/null; do true; done
+    while sudo umount "/dev/disk/by-partlabel/${PARTITION_LABEL_DATA}" &> /dev/null; do true; done
+    sudo mkdir -p "$MULTIBOOT_MNT"
+    sudo mount "/dev/disk/by-partlabel/${PARTITION_LABEL_DATA}" "$MULTIBOOT_MNT"
+    mkdir -p "${MULTIBOOT_MNT}/EFI"
+    sudo mount "/dev/disk/by-partlabel/${PARTITION_LABEL_ESP}" "${MULTIBOOT_MNT}/EFI"
+}
+
+function cmd_umount {
+    set -x
+
+    sudo umount "${MULTIBOOT_MNT}/EFI" || true
+    sudo umount "$MULTIBOOT_MNT" || true
+    sudo rmdir "$MULTIBOOT_MNT"
+}
+
+function cmd_freedos {
+    set -x
+
+    local SYSLINUX_VERSION="6.03"
+    local SYSLINUX_URL="https://www.kernel.org/pub/linux/utils/boot/syslinux/syslinux-${SYSLINUX_VERSION}.tar.gz"
+    local FREEDOS_URL="https://www.ibiblio.org/pub/micro/pc-stuff/freedos/files/distributions/1.2/official/FD12LITE.zip"
+
+    curl -fL "$SYSLINUX_URL" | \
+        tar xz --no-same-owner --strip-components=3 -C "$MULTIBOOT_MNT" \
+        "syslinux-${SYSLINUX_VERSION}/bios/memdisk/memdisk"
+
+    curl -fL "$FREEDOS_URL" > "${MULTIBOOT_MNT}/FD12LITE.zip"
+}
+
+function cmd_memtest {
+    curl -fL -o "${MULTIBOOT_MNT}/memtest.tmp.zip" "https://memtest.org/download/v6.20/mt86plus_6.20_64.grub.iso.zip"
+    unzip -d "$MULTIBOOT_MNT" "${MULTIBOOT_MNT}/memtest.tmp.zip"
+    rm "${MULTIBOOT_MNT}/memtest.tmp.zip"
+}
+
+function print_grub_cfg {
+    cat <<EOT
+search --set=root --label $PARTITION_LABEL_DATA
+
+insmod all_video
+insmod gfxterm
+loadfont unicode
+set gfxmode=1024x768
+terminal_output gfxterm
+
+insmod part_msdos
+insmod progress
+insmod regexp
+
+set maybe_quiet='quiet splash'
+set maybe_to_ram=''
+
+menuentry "! Copy ISO image to ram before booting" {
+  # copytoram is used by arch
+  # toram is used by casper based images (tails, Ubuntu, etc)
+  set maybe_to_ram="copytoram toram"
+}
+
+menuentry "! Verbose" {
+  set maybe_quiet=''
+}
+
+function setup_arch {
+  menuentry "\$1" {
+    loopback loop \$1
+    echo "Loading kernel..."
+    linux (loop)/arch/boot/x86_64/vmlinuz-* img_label=${PARTITION_LABEL_DATA} img_loop=\$1 archisobasedir=arch earlymodules=loop \$maybe_to_ram \$maybe_quiet
+    echo "Loading initrd (and microcode if they exist)..."
+    initrd (loop)/arch/boot/*.img (loop)/arch/boot/x86_64/initramfs-*.img
+  }
+}
+for iso in /archlinux-*.iso /isos/archlinux-*.iso; do
+  if [ -f "\$iso" ]; then
+    setup_arch \$iso
+  fi
+done
+
+function setup_debian {
+  menuentry "\$1" {
+    loopback loop \$1
+    echo "Loading kernel..."
+    linux (loop)/live/vmlinuz* boot=live components findiso=\$1 \$maybe_to_ram \$maybe_quiet
+    echo "Loading initrd..."
+    initrd (loop)/live/initrd*
+  }
+}
+for iso in /debian-live-*.iso /isos/debian-live-*.iso; do
+  if [ -f "\$iso" ]; then
+    setup_debian \$iso
+  fi
+done
+
+if [ -f /memdisk -a -f /FD12LITE.zip ]; then
+  menuentry /FD12LITE.zip {
+    if [ \${grub_platform} = pc ]; then
+      linux16 /memdisk raw
+      initrd16 /FD12LITE.zip
+    else
+      echo "FreeDOS only works with BIOS boot."
+      sleep 3
+    fi
+  }
+fi
+
+
+function setup_memtest {
+  menuentry "\$1" {
+    loopback loop \$1
+    if [ \${grub_platform} = pc ]; then
+      linux (loop)/boot/memtest
+    else
+      linux (loop)/EFI/BOOT/memtest
+    fi
+  }
+}
+for iso in /mt86plus*.grub.iso /isos/mt86plus*.grub.iso; do
+  if [ -f "\$iso" ]; then
+    setup_memtest \$iso
+  fi
+done
+
+
+function setup_fedora {
+  menuentry "\$1" {
+    loopback loop \$1
+    probe -s iso_label -l (loop)
+    echo "Loading kernel..."
+    linux (loop)/images/pxeboot/vmlinuz root=live:CDLABEL=\$iso_label rd.live.image iso-scan/filename=\$1 \$maybe_quiet
+    echo "Loading initrd..."
+    initrd (loop)/images/pxeboot/initrd.img
+  }
+}
+for iso in /Fedora-Workstation-Live-*.iso /isos/Fedora-Workstation-Live-*.iso; do
+  if [ -f "\$iso" ]; then
+    setup_fedora \$iso
+  fi
+done
+
+function setup_ubuntu {
+  menuentry "\$1" {
+    loopback loop \$1
+    set maybe_layerfs_path=''
+    for f in minimal.standard.live.squashfs; do
+      if [ -f "(loop)/casper/\$f" ]; then
+        echo " \$f"
+        set maybe_layerfs_path="layerfs-path=\$f"
+        echo "Setting \$maybe_layerfs_path"
+      fi
+    done
+    echo "Loading kernel..."
+    linux (loop)/casper/vmlinuz* \$maybe_layerfs_path boot=casper iso-scan/filename=\$1 \$maybe_to_ram \$maybe_quiet
+    echo "Loading initrd..."
+    initrd (loop)/casper/initrd*
+  }
+}
+for iso in /ubuntu-*-desktop-*.iso /isos/ubuntu-*-desktop-*.iso; do
+  if [ -f "\$iso" ]; then
+    setup_ubuntu \$iso
+  fi
+done
+
+function setup_tails {
+  menuentry "\$1" {
+    loopback loop \$1
+    echo "Loading kernel..."
+    linux (loop)/live/vmlinuz* initrd=/live/initrd.img boot=live config iso-scan/filename=\$1 findiso=\$1 nopersistence noprompt timezone=Etc/UTC noautologin module=Tails slab_nomerge slub_debug=FZP mce=0 vsyscall=none page_poison=1 init_on_free=1 mds=full,nosmt \$maybe_to_ram \$maybe_quiet
+    echo "Loading initrd..."
+    initrd (loop)/live/initrd*
+  }
+}
+for iso in /tails-*.iso /isos/tails-*.iso; do
+  if [ -f "\$iso" ]; then
+    setup_tails \$iso
+  fi
+done
+
+EOT
+}
+
+CMD="cmd_${1:-}"
+shift || true
+
+if [ "$(type -t -- "$CMD")" = "function" ]; then
+    "${CMD}" "$@"
+else
+    echo "Usage: $0 [format|mount|grub|grubcfg|freedos|memtest|umount]"
+    exit 1
+fi
diff --git a/hacks/opal-card-tool b/hacks/opal-card-tool
new file mode 100755 (executable)
index 0000000..26fc6c7
--- /dev/null
@@ -0,0 +1,511 @@
+#!/usr/bin/env python3
+
+import argparse
+import csv
+import datetime
+import getpass
+import itertools
+import lxml.html
+import os
+import pickle
+import requests
+import subprocess
+import sys
+import tempfile
+import time
+
+
+VERSION = 3
+
+# Respect XDG_CACHE_HOME if set, but always keep our own subdirectory
+CACHE_DIR = os.path.join(os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")), "opal-card-tool")
+PICKLE_FILE = os.path.join(CACHE_DIR, "pickle")
+
+USER_AGENT = "Mozilla/5.0 (X11; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0"
+OPAL_BASE = "https://www.opal.com.au"
+LOGIN_URL = OPAL_BASE + "/login/registeredUserUsernameAndPasswordLogin"
+CARD_DETAILS_URL = OPAL_BASE + "/registered/getJsonCardDetailsArray"
+TRANSACTION_LIST_URL = OPAL_BASE + "/registered/opal-card-transactions/opal-card-activities-list?AMonth=-1&AYear=-1&cardIndex=%d&pageIndex=%d"
+
+
+
+def stringify(el):
+    return " ".join(t.strip() for t in el.itertext()).strip()
+
+def get_first(l):
+    for x in l:
+        return x
+
+def is_weekday(d):
+    return d.isoweekday() <= 5
+
+def n_days_ago(days):
+    d = datetime.datetime.now() - datetime.timedelta(days=days)
+    d = d.replace(hour=0, minute=0, second=0, microsecond=0)
+    return d
+
+
+class FatalError(Exception):
+    pass
+
+class Transaction(object):
+    pass
+
+class Card(object):
+    def __init__(self):
+        self.transaction_list = []
+
+    def get_max_transaction(self):
+        if self.transaction_list:
+            return self.transaction_list[0].number
+        else:
+            return -1
+
+    def add_transactions(self, l):
+        self.transaction_list = l + self.transaction_list
+
+class Opal(object):
+    def __init__(self, username, password):
+        self.version = VERSION
+        self.username = username
+        self.password = password
+        self.cards = []
+
+        self.init()
+
+    def init(self):
+        self.session = requests.Session()
+        self.session.headers["User-Agent"] = USER_AGENT
+
+    def login(self):
+        print("Attempting login ", end="", flush=True)
+        for i in range(10):
+            print(".", end="", flush=True)
+            self.login_once()
+            if self.check_login():
+                print(" ok")
+                return
+
+            time.sleep(1)
+
+        raise FatalError("Login failed after 10 attempts")
+
+    def login_once(self):
+        r = self.session.post(LOGIN_URL, {
+            "h_username": self.username,
+            "h_password": self.password,
+        })
+        if not r.ok:
+            raise Exception("Failed to login, error code: %d" % r.status_code)
+
+        json = r.json()
+        if json["errorMessage"]:
+            raise Exception("Failed to login: %s" % json["errorMessage"])
+
+    def check_login(self):
+        r = self.session.get(CARD_DETAILS_URL)
+        try:
+            r.json()
+            return True
+        except ValueError:
+            return False
+
+    def load(self):
+        self.load_cards()
+        for card in self.cards:
+            self.load_transactions(card)
+
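+    # card_number may be a list index (0, 1, ...) or a full Opal card number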
+    def resolve_card_number(self, card_number):
+        if int(card_number) < len(self.cards):
+            return self.cards[int(card_number)].number
+        else:
+            return card_number
+
+    def get_transaction_list_for_card(self, card_number):
+        for card in self.cards:
+            if card.number == card_number:
+                return card.transaction_list
+
+        return []
+
+    def load_cards(self):
+        r = self.session.get(CARD_DETAILS_URL)
+        if not r.ok:
+            raise Exception("Failed to login, error code: %d" % r.status_code)
+
+        for index, card_json in enumerate(r.json()):
+            card_number = card_json["cardNumber"]
+
+            for card in self.cards:
+                if card.number == card_number:
+                    break
+            else:
+                card = Card()
+                self.cards.append(card)
+
+            card.number = card_number
+            card.name = card_json["cardNickName"]
+            card.index = index
+
+    def load_transactions(self, card):
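+        # Transaction pages arrive newest-first; stop paging once we reach a
+        # transaction number we already have cached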
+        print("Loading transactions for", card.number, "", end="", flush=True)
+        max_transaction = card.get_max_transaction()
+        transaction_list = []
+
+        for page in itertools.count(1):
+            print(".", end="", flush=True)
+            transaction_page = self.fetch_transaction_page(card.index, page)
+            continue_paging = False
+
+            for transaction in transaction_page:
+                if transaction.number <= max_transaction:
+                    continue_paging = False
+                    break
+
+                transaction_list.append(transaction)
+                continue_paging = True
+
+            if not continue_paging:
+                break
+
+        print(" done")
+        card.add_transactions(transaction_list)
+
+    def parse_transaction(self, cells):
+        t = Transaction()
+        t.number = int(stringify(cells["transaction number"]))
+        t.timestamp = datetime.datetime.strptime(stringify(cells["date/time"]), "%a %d/%m/%Y %H:%M")
+        t.mode = get_first(cells["mode"].xpath("img/@alt"))
+        t.details = stringify(cells["details"])
+        t.journey_number = stringify(cells["journey number"])
+        t.fare_applied = stringify(cells["fare applied"])
+        t.fare = stringify(cells["fare"])
+        t.fare_discount = stringify(cells["discount"])
+        t.amount = stringify(cells["amount"])
+        return t
+
+    def fetch_transaction_page(self, card, page):
+        url = TRANSACTION_LIST_URL % (card, page)
+        r = self.session.get(url)
+        if not r.ok:
+            raise Exception("Failed to fetch transactions, error code: %d" % r.status_code)
+
+        doc = lxml.html.fromstring(r.text)
+        headers = [stringify(th).lower() for th in doc.xpath("//table/thead//th")]
+
+        if not headers:
+            return  # bare return ends the generator
+
+        for tr in doc.xpath("//table/tbody/tr"):
+            try:
+                yield self.parse_transaction(dict(zip(headers, tr.getchildren())))
+            except Exception:
+                print("Failed to parse:", headers, lxml.html.tostring(tr), file=sys.stderr)
+                raise
+
+
+class CommuterGraph(object):
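+    # gnuplot reads whitespace-separated columns, so write the CSV with a space delimiter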
+    class gnuplot_dialect(csv.excel):
+        delimiter = " "
+
+    def __init__(self):
+        self.data_am_csv, self.data_am_file = self.new_csv()
+        self.data_pm_csv, self.data_pm_file = self.new_csv()
+        self.plot_file = self.new_tempfile()
+        self.files = [self.data_am_file, self.data_pm_file, self.plot_file]
+
+        self.xrange_start = None
+        self.xrange_end = None
+
+    def is_plottable(self):
+        return self.xrange_start is not None and self.xrange_end is not None
+
+    def graph(self, transaction_list):
+        try:
+            self.write_points(transaction_list)
+            if not self.is_plottable():
+                print("No transactions!", file=sys.stderr)
+                return
+            self.write_plot_command()
+            self.flush_files()
+            self.run_gnuplot()
+        finally:
+            self.cleanup()
+
+    def new_tempfile(self):
+        return tempfile.NamedTemporaryFile(
+            mode="w",
+            encoding="utf-8",
+            prefix="opal-card-tool-",
+            delete=True,
+        )
+
+    def new_csv(self):
+        f = self.new_tempfile()
+        out = csv.writer(f, dialect=self.gnuplot_dialect)
+        return out, f
+
+    def update_xrange(self, ts):
+        if self.xrange_start is None or ts < self.xrange_start:
+            self.xrange_start = ts
+        if self.xrange_end is None or ts > self.xrange_end:
+            self.xrange_end = ts
+
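+    # x is the calendar date; y maps the time of day onto a fixed dummy date
+    # (2000-01-01) so gnuplot can treat both axes as time data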
+    def generate_point(self, transaction):
+        ts = transaction.timestamp
+        x_date = ts.strftime("%Y-%m-%dT00:00:00")
+        y_time = ts.strftime("2000-01-01T%H:%M:00")
+        y_label = ts.strftime("%H:%M")
+        return [x_date, y_time, y_label]
+
+    def write_point(self, ts, point):
+        if ts.time() < datetime.time(12):
+            out_csv = self.data_am_csv
+        else:
+            out_csv = self.data_pm_csv
+
+        out_csv.writerow(point)
+
+    def write_points(self, transaction_list):
+        for transaction in transaction_list:
+            if not self.is_commuter_transaction(transaction):
+                continue
+
+            self.update_xrange(transaction.timestamp)
+            point = self.generate_point(transaction)
+            self.write_point(transaction.timestamp, point)
+
+    def is_commuter_transaction(self, transaction):
+        if not is_weekday(transaction.timestamp):
+            return False
+        if transaction.details.startswith("Auto top up"):
+            return False
+        return True
+
+    def write_plot_command(self):
+        d = {
+            "data_am_filename": self.data_am_file.name,
+            "data_pm_filename": self.data_pm_file.name,
+            "xrange_start": self.xrange_start - datetime.timedelta(hours=24),
+            "xrange_end": self.xrange_end + datetime.timedelta(hours=24),
+        }
+        self.plot_file.write(R"""
+set timefmt '%Y-%m-%dT%H:%M:%S'
+
+set xlabel 'Date'
+set xdata time
+set format x '%a %d'
+set xtics 86400 scale 1.0,0.0
+set xrange [ '{xrange_start}' : '{xrange_end}' ]
+
+set ylabel 'Time'
+set ydata time
+set format y '%H:%M'
+set yrange [ '2000-01-01T06:00:00' : '2000-01-01T23:00:00' ]
+
+set key box opaque 
+set terminal qt \
+    persist \
+    title 'opal-card-tool graph' \
+    font 'Sans,10' \
+    enhanced \
+    size 1000,700
+
+plot \
+    '{data_pm_filename}' \
+        using 1:2 \
+        with line \
+        title 'Afternoon departure time' \
+    , \
+    '{data_pm_filename}' \
+        using 1:2:3 \
+        with labels \
+        offset 0,1 \
+        notitle \
+    , \
+    '{data_am_filename}' \
+        using 1:2 \
+        with line \
+        title 'Morning departure time' \
+    , \
+    '{data_am_filename}' \
+        using 1:2:3 \
+        with labels \
+        offset 0,1 \
+        notitle \
+""".format(**d))
+
+    def flush_files(self):
+        for f in self.files:
+            f.flush()
+
+    def cleanup(self):
+        for f in self.files:
+            try:
+                f.close()
+            except Exception:
+                pass
+
+    def run_gnuplot(self):
+        subprocess.check_call([
+            "gnuplot",
+            self.plot_file.name,
+        ])
+
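+# Assumes transaction_list is ordered newest-first, as built by load_transactions.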
+def restrict_days(transaction_list, num_days):
+    oldest_date = n_days_ago(num_days)
+    for transaction in transaction_list:
+        if transaction.timestamp < oldest_date:
+            return
+        yield transaction
+
+def graph_commuter(transaction_list):
+    g = CommuterGraph()
+    g.graph(transaction_list)
+
+def print_transaction_list(transaction_list):
+    headers = []
+    headers.extend(["number", "timestamp"])
+    headers.extend(h for h in sorted(transaction_list[0].__dict__.keys()) if h not in headers)
+
+    out = csv.DictWriter(sys.stdout, headers)
+    out.writeheader()
+    for transaction in transaction_list:
+        out.writerow(transaction.__dict__)
+
+def print_cards(opal):
+    for i, card in enumerate(opal.cards):
+        print("Card", i)
+        print("  number:", card.number)
+        print("  name:", card.name)
+        print("  transactions:", len(card.transaction_list))
+        print()
+
+def try_unpickle():
+    if not os.path.isfile(PICKLE_FILE):
+        return None
+
+    with open(PICKLE_FILE, "rb") as f:
+        return pickle.load(f)
+
+def save_pickle(opal):
+    if not os.path.isdir(CACHE_DIR):
+        os.makedirs(CACHE_DIR)
+    with open(PICKLE_FILE, "wb") as f:
+        pickle.dump(opal, f)
+
+
+
+def upgrade_opal_v2(opal):
+    # example upgrade!
+    opal.version = 3
+
+def upgrade_opal(opal):
+    while opal.version < VERSION:
+        print("Upgrading from version", opal.version, file=sys.stderr)
+        upgrade_func = globals()["upgrade_opal_v%d" % opal.version]
+        upgrade_func(opal)
+
+
+
+def load_opal():
+    opal = try_unpickle()
+
+    if opal:
+        upgrade_opal(opal)
+        opal.init()
+    else:
+        username = input("Username: ")
+        password = getpass.getpass()
+        opal = Opal(username, password)
+
+    save_pickle(opal)
+    return opal
+
+def do_load():
+    opal = load_opal()
+    opal.login()
+    opal.load()
+    save_pickle(opal)
+
+def do_show_cards():
+    opal = load_opal()
+    print_cards(opal)
+    save_pickle(opal)
+
+def do_print(args):
+    opal = load_opal()
+
+    if args.card_number:
+        card_number = args.card_number
+    else:
+        card_number = 0
+    card_number = opal.resolve_card_number(card_number)
+
+    if args.num_days:
+        num_days = int(args.num_days)
+    elif args.graph_commuter:
+        num_days = 14
+    else:
+        num_days = 365
+
+    transaction_list = opal.get_transaction_list_for_card(card_number)
+    transaction_list = list(restrict_days(transaction_list, num_days))
+
+    if not transaction_list:
+        print("No transactions!", file=sys.stderr)
+        return
+
+    if args.show_transactions:
+        print_transaction_list(transaction_list)
+    elif args.graph_commuter:
+        graph_commuter(transaction_list)
+    else:
+        print("Missing display function!", file=sys.stderr)
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Opal card activity fetcher")
+
+    parser.add_argument("--num-days",
+        help="restrict to NUM_DAYS of output"
+    )
+    parser.add_argument("--card-number",
+        help="Opal card number or index (eg: 0,1,etc"
+    )
+
+    group = parser.add_mutually_exclusive_group(required=True)
+    group.add_argument("--load", action="store_true",
+        help="load any new data from the Opal website"
+    )
+    group.add_argument("--show-cards", action="store_true",
+        help="show a list of cards"
+    )
+    group.add_argument("--show-transactions", action="store_true",
+        help="show transactions for card"
+    )
+    group.add_argument("--graph-commuter", action="store_true",
+        help="draw commuter graph for card with gnuplot"
+    )
+
+    args = parser.parse_args()
+
+    return args
+
+def main():
+    args = parse_args()
+
+    if args.load:
+        do_load()
+        return
+
+    elif args.show_cards:
+        do_show_cards()
+        return
+
+    else:
+        do_print(args)
+
+if __name__ == "__main__":
+    try:
+        main()
+    except (KeyboardInterrupt, BrokenPipeError) as e:
+        print("Exiting:", e, file=sys.stderr)
+        sys.exit(1)
+
diff --git a/hacks/rename-by-date b/hacks/rename-by-date
new file mode 100755 (executable)
index 0000000..5424aa8
--- /dev/null
@@ -0,0 +1,139 @@
+#!/usr/bin/python3
+
+import argparse
+import datetime
+import os
+import subprocess
+import sys
+
+
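+# External tools: ffprobe (from ffmpeg) extracts .mp4 timestamps,
+# exiv2 extracts .jpg EXIF timestamps.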
+def read_directories(src_directories):
+    result = []
+    for src_directory in src_directories:
+        for root, dirnames, filenames in os.walk(src_directory):
+            for filename in filenames:
+                filename = os.path.join(root, filename)
+                result.append(filename)
+    result.sort()
+    return result
+
+def get_timestamp(filename):
+    ext = os.path.splitext(filename.lower())[1]
+
+    if ext == ".jpg":
+        return get_jpg_timestamp(filename)
+
+    if ext == ".mp4":
+        return get_mp4_timestamp(filename)
+
+    raise NotImplementedError("Unsupported extension: " + ext)
+
+def get_mp4_timestamp(filename):
+    output = subprocess.check_output([
+        "ffprobe", filename, "-show_format", "-v", "quiet",
+    ]).decode("utf-8")
+    line = [line for line in output.split("\n") if line.startswith("TAG:creation_time=")][0]
+    value = line.split("=")[1].split(".")[0]
+    return datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S")
+
+def get_jpg_timestamp(filename):
+    output = subprocess.check_output([
+        "exiv2", "-pt", "-g", "Exif.Photo.DateTimeOriginal", filename
+    ]).decode("utf-8")
+    first_line = output.split("\n")[0]
+    timestamp = " ".join(first_line.split()[-2:])
+    return datetime.datetime.strptime(timestamp, "%Y:%m:%d %H:%M:%S")
+
+class FilesByDate(object):
+    def __init__(self, dest, src_directories):
+        self.dest = dest
+        self.src_directories = src_directories
+
+    def plan(self):
+        sorted_filenames = self.get_sorted_filenames()
+
+        for i, filename in enumerate(sorted_filenames, 1):
+            yield filename, self.get_new_filename(filename, i)
+
+    def get_sorted_filenames(self):
+        src_filenames = read_directories(self.src_directories)
+
+        ts_filenames = []
+        for filename in src_filenames:
+            timestamp = get_timestamp(filename)
+            ts_filenames.append((timestamp, filename))
+        ts_filenames.sort()
+
+        return [filename for _, filename in ts_filenames]
+
+    def get_new_filename(self, orig_filename, i):
+        prefix = "S" + str(i).zfill(3) + "_"
+        orig_filename = os.path.basename(orig_filename)
+        return os.path.join(self.dest, prefix + orig_filename)
+
+class DirectoryFanout(object):
+    def __init__(self, dest, src_directories):
+        self.dest = dest
+        self.src_directories = src_directories
+
+    def plan(self):
+        src_filenames = read_directories(self.src_directories)
+
+        for filename in src_filenames:
+            timestamp = get_timestamp(filename)
+            yield filename, self.get_new_filename(filename, timestamp)
+
+    def get_new_filename(self, orig_filename, timestamp):
+        prefix = timestamp.strftime("%Y-%m-%d") + "/"
+        orig_filename = os.path.basename(orig_filename)
+        return os.path.join(self.dest, prefix + orig_filename)
+
+def print_plan(plan):
+    for orig_filename, new_filename in plan:
+        print("    ", orig_filename, "->", new_filename)
+
+def execute_plan(plan):
+    for orig_filename, new_filename in plan:
+        os.makedirs(os.path.dirname(new_filename), exist_ok=True)
+        os.link(orig_filename, new_filename)
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Relink photos based on EXIF dates")
+
+    parser.add_argument("--dry-run", action="store_true")
+
+    parser.add_argument("dest", nargs=1)
+    parser.add_argument("src", nargs="+")
+
+    action_group = parser.add_mutually_exclusive_group(required=True)
+
+    action_group.add_argument("--directory-fanout", action="store_true",
+        help="Create directories with names like 2015-01-01, place files into them")
+
+    action_group.add_argument("--rename-files", action="store_true",
+        help="Rename files from different cameras based on timestamps")
+
+    args = parser.parse_args()
+    args.dest = args.dest[0]
+    return args
+
+def main():
+    args = parse_args()
+
+    def planner():
+        raise NotImplementedError()
+
+    if args.directory_fanout:
+        planner = DirectoryFanout(args.dest, args.src).plan
+    elif args.rename_files:
+        planner = FilesByDate(args.dest, args.src).plan
+
+    plan = planner()
+    if args.dry_run:
+        print_plan(plan)
+    else:
+        execute_plan(plan)
+
+if __name__ == "__main__":
+    main()
+
diff --git a/hacks/rsync-ssh-backup b/hacks/rsync-ssh-backup
new file mode 100755 (executable)
index 0000000..fc5e7b1
--- /dev/null
@@ -0,0 +1,32 @@
+#!/bin/sh
+
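+# Forced-command wrapper for authorized_keys: allow only "rsync --server ..."
+# and reject anything containing shell metacharacters.
+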
+case "$SSH_ORIGINAL_COMMAND" in
+*\&*)
+echo "Rejected" >&2
+;;
+*\(*)
+echo "Rejected" >&2
+;;
+*\{*)
+echo "Rejected" >&2
+;;
+*\;*)
+echo "Rejected" >&2
+;;
+*\<*)
+echo "Rejected" >&2
+;;
+*\`*)
+echo "Rejected" >&2
+;;
+*\|*)
+echo "Rejected" >&2
+;;
+rsync\ --server*)
+ionice -c3 nice -n5 $SSH_ORIGINAL_COMMAND
+;;
+*)
+echo "Rejected" >&2
+;;
+esac
+
diff --git a/hacks/shaper b/hacks/shaper
new file mode 100755 (executable)
index 0000000..1751105
--- /dev/null
@@ -0,0 +1,122 @@
+#!/bin/bash
+
+# Docs: http://lartc.org/howto
+
+# Tweakables
+IFACE="ppp0"
+VOIP_HOST="$(dig +short sip.internode.on.net)"
+
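+# Rates are in kbit/s; UPLINK_RATE should sit just below the link's real
+# upstream speed so the queue builds here instead of in the modem.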
+UPLINK_RATE=450
+
+VOIP_RATE=70
+HIGH_RATE=130
+NORM_RATE=240
+BULK_RATE=10
+
+
+# Symbolic 'constants'
+ROOT=1
+LIMIT=1
+VOIP_TRAFFIC=10
+HIGH_TRAFFIC=20
+NORM_TRAFFIC=30
+BULK_TRAFFIC=40
+
+
+# Print status of classes
+if [ "$1" = "status" ]; then
+    tc -s qdisc ls dev ${IFACE}
+    tc -s class ls dev ${IFACE}
+    exit
+fi
+
+set -x
+
+# clean existing down- and uplink qdiscs, hide errors
+tc qdisc del dev ${IFACE} root    2> /dev/null > /dev/null
+tc qdisc del dev ${IFACE} ingress 2> /dev/null > /dev/null
+
+if [ "$1" = "stop" ]; then
+    exit
+fi
+
+cd /
+
+
+########## uplink #############
+
+# install root HTB, point default traffic to NORM_TRAFFIC
+tc qdisc add dev ${IFACE} \
+    root handle ${ROOT}:0 \
+    htb default ${NORM_TRAFFIC}
+
+
+# LIMIT class shapes everything at $UPLINK_RATE speed
+# this prevents huge queues in the DSL modem which destroy latency
+tc class add dev ${IFACE} \
+    parent ${ROOT}:0 classid ${ROOT}:${LIMIT} \
+    htb rate ${UPLINK_RATE}Kbit ceil ${UPLINK_RATE}Kbit
+
+
+# VoIP traffic class gets guaranteed bandwidth
+tc class add dev ${IFACE} \
+    parent ${ROOT}:${LIMIT} classid ${ROOT}:${VOIP_TRAFFIC} \
+    htb rate ${VOIP_RATE}Kbit ceil ${UPLINK_RATE}Kbit prio 0
+
+# High priority traffic
+tc class add dev ${IFACE} \
+    parent ${ROOT}:${LIMIT} classid ${ROOT}:${HIGH_TRAFFIC} \
+    htb rate ${HIGH_RATE}Kbit ceil ${UPLINK_RATE}Kbit prio 1
+
+# Normal priority traffic
+tc class add dev ${IFACE} \
+    parent ${ROOT}:${LIMIT} classid ${ROOT}:${NORM_TRAFFIC} \
+    htb rate ${NORM_RATE}Kbit ceil ${UPLINK_RATE}Kbit prio 2
+
+# Bulk traffic gets little default allowance
+tc class add dev ${IFACE} \
+    parent ${ROOT}:${LIMIT} classid ${ROOT}:${BULK_TRAFFIC} \
+    htb rate ${BULK_RATE}Kbit ceil ${UPLINK_RATE}Kbit prio 3
+
+
+# Stochastic Fairness
+tc qdisc add dev ${IFACE} \
+    parent ${ROOT}:${HIGH_TRAFFIC} handle ${HIGH_TRAFFIC}:0 \
+    sfq perturb 10
+tc qdisc add dev ${IFACE} \
+    parent ${ROOT}:${NORM_TRAFFIC} handle ${NORM_TRAFFIC}:0 \
+    sfq perturb 10
+tc qdisc add dev ${IFACE} \
+    parent ${ROOT}:${BULK_TRAFFIC} handle ${BULK_TRAFFIC}:0 \
+    sfq perturb 10
+
+
+# Match VoIP traffic as highest priority
+tc filter add dev ${IFACE} \
+    parent ${ROOT}:0 protocol ip prio 10 u32 \
+    match ip dst ${VOIP_HOST} flowid ${ROOT}:${VOIP_TRAFFIC}
+
+# ICMP in the HIGH_TRAFFIC class
+tc filter add dev ${IFACE} \
+    parent ${ROOT}:0 protocol ip prio 10 u32 \
+    match ip protocol 1 0xff flowid ${ROOT}:${HIGH_TRAFFIC}
+
+# To speed up downloads while an upload is going on, ACK is HIGH_TRAFFIC
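+# (u32 matches: IP header length of 5 words, total length < 64 bytes, TCP ACK flag set)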
+tc filter add dev ${IFACE} \
+    parent ${ROOT}:0 protocol ip prio 10 u32 \
+    match ip protocol 6 0xff \
+    match u8 0x05 0x0f at 0 \
+    match u16 0x0000 0xffc0 at 2 \
+    match u8 0x10 0xff at 33 \
+    flowid ${ROOT}:${HIGH_TRAFFIC}
+
+# TOS Minimum-Delay (eg ssh but not scp) in HIGH_TRAFFIC
+tc filter add dev ${IFACE} \
+    parent ${ROOT}:0 protocol ip prio 10 u32 \
+    match ip tos 0x10 0xff flowid ${ROOT}:${HIGH_TRAFFIC}
+
+# TOS Maximise-Throughput (eg rtorrent) in BULK_TRAFFIC
+tc filter add dev ${IFACE} \
+    parent ${ROOT}:0 protocol ip prio 10 u32 \
+    match ip tos 0x08 0xff flowid ${ROOT}:${BULK_TRAFFIC}
+
diff --git a/hacks/smart-stats b/hacks/smart-stats
new file mode 100755 (executable)
index 0000000..21c69d7
--- /dev/null
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+DISKS="$(grep -Eo '^/dev/[^ ]+' /etc/smartd.conf)"
+
+for dev in $DISKS; do
+    name="$(basename "$dev" | sed 's/^ata-//')"
+    logfile="/var/log/smart/${name}/$(date +%Y-%m-%d_%H:%M).txt"
+    mkdir -p "$(dirname "$logfile")"
+    smartctl -a "$dev" > "$logfile"
+done
+
+exit 0
diff --git a/hacks/split-mvimg b/hacks/split-mvimg
new file mode 100755 (executable)
index 0000000..c639743
--- /dev/null
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+if [ -z "$1" ]; then
+    echo "Usage: $0 MVIMG_xxx.jpg"
+    exit 1
+fi
+
+set -eu
+
+infile="$1"
+outvid="${infile%.*}.mp4"
+outpic="${infile#MV}"
+
+if ! [ -f "$infile" ]; then
+    echo "Missing input file: $infile"
+    exit 1
+fi
+
+if [ -f "$outvid" ] || [ -f "$outpic" ]; then
+    echo "Output files already exist! $outvid || $outpic"
+    exit 1
+fi
+
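+# MVIMG files are a JPEG with an MP4 appended; the MicroVideoOffset XMP tag
+# gives the video's offset measured back from the end of the file.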
+size="$(stat -c '%s' "$infile")"
+revoffset="$(exiftool -m -p "\$MicroVideoOffset" "$infile")"
+if [ -z "$revoffset" ] || [ "$revoffset" -le 0 ]; then
+    echo "Invalid video offset: $revoffset"
+    exit 1
+fi
+offset=$((size - revoffset))
+
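+# Use the split point as the dd block size: skip=1 copies everything after it
+# (the video), count=1 everything before it (the photo).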
+dd if="$infile" of="$outvid" bs="$offset" skip=1
+dd if="$infile" of="$outpic" bs="$offset" count=1
+exiftool -overwrite_original -xmp:all= "$outpic"
diff --git a/hacks/ssh-screen-wrapper b/hacks/ssh-screen-wrapper
new file mode 100755 (executable)
index 0000000..c40a490
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+set -eu
+
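+# Symlink this script to a hostname; it keeps reconnecting to a screen
+# session on that host until the session ends cleanly.
+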
+hostname="$(basename "$0")"
+
+while true; do
+    clear
+    echo "Connecting to ${hostname}..."
+    ssh \
+        -o ServerAliveInterval=1 \
+        -o ServerAliveCountMax=5 \
+        -t \
+        "$@" \
+        "$hostname" \
+        "bash --login -c 'screen -dR'" \
+    && exit 0
+    echo "Disconnected, waiting..."
+    echo -ne "\\033]0;${hostname}: disconnected!\\007"
+    sleep 1
+done
+
diff --git a/hacks/tcp-proxy b/hacks/tcp-proxy
new file mode 100755 (executable)
index 0000000..865a64c
--- /dev/null
@@ -0,0 +1,387 @@
+#!/usr/bin/env python3
+
+"""
+Proxy Utility
+-------------
+
+With mode=basic any incoming connections on listen_port will be proxied
+to host:port. The proxy will only accept connections from hosts in the
+[allowed] section.
+
+With mode=proxy the first two lines of incoming connections must be
+'host\nport\n'. Once again only connections from hosts in the [allowed]
+section will be accepted. The proxy will connect to host:port and pass
+bytes in both directions.
+
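+For example (assuming a mode=proxy instance listening on
+proxy.example.com:9997), something like this should tunnel a
+connection through to google.com port 80:
+
+  printf 'google.com\n80\n' | nc proxy.example.com 9997
+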
+The final mode, mode=interceptor is designed to be combined with a firewall
+rule and another instance running mode=proxy on another computer.
+Proxies running in interceptor mode listen on all interfaces. They make
+connections to the host:port specified in the config file, passing the
+'capturedhost\ncapturedport\n' onto the destination. They then pass bytes
+in both directions.
+
+
+Example - Basic Forwarder
+-------------------------
+
+Config to forward all packets from port 8000 on localhost to google.com:80
+Connections will be accepted from whatever IP address alpha.example.com
+and beta.example.com point to.
+
+[proxy]
+mode = basic
+listen_port = 8000
+host = google.com
+port = 80
+
+[allowed]
+host1 = alpha.example.com
+host2 = beta.example.com
+
+
+
+Example - Interceptor Proxy Combo
+---------------------------------
+
+Capture all packets destined for port 1935 and send them to an interceptor
+configured to listen on example.com:9997.
+On Linux:
+  # iptables -t nat -A PREROUTING -p tcp --dport 1935 \
+      -j REDIRECT --to-ports 9997
+  # iptables -t nat -A OUTPUT -p tcp --dport 1935 \
+      -j REDIRECT --to-ports 9997
+
+On Mac OS X:
+  # ipfw add 50000 fwd 127.0.0.1,9997 tcp from any to any dst-port 1935
+
+Config to forward these connections to proxy.example.com
+
+[proxy]
+mode = interceptor
+listen_port = 9997
+host = proxy.example.com
+port = 9997
+
+
+
+Config file for proxy.example.com
+
+[proxy]
+mode = proxy
+listen_port = 9997
+
+[allowed]
+host1 = alpha.example.com
+host2 = beta.example.com
+
+
+"""
+
+
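+# NOTE: asyncore was removed from the standard library in Python 3.12;
+# run this with Python 3.11 or earlier.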
+import asyncore
+import configparser
+import os
+import socket
+import struct
+import sys
+import traceback
+
+
+if sys.platform == "linux2":
+    try:
+        socket.SO_ORIGINAL_DST
+    except AttributeError:
+        # There is a missing const in the socket module... So we will add it now
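+        # (80 is SO_ORIGINAL_DST in <linux/netfilter_ipv4.h>)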
+        socket.SO_ORIGINAL_DST = 80
+
+    def get_original_dest(sock):
+        '''Gets the original destination address for connection that has been
+        redirected by netfilter.'''
+        # struct sockaddr_in {
+        #     short            sin_family;   // e.g. AF_INET
+        #     unsigned short   sin_port;     // e.g. htons(3490)
+        #     struct in_addr   sin_addr;     // see struct in_addr, below
+        #     char             sin_zero[8];  // zero this if you want to
+        # };
+        # struct in_addr {
+        #     unsigned long s_addr;  // load with inet_aton()
+        # };
+        # getsockopt(fd, SOL_IP, SO_ORIGINAL_DST, (struct sockaddr_in *)&dstaddr, &dstlen);
+
+        data = sock.getsockopt(socket.SOL_IP, socket.SO_ORIGINAL_DST, 16)
+        _, port, a1, a2, a3, a4 = struct.unpack("!HHBBBBxxxxxxxx", data)
+        address = "%d.%d.%d.%d" % (a1, a2, a3, a4)
+        return address, port
+
+
+elif sys.platform == "darwin":
+    def get_original_dest(sock):
+        '''Gets the original destination address for connection that has been
+        redirected by ipfw.'''
+        return sock.getsockname()
+
+
+
+class Proxy(asyncore.dispatcher):
+    def __init__(self, arg):
+        if isinstance(arg, tuple):
+            asyncore.dispatcher.__init__(self)
+            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
+            self.connect(arg)
+        else:
+            asyncore.dispatcher.__init__(self, arg)
+        self.init()
+
+    def init(self):
+        self.end = False
+        self.other = None
+        self.buffer = b""
+
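+    # Pair two dispatchers so bytes read from one side are queued for writing on the other.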
+    def meet(self, other):
+        self.other = other
+        other.other = self
+
+    def handle_error(self):
+        print("Proxy error:", traceback.format_exc(), file=sys.stderr)
+        self.close()
+
+    def handle_read(self):
+        data = self.recv(8192)
+        if len(data) > 0:
+            self.other.buffer += data
+
+    def handle_write(self):
+        sent = self.send(self.buffer)
+        self.buffer = self.buffer[sent:]
+        if len(self.buffer) == 0 and self.end:
+            self.close()
+
+    def writable(self):
+        return len(self.buffer) > 0
+
+    def handle_close(self):
+        if not self.other:
+            return
+        print("Proxy closed", file=sys.stderr)
+        self.close()
+        if len(self.other.buffer) == 0:
+            self.other.close()
+        self.other.end = True
+        self.other = None
+
+class ConnectProxy(asyncore.dispatcher):
+    def __init__(self, sock):
+        asyncore.dispatcher.__init__(self, sock)
+        self.buffer = b""
+
+    def handle_error(self):
+        print("ConnectProxy error:", traceback.format_exc(), file=sys.stderr)
+        self.close()
+
+    def handle_read(self):
+        self.buffer += self.recv(8192)
+        pos1 = self.buffer.find("\n")
+        if pos1 < 0:
+            return
+        host = self.buffer[:pos1].strip()
+        pos1 += 1
+        pos2 = self.buffer[pos1:].find("\n")
+        if pos2 < 0:
+            return
+        pos2 += pos1
+        port = int(self.buffer[pos1:pos2].strip())
+
+        self.buffer = self.buffer[pos2+1:]
+        self.done(host, port)
+
+    def handle_write(self):
+        pass
+
+    def handle_close(self):
+        print("Proxy closed", file=sys.stderr)
+        self.close()
+
+    def done(self, host, port):
+        print("Forwarding connection", host, port, file=sys.stderr)
+
+        # Create server proxy
+        server = Proxy((host, port))
+        server.buffer = self.buffer
+
+        # Morph and connect
+        self.__class__ = Proxy
+        self.init()
+        server.meet(self)
+
+
+class BasicForwarder(asyncore.dispatcher):
+    def __init__(self, listen_port, host, port, allowed):
+        asyncore.dispatcher.__init__(self)
+        self.host = host
+        self.port = port
+        self.allowed = allowed
+        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.set_reuse_addr()
+        self.bind((b"", listen_port))
+        self.listen(50)
+        print("BasicForwarder bound on", listen_port, file=sys.stderr)
+
+    def handle_error(self):
+        print("BasicForwarder error:", traceback.format_exc(), file=sys.stderr)
+
+    def handle_accept(self):
+        client_connection, source_addr = self.accept()
+        if not self.is_connected_allowed(source_addr):
+            print("Rejected connection from", source_addr, file=sys.stderr)
+            client_connection.close()
+            return
+
+        print("Accepted connection from", source_addr, file=sys.stderr)
+
+        # Hook the sockets up to the event loop
+        client = Proxy(client_connection)
+        server = Proxy((self.host, self.port))
+        server.meet(client)
+
+    def is_connected_allowed(self, source_addr):
+        if len(self.allowed) == 1 and self.allowed[0].lower() == "all":
+            return True
+
+        if source_addr[0] in list(map(socket.gethostbyname, self.allowed)):
+            return True
+
+        return False
+
+class Forwarder(asyncore.dispatcher):
+    def __init__(self, listen_port, allowed):
+        asyncore.dispatcher.__init__(self)
+        self.allowed = allowed
+        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.set_reuse_addr()
+        self.bind((b"", listen_port))
+        self.listen(50)
+        print("Forwarder bound on", listen_port, file=sys.stderr)
+
+    def handle_error(self):
+        print("Forwarder error:", traceback.format_exc(), file=sys.stderr)
+
+    def handle_accept(self):
+        client_connection, source_addr = self.accept()
+        if source_addr[0] not in list(map(socket.gethostbyname, self.allowed)):
+            print("Rejected connection from", source_addr, file=sys.stderr)
+            client_connection.close()
+            return
+
+        print("Accepted connection from", source_addr, file=sys.stderr)
+        ConnectProxy(client_connection)
+
+class Interceptor(asyncore.dispatcher):
+    def __init__(self, listen_port, host, port):
+        asyncore.dispatcher.__init__(self)
+        self.host = host
+        self.port = port
+        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.set_reuse_addr()
+        self.bind(("0.0.0.0", listen_port))
+        self.listen(50)
+        print("Interceptor bound on", listen_port, file=sys.stderr)
+
+    def handle_error(self):
+        print("Interceptor error!", traceback.format_exc(), file=sys.stderr)
+
+    def handle_accept(self):
+        # Get sockets
+        client_connection, source_addr = self.accept()
+        dest = get_original_dest(client_connection)
+        print("Accepted connection from", source_addr, file=sys.stderr)
+
+        # Hook them up to the event loop
+        client = Proxy(client_connection)
+        server = Proxy((self.host, self.port))
+        server.buffer += "%s\n%d\n" % dest
+        server.meet(client)
+
+
+def main(listen_port, host, port, mode, allowed):
+    if mode == "basic":
+        proxy = BasicForwarder(listen_port, host, port, allowed)
+    elif mode == "proxy":
+        proxy = Forwarder(listen_port, allowed)
+    elif mode == "interceptor":
+        proxy = Interceptor(listen_port, host, port)
+    else:
+        print("Unknown mode:", mode, file=sys.stderr)
+        return 1
+    asyncore.loop()
+
+
+if __name__ == "__main__":
+    try:
+        if sys.argv[1] == "-d":
+            daemon = True
+            config = sys.argv[2]
+        else:
+            daemon = False
+            config = sys.argv[1]
+    except (IndexError, ValueError):
+        print("Usage: %s [-d] config" % sys.argv[0], file=sys.stderr)
+        sys.exit(1)
+
+    try:
+        c = configparser.RawConfigParser()
+        c.read(config)
+    except configparser.Error:
+        print("Error parsing config!", file=sys.stderr)
+        sys.exit(1)
+
+    def guard(func, message):
+        try:
+            return func()
+        except Exception:
+            print("Error:", message, file=sys.stderr)
+            sys.exit(1)
+
+    mode = guard(lambda:c.get("proxy", "mode").lower(),
+        "mode is a required field")
+
+    listen_port = guard(lambda:c.getint("proxy", "listen_port"),
+        "listen_port is a required field")
+
+    if mode in ["basic", "interceptor"]:
+        text = "%%s is a required field for mode=%s" % mode
+        host = guard(lambda:c.get("proxy", "host"), text % "host")
+        port = guard(lambda:c.getint("proxy", "port"), text % "port")
+    else:
+        host = None
+        port = None
+
+    if mode in ["basic", "proxy"]:
+        allowed = guard(lambda:c.items("allowed"),
+            "[allowed] section is required for mode=%s" % mode)
+        allowed = [h for _,h in c.items("allowed")]
+    else:
+        allowed = None
+
+
+    if not daemon:
+        try:
+            main(listen_port, host, port, mode, allowed)
+        except KeyboardInterrupt:
+            print()
+    else:
+        os.close(0)
+        os.close(1)
+        os.close(2)
+        os.open("/dev/null", os.O_RDONLY)
+        os.open("/dev/null", os.O_RDWR)
+        os.dup(1)
+
+        if os.fork() == 0:
+            # We are the child
+            try:
+                sys.exit(main(listen_port, host, port, mode, allowed))
+            except KeyboardInterrupt:
+                print()
+            sys.exit(0)
+
diff --git a/hacks/terminal-color-table b/hacks/terminal-color-table
new file mode 100755 (executable)
index 0000000..159d339
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/bash
+
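+# Print every background (40-47) / bold / foreground (30-37) combination,
+# labelled with its escape sequence.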
+echo
+for bg in 00 $(seq 40 47); do
+    for bold in 0 1; do
+        echo -en " \\\\033[${bg}m  "
+        for fg in $(seq 30 37); do
+           echo -en "\\033[${bg}m\\033[${bold};${fg}m \\\\033[${bold};${fg}m  "
+        done
+        echo -e "\\033[0m"
+    done
+    echo
+done
diff --git a/hacks/usb-reset b/hacks/usb-reset
new file mode 100755 (executable)
index 0000000..22dab0a
--- /dev/null
@@ -0,0 +1,45 @@
+#!/usr/bin/env python2
+
+import fcntl
+import os
+import subprocess
+import sys
+import time
+
+if not sys.platform.startswith("linux"):
+    print >>sys.stderr, "Sorry, this tool requires Linux"
+    sys.exit(1)
+
+try:
+    search_usb_id = sys.argv[1].lower()
+except IndexError:
+    print >>sys.stderr, "Usage: %s vendorid:devid" % sys.argv[0]
+    print >>sys.stderr, "\nThis tool will reset all USB devices with the given ID (eg 1f4d:a803)"
+    sys.exit(1)
+
+
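+# USBDEVFS_RESET == _IO('U', 20) == 0x5514 == 21780, from <linux/usbdevice_fs.h>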
+USBDEVFS_RESET = 21780
+
+os.umask(0007)
+
+p = subprocess.Popen(["lsusb"], stdout=subprocess.PIPE)
+for line in p.stdout:
+    line = line.split()
+    usb_id = line[5].lower()
+    if usb_id != search_usb_id:
+        continue
+    bus = line[1]
+    dev = line[3].replace(":", "")
+
+    filename = "/dev/bus/usb/%s/%s" % (bus, dev)
+    print "Resetting", filename, "...",
+    sys.stdout.flush()
+    fd = os.open(filename, os.O_WRONLY)
+    ret = fcntl.ioctl(fd, USBDEVFS_RESET, 0)
+    if ret < 0:
+        print >>sys.stderr, "\nError in ioctl:", ret
+        sys.exit(1)
+    os.close(fd)
+    time.sleep(1)
+    print "done"
+
diff --git a/hacks/wordpress-salt-gen b/hacks/wordpress-salt-gen
new file mode 100755 (executable)
index 0000000..5ad5485
--- /dev/null
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+import random
+
+names = [
+    "AUTH_KEY",
+    "AUTH_SALT",
+    "LOGGED_IN_KEY",
+    "LOGGED_IN_SALT",
+    "NONCE_KEY",
+    "NONCE_SALT",
+    "SECURE_AUTH_KEY",
+    "SECURE_AUTH_SALT",
+]
+
+alnum_chars = list(filter(lambda c: c.isalnum(), map(chr, range(128))))
+
+def generate():
+    return "".join([random.choice(alnum_chars) for i in range(40)])
+
+for name in names:
+    print("fastcgi_param %s '%s';" % (name.ljust(20), generate()))
diff --git a/healthcheck/disk-usage b/healthcheck/disk-usage
new file mode 100755 (executable)
index 0000000..3dd4649
--- /dev/null
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+
+def pp_size(size):
+    suffixes = ["", "KiB", "MiB", "GiB"]
+    for i, suffix in enumerate(suffixes):
+        if size < 1024:
+            break
+        size /= 1024
+    return "%.2f %s" % (size, suffix)
+
+
+def check_path(path):
+    stat = os.statvfs(path)
+    total = stat.f_bsize * stat.f_blocks
+    free = stat.f_bsize * stat.f_bavail
+    warn = False
+
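+    # Small filesystems (under 5 GiB) warn below 5% free; larger ones warn below 2 GiB free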
+    if total < 5*1024*1024*1024:
+        if free < total * 0.05:
+            warn = True
+    elif free < 2*1024*1024*1024:
+        warn = True
+
+    if warn:
+        print("WARNING! %s has only %s remaining" % (path, pp_size(free)))
+        return False
+
+    return True
+
+def read_fstab():
+    for line in open("/etc/fstab"):
+        if line.startswith("#") or not line.strip():
+            continue
+        _, path, _ = line.split(maxsplit=2)
+        if path.startswith("/"):
+            yield path
+
+def main():
+    paths = set(["/"])
+    paths.update(read_fstab())
+    ok = True
+    for path in paths:
+        ok = check_path(path) and ok  # no short-circuit: check every path so all warnings print
+
+    if not ok:
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()
+
diff --git a/healthcheck/packages b/healthcheck/packages
new file mode 100755 (executable)
index 0000000..0df8c33
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -eu
+
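+# chronic (from moreutils) stays silent unless the command fails;
+# -e also treats any stderr output as a failure.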
+if [ -z "${CHRONIC_WRAPPED:-}" ]; then
+    export CHRONIC_WRAPPED=1
+    exec chronic -e "$0"
+fi
+
+. /etc/os-release
+
+function is_distro {
+    [ "$ID" = "$1" ] || [ "${ID_LIKE:-}" = "$1" ]
+}
+
+if is_distro debian; then
+    echo "# aptorphan"
+    aptorphan 1>&2
+fi
+
+if is_distro arch; then
+    echo "# pacorphan"
+    pacorphan 1>&2
+fi
diff --git a/healthcheck/run-all b/healthcheck/run-all
new file mode 100755 (executable)
index 0000000..e5a12c3
--- /dev/null
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+. /etc/os-release
+. ~/.bashrc
+
+set -eu
+cd "$(dirname "$(readlink -f "$0")")"
+
+for i in ./*; do
+    if [ "$(basename "$i")" != "$(basename "$0")" ]; then
+        if ! "$i"; then
+            echo -e "\n^^ FAILED! $(cat /etc/hostname) $PRETTY_NAME -- $i ^^\n"
+            exit 1
+        fi
+    fi
+done
diff --git a/healthcheck/systemd-units b/healthcheck/systemd-units
new file mode 100755 (executable)
index 0000000..8a6bc63
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+if systemctl is-system-running > /dev/null; then
+    exit 0
+fi
+
+echo -e "# systemctl --failed"
+systemctl --failed
+exit 1
diff --git a/healthcheck/systemd-user-timers b/healthcheck/systemd-user-timers
new file mode 100755 (executable)
index 0000000..e4679e7
--- /dev/null
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+cd ~/.config/systemd/user/ &> /dev/null || exit 0
+
+error=0
+for timer in *.timer; do
+    if ! systemctl --user is-enabled "$timer" > /dev/null; then
+        echo "disabled timer $timer"
+        error=1
+    fi
+    if ! systemctl --user is-active "$timer" > /dev/null; then
+        echo "inactive timer $timer"
+        error=1
+    fi
+done
+
+exit "$error"
diff --git a/healthcheck/systemd-user-units b/healthcheck/systemd-user-units
new file mode 100755 (executable)
index 0000000..ddc1551
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+if systemctl --user is-system-running > /dev/null; then
+    exit 0
+fi
+
+echo -e "# systemctl --user --failed"
+systemctl --user --failed
+exit 1
diff --git a/healthcheck/zpool-health b/healthcheck/zpool-health
new file mode 100755 (executable)
index 0000000..0864b80
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+if ! command -v zpool > /dev/null; then
+   exit 0
+fi
+
+if zpool list -H -o health,name | grep -qv ONLINE; then
+   zpool status -v
+   exit 1
+fi