Add 'old-conf/' from commit '62a64a79a8'
git-subtree-dir: old-conf
git-subtree-mainline: 4667974392
git-subtree-split: 62a64a79a8
This commit is contained in:
commit
83de52d5db
195 changed files with 13408 additions and 0 deletions
59
old-conf/hosts/ds9/attic.nix
Normal file
59
old-conf/hosts/ds9/attic.nix
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
# Attic binary-cache server (atticd) for host ds9.
# Listens on [::]:8089; fronted by Caddy at attic.hailsatan.eu elsewhere.
{ config, pkgs, lib, inputs, ... }:
let
  # All atticd state (SQLite database + local NAR storage) lives here.
  stateDir = "/var/lib/atticd2";
in
{
  # imports = [ inputs.attic.nixosModules.atticd ];
  ragon.agenix.secrets.ds9AtticEnv = { };

  # Root is ephemeral on this host; keep the state dir across reboots.
  ragon.persist.extraDirectories = [ stateDir ];

  systemd.services.atticd.serviceConfig.ReadWritePaths = [ stateDir ];

  services.atticd = {
    enable = true;

    # Replace with absolute path to your environment file
    environmentFile = config.age.secrets.ds9AtticEnv.path;

    settings = {
      listen = "[::]:8089";
      database.url = "sqlite://${stateDir}/server.db?mode=rwc";

      storage = {
        type = "local";
        path = "${stateDir}/storage";
      };

      jwt = { };

      # Data chunking
      #
      # Warning: If you change any of the values here, it will be
      # difficult to reuse existing chunks for newly-uploaded NARs
      # since the cutpoints will be different. As a result, the
      # deduplication ratio will suffer for a while after the change.
      chunking = {
        # The minimum NAR size to trigger chunking
        #
        # If 0, chunking is disabled entirely for newly-uploaded NARs.
        # If 1, all NARs are chunked.
        nar-size-threshold = 64 * 1024; # 64 KiB

        # The preferred minimum size of a chunk, in bytes
        min-size = 16 * 1024; # 16 KiB

        # The preferred average size of a chunk, in bytes
        avg-size = 64 * 1024; # 64 KiB

        # The preferred maximum size of a chunk, in bytes
        max-size = 256 * 1024; # 256 KiB
      };
    };
  };
}
|
||||
91
old-conf/hosts/ds9/authentik.nix
Normal file
91
old-conf/hosts/ds9/authentik.nix
Normal file
|
|
@ -0,0 +1,91 @@
|
|||
# Authentik identity provider for ds9, run as Podman quadlet containers:
# the API server, a background worker, an LDAP outpost, and a private Redis.
{ pkgs, config, lib, inputs, ... }:
let
  # goauthentik release tag shared by the server/worker/ldap images.
  version = "2025.10.1";
in
{
  imports = [ inputs.quadlet-nix.nixosModules.quadlet ];

  ragon.agenix.secrets.ds9AuthentikEnv = { };
  ragon.agenix.secrets.ds9AuthentikLdapEnv = { };

  virtualisation.quadlet = {
    containers = {
      authentik-server = {
        containerConfig = {
          image = "ghcr.io/goauthentik/server:${version}";
          exec = "server";
          networks = [
            "podman"
            "db-net"
            "authentik-net"
          ];
          volumes = [
            "authentik-media:/media"
            "authentik-certs:/certs"
          ];
          environments = {
            AUTHENTIK_REDIS__HOST = "authentik-redis";
            AUTHENTIK_POSTGRESQL__HOST = "postgres";
            AUTHENTIK_POSTGRESQL__USER = "authentik";
            AUTHENTIK_POSTGRESQL__NAME = "authentik";
          };
          # Secrets (AUTHENTIK_SECRET_KEY, DB password, ...) come from agenix.
          environmentFiles = [ config.age.secrets.ds9AuthentikEnv.path ];
        };
        serviceConfig.TimeoutStartSec = "60";
      };

      authentik-worker = {
        containerConfig = {
          image = "ghcr.io/goauthentik/server:${version}";
          exec = "worker";
          networks = [
            "podman"
            "db-net"
            "authentik-net"
          ];
          volumes = [
            "authentik-media:/media"
            "authentik-certs:/certs"
          ];
          environments = {
            AUTHENTIK_REDIS__HOST = "authentik-redis";
            AUTHENTIK_POSTGRESQL__HOST = "postgres";
            AUTHENTIK_POSTGRESQL__USER = "authentik";
            AUTHENTIK_POSTGRESQL__NAME = "authentik";
          };
          environmentFiles = [ config.age.secrets.ds9AuthentikEnv.path ];
        };
        serviceConfig.TimeoutStartSec = "60";
      };

      authentik-ldap = {
        containerConfig = {
          image = "ghcr.io/goauthentik/ldap:${version}";
          networks = [
            "podman"
            "authentik-net"
          ];
          environments = {
            # Talks to the server container over the shared authentik-net.
            AUTHENTIK_HOST = "http://authentik-server:9000";
            AUTHENTIK_INSECURE = "true";
          };
          environmentFiles = [ config.age.secrets.ds9AuthentikLdapEnv.path ];
        };
        serviceConfig.TimeoutStartSec = "60";
      };

      authentik-redis = {
        containerConfig = {
          image = "docker.io/library/redis:alpine";
          networks = [
            "authentik-net"
          ];
          volumes = [ "authentik-redis:/data" ];
        };
        serviceConfig.TimeoutStartSec = "60";
      };
    };

    # Internal IPv6-enabled network shared by the authentik containers.
    networks.authentik.networkConfig = {
      ipv6 = true;
      name = "authentik-net";
      internal = true;
    };
  };
}
|
||||
49
old-conf/hosts/ds9/backup.nix
Normal file
49
old-conf/hosts/ds9/backup.nix
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
# Backups for ds9: receives borg backups from picard (picardbackup user)
# and pushes an offsite borgmatic backup to gatebridge.
{ config, pkgs, lib, ... }:
let
  # Build a healthchecks ping command. The base URL is read at runtime from
  # the ds9SyncoidHealthCheckUrl secret; `suffix` selects "/start", "/fail",
  # or "" (success).
  healthCheckPing = suffix:
    "${pkgs.curl}/bin/curl -fss -m 10 --retry 5 -o /dev/null $(${pkgs.coreutils}/bin/cat ${config.age.secrets.ds9SyncoidHealthCheckUrl.path})${suffix}";
in
{
  ragon.agenix.secrets."ds9OffsiteBackupSSH" = { };
  ragon.agenix.secrets."ds9SyncoidHealthCheckUrl" = { };
  ragon.agenix.secrets."gatebridgeHostKeys" = { };
  ragon.agenix.secrets."borgmaticEncryptionKey" = { };

  # Backup Target
  users.users.picardbackup = {
    createHome = false;
    group = "users";
    uid = 993;
    home = "/backups/picard";
    shell = "/run/current-system/sw/bin/bash";
    isSystemUser = true;
    # Key is forced to `borg serve` restricted to the picard repo path.
    openssh.authorizedKeys.keys = [
      ''command="${pkgs.borgbackup}/bin/borg serve --restrict-to-path /backups/picard/",restrict ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHvCF8KGgpF9O8Q7k+JXqZ5eMeEeTaMhCIk/2ZFOzXL0''
    ];
  };

  services.borgmatic = {
    enable = true;
    configurations."ds9-offsite" = {
      source_directories = [ "/backups" "/data" "/persistent" ];
      repositories = [ { label = "gatebridge"; path = "ssh://root@gatebridge/media/backup/ds9"; } ];
      exclude_if_present = [ ".nobackup" ];
      #upload_rate_limit = "4000";
      encryption_passcommand = "${pkgs.coreutils}/bin/cat ${config.age.secrets.borgmaticEncryptionKey.path}";
      compression = "auto,zstd,10";
      # Generous lock waits so overlapping runs queue instead of failing.
      extra_borg_options = {
        init = "--lock-wait 600";
        create = "--lock-wait 600";
        prune = "--lock-wait 600";
        compact = "--lock-wait 600";
        check = "--lock-wait 600";
      };
      ssh_command = "ssh -o ServerAliveInterval=10 -o ServerAliveCountMax=30 -o GlobalKnownHostsFile=${config.age.secrets.gatebridgeHostKeys.path} -i ${config.age.secrets.ds9OffsiteBackupSSH.path}";
      before_actions = [ (healthCheckPing "/start") ];
      after_actions = [ (healthCheckPing "") ];
      on_error = [ (healthCheckPing "/fail") ];
      retention = {
        keep_daily = 7;
        keep_weekly = 3;
        keep_monthly = 6;
        keep_yearly = 2;
      };
    };
  };
}
|
||||
467
old-conf/hosts/ds9/containers.nix
Normal file
467
old-conf/hosts/ds9/containers.nix
Normal file
|
|
@ -0,0 +1,467 @@
|
|||
# OCI containers (Podman) and their supporting networks, mounts, and
# firewall rules for ds9: postgres, immich, changedetection, grafana,
# node-red, jellyfin, archivebox, labello, and copyparty.
{ config, pkgs, lib, ... }:
let
  # Init script for the postgres container. On first startup it creates the
  # databases listed in $POSTGRES_MULTIPLE_DATABASES (comma-separated) and
  # the "user:password" pairs listed in $POSTGRES_MULTIPLE_DATABASES_USERS.
  postgres-multi-db = pkgs.writeText "postgres-multiple-db.sh" ''
    #!/usr/bin/env bash
    set -eu

    if [ -n "$POSTGRES_MULTIPLE_DATABASES" ]; then
      echo "Multiple database creation requested: $POSTGRES_MULTIPLE_DATABASES"
      (
        for db in $(echo $POSTGRES_MULTIPLE_DATABASES | tr ',' ' '); do
          echo "CREATE DATABASE $db;"
        done
        for user in $(echo $POSTGRES_MULTIPLE_DATABASES_USERS | tr ',' ' '); do
          while IFS=":" read -r usr pw
          do
            echo "CREATE USER $usr PASSWORD '$pw';"
            echo "GRANT ALL PRIVILEGES ON DATABASE \"$usr\" TO $usr;"
          done < <(echo $user)
        done
      ) | psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER"
    fi
  '';
  # NOTE: the loop redirection above was `done <(echo $user)`, which is a
  # bash syntax error (missing `<`); fixed to a real process substitution.
in
{
  imports = [
    ./authentik.nix
  ];

  networking.firewall.interfaces."podman+".allowedUDPPorts = [ 53 ];
  networking.firewall.interfaces."podman+".allowedTCPPorts = [
    12300
    3001
  ];

  # Container storage lives on its own ZFS dataset.
  fileSystems."/var/lib/containers" = {
    device = "spool/safe/containers";
    fsType = "zfs";
  };

  # plex
  # networking.firewall = {
  #   allowedTCPPorts = [ 32400 3005 8324 32469 ];
  #   allowedUDPPorts = [ 1900 5353 32410 32412 32413 32414 ];
  # };
  # virtualisation.oci-containers.containers.plex = {
  #   image = "docker.io/plexinc/pms-docker";
  #   extraOptions = [ "--network=host" ];
  #   environment = {
  #     TZ = "Europe/Berlin";
  #     PLEX_UID = "1000";
  #     PLEX_GID = "100";
  #   };
  #   volumes = [
  #     "/data/media:/data/media"
  #     "plex-transcode:/transcode"
  #     "plex-db:/config"
  #   ];
  # };

  # postgres
  boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
  ragon.agenix.secrets.ds9PostgresEnv = { };

  # Ensure the internal db-net exists before containers that join it start.
  systemd.services."podman-db-network" = {
    script = ''
      ${pkgs.podman}/bin/podman network exists db-net || ${pkgs.podman}/bin/podman network create db-net --internal --ipv6
    '';
  };

  virtualisation.oci-containers.containers.postgres = {
    image = "docker.io/tensorchord/pgvecto-rs:pg16-v0.2.1";
    extraOptions = [
      "--network=db-net"
      "--network=podman"
      "--health-cmd"
      "pg_isready -U postgres"
    ];
    # dependsOn = [ "db-network" ];
    environment = {
      POSTGRES_INITDB_ARGS = "--data-checksums";
    };
    environmentFiles = [
      config.age.secrets.ds9PostgresEnv.path
    ];
    ports = [ "5432:5432" ];
    volumes = [
      "${postgres-multi-db}:/docker-entrypoint-initdb.d/create-multiple-postgresql-databases.sh"
      "postgres:/var/lib/postgresql/data"
    ];
  };

  # immich
  ragon.agenix.secrets.ds9ImmichEnv = { };
  # NOTE(review): the immich containers below join "immich-net", but the unit
  # that would create it is commented out — verify the network exists on the
  # host or re-enable this service.
  # systemd.services."podman-immich-network" = {
  #   script = ''
  #     echo "Creating immich network"
  #     ${pkgs.podman}/bin/podman network exists immich-net || ${pkgs.podman}/bin/podman network create immich-net --internal --ipv6
  #     echo "Created immich network"
  #   '';
  # };

  virtualisation.oci-containers.containers.immich-redis = {
    image = "docker.io/valkey/valkey:7.2.6-alpine";
    environment.TZ = "Europe/Berlin";
    extraOptions = [
      "--health-cmd"
      "valkey-cli ping || exit 1"
      "--network=immich-net"
    ];
    environmentFiles = [
      config.age.secrets.ds9ImmichEnv.path
    ];
    # dependsOn = [ "immich-network" ];
  };

  virtualisation.oci-containers.containers.immich-server = {
    user = "1000:100";
    image = "ghcr.io/immich-app/immich-server:release";
    extraOptions = [
      "--network=podman"
      "--network=immich-net"
      "--network=db-net"
    ];
    dependsOn = [
      # "immich-network"
      "immich-redis"
      "postgres"
    ];
    ports = [ "8765:3001" ];
    volumes = [
      "/data/immich:/usr/src/app/upload"
    ];
    environment = {
      # Fixed typo: was "IMICH_HOST", which immich ignores; without this the
      # server binds its default interface instead of all interfaces.
      IMMICH_HOST = "0.0.0.0";
      DB_HOSTNAME = "postgres";
      REDIS_HOSTNAME = "immich-redis";
      TZ = "Europe/Berlin";
    };
    environmentFiles = [
      config.age.secrets.ds9ImmichEnv.path
    ];
  };

  virtualisation.oci-containers.containers.immich-machine-learning = {
    user = "1000:100";
    image = "ghcr.io/immich-app/immich-machine-learning:release";
    extraOptions = [
      "--network=immich-net"
      "--network=db-net"
      "--network=podman"
    ];
    dependsOn = [
      # "immich-network"
      "immich-redis"
      "postgres"
    ];
    volumes = [
      "immich-model-cache:/cache"
    ];
    environment = {
      DB_HOSTNAME = "postgres";
      REDIS_HOSTNAME = "immich-redis";
      TZ = "Europe/Berlin";
    };
    environmentFiles = [
      config.age.secrets.ds9ImmichEnv.path
    ];
  };

  # navidrome
  # virtualisation.oci-containers.containers.lms = {
  #   # don't tell mom
  #   # user = "1000:100";
  #   image = "epoupon/lms:latest";
  #   cmd = [ "/lms.conf" ];
  #   extraOptions = [ "--network=podman" ];
  #   volumes =
  #     let
  #       lmsConfig = pkgs.writeText "lms-config" ''
  #         original-ip-header = "X-Forwarded-For";
  #         behind-reverse-proxy = true;
  #         trusted-proxies =
  #           (
  #             "10.88.0.1"
  #           );
  #         authentication-backend = "http-headers";
  #         http-headers-login-field = "X-Webauth-User";
  #       '';
  #     in
  #     [
  #       "lightweight-music-server-data:/var/lms:rw"
  #       "${lmsConfig}:/lms.conf"
  #       "/data/media/beets/music:/music:ro"
  #     ];
  #   environment = { };
  # };

  # changedetection
  systemd.services."podman-cd-network" = {
    script = ''
      ${pkgs.podman}/bin/podman network exists cd-net || ${pkgs.podman}/bin/podman network create cd-net --internal --ipv6
    '';
  };

  virtualisation.oci-containers.containers.changedetection = {
    image = "ghcr.io/dgtlmoon/changedetection.io";
    extraOptions = [
      "--network=podman"
      "--network=cd-net"
    ];
    volumes = [
      "changedetection-data:/datastore"
    ];
    environment = {
      # Renders JS-heavy pages through the sockpuppet chrome container below.
      PLAYWRIGHT_DRIVER_URL = "ws://changedetection-chrome:3000";
      HIDE_REFERER = "true";
      USE_X_SETTINGS = "1";
    };
  };

  virtualisation.oci-containers.containers.changedetection-chrome = {
    image = "dgtlmoon/sockpuppetbrowser:latest";
    extraOptions = [
      "--network=podman"
      "--network=cd-net"
    ];
    environment = {
      SCREEN_WIDTH = "1920";
      SCREEN_HEIGHT = "1024";
      SCREEN_DEPTH = "16";
      MAX_CONCURRENT_CHROME_PROCESSES = "10";
    };
  };

  networking.firewall.interfaces."podman0".allowedTCPPorts = [ 9090 ];

  virtualisation.oci-containers.containers.grafana = {
    image = "grafana/grafana-oss:latest";
    extraOptions = [
      "--network=podman"
      "--network=db-net"
    ];
    volumes =
      let
        # Auth is delegated to authentik via proxy headers (auth.proxy).
        ini = pkgs.writeText "grafana.ini" ''
          [users]
          allow_sign_up = false
          auto_assign_org = true
          auto_assign_org_role = Viewer

          [auth.proxy]
          enabled = true
          headers = Name:X-Authentik-Username Email:X-Authentik-Email Role:X-Grafana-Role
          header_name = X-Authentik-Username
          header_property = username
          auto_sign_up = true
        '';
      in
      [
        "grafana-data:/var/lib/grafana"
        "${ini}:/etc/grafana/grafana.ini"
      ];
    environment = {
      GF_SERVER_ROOT_URL = "https://grafana.hailsatan.eu/";
      GF_INSTALL_PLUGINS = "";
      GF_FEATURE_TOGGLES_ENABLE = "featureToggleAdminPage, regressionTransformation";
      GF_FEATURE_MANAGEMENT_ALLOW_EDITING = "true";
    };
  };

  virtualisation.oci-containers.containers.node-red = {
    image = "nodered/node-red:latest";
    extraOptions = [
      "--network=podman"
      "--network=db-net"
    ];
    volumes = [
      "nodered-data:/data"
    ];
  };

  virtualisation.oci-containers.containers.jellyfin = {
    image = "jellyfin/jellyfin:latest";
    user = "1000:100";
    extraOptions = [
      "--network=podman"
      # Media is mounted read-only; published only on loopback for Caddy.
      "--mount"
      "type=bind,source=/data/media,destination=/media,ro=true,relabel=private"
      "-p"
      "127.0.0.1:8096:8096"
    ];
    volumes = [
      "jellyfin-config:/config"
      "jellyfin-cache:/cache"
    ];
  };

  # archivebox
  systemd.services."podman-archivebox-network" = {
    script = ''
      ${pkgs.podman}/bin/podman network create archivebox-net --internal --ipv6 --ignore
    '';
  };

  virtualisation.oci-containers.containers.archivebox = {
    image = "archivebox/archivebox:dev";
    environment = {
      ALLOWED_HOSTS = "*"; # set this to the hostname(s) you're going to serve the site from!
      CSRF_TRUSTED_ORIGINS = "https://archive.hailsatan.eu"; # you MUST set this to the server's URL for admin login and the REST API to work
      REVERSE_PROXY_USER_HEADER = "X-Authentik-Username";
      REVERSE_PROXY_WHITELIST = "10.88.0.1/32";
      PUBLIC_INDEX = "False"; # set to False to prevent anonymous users from viewing snapshot list
      PUBLIC_SNAPSHOTS = "False"; # set to False to prevent anonymous users from viewing snapshot content
      PUBLIC_ADD_VIEW = "False"; # set to True to allow anonymous users to submit new URLs to archive
      SEARCH_BACKEND_ENGINE = "sonic"; # tells ArchiveBox to use sonic container below for fast full-text search
      SEARCH_BACKEND_HOST_NAME = "archivebox_sonic";
      # SECURITY(review): password is hard-coded here and in two sibling
      # containers; sonic is only reachable on the internal archivebox-net,
      # but consider moving this into an agenix environmentFile.
      SEARCH_BACKEND_PASSWORD = "SomeSecretPassword";
    };
    extraOptions = [
      "--network=archivebox-net"
      "--network=podman"
    ];
    volumes = [
      "/data/media/archivebox:/data"
    ];
  };

  virtualisation.oci-containers.containers.archivebox_scheduler = {
    image = "archivebox/archivebox:latest";
    cmd = [
      "schedule"
      "--foreground"
      "--update"
      "--every=day"
    ];
    environment = {
      TIMEOUT = "120";
      ALLOWED_HOSTS = "*"; # set this to the hostname(s) you're going to serve the site from!
      CSRF_TRUSTED_ORIGINS = "https://archive.hailsatan.eu"; # you MUST set this to the server's URL for admin login and the REST API to work
      PUBLIC_INDEX = "True"; # set to False to prevent anonymous users from viewing snapshot list
      PUBLIC_SNAPSHOTS = "True"; # set to False to prevent anonymous users from viewing snapshot content
      PUBLIC_ADD_VIEW = "False"; # set to True to allow anonymous users to submit new URLs to archive
      SEARCH_BACKEND_ENGINE = "sonic"; # tells ArchiveBox to use sonic container below for fast full-text search
      SEARCH_BACKEND_HOST_NAME = "archivebox_sonic";
      SEARCH_BACKEND_PASSWORD = "SomeSecretPassword";
    };
    extraOptions = [
      "--network=archivebox-net"
      "--network=podman"
    ];
    volumes = [
      "/data/media/archivebox:/data"
    ];
  };

  virtualisation.oci-containers.containers.archivebox_sonic = {
    image = "archivebox/sonic:latest";
    environment = {
      SEARCH_BACKEND_PASSWORD = "SomeSecretPassword";
    };
    extraOptions = [ "--network=archivebox-net" ];
    volumes = [
      "archivebox-sonic:/data"
    ];
  };

  # printer
  virtualisation.oci-containers.containers.labello = {
    image = "telegnom/labello:latest";
    environment = {
      LAB_PRINTER_DEVICE = "tcp://BRN008077572A96.lan:9100";
      # LABELLO_DOWNLOAD_FONT = "yes";
    };
    extraOptions = [ "--network=podman" ];
    volumes =
      let
        # Bundle Roboto + Roboto Mono from nixpkgs instead of downloading.
        fonts = pkgs.runCommandNoCC "labello-fonts" { } ''
          mkdir $out
          cp ${pkgs.roboto}/share/fonts/truetype/* $out
          cp ${pkgs.roboto-mono}/share/fonts/truetype/* $out
        '';
      in
      [
        "${fonts}:/opt/labello/fonts"
        # "/nix/store:/nix/store"
      ];
  };

  virtualisation.oci-containers.containers.copyparty = {
    image = "docker.io/copyparty/ac:latest";
    extraOptions = [ "--network=podman" ];
    ports = [ ];
    volumes =
      let
        # Auth is header-based via authentik (idp-h-usr / idp-h-grp), only
        # trusting the podman gateway as XFF source.
        copypartyCfg = ''
          [global]
            xff-src: 10.88.0.1/24
            idp-h-usr: X-Authentik-Username
            idp-h-grp: X-Copyparty-Group
            e2dsa # enable file indexing and filesystem scanning
            e2ts # enable multimedia indexing
            ansi # enable colors in log messages
            re-maxage: 3600 # rescan every something
            hist: /data/media/copyparty/cache
            name: the gayest storage in the west
            no-robots
            shr: /shr
            shr-adm: @admin
          [/]
            /data/media/copyparty/srv
            accs:
              A: @admin
              r: *
          [/dump]
            /data/media/copyparty/srv/dump
            flags:
              dedup
            accs:
              A: @admin
              w: *
          [/pub]
            /data/media/copyparty/srv/pub
            flags:
              dedup
            accs:
              A: @admin
              rw: *
          [/tv]
            /data/media/tv
            flags:
              hist: /data/media/copyparty/hist/tv
            accs:
              r: *
          [/movies]
            /data/media/movies
            flags:
              hist: /data/media/copyparty/hist/movies
            accs:
              r: *
          [/books]
            /data/media/books
            flags:
              hist: /data/media/copyparty/hist/books
            accs:
              r: *
          [/audiobooks]
            /data/media/audiobooks
            flags:
              hist: /data/media/copyparty/hist/audiobooks
            accs:
              r: *
          [/music]
            /data/media/music
            flags:
              hist: /data/media/copyparty/hist/music
            accs:
              r: *
          [/games]
            /data/media/games
            flags:
              hist: /data/media/copyparty/hist/games
            accs:
              r: *
        '';
        cpp = pkgs.writeText "copyparty.conf" copypartyCfg;
      in
      [
        "/data/media/tv:/data/media/tv:ro"
        "/data/media/movies:/data/media/movies:ro"
        "/data/media/audiobooks:/data/media/audiobooks:ro"
        "/data/media/books:/data/media/books:ro"
        "/data/media/games:/data/media/games:ro"
        "/data/media/beets:/data/media/music:ro"
        "/data/media/copyparty:/data/media/copyparty"
        "/data/media/copyparty/cfg:/cfg"
        "${cpp}:/cfg/copyparty.conf"
      ];
  };
}
|
||||
495
old-conf/hosts/ds9/default.nix
Normal file
495
old-conf/hosts/ds9/default.nix
Normal file
|
|
@ -0,0 +1,495 @@
|
|||
{
|
||||
config,
|
||||
inputs,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
pubkeys = import ../../data/pubkeys.nix;
|
||||
caddy-with-plugins = import ./custom-caddy.nix { inherit pkgs; };
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./hardware-configuration.nix
|
||||
|
||||
./containers.nix
|
||||
./backup.nix
|
||||
./grist.nix
|
||||
# ./plex.nix
|
||||
./samba.nix
|
||||
./paperless.nix
|
||||
./maubot.nix
|
||||
./woodpecker.nix
|
||||
./attic.nix
|
||||
./ytdl-sub.nix
|
||||
./snipe-it.nix
|
||||
./radicale.nix
|
||||
./lms.nix
|
||||
|
||||
../../nixos-modules/networking/tailscale.nix
|
||||
../../nixos-modules/services/docker.nix
|
||||
../../nixos-modules/services/libvirt.nix
|
||||
../../nixos-modules/services/msmtp.nix
|
||||
# ../../nixos-modules/services/paperless.nix
|
||||
# ../../nixos-modules/services/photoprism.nix
|
||||
../../nixos-modules/services/samba.nix
|
||||
../../nixos-modules/services/ssh.nix
|
||||
../../nixos-modules/services/caddy
|
||||
../../nixos-modules/system/agenix.nix
|
||||
../../nixos-modules/system/fs.nix
|
||||
../../nixos-modules/system/persist.nix
|
||||
../../nixos-modules/system/security.nix
|
||||
../../nixos-modules/user
|
||||
];
|
||||
|
||||
# Don't Use the systemd-boot EFI boot loader.
|
||||
boot.loader.systemd-boot.enable = true;
|
||||
|
||||
# power save stuffzies
|
||||
services.udev.path = [ pkgs.hdparm ];
|
||||
services.udev.extraRules = ''
|
||||
ACTION=="add|change", KERNEL=="sd[a-z]", ATTRS{queue/rotational}=="1", RUN+="${pkgs.hdparm}/bin/hdparm -S 60 -B 100 /dev/%k"
|
||||
'';
|
||||
|
||||
services.syncthing.enable = true;
|
||||
services.syncthing.user = "ragon";
|
||||
|
||||
programs.mosh.enable = true;
|
||||
security.sudo.wheelNeedsPassword = false;
|
||||
networking.useDHCP = true;
|
||||
networking.useNetworkd = true;
|
||||
systemd.network.networks."enp1s0f1".ipv6AcceptRAConfig = {
|
||||
Token = "prefixstable";
|
||||
};
|
||||
networking.bridges."br0".interfaces = [ ];
|
||||
networking.hostId = "7b4c2932";
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
9000
|
||||
25565
|
||||
80
|
||||
443
|
||||
];
|
||||
networking.firewall.allowedUDPPorts = [ 443 ]; # http3 :3
|
||||
boot.initrd.network = {
|
||||
enable = true;
|
||||
postCommands = ''
|
||||
zpool import rpool
|
||||
zpool import spool
|
||||
echo "zfs load-key -a; killall zfs" >> /root/.profile
|
||||
'';
|
||||
ssh = {
|
||||
enable = true;
|
||||
port = 2222;
|
||||
hostKeys = [
|
||||
"/persistent/etc/nixos/secrets/initrd/ssh_host_rsa_key"
|
||||
"/persistent/etc/nixos/secrets/initrd/ssh_host_ed25519_key"
|
||||
];
|
||||
authorizedKeys = pubkeys.ragon.computers;
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
boot.kernel.sysctl."fs.inotify.max_user_instances" = 512;
|
||||
|
||||
# Immutable users due to tmpfs
|
||||
users.mutableUsers = false;
|
||||
|
||||
# users.users.nia = {
|
||||
# createHome = true;
|
||||
# isNormalUser = true;
|
||||
# extraGroups = [
|
||||
# "docker"
|
||||
# "podman"
|
||||
# "wheel"
|
||||
# ];
|
||||
# openssh.authorizedKeys.keys = [
|
||||
# "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDq+jk1Bi8/x0lYDiVi/iVnp9nEleocoQ+xHmlpDt9Qs"
|
||||
# ];
|
||||
# };
|
||||
users.users.bzzt = {
|
||||
description = "bzzt server service user";
|
||||
home = "/var/lib/bzzt";
|
||||
createHome = true;
|
||||
isSystemUser = true;
|
||||
group = "bzzt";
|
||||
};
|
||||
users.groups.bzzt = { };
|
||||
users.users.minecraft = {
|
||||
description = "Minecraft server service user";
|
||||
home = "/var/lib/minecraft";
|
||||
createHome = true;
|
||||
isSystemUser = true;
|
||||
group = "minecraft";
|
||||
};
|
||||
users.groups.minecraft = { };
|
||||
environment.systemPackages = [
|
||||
pkgs.jdk17
|
||||
pkgs.borgbackup
|
||||
pkgs.beets
|
||||
];
|
||||
|
||||
services.smartd = {
|
||||
enable = true;
|
||||
extraOptions = [ "--interval=7200" ];
|
||||
notifications.test = true;
|
||||
};
|
||||
|
||||
|
||||
services.zfs.zed.enableMail = true;
|
||||
services.zfs.zed.settings = {
|
||||
ZED_EMAIL_ADDR = [ "root" ];
|
||||
ZED_EMAIL_PROG = "${pkgs.msmtp}/bin/msmtp";
|
||||
ZED_EMAIL_OPTS = "@ADDRESS@";
|
||||
|
||||
ZED_NOTIFY_INTERVAL_SECS = 7200;
|
||||
ZED_NOTIFY_VERBOSE = true;
|
||||
|
||||
ZED_USE_ENCLOSURE_LEDS = false;
|
||||
ZED_SCRUB_AFTER_RESILVER = true;
|
||||
};
|
||||
|
||||
# dyndns
|
||||
|
||||
systemd.services."dyndns-refresh" = {
|
||||
script = ''
|
||||
set -eu
|
||||
export PATH=$PATH:${pkgs.curl}/bin:${pkgs.jq}/bin:${pkgs.iproute2}/bin
|
||||
${pkgs.bash}/bin/bash ${config.age.secrets.ds9DynDns.path}
|
||||
'';
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = "root";
|
||||
};
|
||||
startAt = "*:0/10";
|
||||
};
|
||||
|
||||
# services.tailscaleAuth.enable = true;
|
||||
# services.tailscaleAuth.group = config.services.caddy.group;
|
||||
systemd.services.caddy.serviceConfig.EnvironmentFile = config.age.secrets.desec.path;
|
||||
services.caddy = {
|
||||
# ragon.services.caddy is enabled
|
||||
extraConfig = ''
|
||||
(blockBots) {
|
||||
@botForbidden header_regexp User-Agent "(?i)AdsBot-Google|Amazonbot|anthropic-ai|Applebot|Applebot-Extended|AwarioRssBot|AwarioSmartBot|Bytespider|CCBot|ChatGPT|ChatGPT-User|Claude-Web|ClaudeBot|cohere-ai|DataForSeoBot|Diffbot|FacebookBot|Google-Extended|GPTBot|ImagesiftBot|magpie-crawler|omgili|Omgilibot|peer39_crawler|PerplexityBot|YouBot"
|
||||
|
||||
handle @botForbidden {
|
||||
redir https://hil-speed.hetzner.com/10GB.bin
|
||||
}
|
||||
handle /robots.txt {
|
||||
respond <<TXT
|
||||
User-Agent: *
|
||||
Disallow: /
|
||||
TXT 200
|
||||
}
|
||||
}
|
||||
(podmanRedir) {
|
||||
reverse_proxy {args[:]} {
|
||||
transport http {
|
||||
resolvers 10.88.0.1 # podman dns
|
||||
}
|
||||
}
|
||||
}
|
||||
(podmanRedirWithAuth) {
|
||||
route {
|
||||
# always forward outpost path to actual outpost
|
||||
reverse_proxy /outpost.goauthentik.io/* http://authentik-server:9000 {
|
||||
transport http {
|
||||
resolvers 10.88.0.1 # podman dns
|
||||
}
|
||||
}
|
||||
forward_auth http://authentik-server:9000 {
|
||||
transport http {
|
||||
resolvers 10.88.0.1 # podman dns
|
||||
}
|
||||
uri /outpost.goauthentik.io/auth/caddy
|
||||
copy_headers {
|
||||
X-Authentik-Username
|
||||
X-Copyparty-Group
|
||||
X-Authentik-Groups
|
||||
X-Authentik-Entitlements
|
||||
X-Authentik-Email
|
||||
X-Authentik-Name
|
||||
X-Authentik-Uid
|
||||
X-Authentik-Jwt
|
||||
X-Authentik-Meta-Jwks
|
||||
X-Authentik-Meta-Outpost
|
||||
X-Authentik-Meta-Provider
|
||||
X-Authentik-Meta-App
|
||||
X-Authentik-Meta-Version
|
||||
X-Grafana-Role
|
||||
X-Authentik-Username>X-Remote-User
|
||||
}
|
||||
}
|
||||
reverse_proxy {args[:]} {
|
||||
transport http {
|
||||
resolvers 10.88.0.1 # podman dns
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
'';
|
||||
globalConfig = ''
|
||||
acme_ca https://acme-v02.api.letsencrypt.org/directory # hard coded so zerossl doesn't get used
|
||||
acme_dns desec {
|
||||
token "{$TOKEN}"
|
||||
}
|
||||
metrics {
|
||||
per_host
|
||||
}
|
||||
servers {
|
||||
trusted_proxies static 100.96.45.2/32 fd7a:115c:a1e0:ab12:4843:cd96:6260:2d02/128
|
||||
}
|
||||
'';
|
||||
virtualHosts."*.hailsatan.eu ".logFormat = ''
|
||||
output file ${config.services.caddy.logDir}/access-*hailsatan.eu_internet.log
|
||||
'';
|
||||
virtualHosts."*.hailsatan.eu ".extraConfig = ''
|
||||
import blockBots
|
||||
@blog host blog.hailsatan.eu
|
||||
handle @blog {
|
||||
route {
|
||||
# always forward outpost path to actual outpost
|
||||
reverse_proxy /outpost.goauthentik.io/* http://authentik-server:9000 {
|
||||
transport http {
|
||||
resolvers 10.88.0.1 # podman dns
|
||||
}
|
||||
}
|
||||
forward_auth http://authentik-server:9000 {
|
||||
transport http {
|
||||
resolvers 10.88.0.1 # podman dns
|
||||
}
|
||||
uri /outpost.goauthentik.io/auth/caddy
|
||||
copy_headers X-Authentik-Username X-Copyparty-Group X-Authentik-Groups X-Authentik-Entitlements X-Authentik-Email X-Authentik-Name X-Authentik-Uid X-Authentik-Jwt X-Authentik-Meta-Jwks X-Authentik-Meta-Outpost X-Authentik-Meta-Provider X-Authentik-Meta-App X-Authentik-Meta-Version X-Grafana-Role
|
||||
}
|
||||
root * /srv/www/xynospace
|
||||
file_server
|
||||
|
||||
}
|
||||
}
|
||||
@jellyfin host j.hailsatan.eu
|
||||
handle @jellyfin {
|
||||
handle /metrics* {
|
||||
abort
|
||||
}
|
||||
import podmanRedir http://jellyfin:8096
|
||||
}
|
||||
@mautrix-signal host mautrix-signal.hailsatan.eu
|
||||
handle @mautrix-signal {
|
||||
import podmanRedir http://mautrix-signal:29328
|
||||
}
|
||||
@woodpecker host woodpecker.hailsatan.eu
|
||||
handle @woodpecker {
|
||||
import podmanRedir http://woodpecker-server:8000
|
||||
}
|
||||
@attic host attic.hailsatan.eu
|
||||
handle @attic {
|
||||
reverse_proxy http://[::1]:8089
|
||||
}
|
||||
@auth host auth.hailsatan.eu
|
||||
handle @auth {
|
||||
import podmanRedir http://authentik-server:9000
|
||||
}
|
||||
@radicale host radicale.hailsatan.eu
|
||||
handle @radicale {
|
||||
import podmanRedirWithAuth http://[::1]:5232
|
||||
}
|
||||
@grafana host grafana.hailsatan.eu
|
||||
handle @grafana {
|
||||
import podmanRedirWithAuth http://grafana:3000
|
||||
}
|
||||
@lms host lms.hailsatan.eu
|
||||
handle @lms {
|
||||
handle /rest* {
|
||||
|
||||
import podmanRedir http://localhost:5082
|
||||
}
|
||||
import podmanRedirWithAuth http://localhost:5082
|
||||
}
|
||||
@immich host immich.hailsatan.eu
|
||||
handle @immich {
|
||||
import podmanRedir http://immich-server:2283
|
||||
}
|
||||
@cd host cd.hailsatan.eu
|
||||
handle @cd {
|
||||
import podmanRedirWithAuth http://changedetection:5000
|
||||
}
|
||||
@node-red host node-red.hailsatan.eu
|
||||
handle @node-red {
|
||||
import podmanRedirWithAuth http://node-red:1880
|
||||
}
|
||||
@labello host labello.hailsatan.eu
|
||||
handle @labello {
|
||||
import podmanRedirWithAuth http://labello:4242
|
||||
}
|
||||
@paperless host paperless.hailsatan.eu
|
||||
handle @paperless {
|
||||
import podmanRedirWithAuth http://paperless-server:8000
|
||||
}
|
||||
@archivebox host archivebox.hailsatan.eu
|
||||
handle @archivebox {
|
||||
handle /api/* {
|
||||
import podmanRedir http://archivebox:8000
|
||||
}
|
||||
handle {
|
||||
import podmanRedirWithAuth http://archivebox:8000
|
||||
}
|
||||
}
|
||||
@grist host grist.hailsatan.eu
|
||||
handle @grist {
|
||||
import podmanRedir http://grist:8484
|
||||
|
||||
}
|
||||
@snipe-it host snipe-it.hailsatan.eu
|
||||
handle @snipe-it {
|
||||
root * ${pkgs.snipe-it}/share/php/snipe-it/public
|
||||
php_fastcgi unix//${config.services.phpfpm.pools."snipe-it".socket}
|
||||
file_server
|
||||
}
|
||||
@copyparty host c.hailsatan.eu
|
||||
handle @copyparty {
|
||||
# @proxy {
|
||||
# header_regexp Cookie authentik_proxy_([a-zA-Z0-9])
|
||||
# }
|
||||
# handle @proxy {
|
||||
# import podmanRedirWithAuth http://copyparty:3923
|
||||
# }
|
||||
handle /shr/* {
|
||||
import podmanRedir http://copyparty:3923
|
||||
}
|
||||
handle /.cpr/* {
|
||||
import podmanRedir http://copyparty:3923
|
||||
}
|
||||
# @noauth {
|
||||
# path_regexp ^\/(noauth(\/.*|)|[a-z.]+\.(css|js)|[1-9].png)$
|
||||
# }
|
||||
# @getoptionshead {
|
||||
# method GET OPTIONS HEAD
|
||||
# }
|
||||
# handle @noauth {
|
||||
# handle @getoptionshead {
|
||||
# import podmanRedir http://copyparty:3923
|
||||
# }
|
||||
# }
|
||||
handle {
|
||||
import podmanRedirWithAuth http://copyparty:3923
|
||||
}
|
||||
}
|
||||
handle {
|
||||
import podmanRedirWithAuth http://127.0.0.1:8001
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
services.prometheus = {
|
||||
enable = true;
|
||||
exporters.node = {
|
||||
enable = true;
|
||||
enabledCollectors = [ "systemd" ];
|
||||
};
|
||||
exporters.postgres = {
|
||||
enable = true;
|
||||
environmentFile = config.age.secrets.ds9PostgresExporterEnv.path;
|
||||
};
|
||||
scrapeConfigs = [
|
||||
{
|
||||
job_name = "postgres";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [
|
||||
"localhost:${toString config.services.prometheus.exporters.postgres.port}"
|
||||
"picard.kangaroo-galaxy.ts.net:${toString config.services.prometheus.exporters.postgres.port}"
|
||||
];
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
job_name = "caddy";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [
|
||||
"localhost:2019"
|
||||
"picard.kangaroo-galaxy.ts.net:2019"
|
||||
];
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
job_name = "node";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [
|
||||
"localhost:${toString config.services.prometheus.exporters.node.port}"
|
||||
"picard.kangaroo-galaxy.ts.net:${toString config.services.prometheus.exporters.node.port}"
|
||||
];
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
home-manager.users.ragon =
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
inputs,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
# ../../hm-modules/nvim
|
||||
../../hm-modules/helix
|
||||
# ../../hm-modules/zsh
|
||||
../../hm-modules/tmux
|
||||
# ../../hm-modules/xonsh
|
||||
../../hm-modules/cli.nix
|
||||
../../hm-modules/files.nix
|
||||
];
|
||||
# ragon.xonsh.enable = true;
|
||||
|
||||
programs.home-manager.enable = true;
|
||||
home.stateVersion = "23.11";
|
||||
};
|
||||
|
||||
# begin kube
|
||||
# services.k3s = {
|
||||
# enable = true;
|
||||
# extraFlags = "--disable=traefik --cluster-cidr 10.42.0.0/16,2001:cafe:42::/56 --service-cidr=10.43.0.0/16,2001:cafe:43::/112 --vpn-auth-file=/persistent/tailscale-auth-file";
|
||||
#};
|
||||
# systemd.services.k3s.path = [pkgs.tailscale pkgs.coreutils pkgs.bash];
|
||||
# end kube
|
||||
|
||||
ragon = {
|
||||
agenix.secrets."desec" = { };
|
||||
agenix.secrets."ds9DynDns" = { };
|
||||
agenix.secrets."ds9PostgresExporterEnv" = { };
|
||||
user.enable = true;
|
||||
persist.enable = true;
|
||||
persist.extraDirectories = [
|
||||
"/home/nia"
|
||||
"/var/lib/syncthing"
|
||||
"/var/lib/minecraft"
|
||||
"/var/lib/bzzt"
|
||||
"/var/lib/rancher"
|
||||
"/etc/rancher"
|
||||
"/root/.cache"
|
||||
"/srv/www"
|
||||
"/var/lib/${config.services.prometheus.stateDir}"
|
||||
];
|
||||
|
||||
services = {
|
||||
caddy.enable = true;
|
||||
docker.enable = true;
|
||||
ssh.enable = true;
|
||||
msmtp.enable = true;
|
||||
# photoprism.enable = true;
|
||||
tailscale.enable = true;
|
||||
tailscale.exitNode = true;
|
||||
tailscale.extraUpCommands = "--advertise-routes=10.0.0.0/16";
|
||||
# libvirt.enable = true;
|
||||
# paperless.enable = true;
|
||||
};
|
||||
|
||||
};
|
||||
}
|
||||
34
old-conf/hosts/ds9/grist.nix
Normal file
34
old-conf/hosts/ds9/grist.nix
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
{
|
||||
pkgs,
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
ragon.agenix.secrets.ds9GristEnv = { };
|
||||
virtualisation.quadlet = {
|
||||
containers.grist = {
|
||||
containerConfig = {
|
||||
image = "docker.io/gristlabs/grist-oss";
|
||||
networks = [
|
||||
"podman"
|
||||
"db-net"
|
||||
];
|
||||
volumes = [
|
||||
"grist:/persist"
|
||||
];
|
||||
environments = {
|
||||
GRIST_SANDBOX_FLAVOR = "gvisor";
|
||||
APP_HOME_URL = "https://grist.hailsatan.eu";
|
||||
GRIST_FORCE_LOGIN = "true";
|
||||
GRIST_TELEMETRY_LEVEL = "off";
|
||||
GRIST_ALLOW_AUTOMATIC_VERSION_CHECKING = "false";
|
||||
};
|
||||
addCapabilities = [ "SYS_PTRACE" ];
|
||||
environmentFiles = [
|
||||
config.age.secrets.ds9GristEnv.path
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
57
old-conf/hosts/ds9/hardware-configuration.nix
Normal file
57
old-conf/hosts/ds9/hardware-configuration.nix
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
# Do not modify this file! It was generated by ‘nixos-generate-config’
|
||||
# and may be overwritten by future invocations. Please make changes
|
||||
# to /etc/nixos/configuration.nix instead.
|
||||
{ config, lib, pkgs, modulesPath, ... }:
|
||||
|
||||
{
|
||||
imports = [ "${modulesPath}/installer/scan/not-detected.nix" ];
|
||||
|
||||
boot.initrd.availableKernelModules = [ "r8169" "ahci" "vfio-pci" "xhci_pci" "ehci_pci" "nvme" "usbhid" "sd_mod" "sr_mod" ];
|
||||
boot.kernelModules = [ "kvm-amd" ];
|
||||
nix.settings.max-jobs = lib.mkDefault 12;
|
||||
powerManagement.powertop.enable = true;
|
||||
powerManagement.cpuFreqGovernor = "powersave";
|
||||
powerManagement.scsiLinkPolicy = "min_power";
|
||||
|
||||
services.zfs.autoScrub.enable = true;
|
||||
ragon.system.fs = {
|
||||
enable = true;
|
||||
mediadata = false;
|
||||
swap = false;
|
||||
persistentSnapshot = false;
|
||||
nix = "spool/local/nix";
|
||||
varlog = "spool/local/journal";
|
||||
persistent = "spool/safe/persist";
|
||||
arcSize = 16;
|
||||
};
|
||||
|
||||
services.sanoid.datasets."rpool/content/safe/data/media" = { };
|
||||
services.sanoid.enable = true;
|
||||
services.sanoid.interval = "0/8:00:00";
|
||||
|
||||
swapDevices = [{ device = "/dev/disk/by-id/nvme-eui.000000000000000100a075202c247839-part1"; randomEncryption = true; }];
|
||||
fileSystems."/boot".device = "/dev/disk/by-uuid/149F-23AA";
|
||||
|
||||
fileSystems."/data" = {
|
||||
device = "rpool/content/safe/data";
|
||||
fsType = "zfs";
|
||||
};
|
||||
fileSystems."/data/media" = {
|
||||
device = "rpool/content/safe/data/media";
|
||||
fsType = "zfs";
|
||||
};
|
||||
fileSystems."/backups/DaedalusTimeMachine" = {
|
||||
device = "rpool/content/local/backups/daedalus";
|
||||
fsType = "zfs";
|
||||
};
|
||||
fileSystems."/backups" = {
|
||||
device = "rpool/content/local/backups";
|
||||
fsType = "zfs";
|
||||
};
|
||||
# fileSystems."/data/media/nzbr" = {
|
||||
# device = "10.0.1.2:/storage/media";
|
||||
# fsType = "nfs";
|
||||
# options = [ "x-systemd.automount" "noauto" ];
|
||||
# };
|
||||
|
||||
}
|
||||
101
old-conf/hosts/ds9/lms.nix
Normal file
101
old-conf/hosts/ds9/lms.nix
Normal file
|
|
@ -0,0 +1,101 @@
|
|||
{
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
inherit (lib) concatStringsSep concatMapStringsSep mapAttrsToList;
|
||||
lmsConfig = {
|
||||
api-subsonic-support-user-password-auth = true;
|
||||
behind-reverse-proxy = true;
|
||||
authentication-backend = "http-headers";
|
||||
http-headers-login-field = "X-Remote-User";
|
||||
working-dir = "/var/lib/lms";
|
||||
scanner-skip-duplicate-mbid = true;
|
||||
ffmpeg-file = "${pkgs.ffmpeg-full}/bin/ffmpeg";
|
||||
wt-resources = "${pkgs.wt}/share/Wt/resources";
|
||||
docroot = "${pkgs.lms}/share/lms/docroot/;/resources,/css,/images,/js,/favicon.ico";
|
||||
approot = "${pkgs.lms}/share/lms/approot";
|
||||
# log-min-severity = "debug";
|
||||
trusted-proxies = ["127.0.0.1" "::1"];
|
||||
# db-show-queries = true;
|
||||
};
|
||||
writeVal =
|
||||
x:
|
||||
if builtins.typeOf x == "string" then
|
||||
''"${x}"''
|
||||
else if builtins.typeOf x == "list" then
|
||||
''(${(concatMapStringsSep ",\n" writeVal x)})''
|
||||
else if builtins.typeOf x == "bool" then
|
||||
(if x then "true" else "false")
|
||||
else
|
||||
(writeVal (toString x));
|
||||
lmsConfigFile = pkgs.writeText "lms.conf" (
|
||||
(concatStringsSep "\n" (mapAttrsToList (n: v: "${n} = ${writeVal v};") lmsConfig)) + "\n"
|
||||
);
|
||||
in
|
||||
{
|
||||
systemd.services.lms = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
environment.OMP_THREAD_LIMIT = "1";
|
||||
serviceConfig = {
|
||||
DynamicUser = true;
|
||||
ExecStart = ''
|
||||
${pkgs.lms}/bin/lms ${lmsConfigFile}
|
||||
'';
|
||||
Group = "users";
|
||||
StateDirectory = "lms";
|
||||
RuntimeDirectory = "lms";
|
||||
WorkingDirectory = "/var/lib/lms";
|
||||
RootDirectory = "/run/lms";
|
||||
ReadWritePaths = "";
|
||||
BindReadOnlyPaths = [
|
||||
"${config.security.pki.caBundle}:/etc/ssl/certs/ca-certificates.crt"
|
||||
builtins.storeDir
|
||||
"/etc"
|
||||
"/data/media/beets/music"
|
||||
]
|
||||
++ lib.optionals config.services.resolved.enable [
|
||||
"/run/systemd/resolve/stub-resolv.conf"
|
||||
"/run/systemd/resolve/resolv.conf"
|
||||
];
|
||||
CapabilityBoundingSet = "";
|
||||
RestrictAddressFamilies = [
|
||||
"AF_UNIX"
|
||||
"AF_INET"
|
||||
"AF_INET6"
|
||||
];
|
||||
RestrictNamespaces = true;
|
||||
PrivateDevices = true;
|
||||
PrivateUsers = true;
|
||||
ProtectClock = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectHome = true;
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
SystemCallArchitectures = "native";
|
||||
SystemCallFilter = [
|
||||
"@system-service"
|
||||
"~@privileged"
|
||||
];
|
||||
RestrictRealtime = true;
|
||||
LockPersonality = true;
|
||||
MemoryDenyWriteExecute = true;
|
||||
UMask = "0066";
|
||||
ProtectHostname = true;
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
ragon.persist.extraDirectories = [
|
||||
{
|
||||
directory = "/var/lib/private/lms";
|
||||
mode = "0700";
|
||||
defaultPerms.mode = "0700";
|
||||
}
|
||||
];
|
||||
}
|
||||
25
old-conf/hosts/ds9/maubot.nix
Normal file
25
old-conf/hosts/ds9/maubot.nix
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
{
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
virtualisation.quadlet = {
|
||||
containers = {
|
||||
mautrix-signal = {
|
||||
containerConfig.image = "dock.mau.dev/mautrix/signal:latest";
|
||||
containerConfig.volumes = [
|
||||
"mautrix-signal:/data"
|
||||
];
|
||||
# containerConfig.publishPorts = [
|
||||
# "100.83.96.25:29328:29328"
|
||||
# ];
|
||||
containerConfig.networks = [
|
||||
"podman"
|
||||
"db-net"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
75
old-conf/hosts/ds9/paperless.nix
Normal file
75
old-conf/hosts/ds9/paperless.nix
Normal file
|
|
@ -0,0 +1,75 @@
|
|||
{
|
||||
pkgs,
|
||||
config,
|
||||
lib,
|
||||
inputs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
virtualisation.quadlet = {
|
||||
containers = {
|
||||
paperless-server.containerConfig.image = "ghcr.io/paperless-ngx/paperless-ngx:latest";
|
||||
|
||||
paperless-server.containerConfig.networks = [
|
||||
"podman"
|
||||
"db-net"
|
||||
"paperless-net"
|
||||
];
|
||||
paperless-server.containerConfig.volumes = [
|
||||
"paperless-media:/usr/src/paperless/media"
|
||||
"paperless-data:/usr/src/paperless/data"
|
||||
"/data/paperless-export:/usr/src/paperless/export"
|
||||
"/data/paperless-consume:/usr/src/paperless/consume"
|
||||
];
|
||||
paperless-server.containerConfig.environments = {
|
||||
PAPERLESS_REDIS = "redis://paperless-redis:6379";
|
||||
PAPERLESS_DBHOST = "postgres";
|
||||
PAPERLESS_TIKA_ENABLED = "1";
|
||||
PAPERLESS_TIKA_GOTENBERG_ENDPOINT = "http://paperless-gotenberg:3000";
|
||||
PAPERLESS_TIKA_ENDPOINT = "http://paperless-tika:9998";
|
||||
USERMAP_UID = "1000";
|
||||
USERMAP_GID = "100";
|
||||
PAPERLESS_URL = "https://paperless.hailsatan.eu";
|
||||
PAPERLESS_TIME_ZONE = "Europe/Berlin";
|
||||
PAPERLESS_OCR_LANGUAGE = "deu";
|
||||
PAPERLESS_TRUSTED_PROXIES = "10.89.0.1";
|
||||
PAPERLESS_ENABLE_HTTP_REMOTE_USER = "true";
|
||||
PAPERLESS_ENABLE_HTTP_REMOTE_API = "true";
|
||||
PAPERLESS_HTTP_REMOTE_USER_HEADER_NAME = "HTTP_X_AUTHENTIK_USERNAME";
|
||||
PAPERLESS_DISABLE_REGULAR_LOGIN = "true";
|
||||
|
||||
};
|
||||
paperless-server.serviceConfig.TimeoutStartSec = "60";
|
||||
paperless-redis.containerConfig.image = "docker.io/library/redis:alpine";
|
||||
paperless-redis.containerConfig.networks = [
|
||||
"paperless-net"
|
||||
];
|
||||
paperless-redis.containerConfig.volumes = [ "paperless-redis:/data" ];
|
||||
paperless-redis.serviceConfig.TimeoutStartSec = "60";
|
||||
paperless-gotenberg = {
|
||||
containerConfig = {
|
||||
image = "docker.io/gotenberg/gotenberg:8.7";
|
||||
exec = "gotenberg --chromium-disable-javascript=true --chromium-allow-list=file:///tmp/.*";
|
||||
networks = [
|
||||
"paperless-net"
|
||||
];
|
||||
};
|
||||
serviceConfig.TimeoutStartSec = "60";
|
||||
};
|
||||
paperless-tika = {
|
||||
containerConfig = {
|
||||
image = "docker.io/apache/tika:latest";
|
||||
networks = [
|
||||
"paperless-net"
|
||||
];
|
||||
};
|
||||
serviceConfig.TimeoutStartSec = "60";
|
||||
};
|
||||
};
|
||||
networks = {
|
||||
paperless.networkConfig.ipv6 = true;
|
||||
paperless.networkConfig.name = "paperless-net";
|
||||
paperless.networkConfig.internal = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
9
old-conf/hosts/ds9/plex.nix
Normal file
9
old-conf/hosts/ds9/plex.nix
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
{ config, pkgs, lib, inputs, ... }: {
|
||||
ragon.persist.extraDirectories = [ config.services.plex.dataDir ];
|
||||
services.plex = {
|
||||
enable = true;
|
||||
openFirewall = true;
|
||||
user = "ragon";
|
||||
group = "users";
|
||||
};
|
||||
}
|
||||
43
old-conf/hosts/ds9/radicale.nix
Normal file
43
old-conf/hosts/ds9/radicale.nix
Normal file
|
|
@ -0,0 +1,43 @@
|
|||
{
|
||||
pkgs,
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
services.radicale = {
|
||||
enable = true;
|
||||
settings = {
|
||||
server.hosts = [ "[::1]:5232" ];
|
||||
auth = {
|
||||
type = "http_x_remote_user";
|
||||
# remote_ip_source = "X-Remote-Addr";
|
||||
};
|
||||
storage = {
|
||||
filesystem_folder = "/var/lib/radicale/collections";
|
||||
};
|
||||
};
|
||||
rights = {
|
||||
root = {
|
||||
user = ".+";
|
||||
collection = "";
|
||||
permissions = "R";
|
||||
};
|
||||
principal = {
|
||||
user = ".+";
|
||||
collection = "{user}";
|
||||
permissions = "RW";
|
||||
};
|
||||
calendars = {
|
||||
user = ".+";
|
||||
collection = "{user}/[^/]+";
|
||||
permissions = "rw";
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
ragon.persist.extraDirectories = [
|
||||
"/var/lib/radicale"
|
||||
];
|
||||
|
||||
}
|
||||
66
old-conf/hosts/ds9/samba.nix
Normal file
66
old-conf/hosts/ds9/samba.nix
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
{ config, pkgs, lib, ... }: {
|
||||
# services.samba.extraConfig = ''
|
||||
# min protocol = SMB3
|
||||
# vfs objects = acl_xattr catia fruit streams_xattr
|
||||
# fruit:nfs_aces = no
|
||||
# inherit permissions = yes
|
||||
# fruit:posix_rename = yes
|
||||
# fruit:resource = xattr
|
||||
# fruit:model = MacSamba
|
||||
# fruit:veto_appledouble = no
|
||||
# fruit:wipe_intentionally_left_blank_rfork = yes
|
||||
# fruit:delete_empty_adfiles = yes
|
||||
# fruit:metadata = stream
|
||||
# '';
|
||||
|
||||
services.avahi.enable = true;
|
||||
services.avahi.nssmdns = true;
|
||||
services.avahi.publish.enable = true;
|
||||
services.avahi.extraServiceFiles.smb = ''
|
||||
<?xml version="1.0" standalone='no'?>
|
||||
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
|
||||
<service-group>
|
||||
<name replace-wildcards="yes">%h</name>
|
||||
<service>
|
||||
<type>_smb._tcp</type>
|
||||
<port>445</port>
|
||||
<host-name>ds9.kangaroo-galaxy.ts.net</host-name>
|
||||
</service>
|
||||
<service>
|
||||
<type>_device-info._tcp</type>
|
||||
<port>0</port>
|
||||
<txt-record>model=MacPro7,1@ECOLOR=226,226,224</txt-record>
|
||||
</service>
|
||||
<service>
|
||||
<type>_adisk._tcp</type>
|
||||
<txt-record>sys=waMa=0,adVF=0x100</txt-record>
|
||||
<txt-record>dk0=adVN=TimeMachine,adVF=0x82</txt-record>
|
||||
<host-name>ds9.kangaroo-galaxy.ts.net</host-name>
|
||||
</service>
|
||||
</service-group>
|
||||
'';
|
||||
|
||||
ragon.services = {
|
||||
samba.enable = true;
|
||||
samba.shares = {
|
||||
TimeMachine = {
|
||||
path = "/backups/DaedalusTimeMachine";
|
||||
comment = "DaedalusTimeMachine";
|
||||
"write list" = "@wheel";
|
||||
"read only" = "no";
|
||||
"writable" = "yes";
|
||||
"browseable" = "yes";
|
||||
"fruit:time machine" = "yes";
|
||||
"fruit:time machine max size" = "2050G";
|
||||
"vfs objects" = "acl_xattr fruit streams_xattr";
|
||||
"inherit acls" = "yes";
|
||||
};
|
||||
data = {
|
||||
path = "/data";
|
||||
comment = "some data for the people";
|
||||
"write list" = "@wheel";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
}
|
||||
34
old-conf/hosts/ds9/snipe-it.nix
Normal file
34
old-conf/hosts/ds9/snipe-it.nix
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
{
|
||||
pkgs,
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
with lib;
|
||||
{
|
||||
users.users.nginx.isSystemUser = true;
|
||||
users.users.nginx.group = "nginx";
|
||||
users.groups.nginx = { };
|
||||
services.nginx.enable = mkForce false;
|
||||
services.nginx.virtualHosts."snipe-it" = mkForce null;
|
||||
users.users.caddy.extraGroups = [ config.services.snipe-it.group ];
|
||||
ragon.agenix.secrets.ds9SnipeIt = {
|
||||
group = config.services.snipe-it.group;
|
||||
owner = config.services.snipe-it.user;
|
||||
mode = "440";
|
||||
};
|
||||
services.snipe-it = {
|
||||
enable = true;
|
||||
database.createLocally = true;
|
||||
mail.driver = "sendmail";
|
||||
appURL = "https://snipe-it.hailsatan.eu";
|
||||
hostName = "snipe-it";
|
||||
appKeyFile = config.age.secrets.ds9SnipeIt.path;
|
||||
mail.from.address = "root@hailsatan.eu";
|
||||
};
|
||||
ragon.persist.extraDirectories = [
|
||||
config.services.snipe-it.dataDir
|
||||
"/var/lib/mysql"
|
||||
];
|
||||
|
||||
}
|
||||
59
old-conf/hosts/ds9/woodpecker.nix
Normal file
59
old-conf/hosts/ds9/woodpecker.nix
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
{
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
virtualisation.podman.dockerSocket.enable = true;
|
||||
ragon.agenix.secrets.ds9WoodpeckerEnv = { };
|
||||
ragon.agenix.secrets.ds9WoodpeckerAgentSecretEnv = { };
|
||||
virtualisation.quadlet = {
|
||||
containers = {
|
||||
woodpecker-server = {
|
||||
containerConfig.image = "woodpeckerci/woodpecker-server:v3";
|
||||
containerConfig.volumes = [
|
||||
"woodpecker-server-data:/var/lib/woodpecker"
|
||||
];
|
||||
containerConfig.networks = [
|
||||
"woodpecker-net"
|
||||
"podman"
|
||||
];
|
||||
containerConfig.environments = {
|
||||
WOODPECKER_HOST = "https://woodpecker.hailsatan.eu";
|
||||
WOODPECKER_OPEN = "true";
|
||||
WOODPECKER_ADMIN = "xyno";
|
||||
};
|
||||
containerConfig.environmentFiles = [
|
||||
config.age.secrets.ds9WoodpeckerEnv.path
|
||||
config.age.secrets.ds9WoodpeckerAgentSecretEnv.path
|
||||
];
|
||||
};
|
||||
woodpecker-agent = {
|
||||
containerConfig.environmentFiles = [
|
||||
config.age.secrets.ds9WoodpeckerAgentSecretEnv.path
|
||||
];
|
||||
containerConfig.image = "woodpeckerci/woodpecker-agent:v3";
|
||||
containerConfig.volumes = [
|
||||
"woodpecker-agent-config:/etc/woodpecker"
|
||||
"/var/run/docker.sock:/var/run/docker.sock"
|
||||
];
|
||||
containerConfig.environments = {
|
||||
WOODPECKER_SERVER = "woodpecker-server:9000";
|
||||
BACKEND_DOCKER_ENABLE_IPV6 = "true";
|
||||
};
|
||||
containerConfig.networks = [
|
||||
"woodpecker-net"
|
||||
"podman"
|
||||
];
|
||||
};
|
||||
};
|
||||
networks = {
|
||||
woodpecker.networkConfig = {
|
||||
ipv6 = true;
|
||||
name = "woodpecker-net";
|
||||
internal = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
130
old-conf/hosts/ds9/ytdl-sub.nix
Normal file
130
old-conf/hosts/ds9/ytdl-sub.nix
Normal file
|
|
@ -0,0 +1,130 @@
|
|||
{
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
inputs,
|
||||
...
|
||||
}:
|
||||
with lib;
|
||||
let
|
||||
channels = {
|
||||
"Entertainment" = [
|
||||
"2BoredGuysOfficial"
|
||||
"AlexPrinz"
|
||||
"BagelBoyOfficial"
|
||||
"DiedeutschenBackrooms"
|
||||
"DankPods"
|
||||
"Defunctland"
|
||||
"Ididathing"
|
||||
"GarbageTime420"
|
||||
"Boy_Boy"
|
||||
"ContraPoints"
|
||||
"PhilosophyTube"
|
||||
"PosyMusic"
|
||||
"RobBubble"
|
||||
"agingwheels"
|
||||
"NileBlue"
|
||||
"NileRed"
|
||||
"styropyro"
|
||||
"williamosman"
|
||||
"billwurtz"
|
||||
"f4micom"
|
||||
"hbomberguy"
|
||||
"simonegiertz"
|
||||
"Parabelritter"
|
||||
"DeviantOllam"
|
||||
"MaxFosh"
|
||||
"MichaelReeves"
|
||||
"TomScottGo"
|
||||
"WilliamOsman2"
|
||||
];
|
||||
"Tism" = [
|
||||
"Echoray1" # alwin meschede
|
||||
"TechnologyConnections"
|
||||
"TechnologyConnextras"
|
||||
"TheB1M"
|
||||
"bahnblick_eu"
|
||||
"jameshoffmann"
|
||||
"scottmanley"
|
||||
"theCodyReeder"
|
||||
"standupmaths"
|
||||
];
|
||||
"Making" = [
|
||||
"DIYPerks"
|
||||
"MaxMakerChannel"
|
||||
"Nerdforge"
|
||||
"iliketomakestuff"
|
||||
"ZackFreedman"
|
||||
|
||||
];
|
||||
"Games" = [
|
||||
"TylerMcVicker1"
|
||||
"gabe.follower"
|
||||
"altf4games"
|
||||
];
|
||||
"Programming" = [
|
||||
"BenEater"
|
||||
"NoBoilerplate"
|
||||
"stacksmashing"
|
||||
];
|
||||
"Tech" = [
|
||||
"LinusTechTips"
|
||||
];
|
||||
};
|
||||
in
|
||||
|
||||
{
|
||||
systemd.services."ytdl-sub-default".serviceConfig.ReadWritePaths = [ "/data/media/yt" ];
|
||||
services.ytdl-sub = {
|
||||
instances.default = {
|
||||
enable = true;
|
||||
schedule = "0/2:0";
|
||||
config = {
|
||||
presets."Sponsorblock" = {
|
||||
ytdl_options.cookiefile = "/data/media/yt/cookies.Personal.txt";
|
||||
subtitles = {
|
||||
embed_subtitles = true;
|
||||
languages = [
|
||||
"en"
|
||||
"de"
|
||||
];
|
||||
allow_auto_generated_subtitles = false;
|
||||
};
|
||||
chapters = {
|
||||
embed_chapters = true;
|
||||
sponsorblock_categories = [
|
||||
# "outro"
|
||||
"selfpromo"
|
||||
"preview"
|
||||
"interaction"
|
||||
"sponsor"
|
||||
"music_offtopic"
|
||||
# "intro"
|
||||
];
|
||||
remove_sponsorblock_categories = "all";
|
||||
force_key_frames = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
subscriptions = {
|
||||
"__preset__".overrides = {
|
||||
tv_show_directory = "/data/media/yt";
|
||||
only_recent_max_files = 30;
|
||||
# only_recent_date_range = "30days";
|
||||
};
|
||||
"Jellyfin TV Show by Date | Sponsorblock | Only Recent | Max 1080p" = mapAttrs' (
|
||||
n: v: nameValuePair "= ${n}" (genAttrs v (x: "https://youtube.com/@${x}"))
|
||||
) channels;
|
||||
"Jellyfin TV Show Collection | Sponsorblock" = {
|
||||
"~Murder Drones" = {
|
||||
s01_url = "https://www.youtube.com/playlist?list=PLHovnlOusNLiJz3sm0d5i2Evwa2LDLdrg";
|
||||
tv_show_collection_episode_ordering = "playlist-index";
|
||||
tv_show_directory = "/data/media/tv";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
group = "users";
|
||||
|
||||
};
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue