update
Some checks failed
ci/woodpecker/push/build-cache Pipeline failed
ci/woodpecker/cron/dependency-pr Pipeline was successful

Lucy Hochkamp 2025-10-23 16:20:45 +02:00
parent 33ee2f5760
commit f7afa33a13
14 changed files with 319 additions and 178 deletions


@@ -333,6 +333,9 @@ in
Mod+Shift+P { power-off-monitors; }
Super+Backslash { focus-workspace "scratchpad"; }
}
hotkey-overlay {
skip-at-startup
}
layout {
// center-focused-column "always"
gaps 8
@@ -349,9 +352,10 @@ in
tab-indicator {
hide-when-single-tab
position "top"
position "bottom"
place-within-column
width 8
width 12
length total-proportion=0.8
gap 8
}
}

@@ -58,21 +58,23 @@ let
# It would be ideal to determine _when_ the current event ends, and set the
# timeout accordingly. That would require parsing khal's output a bit more.
while true; do
(inotifywait \
--event modify \
--event create \
--event delete \
--event close_write \
--event moved_to \
--event move \
--monitor \
--timeout 120 \
--recursive \
"$HOME/.calendars" 2> /dev/null) || true | \
while read -r _; do
# (inotifywait \
# --event modify \
# --event create \
# --event delete \
# --event close_write \
# --event moved_to \
# --event move \
# --monitor \
# --timeout 120 \
# --recursive \
# "$HOME/.calendars" 2> /dev/null) || true | \
# while read -r _; do
# TODO: fix inotify stuff
render
sleep 30
# timeout 3 cat || true # debounce for 3s, https://stackoverflow.com/a/69945839
done
# done
done
'';
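
The inotify-based watcher above is commented out with a "TODO: fix inotify stuff". A minimal sketch of how that loop could look without the operator-precedence problem in the old "(...) || true | while read" pipeline, assuming the same event list, watch path and render helper as in the diff: drop --monitor, let a single inotifywait call block until one event or the 120-second timeout, and treat either outcome as a wake-up.

  while true; do
    # block until one event under ~/.calendars or the 120s timeout; a timeout exits non-zero, hence || true
    inotifywait \
      --event modify --event create --event delete \
      --event close_write --event moved_to --event move \
      --timeout 120 --recursive \
      "$HOME/.calendars" > /dev/null 2>&1 || true
    render
    sleep 30 # coarse debounce so a burst of events causes only one re-render
  done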

@@ -28,6 +28,7 @@
./services/monitoring.nix
./services/wireguard.nix
./system/impermanence.nix
./system/meta.nix
./system/user.nix
./user-services/khal.nix
./user-services/syncthing.nix

@@ -25,7 +25,6 @@ in
]; # theseus
environment.etc."msmtprc".enable = false;
sops.defaultSopsFile = ../../secrets/common.yaml;
sops.secrets = lib.mkMerge (
[
{

@@ -2,18 +2,16 @@
pkgs,
lib,
config,
instanceConfig,
instanceConfigs,
# inputs,
otherNodes,
...
}:
with lib;
let
cfg = config.xyno.services.monitoring;
firstInstanceWithPromServer = (builtins.head (
builtins.filter (x: x ? prometheusServer && x.prometheusServer) (attrValues instanceConfigs)
)).hostName;
firstInstanceWithPromServer = if cfg.prometheusServer then config.networking.hostName else (builtins.head (
attrValues (filterAttrs (n: v: v.config.xyno.services.monitoring.prometheusServer) (otherNodes))
)).config.networking.hostName;
vmBasicAuthUsername = "xyno-monitoring";
in
{
@@ -24,6 +22,10 @@ in
default = "http://${firstInstanceWithPromServer}.${config.xyno.services.wireguard.monHostsDomain}:8428/api/v1/write";
description = "where prometheus metrics should be pushed to";
};
options.xyno.services.monitoring.prometheusServer = mkOption {
type = types.bool;
default = false;
};
options.xyno.services.monitoring.exporters = mkOption {
type = types.attrsOf (types.either types.int types.str);
description = "names of exporters and their ports (to open fw and generate prometheus config)";
@@ -64,7 +66,7 @@ in
};
})
(mkIf (cfg.enable && instanceConfig ? prometheusServer && instanceConfig.prometheusServer) {
(mkIf (cfg.enable && cfg.prometheusServer) {
xyno.impermanence.directories = [ "/var/lib/${config.services.victoriametrics.stateDir}" ];
sops.secrets."victoriametrics/basicAuthPassword" = {
reloadUnits = [ "victoriametrics.service" ];
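
A usage sketch for the new flag (option paths as declared above; the enable option is only referenced here as cfg.enable, its declaration is not shown in this hunk):

  # hypothetical host configuration: this node runs the metrics server and receives remote writes
  xyno.services.monitoring.enable = true;
  xyno.services.monitoring.prometheusServer = true; # new in this commit, defaults to false
  # other nodes keep the default and push to the first node that sets prometheusServer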


@@ -2,14 +2,13 @@
pkgs,
lib,
config,
instanceConfigs,
instanceConfig,
otherNodes,
...
}:
with lib;
let
wgServer = instanceConfig ? wg.server && instanceConfig.wg.server;
cfg = config.xyno.services.wireguard;
wgServer = cfg.server;
ula = cfg.ula;
ulaPrefix = "${ula}:1337"; # /64 for normal vpn
monitoringUlaPrefix = "${ula}:2337"; # /64 for monitoring
@@ -25,51 +24,50 @@ let
in
"${prefix}:${localPart}";
# peers list for networkd
filteredConfigs = builtins.filter (x: x.hostName != config.networking.hostName) (
attrValues instanceConfigs
);
wgPeersLists = map (
wgPeersLists = attrValues (mapAttrs (
c:
let
hasV4 = c.xyno.services.wireguard.v4 && cfg.v4;
isServer = c.xyno.services.wireguard.server;
publicHostname = c.deployment.targetHost;
pubKey = c.xyno.services.wireguard.pubKey;
in
(
(optional (c ? publicHostname) {
(optional (publicHostname != null) {
# if peer is publicly on the internet
AllowedIPs =
(optionals (c ? wg.server && c.wg.server) [
# is server
(optionals (isServer) [
"::/0"
])
++ (optionals (c ? wg.server && c.wg.server && c ? wg.v4 && instanceConfig ? wg.v4) [
# both client and server have a v4
++ (optionals (isServer && hasV4) [
"0.0.0.0/0"
])
++ (optionals (!c ? wg.server || !c.wg.server) [
# is not server
"${genUlaForHost ulaPrefix c.hostName}/128" # if a host is reachable but shouldn't play server, send only to the hosts ip
++ (optionals (!isServer) [
"${genUlaForHost ulaPrefix c.networking.hostName}/128" # if a host is reachable but shouldn't play server, send only to the hosts ip
])
++ (optionals ((!c ? wg.server || !c.wg.server) && c ? wg.v4 && instanceConfig ? wg.v4) [
# no server, no ipv4 yay
++ (optionals ((!isServer) && hasV4) [
"${c.wg.v4}/32"
]);
RouteTable = 1000;
Endpoint = "${c.publicHostname}:51820";
Endpoint = "${publicHostname}:51820";
PersistentKeepalive = 25;
PublicKey = c.wg.pubKey;
PublicKey = pubKey;
PresharedKeyFile = config.sops.secrets."wg/psk".path;
})
++ (optional ((!c ? publicHostname) && wgServer && (c ? wg.pubKey)) {
++ (optional ((publicHostname == null) && wgServer && (pubKey != null)) {
# if this is the server and the peer isn't reachable on the internet
AllowedIPs = [
"${genUlaForHost ulaPrefix c.hostName}/128"
"${genUlaForHost monitoringUlaPrefix c.hostName}/128"
]
++ (optionals (c ? wg.v4 && instanceConfig ? wg.v4) [
++ (optionals (hasV4) [
"${c.wg.v4}/32"
]);
PublicKey = c.wg.pubKey;
PublicKey = pubKey;
PresharedKeyFile = config.sops.secrets."wg/psk".path;
})
)
) filteredConfigs;
) otherNodes);
wgPeers = flatten wgPeersLists;
in
{
@@ -94,18 +92,34 @@ in
type = types.str;
default = genUlaForHost monitoringUlaPrefix config.networking.hostName;
};
options.xyno.services.wireguard.pubKey = mkOption {
type = types.nullOr types.str;
default = null;
};
options.xyno.services.wireguard.server = mkOption {
type = types.bool;
default = false;
};
options.xyno.services.wireguard.v4 = mkOption {
type = types.nullOr types.str;
default = null;
};
config = mkIf cfg.enable {
# TODO: add an option to route all traffic through this network
networking.hosts =
(mapAttrs' (
n: v: nameValuePair (genUlaForHost ulaPrefix v.hostName) [ "${v.hostName}.${cfg.hostsDomain}" ]
) instanceConfigs)
n: v:
nameValuePair (genUlaForHost ulaPrefix v.networking.hostName) [
"${v.networking.hostName}.${cfg.hostsDomain}"
]
) otherNodes)
// (mapAttrs' (
n: v:
nameValuePair (genUlaForHost monitoringUlaPrefix v.hostName) [
"${v.hostName}.${cfg.monHostsDomain}"
nameValuePair (genUlaForHost monitoringUlaPrefix v.networking.hostName) [
"${v.networking.hostName}.${cfg.monHostsDomain}"
]
) instanceConfigs);
) otherNodes);
networking.firewall.allowedUDPPorts = optional wgServer 51820;
networking.firewall.interfaces."wg0".allowedUDPPorts = optional wgServer 53;
systemd.network.netdevs."99-wg0" = {
@@ -126,15 +140,15 @@ in
matchConfig.Name = "wg0";
networkConfig = {
Description = "xyno wireguard";
IPMasquerade = mkIf (instanceConfig ? wg.server && instanceConfig.wg.server) "both";
IPv4Forwarding = (instanceConfig ? wg.server && instanceConfig.wg.server);
IPv6Forwarding = (instanceConfig ? wg.server && instanceConfig.wg.server);
IPMasquerade = mkIf wgServer "both";
IPv4Forwarding = wgServer;
IPv6Forwarding = wgServer;
};
address = [
"${(genUlaForHost ulaPrefix config.networking.hostName)}/64"
"${(genUlaForHost monitoringUlaPrefix config.networking.hostName)}/64"
]
++ (optionals (instanceConfig ? wg.v4) [ "${instanceConfig.wg.v4}/24" ]);
++ (optionals (cfg.v4) [ "${cfg.v4}/24" ]);
};
systemd.network.networks."51-wg0-all-traffic" = {
matchConfig.Name = "wg0";
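
With instanceConfigs gone, peer data now comes from each node's own module options. A minimal sketch of what a non-server peer might set (option names as declared above; the values are placeholders, not taken from this repository):

  # hypothetical peer configuration consumed by the wgPeersLists generation
  xyno.services.wireguard.enable = true;
  xyno.services.wireguard.server = false;                    # default
  xyno.services.wireguard.pubKey = "<wireguard public key>"; # placeholder
  xyno.services.wireguard.v4 = "10.0.0.2";                   # placeholder; only relevant when both ends carry a v4
  # deployment.targetHost doubles as the public endpoint; peers without one are only reached via the server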

modules/system/meta.nix Normal file

@@ -0,0 +1,8 @@
{ lib, ... }: with lib; {
  options.xyno.meta = {
    sopsKey = mkOption { type = types.str; };
  };
  config = {
    sops.defaultSopsFile = ../../secrets/common.yaml;
  };
}
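
Nothing in this commit reads the new option yet; a host would presumably set it along these lines (the value is a placeholder):

  # hypothetical host configuration; xyno.meta.sopsKey has no consumer in this commit
  xyno.meta.sopsKey = "<host sops key>";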


@@ -17,6 +17,7 @@ in
config = lib.mkIf cfg.enable {
environment.homeBinInPath = true;
nix.settings.trusted-users = [cfg.name];
users.groups.plugdev = {};
users.users.${cfg.name} = {
openssh.authorizedKeys.keys = ["ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID/oMAi5jyQsNohfhcSH2ItisTpBGB0WtYTVxJYKKqhj"]; # theseus
isNormalUser = true;