feat: introduce treefmt and fmt all

steveej 2024-11-15 10:17:56 +01:00
parent 80250b0179
commit 27c6c4f9fa
237 changed files with 5440 additions and 5214 deletions
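The treefmt configuration introduced by this commit is not part of the hunks excerpted below; only the resulting reformatting of existing files is shown. The new formatting (spaces inside `[ ]` and `{ }`, `let`/`in` on their own lines, long lists broken one element per line) matches nixfmt's RFC 166 style. As a hedged sketch only, not the repository's actual setup, a flake commonly wires treefmt in via treefmt-nix along these lines:

{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    treefmt-nix.url = "github:numtide/treefmt-nix";
    treefmt-nix.inputs.nixpkgs.follows = "nixpkgs";
  };

  outputs =
    { self, nixpkgs, treefmt-nix, ... }:
    let
      system = "x86_64-linux";
      pkgs = nixpkgs.legacyPackages.${system};
      # hypothetical formatter set; this commit's actual treefmt module is not shown in the excerpt
      treefmtEval = treefmt-nix.lib.evalModule pkgs {
        projectRootFile = "flake.nix";
        programs.nixfmt-rfc-style.enable = true;
      };
    in
    {
      # `nix fmt` then formats the whole tree ("fmt all")
      formatter.${system} = treefmtEval.config.build.wrapper;
      # `nix flake check` fails if anything is left unformatted
      checks.${system}.formatting = treefmtEval.config.build.check self;
    };
}

With a setup along these lines, the bulk reformat in this commit amounts to a single `nix fmt` (or `treefmt`) run from the repository root.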

View file

@@ -1,14 +1,12 @@
# WARN: this file will get overwritten by $ cachix use <name>
{
pkgs,
lib,
...
}: let
{ lib, ... }:
let
folder = ./cachix;
toImport = name: value: folder + ("/" + name);
toImport = name: _value: folder + ("/" + name);
filterCaches = key: value: value == "regular" && lib.hasSuffix ".nix" key;
imports = lib.mapAttrsToList toImport (lib.filterAttrs filterCaches (builtins.readDir folder));
in {
in
{
inherit imports;
nix.settings.substituters = ["https://cache.nixos.org/"];
nix.settings.substituters = [ "https://cache.nixos.org/" ];
}
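Besides the pure reformatting in this hunk (brackets spaced, `let`/`in` split onto separate lines), the `value` argument became `_value`, the convention deadnix uses to mark unused bindings. If deadnix runs as part of the formatter chain, the treefmt module would enable it next to the Nix formatter; this is an assumption, since the formatter list is not shown in this excerpt:

  # inside the hypothetical treefmt-nix module
  programs.deadnix.enable = true;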

View file

@@ -1,8 +1,6 @@
{
nix = {
settings.substituters = [
"https://nixpkgs-wayland.cachix.org"
];
settings.substituters = [ "https://nixpkgs-wayland.cachix.org" ];
settings.trusted-public-keys = [
"nixpkgs-wayland.cachix.org-1:3lwxaILxMRkVhehr5StQprHdEo4IrE8sRho9R9HOLYA="
];

View file

@@ -5,88 +5,107 @@
subvolumes,
targetPathSuffix ? "",
autoStart ? false,
}: let
}:
let
passwords = import ../../variables/passwords.crypt.nix;
subvolumeParentDir = "/var/lib/container-volumes";
in {
config = {pkgs, ...}: {
system.stateVersion = "20.03"; # Did you read the comment?
in
{
config =
{ pkgs, ... }:
{
system.stateVersion = "20.03"; # Did you read the comment?
imports = [../profiles/containers/configuration.nix];
imports = [ ../profiles/containers/configuration.nix ];
environment.systemPackages = with pkgs; [btrfs-progs btrbk];
environment.systemPackages = with pkgs; [
btrfs-progs
btrbk
];
networking.firewall.enable = true;
networking.firewall.enable = true;
systemd.services."bkp-sync" = {
enable = true;
description = "bkp-sync service";
systemd.services."bkp-sync" = {
enable = true;
description = "bkp-sync service";
serviceConfig = {Type = "oneshot";};
serviceConfig = {
Type = "oneshot";
};
after = ["bkp-run.service"];
after = [ "bkp-run.service" ];
requires = ["bkp-run.service"];
requires = [ "bkp-run.service" ];
path = with pkgs; [utillinux];
script = ''
set -x
true
'';
};
systemd.services."bkp-run" = {
enable = true;
description = "bkp-run";
serviceConfig = {Type = "oneshot";};
partOf = ["bkp-sync.service"];
path = with pkgs; [btrfs-progs btrbk coreutils];
script = let
btrbkConf = pkgs.writeText "cfg" ''
timestamp_format long
ssh_identity ${passwords.storage.backupTarget.keyPath}
ssh_user ${passwords.storage.backupTarget.user}
ssh_compression no
backend_remote btrfs-progs-sudo
compat_remote busybox
btrfs_commit_delete each
snapshot_create onchange
snapshot_preserve_min latest
snapshot_preserve 7d 4w
target_preserve_min latest
target_preserve 7d 4w 12m *y
volume ${subvolumeParentDir}
target ${passwords.storage.backupTarget.target}/container-volumes/${targetPathSuffix}
${builtins.foldl' (sum: elem: sum + " subvolume " + elem + "\n") ""
subvolumes}
path = with pkgs; [ utillinux ];
script = ''
set -x
true
'';
in ''
#! ${pkgs.bash}/bin/bash
set -Eeuxo pipefail
};
btrbk -c ${btrbkConf} --progress ''${@:-run}
'';
};
systemd.services."bkp-run" = {
enable = true;
description = "bkp-run";
systemd.timers."bkp" = {
description = "Timer to trigger bkp periodically";
enable = true;
wantedBy = ["timer.target" "multi-user.target"];
timerConfig = {
# Obtained using `systemd-analyze calendar "Wed 23:00"`
# OnCalendar = "Wed *-*-* 23:00:00";
OnStartupSec = "1m";
Unit = "bkp-sync.service";
OnUnitInactiveSec = "2h";
Persistent = "true";
serviceConfig = {
Type = "oneshot";
};
partOf = [ "bkp-sync.service" ];
path = with pkgs; [
btrfs-progs
btrbk
coreutils
];
script =
let
btrbkConf = pkgs.writeText "cfg" ''
timestamp_format long
ssh_identity ${passwords.storage.backupTarget.keyPath}
ssh_user ${passwords.storage.backupTarget.user}
ssh_compression no
backend_remote btrfs-progs-sudo
compat_remote busybox
btrfs_commit_delete each
snapshot_create onchange
snapshot_preserve_min latest
snapshot_preserve 7d 4w
target_preserve_min latest
target_preserve 7d 4w 12m *y
volume ${subvolumeParentDir}
target ${passwords.storage.backupTarget.target}/container-volumes/${targetPathSuffix}
${builtins.foldl' (sum: elem: sum + " subvolume " + elem + "\n") "" subvolumes}
'';
in
''
#! ${pkgs.bash}/bin/bash
set -Eeuxo pipefail
btrbk -c ${btrbkConf} --progress ''${@:-run}
'';
};
systemd.timers."bkp" = {
description = "Timer to trigger bkp periodically";
enable = true;
wantedBy = [
"timer.target"
"multi-user.target"
];
timerConfig = {
# Obtained using `systemd-analyze calendar "Wed 23:00"`
# OnCalendar = "Wed *-*-* 23:00:00";
OnStartupSec = "1m";
Unit = "bkp-sync.service";
OnUnitInactiveSec = "2h";
Persistent = "true";
};
};
};
};
inherit autoStart;
@@ -114,10 +133,10 @@ in {
}
];
extraFlags = ["--resolv-conf=bind-host"];
extraFlags = [ "--resolv-conf=bind-host" ];
privateNetwork = true;
forwardPorts = [];
forwardPorts = [ ];
inherit hostAddress localAddress;
}

View file

@@ -6,198 +6,206 @@
imapsPort ? 993,
sievePort ? 4190,
autoStart ? false,
}: {
}:
{
inherit specialArgs;
config = {
pkgs,
config,
lib,
repoFlake,
...
}: {
system.stateVersion = "22.05"; # Did you read the comment?
config =
{
pkgs,
config,
repoFlake,
...
}:
{
system.stateVersion = "22.05"; # Did you read the comment?
imports = [
../profiles/containers/configuration.nix
imports = [
../profiles/containers/configuration.nix
repoFlake.inputs.sops-nix.nixosModules.sops
../profiles/common/user.nix
];
repoFlake.inputs.sops-nix.nixosModules.sops
../profiles/common/user.nix
];
networking.firewall.allowedTCPPorts = [
imapsPort
sievePort
];
networking.firewall.allowedTCPPorts = [
imapsPort
sievePort
];
# FIXME: find out how to use the `defaultSopsFile` so i don't have to specify each secret separately
# sops.defaultSopsFile = ./mailserver_secrets.yaml;
# FIXME: find out how to use the `defaultSopsFile` so i don't have to specify each secret separately
# sops.defaultSopsFile = ./mailserver_secrets.yaml;
sops.age.sshKeyPaths = ["/etc/ssh/ssh_host_ed25519_key"];
sops.secrets.email_mailStefanjunkerDe = {
sopsFile = ./mailserver_secrets.yaml;
owner = config.users.users.steveej.name;
};
sops.secrets.email_mailStefanjunkerDeHetzner = {
sopsFile = ./mailserver_secrets.yaml;
owner = config.users.users.steveej.name;
};
sops.secrets.email_schtifATwebDe = {
sopsFile = ./mailserver_secrets.yaml;
owner = config.users.users.steveej.name;
};
sops.secrets.email_dovecot_steveej = {
sopsFile = ./mailserver_secrets.yaml;
owner = config.users.users.dovecot2.name;
};
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.secrets.email_mailStefanjunkerDe = {
sopsFile = ./mailserver_secrets.yaml;
owner = config.users.users.steveej.name;
};
sops.secrets.email_mailStefanjunkerDeHetzner = {
sopsFile = ./mailserver_secrets.yaml;
owner = config.users.users.steveej.name;
};
sops.secrets.email_schtifATwebDe = {
sopsFile = ./mailserver_secrets.yaml;
owner = config.users.users.steveej.name;
};
sops.secrets.email_dovecot_steveej = {
sopsFile = ./mailserver_secrets.yaml;
owner = config.users.users.dovecot2.name;
};
# TODO: switch to something other than ddclient as it's no longer maintained
# TODO: switch to something other than ddclient as it's no longer maintained
# TODO: switch to a let's encrypt certificate
sops.secrets.dovecotSslServerCert = {
sopsFile = ./mailserver_secrets.yaml;
owner = config.users.users.dovecot2.name;
};
sops.secrets.dovecotSslServerKey = {
sopsFile = ./mailserver_secrets.yaml;
owner = config.users.users.dovecot2.name;
};
services.dovecot2 = {
enable = true;
# TODO: switch to a let's encrypt certificate
sops.secrets.dovecotSslServerCert = {
sopsFile = ./mailserver_secrets.yaml;
owner = config.users.users.dovecot2.name;
};
sops.secrets.dovecotSslServerKey = {
sopsFile = ./mailserver_secrets.yaml;
owner = config.users.users.dovecot2.name;
};
services.dovecot2 = {
enable = true;
modules = [pkgs.dovecot_pigeonhole];
protocols = ["sieve"];
modules = [ pkgs.dovecot_pigeonhole ];
protocols = [ "sieve" ];
enableImap = true;
enableLmtp = true;
enablePAM = true;
showPAMFailure = true;
mailLocation = "maildir:~/.maildir";
sslServerCert = config.sops.secrets.dovecotSslServerCert.path;
sslServerKey = config.sops.secrets.dovecotSslServerKey.path;
enableImap = true;
enableLmtp = true;
enablePAM = true;
showPAMFailure = true;
mailLocation = "maildir:~/.maildir";
sslServerCert = config.sops.secrets.dovecotSslServerCert.path;
sslServerKey = config.sops.secrets.dovecotSslServerKey.path;
#configFile = "/etc/dovecot/dovecot2_manual.conf";
extraConfig = ''
auth_mechanisms = cram-md5 digest-md5
auth_verbose = yes
#configFile = "/etc/dovecot/dovecot2_manual.conf";
extraConfig = ''
auth_mechanisms = cram-md5 digest-md5
auth_verbose = yes
passdb {
driver = passwd-file
args = scheme=CRYPT username_format=%u /etc/dovecot/users
}
passdb {
driver = passwd-file
args = scheme=CRYPT username_format=%u /etc/dovecot/users
}
protocol lda {
postmaster_address = "mail@stefanjunker.de"
mail_plugins = $mail_plugins sieve
}
protocol lda {
postmaster_address = "mail@stefanjunker.de"
mail_plugins = $mail_plugins sieve
}
protocol imap {
mail_max_userip_connections = 64
}
'';
};
environment.etc."dovecot/users".source = config.sops.secrets.email_dovecot_steveej.path;
systemd.services.steveej-getmail-stefanjunker = {
enable = true;
wantedBy = ["multi-user.target"];
serviceConfig.User = "steveej";
serviceConfig.Group = "dovecot2";
serviceConfig.RestartSec = 600;
serviceConfig.Restart = "always";
description = "Getmail service";
path = [pkgs.getmail6];
script = let
rc = pkgs.writeText "mailATstefanjunker.de.getmail.rc" ''
[options]
verbose = 1
read_all = 0
delete_after = 30
[retriever]
type = SimpleIMAPSSLRetriever
server = ssl0.ovh.net
port = 993
username = mail@stefanjunker.de
password_command = ("${pkgs.coreutils}/bin/cat", "${config.sops.secrets.email_mailStefanjunkerDe.path}")
mailboxes = ('INBOX',)
[destination]
type = MDA_external
path = ${pkgs.dovecot}/libexec/dovecot/dovecot-lda
protocol imap {
mail_max_userip_connections = 64
}
'';
in ''
getmail --idle=INBOX --rcfile=${rc}
'';
};
environment.etc."dovecot/users".source = config.sops.secrets.email_dovecot_steveej.path;
systemd.services.steveej-getmail-stefanjunker = {
enable = true;
wantedBy = [ "multi-user.target" ];
serviceConfig.User = "steveej";
serviceConfig.Group = "dovecot2";
serviceConfig.RestartSec = 600;
serviceConfig.Restart = "always";
description = "Getmail service";
path = [ pkgs.getmail6 ];
script =
let
rc = pkgs.writeText "mailATstefanjunker.de.getmail.rc" ''
[options]
verbose = 1
read_all = 0
delete_after = 30
[retriever]
type = SimpleIMAPSSLRetriever
server = ssl0.ovh.net
port = 993
username = mail@stefanjunker.de
password_command = ("${pkgs.coreutils}/bin/cat", "${config.sops.secrets.email_mailStefanjunkerDe.path}")
mailboxes = ('INBOX',)
[destination]
type = MDA_external
path = ${pkgs.dovecot}/libexec/dovecot/dovecot-lda
'';
in
''
getmail --idle=INBOX --rcfile=${rc}
'';
};
systemd.services.steveej-getmail-stefanjunker-hetzner = {
enable = true;
wantedBy = [ "multi-user.target" ];
serviceConfig.User = "steveej";
serviceConfig.Group = "dovecot2";
serviceConfig.RestartSec = 60;
serviceConfig.Restart = "always";
description = "Getmail service";
path = [ pkgs.getmail6 ];
script =
let
rc = pkgs.writeText "mailATstefanjunker.de.getmail.rc" ''
[options]
verbose = 2
read_all = 0
delete_after = 30
[retriever]
type = SimpleIMAPSSLRetriever
server = mail.your-server.de
port = 993
username = mail@stefanjunker.de
password_command = ("${pkgs.coreutils}/bin/cat", "${config.sops.secrets.email_mailStefanjunkerDeHetzner.path}")
mailboxes = ('INBOX',)
[destination]
type = MDA_external
path = ${pkgs.dovecot}/libexec/dovecot/dovecot-lda
'';
in
''
getmail --rcfile=${rc} --idle=INBOX
'';
};
systemd.services.steveej-getmail-webde = {
enable = true;
wantedBy = [ "multi-user.target" ];
serviceConfig.User = "steveej";
serviceConfig.Group = "dovecot2";
description = "Getmail service";
path = [ pkgs.getmail6 ];
serviceConfig.RestartSec = 1000;
serviceConfig.Restart = "always";
script =
let
rc = pkgs.writeText "schtifATweb.de.getmail.rc" ''
[options]
verbose = 1
read_all = 0
delete_after = 30
[retriever]
type = SimpleIMAPSSLRetriever
server = imap.web.de
port = 993
username = schtif
password_command = ("${pkgs.coreutils}/bin/cat", "${config.sops.secrets.email_schtifATwebDe.path}")
mailboxes = ('INBOX',)
[destination]
type = Maildir
path = ~/.maildir/
'';
in
''
getmail --rcfile=${rc} --idle=INBOX
'';
};
};
systemd.services.steveej-getmail-stefanjunker-hetzner = {
enable = true;
wantedBy = ["multi-user.target"];
serviceConfig.User = "steveej";
serviceConfig.Group = "dovecot2";
serviceConfig.RestartSec = 60;
serviceConfig.Restart = "always";
description = "Getmail service";
path = [pkgs.getmail6];
script = let
rc = pkgs.writeText "mailATstefanjunker.de.getmail.rc" ''
[options]
verbose = 2
read_all = 0
delete_after = 30
[retriever]
type = SimpleIMAPSSLRetriever
server = mail.your-server.de
port = 993
username = mail@stefanjunker.de
password_command = ("${pkgs.coreutils}/bin/cat", "${config.sops.secrets.email_mailStefanjunkerDeHetzner.path}")
mailboxes = ('INBOX',)
[destination]
type = MDA_external
path = ${pkgs.dovecot}/libexec/dovecot/dovecot-lda
'';
in ''
getmail --rcfile=${rc} --idle=INBOX
'';
};
systemd.services.steveej-getmail-webde = {
enable = true;
wantedBy = ["multi-user.target"];
serviceConfig.User = "steveej";
serviceConfig.Group = "dovecot2";
description = "Getmail service";
path = [pkgs.getmail6];
serviceConfig.RestartSec = 1000;
serviceConfig.Restart = "always";
script = let
rc = pkgs.writeText "schtifATweb.de.getmail.rc" ''
[options]
verbose = 1
read_all = 0
delete_after = 30
[retriever]
type = SimpleIMAPSSLRetriever
server = imap.web.de
port = 993
username = schtif
password_command = ("${pkgs.coreutils}/bin/cat", "${config.sops.secrets.email_schtifATwebDe.path}")
mailboxes = ('INBOX',)
[destination]
type = Maildir
path = ~/.maildir/
'';
in ''
getmail --rcfile=${rc} --idle=INBOX
'';
};
};
inherit autoStart;
bindMounts = {

View file

@@ -7,37 +7,37 @@ dovecotSslServerCert: ENC[AES256_GCM,data:ylK0IIj2vdY0mXOqSgA5zYmFYGote/uMtDWy2r
dovecotSslServerKey: ENC[AES256_GCM,data:KYpQZbioLGrp/6R6j/c4uJhBpoDT2aj7UffQQug8Otzr/0rk51tavsjg4YRQGIv+ZpFYpWAuHbhW4O8AsRgpi0AX3hKsZICEdNubfK5zfd+SInXveaVFbHHjOuzcqftraUrqx9APu+omk4LlpxpWTbj/bAcRnRBn0C093AeJNi1giaCZd4NxmmkYqwYzrjUc6LYHvICEnjA87ZVpeOKE/6B2Ng5QWDKhZNmjy7YDXAk4DS+P2grLmoGvnz6ubtaypSzaKXYTFz/uxEvtCCPlIaJHm3Nz0i0j1rjX3S/w3c26zuIFtwCmAQzGnHyQwbx7ILwCXfnyQnpM7+R5+fxcYvcK2GEJyTGzg/JFa++TI1YO+wpknjzxK3Sa8aX0pUbx/TEjnY3+tRnx7YNuih2ZNZrPHy8uJJtO9Aef84Sq5vLQG5n1/ya0pVhjCbs1pgpeK/qT3ikLbkcJg6NxAq3hqqQdR4TTkZBwKLVfzcMXLDZB0GphhVvtO0W7afRCE+nA/FPDT2NN6WLD15cN5F8w6USi0iQlwFb+TE8nt1ghhoGmwCMx+lX1Bk/jdIlYtJ62T8+T3nRVJ6ZRlUa1rkbAADaWZVvLR2/ylaEkeYFo/CC6lUg4DWPCVoGFxaWaU+ZaIDjbiYcqGQFBwq8JZ44hAOyJQpb7N1zgDVyPh/xr+ukmjutFuu97FY55VTn+8eipRiR4TZpPRH+KvB/FmlLNaim76YZCRH9Dv2ENbz9fXpWv7P+yh06+ci9HKvjNAzR6NRr368tK2srEEhWzFv+nAsRetzc2VcfwNMcg5/mvlWHVZSmONXC5adEo/W5XgJgUnH/fkz5IRPY/1iteq8PTCPUkubzF+qT2+suzEDnvgXlaKsqHkrk+n8YySl+GRABnasmnBYdb8vboDM41ptw3PXDoL+l07o6KxTwPOWWl9BVNMT8VzL7gAl+dlxjkEUSqn53OrsYDluxefBa3c0rfvk8CCvOMjgLkagK9O+VavqJEo00zd3f0ZzMcIoRebuDzYILw3DTrG/qyLXGsRoybBr+qcuSVBzM5RnjcToFJO4W/0EIdH1drZmqHdNgSNwPPRSNCivrhV25syUCrTee/xkDVUr47z67pK/5Mh0ewlwq0hcl/dBoA0YP/PptntK0CHfistD8chNtdMk3PyzqSiFaDPQ3T4wdc3zTNUjXeQ5643k5weJXFPg4tUuCCa8HxUJHd5sLnNY0OaRBwh2SLkQlcXYFQDzVHSoVscR3tf+57L7aF2hVQT2QtJKdZQjOyMg5YK0UlVc3tkyPZzyjOVaP7eTCRKwXI1NminHmmy1ZzZ+w+8+oX8cfvE9HdbqDoDp0MnkicS0+5S0lZwkRWrjUx/gS4aMWLbCHUQHY8wm+fmyDLJ/oI4ukdUI5YLOutlCsIY+aotnVMoORgdd/EPeZVYJmci/pvMjPF9Eard0aD4rLA7z/HwGgc3VEGmNluE+20BXO3bFIqwa9tzMqzOJB0qglP35MjVGiUe6Svq13DAmSOnzN+WqcVbTMJG8J1bwKqvmaN8AEpO0zU94ZhHspUtGyQQ0D6sMsw9jqJ1WyLE7aXeFR6OHrpw3DC2mCpr/qX8QFsveeyB83Za2+CuVVi2sqGAKYzkwlUPkeuaxfBak0apwJsF2trT1uMvPOuIda8k4XhtYLxah2BDJZIoMqUVz2xcN4OuW8bdSX/lepsyZZO34VEQDLBa2dxCCHJmCKf6io/0YlswNKGDQh+DI935KTdqBnHSJ9IjvADQuu+K37aS0L9V0ZLXiM5SBQtbB7kQpHjvivq97ru7QpFqJf8HCl1vDs4gJ/NV+J0+CX6dQTQOtHvwxD2CPGiiSv40ycoJAcwiqTh5T+hRPtca6bSes/jGN5iQjfLCRbwvL/ItLLAK3F2cEIdKZnfhJkdEAIwWFLvR4R5I7ZcCK5GgKz5dPROup8BAONA8XxcJWXaXV0YkfEmCDbZYMFC7pcx4NAnGp881RyAaG/HlstBHHVagpP2fwZ8K0J/2KPillOq/Die+vNc2++hx4EuftvNkZhSd+7zIYNKHQd0M4Ea74flgmmW5lG73bE1BkhVd2DsgEDihH19/vJjFH4PxKINKp0ij4jMyq9w+WsGiUqSDaQz/MZJ8wjzaSjvmSj4qlOAitr/s3f041e77rMb0W2ieCtYEy7IsebIqIWgKn/crm5FhyUtBCPEqFZgAKS313bXUio8LktqXCrZjZ0ZG8DmQG6hnK4PstKlIUQoNuFnb8Bp1zDgY4i2hb6Zmu7NnqnOaJJTjSGwaZOav0oMousn67BuFtwoMaGp+OjCopZ3HPfg19usnjvWpOgccXWYlQc0HOlGXUq+otKlXtQwAjUvz50GmV+lY3t4rpCgqk+pj9iH62xuzDQ01FOXl+v3Ehnw97mNJk9YarueG0Hl/1f6dhwXnjeEv35LLyWUjQolOoYgycEkgQ/cCCOSm7zgK1VT0oTLFISai8IG0qDP9HCszteHZhp+y4bsXQfAJTY11QLr7hx9/nQmVlHksDN5Wsno4wbkT+D2xb5EaDU2RBqZfTVcbRBWRtAhQcRPxdaUXyI7oKEaFg8fvQZ8wK/Ae+L18ub+Latb5W69dUVT6I13tPleXDl1oen9BXzaX7sygSpY4lJoXlu+SCKyNTMrC36PrB39QUWosw03ZsiKT5xjgN5+1m32yv4cg8lAwNCR4xxShrnhSbZ328yifaAuTnSawZmUGBVxPx4glVcvNUOXW2UvVtmeKU0SG1E+UGBAq7/UfaadMM7BsjyaaKpBa/tXZTm0rn8UiFqujvgNjQ3F/3ybRdlO5d6eMI9Na+1gqg6qxYSGR0H0wAdPhtyGRxpumehAQGeMKd49Sg6jspaf3NAjjuZ0Yp+eJV9652WqVZ7xtCNqRURV353h+XPGR+ZZ9siHRDQ+NcbxPkfbHw0/RTvZvEIdaDi5+DLh6tgIxMEtOpwTlfFrOUDaIcmWvzk92VtBFuafvoGzTipryTnMszjCsUTvyEPN8jPd6r8UmOFGXF2aVNksmn/bI97i4s1kYLgY8XsEOyx+Q9pUTkTEMn2JWgnEcSOAtaX1ZskHnfueKzUPb+/YWb+z8SNCgnUqHqa42qBqwlhdshzYhhfKhEisUptirzzp1kcbyHrug5PzHxh8Qri2pjHxSHYQ5sjig6K6B1YEuHP6uo19fL6BdgGlhKroiOF/6TMAcE9V3+yqvDdsW/IC0QXLHIBKC7wlDgLc25ltGogD/76P6tViDAb6+HNSSXJO056Ovq0z2BrXhnq1AmWa99mVnOLJwafRWPZC,iv:XxnAsh6yx9gICi3N6oTttpGXvguGZImWNIMp9srDJLM=,tag:M9gFSD5PNIfoCLet6Vy6QA==,type:str]
hetznerDnsApiToken: ENC[AES256_GCM,data:JfL4Xg9TZu4Og35g0SwfrI1uxiqgdFa7p5AQcfiPwLY=,iv:yOak3uXX7CNglu8O2UW/1sOI7BGZxpRQAFJCvRbzU0Y=,tag:6orkQIy7BxACziLWpYoS5Q==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age18dmqd7r7fanrfmdxsvwgv9psvhs3hw5ydpvw0na0dv3dlrg4rs3q2wpvgv
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBWaE9nNytUbXhWeWZYWndn
R3pMV3N1NjJPK2gzUDl2YitxWEU4NUFPRUJ3Cm1tMGhGcU56bSs5SUIzRmhqVHN2
dXIzYlkvS1JnWnladGdXTnRKMXNOWncKLS0tIGhyRFhFV1VRVXVYN1pJWjNFd1Rj
bENCWmVNUEJyZFBpRmYwbWVndFJGcUEKzvkGz3WycSrU4qPcBhs4cSCn/9TQ62sl
T6TuDra97qJJ1Pg9VZGHT/OoSleLA4s0qpNlAxnAnNaO0conTsREZQ==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2023-07-17T12:01:21Z"
mac: ENC[AES256_GCM,data:003nzaNWdXLscJy9XZcwAb93M9Eo3Bdg9s5MHHiv4/TitaaZE7VghWHKv5DrcoA0GGdN9SnIVqHd+o6OPVER91XLVxoiX7ixtlu1RIRfqdama3RRPtSki5wP5wPz6qF4vRBIKfrTpZK7thXLYs2NhCB9HJYljNhcgLtzEG5bWgY=,iv:tEP530Pij3bt3hc5PCYGjFFyPiKgo34dHm23Xtmrxt8=,tag:macr/U8R5+wktTBJ9OqI/w==,type:str]
pgp:
- created_at: "2023-07-02T20:30:30Z"
enc: |-
-----BEGIN PGP MESSAGE-----
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age18dmqd7r7fanrfmdxsvwgv9psvhs3hw5ydpvw0na0dv3dlrg4rs3q2wpvgv
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBWaE9nNytUbXhWeWZYWndn
R3pMV3N1NjJPK2gzUDl2YitxWEU4NUFPRUJ3Cm1tMGhGcU56bSs5SUIzRmhqVHN2
dXIzYlkvS1JnWnladGdXTnRKMXNOWncKLS0tIGhyRFhFV1VRVXVYN1pJWjNFd1Rj
bENCWmVNUEJyZFBpRmYwbWVndFJGcUEKzvkGz3WycSrU4qPcBhs4cSCn/9TQ62sl
T6TuDra97qJJ1Pg9VZGHT/OoSleLA4s0qpNlAxnAnNaO0conTsREZQ==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2023-07-17T12:01:21Z"
mac: ENC[AES256_GCM,data:003nzaNWdXLscJy9XZcwAb93M9Eo3Bdg9s5MHHiv4/TitaaZE7VghWHKv5DrcoA0GGdN9SnIVqHd+o6OPVER91XLVxoiX7ixtlu1RIRfqdama3RRPtSki5wP5wPz6qF4vRBIKfrTpZK7thXLYs2NhCB9HJYljNhcgLtzEG5bWgY=,iv:tEP530Pij3bt3hc5PCYGjFFyPiKgo34dHm23Xtmrxt8=,tag:macr/U8R5+wktTBJ9OqI/w==,type:str]
pgp:
- created_at: "2023-07-02T20:30:30Z"
enc: |-
-----BEGIN PGP MESSAGE-----
wcBMA0SHG/zF3227AQgAli6PQTNwh9N5Oo1LJvHysQNdxdZAq4QbfcwcIfpJIDds
0TQs28EeSttv47P2ga4Nb1O5dVUnlvwbP+uV9RbioYF4LfZ2/uNlS1lSGwsLbPcf
SsY+U2WvpJgyo3EWQRusR/OXLFg0EdqDPDseH1w1u8tGALDrewre5oBjrMa0GRbb
5F8lK/FVxSJxz70UkHgE7c6pSqPpznlgVduUwoOWnlhTw4aet7lLik+/C9K7LBDc
Q04sW1W2yqYr2882xPAUwfBhgfZQ1Uld5aDwqwPH+1Ttx26e7JrGSFaIX8GTVVpc
RJqN6uman5at3lOaEKXS1qf7T9ZI003CvdFwHS1G89JRAemdeK4bur5wS3VXBhDx
44fHgmDcOohHilTahwmyXCT70KjjHbd9665vAhsl0N9aOrOBdOgq0HmLjNzAQkz5
uGcEfsNiUXPngkNrh/Nvhh9w
=yHDZ
-----END PGP MESSAGE-----
fp: 6F7069FE6B96E894E60EC45C6EEFA706CB17E89B
unencrypted_suffix: _unencrypted
version: 3.7.3
wcBMA0SHG/zF3227AQgAli6PQTNwh9N5Oo1LJvHysQNdxdZAq4QbfcwcIfpJIDds
0TQs28EeSttv47P2ga4Nb1O5dVUnlvwbP+uV9RbioYF4LfZ2/uNlS1lSGwsLbPcf
SsY+U2WvpJgyo3EWQRusR/OXLFg0EdqDPDseH1w1u8tGALDrewre5oBjrMa0GRbb
5F8lK/FVxSJxz70UkHgE7c6pSqPpznlgVduUwoOWnlhTw4aet7lLik+/C9K7LBDc
Q04sW1W2yqYr2882xPAUwfBhgfZQ1Uld5aDwqwPH+1Ttx26e7JrGSFaIX8GTVVpc
RJqN6uman5at3lOaEKXS1qf7T9ZI003CvdFwHS1G89JRAemdeK4bur5wS3VXBhDx
44fHgmDcOohHilTahwmyXCT70KjjHbd9665vAhsl0N9aOrOBdOgq0HmLjNzAQkz5
uGcEfsNiUXPngkNrh/Nvhh9w
=yHDZ
-----END PGP MESSAGE-----
fp: 6F7069FE6B96E894E60EC45C6EEFA706CB17E89B
unencrypted_suffix: _unencrypted
version: 3.7.3

View file

@@ -11,350 +11,361 @@
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = {
self,
nixpkgs,
nixos-generators,
...
}: let
systems = [
"aarch64-linux"
"x86_64-linux"
];
forAllSystems = nixpkgs.lib.genAttrs systems;
in {
nixosConfigurations.default =
nixpkgs.lib.nixosSystem
{
outputs =
{ self, nixpkgs, ... }:
let
systems = [
"aarch64-linux"
"x86_64-linux"
];
forAllSystems = nixpkgs.lib.genAttrs systems;
in
{
nixosConfigurations.default = nixpkgs.lib.nixosSystem {
system = "aarch64-linux";
specialArgs = {};
specialArgs = { };
modules = [
({
config,
modulesPath,
pkgs,
lib,
...
}: {
nixpkgs.overlays = [
(final: previous: {
# inherit (self.inputs.nixpkgs-systemd256.legacyPackages.${pkgs.system}) systemd systemdMinimal;
# systemd =
# self.inputs.nixpkgs-systemd256.legacyPackages.${pkgs.system}.systemd.overrideAttrs (prevAttrs: {
# src = /home/steveej/src/others/systemd;
(
{
config,
modulesPath,
pkgs,
lib,
...
}:
{
nixpkgs.overlays = [
(_final: _previous: {
# inherit (self.inputs.nixpkgs-systemd256.legacyPackages.${pkgs.system}) systemd systemdMinimal;
# systemd =
# self.inputs.nixpkgs-systemd256.legacyPackages.${pkgs.system}.systemd.overrideAttrs (prevAttrs: {
# src = /home/steveej/src/others/systemd;
# withAppArmor = false;
# withRepart = false;
# withHomed = false;
# withAcl = false;
# withEfi = false;
# withBootloader = false;
# withCryptsetup = false;
# withLibBPF = false;
# withOomd = false;
# withFido2 = false;
# withApparmor = false;
# withDocumentation = false;
# withUtmp = false;
# withQrencode = false;
# withVmspawn = false;
# withMachined = false;
# withLogTrace = true;
# withArchive = false;
# # don't need these but cause errors, for example files not found
# # withLogind = false;
# })
# pkgs.systemdMinimal.override {
# # getting errors with these disabled
# withCoredump = true;
# withCompression = true;
# withLogind = true;
# withSysusers = true;
# withUserDb = true;
# }
# pkgs.systemdMinimal
# pkgs.systemd.override {
# withRepart = false;
# withHomed = false;
# withAcl = false;
# withEfi = false;
# withBootloader = false;
# withCryptsetup = false;
# withLibBPF = false;
# withOomd = false;
# withFido2 = false;
# withApparmor = false;
# withDocumentation = false;
# withUtmp = false;
# withQrencode = false;
# withVmspawn = false;
# withMachined = false;
# withLogTrace = true;
# # don't need these but cause errors, for example files not found
# # withLogind = false;
# }
# ;
})
];
# withAppArmor = false;
# withRepart = false;
# withHomed = false;
# withAcl = false;
# withEfi = false;
# withBootloader = false;
# withCryptsetup = false;
# withLibBPF = false;
# withOomd = false;
# withFido2 = false;
# withApparmor = false;
# withDocumentation = false;
# withUtmp = false;
# withQrencode = false;
# withVmspawn = false;
# withMachined = false;
# withLogTrace = true;
# withArchive = false;
# # don't need these but cause errors, for example files not found
# # withLogind = false;
# })
# pkgs.systemdMinimal.override {
# # getting errors with these disabled
# withCoredump = true;
# withCompression = true;
# withLogind = true;
# withSysusers = true;
# withUserDb = true;
# }
# pkgs.systemdMinimal
# pkgs.systemd.override {
# withRepart = false;
# withHomed = false;
# withAcl = false;
# withEfi = false;
# withBootloader = false;
# withCryptsetup = false;
# withLibBPF = false;
# withOomd = false;
# withFido2 = false;
# withApparmor = false;
# withDocumentation = false;
# withUtmp = false;
# withQrencode = false;
# withVmspawn = false;
# withMachined = false;
# withLogTrace = true;
# # don't need these but cause errors for exampel files not found
# # withLogind = false;
# }
# ;
})
];
imports = [
(modulesPath + "/profiles/minimal.nix")
];
system.stateVersion = "24.11";
imports = [ (modulesPath + "/profiles/minimal.nix") ];
system.stateVersion = "24.11";
# https://github.com/hercules-ci/arion/blob/c24c185e67f093298a081900b49ca18716077dec/src/nix/modules/nixos/container-systemd.nix
boot.isContainer = true;
# boot.tmp.useTmpfs = true;
boot.loader.grub.enable = lib.mkForce false;
boot.loader.systemd-boot.enable = lib.mkForce false;
services.journald.console = "/dev/console";
services.journald.storage = "none";
# boot.specialFileSystems = lib.mkForce {};
# https://github.com/hercules-ci/arion/blob/c24c185e67f093298a081900b49ca18716077dec/src/nix/modules/nixos/container-systemd.nix
boot.isContainer = true;
# boot.tmp.useTmpfs = true;
boot.loader.grub.enable = lib.mkForce false;
boot.loader.systemd-boot.enable = lib.mkForce false;
services.journald.console = "/dev/console";
services.journald.storage = "none";
# boot.specialFileSystems = lib.mkForce {};
services.nscd.enable = false;
system.nssModules = lib.mkForce [];
systemd.services.systemd-logind.enable = false;
systemd.services.console-getty.enable = false;
services.nscd.enable = false;
system.nssModules = lib.mkForce [ ];
systemd.services.systemd-logind.enable = false;
systemd.services.console-getty.enable = false;
systemd.sockets.nix-daemon.enable = false;
systemd.services.nix-daemon.enable = false;
systemd.oomd.enable = false;
networking.useDHCP = false;
networking.firewall.enable = false;
systemd.sockets.nix-daemon.enable = false;
systemd.services.nix-daemon.enable = false;
systemd.oomd.enable = false;
networking.useDHCP = false;
networking.firewall.enable = false;
# system.build.earlyMountScript =
# lib.mkForce ''
# '';
# system.activationScripts.specialfs =
# lib.mkForce ''
# '';
boot.postBootCommands = ''
ls -lha /run
mkdir -p /run/wrappers
'';
# system.build.earlyMountScript =
# lib.mkForce ''
# '';
# system.activationScripts.specialfs =
# lib.mkForce ''
# '';
boot.postBootCommands = ''
ls -lha /run
mkdir -p /run/wrappers
'';
boot.kernelParams = [
"systemd.log_level=debug"
];
boot.kernelParams = [ "systemd.log_level=debug" ];
# services.udev.enable = false;
# services.udev.enable = false;
# TODO: this is only needed because `/run/current-system` is missing
# environment.variables.PATH = "${lib.makeBinPath config.environment.systemPackages}:$PATH";
# TODO: this is only needed because `/run/current-system` is missing
# environment.variables.PATH = "${lib.makeBinPath config.environment.systemPackages}:$PATH";
systemd.mounts = lib.mkForce [];
fileSystems = lib.mkForce {};
systemd.mounts = lib.mkForce [ ];
fileSystems = lib.mkForce { };
services.mycelium.enable = false;
services.mycelium.keyFile = "/var/lib/secrets/mycelium-keyfile";
systemd.services.mycelium.serviceConfig.DynamicUser = lib.mkForce false;
systemd.services.mycelium.serviceConfig.User = lib.mkForce "root";
systemd.services.mycelium.serviceConfig.ExecStart = lib.mkForce (pkgs.writeShellScript "mycelium" ''
while true; do
ls -lha $CREDENTIALS_DIRECTORY
sleep 5
done
'');
systemd.services.testing-credentials = {
wantedBy = ["multi-user.target"];
path = [pkgs.coreutils];
serviceConfig = {
# SyslogIdentifier = "testing-credentials";
# StateDirectory = "testing-credentials";
# DynamicUser = true;
# User = "tc";
# ProtectHome = true;
# ProtectSystem = true;
# LoadCredential = [
# "mycelium-keyfile:${self.nixosConfigurations.default.config.services.mycelium.keyFile}"
# "hosts:/etc/hosts"
# ];
SetCredential = "mycelium-keyfile:not secret string";
ExecStart = lib.mkForce (pkgs.writeShellScript "mycelium" ''
cd $STATE_DIRECTORY
pwd
env
services.mycelium.enable = false;
services.mycelium.keyFile = "/var/lib/secrets/mycelium-keyfile";
systemd.services.mycelium.serviceConfig.DynamicUser = lib.mkForce false;
systemd.services.mycelium.serviceConfig.User = lib.mkForce "root";
systemd.services.mycelium.serviceConfig.ExecStart = lib.mkForce (
pkgs.writeShellScript "mycelium" ''
while true; do
ls -lha $CREDENTIALS_DIRECTORY
sleep 5
done
'');
};
};
''
);
services.caddy = {
enable = true;
globalConfig = ''
auto_https off
'';
virtualHosts.":80" = {
extraConfig = ''
respond "hello from ${config.networking.hostName}"
systemd.services.testing-credentials = {
wantedBy = [ "multi-user.target" ];
path = [ pkgs.coreutils ];
serviceConfig = {
# SyslogIdentifier = "testing-credentials";
# StateDirectory = "testing-credentials";
# DynamicUser = true;
# User = "tc";
# ProtectHome = true;
# ProtectSystem = true;
# LoadCredential = [
# "mycelium-keyfile:${self.nixosConfigurations.default.config.services.mycelium.keyFile}"
# "hosts:/etc/hosts"
# ];
SetCredential = "mycelium-keyfile:not secret string";
ExecStart = lib.mkForce (
pkgs.writeShellScript "mycelium" ''
cd $STATE_DIRECTORY
pwd
env
while true; do
ls -lha $CREDENTIALS_DIRECTORY
sleep 5
done
''
);
};
};
services.caddy = {
enable = true;
globalConfig = ''
auto_https off
'';
virtualHosts.":80" = {
extraConfig = ''
respond "hello from ${config.networking.hostName}"
'';
};
};
};
})
];
};
packages = forAllSystems (system: let
name = "mycelium";
inherit (self.inputs) nix-snapshotter;
config = {
entrypoint = "${self.nixosConfigurations.default.config.system.build.toplevel}/init";
# port = 2379;
args = [
];
# nodePort = 30001;
};
myceliumPorts = {
tcp = [9651];
udp = [9650 9651];
};
inherit
(config)
entrypoint
# port
args
# nodePort
;
pkgs = import nixpkgs {
overlays = [nix-snapshotter.overlays.default];
};
image = pkgs.nix-snapshotter.buildImage {
inherit name;
resolvedByNix = true;
config = {
entrypoint = [entrypoint];
env = [
# this is read by the `/init` script and prevents various incompatible commands like mount, etc.
# the value of this doesn't seem to matter as long as it's not an empty string.
"container=nerd"
"SYSTEMD_LOG_LEVEL=debug"
];
volumes = {
# "/var/lib/private/mycelium/key.bin" = {};
# "/run" = {};
# "/tmp" = {};
# "/etc" = {};
};
copyToRoot = [
# self.nixosConfigurations.default.config.system.build.toplevel
];
};
};
in {
k8s = let
pod = pkgs.writeText "${name}-pod.json" (builtins.toJSON {
apiVersion = "v1";
kind = "Pod";
metadata = {
inherit name;
labels = {inherit name;};
};
spec.containers = [
{
inherit name args;
image = "nix:0${image}";
ports = [
{
name = "mycelium-tcp-0";
containerPort = builtins.elemAt myceliumPorts.tcp 0;
}
{
name = "mycelium-udp-0";
protocol = "UDP";
containerPort = builtins.elemAt myceliumPorts.udp 0;
}
{
name = "mycelium-udp-1";
protocol = "UDP";
containerPort = builtins.elemAt myceliumPorts.udp 1;
}
];
}
];
});
)
];
};
packages = forAllSystems (
system:
let
name = "mycelium";
inherit (self.inputs) nix-snapshotter;
service = pkgs.writeText "${name}-service.json" (builtins.toJSON {
apiVersion = "v1";
kind = "Service";
metadata.name = "${name}-service";
spec = {
type = "NodePort";
selector = {inherit name;};
ports = [
{
name = "mycelium-tcp-0";
port = builtins.elemAt myceliumPorts.tcp 0 + 50000;
targetPort = "mycelium-tcp-0";
}
{
name = "mycelium-udp-0";
protocol = "UDP";
port = builtins.elemAt myceliumPorts.udp 0 + 50000;
targetPort = "mycelium-udp-0";
}
{
name = "mycelium-udp-1";
protocol = "UDP";
port = builtins.elemAt myceliumPorts.udp 1 + 50000;
targetPort = "mycelium-udp-1";
}
config = {
entrypoint = "${self.nixosConfigurations.default.config.system.build.toplevel}/init";
# port = 2379;
args = [ ];
# nodePort = 30001;
};
myceliumPorts = {
tcp = [ 9651 ];
udp = [
9650
9651
];
};
});
in
pkgs.runCommand "declarative-k8s" {} ''
mkdir -p $out/share/k8s
cp ${pod} $out/share/k8s/
cp ${service} $out/share/k8s/
'';
inherit image;
inherit (config)
entrypoint
# port
start = pkgs.writeShellApplication {
name = "start";
text = ''
set -x
rm -rf ./result
nix build --impure .#image
sudo nix2container load ./result
sudo -E nerdctl run --name ${name} --privileged -dt \
--cgroup-manager cgroupfs \
--volume "$PWD/key.bin.crypt:${self.nixosConfigurations.default.config.services.mycelium.keyFile}:ro" \
"nix:0$(readlink result):latest"
'';
};
args
# nodePort
stop = pkgs.writeShellApplication {
name = "stop";
text = ''
set +e
sudo -E nerdctl stop -t 60 ${name}
sudo -E nerdctl rm --force ${name}
sudo -E nerdctl system prune --all --force
sudo systemctl stop nix-snapshotter
sudo systemctl stop containerd
mount | rg -No '(/var/lib/container|/tmp/initial)[^ ]+' | tac | xargs sudo umount -l
sudo systemctl start containerd
sudo systemctl start nix-snapshotter
'';
;
# tmpfs on /run/credentials/mycelium.service type tmpfs (ro,nosuid,nodev,noexec,relatime,nosymfollow,size=1024k,nr_inodes=1024,mode=700,noswap)
pkgs = import nixpkgs { overlays = [ nix-snapshotter.overlays.default ]; };
# mount -t tmpfs tmpfs /run/credentials/mycelium.service -o ro,nosuid,nodev,noexec,relatime,nosymfollow,size=1024k,nr_inodes=1024,mode=700,noswap
};
});
};
image = pkgs.nix-snapshotter.buildImage {
inherit name;
resolvedByNix = true;
config = {
entrypoint = [ entrypoint ];
env = [
# this is read by the `/init` script and prevents various incompatible commands like mount, etc.
# the value of this doesn't seem to matter as long as it's not an empty string.
"container=nerd"
"SYSTEMD_LOG_LEVEL=debug"
];
volumes = {
# "/var/lib/private/mycelium/key.bin" = {};
# "/run" = {};
# "/tmp" = {};
# "/etc" = {};
};
copyToRoot = [
# self.nixosConfigurations.default.config.system.build.toplevel
];
};
};
in
{
k8s =
let
pod = pkgs.writeText "${name}-pod.json" (
builtins.toJSON {
apiVersion = "v1";
kind = "Pod";
metadata = {
inherit name;
labels = {
inherit name;
};
};
spec.containers = [
{
inherit name args;
image = "nix:0${image}";
ports = [
{
name = "mycelium-tcp-0";
containerPort = builtins.elemAt myceliumPorts.tcp 0;
}
{
name = "mycelium-udp-0";
protocol = "UDP";
containerPort = builtins.elemAt myceliumPorts.udp 0;
}
{
name = "mycelium-udp-1";
protocol = "UDP";
containerPort = builtins.elemAt myceliumPorts.udp 1;
}
];
}
];
}
);
service = pkgs.writeText "${name}-service.json" (
builtins.toJSON {
apiVersion = "v1";
kind = "Service";
metadata.name = "${name}-service";
spec = {
type = "NodePort";
selector = {
inherit name;
};
ports = [
{
name = "mycelium-tcp-0";
port = builtins.elemAt myceliumPorts.tcp 0 + 50000;
targetPort = "mycelium-tcp-0";
}
{
name = "mycelium-udp-0";
protocol = "UDP";
port = builtins.elemAt myceliumPorts.udp 0 + 50000;
targetPort = "mycelium-udp-0";
}
{
name = "mycelium-udp-1";
protocol = "UDP";
port = builtins.elemAt myceliumPorts.udp 1 + 50000;
targetPort = "mycelium-udp-1";
}
];
};
}
);
in
pkgs.runCommand "declarative-k8s" { } ''
mkdir -p $out/share/k8s
cp ${pod} $out/share/k8s/
cp ${service} $out/share/k8s/
'';
inherit image;
start = pkgs.writeShellApplication {
name = "start";
text = ''
set -x
rm -rf ./result
nix build --impure .#image
sudo nix2container load ./result
sudo -E nerdctl run --name ${name} --privileged -dt \
--cgroup-manager cgroupfs \
--volume "$PWD/key.bin.crypt:${self.nixosConfigurations.default.config.services.mycelium.keyFile}:ro" \
"nix:0$(readlink result):latest"
'';
};
stop = pkgs.writeShellApplication {
name = "stop";
text = ''
set +e
sudo -E nerdctl stop -t 60 ${name}
sudo -E nerdctl rm --force ${name}
sudo -E nerdctl system prune --all --force
sudo systemctl stop nix-snapshotter
sudo systemctl stop containerd
mount | rg -No '(/var/lib/container|/tmp/initial)[^ ]+' | tac | xargs sudo umount -l
sudo systemctl start containerd
sudo systemctl start nix-snapshotter
'';
# tmpfs on /run/credentials/mycelium.service type tmpfs (ro,nosuid,nodev,noexec,relatime,nosymfollow,size=1024k,nr_inodes=1024,mode=700,noswap)
# mount -t tmpfs tmpfs /run/credentials/mycelium.service -o ro,nosuid,nodev,noexec,relatime,nosymfollow,size=1024k,nr_inodes=1024,mode=700,noswap
};
}
);
};
}

View file

@@ -6,28 +6,27 @@
syncthingPort ? 22000,
syncthingLocalAnnouncePort ? 21027,
autoStart ? false,
}: {
}:
{
inherit specialArgs;
config = {
config,
pkgs,
...
}: {
system.stateVersion = "20.05"; # Did you read the comment?
config =
{ ... }:
{
system.stateVersion = "20.05"; # Did you read the comment?
imports = [../profiles/containers/configuration.nix];
imports = [ ../profiles/containers/configuration.nix ];
networking.firewall.allowedTCPPorts = [
# syncthing gui
8384
];
networking.firewall.allowedTCPPorts = [
# syncthing gui
8384
];
services.syncthing = {
enable = true;
openDefaultPorts = true;
guiAddress = "0.0.0.0:8384";
services.syncthing = {
enable = true;
openDefaultPorts = true;
guiAddress = "0.0.0.0:8384";
};
};
};
inherit autoStart;

View file

@@ -7,405 +7,417 @@
httpsPort,
forgejoSshPort,
autoStart ? false,
}: let
}:
let
domain = "www.stefanjunker.de";
in {
in
{
inherit specialArgs;
config = {
config,
pkgs,
lib,
repoFlake,
nodeFlake,
system,
...
}: {
system.stateVersion = "22.05"; # Did you read the comment?
config =
{
config,
pkgs,
lib,
repoFlake,
nodeFlake,
system,
...
}:
{
system.stateVersion = "22.05"; # Did you read the comment?
disabledModules = [
"services/misc/forgejo.nix"
"services/security/kanidm.nix"
];
imports = [
"${nodeFlake.inputs.nixpkgs-unstable}/nixos/modules/services/misc/forgejo.nix"
"${repoFlake.inputs.nixpkgs-kanidm}/nixos/modules/services/security/kanidm.nix"
../profiles/containers/configuration.nix
repoFlake.inputs.sops-nix.nixosModules.sops
];
sops.defaultSopsFile = ./webserver_secrets.yaml;
networking.firewall.allowedTCPPorts = [
httpPort
httpsPort
forgejoSshPort
];
sops.age.sshKeyPaths = ["/etc/ssh/ssh_host_ed25519_key"];
sops.secrets.hedgedoc_environment_file = {
sopsFile = ./webserver_secrets.yaml;
owner = config.users.users.hedgedoc.name;
};
services.caddy = {
enable = true;
logFormat = ''
level ERROR
'';
virtualHosts."${domain}" = {
extraConfig = ''
redir /hedgedoc* https://hedgedoc.${domain}
file_server /*/* {
browse
root /var/www/stefanjunker.de/htdocs/caddy
pass_thru
}
# respond "Hi"
# respond (not /*/*) "Hi"
'';
};
virtualHosts."hedgedoc.${domain}" = {
extraConfig = ''
reverse_proxy http://[::1]:3000
'';
};
virtualHosts."authelia.${domain}" = {
extraConfig = ''
reverse_proxy http://127.0.0.1:${builtins.toString config.services.authelia.instances.default.settings.server.port}
'';
};
virtualHosts."lldap.${domain}" = {
extraConfig = ''
reverse_proxy http://127.0.0.1:${builtins.toString config.services.lldap.settings.http_port}
'';
};
virtualHosts."forgejo.${domain}" = {
extraConfig = ''
reverse_proxy http://127.0.0.1:${builtins.toString config.services.forgejo.settings.server.HTTP_PORT}
'';
};
virtualHosts."kanidm.${domain}" = {
extraConfig = ''
reverse_proxy https://${builtins.toString config.services.kanidm.serverSettings.bindaddress} {
transport http {
tls_server_name ${config.services.kanidm.serverSettings.domain}
}
}
'';
};
};
services.hedgedoc = {
enable = true;
settings = {
domain = "hedgedoc.${domain}";
urlPath = "";
protocolUseSSL = true;
db = {
dialect = "sqlite";
storage = "/var/lib/hedgedoc/db.hedgedoc.sqlite";
};
allowAnonymous = false;
allowAnonymousEdits = false;
allowGravatar = false;
allowFreeURL = false;
defaultPermission = "private";
allowEmailRegister = false;
email = false;
ldap = {
url = "ldap://127.0.0.1:${builtins.toString config.services.lldap.settings.ldap_port}";
bindDn = "uid=admin,ou=people,dc=stefanjunker,dc=de";
# these are set via the `environmentFile`
# bindCredentials = "$LDAP_ADMIN_PASSWORD";
searchBase = "ou=people,dc=stefanjunker,dc=de";
searchFilter = "(&(memberOf=cn=hedgedoc,ou=groups,dc=stefanjunker,dc=de)(uid={{username}}))";
useridField = "uid";
};
oauth2 = let
originURL = config.services.kanidm.serverSettings.origin;
in {
providerName = "kanidm (${originURL})";
authorizationURL = "${originURL}/ui/oauth2";
tokenURL = "${originURL}/oauth2/token";
userProfileURL = "${originURL}/oauth2/openid/hedgedoc/userinfo";
scope = "openid email profile";
# rolesClaim = "roles";
# accessRole = "role/hedgedoc";
userProfileUsernameAttr = "name";
userProfileDisplayNameAttr = "displayname";
userProfileEmailAttr = "email";
clientID = "hedgedoc";
# set via the `environmentFile`
# clientSecret = "$CMD_OAUTH2_CLIENT_SECRET";
};
uploadsPath = "/var/lib/hedgedoc/uploads";
};
environmentFile = config.sops.secrets.hedgedoc_environment_file.path;
};
services.jitsi-meet = {
enable = false;
hostName = "meet.${domain}";
config = {
prejoinPageEnabled = true;
};
caddy.enable = true;
nginx.enable = false;
};
sops.secrets.authelia_storageEncryptionKey = {
sopsFile = ./webserver_secrets.yaml;
owner = config.users.users.authelia-default.name;
};
sops.secrets.authelia_jwtSecret = {
sopsFile = ./webserver_secrets.yaml;
owner = config.users.users.authelia-default.name;
};
services.authelia.instances.default = let
baseDir = "/var/lib/authelia-default";
in {
enable = true;
secrets.storageEncryptionKeyFile = config.sops.secrets.authelia_storageEncryptionKey.path;
secrets.jwtSecretFile = config.sops.secrets.authelia_jwtSecret.path;
settings = {
theme = "auto";
default_2fa_method = "totp";
log.level = "debug";
server = {
disable_healthcheck = true;
host = "127.0.0.1";
port = 9091;
# path = "authelia";
};
storage = {
local.path = "${baseDir}/authelia.sqlite";
};
authentication_backend = {
file.path = "${baseDir}/first_factor.yaml";
file.search.email = true;
file.search.case_insensitive = false;
};
access_control = {
default_policy = "one_factor";
};
session.domain = "stefanjunker.de";
notifier = {
disable_startup_check = true;
filesystem.filename = "${baseDir}/notification.txt";
};
};
};
users.groups.lldap = {};
users.users.lldap = {
isSystemUser = true;
group = "lldap";
};
sops.secrets.lldap_jwtSecret = {
sopsFile = ./webserver_secrets.yaml;
owner = config.users.users.lldap.name;
};
sops.secrets.lldap_adminPassword = {
sopsFile = ./webserver_secrets.yaml;
owner = config.users.users.lldap.name;
};
sops.secrets.lldap_environmentFile = {
sopsFile = ./webserver_secrets.yaml;
owner = config.users.users.lldap.name;
};
services.lldap = {
enable = true;
environment = {
LLDAP_JWT_SECRET_FILE = config.sops.secrets.lldap_jwtSecret.path;
LLDAP_LDAP_USER_PASS_FILE = config.sops.secrets.lldap_adminPassword.path;
};
environmentFile = config.sops.secrets.lldap_environmentFile.path;
settings = {
verbose = true;
ldap_base_dn = "dc=stefanjunker,dc=de";
http_url = "https://lldap.${domain}";
## Options to configure SMTP parameters, to send password reset emails.
## To set these options from environment variables, use the following format
## (example with "password"): LLDAP_SMTP_OPTIONS__PASSWORD
smtp_options = {
## Whether to enable password reset via email, from LLDAP.
enable_password_reset = true;
# port = 465;
## How the connection is encrypted, either "NONE" (no encryption), "TLS" or "STARTTLS".
# smtp_encryption = "TLS";
};
# database_url = "sqlite:///var/lib/lldap/users.db?mode=rwc";
};
};
sops.secrets.FORGEJO_JWT_SECRET = {};
sops.secrets.FORGEJO_INTERNAL_TOKEN = {};
sops.secrets.FORGEJO_SECRET_KEY = {};
services.forgejo = {
enable = true;
package = nodeFlake.inputs.nixpkgs-unstable.legacyPackages.${pkgs.system}.forgejo;
settings = {
service.DISABLE_REGISTRATION = true;
server.HTTP_ADDR = "127.0.0.1";
server.START_SSH_SERVER = true;
server.SSH_PORT = forgejoSshPort;
server.ROOT_URL = "https://forgejo.${domain}";
server.HTTP_PORT = 3001;
# TODO: how do i get a 3072 length SSH key with the yubikey?
"ssh.minimum_key_sizes".RSA = 2048;
};
secrets = {
oauth2.JWT_SECRET = lib.mkForce config.sops.secrets.FORGEJO_JWT_SECRET.path;
security.INTERNAL_TOKEN = lib.mkForce config.sops.secrets.FORGEJO_INTERNAL_TOKEN.path;
security.SECRET_KEY = lib.mkForce config.sops.secrets.FORGEJO_SECRET_KEY.path;
};
};
systemd.services.lldap.serviceConfig.User = config.users.users.lldap.name;
systemd.services.lldap.serviceConfig.Group = config.users.groups.lldap.name;
systemd.services.lldap.serviceConfig.DynamicUser = lib.mkForce false;
# combine a path watcher with a service that transfers the certs obtained by caddy to kanidm
systemd.paths.kanidm-tls-watch = {
enable = true;
requiredBy = ["kanidm.service"];
pathConfig = {
PathChanged = [
"${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${config.services.kanidm.serverSettings.domain}/${config.services.kanidm.serverSettings.domain}.key"
"${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${config.services.kanidm.serverSettings.domain}/${config.services.kanidm.serverSettings.domain}.crt"
];
Unit = "kanidm-tls-update.service";
};
};
systemd.services.kanidm-tls-update = let
dbDir =
builtins.dirOf
config.services.kanidm.serverSettings.db_path;
in {
enable = true;
requiredBy = ["kanidm.service"];
unitConfig = {
# ConditionPathExists = [
# "${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${config.services.kanidm.serverSettings.domain}/${config.services.kanidm.serverSettings.domain}.key"
# "${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${config.services.kanidm.serverSettings.domain}/${config.services.kanidm.serverSettings.domain}.crt"
# ];
};
serviceConfig.Type = "oneshot";
script = let
tlsDir = builtins.dirOf config.services.kanidm.serverSettings.tls_key;
in ''
set -xe
cat "${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${config.services.kanidm.serverSettings.domain}/${config.services.kanidm.serverSettings.domain}.key" > tls.key
cat "${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${config.services.kanidm.serverSettings.domain}/${config.services.kanidm.serverSettings.domain}.crt" > tls.chain
chown ${config.systemd.services.kanidm.serviceConfig.User}:${config.systemd.services.kanidm.serviceConfig.Group} tls.{key,chain}
chmod 400 tls.{key,chain}
# create the kanidm directory in case it's missing
if [[ ! -d ${tlsDir} ]]; then
mkdir -p ${tlsDir}
chown -R ${config.systemd.services.kanidm.serviceConfig.User}:${config.systemd.services.kanidm.serviceConfig.Group} ${tlsDir}
chmod 700 ${tlsDir}
fi
mv tls.key ${config.services.kanidm.serverSettings.tls_key}
mv tls.chain ${config.services.kanidm.serverSettings.tls_chain}
if [[ ! -d ${dbDir} ]]; then
mkdir -p ${dbDir}
chown -R ${config.systemd.services.kanidm.serviceConfig.User}:${config.systemd.services.kanidm.serviceConfig.Group} ${dbDir}
chmod 700 ${dbDir}
fi
'';
};
systemd.services.kanidm.serviceConfig = let
dbDir =
builtins.dirOf
config.services.kanidm.serverSettings.db_path;
# stateDir = "/var/lib/${config.systemd.services.kanidm.serviceConfig.StateDirectory}";
in {
# ExecStartPre = ''
# mkdir -p ${dbDir}
# '';
BindPaths = [
dbDir
# stateDir
disabledModules = [
"services/misc/forgejo.nix"
"services/security/kanidm.nix"
];
};
services.kanidm = let
dataDir = "/var/lib/kanidm";
in {
package = repoFlake.inputs.nixpkgs-kanidm.legacyPackages.${pkgs.system}.kanidm;
imports = [
"${nodeFlake.inputs.nixpkgs-unstable}/nixos/modules/services/misc/forgejo.nix"
"${repoFlake.inputs.nixpkgs-kanidm}/nixos/modules/services/security/kanidm.nix"
enablePam = false;
enableClient = false;
../profiles/containers/configuration.nix
enableServer = true;
serverSettings = {
role = "WriteReplica";
log_level = "debug";
repoFlake.inputs.sops-nix.nixosModules.sops
];
domain = "kanidm.${domain}";
origin = "https://kanidm.${domain}";
sops.defaultSopsFile = ./webserver_secrets.yaml;
db_path = "${dataDir}/db/kanidm.db";
networking.firewall.allowedTCPPorts = [
httpPort
httpsPort
forgejoSshPort
];
bindaddress = "127.0.0.1:8444";
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.secrets.hedgedoc_environment_file = {
sopsFile = ./webserver_secrets.yaml;
owner = config.users.users.hedgedoc.name;
};
# don't expose ldap
# ldapbindaddress = "[::1]:6636";
services.caddy = {
enable = true;
logFormat = ''
level ERROR
'';
virtualHosts."${domain}" = {
extraConfig = ''
redir /hedgedoc* https://hedgedoc.${domain}
tls_key = "${dataDir}/tls/tls.key";
tls_chain = "${dataDir}/tls/tls.chain";
file_server /*/* {
browse
root /var/www/stefanjunker.de/htdocs/caddy
pass_thru
}
online_backup = {
schedule = "00 06 * * *";
# respond "Hi"
# respond (not /*/*) "Hi"
'';
};
virtualHosts."hedgedoc.${domain}" = {
extraConfig = ''
reverse_proxy http://[::1]:3000
'';
};
virtualHosts."authelia.${domain}" = {
extraConfig = ''
reverse_proxy http://127.0.0.1:${builtins.toString config.services.authelia.instances.default.settings.server.port}
'';
};
virtualHosts."lldap.${domain}" = {
extraConfig = ''
reverse_proxy http://127.0.0.1:${builtins.toString config.services.lldap.settings.http_port}
'';
};
virtualHosts."forgejo.${domain}" = {
extraConfig = ''
reverse_proxy http://127.0.0.1:${builtins.toString config.services.forgejo.settings.server.HTTP_PORT}
'';
};
virtualHosts."kanidm.${domain}" = {
extraConfig = ''
reverse_proxy https://${builtins.toString config.services.kanidm.serverSettings.bindaddress} {
transport http {
tls_server_name ${config.services.kanidm.serverSettings.domain}
}
}
'';
};
};
services.hedgedoc = {
enable = true;
settings = {
domain = "hedgedoc.${domain}";
urlPath = "";
protocolUseSSL = true;
db = {
dialect = "sqlite";
storage = "/var/lib/hedgedoc/db.hedgedoc.sqlite";
};
allowAnonymous = false;
allowAnonymousEdits = false;
allowGravatar = false;
allowFreeURL = false;
defaultPermission = "private";
allowEmailRegister = false;
email = false;
ldap = {
url = "ldap://127.0.0.1:${builtins.toString config.services.lldap.settings.ldap_port}";
bindDn = "uid=admin,ou=people,dc=stefanjunker,dc=de";
# these are set via the `environmentFile`
# bindCredentials = "$LDAP_ADMIN_PASSWORD";
searchBase = "ou=people,dc=stefanjunker,dc=de";
searchFilter = "(&(memberOf=cn=hedgedoc,ou=groups,dc=stefanjunker,dc=de)(uid={{username}}))";
useridField = "uid";
};
oauth2 =
let
originURL = config.services.kanidm.serverSettings.origin;
in
{
providerName = "kanidm (${originURL})";
authorizationURL = "${originURL}/ui/oauth2";
tokenURL = "${originURL}/oauth2/token";
userProfileURL = "${originURL}/oauth2/openid/hedgedoc/userinfo";
scope = "openid email profile";
# rolesClaim = "roles";
# accessRole = "role/hedgedoc";
userProfileUsernameAttr = "name";
userProfileDisplayNameAttr = "displayname";
userProfileEmailAttr = "email";
clientID = "hedgedoc";
# set via the `environmentFile`
# clientSecret = "$CMD_OAUTH2_CLIENT_SECRET";
};
uploadsPath = "/var/lib/hedgedoc/uploads";
};
environmentFile = config.sops.secrets.hedgedoc_environment_file.path;
};
services.jitsi-meet = {
enable = false;
hostName = "meet.${domain}";
config = {
prejoinPageEnabled = true;
};
caddy.enable = true;
nginx.enable = false;
};
sops.secrets.authelia_storageEncryptionKey = {
sopsFile = ./webserver_secrets.yaml;
owner = config.users.users.authelia-default.name;
};
sops.secrets.authelia_jwtSecret = {
sopsFile = ./webserver_secrets.yaml;
owner = config.users.users.authelia-default.name;
};
services.authelia.instances.default =
let
baseDir = "/var/lib/authelia-default";
in
{
enable = true;
secrets.storageEncryptionKeyFile = config.sops.secrets.authelia_storageEncryptionKey.path;
secrets.jwtSecretFile = config.sops.secrets.authelia_jwtSecret.path;
settings = {
theme = "auto";
default_2fa_method = "totp";
log.level = "debug";
server = {
disable_healthcheck = true;
host = "127.0.0.1";
port = 9091;
# path = "authelia";
};
storage = {
local.path = "${baseDir}/authelia.sqlite";
};
authentication_backend = {
file.path = "${baseDir}/first_factor.yaml";
file.search.email = true;
file.search.case_insensitive = false;
};
access_control = {
default_policy = "one_factor";
};
session.domain = "stefanjunker.de";
notifier = {
disable_startup_check = true;
filesystem.filename = "${baseDir}/notification.txt";
};
};
};
users.groups.lldap = { };
users.users.lldap = {
isSystemUser = true;
group = "lldap";
};
sops.secrets.lldap_jwtSecret = {
sopsFile = ./webserver_secrets.yaml;
owner = config.users.users.lldap.name;
};
sops.secrets.lldap_adminPassword = {
sopsFile = ./webserver_secrets.yaml;
owner = config.users.users.lldap.name;
};
sops.secrets.lldap_environmentFile = {
sopsFile = ./webserver_secrets.yaml;
owner = config.users.users.lldap.name;
};
services.lldap = {
enable = true;
environment = {
LLDAP_JWT_SECRET_FILE = config.sops.secrets.lldap_jwtSecret.path;
LLDAP_LDAP_USER_PASS_FILE = config.sops.secrets.lldap_adminPassword.path;
};
environmentFile = config.sops.secrets.lldap_environmentFile.path;
settings = {
verbose = true;
ldap_base_dn = "dc=stefanjunker,dc=de";
http_url = "https://lldap.${domain}";
## Options to configure SMTP parameters, to send password reset emails.
## To set these options from environment variables, use the following format
## (example with "password"): LLDAP_SMTP_OPTIONS__PASSWORD
smtp_options = {
## Whether to enable password reset via email, from LLDAP.
enable_password_reset = true;
# port = 465;
## How the connection is encrypted, either "NONE" (no encryption), "TLS" or "STARTTLS".
# smtp_encryption = "TLS";
};
# database_url = "sqlite:///var/lib/lldap/users.db?mode=rwc";
};
};
sops.secrets.FORGEJO_JWT_SECRET = { };
sops.secrets.FORGEJO_INTERNAL_TOKEN = { };
sops.secrets.FORGEJO_SECRET_KEY = { };
services.forgejo = {
enable = true;
package = nodeFlake.inputs.nixpkgs-unstable.legacyPackages.${pkgs.system}.forgejo;
settings = {
service.DISABLE_REGISTRATION = true;
server.HTTP_ADDR = "127.0.0.1";
server.START_SSH_SERVER = true;
server.SSH_PORT = forgejoSshPort;
server.ROOT_URL = "https://forgejo.${domain}";
server.HTTP_PORT = 3001;
# TODO: how do i get a 3072 length SSH key with the yubikey?
"ssh.minimum_key_sizes".RSA = 2048;
};
secrets = {
oauth2.JWT_SECRET = lib.mkForce config.sops.secrets.FORGEJO_JWT_SECRET.path;
security.INTERNAL_TOKEN = lib.mkForce config.sops.secrets.FORGEJO_INTERNAL_TOKEN.path;
security.SECRET_KEY = lib.mkForce config.sops.secrets.FORGEJO_SECRET_KEY.path;
};
};
systemd.services.lldap.serviceConfig.User = config.users.users.lldap.name;
systemd.services.lldap.serviceConfig.Group = config.users.groups.lldap.name;
systemd.services.lldap.serviceConfig.DynamicUser = lib.mkForce false;
# combine a path watcher with a service that transfers the certs obtained by caddy to kanidm
systemd.paths.kanidm-tls-watch = {
enable = true;
requiredBy = [ "kanidm.service" ];
pathConfig = {
PathChanged = [
"${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${config.services.kanidm.serverSettings.domain}/${config.services.kanidm.serverSettings.domain}.key"
"${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${config.services.kanidm.serverSettings.domain}/${config.services.kanidm.serverSettings.domain}.crt"
];
Unit = "kanidm-tls-update.service";
};
};
systemd.services.kanidm-tls-update =
let
dbDir = builtins.dirOf config.services.kanidm.serverSettings.db_path;
in
{
enable = true;
requiredBy = [ "kanidm.service" ];
unitConfig = {
# ConditionPathExists = [
# "${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${config.services.kanidm.serverSettings.domain}/${config.services.kanidm.serverSettings.domain}.key"
# "${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${config.services.kanidm.serverSettings.domain}/${config.services.kanidm.serverSettings.domain}.crt"
# ];
};
serviceConfig.Type = "oneshot";
script =
let
tlsDir = builtins.dirOf config.services.kanidm.serverSettings.tls_key;
in
''
set -xe
cat "${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${config.services.kanidm.serverSettings.domain}/${config.services.kanidm.serverSettings.domain}.key" > tls.key
cat "${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${config.services.kanidm.serverSettings.domain}/${config.services.kanidm.serverSettings.domain}.crt" > tls.chain
chown ${config.systemd.services.kanidm.serviceConfig.User}:${config.systemd.services.kanidm.serviceConfig.Group} tls.{key,chain}
chmod 400 tls.{key,chain}
# create the kanidm directory in case it's missing
if [[ ! -d ${tlsDir} ]]; then
mkdir -p ${tlsDir}
chown -R ${config.systemd.services.kanidm.serviceConfig.User}:${config.systemd.services.kanidm.serviceConfig.Group} ${tlsDir}
chmod 700 ${tlsDir}
fi
mv tls.key ${config.services.kanidm.serverSettings.tls_key}
mv tls.chain ${config.services.kanidm.serverSettings.tls_chain}
if [[ ! -d ${dbDir} ]]; then
mkdir -p ${dbDir}
chown -R ${config.systemd.services.kanidm.serviceConfig.User}:${config.systemd.services.kanidm.serviceConfig.Group} ${dbDir}
chmod 700 ${dbDir}
fi
'';
};
systemd.services.kanidm.serviceConfig =
let
dbDir = builtins.dirOf config.services.kanidm.serverSettings.db_path;
in
# stateDir = "/var/lib/${config.systemd.services.kanidm.serviceConfig.StateDirectory}";
{
# ExecStartPre = ''
# mkdir -p ${dbDir}
# '';
BindPaths = [
dbDir
# stateDir
];
};
services.kanidm =
let
dataDir = "/var/lib/kanidm";
in
{
package = repoFlake.inputs.nixpkgs-kanidm.legacyPackages.${pkgs.system}.kanidm;
enablePam = false;
enableClient = false;
enableServer = true;
serverSettings = {
role = "WriteReplica";
log_level = "debug";
domain = "kanidm.${domain}";
origin = "https://kanidm.${domain}";
db_path = "${dataDir}/db/kanidm.db";
bindaddress = "127.0.0.1:8444";
# don't expose ldap
# ldapbindaddress = "[::1]:6636";
tls_key = "${dataDir}/tls/tls.key";
tls_chain = "${dataDir}/tls/tls.chain";
online_backup = {
schedule = "00 06 * * *";
};
};
};
};
};
inherit autoStart;

View file

@ -9,37 +9,37 @@ FORGEJO_JWT_SECRET: ENC[AES256_GCM,data:nVz9x7+K+rBIZxuQP7o0WNFHUz89eR9cwBjfSAx9
FORGEJO_INTERNAL_TOKEN: ENC[AES256_GCM,data:EIono9HSyvp1nQM0ij3ln3IUXO4moFbRgVddeV0BZBXmZG05jdjZ1SIXo/BxoSmRKnjllR7P00CpajNM5zORldlsBId5oAYL5GZtY3/nmxeXucJidknuow22G7Z8wRJJGBdishbgQhmc,iv:1D93gTUF1+DUR8qLJgML+oUhvSslhxEjGnbBC/PWHXw=,tag:NZB+mwba4TzLcUANZLDRTw==,type:str]
FORGEJO_SECRET_KEY: ENC[AES256_GCM,data:CewYFZtcXKUD5/oSM0Q32rhw+urdA0eQhdYp8EFHUXxEtL6f5NWK6IOwIlMuEv1/FjtTWlqxWekOZpmxBRzwnw==,iv:qLyVB7Nc+rDbBoO5g82/vPdykwOATHCSDLhvS+fK9PM=,tag:4NMhUvKmrRd6qrcQq3R8wA==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age18dmqd7r7fanrfmdxsvwgv9psvhs3hw5ydpvw0na0dv3dlrg4rs3q2wpvgv
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA0SkxFSkJQb29UeVRnZnFh
U3BJeGZ1NUVzMk0ycXBvWExyZDcveXZrd2pJCmpCeS80VE5McWVHQnZpaHFERmNh
YWZIMHRtQkd5Vm54MWR3bkhUUDRvejQKLS0tIG9NSkpCSkEyZFRyOXorWE1KLytP
eWdXVHg5MTlQQU9GeElPeUZXUlBlaTgKceDu3tLbQM/DxY0tJYJTPy2Dl/SBYaoc
KfMZOkc322/NvgWu/3Ke0hV1/eMk8EICwXbSwHhXr5a0+cwPZ9xV4A==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2024-10-16T12:28:51Z"
mac: ENC[AES256_GCM,data:nrd2czzJlBcFfwn6lzh4qqco+/XsU2J6BqvQqMtskh3mL4Xx25IAzxiCno0KlNGr6o4YsuZP5anOX9RvrDq76Us3JQ7pDi3iQGPhmg+SE9u3Rwqn1/3YConvdfPV2DNB+tuyG3UVoRqpA4d+HdcYjN9n1UKk54R6UdSm9UrA+zc=,iv:Juupyet09zUAMu7bmVxq+/Q0bXJAzR0wAyt6vKNns3w=,tag:owdUWuXrQcDdiWi+1geY9A==,type:str]
pgp:
- created_at: "2023-07-09T17:51:27Z"
enc: |-
-----BEGIN PGP MESSAGE-----
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age18dmqd7r7fanrfmdxsvwgv9psvhs3hw5ydpvw0na0dv3dlrg4rs3q2wpvgv
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA0SkxFSkJQb29UeVRnZnFh
U3BJeGZ1NUVzMk0ycXBvWExyZDcveXZrd2pJCmpCeS80VE5McWVHQnZpaHFERmNh
YWZIMHRtQkd5Vm54MWR3bkhUUDRvejQKLS0tIG9NSkpCSkEyZFRyOXorWE1KLytP
eWdXVHg5MTlQQU9GeElPeUZXUlBlaTgKceDu3tLbQM/DxY0tJYJTPy2Dl/SBYaoc
KfMZOkc322/NvgWu/3Ke0hV1/eMk8EICwXbSwHhXr5a0+cwPZ9xV4A==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2024-10-16T12:28:51Z"
mac: ENC[AES256_GCM,data:nrd2czzJlBcFfwn6lzh4qqco+/XsU2J6BqvQqMtskh3mL4Xx25IAzxiCno0KlNGr6o4YsuZP5anOX9RvrDq76Us3JQ7pDi3iQGPhmg+SE9u3Rwqn1/3YConvdfPV2DNB+tuyG3UVoRqpA4d+HdcYjN9n1UKk54R6UdSm9UrA+zc=,iv:Juupyet09zUAMu7bmVxq+/Q0bXJAzR0wAyt6vKNns3w=,tag:owdUWuXrQcDdiWi+1geY9A==,type:str]
pgp:
- created_at: "2023-07-09T17:51:27Z"
enc: |-
-----BEGIN PGP MESSAGE-----
wcBMA0SHG/zF3227AQgAs92CvegZAcuyNllIp9zHUp7jFqfXhuoAOKKmOZvN4TBD
gQM7jKAXXwbMy90gGWF9EkdMzeBqG4S9ZM8gPAYcZkt98F0PGu6wBSvvYnUdDOcO
8tvhEhBSE19xCIR7BeG9bhooEJ1V3LSZzrwyikeHUHAqDQLrwM7jrPOef22PIzH+
XPtwWMVwVzwRJTZ/uV11vIV60b0zfnB8ZJzv7RbXsob8octy4LRe6Vb0BUd5ON3w
YULnyMlFFGekiqAPBk0K5Xib35qBu6mtnmxWzVUqT4pgiShoZsRQs3At1Onm7Cku
bqIDMoCYTvSzwDCOYp2+ni/ZOIuDvBiRCPoNuLjkD9JRAcCbjuxA6w0eaJKFTzoI
F1olIecBtQOQQn+iXya/rx69wDtR9965gecWRMbRg6tYncumpdQB//MWALhVmr6i
g+ZF+9NNqOTKsBzEnuGsZRnI
=iXfo
-----END PGP MESSAGE-----
fp: 6F7069FE6B96E894E60EC45C6EEFA706CB17E89B
unencrypted_suffix: _unencrypted
version: 3.8.1
wcBMA0SHG/zF3227AQgAs92CvegZAcuyNllIp9zHUp7jFqfXhuoAOKKmOZvN4TBD
gQM7jKAXXwbMy90gGWF9EkdMzeBqG4S9ZM8gPAYcZkt98F0PGu6wBSvvYnUdDOcO
8tvhEhBSE19xCIR7BeG9bhooEJ1V3LSZzrwyikeHUHAqDQLrwM7jrPOef22PIzH+
XPtwWMVwVzwRJTZ/uV11vIV60b0zfnB8ZJzv7RbXsob8octy4LRe6Vb0BUd5ON3w
YULnyMlFFGekiqAPBk0K5Xib35qBu6mtnmxWzVUqT4pgiShoZsRQs3At1Onm7Cku
bqIDMoCYTvSzwDCOYp2+ni/ZOIuDvBiRCPoNuLjkD9JRAcCbjuxA6w0eaJKFTzoI
F1olIecBtQOQQn+iXya/rx69wDtR9965gecWRMbRg6tYncumpdQB//MWALhVmr6i
g+ZF+9NNqOTKsBzEnuGsZRnI
=iXfo
-----END PGP MESSAGE-----
fp: 6F7069FE6B96E894E60EC45C6EEFA706CB17E89B
unencrypted_suffix: _unencrypted
version: 3.8.1

View file

@ -1,20 +1,25 @@
{
dir,
pkgs ? import <channels-nixos-stable> {},
ownLib ? import ../lib/default.nix {inherit (pkgs) lib;},
pkgs ? import <channels-nixos-stable> { },
ownLib ? import ../lib/default.nix { inherit (pkgs) lib; },
gitRoot ? "$(git rev-parse --show-toplevel)",
# FIXME: why do these need explicit mentioning?
moreargs ? "",
rebuildarg ? "",
...
} @ args: let
rebuildargsSudo = ["switch" "boot"];
rebuild = {
gitRoot,
rebuildarg ? "dry-activate",
moreargs ? "",
...
}:
}@args:
let
rebuildargsSudo = [
"switch"
"boot"
];
rebuild =
{
gitRoot,
rebuildarg ? "dry-activate",
moreargs ? "",
...
}:
pkgs.writeScript "script" ''
#!/usr/bin/env bash
set -xe
@ -30,25 +35,24 @@
${
if
(builtins.elem rebuildarg rebuildargsSudo)
&& (builtins.match ".*--target-host.*" moreargs) == null
then "sudo -E \\"
else ""
(builtins.elem rebuildarg rebuildargsSudo) && (builtins.match ".*--target-host.*" moreargs) == null
then
"sudo -E \\"
else
""
}
nixos-rebuild --show-trace -I nixos-config=''${NIXOS_CONFIG} ${rebuildarg} ${moreargs}
'';
in {
recipes =
{
rebuild =
rebuild {
inherit gitRoot;
inherit moreargs;
inherit rebuildarg;
}
# // pkgs.lib.attrsets.optionalAttrs (moreargs != "") { inherit moreargs; }
# // pkgs.lib.attrsets.optionalAttrs (rebuildarg != "") { inherit rebuildarg; }
;
in
{
recipes = {
rebuild = rebuild {
inherit gitRoot;
inherit moreargs;
inherit rebuildarg;
}
// (import ./disk.nix (args // {inherit pkgs ownLib gitRoot;}));
# // pkgs.lib.attrsets.optionalAttrs (moreargs != "") { inherit moreargs; }
# // pkgs.lib.attrsets.optionalAttrs (rebuildarg != "") { inherit rebuildarg; }
;
} // (import ./disk.nix (args // { inherit pkgs ownLib gitRoot; }));
}

View file

@ -3,40 +3,29 @@
ownLib,
dir,
gitRoot,
diskId ?
(import ((builtins.getEnv "PWD") + "/${dir}/hw.nix")
{})
.hardware
.opinionatedDisk
.diskId,
diskId ? (import ((builtins.getEnv "PWD") + "/${dir}/hw.nix") { }).hardware.opinionatedDisk.diskId,
encrypted ?
(import ((builtins.getEnv "PWD") + "/${dir}/hw.nix")
{})
.hardware
.opinionatedDisk
.encrypted,
(import ((builtins.getEnv "PWD") + "/${dir}/hw.nix") { }).hardware.opinionatedDisk.encrypted,
previousDiskId ? "",
...
}: let
}:
let
mntRootVol = "/mnt/${diskId}-root";
in rec {
in
rec {
diskMount = pkgs.writeScript "script" ''
#!/usr/bin/env bash
set -xe
echo Mounting ${diskId}
${pkgs.lib.strings.optionalString encrypted ''
sudo cryptsetup luksOpen ${ownLib.disk.bootLuksDevice diskId} ${
ownLib.disk.luksName diskId
}
sudo cryptsetup luksOpen ${ownLib.disk.bootLuksDevice diskId} ${ownLib.disk.luksName diskId}
''}
sleep 1
sudo vgchange -ay ${ownLib.disk.volumeGroup diskId}
sudo mkdir -p /mnt
sudo mkdir ${mntRootVol}
sudo mount ${ownLib.disk.rootFsDevice diskId} ${mntRootVol}
sudo mount ${
ownLib.disk.rootFsDevice diskId
} ${mntRootVol}/nixos/home -o subvol=home
sudo mount ${ownLib.disk.rootFsDevice diskId} ${mntRootVol}/nixos/home -o subvol=home
sudo mount ${ownLib.disk.bootFsDevice diskId} ${mntRootVol}/nixos/boot
'';
@ -73,9 +62,7 @@ in rec {
#!/usr/bin/env bash
set -xe
read -p "Continue to format ${
ownLib.disk.bootGrubDevice diskId
} (YES/n)? " choice
read -p "Continue to format ${ownLib.disk.bootGrubDevice diskId} (YES/n)? " choice
case "$choice" in
YES ) echo "Continuing in 3 seconds..."; sleep 3;;
n|N ) echo "Exiting..."; exit 0;;
@ -122,15 +109,11 @@ in rec {
${pkgs.lib.strings.optionalString encrypted ''
# Encrypt
sudo cryptsetup luksFormat ${ownLib.disk.bootLuksDevice diskId} -
sudo cryptsetup luksOpen ${ownLib.disk.bootLuksDevice diskId} ${
ownLib.disk.luksName diskId
}
sudo cryptsetup luksOpen ${ownLib.disk.bootLuksDevice diskId} ${ownLib.disk.luksName diskId}
''}
# LVM
sudo vgcreate ${ownLib.disk.volumeGroup diskId} ${
ownLib.disk.lvmPv diskId encrypted
}
sudo vgcreate ${ownLib.disk.volumeGroup diskId} ${ownLib.disk.lvmPv diskId encrypted}
sudo lvcreate ${ownLib.disk.volumeGroup diskId} -L 2G -n swap
sudo lvcreate ${ownLib.disk.volumeGroup diskId} -l 100%FREE -n root
@ -154,9 +137,7 @@ in rec {
#!/usr/bin/env bash
set -xe
read -p "Continue to relabel ${
ownLib.disk.bootGrubDevice diskId
} (YES/n)?" choice
read -p "Continue to relabel ${ownLib.disk.bootGrubDevice diskId} (YES/n)?" choice
case "$choice" in
YES ) echo "Continuing in 3 seconds..."; sleep 3;;
n|N ) echo "Exiting..."; exit 0;;
@ -187,13 +168,9 @@ in rec {
if test "${previousDiskId}"; then
${
pkgs.lib.strings.optionalString encrypted ''
sudo cryptsetup luksOpen ${ownLib.disk.bootLuksDevice diskId} ${
ownLib.disk.luksName diskId
}
''
}
${pkgs.lib.strings.optionalString encrypted ''
sudo cryptsetup luksOpen ${ownLib.disk.bootLuksDevice diskId} ${ownLib.disk.luksName diskId}
''}
sync
sleep 1
if sudo vgs ${previousDiskId}; then

View file

@ -1,4 +1,5 @@
{lib, ...}: {
{ lib, ... }:
{
boot.loader.grub.efiSupport = lib.mkForce false;
boot.loader.efi.canTouchEfiVariables = lib.mkForce false;
}

View file

@ -1,4 +1,5 @@
{...}: {
{ ... }:
{
imports = [
../../profiles/common/configuration.nix
../../profiles/graphical/configuration.nix

View file

@ -3,17 +3,17 @@
repoFlake,
nodeFlake,
...
}: let
}:
let
system = "x86_64-linux";
in {
in
{
meta.nodeSpecialArgs.${nodeName} = {
inherit repoFlake nodeName nodeFlake;
packages' = repoFlake.packages.${system};
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath {
inherit system;
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath { inherit system; };
${nodeName} = {
deployment.targetHost = "elias-e525.lan";

View file

@ -6,5 +6,5 @@
inputs.nixpkgs.follows = "nixpkgs";
};
outputs = _: {};
outputs = _: { };
}

View file

@ -1,4 +1,4 @@
{...}: {
_: {
# TASK: new device
hardware.opinionatedDisk = {
enable = true;

View file

@ -1,8 +1,5 @@
{
pkgs,
lib,
...
}: let
{ pkgs, lib, ... }:
let
homeEnv = keyboard: {
imports = [
../../../home-manager/profiles/common.nix
@ -22,26 +19,27 @@
rustdesk
];
};
in {
services.gnome = builtins.mapAttrs (attr: value: lib.mkForce value) {
in
{
services.gnome = builtins.mapAttrs (_attr: value: lib.mkForce value) {
gnome-remote-desktop.enable = true;
};
home-manager.users.steveej = homeEnv {
layout = "en";
options = ["nodeadkey"];
options = [ "nodeadkey" ];
variant = "altgr-intl";
};
home-manager.users.elias = homeEnv {
layout = "de";
options = [];
options = [ ];
variant = "";
};
home-manager.users.justyna = homeEnv {
layout = "de";
options = [];
options = [ ];
variant = "";
};

View file

@ -1,10 +1,5 @@
{ pkgs, lib, ... }:
{
pkgs,
lib,
config,
...
}: let
in {
# TASK: new device
networking.hostName = "elias-e525"; # Define your hostname.
@ -38,11 +33,13 @@ in {
# udev.packages = [ pkgs.gnome3.gnome-settings-daemon ];
};
security.pki.certificateFiles = ["${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"];
security.pki.certificateFiles = [ "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" ];
services.xserver.videoDrivers = ["modesetting"];
services.xserver.videoDrivers = [ "modesetting" ];
boot.kernelPackages = lib.mkForce pkgs.linuxPackages_latest;
nix.gc = {automatic = true;};
nix.gc = {
automatic = true;
};
}

View file

@ -1,12 +1,9 @@
{
config,
pkgs,
lib,
...
}: let
{ config, pkgs, ... }:
let
keys = import ../../../variables/keys.nix;
inherit (pkgs.callPackage ../../lib/default.nix {}) mkUser;
in {
inherit (pkgs.callPackage ../../lib/default.nix { }) mkUser;
in
{
sops.secrets.sharedUsers-elias = {
sopsFile = ../../../../secrets/shared-users.yaml;
neededForUsers = true;

View file

@ -1,4 +1,5 @@
{lib, ...}: {
{ lib, ... }:
{
boot.loader.grub.efiInstallAsRemovable = lib.mkForce true;
boot.loader.efi.canTouchEfiVariables = lib.mkForce false;
}

View file

@ -1,4 +1,5 @@
{...}: {
{ ... }:
{
imports = [
../../profiles/common/configuration.nix
../../modules/opinionatedDisk.nix

View file

@ -1,5 +1,4 @@
{...}: let
in {
_: {
# TASK: new device
hardware.opinionatedDisk = {
enable = true;

View file

@ -1,17 +1,17 @@
{pkgs, ...}: {
nixpkgs.config.packageOverrides = pkgs:
with pkgs; {
nixPath =
(import ../../../default.nix {
versionsPath = ./versions.nix;
})
.nixPath;
{ pkgs, ... }:
{
nixpkgs.config.packageOverrides =
pkgs: with pkgs; {
inherit ((import ../../../default.nix { versionsPath = ./versions.nix; })) nixPath;
};
home-manager.users.steveej = import ../../../home-manager/configuration/text-minimal.nix {
inherit pkgs;
};
environment.systemPackages = with pkgs; [iw wirelesstools];
environment.systemPackages = with pkgs; [
iw
wirelesstools
];
system.stateVersion = "21.11";
}

View file

@ -1,12 +1,8 @@
{
pkgs,
lib,
config,
...
}: let
keys = import ../../../variables/keys.nix;
{ pkgs, lib, ... }:
let
passwords = import ../../../variables/passwords.crypt.nix;
in {
in
{
# TASK: new device
networking.hostName = "fwhost1"; # Define your hostname.
@ -21,11 +17,14 @@ in {
networking.firewall.logRefusedConnections = false;
networking.usePredictableInterfaceNames = false;
networking.bridges.breth.interfaces = ["eth0" "eth1"];
networking.bridges.breth.interfaces = [
"eth0"
"eth1"
];
networking.bridges.breth.rstp = true;
networking.defaultGateway.address = "172.172.171.10";
networking.nameservers = ["172.172.171.10"];
networking.nameservers = [ "172.172.171.10" ];
# WAN interfaces, currently unused because the OPNsense guest acts as a router.
networking.vlans.wan1.id = 3;

View file

@ -1,9 +1 @@
{
config,
pkgs,
...
}: let
passwords = import ../../../variables/passwords.crypt.nix;
keys = import ../../../variables/keys.nix;
inherit (import ../../lib/default.nix {}) mkUser;
in {}
_: { }

View file

@ -4,9 +4,12 @@ let
ref = "nixos-21.11";
rev = "386234e2a61e1e8acf94dfa3a3d3ca19a6776efb";
};
in {
in
{
inherit nixpkgs;
nixos = nixpkgs // {suffix = "/nixos";};
nixos = nixpkgs // {
suffix = "/nixos";
};
"channels-nixos-stable" = nixpkgs;
"channels-nixos-unstable" = {

View file

@ -6,9 +6,12 @@ let
<% git ls-remote https://github.com/nixos/nixpkgs nixos-21.11 | awk '{ print $1 }' | tr -d '
' -%>'';
};
in {
in
{
inherit nixpkgs;
nixos = nixpkgs // {suffix = "/nixos";};
nixos = nixpkgs // {
suffix = "/nixos";
};
"channels-nixos-stable" = nixpkgs;
"channels-nixos-unstable" = {

View file

@ -1,4 +1,5 @@
{lib, ...}: {
{ lib, ... }:
{
boot.loader.grub.efiInstallAsRemovable = lib.mkForce true;
boot.loader.efi.canTouchEfiVariables = lib.mkForce false;
}

View file

@ -1,4 +1,5 @@
{...}: {
{ ... }:
{
imports = [
../../profiles/common/configuration.nix
../../modules/opinionatedDisk.nix

View file

@ -1,5 +1,4 @@
{...}: let
in {
_: {
# TASK: new device
hardware.opinionatedDisk = {
enable = true;

View file

@ -1,17 +1,17 @@
{pkgs, ...}: {
nixpkgs.config.packageOverrides = pkgs:
with pkgs; {
nixPath =
(import ../../../default.nix {
versionsPath = ./versions.nix;
})
.nixPath;
{ pkgs, ... }:
{
nixpkgs.config.packageOverrides =
pkgs: with pkgs; {
inherit ((import ../../../default.nix { versionsPath = ./versions.nix; })) nixPath;
};
home-manager.users.steveej = import ../../../home-manager/configuration/text-minimal.nix {
inherit pkgs;
};
environment.systemPackages = with pkgs; [iw wirelesstools];
environment.systemPackages = with pkgs; [
iw
wirelesstools
];
system.stateVersion = "21.11";
}

View file

@ -1,13 +1,8 @@
{
pkgs,
lib,
config,
utils,
...
}: let
keys = import ../../../variables/keys.nix;
{ pkgs, lib, ... }:
let
passwords = import ../../../variables/passwords.crypt.nix;
in {
in
{
# TASK: new device
networking.hostName = "fwhost2"; # Define your hostname.
@ -22,11 +17,14 @@ in {
networking.firewall.logRefusedConnections = false;
networking.usePredictableInterfaceNames = false;
networking.bridges.breth.interfaces = ["eth0" "eth1"];
networking.bridges.breth.interfaces = [
"eth0"
"eth1"
];
networking.bridges.breth.rstp = true;
networking.defaultGateway.address = "172.172.171.10";
networking.nameservers = ["172.172.171.10"];
networking.nameservers = [ "172.172.171.10" ];
# WAN interfaces, currently unused because the OPNsense guest acts as a router.
networking.vlans.wan1.id = 3;

View file

@ -1,12 +1,4 @@
{
config,
pkgs,
...
}: let
passwords = import ../../../variables/passwords.crypt.nix;
keys = import ../../../variables/keys.nix;
inherit (import ../../lib/default.nix {inherit (pkgs) lib;}) mkUser;
in {
_: {
# users.extraUsers.steveej2 = mkUser {
# uid = 1001;
# openssh.authorizedKeys.keys = keys.users.steveej.openssh;

View file

@ -4,9 +4,12 @@ let
ref = "nixos-21.11";
rev = "386234e2a61e1e8acf94dfa3a3d3ca19a6776efb";
};
in {
in
{
inherit nixpkgs;
nixos = nixpkgs // {suffix = "/nixos";};
nixos = nixpkgs // {
suffix = "/nixos";
};
"channels-nixos-stable" = nixpkgs;
"channels-nixos-unstable" = {

View file

@ -6,9 +6,12 @@ let
<% git ls-remote https://github.com/nixos/nixpkgs nixos-21.11 | awk '{ print $1 }' | tr -d '
' -%>'';
};
in {
in
{
inherit nixpkgs;
nixos = nixpkgs // {suffix = "/nixos";};
nixos = nixpkgs // {
suffix = "/nixos";
};
"channels-nixos-stable" = nixpkgs;
"channels-nixos-unstable" = {

View file

@ -1,7 +1,6 @@
## bootstrapping
```
# TODO: generate an SSH host-key and deploy it via --extra-files
nixos-anywhere --flake .\#sj-bm-hostkey0 root@185.130.227.252
```
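
A minimal sketch of the host-key workflow the TODO above asks for, assuming nixos-anywhere's `--extra-files` option copies the given directory tree onto the target's root filesystem; the staging paths and key comment are illustrative only:

```bash
# Sketch: pre-generate the SSH host key and ship it with the installation,
# so the machine keeps a stable host identity across reinstalls.
set -euo pipefail

extra=$(mktemp -d)
install -d -m 0755 "$extra/etc/ssh"

# Generate an ed25519 host key pair into the staging tree (paths are assumptions).
ssh-keygen -t ed25519 -N "" -C "sj-bm-hostkey0" \
  -f "$extra/etc/ssh/ssh_host_ed25519_key"

# nixos-anywhere copies the contents of --extra-files onto the installed system.
nixos-anywhere --flake '.#sj-bm-hostkey0' \
  --extra-files "$extra" \
  root@185.130.227.252

rm -rf "$extra"
```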

View file

@ -1,17 +1,14 @@
{
modulesPath,
repoFlake,
packages',
pkgs,
lib,
config,
nodeFlake,
nodeName,
system,
...
}: {
disabledModules = [
];
}:
{
disabledModules = [ ];
imports = [
nodeFlake.inputs.disko.nixosModules.disko
@ -28,9 +25,7 @@
}
../../snippets/nix-settings.nix
{
nix.settings.sandbox = lib.mkForce "relaxed";
}
{ nix.settings.sandbox = lib.mkForce "relaxed"; }
../../snippets/mycelium.nix
@ -80,60 +75,58 @@
nat.enable = true;
firewall.enable = true;
firewall.allowedTCPPorts = [
5201
];
firewall.allowedUDPPorts = [
5201
];
firewall.allowedTCPPorts = [ 5201 ];
firewall.allowedUDPPorts = [ 5201 ];
};
disko.devices = let
disk = id: {
type = "disk";
device = "/dev/${id}";
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
};
mdadm = {
size = "100%";
content = {
type = "mdraid";
name = "raid0";
disko.devices =
let
disk = id: {
type = "disk";
device = "/dev/${id}";
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
};
mdadm = {
size = "100%";
content = {
type = "mdraid";
name = "raid0";
};
};
};
};
};
};
in {
disk = {
sda = disk "sda";
sdb = disk "sdb";
};
mdadm = {
raid0 = {
type = "mdadm";
level = 0;
content = {
type = "gpt";
partitions = {
primary = {
size = "100%";
content = {
type = "filesystem";
format = "btrfs";
mountpoint = "/";
in
{
disk = {
sda = disk "sda";
sdb = disk "sdb";
};
mdadm = {
raid0 = {
type = "mdadm";
level = 0;
content = {
type = "gpt";
partitions = {
primary = {
size = "100%";
content = {
type = "filesystem";
format = "btrfs";
mountpoint = "/";
};
};
};
};
};
};
};
};
system.stateVersion = "24.05";
@ -149,7 +142,5 @@
virtualisation.libvirtd.enable = true;
boot.binfmt.emulatedSystems = [
"aarch64-linux"
];
boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
}

View file

@ -3,19 +3,22 @@
repoFlake,
nodeFlake,
...
}: let
}:
let
system = "x86_64-linux";
in {
in
{
meta.nodeSpecialArgs.${nodeName} = {
inherit repoFlake nodeName nodeFlake system;
inherit
repoFlake
nodeName
nodeFlake
system
;
packages' = repoFlake.packages.${system};
};
meta.nodeNixpkgs.${nodeName} =
import nodeFlake.inputs.nixpkgs.outPath
{
inherit system;
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath { inherit system; };
${nodeName} = {
deployment.targetHost = "185.130.224.33";

View file

@ -16,38 +16,37 @@
# outputs = _: {};
outputs = {
self,
get-flake,
nixpkgs,
...
} @ attrs: let
system = "x86_64-linux";
nodeName = "hostkey-0";
outputs =
{
self,
get-flake,
nixpkgs,
...
}:
let
system = "x86_64-linux";
nodeName = "hostkey-0";
mkNixosConfiguration = {extraModules ? [], ...} @ attrs:
nixpkgs.lib.nixosSystem (
nixpkgs.lib.attrsets.recursiveUpdate
attrs
mkNixosConfiguration =
{
specialArgs = {
nodeFlake = self;
repoFlake = get-flake ../../../..;
inherit nodeName;
};
extraModules ? [ ],
...
}@attrs:
nixpkgs.lib.nixosSystem (
nixpkgs.lib.attrsets.recursiveUpdate attrs {
specialArgs = {
nodeFlake = self;
repoFlake = get-flake ../../../..;
inherit nodeName;
};
modules =
[
./configuration.nix
]
++ extraModules;
}
);
in {
nixosConfigurations = {
native = mkNixosConfiguration {
inherit system;
modules = [ ./configuration.nix ] ++ extraModules;
}
);
in
{
nixosConfigurations = {
native = mkNixosConfiguration { inherit system; };
};
};
};
}

View file

@ -1,16 +1,24 @@
{
"enabled": 1,
"hidden": false,
"description": "Jobsets",
"nixexprinput": "src",
"nixexprpath": "default.nix",
"checkinterval": 300,
"schedulingshares": 100,
"enableemail": false,
"emailoverride": "",
"keepnr": 3,
"inputs": {
"src": { "type": "git", "value": "git://github.com/shlevy/declarative-hydra-example.git", "emailresponsible": false },
"nixpkgs": { "type": "git", "value": "git://github.com/NixOS/nixpkgs.git release-16.03", "emailresponsible": false }
"enabled": 1,
"hidden": false,
"description": "Jobsets",
"nixexprinput": "src",
"nixexprpath": "default.nix",
"checkinterval": 300,
"schedulingshares": 100,
"enableemail": false,
"emailoverride": "",
"keepnr": 3,
"inputs": {
"src": {
"type": "git",
"value": "git://github.com/shlevy/declarative-hydra-example.git",
"emailresponsible": false
},
"nixpkgs": {
"type": "git",
"value": "git://github.com/NixOS/nixpkgs.git release-16.03",
"emailresponsible": false
}
}
}

View file

@ -1,4 +1,5 @@
{lib, ...}: {
{ lib, ... }:
{
boot.loader.grub.efiInstallAsRemovable = lib.mkForce false;
boot.loader.efi.canTouchEfiVariables = lib.mkForce false;
boot.loader.grub.efiSupport = lib.mkForce false;

View file

@ -1,4 +1,5 @@
{...}: {
{ ... }:
{
imports = [
../../profiles/common/configuration.nix
../../profiles/graphical/configuration.nix

View file

@ -3,17 +3,17 @@
repoFlake,
nodeFlake,
...
}: let
}:
let
system = "x86_64-linux";
in {
in
{
meta.nodeSpecialArgs.${nodeName} = {
inherit repoFlake nodeName nodeFlake;
packages' = repoFlake.packages.${system};
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath {
inherit system;
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath { inherit system; };
${nodeName} = {
deployment.targetHost = nodeName;

View file

@ -6,8 +6,8 @@
inputs.nixpkgs.follows = "nixpkgs";
};
inputs.disko.url = github:nix-community/disko;
inputs.disko.url = "github:nix-community/disko";
inputs.disko.inputs.nixpkgs.follows = "nixpkgs";
outputs = _: {};
outputs = _: { };
}

View file

@ -1,12 +1,6 @@
{ nodeFlake, ... }:
{
repoFlake,
nodeFlake,
lib,
...
}: {
imports = [
nodeFlake.inputs.disko.nixosModules.disko
];
imports = [ nodeFlake.inputs.disko.nixosModules.disko ];
disko.devices.disk.sda = {
device = "/dev/sda";
@ -20,7 +14,7 @@
start = "0";
end = "1M";
part-type = "primary";
flags = ["bios_grub"];
flags = [ "bios_grub" ];
}
{
name = "root";
@ -30,14 +24,14 @@
bootable = true;
content = {
type = "btrfs";
extraArgs = ["-f"]; # Override existing partition
extraArgs = [ "-f" ]; # Override existing partition
subvolumes = {
# Subvolume name is different from mountpoint
"/rootfs" = {
mountpoint = "/";
};
"/nix" = {
mountOptions = ["noatime"];
mountOptions = [ "noatime" ];
};
};
};

View file

@ -3,7 +3,8 @@
lib,
packages',
...
}: let
}:
let
homeEnv = keyboard: {
imports = [
../../../home-manager/profiles/common.nix
@ -23,15 +24,19 @@
rustdesk
];
};
in {
services.gnome = builtins.mapAttrs (attr: value: lib.mkForce value) {
in
{
services.gnome = builtins.mapAttrs (_attr: value: lib.mkForce value) {
gnome-remote-desktop.enable = true;
};
services.printing.drivers = lib.mkForce (with packages'; [
dcpj4110dwDriver
dcpj4110dwCupswrapper
]);
services.printing.drivers = lib.mkForce (
with packages';
[
dcpj4110dwDriver
dcpj4110dwCupswrapper
]
);
services.printing.extraConf = ''
LogLevel debug
@ -39,31 +44,29 @@ in {
home-manager.users.steveej = homeEnv {
layout = "en";
options = ["nodeadkey"];
options = [ "nodeadkey" ];
variant = "altgr-intl";
};
home-manager.users.elias = homeEnv {
layout = "de";
options = [];
options = [ ];
variant = "";
};
home-manager.users.justyna =
lib.attrsets.recursiveUpdate
(homeEnv {
layout = "de";
options = [];
variant = "";
})
{
services.syncthing.enable = true;
services.syncthing.tray = true;
(homeEnv {
layout = "de";
options = [ ];
variant = "";
})
{
services.syncthing.enable = true;
services.syncthing.tray = true;
home.packages = with pkgs; [
session-desktop
];
};
home.packages = with pkgs; [ session-desktop ];
};
system.stateVersion = "21.11";
}

View file

@ -1,11 +1,8 @@
{
pkgs,
lib,
config,
...
}: let
{ pkgs, lib, ... }:
let
passwords = import ../../../variables/passwords.crypt.nix;
in {
in
{
networking.firewall.enable = true;
networking.firewall.allowedTCPPorts = [
# iperf3
@ -39,11 +36,13 @@ in {
# udev.packages = [ pkgs.gnome3.gnome-settings-daemon ];
};
security.pki.certificateFiles = ["${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"];
security.pki.certificateFiles = [ "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" ];
services.xserver.videoDrivers = ["modesetting"];
services.xserver.videoDrivers = [ "modesetting" ];
boot.kernelPackages = lib.mkForce pkgs.linuxPackages_latest;
nix.gc = {automatic = true;};
nix.gc = {
automatic = true;
};
}

View file

@ -1,11 +1,9 @@
{
config,
pkgs,
...
}: let
{ config, pkgs, ... }:
let
keys = import ../../../variables/keys.nix;
inherit (pkgs.callPackage ../../lib/default.nix {}) mkUser;
in {
inherit (pkgs.callPackage ../../lib/default.nix { }) mkUser;
in
{
sops.secrets.sharedUsers-elias = {
sopsFile = ../../../../secrets/shared-users.yaml;
neededForUsers = true;

File diff suppressed because it is too large

View file

@ -5,25 +5,24 @@
nodeFlake,
localDomainName ? "internal",
...
}: {
}:
{
meta.nodeSpecialArgs.${nodeName} = {
inherit repoFlake nodeName nodeFlake system;
inherit
repoFlake
nodeName
nodeFlake
system
;
packages' = repoFlake.packages.${system};
nodePackages' = nodeFlake.packages.${system};
inherit
(nodeFlake.inputs.bpir3.packages.${system})
armTrustedFirmwareMT7986
;
inherit (nodeFlake.inputs.bpir3.packages.${system}) armTrustedFirmwareMT7986;
inherit localDomainName;
};
meta.nodeNixpkgs.${nodeName} =
import nodeFlake.inputs.nixpkgs.outPath
{
inherit system;
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath { inherit system; };
${nodeName} = {
deployment.targetHost = "${nodeName}.${localDomainName}";

View file

@ -18,8 +18,8 @@
# "github:steveej-forks/nakato_nixos-sbc/kernel-6.9_and_cross-compile"
# "github:steveej-forks/nakato_nixos-sbc/kernel-6.10_and_cross-compile"
"github:steveej-forks/nakato_nixos-sbc/kernel-6.10_and_cross-compile_mtkbump"
# "git+file:///home/steveej/src/others/nakato_nixos-sbc/"
;
# "git+file:///home/steveej/src/others/nakato_nixos-sbc/"
;
nixos-sbc.inputs.nixpkgs.follows = "nixpkgs";
nixos-nftables-firewall.url = "github:thelegy/nixos-nftables-firewall";
@ -39,43 +39,34 @@
# };
};
outputs = {
self,
get-flake,
nixpkgs,
nixos-sbc,
...
}: let
nativeSystem = "aarch64-linux";
nodeName = "router0-dmz0";
outputs =
{
self,
get-flake,
nixpkgs,
...
}:
let
nativeSystem = "aarch64-linux";
nodeName = "router0-dmz0";
pkgs = nixpkgs.legacyPackages.${nativeSystem};
pkgsCross = import self.inputs.nixpkgs {
system = "x86_64-linux";
crossSystem = {
config = "aarch64-unknown-linux-gnu";
};
};
mkNixosConfiguration = {extraModules ? [], ...} @ attrs:
nixpkgs.lib.nixosSystem (
nixpkgs.lib.attrsets.recursiveUpdate
attrs
mkNixosConfiguration =
{
specialArgs =
(import ./default.nix {
system = nativeSystem;
inherit nodeName;
extraModules ? [ ],
...
}@attrs:
nixpkgs.lib.nixosSystem (
nixpkgs.lib.attrsets.recursiveUpdate attrs {
specialArgs =
(import ./default.nix {
system = nativeSystem;
inherit nodeName;
repoFlake = get-flake ../../../..;
nodeFlake = self;
})
.meta
.nodeSpecialArgs
.${nodeName};
repoFlake = get-flake ../../../..;
nodeFlake = self;
}).meta.nodeSpecialArgs.${nodeName};
modules =
[
modules = [
./configuration.nix
# flake registry
@ -83,34 +74,30 @@
nixpkgs.overlays = builtins.attrValues self.overlays;
nix.registry.nixpkgs.flake = nixpkgs;
}
]
++ extraModules;
}
);
in {
nixosConfigurations = {
native = mkNixosConfiguration {
system = nativeSystem;
};
cross = mkNixosConfiguration {
extraModules = [
{
nixpkgs.buildPlatform.system = "x86_64-linux";
nixpkgs.hostPlatform.system = nativeSystem;
] ++ extraModules;
}
];
};
};
);
in
{
nixosConfigurations = {
native = mkNixosConfiguration { system = nativeSystem; };
overlays.default = final: previous: {
hostapd = previous.hostapd.overrideDerivation (attrs: {
patches =
attrs.patches
++ [
cross = mkNixosConfiguration {
extraModules = [
{
nixpkgs.buildPlatform.system = "x86_64-linux";
nixpkgs.hostPlatform.system = nativeSystem;
}
];
};
};
overlays.default = _final: previous: {
hostapd = previous.hostapd.overrideDerivation (attrs: {
patches = attrs.patches ++ [
"${self.inputs.openwrt}/package/network/services/hostapd/patches/710-vlan_no_bridge.patch"
];
});
});
};
};
};
}

View file

@ -5,11 +5,11 @@
config,
nodeFlake,
nodeName,
localDomainName,
system,
variables,
...
}: {
}:
{
system.stateVersion = "24.05";
imports = [
@ -48,7 +48,7 @@
boot.loader.grub.efiSupport = false;
# forcing seems required or else there's an error about duplicated devices
boot.loader.grub.devices = lib.mkForce ["/dev/vda"];
boot.loader.grub.devices = lib.mkForce [ "/dev/vda" ];
disko.devices.disk.vda = {
device = "/dev/vda";
@ -64,14 +64,14 @@
size = "100%";
content = {
type = "btrfs";
extraArgs = ["-f"]; # Override existing partition
extraArgs = [ "-f" ]; # Override existing partition
subvolumes = {
# Subvolume name is different from mountpoint
"/rootfs" = {
mountpoint = "/";
};
"/nix" = {
mountOptions = ["noatime"];
mountOptions = [ "noatime" ];
mountpoint = "/nix";
};
"/boot" = {
@ -156,9 +156,7 @@
interface = "eth0";
address = variables.ipv4gateway;
};
nameservers = [
variables.ipv4dns
];
nameservers = [ variables.ipv4dns ];
# these will be configured via nftables
nat.enable = lib.mkForce false;
@ -176,17 +174,20 @@
snippets.nnf-common.enable = true;
zones.wan = {
interfaces = ["eth0"];
interfaces = [ "eth0" ];
};
zones.vpn = {
interfaces = ["wg0" "wg1"];
interfaces = [
"wg0"
"wg1"
];
};
rules = {
to-fw = {
from = "all";
to = ["fw"];
to = [ "fw" ];
verdict = "drop";
allowedTCPPorts = [
@ -202,8 +203,8 @@
};
vpn-to-wan-nat = {
from = ["vpn"];
to = ["wan"];
from = [ "vpn" ];
to = [ "wan" ];
masquerade = true;
verdict = "accept";
};
@ -283,9 +284,7 @@
systemd.network.networks.wg0 = {
enable = true;
matchConfig.Name = "wg0";
address = [
"10.0.1.0/31"
];
address = [ "10.0.1.0/31" ];
routes = [
{
@ -299,9 +298,7 @@
systemd.network.networks.wg1 = {
enable = true;
matchConfig.Name = "wg1";
address = [
"10.0.1.2/31"
];
address = [ "10.0.1.2/31" ];
routes = [
{

View file

@ -4,20 +4,24 @@
repoFlake,
nodeFlake,
...
}: let
}:
let
variables = import ./variables.crypt.nix;
in {
in
{
meta.nodeSpecialArgs.${nodeName} = {
inherit repoFlake nodeName nodeFlake system variables;
inherit
repoFlake
nodeName
nodeFlake
system
variables
;
packages' = repoFlake.packages.${system};
nodePackages' = nodeFlake.packages.${system};
};
meta.nodeNixpkgs.${nodeName} =
import nodeFlake.inputs.nixpkgs.outPath
{
inherit system;
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath { inherit system; };
${nodeName} = {
deployment.targetHost = variables.ipv4;

View file

@ -15,5 +15,5 @@
nixos-nftables-firewall.inputs.nixpkgs.follows = "nixpkgs";
};
outputs = _: {};
outputs = _: { };
}

View file

@ -5,11 +5,11 @@
config,
nodeFlake,
nodeName,
localDomainName,
system,
variables,
...
}: {
}:
{
system.stateVersion = "23.11";
imports = [
@ -48,7 +48,7 @@
boot.loader.grub.efiSupport = false;
# forcing seems required or else there's an error about duplicated devices
boot.loader.grub.devices = lib.mkForce ["/dev/vda"];
boot.loader.grub.devices = lib.mkForce [ "/dev/vda" ];
disko.devices.disk.vda = {
device = "/dev/vda";
@ -64,14 +64,14 @@
size = "100%";
content = {
type = "btrfs";
extraArgs = ["-f"]; # Override existing partition
extraArgs = [ "-f" ]; # Override existing partition
subvolumes = {
# Subvolume name is different from mountpoint
"/rootfs" = {
mountpoint = "/";
};
"/nix" = {
mountOptions = ["noatime"];
mountOptions = [ "noatime" ];
mountpoint = "/nix";
};
"/boot" = {
@ -156,9 +156,7 @@
interface = "eth0";
address = variables.ipv4gateway;
};
nameservers = [
variables.ipv4dns
];
nameservers = [ variables.ipv4dns ];
# these will be configured via nftables
nat.enable = lib.mkForce false;
@ -176,17 +174,20 @@
snippets.nnf-common.enable = true;
zones.wan = {
interfaces = ["eth0"];
interfaces = [ "eth0" ];
};
zones.vpn = {
interfaces = ["wg0" "wg1"];
interfaces = [
"wg0"
"wg1"
];
};
rules = {
to-fw = {
from = "all";
to = ["fw"];
to = [ "fw" ];
verdict = "drop";
allowedTCPPorts = [
@ -202,8 +203,8 @@
};
vpn-to-wan-nat = {
from = ["vpn"];
to = ["wan"];
from = [ "vpn" ];
to = [ "wan" ];
masquerade = true;
verdict = "accept";
};
@ -283,9 +284,7 @@
systemd.network.networks.wg0 = {
enable = true;
matchConfig.Name = "wg0";
address = [
"10.0.0.0/31"
];
address = [ "10.0.0.0/31" ];
routes = [
{
@ -299,9 +298,7 @@
systemd.network.networks.wg1 = {
enable = true;
matchConfig.Name = "wg1";
address = [
"10.0.0.2/31"
];
address = [ "10.0.0.2/31" ];
routes = [
{

View file

@ -4,20 +4,24 @@
repoFlake,
nodeFlake,
...
}: let
}:
let
variables = import ./variables.crypt.nix;
in {
in
{
meta.nodeSpecialArgs.${nodeName} = {
inherit repoFlake nodeName nodeFlake system variables;
inherit
repoFlake
nodeName
nodeFlake
system
variables
;
packages' = repoFlake.packages.${system};
nodePackages' = nodeFlake.packages.${system};
};
meta.nodeNixpkgs.${nodeName} =
import nodeFlake.inputs.nixpkgs.outPath
{
inherit system;
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath { inherit system; };
${nodeName} = {
deployment.targetHost = variables.ipv4;

View file

@ -15,5 +15,5 @@
nixos-nftables-firewall.inputs.nixpkgs.follows = "nixpkgs";
};
outputs = _: {};
outputs = _: { };
}

View file

@ -1,3 +1 @@
{lib, ...}: {
boot.extraModulePackages = [];
}
_: { boot.extraModulePackages = [ ]; }

View file

@ -1,10 +1,6 @@
{ nodeName, config, ... }:
{
nodeName,
config,
pkgs,
...
}: {
disabledModules = [];
disabledModules = [ ];
imports = [
../../profiles/common/configuration.nix
{

View file

@ -3,17 +3,17 @@
repoFlake,
nodeFlake,
...
}: let
}:
let
system = "x86_64-linux";
in {
in
{
meta.nodeSpecialArgs.${nodeName} = {
inherit repoFlake nodeName nodeFlake;
packages' = repoFlake.packages.${system};
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath {
inherit system;
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath { inherit system; };
${nodeName} = {
deployment.targetHost = "${nodeName}.dmz.internal";

View file

@ -12,5 +12,5 @@
inputs.nixpkgs_forgejo.url = "github:NixOS/nixpkgs/af4ac075a3e97cb239078e187112afdf380cd47b";
# nixpkgs_forgejo.url = "github:steveej-forks/nixpkgs/9c3519ab3beb11b8d997281f8922330f707df419";
outputs = _: {};
outputs = _: { };
}

View file

@ -1,4 +1,5 @@
{...}: let
_:
let
stage1Modules = [
"virtio_balloon"
"virtio_scsi"
@ -38,7 +39,8 @@
"cdc_ether"
"uas"
];
in {
in
{
hardware.opinionatedDisk = {
enable = true;
encrypted = false;

View file

@ -6,29 +6,29 @@
nodeFlake,
nodeName,
...
}: let
}:
let
hostBridgeAddress = "192.168.101.1";
in {
in
{
imports = [
../../snippets/systemd-resolved.nix
{
# make sure it uses the DNS that comes in via DHCP
networking.nameservers = lib.mkForce [];
networking.nameservers = lib.mkForce [ ];
services.resolved.enable = true;
# provide DNS to the containers
services.resolved.extraConfig = ''
DNSStubListenerExtra=${hostBridgeAddress}
'';
networking.firewall.interfaces.br0.allowedTCPPorts = [53];
networking.firewall.interfaces.br0.allowedUDPPorts = [53];
networking.firewall.interfaces.br0.allowedTCPPorts = [ 53 ];
networking.firewall.interfaces.br0.allowedUDPPorts = [ 53 ];
}
];
programs.wireshark.enable = true;
environment.systemPackages = [
pkgs.dnsutils
];
environment.systemPackages = [ pkgs.dnsutils ];
networking.firewall.enable = true;
networking.nftables.enable = true;
@ -48,13 +48,13 @@ in {
networking.nat = {
enable = true;
internalInterfaces = ["br0"];
internalInterfaces = [ "br0" ];
externalInterface = "dmz0";
};
networking.bridges = {
br0 = {
interfaces = [];
interfaces = [ ];
};
};
networking.interfaces = {
@ -89,9 +89,7 @@ in {
networkConfig.LinkLocalAddressing = "no";
# TODO: i'm not sure if and if so why this is required
macvlan = [
"dmz0"
];
macvlan = [ "dmz0" ];
DHCP = "no";
};
@ -111,45 +109,49 @@ in {
};
# virtualization
virtualisation = {docker.enable = false;};
virtualisation = {
docker.enable = false;
};
nix.gc = {automatic = true;};
nix.gc = {
automatic = true;
};
sops.secrets.restic-password.sopsFile = ../../../../secrets/${nodeName}/secrets.yaml;
# adapted from https://github.com/lilyinstarlight/foosteros/blob/5c75ded111878970fd4f600c7adc013f971d5e71/config/restic.nix
services.restic.backups.${nodeName} = let
btrfs = "${pkgs.btrfs-progs}/bin/btrfs";
in {
initialize = true;
repository = "sftp://u217879-sub3@u217879-sub3.your-storagebox.de:23/restic/${nodeName}";
services.restic.backups.${nodeName} =
let
btrfs = "${pkgs.btrfs-progs}/bin/btrfs";
in
{
initialize = true;
repository = "sftp://u217879-sub3@u217879-sub3.your-storagebox.de:23/restic/${nodeName}";
paths = [
"/backup"
];
paths = [ "/backup" ];
pruneOpts = [
"--keep-daily 7"
"--keep-weekly 5"
"--keep-monthly 12"
"--keep-yearly 2"
];
pruneOpts = [
"--keep-daily 7"
"--keep-weekly 5"
"--keep-monthly 12"
"--keep-yearly 2"
];
timerConfig = {
OnCalendar = lib.mkDefault "daily";
Persistent = true;
timerConfig = {
OnCalendar = lib.mkDefault "daily";
Persistent = true;
};
passwordFile = config.sops.secrets.restic-password.path;
backupPrepareCommand = ''
${btrfs} su snapshot -r /var/lib/container-volumes /backup/container-volumes
'';
backupCleanupCommand = ''
${btrfs} su delete /backup/container-volumes
'';
};
passwordFile = config.sops.secrets.restic-password.path;
backupPrepareCommand = ''
${btrfs} su snapshot -r /var/lib/container-volumes /backup/container-volumes
'';
backupCleanupCommand = ''
${btrfs} su delete /backup/container-volumes
'';
};
containers = {
mailserver = import ../../containers/mailserver.nix {
specialArgs = {
@ -167,25 +169,23 @@ in {
sievePort = 4190;
};
webserver =
import ../../containers/webserver.nix
{
specialArgs = {
inherit repoFlake nodeFlake;
hostAddress = hostBridgeAddress;
};
autoStart = true;
hostBridge = "br0";
webserver = import ../../containers/webserver.nix {
specialArgs = {
inherit repoFlake nodeFlake;
hostAddress = hostBridgeAddress;
localAddress = "192.168.101.11/24";
httpPort = 80;
httpsPort = 443;
forgejoSshPort = 2222;
};
autoStart = true;
hostBridge = "br0";
hostAddress = hostBridgeAddress;
localAddress = "192.168.101.11/24";
httpPort = 80;
httpsPort = 443;
forgejoSshPort = 2222;
};
syncthing = import ../../containers/syncthing.nix {
specialArgs = {
inherit repoFlake nodeFlake;

View file

@ -1,4 +1,5 @@
{lib, ...}: {
{ lib, ... }:
{
boot.loader.grub.efiSupport = lib.mkForce false;
boot.extraModulePackages = [];
boot.extraModulePackages = [ ];
}

View file

@ -1,10 +1,6 @@
{ nodeName, config, ... }:
{
nodeName,
config,
pkgs,
...
}: {
disabledModules = [];
disabledModules = [ ];
imports = [
../../profiles/common/configuration.nix
{

View file

@ -3,17 +3,17 @@
repoFlake,
nodeFlake,
...
}: let
}:
let
system = "x86_64-linux";
in {
in
{
meta.nodeSpecialArgs.${nodeName} = {
inherit repoFlake nodeName nodeFlake;
packages' = repoFlake.packages.${system};
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath {
inherit system;
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath { inherit system; };
${nodeName} = {
deployment.targetHost = "${nodeName}.infra.stefanjunker.de";

View file

@ -8,5 +8,5 @@
inputs.nixpkgs.follows = "nixpkgs";
};
outputs = _: {};
outputs = _: { };
}

View file

@ -1,4 +1,5 @@
{...}: let
_:
let
stage1Modules = [
"virtio_balloon"
"virtio_scsi"
@ -14,7 +15,8 @@
"pata_acpi"
"ata_generic"
];
in {
in
{
hardware.opinionatedDisk = {
enable = true;
encrypted = false;

View file

@ -1,16 +1,14 @@
{
pkgs,
lib,
config,
repoFlake,
nodeName,
...
}: let
}:
let
wireguardPort = 51820;
in {
imports = [
../../snippets/systemd-resolved.nix
];
in
{
imports = [ ../../snippets/systemd-resolved.nix ];
networking.firewall.enable = true;
networking.nftables.enable = true;
@ -19,9 +17,7 @@ in {
# iperf3
5201
];
networking.firewall.allowedUDPPorts = [
wireguardPort
];
networking.firewall.allowedUDPPorts = [ wireguardPort ];
networking.firewall.logRefusedConnections = false;
@ -38,7 +34,7 @@ in {
"prefixLength" = 29;
}
];
ipv6.addresses = [];
ipv6.addresses = [ ];
};
networking.defaultGateway = {
@ -53,7 +49,10 @@ in {
networking.nat = {
enable = true;
internalInterfaces = ["ve-*" "wg*"];
internalInterfaces = [
"ve-*"
"wg*"
];
externalInterface = "eth0";
};
@ -70,15 +69,12 @@ in {
networking.wireguard.interfaces.wg0 = {
# eth0 MTU (1400) - 80
mtu = 1320;
ips = [
"192.168.99.1/31"
];
listenPort =
wireguardPort;
ips = [ "192.168.99.1/31" ];
listenPort = wireguardPort;
privateKeyFile = config.sops.secrets.wg0-private.path;
peers = [
{
allowedIPs = ["192.168.99.2/32"];
allowedIPs = [ "192.168.99.2/32" ];
publicKey = "O3k4jEdX6jkV1fHP/J8KSH5tvi+n1VvnBTD5na6Naw0=";
presharedKeyFile = config.sops.secrets.wg0-psk-steveej-psk.path;
}
@ -86,14 +82,18 @@ in {
};
# virtualization
virtualisation = {docker.enable = false;};
virtualisation = {
docker.enable = false;
};
services.spice-vdagentd.enable = true;
services.qemuGuest.enable = true;
nix.gc = {automatic = true;};
nix.gc = {
automatic = true;
};
containers = {};
containers = { };
home-manager.users.steveej = import ../../../home-manager/configuration/text-minimal.nix {
inherit pkgs;

View file

@ -1,7 +1,6 @@
## bootstrapping
```
# TODO: generate an SSH host-key and deploy it via --extra-files
nixos-anywhere --flake .\#srv0-dmz0 root@srv0.dmz0.noosphere.life
```
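
A possible follow-up to the TODO above, assuming the host key was pre-generated and shipped via `--extra-files` (as sketched for hostkey-0) and that sops-nix secrets for this host are encrypted to an age recipient derived from that key; only the hostname is taken from the command above, the rest is illustrative:

```bash
# Sketch: after the install, re-pin the host key locally and derive the age
# recipient that sops-nix-style secrets for this host would be encrypted to.
set -euo pipefail

host=srv0.dmz0.noosphere.life

# Replace the stale known_hosts entry for the reinstalled machine.
ssh-keygen -R "$host"
ssh-keyscan -t ed25519 "$host" >> ~/.ssh/known_hosts

# Convert the host's ed25519 public key into an age recipient
# (ssh-to-age is the helper commonly paired with sops-nix);
# ssh-keyscan's informational lines go to stderr, hence the redirect.
ssh-keyscan -t ed25519 "$host" 2>/dev/null | nix run 'nixpkgs#ssh-to-age'
```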

View file

@ -1,14 +1,14 @@
{
modulesPath,
repoFlake,
packages',
pkgs,
config,
...
}: let
}:
let
disk = "/dev/disk/by-id/ata-INTEL_SSDSC2BW240A4_PHDA435602332403GN";
in {
disabledModules = [];
in
{
disabledModules = [ ];
imports = [
repoFlake.inputs.disko.nixosModules.disko
repoFlake.inputs.srvos.nixosModules.server
@ -23,7 +23,7 @@ in {
];
## bare-metal machines
srvos.boot.consoles = ["tty0"];
srvos.boot.consoles = [ "tty0" ];
boot.loader.grub.enable = false;
boot.loader.efi.canTouchEfiVariables = false;
@ -39,7 +39,7 @@ in {
start = "0";
end = "1M";
part-type = "primary";
flags = ["bios_grub"];
flags = [ "bios_grub" ];
}
{
name = "ESP";
@ -60,14 +60,14 @@ in {
bootable = true;
content = {
type = "btrfs";
extraArgs = ["-f"]; # Override existing partition
extraArgs = [ "-f" ]; # Override existing partition
subvolumes = {
# Subvolume name is different from mountpoint
"/rootfs" = {
mountpoint = "/";
};
"/nix" = {
mountOptions = ["noatime"];
mountOptions = [ "noatime" ];
};
};
};
@ -109,7 +109,7 @@ in {
networking.nat = {
enable = true;
internalInterfaces = ["ve-+"];
internalInterfaces = [ "ve-+" ];
externalInterface = "eth0";
};
@ -119,9 +119,11 @@ in {
# virtualization
# virtualisation = {docker.enable = true;};
nix.gc = {automatic = true;};
nix.gc = {
automatic = true;
};
containers = {};
containers = { };
# sops.secrets.holochain-nomad-agent-ca = {
# sopsFile = ../../../../secrets/holochain-infra/nomad.yaml;

View file

@ -3,17 +3,17 @@
repoFlake,
nodeFlake,
...
}: let
}:
let
system = "x86_64-linux";
in {
in
{
meta.nodeSpecialArgs.${nodeName} = {
inherit repoFlake nodeName nodeFlake;
packages' = repoFlake.packages.${system};
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath {
inherit system;
};
meta.nodeNixpkgs.${nodeName} = import nodeFlake.inputs.nixpkgs.outPath { inherit system; };
${nodeName} = {
deployment.targetHost = "srv0.dmz0.noosphere.life";

View file

@ -8,5 +8,5 @@
inputs.nixpkgs.follows = "nixpkgs";
};
outputs = _: {};
outputs = _: { };
}

View file

@ -1,4 +1,4 @@
{lib, ...}: {
_: {
boot.loader.grub.efiSupport = true;
boot.extraModulePackages = [];
boot.extraModulePackages = [ ];
}

View file

@ -1,5 +1,6 @@
{...}: {
disabledModules = [];
{ ... }:
{
disabledModules = [ ];
imports = [
../../profiles/common/configuration.nix
../../modules/opinionatedDisk.nix

View file

@ -1,4 +1,5 @@
{...}: let
_:
let
stage1Modules = [
"aesni_intel"
"kvm-intel"
@ -17,7 +18,8 @@
"xhci_hcd"
"xhci_pci"
];
in {
in
{
# TASK: new device
hardware.opinionatedDisk = {
enable = true;

View file

@ -1,16 +1,8 @@
{ config, pkgs, ... }:
{
config,
pkgs,
lib,
...
}: {
nixpkgs.config.packageOverrides = pkgs:
with pkgs; {
nixPath =
(import ../../../default.nix {
versionsPath = ./versions.nix;
})
.nixPath;
nixpkgs.config.packageOverrides =
pkgs: with pkgs; {
inherit ((import ../../../default.nix { versionsPath = ./versions.nix; })) nixPath;
};
home-manager.users.steveej = import ../../../home-manager/configuration/text-minimal.nix {
inherit pkgs;
@ -20,7 +12,12 @@
{
hostName = "localhost";
system = "x86_64-linux";
supportedFeatures = ["kvm" "nixos-test" "big-parallel" "benchmark"];
supportedFeatures = [
"kvm"
"nixos-test"
"big-parallel"
"benchmark"
];
maxJobs = 4;
}
];

View file

@ -1,11 +1,4 @@
{
pkgs,
lib,
config,
...
}: let
keys = import ../../../variables/keys.nix;
in {
_: {
# TASK: new device
networking.hostName = "srv0"; # Define your hostname.
# networking.domain = "home-ch.stefanjunker.de";
@ -37,7 +30,7 @@ in {
networking.nat = {
enable = true;
internalInterfaces = ["ve-+"];
internalInterfaces = [ "ve-+" ];
externalInterface = "eth0";
};
@ -45,14 +38,20 @@ in {
# services.kubernetes.roles = ["master" "node"];
# virtualization
virtualisation = {docker.enable = true;};
virtualisation = {
docker.enable = true;
};
nix.gc = {automatic = true;};
nix.gc = {
automatic = true;
};
networking.useHostResolvConf = false;
services.resolved = {enable = true;};
services.resolved = {
enable = true;
};
containers = {};
containers = { };
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions

View file

@ -4,7 +4,8 @@ let
ref = "nixos-22.05";
rev = "040c6d8374d090f46ab0e99f1f7c27a4529ecffd";
};
in {
in
{
inherit nixpkgs;
"channels-nixos-stable" = nixpkgs;
"nixpkgs-master" = {

View file

@ -6,7 +6,8 @@ let
<% git ls-remote https://github.com/nixos/nixpkgs nixos-22.05 | awk '{ print $1 }' | tr -d '
' -%>'';
};
in {
in
{
inherit nixpkgs;
"channels-nixos-stable" = nixpkgs;
"nixpkgs-master" = {

View file

@ -1,4 +1,5 @@
{...}: {
{ ... }:
{
imports = [
../../profiles/common/configuration.nix
../../profiles/graphical/configuration.nix

View file

@ -1,4 +1,4 @@
{...}: {
_: {
# TASK: new device
hardware.encryptedDisk = {
enable = true;

View file

@ -1,11 +1,7 @@
{ pkgs, lib, ... }:
{
pkgs,
lib,
...
}: let
in {
services.udev.extraRules = ''SUBSYSTEM=="sgx", MODE="0660", GROUP="sgx"'';
users.groups.sgx = {};
users.groups.sgx = { };
networking.hostName = "steveej-nuc7pjyh-work"; # Define your hostname.
boot.kernelPackages = lib.mkForce pkgs.linuxPackages_sgx_latest;
}

View file

@ -1,12 +1,9 @@
{
config,
pkgs,
...
}: let
passwords = import ../../../variables/passwords.crypt.nix;
{ pkgs, ... }:
let
keys = import ../../../variables/keys.nix;
inherit (import ../../lib/default.nix {inherit (pkgs) lib;}) mkUser;
in {
inherit (import ../../lib/default.nix { inherit (pkgs) lib; }) mkUser;
in
{
users.extraUsers.sjunker = mkUser {
uid = 1001;
openssh.authorizedKeys.keys = keys.users.steveej.openssh;
@ -14,7 +11,7 @@ in {
image = "quay.io/enarx/fedora";
run_args = "-v /dev/sgx:/dev/sgx";
};
extraGroups = ["sgx"];
extraGroups = [ "sgx" ];
subUidRanges = [
{

View file

@ -1,4 +1,5 @@
{lib, ...}: {
{ lib, ... }:
{
boot.loader.grub.efiInstallAsRemovable = lib.mkForce true;
boot.loader.efi.canTouchEfiVariables = lib.mkForce false;
}

View file

@ -1,4 +1,5 @@
{...}: {
{ ... }:
{
imports = [
../../profiles/common/configuration.nix
../../profiles/graphical/configuration.nix

View file

@ -1,4 +1,5 @@
{...}: let
_:
let
stage1Modules = [
"aesni_intel"
"kvm-intel"
@ -7,7 +8,8 @@
"xhci_pci"
"hxci_hcd"
];
in {
in
{
# TASK: new device
hardware.opinionatedDisk = {
enable = true;

View file

@ -1,11 +1,8 @@
{pkgs, ...}: {
nixpkgs.config.packageOverrides = pkgs:
with pkgs; {
nixPath =
(import ../../../default.nix {
versionsPath = ./versions.nix;
})
.nixPath;
{ pkgs, ... }:
{
nixpkgs.config.packageOverrides =
pkgs: with pkgs; {
inherit ((import ../../../default.nix { versionsPath = ./versions.nix; })) nixPath;
};
home-manager.users.steveej = import ../../../home-manager/configuration/graphical-fullblown.nix {
inherit pkgs;

View file

@ -1,11 +1,5 @@
{ pkgs, lib, ... }:
{
pkgs,
lib,
config,
...
}: let
keys = import ../../../variables/keys.nix;
in {
# TASK: new device
networking.hostName = "steveej-pa600"; # Define your hostname.
@ -20,7 +14,11 @@ in {
services.printing = {
enable = true;
drivers = with pkgs; [hplip mfcl3770cdw.driver mfcl3770cdw.cupswrapper];
drivers = with pkgs; [
hplip
mfcl3770cdw.driver
mfcl3770cdw.cupswrapper
];
};
services.fprintd.enable = true;
@ -29,9 +27,9 @@ in {
sudo.fprintAuth = true;
};
security.pki.certificateFiles = ["${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"];
security.pki.certificateFiles = [ "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" ];
services.xserver.videoDrivers = ["modesetting"];
services.xserver.videoDrivers = [ "modesetting" ];
services.xserver.serverFlagsSection = ''
Option "BlankTime" "0"
Option "StandbyTime" "0"

View file

@ -1,12 +1,9 @@
{
config,
pkgs,
...
}: let
passwords = import ../../../variables/passwords.crypt.nix;
{ pkgs, ... }:
let
keys = import ../../../variables/keys.nix;
inherit (import ../../lib/default.nix {inherit (pkgs) lib;}) mkUser;
in {
inherit (import ../../lib/default.nix { inherit (pkgs) lib; }) mkUser;
in
{
users.extraUsers.steveej2 = mkUser {
uid = 1001;
openssh.authorizedKeys.keys = keys.users.steveej.openssh;

View file

@ -4,9 +4,12 @@ let
ref = "nixos-20.09";
rev = "e065200fc90175a8f6e50e76ef10a48786126e1c";
};
in {
in
{
inherit nixpkgs;
nixos = nixpkgs // {suffix = "/nixos";};
nixos = nixpkgs // {
suffix = "/nixos";
};
"channels-nixos-stable" = nixpkgs;
"channels-nixos-unstable" = {
url = "https://github.com/NixOS/nixpkgs/";

View file

@ -6,9 +6,12 @@ let
<% git ls-remote https://github.com/nixos/nixpkgs nixos-20.09 | awk '{ print $1 }' | tr -d '
' -%>'';
};
in {
in
{
inherit nixpkgs;
nixos = nixpkgs // {suffix = "/nixos";};
nixos = nixpkgs // {
suffix = "/nixos";
};
"channels-nixos-stable" = nixpkgs;
"channels-nixos-unstable" = {
url = "https://github.com/NixOS/nixpkgs/";

View file

@ -1,4 +1,5 @@
{...}: {
{ ... }:
{
imports = [
../../profiles/common/configuration.nix
../../profiles/graphical/configuration.nix

View file

@ -1,4 +1,4 @@
{...}: {
_: {
# TASK: new device
hardware.encryptedDisk = {
enable = true;

View file

@ -1,3 +1,3 @@
{...}: {
_: {
networking.hostName = "steveej-rmvbl-mmc-SL32G_0x259093f6"; # Define your hostname.
}

View file

@ -1,11 +1,8 @@
{...}: {
nixpkgs.config.packageOverrides = pkgs:
with pkgs; {
nixPath =
(import ../../../default.nix {
versionsPath = ./versions.nix;
})
.nixPath;
{ ... }:
{
nixpkgs.config.packageOverrides =
pkgs: with pkgs; {
inherit ((import ../../../default.nix { versionsPath = ./versions.nix; })) nixPath;
};
imports = [

View file

@ -1,4 +1,4 @@
{...}: {
_: {
# TASK: new device
hardware.opinionatedDisk.diskId = "usb-SanDisk_Extreme_Pro_12345978EC62-0:0";
hardware.opinionatedDisk.encrypted = true;

View file

@ -1,4 +1,4 @@
{...}: {
_: {
networking.hostName = "steveej-rmvbl-sdep0"; # Define your hostname.
system.stateVersion = "21.05";
}

View file

@ -2,35 +2,33 @@ let
nixpkgs = {
url = "https://github.com/NixOS/nixpkgs/";
ref = "nixos-22.11";
rev = ''
0040164e473509b4aee6aedb3b923e400d6df10b'';
rev = ''0040164e473509b4aee6aedb3b923e400d6df10b'';
};
in {
in
{
inherit nixpkgs;
nixos = nixpkgs // {suffix = "/nixos";};
nixos = nixpkgs // {
suffix = "/nixos";
};
"channels-nixos-stable" = nixpkgs;
"channels-nixos-unstable" = {
url = "https://github.com/NixOS/nixpkgs/";
ref = "nixos-unstable";
rev = ''
d9f759f2ea8d265d974a6e1259bd510ac5844c5d'';
rev = ''d9f759f2ea8d265d974a6e1259bd510ac5844c5d'';
};
"channels-nixos-unstable-small" = {
url = "https://github.com/NixOS/nixpkgs/";
ref = "nixos-unstable-small";
rev = ''
9c34c8adba80180608794cce600b10183b048942'';
rev = ''9c34c8adba80180608794cce600b10183b048942'';
};
"nixpkgs-master" = {
url = "https://github.com/NixOS/nixpkgs/";
ref = "master";
rev = ''
f9adb566707a492bd3d17fee1e223695d939b52a'';
rev = ''f9adb566707a492bd3d17fee1e223695d939b52a'';
};
"home-manager-module" = {
url = "https://github.com/nix-community/home-manager";
ref = "release-22.11";
rev = ''
d6f3ba090ed090ae664ab5bac329654093aae725'';
rev = ''d6f3ba090ed090ae664ab5bac329654093aae725'';
};
}

View file

@ -6,9 +6,12 @@ let
<% git ls-remote https://github.com/nixos/nixpkgs nixos-22.11 | awk '{ print $1 }' | tr -d '
' -%>'';
};
in {
in
{
inherit nixpkgs;
nixos = nixpkgs // {suffix = "/nixos";};
nixos = nixpkgs // {
suffix = "/nixos";
};
"channels-nixos-stable" = nixpkgs;
"channels-nixos-unstable" = {
url = "https://github.com/NixOS/nixpkgs/";

Some files were not shown because too many files have changed in this diff