server/home: init; router/unbound: fix avahi resolver

the new server/home module has the binary cache (nix-serve), hydra, grafana/prometheus metrics, printing, etc
commit db2c8d7c3d (parent 7f5711eb8d)
@@ -228,11 +228,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1689016040,
-        "narHash": "sha256-g2K2WD6wK6lMkV+fjSKfLLapv8nm+XimX+8tB7xh6hc=",
+        "lastModified": 1690432004,
+        "narHash": "sha256-mGK512GjUbTNdy6AorlX6OF3oAZ5GkGWuxEKWj8xpUw=",
         "owner": "chayleaf",
         "repo": "nixos-router",
-        "rev": "6078d93845b70656cfdd0b3932ac7215f6c527c1",
+        "rev": "c86131f52922907d77653d553851f03a8e064071",
         "type": "github"
       },
       "original": {
@@ -103,7 +103,6 @@
     };
     router-emmc = rec {
       system = "aarch64-linux";
-      specialArgs.router-lib = if devNixRt then import /${devPath}/nixos-router/lib.nix { inherit (nixpkgs) lib; } else nixos-router.lib.${system};
       specialArgs.server-config = nixosConfigurations.nixserver.config;
       modules = [
         {
@@ -116,7 +115,6 @@
     };
     router-sd = rec {
       system = "aarch64-linux";
-      specialArgs.router-lib = if devNixRt then import /${devPath}/nixos-router/lib.nix { inherit (nixpkgs) lib; } else nixos-router.lib.${system};
       specialArgs.server-config = nixosConfigurations.nixserver.config;
       modules = [
         {
@@ -11,11 +11,13 @@
 
   nix.settings = {
     trusted-public-keys = [
+      "binarycache.pavluk.org:Vk0ms/vSqoOV2JXeNVOroc8EfilgVxCCUtpCShGIKsQ="
       "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
       "nix-gaming.cachix.org-1:nbjlureqMbRAxR1gJ/f3hxemL9svXaZF/Ees8vCUUs4="
       # "nixpkgs-wayland.cachix.org-1:3lwxaILxMRkVhehr5StQprHdEo4IrE8sRho9R9HOLYA="
     ];
     trusted-substituters = [
+      "https://binarycache.pavluk.org"
       "https://cache.nixos.org"
       "https://nix-gaming.cachix.org"
       # "https://nixpkgs-wayland.cachix.org"
@@ -96,6 +98,6 @@
 
       [input]
       rawMouse=yes
-      escapeKey=KEY_INSERT
+      escapeKey=KEY_RIGHTALT
     '';
 }
@@ -265,8 +265,8 @@ in rec {
   COMMON_CLK_MEDIATEK_FHCTL = yes;
   COMMON_CLK_MT7986 = yes;
   COMMON_CLK_MT7986_ETHSYS = yes;
-  CPU_THERMAL = yes;
-  THERMAL_OF = yes;
+  # CPU_THERMAL = yes;
+  # THERMAL_OF = yes;
   EINT_MTK = yes;
   MEDIATEK_GE_PHY = yes;
   MEDIATEK_WATCHDOG = yes;
@@ -149,7 +149,20 @@
   programs.ccache.enable = true;
   services.sshd.enable = true;
   boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
-  nix.settings.trusted-users = [ "root" config.common.mainUsername ];
+  nix.settings = {
+    trusted-users = [ "root" config.common.mainUsername ];
+    netrc-file = "/secrets/netrc";
+    substituters = [
+      "https://binarycache.pavluk.org"
+      "https://cache.nixos.org/"
+      # "https://nix-community.cachix.org"
+    ];
+    trusted-public-keys = [
+      "binarycache.pavluk.org:Vk0ms/vSqoOV2JXeNVOroc8EfilgVxCCUtpCShGIKsQ="
+      "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
+      # "nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
+    ];
+  };
   services.udev.packages = [
     pkgs.android-udev-rules
   ];
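Note on the netrc-file setting above: nix reads substituter credentials from a standard netrc file, so /secrets/netrc is presumably of the usual form (login and password below are placeholders, not values from this repo):

    machine binarycache.pavluk.org
    login <user>
    password <password>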
@@ -160,5 +173,8 @@
   ];
   documentation.dev.enable = true;
 
-  impermanence.directories = [ /etc/nixos ];
+  impermanence.directories = [
+    /secrets
+    /etc/nixos
+  ];
 }
@@ -7,25 +7,11 @@ let
   cfg = config.server;
 
   hosted-domains =
-    map
-      (prefix: if prefix == null then cfg.domainName else "${prefix}.${cfg.domainName}")
-      [
-        null
-        "dns"
-        "mumble"
-        "mail"
-        "music"
-        "www"
-        "matrix"
-        "search"
-        "git"
-        "cloud"
-        "ns1"
-        "ns2"
-      ];
-
-  unbound-python = pkgs.python3.withPackages (pkgs: with pkgs; [ pydbus dnspython ]);
-
+    builtins.concatLists
+      (builtins.attrValues
+        (builtins.mapAttrs
+          (k: v: [ k ] ++ v.serverAliases)
+          config.services.nginx.virtualHosts));
 in {
   imports = [
     ./options.nix
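The new hosted-domains derives the domain list from the nginx virtual hosts instead of a hand-maintained list. A minimal sketch of how the expression evaluates (the virtualHosts attrset below is made up for illustration):

    let
      # stand-in for config.services.nginx.virtualHosts
      virtualHosts = {
        "example.org" = { serverAliases = [ "www.example.org" ]; };
        "git.example.org" = { serverAliases = [ ]; };
      };
    in builtins.concatLists
      (builtins.attrValues
        (builtins.mapAttrs (k: v: [ k ] ++ v.serverAliases) virtualHosts))
    # => [ "example.org" "www.example.org" "git.example.org" ]

Every server name plus its aliases ends up in one flat list, so adding an nginx vhost automatically extends the host lists used below.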
@@ -33,11 +19,13 @@ in {
     ./fdroid.nix
     ./mumble.nix
     ./mailserver.nix
+    ./home.nix
   ];
 
   system.stateVersion = "22.11";
   impermanence.directories = [
-    { directory = /var/www/${cfg.domainName}; }
+    { directory = /var/www; }
+    { directory = /secrets; mode = "0755"; }
   ];
   networking.useDHCP = true;
   networking.firewall = {
@@ -57,15 +45,17 @@
     allowedUDPPorts = lib.mkIf config.services.unbound.enable [
       # dns
       53 853
+      # quic
+      443
     ];
   };
 
   # UNBOUND
   users.users.${config.common.mainUsername}.extraGroups = lib.mkIf config.services.unbound.enable [ config.services.unbound.group ];
 
-  #networking.resolvconf.extraConfig = ''
-  # name_servers="127.0.0.1 ::1"
-  #'';
+  networking.resolvconf.extraConfig = lib.mkIf config.services.unbound.enable ''
+    name_servers="127.0.0.1 ::1"
+  '';
   services.unbound = {
     enable = false;
     package = pkgs.unbound-with-systemd.override {
@@ -78,64 +68,22 @@
     settings = {
       server = {
         interface = [ "0.0.0.0" "::" ];
-        access-control = [ "${cfg.lanCidrV4} allow" "${cfg.lanCidrV6} allow" ];
+        access-control = [ "0.0.0.0/0 allow" "::/0 allow" ];
         aggressive-nsec = true;
         do-ip6 = true;
-        module-config = ''"validator python iterator"'';
-        local-zone = [
-          ''"local." static''
-        ] ++ (lib.optionals (cfg.localIpV4 != null || cfg.localIpV6 != null) [
-          ''"${cfg.domainName}." typetransparent''
-        ]);
-        local-data = builtins.concatLists (map (domain:
-          lib.optionals (cfg.localIpV4 != null) [
-            ''"${domain}. A ${cfg.localIpV4}"''
-          ] ++ (lib.optionals (cfg.localIpV6 != null) [
-            ''"${domain}. AAAA ${cfg.localIpV6}"''
-          ])) hosted-domains);
       };
-      python.python-script = toString (pkgs.fetchurl {
-        url = "https://raw.githubusercontent.com/NLnetLabs/unbound/a912786ca9e72dc1ccde98d5af7d23595640043b/pythonmod/examples/avahi-resolver.py";
-        sha256 = "0r1iqjf08wrkpzvj6pql1jqa884hbbfy9ix5gxdrkrva09msiqgi";
-      });
       remote-control.control-enable = true;
     };
   };
-  systemd.services.unbound = lib.mkIf config.services.unbound.enable {
-    environment = {
-      MDNS_ACCEPT_NAMES = "^.*\\.local\\.$";
-      PYTHONPATH = "${unbound-python}/${unbound-python.sitePackages}";
-    };
-  };
   # just in case
-  networking.hosts."127.0.0.1" = [ "localhost" ] ++ hosted-domains;
+  networking.hosts."127.0.0.1" = hosted-domains;
+  networking.hosts."::1" = hosted-domains;
-  # CUPS
-  services.printing = {
-    enable = true;
-    allowFrom = [ cfg.lanCidrV4 cfg.lanCidrV6 ];
-    browsing = true;
-    clientConf = ''
-      ServerName ${cfg.domainName}
-    '';
-    defaultShared = true;
-    drivers = [ pkgs.hplip ];
-    startWhenNeeded = false;
-  };
 
   services.postgresql.enable = true;
   services.postgresql.package = pkgs.postgresql_13;
 
   # SSH
-  services.openssh = {
-    enable = true;
-    # settings.PermitRootLogin = false;
-    /*listenAddresses = [{
-      addr = "0.0.0.0";
-    } {
-      addr = "::";
-    }];*/
-  };
+  services.openssh.enable = true;
   services.fail2ban = {
     enable = true;
     ignoreIP = lib.optionals (cfg.lanCidrV4 != "0.0.0.0/0") [ cfg.lanCidrV4 ]
@@ -189,6 +137,7 @@
   services.uwsgi.instance.vassals.searx.pythonPackages = lib.mkForce (self: [ pkgs.searxng self.pytomlpp ]);
 
   services.nginx.virtualHosts."search.${cfg.domainName}" = let inherit (config.services.searx) settings; in {
+    quic = true;
     enableACME = true;
     forceSSL = true;
     # locations."/".proxyPass = "http://${lib.quoteListenAddr settings.server.bind_address}:${toString settings.server.port}";
|
||||||
|
|
||||||
# NGINX
|
# NGINX
|
||||||
services.nginx.enable = true;
|
services.nginx.enable = true;
|
||||||
|
services.nginx.enableReload = true;
|
||||||
|
services.nginx.package = pkgs.nginxQuic;
|
||||||
|
/* DNS over TLS
|
||||||
services.nginx.streamConfig =
|
services.nginx.streamConfig =
|
||||||
let
|
let
|
||||||
inherit (config.security.acme.certs."${cfg.domainName}") directory;
|
inherit (config.security.acme.certs."${cfg.domainName}") directory;
|
||||||
|
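The QUIC changes scattered through this commit follow one pattern per virtual host; a condensed sketch with a placeholder domain (the switch to pkgs.nginxQuic is in the hunk above, and UDP 443 is opened in another hunk of this same commit — HTTP/3 runs over UDP, so TCP 443 alone is not enough):

    { pkgs, ... }: {
      services.nginx.enable = true;
      # an nginx build with HTTP/3 support
      services.nginx.package = pkgs.nginxQuic;
      # QUIC needs UDP 443 open in addition to TCP 443
      networking.firewall.allowedUDPPorts = [ 443 ];
      services.nginx.virtualHosts."example.org" = {
        quic = true;
        enableACME = true;
        forceSSL = true;
      };
    }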
@@ -215,16 +167,40 @@
           ssl_trusted_certificate ${directory}/chain.pem;
           proxy_pass dns;
         }
-      '';
-  services.nginx.commonHttpConfig = "log_format postdata '{\"ip\":\"$remote_addr\",\"time\":\"$time_iso8601\",\"referer\":\"$http_referer\",\"body\":\"$request_body\",\"ua\":\"$http_user_agent\"}';";
-  services.nginx.recommendedTlsSettings = true;
-  services.nginx.recommendedOptimisation = true;
+      '';*/
+  services.nginx.commonHttpConfig =
+    let
+      realIpsFromList = lib.strings.concatMapStringsSep "\n" (x: "set_real_ip_from ${x};");
+      fileToList = x: lib.strings.splitString "\n" (builtins.readFile x);
+      cfipv4 = fileToList (pkgs.fetchurl {
+        url = "https://www.cloudflare.com/ips-v4";
+        sha256 = "0ywy9sg7spafi3gm9q5wb59lbiq0swvf0q3iazl0maq1pj1nsb7h";
+      });
+      cfipv6 = fileToList (pkgs.fetchurl {
+        url = "https://www.cloudflare.com/ips-v6";
+        sha256 = "1ad09hijignj6zlqvdjxv7rjj8567z357zfavv201b9vx3ikk7cy";
+      });
+    in
+    ''
+      log_format postdata '{\"ip\":\"$remote_addr\",\"time\":\"$time_iso8601\",\"referer\":\"$http_referer\",\"body\":\"$request_body\",\"ua\":\"$http_user_agent\"}';
+
+      ${realIpsFromList cfipv4}
+      ${realIpsFromList cfipv6}
+      real_ip_header CF-Connecting-IP;
+    '';
+  # brotli and zstd requires recompilation so I don't enable it
+  # services.nginx.recommendedBrotliSettings = true;
+  # services.nginx.recommendedZstdSettings = true;
   services.nginx.recommendedGzipSettings = true;
+  services.nginx.recommendedOptimisation = true;
   services.nginx.recommendedProxySettings = true;
+  services.nginx.recommendedTlsSettings = true;
 
   # BLOG
-  services.nginx.virtualHosts."${cfg.domainName}" = {
+  services.nginx.virtualHosts.${cfg.domainName} = {
+    quic = true;
     enableACME = true;
+    serverAliases = [ "www.${cfg.domainName}" ];
     forceSSL = true;
     extraConfig = "autoindex on;";
     locations."/".root = "/var/www/${cfg.domainName}/";
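realIpsFromList above is partially applied — concatMapStringsSep takes the separator and the mapping function first, then the list. A small worked example with documentation-range CIDRs in place of the fetched Cloudflare lists:

    let
      lib = import <nixpkgs/lib>;
      realIpsFromList = lib.strings.concatMapStringsSep "\n" (x: "set_real_ip_from ${x};");
    in realIpsFromList [ "203.0.113.0/24" "198.51.100.0/24" ]
    # => "set_real_ip_from 203.0.113.0/24;\nset_real_ip_from 198.51.100.0/24;"

so each fetched Cloudflare range becomes one set_real_ip_from directive in the generated http config.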
@@ -242,13 +218,9 @@
     };
   };
 
-  services.nginx.virtualHosts."www.${cfg.domainName}" = {
-    enableACME = true;
-    globalRedirect = cfg.domainName;
-  };
-
   # GITEA
   services.nginx.virtualHosts."git.${cfg.domainName}" = let inherit (config.services.gitea) settings; in {
+    quic = true;
     enableACME = true;
     forceSSL = true;
     locations."/".proxyPass = "http://${lib.quoteListenAddr settings.server.HTTP_ADDR}:${toString settings.server.HTTP_PORT}";
@@ -290,6 +262,7 @@
 
   # NEXTCLOUD
   services.nginx.virtualHosts."cloud.${cfg.domainName}" = {
+    quic = true;
     enableACME = true;
     forceSSL = true;
   };
@@ -311,26 +284,62 @@
     https = true;
   };
 
-  services.pleroma = {
+  services.akkoma = {
     enable = true;
-    secretConfigFile = "/var/lib/pleroma/secrets.exs";
-    configs = [ ''
-      import Config
-    '' ];
+    config.":pleroma"."Pleroma.Web.Endpoint" = {
+      url = {
+        scheme = "https";
+        host = "pleroma.${cfg.domainName}";
+        port = 443;
+      };
+      secret_key_base._secret = "/secrets/akkoma/secret_key_base";
+      signing_salt._secret = "/secrets/akkoma/signing_salt";
+      live_view.signing_salt._secret = "/secrets/akkoma/live_view_signing_salt";
+    };
+    extraStatic."static/terms-of-service.html" = pkgs.writeText "terms-of-service.html" ''
+      no bigotry kthx
+    '';
+    initDb = {
+      enable = false;
+      username = "pleroma";
+      password._secret = "/secrets/akkoma/postgres_password";
+    };
+    config.":pleroma".":instance" = {
+      name = cfg.domainName;
+      description = "Insert instance description here";
+      email = "webmaster-akkoma@${cfg.domainName}";
+      notify_email = "noreply@${cfg.domainName}";
+      limit = 5000;
+      registrations_open = true;
+    };
+    config.":pleroma"."Pleroma.Repo" = {
+      adapter = (pkgs.formats.elixirConf { }).lib.mkRaw "Ecto.Adapters.Postgres";
+      username = "pleroma";
+      password._secret = "/secrets/akkoma/postgres_password";
+      database = "pleroma";
+      hostname = "localhost";
+    };
+    config.":web_push_encryption".":vapid_details" = {
+      subject = "mailto:webmaster-akkoma@${cfg.domainName}";
+      public_key._secret = "/secrets/akkoma/push_public_key";
+      private_key._secret = "/secrets/akkoma/push_private_key";
+    };
+    config.":joken".":default_signer"._secret = "/secrets/akkoma/joken_signer";
+    nginx = {
+      serverAliases = [ "akkoma.${cfg.domainName}" ];
+      quic = true;
+      enableACME = true;
+      forceSSL = true;
+    };
   };
-  systemd.services.pleroma.path = [ pkgs.exiftool pkgs.gawk ];
-  systemd.services.pleroma.serviceConfig = {
+  systemd.services.akkoma.path = [ pkgs.exiftool pkgs.gawk ];
+  systemd.services.akkoma.serviceConfig = {
     Restart = "on-failure";
   };
-  systemd.services.pleroma.unitConfig = {
+  systemd.services.akkoma.unitConfig = {
     StartLimitIntervalSec = 60;
     StartLimitBurst = 3;
   };
-  services.nginx.virtualHosts."pleroma.${cfg.domainName}" = {
-    enableACME = true;
-    forceSSL = true;
-    locations."/".proxyPass = "http://127.0.0.1:9970";
-  };
 
   /*locations."/dns-query".extraConfig = ''
     grpc_pass grpc://127.0.0.1:53453;
@@ -8,6 +8,13 @@ in {
   impermanence.directories = [
     { directory = /var/lib/fdroid; user = "fdroid"; group = "fdroid"; mode = "0755"; }
   ];
+  services.nginx.virtualHosts."fdroid.${cfg.domainName}" = {
+    quic = true;
+    enableACME = true;
+    forceSSL = true;
+    globalRedirect = cfg.domainName;
+    locations."/repo/".alias = "/var/lib/fdroid/repo/";
+  };
   services.nginx.virtualHosts."${cfg.domainName}" = {
     locations."/fdroid/".alias = "/var/lib/fdroid/repo/";
     locations."/fdroid/repo/".alias = "/var/lib/fdroid/repo/";
system/hosts/nixserver/home.nix (new file, +302)
@@ -0,0 +1,302 @@
+{ config
+, lib
+, pkgs
+, ... }:
+
+let
+  cfg = config.server;
+  synapseMetricsPort = 8009;
+  synapseMetricsAddr = "127.0.0.1";
+  collectListeners = names:
+    map
+      (x: "127.0.0.1:${toString x.port}")
+      (builtins.attrValues
+        (lib.filterAttrs (k: v: builtins.elem k names && v.enable) config.services.prometheus.exporters));
+in {
+  # a bunch of services for personal use not intended for the public
+  services.grafana = {
+    enable = true;
+    settings = {
+      "auth.basic".enabled = false;
+      # nginx login is used so this is fine, hopefully
+      "auth.anonymous" = {
+        enabled = true;
+        # org_role = "Admin";
+      };
+      server.root_url = "https://home.${cfg.domainName}/grafana/";
+      server.domain = "home.${cfg.domainName}";
+      server.http_addr = "127.0.0.1";
+      server.protocol = "socket";
+      security.admin_user = "chayleaf";
+      security.admin_password = "$__file{/secrets/grafana_password_file}";
+      security.secret_key = "$__file{/secrets/grafana_key_file}";
+    };
+  };
+  services.nginx.upstreams.grafana.servers."unix:/${config.services.grafana.settings.server.socket}" = {};
+
+  services.nginx.virtualHosts."home.${cfg.domainName}" = {
+    quic = true;
+    enableACME = true;
+    forceSSL = true;
+    basicAuthFile = "/secrets/home_password";
+    extraConfig = ''
+      satisfy any;
+      ${lib.optionalString (cfg.lanCidrV4 != "0.0.0.0/0") "allow ${cfg.lanCidrV4};"}
+      ${lib.optionalString (cfg.lanCidrV6 != "::/0") "allow ${cfg.lanCidrV6};"}
+      deny all;
+    '';
+    # locations."/.well-known/acme-challenge".extraConfig = "auth_basic off;";
+    locations."/".root = "/var/www/home.${cfg.domainName}/";
+    locations."/grafana/" = {
+      proxyPass = "http://grafana/";
+      proxyWebsockets = true;
+    };
+    locations."/grafana/public/".alias = "${config.services.grafana.settings.server.static_root_path}/";
+  };
+  services.nginx.virtualHosts."hydra.${cfg.domainName}" = {
+    quic = true;
+    enableACME = true;
+    forceSSL = true;
+    basicAuthFile = "/secrets/home_password";
+    extraConfig = ''
+      satisfy any;
+      ${lib.optionalString (cfg.lanCidrV4 != "0.0.0.0/0") "allow ${cfg.lanCidrV4};"}
+      ${lib.optionalString (cfg.lanCidrV6 != "::/0") "allow ${cfg.lanCidrV6};"}
+      deny all;
+    '';
+    locations."/".proxyPass = "http://${lib.quoteListenAddr config.services.hydra.listenHost}:${toString config.services.hydra.port}/";
+    locations."/static/".root = "${config.services.hydra.package}/libexec/hydra/root/";
+  };
+  users.users.nginx.extraGroups = [ "grafana" ];
+
+  services.nix-serve = {
+    enable = true;
+    package = pkgs.nix-serve-ng.override {
+      nix = config.nix.package;
+    };
+    bindAddress = "127.0.0.1";
+    secretKeyFile = "/secrets/cache-priv-key.pem";
+  };
+  nix.settings.allowed-users = [ "nix-serve" "hydra" ];
+  services.nginx.virtualHosts."binarycache.${cfg.domainName}" = {
+    quic = true;
+    enableACME = true;
+    addSSL = true;
+    basicAuthFile = "/secrets/home_password";
+    locations."/".proxyPass = "http://${config.services.nix-serve.bindAddress}:${toString config.services.nix-serve.port}";
+  };
+
+  services.hydra = {
+    enable = true;
+    package = pkgs.hydra_unstable.override {
+      nix = config.nix.package;
+    };
+    hydraURL = "home.${cfg.domainName}/hydra";
+    listenHost = "127.0.0.1";
+    minimumDiskFree = 30;
+    notificationSender = "noreply@${cfg.domainName}";
+    # smtpHost = "mail.${cfg.domainName}";
+    useSubstitutes = true;
+  };
+  systemd.services.nix-daemon = {
+    serviceConfig.CPUQuota = "50%";
+  };
+
+  services.nginx.statusPage = true;
+  services.gitea.settings.metrics.ENABLED = true;
+  services.akkoma.config.":prometheus"."Pleroma.Web.Endpoint.MetricsExporter" = {
+    enabled = true;
+    auth = [ ((pkgs.formats.elixirConf { }).lib.mkRaw ":basic") "prometheus" {
+      _secret = "/secrets/akkoma/prometheus_password";
+    } ];
+    ip_whitelist = ["127.0.0.1"];
+    path = "/api/pleroma/app_metrics";
+    format = (pkgs.formats.elixirConf { }).lib.mkRaw ":text";
+  };
+  services.prometheus = {
+    enable = true;
+    exporters = {
+      node = {
+        enable = true;
+        enabledCollectors = [ "logind" "systemd" ];
+        port = 9101; # cups is 9100
+      };
+      dovecot = {
+        enable = true;
+        scopes = [ "user" "global" ];
+      };
+      nextcloud = {
+        enable = true;
+        url = "https://cloud.${cfg.domainName}";
+        username = "nextcloud-exporter";
+        passwordFile = "/secrets/nextcloud_exporter_password";
+      };
+      nginx = { enable = true; };
+      nginxlog = {
+        enable = true;
+        group = "nginx";
+        settings.namespaces = [
+          {
+            name = "comments";
+            format = "{\"ip\":\"$remote_addr\",\"time\":\"$time_iso8601\",\"referer\":\"$http_referer\",\"body\":\"$request_body\",\"ua\":\"$http_user_agent\"}";
+            source.files = [ "/var/log/nginx/comments.log" ];
+          }
+        ];
+      };
+      postfix = { enable = true; };
+      postgres = { enable = true; };
+      process.enable = true;
+      redis.enable = true;
+      rspamd.enable = true;
+      smartctl.enable = true;
+    };
+    checkConfig = "syntax-only";
+    scrapeConfigs = [
+      {
+        job_name = "local_frequent";
+        scrape_interval = "1m";
+        static_configs = [ {
+          targets = collectListeners [
+            "node"
+            "nginx"
+            "process"
+          ];
+          labels.machine = "server";
+        } ];
+      }
+      {
+        job_name = "local_medium_freq";
+        scrape_interval = "15m";
+        static_configs = [ {
+          targets = [ "127.0.0.1:9548" ];
+          labels.machine = "server";
+        } ];
+      }
+      {
+        job_name = "local_infrequent";
+        scrape_interval = "1h";
+        static_configs = [ {
+          targets = collectListeners [
+            "dovecot"
+            "nextcloud"
+            "nginxlog"
+            "postfix"
+            "postgres"
+            "redis"
+            "rspamd"
+            "smartctl"
+          ];
+          labels.machine = "server";
+        } ];
+      }
+      {
+        job_name = "gitea";
+        bearer_token_file = "/secrets/prometheus_bearer";
+        scrape_interval = "1h";
+        static_configs = [ {
+          targets = [ "git.${cfg.domainName}" ];
+          labels.machine = "server";
+        } ];
+      }
+      {
+        job_name = "router_frequent";
+        scrape_interval = "1m";
+        static_configs = [ {
+          targets = [
+            "retracker.local:9101"
+            "retracker.local:9256"
+            "retracker.local:9167"
+          ];
+          labels.machine = "router";
+        } ];
+      }
+      {
+        job_name = "router_infrequent";
+        scrape_interval = "10m";
+        static_configs = [ {
+          targets = [
+            "retracker.local:9430"
+            "retracker.local:9547"
+          ];
+          labels.machine = "router";
+        } ];
+      }
+      {
+        job_name = "synapse";
+        metrics_path = "/_synapse/metrics";
+        scrape_interval = "15s";
+        static_configs = [ {
+          targets = [ "${lib.quoteListenAddr synapseMetricsAddr}:${toString synapseMetricsPort}" ];
+          labels.machine = "server";
+        } ];
+      }
+      {
+        job_name = "akkoma";
+        metrics_path = "/api/pleroma/app_metrics";
+        scrape_interval = "10m";
+        basic_auth.username = "prometheus";
+        basic_auth.password_file = "/secrets/akkoma/prometheus_password";
+        static_configs = [ {
+          targets = [ "pleroma.${cfg.domainName}" ];
+          labels.machine = "server";
+        } ];
+      }
+    ];
+  };
+  services.matrix-synapse.settings = {
+    enable_metrics = true;
+    federation_metrics_domains = [ "matrix.org" ];
+    /*
+      normally you're supposed to use
+      - port: 9000
+        type: metrics
+        bind_addresses: ['::1', '127.0.0.1']
+
+      but the NixOS module doesn't allow creating such a listener
+    */
+    listeners = [ {
+      port = synapseMetricsPort;
+      bind_addresses = [ synapseMetricsAddr ];
+      type = "metrics";
+      tls = false;
+      resources = [ ];
+    } ];
+  };
+
+  /*
+  # this uses elasticsearch, rip
+  services.parsedmarc = {
+    enable = true;
+    provision = {
+      localMail = {
+        enable = true;
+        hostname = cfg.domainName;
+      };
+      grafana = {
+        datasource = true;
+        dashboard = true;
+      };
+    };
+  };*/
+
+  networking.firewall.allowedTCPPorts = [ 631 9100 ];
+  services.printing = {
+    enable = true;
+    allowFrom = [ cfg.lanCidrV4 cfg.lanCidrV6 ];
+    browsing = true;
+    clientConf = ''
+      ServerName home.${cfg.domainName}
+    '';
+    listenAddresses = [ "*:631" "*:9100" ];
+    defaultShared = true;
+    drivers = [ pkgs.hplip ];
+    startWhenNeeded = false;
+  };
+  services.avahi = {
+    enable = true;
+    hostName = "home";
+    publish.enable = true;
+    publish.addresses = true;
+    publish.userServices = true;
+  };
+}
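A rough illustration of what the collectListeners helper near the top of this new file returns, using a made-up exporters attrset (with plausible ports) in place of config.services.prometheus.exporters:

    let
      lib = import <nixpkgs/lib>;
      exporters = {
        node = { enable = true; port = 9101; };
        nginx = { enable = true; port = 9113; };
        postfix = { enable = false; port = 9154; };
      };
      collectListeners = names:
        map (x: "127.0.0.1:${toString x.port}")
          (builtins.attrValues
            (lib.filterAttrs (k: v: builtins.elem k names && v.enable) exporters));
    in collectListeners [ "node" "nginx" "postfix" ]
    # => [ "127.0.0.1:9113" "127.0.0.1:9101" ]
    # (attrValues orders by attribute name; postfix is dropped because enable = false)

so the scrape targets automatically track whichever of the named exporters are actually enabled.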
@@ -13,7 +13,9 @@ in {
   # roundcube
   # TODO: fix sending mail via roundcube
   services.nginx.virtualHosts."mail.${cfg.domainName}" = {
+    quic = true;
     enableACME = true;
+    forceSSL = true;
   };
   services.roundcube = {
     enable = true;
@@ -34,6 +34,7 @@ in {
   };
 
   services.nginx.virtualHosts."matrix.${cfg.domainName}" = {
+    quic = true;
     enableACME = true;
     forceSSL = true;
     locations = {
@@ -59,8 +60,8 @@ in {
       ];
       allow_guest_access = true;
       url_preview_enabled = true;
-      tls_certificate_path = config.security.acme.certs."matrix.${cfg.domainName}".directory + "/fullchain.pem";
-      tls_private_key_path = config.security.acme.certs."matrix.${cfg.domainName}".directory + "/key.pem";
+      # tls_certificate_path = config.security.acme.certs."matrix.${cfg.domainName}".directory + "/fullchain.pem";
+      # tls_private_key_path = config.security.acme.certs."matrix.${cfg.domainName}".directory + "/key.pem";
       public_baseurl = "https://matrix.${cfg.domainName}/";
       server_name = "matrix.${cfg.domainName}";
       max_upload_size = "100M";
@@ -1,5 +1,4 @@
 { config
-, pkgs
 , lib
 , ... }:
 
@@ -30,6 +29,7 @@ in {
 
   # Mumble music bot
   services.nginx.virtualHosts."mumble.${cfg.domainName}" = let inherit (config.services.botamusique) settings; in {
+    quic = true;
     enableACME = true;
     forceSSL = true;
     globalRedirect = cfg.domainName;
@@ -39,20 +39,6 @@ in {
 
   services.botamusique = {
     enable = true;
-    # TODO: remove after next nixpkgs version bump
-    package = pkgs.botamusique.override {
-      python3Packages = pkgs.python3Packages // {
-        pymumble = pkgs.python3Packages.pymumble.overrideAttrs (old: rec {
-          version = "1.6.1";
-          src = pkgs.fetchFromGitHub {
-            owner = "azlux";
-            repo = "pymumble";
-            rev = "refs/tags/${version}";
-            hash = "sha256-+sT5pqdm4A2rrUcUUmvsH+iazg80+/go0zM1vr9oeuE=";
-          };
-        });
-      };
-    };
     settings = {
       youtube_dl = {
         cookiefile = "/var/lib/private/botamusique/cookie_ydl";
@@ -200,6 +200,7 @@ PROTO_UNSPEC = -1
 NFT_QUERIES = {}
 # dynamic query update token
 NFT_TOKEN = ""
+DOMAIN_NAME_OVERRIDES = {}
 
 sysbus = None
 avahi = None
@@ -263,14 +264,23 @@ class RecordBrowser:
         self.records = []
         self.error = None
         self.getone = getone
+        name1 = DOMAIN_NAME_OVERRIDES.get(name, name)
+        if name1 != name:
+            self.overrides = {
+                name1: name,
+            }
+            if name.endswith('.') and name1.endswith('.'):
+                self.overrides[name1[:-1]] = name[:-1]
+        else:
+            self.overrides = { }
 
         self.timer = None if timeout is None else GLib.timeout_add(timeout, self.timedOut)
 
-        self.browser_path = avahi.RecordBrowserNew(IF_UNSPEC, PROTO_UNSPEC, name, dns.rdataclass.IN, type_, 0)
+        self.browser_path = avahi.RecordBrowserNew(IF_UNSPEC, PROTO_UNSPEC, name1, dns.rdataclass.IN, type_, 0)
         trampoline[self.browser_path] = self
         self.browser = sysbus.get('.Avahi', self.browser_path)
         self.dbg('Created RecordBrowser(name=%s, type=%s, getone=%s, timeout=%s)'
-                 % (name, dns.rdatatype.to_text(type_), getone, timeout))
+                 % (name1, dns.rdatatype.to_text(type_), getone, timeout))
 
     def dbg(self, msg):
         dbg('[%s] %s' % (self.browser_path, msg))
@@ -288,13 +298,13 @@ class RecordBrowser:
 
     def itemNew(self, interface, protocol, name, class_, type_, rdata, flags):
         self.dbg('Got signal ItemNew')
-        self.records.append((name, class_, type_, rdata))
+        self.records.append((self.overrides.get(name, name), class_, type_, rdata))
         if self.getone:
            self._done()
 
     def itemRemove(self, interface, protocol, name, class_, type_, rdata, flags):
         self.dbg('Got signal ItemRemove')
-        self.records.remove((name, class_, type_, rdata))
+        self.records.remove((self.overrides.get(name, name), class_, type_, rdata))
 
     def failure(self, error):
         self.dbg('Got signal Failure')
@@ -490,7 +500,14 @@ def init(*args, **kwargs):
     global MDNS_TTL, MDNS_GETONE, MDNS_TIMEOUT
     global MDNS_REJECT_TYPES, MDNS_ACCEPT_TYPES
     global MDNS_REJECT_NAMES, MDNS_ACCEPT_NAMES
-    global NFT_QUERIES, NFT_TOKEN
+    global NFT_QUERIES, NFT_TOKEN, DOMAIN_NAME_OVERRIDES
 
+    domain_name_overrides = os.environ.get('DOMAIN_NAME_OVERRIDES', '')
+    if domain_name_overrides:
+        for kv in domain_name_overrides.split(';'):
+            k, v = kv.split('->')
+            DOMAIN_NAME_OVERRIDES[k] = v
+            DOMAIN_NAME_OVERRIDES[k + '.'] = v + '.'
+
     NFT_TOKEN = os.environ.get('NFT_TOKEN', '')
     nft_queries = os.environ.get('NFT_QUERIES', '')
@@ -829,8 +846,11 @@ def operate(id, event, qstate, qdata):
             if not m.set_return_msg(qstate):
                 raise Exception("Error in set_return_msg")
 
-            if not storeQueryInCache(qstate, qstate.return_msg.qinfo, qstate.return_msg.rep, 0):
-                raise Exception("Error in storeQueryInCache")
+            # For some reason this breaks everything! Unbound responds with SERVFAIL instead of using the cache
+            # i.e. the first response is fine, but loading it from cache just doesn't work
+            # Resolution via Avahi works fast anyway so whatever
+            #if not storeQueryInCache(qstate, qstate.return_msg.qinfo, qstate.return_msg.rep, 0):
+            #    raise Exception("Error in storeQueryInCache")
 
             qstate.return_msg.rep.security = 2
             qstate.return_rcode = RCODE_NOERROR
@@ -91,9 +91,7 @@ let
     # log if they are meant for us...
     [(is.eq ip.saddr selfIp4) (is.eq (fib (f: with f; [ daddr iif ]) (f: f.type)) (f: f.local)) (log "${logPrefix}self4 ") drop]
     [(is.eq ip6.saddr selfIp6) (is.eq (fib (f: with f; [ daddr iif ]) (f: f.type)) (f: f.local)) (log "${logPrefix}self6 ") drop]
-    # ...but drop silently if they're multicast/broadcast
-    [(is.eq ip.saddr selfIp4) drop]
-    [(is.eq ip6.saddr selfIp6) drop]
+    # ...but ignore if they're multicast/broadcast
     [return];
 
   ingress_lan_common = add chain
@@ -106,8 +104,17 @@
       netdevIngressWanRules
       [(jump "ingress_common")]
       # [(is.ne (fib (f: with f; [ daddr iif ]) (f: f.type)) (f: set [ f.local f.broadcast f.multicast ])) (log "${logPrefix}non-{local,broadcast,multicast} ") drop]
-      [(is.eq ip.protocol (f: f.icmp)) (limit { rate = 100; per = f: f.second; }) accept]
-      [(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (limit { rate = 100; per = f: f.second; }) accept]
+      # separate limits for echo-request and all other icmp types
+      [(is.eq ip.protocol (f: f.icmp)) (is.eq icmp.type (f: f.echo-request)) (limit { rate = 50; per = f: f.second; }) accept]
+      [(is.eq ip.protocol (f: f.icmp)) (is.ne icmp.type (f: f.echo-request)) (limit { rate = 100; per = f: f.second; }) accept]
+      [(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: f.echo-request)) (limit { rate = 50; per = f: f.second; }) accept]
+      [(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.ne icmpv6.type (f: f.echo-request)) (limit { rate = 100; per = f: f.second; }) accept]
+      # always accept destination unreachable and time-exceeded
+      [(is.eq ip.protocol (f: f.icmp)) (is.eq icmp.type (f: with f; set [ destination-unreachable time-exceeded ])) accept]
+      [(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: with f; set [ destination-unreachable time-exceeded ])) accept]
+      # don't log echo-request drops
+      [(is.eq ip.protocol (f: f.icmp)) (is.eq icmp.type (f: f.echo-request)) drop]
+      [(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: f.echo-request)) drop]
       [(is.eq ip.protocol (f: f.icmp)) (log "${logPrefix}icmp flood ") drop]
       [(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (log "${logPrefix}icmp6 flood ") drop];
     }
@@ -128,7 +135,7 @@
       [(is ct.status (f: f.dnat)) accept]
       [(is.eq (bit.and tcp.flags (f: f.syn)) 0) (is.eq ct.state (f: f.new)) (log "${logPrefix}new non-syn ") drop]
       # icmp: only accept ping requests
-      [(is.eq ip.protocol (f: f.icmp)) (is.eq icmp.type (f: with f; set [ echo-request ])) accept]
+      [(is.eq ip.protocol (f: f.icmp)) (is.eq icmp.type (f: f.echo-request)) accept]
       # icmpv6: accept no-route info from link-local addresses
       [(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq ip6.saddr (cidr "fe80::/10")) (is.eq icmpv6.code (f: f.no-route))
        (is.eq icmpv6.type (f: with f; set [ mld-listener-query mld-listener-report mld-listener-done mld2-listener-report ]))
@@ -211,9 +218,14 @@
   vacuumAddress4 = addToIp parsedGatewayAddr4 2;
   vacuumAddress6 = addToIp parsedGatewayAddr6 2;
 
-  hosted-domains = builtins.attrNames server-config.services.nginx.virtualHosts;
+  hosted-domains =
+    builtins.concatLists
+      (builtins.attrValues
+        (builtins.mapAttrs
+          (k: v: [ k ] ++ v.serverAliases)
+          server-config.services.nginx.virtualHosts));
 in {
-  imports = [ ./options.nix ];
+  imports = [ ./options.nix ./metrics.nix ];
   system.stateVersion = "22.11";
 
   boot.kernel.sysctl = {
@@ -245,15 +257,19 @@
 
   # dnat to server, take ports from its firewall config
   router-settings.dnatRules = let
+    bannedPorts = [
+      631 9100 # printing
+      5353 # avahi
+    ];
     inherit (server-config.networking.firewall) allowedTCPPorts allowedTCPPortRanges allowedUDPPorts allowedUDPPortRanges;
 
-    tcpAndUdp = builtins.filter (x: builtins.elem x allowedTCPPorts) allowedUDPPorts;
-    tcpOnly = builtins.filter (x: !(builtins.elem x allowedUDPPorts)) allowedTCPPorts;
-    udpOnly = builtins.filter (x: !(builtins.elem x allowedTCPPorts)) allowedUDPPorts;
+    tcpAndUdp = builtins.filter (x: !builtins.elem x bannedPorts && builtins.elem x allowedTCPPorts) allowedUDPPorts;
+    tcpOnly = builtins.filter (x: !builtins.elem x (bannedPorts ++ allowedUDPPorts)) allowedTCPPorts;
+    udpOnly = builtins.filter (x: !builtins.elem x (bannedPorts ++ allowedTCPPorts)) allowedUDPPorts;
 
     rangesTcpAndUdp = builtins.filter (x: builtins.elem x allowedTCPPortRanges) allowedUDPPortRanges;
-    rangesTcpOnly = builtins.filter (x: !(builtins.elem x allowedUDPPortRanges)) allowedTCPPortRanges;
-    rangesUdpOnly = builtins.filter (x: !(builtins.elem x allowedTCPPortRanges)) allowedUDPPortRanges;
+    rangesTcpOnly = builtins.filter (x: !builtins.elem x allowedUDPPortRanges) allowedTCPPortRanges;
+    rangesUdpOnly = builtins.filter (x: !builtins.elem x allowedTCPPortRanges) allowedUDPPortRanges;
   in lib.optional (tcpAndUdp != [ ]) {
     port = notnft.dsl.set tcpAndUdp; tcp = true; udp = true;
     target4.address = serverAddress4; target6.address = serverAddress6;
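A worked example of the new port bucketing with placeholder firewall lists (the real ones come from the server config via the inherit above): banned ports vanish entirely, and every remaining port lands in exactly one DNAT rule:

    let
      bannedPorts = [ 631 9100 5353 ];
      allowedTCPPorts = [ 25 80 443 631 ];
      allowedUDPPorts = [ 443 5353 ];
      tcpAndUdp = builtins.filter
        (x: !builtins.elem x bannedPorts && builtins.elem x allowedTCPPorts)
        allowedUDPPorts;
      tcpOnly = builtins.filter (x: !builtins.elem x (bannedPorts ++ allowedUDPPorts)) allowedTCPPorts;
      udpOnly = builtins.filter (x: !builtins.elem x (bannedPorts ++ allowedTCPPorts)) allowedUDPPorts;
    in { inherit tcpAndUdp tcpOnly udpOnly; }
    # => { tcpAndUdp = [ 443 ]; tcpOnly = [ 25 80 ]; udpOnly = [ ]; }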
@@ -394,7 +410,7 @@
       { extraArgs = [ netCidrs.lan6 "dev" "br0" "proto" "kernel" "metric" "256" "pref" "medium" "table" wan_table ]; }
     ];
     ipv4.kea.enable = true;
-    ipv6.radvd.enable = true;
+    ipv6.corerad.enable = true;
     ipv6.kea.enable = true;
   };
 
@@ -487,6 +503,12 @@
       [(is.eq meta.iifname "br0") (mangle meta.mark vpn_table)]
       [(is.eq ip.daddr "@force_unvpn4") (mangle meta.mark wan_table)]
       [(is.eq ip6.daddr "@force_unvpn6") (mangle meta.mark wan_table)]
+      # don't vpn smtp requests so spf works fine (and in case the vpn blocks requests over port 25)
+      [(is.eq ip.saddr serverAddress4) (is.eq ip.protocol (f: f.tcp)) (is.eq tcp.dport 25) (mangle meta.mark wan_table)]
+      [(is.eq ip6.saddr serverAddress6) (is.eq ip6.nexthdr (f: f.tcp)) (is.eq tcp.dport 25) (mangle meta.mark wan_table)]
+      # but block requests to port 25 from other hosts so they can't send mail pretending to originate from my domain
+      [(is.ne ip.saddr serverAddress4) (is.eq ip.protocol (f: f.tcp)) (is.eq tcp.dport 25) drop]
+      [(is.ne ip6.saddr serverAddress6) (is.eq ip6.nexthdr (f: f.tcp)) (is.eq tcp.dport 25) drop]
       [(is.eq ip.daddr "@force_vpn4") (mangle meta.mark vpn_table)]
       [(is.eq ip6.daddr "@force_vpn6") (mangle meta.mark vpn_table)]
     ] ++ # 1. dnat non-vpn: change rttable to wan
@@ -523,9 +545,9 @@
       [(is.eq ip.daddr "@block4") drop]
       [(is.eq ip6.daddr "@block6") drop]
       # this doesn't work... it still gets routed, even though iot_table doesn't have a default route
-      # instead of debugging that, simply change the approach
       # [(is.eq ip.saddr vacuumAddress4) (is.ne ip.daddr) (mangle meta.mark iot_table)]
       # [(is.eq ether.saddr cfg.vacuumMac) (mangle meta.mark iot_table)]
+      # instead of debugging that, simply change the approach
       [(is.eq ether.saddr cfg.vacuumMac) (is.ne ip.daddr (cidr netCidrs.lan4)) (is.ne ip.daddr "@allow_iot4") (log "iot4 ") drop]
       [(is.eq ether.saddr cfg.vacuumMac) (is.ne ip6.daddr (cidr netCidrs.lan6)) (is.ne ip6.daddr "@allow_iot6") (log "iot6 ") drop]
       [(mangle ct.mark meta.mark)]
@@ -659,14 +681,15 @@
       # we override resolvconf above manually
       resolveLocalQueries = false;
       settings = {
-        server = {
+        server = rec {
          interface = [ netAddresses.netns4 netAddresses.netns6 netAddresses.lan4 netAddresses.lan6 ];
          access-control = [ "${netCidrs.netns4} allow" "${netCidrs.netns6} allow" "${netCidrs.lan4} allow" "${netCidrs.lan6} allow" ];
          aggressive-nsec = true;
          do-ip6 = true;
          module-config = ''"validator python iterator"'';
          local-zone = [
-           ''"local." static''
+           # incompatible with avahi resolver
+           # ''"local." static''
            ''"${server-config.server.domainName}." typetransparent''
          ];
          local-data = builtins.concatLists (map (domain:
|
||||||
''"${domain}. A ${serverAddress4}"''
|
''"${domain}. A ${serverAddress4}"''
|
||||||
''"${domain}. AAAA ${serverAddress6}"''
|
''"${domain}. AAAA ${serverAddress6}"''
|
||||||
]) hosted-domains);
|
]) hosted-domains);
|
||||||
|
# incompatible with avahi resolver
|
||||||
|
# ++ [
|
||||||
|
# ''"retracker.local. A ${netAddresses.lan4}"''
|
||||||
|
# ''"retracker.local. AAAA ${netAddresses.lan6}"''
|
||||||
|
# ];
|
||||||
|
|
||||||
|
# performance tuning
|
||||||
|
num-threads = 4; # cpu core count
|
||||||
|
msg-cache-slabs = 4; # nearest power of 2 to num-threads
|
||||||
|
rrset-cache-slabs = msg-cache-slabs;
|
||||||
|
infra-cache-slabs = msg-cache-slabs;
|
||||||
|
key-cache-slabs = msg-cache-slabs;
|
||||||
|
so-reuseport = true;
|
||||||
|
msg-cache-size = "50m"; # (default 4m)
|
||||||
|
rrset-cache-size = "100m"; # msg*2 (default 4m)
|
||||||
|
# timeouts
|
||||||
|
unknown-server-time-limit = 752; # default=376
|
||||||
};
|
};
|
||||||
# normally it would refer to the flake path, but then the service changes on every flake update
|
# normally it would refer to the flake path, but then the service changes on every flake update
|
||||||
# instead, write a new file in nix store
|
# instead, write a new file in nix store
|
||||||
|
@@ -681,6 +721,10 @@
       remote-control.control-enable = true;
     };
   };
+  environment.etc."unbound/iot_ips.json".text = builtins.toJSON [
+    # local multicast
+    "224.0.0.0/24"
+  ];
   environment.etc."unbound/iot_domains.json".text = builtins.toJSON [
     # ntp time sync
     "pool.ntp.org"
@@ -694,14 +738,17 @@
       unbound-python = pkgs.python3.withPackages (ps: with ps; [ pydbus dnspython requests pytricia nftables ]);
     in
       "${unbound-python}/${unbound-python.sitePackages}";
-    environment.MDNS_ACCEPT_NAMES = "^.*\\.local\\.$";
+    environment.MDNS_ACCEPT_NAMES = "^(.*\\.)?local\\.$";
+    # resolve retracker.local to whatever router.local resolves to
+    # we can't add a local zone alongside using avahi resolver, so we have to use hacks like this
+    environment.DOMAIN_NAME_OVERRIDES = "retracker.local->router.local";
     # load vpn_domains.json and vpn_ips.json, as well as unvpn_domains.json and unvpn_ips.json
     # resolve domains and append it to ips and add it to the nftables sets
     environment.NFT_QUERIES = "vpn:force_vpn4,force_vpn6;unvpn!:force_unvpn4,force_unvpn6;iot:allow_iot4,allow_iot6";
     serviceConfig.EnvironmentFile = "/secrets/unbound_env";
     # it needs to run after nftables has been set up because it sets up the sets
-    after = [ "nftables-default.service" ];
-    wants = [ "nftables-default.service" ];
+    after = [ "nftables-default.service" "avahi-daemon.service" ];
+    wants = [ "nftables-default.service" "avahi-daemon.service" ];
     # allow it to call nft
     serviceConfig.AmbientCapabilities = [ "CAP_NET_ADMIN" ];
   };
@@ -777,6 +824,11 @@
     bind = netAddresses.lan4;
   };
 
+  services.opentracker = {
+    enable = true;
+    extraOptions = "-i ${netAddresses.lan4} -p 6969 -P 6969 -p 80";
+  };
+
   # it takes a stupidly long time when done via qemu
   # (also it's supposed to be disabled by default but it was enabled for me, why?)
   documentation.man.generateCaches = false;
system/hosts/router/metrics.nix (new file, +51)
@@ -0,0 +1,51 @@
+{ config
+, router-lib
+, ... }:
+let
+  cfg = config.router-settings;
+  netAddresses.lan4 = (router-lib.parseCidr cfg.network).address;
+in {
+  services.prometheus.exporters = {
+    node = {
+      enable = true;
+      enabledCollectors = [ "logind" "systemd" ];
+      listenAddress = netAddresses.lan4;
+      port = 9101; # cups is 9100
+    };
+    process = {
+      enable = true;
+      listenAddress = netAddresses.lan4;
+    };
+    unbound = {
+      enable = true;
+      controlInterface = "/run/unbound/unbound.ctl";
+      listenAddress = netAddresses.lan4;
+      group = config.services.unbound.group;
+    };
+    kea = {
+      enable = true;
+      controlSocketPaths = [
+        "/run/kea/kea-dhcp4-ctrl.sock"
+        "/run/kea/kea-dhcp6-ctrl.sock"
+      ];
+      listenAddress = netAddresses.lan4;
+    };
+  };
+  router.interfaces.br0 = {
+    ipv4.kea.settings.control-socket = {
+      socket-name = "/run/kea/kea-dhcp4-ctrl.sock";
+      socket-type = "unix";
+    };
+    ipv6.kea.settings.control-socket = {
+      socket-name = "/run/kea/kea-dhcp6-ctrl.sock";
+      socket-type = "unix";
+    };
+    ipv6.corerad.settings.debug = {
+      address = "${netAddresses.lan4}:9430";
+      prometheus = true;
+    };
+  };
+  services.unbound.settings.server = {
+    extended-statistics = true;
+  };
+}
@@ -96,6 +96,8 @@ in {
     { directory = /var/lib/opendkim; user = "opendkim"; group = "opendkim"; mode = "0700"; }
   ] ++ lib.optionals config.services.pleroma.enable [
     { directory = /var/lib/pleroma; user = "pleroma"; group = "pleroma"; mode = "0700"; }
+  ] ++ lib.optionals config.services.akkoma.enable [
+    { directory = /var/lib/akkoma; user = "akkoma"; group = "akkoma"; mode = "0700"; }
   ] ++ lib.optionals config.services.postfix.enable [
     { directory = /var/lib/postfix; user = "root"; group = "root"; mode = "0755"; }
   ] ++ lib.optionals config.services.postgresql.enable [