router: finally works

chayleaf 2023-06-24 07:12:11 +07:00
parent a7c308a5f6
commit 8894e0d89c
20 changed files with 1793 additions and 1098 deletions


@ -181,6 +181,27 @@
"type": "gitlab" "type": "gitlab"
} }
}, },
"nixos-router": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1687562820,
"narHash": "sha256-cks0WUe/27kBlQDNulKA4ZHTggN9k/jGWhFDrMiEV8k=",
"owner": "chayleaf",
"repo": "nixos-router",
"rev": "5048633a6f38c6787cba1a010359ff5246ec532b",
"type": "github"
},
"original": {
"owner": "chayleaf",
"repo": "nixos-router",
"rev": "5048633a6f38c6787cba1a010359ff5246ec532b",
"type": "github"
}
},
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1686412476, "lastModified": 1686412476,
@ -235,6 +256,27 @@
"type": "github" "type": "github"
} }
}, },
"notnft": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1687562693,
"narHash": "sha256-imxVKPmthtrMq5RFst8IfdbnDPy4sEeln2lo9374W4o=",
"owner": "chayleaf",
"repo": "notnft",
"rev": "f090546a7c190557c2081129b7e49a595f2ab76f",
"type": "github"
},
"original": {
"owner": "chayleaf",
"repo": "notnft",
"rev": "f090546a7c190557c2081129b7e49a595f2ab76f",
"type": "github"
}
},
"nur": { "nur": {
"locked": { "locked": {
"lastModified": 1686488164, "lastModified": 1686488164,
@ -258,8 +300,10 @@
"nix-gaming": "nix-gaming", "nix-gaming": "nix-gaming",
"nixos-hardware": "nixos-hardware", "nixos-hardware": "nixos-hardware",
"nixos-mailserver": "nixos-mailserver", "nixos-mailserver": "nixos-mailserver",
"nixos-router": "nixos-router",
"nixpkgs": "nixpkgs", "nixpkgs": "nixpkgs",
"notlua": "notlua", "notlua": "notlua",
"notnft": "notnft",
"nur": "nur", "nur": "nur",
"rust-overlay": "rust-overlay" "rust-overlay": "rust-overlay"
} }


@ -22,6 +22,14 @@
url = "github:chayleaf/notlua"; url = "github:chayleaf/notlua";
inputs.nixpkgs.follows = "nixpkgs"; inputs.nixpkgs.follows = "nixpkgs";
}; };
notnft = {
url = "github:chayleaf/notnft/f090546a7c190557c2081129b7e49a595f2ab76f";
inputs.nixpkgs.follows = "nixpkgs";
};
nixos-router = {
url = "github:chayleaf/nixos-router/5048633a6f38c6787cba1a010359ff5246ec532b";
inputs.nixpkgs.follows = "nixpkgs";
};
nixos-mailserver = {
url = "gitlab:simple-nixos-mailserver/nixos-mailserver";
inputs.nixpkgs.follows = "nixpkgs";
@ -33,8 +41,9 @@
};
};
outputs = inputs@{ self, nixpkgs, nixos-hardware, impermanence, home-manager, nur, nix-gaming, notlua, notnft, nixos-mailserver, nixos-router, ... }:
let
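# when true, take notnft and nixos-router from local checkouts under devPath
# instead of the pinned flake inputs (see the router-emmc/router-sd specialArgs below)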
developing = false;
# IRL-related stuff I'd rather not put into git
priv =
if builtins.pathExists ./private.nix then (import ./private.nix { })
@ -43,6 +52,7 @@
else if builtins?extraBuiltins.secrets then builtins.extraBuiltins.secrets
# yes, this is impure, this is a last ditch effort at getting access to secrets
else import /etc/nixos/private { };
devPath = priv.devPath or ../.;
# if x has key s, get it. Otherwise return def
# All private config for hostname
getPriv = hostname: priv.${hostname} or { };
@ -79,21 +89,25 @@
./system/hosts/nixserver
];
};
router-emmc = rec {
system = "aarch64-linux";
specialArgs.notnft = if developing then (import /${devPath}/notnft { inherit (nixpkgs) lib; }).config.notnft else notnft.lib.${system};
specialArgs.router-lib = if developing then import /${devPath}/nixos-router/lib.nix { inherit (nixpkgs) lib; } else nixos-router.lib.${system};
modules = [
./system/hardware/bpi_r3/emmc.nix
./system/hosts/router
(if developing then (import /${devPath}/nixos-router) else nixos-router.nixosModules.default)
{ networking.hostName = "router"; }
];
};
router-sd = rec {
system = "aarch64-linux";
specialArgs.notnft = if developing then (import /${devPath}/notnft { inherit (nixpkgs) lib; }).config.notnft else notnft.lib.${system};
specialArgs.router-lib = if developing then import /${devPath}/nixos-router/lib.nix { inherit (nixpkgs) lib; } else nixos-router.lib.${system};
modules = [
./system/hardware/bpi_r3/sd.nix
./system/hosts/router
(if developing then (import /${devPath}/nixos-router) else nixos-router.nixosModules.default)
{ networking.hostName = "router"; }
];
};
@ -120,7 +134,6 @@
"x86_64-linux" "x86_64-linux"
"aarch64-linux" "aarch64-linux"
] (system: let self = overlay self (import nixpkgs { inherit system; }); in self ); ] (system: let self = overlay self (import nixpkgs { inherit system; }); in self );
# this is the system config part
nixosImages.router = let pkgs = import nixpkgs { system = "aarch64-linux"; overlays = [ overlay ]; }; in { nixosImages.router = let pkgs = import nixpkgs { system = "aarch64-linux"; overlays = [ overlay ]; }; in {
emmcImage = pkgs.callPackage ./system/hardware/bpi_r3/image.nix { emmcImage = pkgs.callPackage ./system/hardware/bpi_r3/image.nix {
inherit (nixosConfigurations.router-emmc) config; inherit (nixosConfigurations.router-emmc) config;
@ -133,7 +146,8 @@
bpiR3Stuff = pkgs.bpiR3StuffSd;
};
};
# this is the system config part
nixosConfigurations = builtins.mapAttrs (hostname: args @ { system ? "x86_64-linux", modules, specialArgs ? {}, nixpkgs ? {}, home ? {}, ... }:
lib.nixosSystem ({
inherit system;
# allow modules to access nixpkgs directly, use customized lib,
@ -141,7 +155,7 @@
specialArgs = {
inherit lib nixpkgs;
hardware = nixos-hardware.nixosModules;
} // specialArgs;
modules = modules ++ [
# Third-party NixOS modules
impermanence.nixosModule


@ -48,6 +48,7 @@ in
mpvScripts = pkgs.mpvScripts // (callPackage ./mpv-scripts { });
qemu_7 = callPackage ./qemu_7.nix {
stdenv = pkgs'.ccacheStdenv;
inherit (pkgs.darwin.apple_sdk.frameworks) CoreServices Cocoa Hypervisor vmnet;
inherit (pkgs.darwin.stubs) rez setfile;
inherit (pkgs.darwin) sigtool;
@ -62,5 +63,23 @@ in
# TODO: when https://gitlab.com/virtio-fs/virtiofsd/-/issues/96 is fixed remove this
virtiofsd = callPackage ./qemu_virtiofsd.nix {
qemu = pkgs'.qemu_7;
stdenv = pkgs'.ccacheStdenv;
};
hostapd = (pkgs.hostapd.override { stdenv = pkgs'.ccacheStdenv; }).overrideAttrs (old: {
# also remove 80211N
extraConfig = old.extraConfig + ''
CONFIG_OCV=y
CONFIG_WPS=y
CONFIG_WPS_NFC=y
CONFIG_WNM=y
CONFIG_IEEE80211AX=y
CONFIG_IEEE80211BE=y
CONFIG_ELOOP_EPOLL=y
CONFIG_MBO=y
CONFIG_TAXONOMY=y
CONFIG_OWE=y
CONFIG_AIRTIME_POLICY=y
'';
});
} // (import ../system/hardware/bpi_r3/pkgs.nix { inherit pkgs pkgs' lib sources; })


@ -27,7 +27,6 @@
boot.kernelParams = [ "boot.shell_on_fail" "console=ttyS0,115200" ];
boot.initrd.compressor = "zstd";
nixpkgs.buildPlatform = "x86_64-linux";
system.build.rootfsImage = pkgs.callPackage "${pkgs.path}/nixos/lib/make-ext4-fs.nix" {
storePaths = config.system.build.toplevel;


@ -7,7 +7,7 @@
set -euxo pipefail
which zstd || exit 1
userspace="$(which lklfuse >/dev/null && echo -n 1 || echo -n)"
use_rsync="$(which rsync >/dev/null && echo -n 1 || echo -n)"
@ -74,15 +74,19 @@ mkdir -p "$tmp/rootfs" "$tmp/out"
Mount ext4 "$rootfs" "$tmp/rootfs" ro
rootfs="$tmp/rootfs"
Mount btrfs "$template" "$tmp/out"
cpr "$boot" "$tmp/out/@boot"
run umount "$tmp/out"
Mount btrfs "$template" "$tmp/out" "compress=zstd:15"
run cp -v "$rootfs/nix-path-registration" "$tmp/out/@/"
# those two are the only dirs needed for impermanence in boot stage 1
run mkdir -p "$tmp/out/@/var/lib/nixos"
run mkdir -p "$tmp/out/@/var/log"
# secrets, we don't want to pass them via the store
run mkdir -p "$tmp/out/@/secrets"
run cp -v /etc/nixos/private/wireguard-key "$tmp/out/@/secrets/"
run chmod -R 000 "$tmp/out/@/secrets"
cpr "$rootfs/nix" "$tmp/out/@nix" cpr "$rootfs/nix" "$tmp/out/@nix"
run umount "$rootfs" run umount "$rootfs"


@ -10,6 +10,7 @@ let
src = pkgs.fetchFromGitHub {
owner = "frank-w";
repo = "u-boot";
# branch r3-atf
rev = "c30a1caf8274af67bf31f3fb5abc45df5737df36";
hash = "sha256-pW2yytXRIFEIbG1gnuXq8TiLe/Eew7zESe6Pijh2qVk=";
};
@ -61,10 +62,10 @@ let
CONFIG_USE_BOOTCOMMAND=y
CONFIG_ZSTD=y
'';
ubootVersion = "2023.07-rc4";
ubootSrc = pkgs.fetchurl {
url = "ftp://ftp.denx.de/pub/u-boot/u-boot-${ubootVersion}.tar.bz2";
hash = "sha256-tqp9fnGPQFeNGrkU/A6AusDEz7neh2KiR9HWbR7+WTY=";
};
in rec {
ubootBpiR3Sd = pkgs.buildUBoot {


@ -182,11 +182,15 @@ in {
27015
25565
7777
9887
];
# kde connect
networking.firewall.allowedTCPPortRanges = [
{ from = 1714; to = 1764; }
];
networking.firewall.allowedUDPPortRanges = [
{ from = 1714; to = 1764; }
];
networking.wireless.iwd.enable = true;
services.ratbagd.enable = true;


@ -28,6 +28,8 @@ let
"ns2" "ns2"
]; ];
unbound-python = pkgs.python3.withPackages (pkgs: with pkgs; [ pydbus dnspython ]);
in {
imports = [
./options.nix
@ -108,8 +110,9 @@ in {
services.unbound = {
enable = true;
package = pkgs.unbound-with-systemd.override {
stdenv = pkgs.ccacheStdenv;
withPythonModule = true;
python = unbound-python;
};
localControlSocketPath = "/run/unbound/unbound.ctl";
resolveLocalQueries = false;
@ -119,7 +122,7 @@ in {
access-control = [ "${cfg.lanCidrV4} allow" "${cfg.lanCidrV6} allow" ];
aggressive-nsec = true;
do-ip6 = true;
module-config = ''"validator python iterator"'';
local-zone = [
''"local." static''
] ++ (lib.optionals (cfg.localIpV4 != null || cfg.localIpV6 != null) [
@ -129,7 +132,7 @@ in {
lib.optionals (cfg.localIpV4 != null) [
''"${domain}. A ${cfg.localIpV4}"''
] ++ (lib.optionals (cfg.localIpV6 != null) [
''"${domain}. AAAA ${cfg.localIpV6}"''
])) hosted-domains);
};
python.python-script = toString (pkgs.fetchurl {
@ -139,7 +142,10 @@ in {
remote-control.control-enable = true;
};
};
systemd.services.unbound.environment = {
MDNS_ACCEPT_NAMES = "^.*\\.local\\.$";
PYTHONPATH = "${unbound-python}/${unbound-python.sitePackages}";
};
# just in case
networking.hosts."127.0.0.1" = [ "localhost" ] ++ hosted-domains;


@ -0,0 +1,850 @@
#!/usr/bin/env python3
#
# A plugin for the Unbound DNS resolver to resolve DNS records in
# multicast DNS [RFC 6762] via Avahi.
# Modified by chayleaf to resolve addresses and import them into
# nftables.
#
# Copyright (C) 2018-2019 Internet Real-Time Lab, Columbia University
# http://www.cs.columbia.edu/irt/
#
# Written by Jan Janak <janakj@cs.columbia.edu>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Dependencies:
# Unbound with pythonmodule configured for Python 3
# dnspython [http://www.dnspython.org]
# pydbus [https://github.com/LEW21/pydbus]
#
# To enable Python 3 support, configure Unbound as follows:
# PYTHON_VERSION=3 ./configure --with-pythonmodule
#
# The plugin is meant to be used as a fallback resolver that resolves
# records in multicast DNS if the upstream server cannot be reached or
# provides no answer (NXDOMAIN).
#
# mDNS requests for negative records, i.e., records for which Avahi
# returns no answer (NXDOMAIN), are expensive. Since there is no
# single authoritative server in mDNS, such requests terminate only
# via a timeout. The timeout is about a second (if MDNS_TIMEOUT is not
# configured), or the value configured via MDNS_TIMEOUT. The
# corresponding Unbound thread will be blocked for this amount of
# time. For this reason, it is important to configure an appropriate
# number of threads in unbound.conf and limit the RR types and names
# that will be resolved via Avahi via the environment variables
# described later.
#
# An example unbound.conf with the plugin enabled:
#
# | server:
# | module-config: "validator python iterator"
# | num-threads: 32
# | cache-max-negative-ttl: 60
# | cache-max-ttl: 60
# | python:
# | python-script: path/to/this/file
#
#
# The plugin can also be run interactively. Provide the name and
# record type to be resolved as command line arguments and the
# resolved record will be printed to standard output:
#
# $ ./avahi-resolver.py voip-phx4.phxnet.org A
# voip-phx4.phxnet.org. 120 IN A 10.4.3.2
#
#
# The behavior of the plugin can be controlled via the following
# environment variables:
#
# DBUS_SYSTEM_BUS_ADDRESS
#
# The address of the system DBus bus, in the format expected by DBus,
# e.g., unix:path=/run/avahi/system-bus.sock
#
#
# DEBUG
#
# Set this environment variable to "yes", "true", "on", or "1" to
# enable debugging. In debugging mode, the plugin will output a lot
# more information about what it is doing either to the standard
# output (when run interactively) or to Unbound via log_info and
# log_error.
#
# By default debugging is disabled.
#
#
# MDNS_TTL
#
# Avahi does not provide the TTL value for the records it returns.
# This environment variable can be used to configure the TTL value for
# such records.
#
# The default value is 120 seconds.
#
#
# MDNS_TIMEOUT
#
# The maximum amount of time (in milliseconds) an Avahi request is
# allowed to run. This value sets the time it takes to resolve
# negative (non-existent) records in Avahi. If unset, the request
# terminates when Avahi sends the "AllForNow" signal, telling the
# client that more records are unlikely to arrive. This takes roughly
# one second. You may need to configure a longer value here on
# slower networks, e.g., networks that relay mDNS packets such as
# MANETs.
#
#
# MDNS_GETONE
#
# If set to "true", "1", or "on", an Avahi request will terminate as
# soon as at least one record has been found. If there are multiple
# nodes in the mDNS network publishing the same record, only one (or
# subset) will be returned.
#
# If set to "false", "0", or "off", the plugin will gather records for
# MDNS_TIMEOUT and return all records found. This is only useful in
# networks where multiple nodes are known to publish different records
# under the same name and the client needs to be able to obtain them
# all. When configured this way, all Avahi requests will always take
# MDNS_TIMEOUT to complete!
#
# This option is set to true by default.
#
#
# MDNS_REJECT_TYPES
#
# A comma-separated list of record types that will NOT be resolved in
# mDNS via Avahi. Use this environment variable to prevent specific
# record types from being resolved via Avahi. For example, if your
# network does not support IPv6, you can put AAAA on this list.
#
# The default value is an empty list.
#
# Example: MDNS_REJECT_TYPES=aaaa,mx,soa
#
#
# MDNS_ACCEPT_TYPES
#
# If set, a record type will be resolved via Avahi if and only if it
# is present on this comma-separated list. In other words, this is a
# whitelist.
#
# The default value is an empty list which means all record types will
# be resolved via Avahi.
#
# Example: MDNS_ACCEPT_TYPES=a,ptr,txt,srv,aaaa,cname
#
#
# MDNS_REJECT_NAMES
#
# If the name being resolved matches the regular expression in this
# environment variable, the name will NOT be resolved via Avahi. In
# other words, this environment variable provides a blacklist.
#
# The default value is empty--no names will be rejected.
#
# Example: MDNS_REJECT_NAMES=(^|\.)example\.com\.$
#
#
# MDNS_ACCEPT_NAMES
#
# If set to a regular expression, a name will be resolved via Avahi if
# and only if it matches the regular expression. In other words, this
# variable provides a whitelist.
#
# The default value is empty--all names will be resolved via Avahi.
#
# Example: MDNS_ACCEPT_NAMES=^.*\.example\.com\.$
#
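# NFT_QUERIES
#
# A semicolon-separated list of name:set4,set6 entries (this is the format
# parsed by init() below) mapping a list of domains to a pair of nftables
# sets in the "inet global" table, e.g. (illustrative value):
#
# NFT_QUERIES=vpn:force_vpn4,force_vpn6
#
# For each entry, domains are loaded from /etc/unbound/<name>_domains.json
# and /var/lib/unbound/<name>_domains.json, static IPs from the matching
# <name>_ips.json files, and every address resolved for a matching domain
# is added to <set4>/<set6>.
#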
import json
import ipaddress
import os
import subprocess
import pytricia
import re
import array
import threading
import traceback
import dns.rdata
import dns.rdatatype
import dns.rdataclass
from queue import Queue
from gi.repository import GLib
from pydbus import SystemBus
IF_UNSPEC = -1
PROTO_UNSPEC = -1
NFT_QUERIES = {}
sysbus = None
avahi = None
trampoline = dict()
thread_local = threading.local()
dbus_thread = None
dbus_loop = None
def is_valid_ip4(x):
try:
_ = ipaddress.IPv4Address(x)
return True
except ipaddress.AddressValueError:
return False
def is_valid_ip6(x):
try:
_ = ipaddress.IPv6Address(x)
return True
except ipaddress.AddressValueError:
return False
def str2bool(v):
if v.lower() in ['false', 'no', '0', 'off', '']:
return False
return True
def dbg(msg):
if DEBUG != False:
log_info('avahi-resolver: %s' % msg)
#
# Although pydbus has an internal facility for handling signals, we
# cannot use that with Avahi. When responding from an internal cache,
# Avahi sends the first signal very quickly, before pydbus has had a
# chance to subscribe for the signal. This will result in lost signal
# and missed data:
#
# https://github.com/LEW21/pydbus/issues/87
#
# As a workaround, we subscribe to all signals before creating a
# record browser and do our own signal matching and dispatching via
# the following function.
#
def signal_dispatcher(connection, sender, path, interface, name, args):
o = trampoline.get(path, None)
if o is None:
return
if name == 'ItemNew': o.itemNew(*args)
elif name == 'ItemRemove': o.itemRemove(*args)
elif name == 'AllForNow': o.allForNow(*args)
elif name == 'Failure': o.failure(*args)
class RecordBrowser:
def __init__(self, callback, name, type_, timeout=None, getone=True):
self.callback = callback
self.records = []
self.error = None
self.getone = getone
self.timer = None if timeout is None else GLib.timeout_add(timeout, self.timedOut)
self.browser_path = avahi.RecordBrowserNew(IF_UNSPEC, PROTO_UNSPEC, name, dns.rdataclass.IN, type_, 0)
trampoline[self.browser_path] = self
self.browser = sysbus.get('.Avahi', self.browser_path)
self.dbg('Created RecordBrowser(name=%s, type=%s, getone=%s, timeout=%s)'
% (name, dns.rdatatype.to_text(type_), getone, timeout))
def dbg(self, msg):
dbg('[%s] %s' % (self.browser_path, msg))
def _done(self):
del trampoline[self.browser_path]
self.dbg('Freeing')
self.browser.Free()
if self.timer is not None:
self.dbg('Removing timer')
GLib.source_remove(self.timer)
self.callback(self.records, self.error)
def itemNew(self, interface, protocol, name, class_, type_, rdata, flags):
self.dbg('Got signal ItemNew')
self.records.append((name, class_, type_, rdata))
if self.getone:
self._done()
def itemRemove(self, interface, protocol, name, class_, type_, rdata, flags):
self.dbg('Got signal ItemRemove')
self.records.remove((name, class_, type_, rdata))
def failure(self, error):
self.dbg('Got signal Failure')
self.error = Exception(error)
self._done()
def allForNow(self):
self.dbg('Got signal AllForNow')
if self.timer is None:
self._done()
def timedOut(self):
self.dbg('Timed out')
self._done()
return False
#
# This function runs the main event loop for DBus (GLib). This
# function must be run in a dedicated worker thread.
#
def dbus_main():
global sysbus, avahi, dbus_loop
dbg('Connecting to system DBus')
sysbus = SystemBus()
dbg('Subscribing to .Avahi.RecordBrowser signals')
sysbus.con.signal_subscribe('org.freedesktop.Avahi',
'org.freedesktop.Avahi.RecordBrowser',
None, None, None, 0, signal_dispatcher)
avahi = sysbus.get('.Avahi', '/')
dbg("Connected to Avahi Daemon: %s (API %s) [%s]"
% (avahi.GetVersionString(), avahi.GetAPIVersion(), avahi.GetHostNameFqdn()))
dbg('Starting DBus main loop')
dbus_loop = GLib.MainLoop()
dbus_loop.run()
#
# This function must be run in the DBus worker thread. It creates a
# new RecordBrowser instance and once it has finished doing its thing,
# it will send the result back to the original thread via the queue.
#
def start_resolver(queue, *args, **kwargs):
try:
RecordBrowser(lambda *v: queue.put_nowait(v), *args, **kwargs)
except Exception as e:
queue.put_nowait((None, e))
return False
#
# To resolve a request, we setup a queue, post a task to the DBus
# worker thread, and wait for the result (or error) to arrive over the
# queue. If the worker thread reports an error, raise the error as an
# exception.
#
def resolve(*args, **kwargs):
try:
queue = thread_local.queue
except AttributeError:
dbg('Creating new per-thread queue')
queue = Queue()
thread_local.queue = queue
GLib.idle_add(lambda: start_resolver(queue, *args, **kwargs))
records, error = queue.get()
queue.task_done()
if error is not None:
raise error
return records
def parse_type_list(lst):
return list(map(dns.rdatatype.from_text, [v.strip() for v in lst.split(',') if len(v)]))
def build_ipset(ips: list):
pyt = pytricia.PyTricia()
for ip in ips:
try:
pyt.insert(ip, None)
except:
with open('/var/lib/unbound/error.log', 'at') as f:
f.write(f'Warning: couldn\'t insert ip {ip}:\n')
traceback.print_exc(file=f)
return pyt
def add_ips(set: str, ipv6: bool, ips: list, flush: bool = False):
#with open('/var/lib/unbound/info.log', 'at') as f:
#print('set', set, 'ipv6', ipv6, 'ips', ips, file=f)
pyt = build_ipset(ips)
ruleset: list = [ ]
if flush:
ruleset.append({"flush":{"set":{"family":"inet","table":"global","name":set}}})
elems: list = []
if ipv6:
maxn = 128
is_valid = is_valid_ip6
else:
maxn = 32
is_valid = is_valid_ip4
for ip in pyt.keys():
try:
if pyt.parent(ip) != None:
continue
except:
pass
if '/' not in ip:
n = maxn
else:
ip, n0 = ip.split('/')
try:
n = int(n0)
except:
continue
if not is_valid(ip):
continue
if n == maxn:
elems.append(ip)
else:
elems.append({"prefix":{"addr":ip,"len":n}})
#with open('/var/lib/unbound/info.log', 'at') as f:
#print('elems', elems, file=f)
if len(elems) == 0:
return
ruleset.append({"add":{"element":{"family":"inet","table":"global","name":set,"elem":elems}}})
data = json.dumps({"nftables":ruleset}).encode('utf-8')
#with open('/var/lib/unbound/info.log', 'at') as f:
#print('data', data, file=f)
try:
out = subprocess.run([
'/run/current-system/sw/bin/nft',
'-j', '-f', '/dev/stdin'
], capture_output=True, input=data)
#with open('/var/lib/unbound/info.log', 'at') as f:
#print('out', out, file=f)
if out.returncode != 0:
with open('/var/lib/unbound/nftables.log', 'wb') as f:
f.write(b'Error running nftables ruleset. Ruleset:\n')
f.write(data)
f.write(b'\n')
f.write(b'stdout:\n')
f.write(out.stdout)
f.write(b'\nstderr:\n')
f.write(out.stderr)
f.write(b'\n')
except:
with open('/var/lib/unbound/error.log', 'at') as f:
f.write(f'While adding ips for set {set}:\n')
traceback.print_exc(file=f)
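# For reference, add_ips("force_vpn4", False, ["10.0.0.1", "192.168.0.0/16"], flush=True)
# (hypothetical arguments) would feed `nft -j -f /dev/stdin` roughly:
# {"nftables": [
#   {"flush": {"set": {"family": "inet", "table": "global", "name": "force_vpn4"}}},
#   {"add": {"element": {"family": "inet", "table": "global", "name": "force_vpn4",
#     "elem": ["10.0.0.1", {"prefix": {"addr": "192.168.0.0", "len": 16}}]}}}
# ]}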
def build_domains(domains):
ret = {}
def fill(tmp, splitDomain):
while splitDomain:
key = splitDomain[-1]
if key not in tmp.keys():
tmp[key] = {}
tmp = tmp[key]
splitDomain = splitDomain[:-1]
tmp['__IsTrue__'] = True
for domain in domains:
fill(ret, domain.split('.'))
return ret
def lookup_domain(domains, domain):
splitDomain = domain.split('.')
while len(splitDomain):
key = splitDomain[-1]
splitDomain = splitDomain[:-1]
star = domains.get('*', None)
if star != None and star.get('__IsTrue__', False):
return True
domains = domains.get(key, None)
if domains == None:
return False
star = domains.get('*', None)
if star != None and star.get('__IsTrue__', False):
return True
return domains.get('__IsTrue__', False)
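# Example of the matching logic above, with d = build_domains(["example.com", "*.example.org"]):
#   lookup_domain(d, "example.com")     -> True   (exact match)
#   lookup_domain(d, "sub.example.com") -> False  (no wildcard for example.com)
#   lookup_domain(d, "sub.example.org") -> True   (wildcard match)
#   lookup_domain(d, "example.org")     -> True   (the wildcard also matches the bare name)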
def init(*args, **kwargs):
global dbus_thread, DEBUG
global MDNS_TTL, MDNS_GETONE, MDNS_TIMEOUT
global MDNS_REJECT_TYPES, MDNS_ACCEPT_TYPES
global MDNS_REJECT_NAMES, MDNS_ACCEPT_NAMES
global NFT_QUERIES
nft_queries = os.environ.get('NFT_QUERIES', '')
if nft_queries:
for query in nft_queries.split(';'):
name, sets = query.split(':')
set4, set6 = sets.split(',')
NFT_QUERIES[name] = { 'domains': [], 'ips4': [], 'ips6': [], 'name4': set4, 'name6': set6 }
for k, v in NFT_QUERIES.items():
try:
domains = json.load(open(f'/etc/unbound/{k}_domains.json', 'rt', encoding='utf-8'))
v['domains'].extend(domains)
except:
pass
try:
domains = json.load(open(f'/var/lib/unbound/{k}_domains.json', 'rt', encoding='utf-8'))
v['domains'].extend(domains)
except:
pass
v['domains'] = build_domains(v['domains'])
try:
ips = json.load(open(f'/etc/unbound/{k}_ips.json', 'rt', encoding='utf-8'))
v['ips4'].extend(filter(lambda x: '.' in x, ips))
v['ips6'].extend(filter(lambda x: ':' in x, ips))
except:
pass
try:
ips = json.load(open(f'/var/lib/unbound/{k}_ips.json', 'rt', encoding='utf-8'))
v['ips4'].extend(filter(lambda x: '.' in x, ips))
v['ips6'].extend(filter(lambda x: ':' in x, ips))
except:
pass
# cached resolved domains
try:
os.makedirs('/var/lib/unbound/domains4/', exist_ok=True)
for x in os.listdir('/var/lib/unbound/domains4/'):
with open('/var/lib/unbound/domains4/' + x, 'rt') as f:
data = f.read().split('\n')
for k, v in NFT_QUERIES.items():
if lookup_domain(v['domains'], x):
v['ips4'].extend(data)
except:
with open('/var/lib/unbound/error.log', 'at') as f:
traceback.print_exc(file=f)
try:
os.makedirs('/var/lib/unbound/domains6/', exist_ok=True)
for x in os.listdir('/var/lib/unbound/domains6/'):
with open('/var/lib/unbound/domains6/' + x, 'rt') as f:
data = f.read().split('\n')
for k, v in NFT_QUERIES.items():
if lookup_domain(v['domains'], x):
v['ips6'].extend(data)
except:
with open('/var/lib/unbound/error.log', 'at') as f:
traceback.print_exc(file=f)
# finally, add the ips to nftables
for k, v in NFT_QUERIES.items():
if v['ips4'] and v['name4']:
add_ips(v['name4'], False, v['ips4'], flush=True)
if v['ips6'] and v['name6']:
add_ips(v['name6'], True, v['ips6'], flush=True)
v['ips4'] = build_ipset(v['ips4'])
v['ips6'] = build_ipset(v['ips6'])
DEBUG = str2bool(os.environ.get('DEBUG', str(False)))
MDNS_TTL = int(os.environ.get('MDNS_TTL', 120))
dbg("TTL for records from Avahi: %d" % MDNS_TTL)
MDNS_REJECT_TYPES = parse_type_list(os.environ.get('MDNS_REJECT_TYPES', ''))
if MDNS_REJECT_TYPES:
dbg('Types NOT resolved via Avahi: %s' % MDNS_REJECT_TYPES)
MDNS_ACCEPT_TYPES = parse_type_list(os.environ.get('MDNS_ACCEPT_TYPES', ''))
if MDNS_ACCEPT_TYPES:
dbg('ONLY resolving the following types via Avahi: %s' % MDNS_ACCEPT_TYPES)
v = os.environ.get('MDNS_REJECT_NAMES', None)
MDNS_REJECT_NAMES = re.compile(v, flags=re.I | re.S) if v is not None else None
if MDNS_REJECT_NAMES is not None:
dbg('Names NOT resolved via Avahi: %s' % MDNS_REJECT_NAMES.pattern)
v = os.environ.get('MDNS_ACCEPT_NAMES', None)
MDNS_ACCEPT_NAMES = re.compile(v, flags=re.I | re.S) if v is not None else None
if MDNS_ACCEPT_NAMES is not None:
dbg('ONLY resolving the following names via Avahi: %s' % MDNS_ACCEPT_NAMES.pattern)
v = os.environ.get('MDNS_TIMEOUT', None)
MDNS_TIMEOUT = int(v) if v is not None else None
if MDNS_TIMEOUT is not None:
dbg('Avahi request timeout: %s' % MDNS_TIMEOUT)
MDNS_GETONE = str2bool(os.environ.get('MDNS_GETONE', str(True)))
dbg('Terminate Avahi requests on first record: %s' % MDNS_GETONE)
dbus_thread = threading.Thread(target=dbus_main)
dbus_thread.daemon = True
dbus_thread.start()
def deinit(*args, **kwargs):
dbus_loop.quit()
dbus_thread.join()
return True
def inform_super(id, qstate, superqstate, qdata):
return True
def get_rcode(msg):
if not msg:
return RCODE_SERVFAIL
return msg.rep.flags & 0xf
def rr2text(rec, ttl):
name, class_, type_, rdata = rec
wire = array.array('B', rdata).tobytes()
return '%s. %d %s %s %s' % (
name,
ttl,
dns.rdataclass.to_text(class_),
dns.rdatatype.to_text(type_),
dns.rdata.from_wire(class_, type_, wire, 0, len(wire), None))
def operate(id, event, qstate, qdata):
global NFT_QUERIES
qi = qstate.qinfo
name = qi.qname_str
type_ = qi.qtype
type_str = dns.rdatatype.to_text(type_)
class_ = qi.qclass
class_str = dns.rdataclass.to_text(class_)
rc = get_rcode(qstate.return_msg)
# vpn stuff
n2 = name.rstrip('.')
qnames = []
for k, v in NFT_QUERIES.items():
if lookup_domain(v['domains'], n2):
qnames.append(k)
# THIS IS PAIN
if qnames:
try:
ip4 = []
ip6 = []
if qstate.return_msg and qstate.return_msg.rep:
rep = qstate.return_msg.rep
for i in range(rep.rrset_count):
d = rep.rrsets[i].entry.data
rk = rep.rrsets[i].rk
for j in range(0, d.count + d.rrsig_count):
wire = array.array('B', d.rr_data[j]).tobytes()
# IN (values are in network byte order: 256 == htons(1))
if rk.rrset_class != 256: continue
# A == 256 (htons(1)), AAAA == 7168 (htons(28))
if rk.type == 256 and len(wire) == 4+2 and wire[:2] == b'\x00\x04':
ip4.append('.'.join(str(x) for x in wire[2:]))
elif rk.type == 7168 and len(wire) == 16+2 and wire[:2] == b'\x00\x10':
b = list(hex(x)[2:].zfill(2) for x in wire[2:])
ip6.append(':'.join(''.join(b[x:x+2]) for x in range(0, len(b), 2)))
changed4 = False
changed6 = False
if ip4:
new_data = '\n'.join(sorted(ip4))
try:
with open('/var/lib/unbound/domains4/' + n2, 'rt') as f:
old_data = f.read()
except:
old_data = ''
if old_data != new_data:
changed4 = True
with open('/var/lib/unbound/domains4/' + n2, 'wt') as f:
f.write(new_data)
if ip6:
new_data = '\n'.join(sorted(ip6))
try:
with open('/var/lib/unbound/domains6/' + n2, 'rt') as f:
old_data = f.read()
except:
old_data = ''
if old_data != new_data:
changed6 = True
with open('/var/lib/unbound/domains6/' + n2, 'wt') as f:
f.write(new_data)
if changed4:
for qname in qnames:
name4 = NFT_QUERIES[qname]['name4']
if name4:
# per-query trie of addresses already added to the set (built in init)
ips4 = NFT_QUERIES[qname]['ips4']
ip2 = []
for ip in ip4:
exists = False
try:
if ips4.has_key(ip) or ips4.parent(ip) != None:
exists = True
except:
pass
if not exists:
ips4.insert(ip, None)
ip2.append(ip)
if ip2:
add_ips(name4, False, ip2)
if changed6:
for qname in qnames:
name6 = NFT_QUERIES[qname]['name6']
if name6:
ips6 = NFT_QUERIES[qname]['ips6']
ip2 = []
for ip in ip6:
exists = False
try:
if ips6.has_key(ip) or ips6.parent(ip) != None:
exists = True
except:
pass
if not exists:
ips6.insert(ip, None)
ip2.append(ip)
if ip2:
add_ips(name6, True, ip2)
except:
with open('/var/lib/unbound/error.log', 'at') as f:
traceback.print_exc(file=f)
if event == MODULE_EVENT_NEW or event == MODULE_EVENT_PASS:
qstate.ext_state[id] = MODULE_WAIT_MODULE
return True
if event != MODULE_EVENT_MODDONE:
log_err("avahi-resolver: Unexpected event %d" % event)
qstate.ext_state[id] = MODULE_ERROR
return True
qstate.ext_state[id] = MODULE_FINISHED
# Only resolve via Avahi if we got NXDOMAIN from the upstream DNS
# server, or if we could not reach the upstream DNS server. If we
# got some records for the name from the upstream DNS server
# already, do not resolve the record in Avahi.
if rc != RCODE_NXDOMAIN and rc != RCODE_SERVFAIL:
return True
dbg("Got request for '%s %s %s'" % (name, class_str, type_str))
# Avahi only supports the IN class
if class_ != RR_CLASS_IN:
dbg('Rejected, Avahi only supports the IN class')
return True
# Avahi does not support meta queries (e.g., ANY)
if dns.rdatatype.is_metatype(type_):
dbg('Rejected, Avahi does not support the type %s' % type_str)
return True
# If we have a type blacklist and the requested type is on the
# list, reject it.
if MDNS_REJECT_TYPES and type_ in MDNS_REJECT_TYPES:
dbg('Rejected, type %s is on the blacklist' % type_str)
return True
# If we have a type whitelist and if the requested type is not on
# the list, reject it.
if MDNS_ACCEPT_TYPES and type_ not in MDNS_ACCEPT_TYPES:
dbg('Rejected, type %s is not on the whitelist' % type_str)
return True
# If we have a name blacklist and if the requested name matches
# the blacklist, reject it.
if MDNS_REJECT_NAMES is not None:
if MDNS_REJECT_NAMES.search(name):
dbg('Rejected, name %s is on the blacklist' % name)
return True
# If we have a name whitelist and if the requested name does not
# match the whitelist, reject it.
if MDNS_ACCEPT_NAMES is not None:
if not MDNS_ACCEPT_NAMES.search(name):
dbg('Rejected, name %s is not on the whitelist' % name)
return True
dbg("Resolving '%s %s %s' via Avahi" % (name, class_str, type_str))
recs = resolve(name, type_, getone=MDNS_GETONE, timeout=MDNS_TIMEOUT)
if not recs:
dbg('Result: Not found (NXDOMAIN)')
qstate.return_rcode = RCODE_NXDOMAIN
return True
m = DNSMessage(name, type_, class_, PKT_QR | PKT_RD | PKT_RA)
for r in recs:
s = rr2text(r, MDNS_TTL)
dbg('Result: %s' % s)
m.answer.append(s)
if not m.set_return_msg(qstate):
raise Exception("Error in set_return_msg")
if not storeQueryInCache(qstate, qstate.return_msg.qinfo, qstate.return_msg.rep, 0):
raise Exception("Error in storeQueryInCache")
qstate.return_msg.rep.security = 2
qstate.return_rcode = RCODE_NOERROR
return True
#
# It does not appear to be sufficient to check __name__ to determine
# whether we are being run in interactive mode. As a workaround, try
# to import module unboundmodule and if that fails, assume we're being
# run in interactive mode.
#
try:
import unboundmodule
embedded = True
except ImportError:
embedded = False
if __name__ == '__main__' and not embedded:
import sys
def log_info(msg):
print(msg)
def log_err(msg):
print('ERROR: %s' % msg, file=sys.stderr)
if len(sys.argv) != 3:
print('Usage: %s <name> <rr_type>' % sys.argv[0])
sys.exit(2)
name = sys.argv[1]
type_str = sys.argv[2]
try:
type_ = dns.rdatatype.from_text(type_str)
except dns.rdatatype.UnknownRdatatype:
log_err('Unsupported DNS record type "%s"' % type_str)
sys.exit(2)
if dns.rdatatype.is_metatype(type_):
log_err('Meta record type "%s" cannot be resolved via Avahi' % type_str)
sys.exit(2)
init()
try:
recs = resolve(name, type_, getone=MDNS_GETONE, timeout=MDNS_TIMEOUT)
if not len(recs):
print('%s not found (NXDOMAIN)' % name)
sys.exit(1)
for r in recs:
print(rr2text(r, MDNS_TTL))
finally:
deinit()


@ -1,4 +1,8 @@
{ config
, pkgs
, notnft
, lib
, router-lib
, ... }:
let
@ -10,7 +14,6 @@ let
he_su_beamformer = true;
he_su_beamformee = true;
he_mu_beamformer = true;
he_bss_color = 128;
he_spr_sr_control = 3;
he_default_pe_duration = 4;
he_rts_threshold = 1023;
@ -39,12 +42,230 @@ let
he_mu_edca_ac_vo_ecwmax = 7;
he_mu_edca_ac_vo_timer = 255;
preamble = true;
country3 = "0x49"; # indoor
};
# routing tables
wan_table = 1;
# vpn table, assign an id but don't actually add a rule for it, so it is the default
vpn_table = 2;
# iot table without a route into the internet
iot_table = 3;
dnatRuleMode = rule:
if rule.mode != "" then rule.mode
else if rule.target4.address or null == gatewayAddr4 || rule.target6.address or null == gatewayAddr6 then "rule"
else "mark";
# nftables rules generator
# selfIp4/selfIp6 = block packets from these addresses
# extraInetEntries = stuff to add to inet table
# extraNetdevEntries = stuff to add to netdev table
# wans = external interfaces (internet)
# lans = internal interfaces (lan)
# netdevIngressWanRules = additional rules for ingress (netdev)
# inetInboundWanRules = additional rules for input from wan (inet), i.e. stuff meant directly for the router and not for any other device
# inetForwardRules = additional forward rules besides allow lan->wan forwarding
# inetSnatRules = snat rules (changing source address, usually just called nat)
# inetDnatRules = dnat rules (changing destination address, i.e. port forwarding)
# logPrefix = log prefix for drops
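# rough shape of a call (illustrative addresses; see the br0 and veth-wan-b
# interfaces below for the real invocations):
#   nftables.jsonRules = mkRules {
#     selfIp4 = "192.168.1.1"; selfIp6 = "fd00::1";
#     lans = [ "br0" ]; wans = [ "wan0" ];
#     logPrefix = "lan ";
#   };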
mkRules = {
selfIp4,
selfIp6,
extraInetEntries ? {},
extraNetdevEntries ? {},
wans,
lans,
netdevIngressWanRules ? [],
inetInboundWanRules ? [],
inetForwardRules ? [],
inetSnatRules ? [],
inetDnatRules ? [],
logPrefix ? "",
}: with notnft.dsl; with payload; ruleset {
filter = add table.netdev ({
ingress_common = add chain
[(is.eq (bit.and tcp.flags (f: bit.or f.fin f.syn)) (f: bit.or f.fin f.syn)) (log "${logPrefix}fin+syn drop ") drop]
[(is.eq (bit.and tcp.flags (f: bit.or f.syn f.rst)) (f: bit.or f.syn f.rst)) (log "${logPrefix}syn+rst drop ") drop]
[(is.eq (bit.and tcp.flags (f: with f; bit.or fin syn rst psh ack urg)) 0) (log "${logPrefix}null drop ") drop]
[(is tcp.flags (f: f.syn)) (is.eq tcpOpt.maxseg.size (range 0 500)) (log "${logPrefix}maxseg drop ") drop]
# reject requests with own saddr
# log if they are meant for us...
[(is.eq ip.saddr selfIp4) (is.eq (fib (f: with f; [ daddr iif ]) (f: f.type)) (f: f.local)) (log "${logPrefix}self4 ") drop]
[(is.eq ip6.saddr selfIp6) (is.eq (fib (f: with f; [ daddr iif ]) (f: f.type)) (f: f.local)) (log "${logPrefix}self6 ") drop]
# ...but drop silently if they're multicast/broadcast
[(is.eq ip.saddr selfIp4) drop]
[(is.eq ip6.saddr selfIp6) drop]
[return];
ingress_lan_common = add chain
[(is.eq (fib (f: with f; [ saddr mark iif ]) (f: f.oif)) missing) (log "${logPrefix}oif missing ") drop]
[(jump "ingress_common")];
ingress_wan_common = add chain
netdevIngressWanRules
[(jump "ingress_common")]
# [(is.ne (fib (f: with f; [ daddr iif ]) (f: f.type)) (f: set [ f.local f.broadcast f.multicast ])) (log "${logPrefix}non-{local,broadcast,multicast} ") drop]
[(is.eq ip.protocol (f: f.icmp)) (limit { rate = 100; per = f: f.second; }) accept]
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (limit { rate = 100; per = f: f.second; }) accept]
[(is.eq ip.protocol (f: f.icmp)) (log "${logPrefix}icmp flood ") drop]
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (log "${logPrefix}icmp6 flood ") drop];
}
// extraNetdevEntries
// builtins.listToAttrs (map (name: {
name = "ingress_${name}";
value = add chain { type = f: f.filter; hook = f: f.ingress; dev = name; prio = -500; policy = f: f.accept; }
[(jump "ingress_lan_common")];
}) lans)
// builtins.listToAttrs (map (name: {
name = "ingress_${name}";
value = add chain { type = f: f.filter; hook = f: f.ingress; dev = name; prio = -500; policy = f: f.accept; }
[(jump "ingress_wan_common")];
}) wans));
global = add table { family = f: f.inet; } ({
inbound_wan_common = add chain
[(vmap ct.state { established = accept; related = accept; invalid = drop; })]
[(is ct.status (f: f.dnat)) accept]
[(is.eq (bit.and tcp.flags (f: f.syn)) 0) (is.eq ct.state (f: f.new)) (log "${logPrefix}new non-syn ") drop]
# icmp: only accept ping requests
[(is.eq ip.protocol (f: f.icmp)) (is.eq icmp.type (f: with f; set [ echo-request ])) accept]
# icmpv6: accept no-route info from link-local addresses
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq ip6.saddr (cidr "fe80::/10")) (is.eq icmpv6.code (f: f.no-route))
(is.eq icmpv6.type (f: with f; set [ mld-listener-query mld-listener-report mld-listener-done mld2-listener-report ]))
accept]
# icmpv6: accept commonly useful stuff
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: with f; set [ destination-unreachable time-exceeded echo-request echo-reply ])) accept]
# icmpv6: more common stuff
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.code (f: f.no-route))
(is.eq icmpv6.type (f: with f; set [ packet-too-big parameter-problem ])) accept]
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.code (f: f.admin-prohibited))
(is.eq icmpv6.type (f: with f; set [ parameter-problem ])) accept]
inetInboundWanRules;
# trust the lan
inbound_lan_common = add chain
[accept];
inbound = add chain { type = f: f.filter; hook = f: f.input; prio = f: f.filter; policy = f: f.drop; }
[(vmap meta.iifname ({
lo = accept;
}
// lib.genAttrs lans (_: jump "inbound_lan_common")
// lib.genAttrs wans (_: jump "inbound_wan_common")
))]
[(log "${logPrefix}inbound drop ")];
forward = add chain { type = f: f.filter; hook = f: f.forward; prio = f: f.filter; policy = f: f.drop; }
[(vmap ct.state { established = accept; related = accept; invalid = drop; })]
[(is ct.status (f: f.dnat)) accept]
# accept lan->wan fw
[(is.eq meta.iifname (set lans)) (is.eq meta.oifname (set wans)) accept]
# accept lan->lan fw
[(is.eq meta.iifname (set lans)) (is.eq meta.oifname (set lans)) accept]
# accept wan->lan icmpv6 forward
[(is.eq meta.iifname (set wans)) (is.eq icmpv6.type (f: with f; set [ destination-unreachable time-exceeded echo-request echo-reply ])) accept]
[(is.eq meta.iifname (set wans)) (is.eq icmpv6.code (f: f.no-route)) (is.eq icmpv6.type (f: with f; set [ packet-too-big parameter-problem ])) accept]
[(is.eq meta.iifname (set wans)) (is.eq icmpv6.code (f: f.admin-prohibited)) (is.eq icmpv6.type (f: f.parameter-problem)) accept]
inetForwardRules
[(log "${logPrefix}forward drop ")];
postrouting = add chain { type = f: f.nat; hook = f: f.postrouting; prio = f: f.srcnat; policy = f: f.accept; }
# masquerade ipv6 because my isp doesn't provide it and my vpn gives a single ipv6
[(is.eq meta.protocol (f: set [ f.ip f.ip6 ])) (is.eq meta.iifname (set lans)) (is.eq meta.oifname (set wans)) masquerade]
inetSnatRules;
prerouting = add chain { type = f: f.nat; hook = f: f.prerouting; prio = f: f.dstnat; policy = f: f.accept; }
inetDnatRules;
} // extraInetEntries);
};
mkFlushRules = {}: with notnft.dsl; ruleset [ (flush ruleset) ];
unbound-python = pkgs.python3.withPackages (ps: with ps; [ pydbus dnspython requests pytricia nftables ]);
# parse a.b.c.d/x into { address, prefixLength }
netParsedCidr4 = router-lib.parseCidr cfg.network;
netParsedCidr6 = router-lib.parseCidr cfg.network6;
netnsParsedCidr4 = router-lib.parseCidr cfg.netnsNet;
netnsParsedCidr6 = router-lib.parseCidr cfg.netnsNet6;
# generate network cidr from device address
# (normalizeCidr applies network mask to the address)
netCidr4 = router-lib.serializeCidr (router-lib.normalizeCidr netParsedCidr4);
netCidr6 = router-lib.serializeCidr (router-lib.normalizeCidr netParsedCidr6);
netnsCidr4 = router-lib.serializeCidr (router-lib.normalizeCidr netnsParsedCidr4);
netnsCidr6 = router-lib.serializeCidr (router-lib.normalizeCidr netnsParsedCidr6);
gatewayAddr4 = netParsedCidr4.address;
gatewayAddr6 = netParsedCidr6.address;
mainNetnsAddr4 = netnsParsedCidr4.address;
mainNetnsAddr6 = netnsParsedCidr6.address;
wanNetnsAddr4 = cfg.wanNetnsAddr;
wanNetnsAddr6 = cfg.wanNetnsAddr6;
parsedGatewayAddr4 = router-lib.parseIp4 gatewayAddr4;
parsedGatewayAddr6 = router-lib.parseIp6 gatewayAddr6;
addToIp' = ip: n: lib.init ip ++ [ (lib.last ip + n) ];
addToIp = ip: n: router-lib.serializeIp (addToIp' ip n);
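# e.g. if the gateway were 192.168.1.1, addToIp parsedGatewayAddr4 1 would give
# "192.168.1.2" (only the last component is incremented, no carry handling)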
# server
serverAddress4 = addToIp parsedGatewayAddr4 1;
serverAddress6 = addToIp parsedGatewayAddr6 1;
# robot vacuum
vacuumAddress4 = addToIp parsedGatewayAddr4 2;
vacuumAddress6 = addToIp parsedGatewayAddr6 2;
# TODO: take from server config?
hosted-domains =
map
(prefix: if prefix == null then cfg.domainName else "${prefix}.${cfg.domainName}")
[
null "dns" "mumble" "mail" "music" "www" "matrix"
"search" "git" "cloud" "ns1" "ns2"
];
in {
router-settings.domainName = "pavluk.org";
router-settings.dhcpReservations = [
{ ipAddress = serverAddress4;
macAddress = cfg.serverMac; }
{ ipAddress = vacuumAddress4;
macAddress = cfg.vacuumMac; }
];
router-settings.dhcp6Reservations = [
{ ipAddress = serverAddress6;
macAddress = cfg.serverMac; }
{ ipAddress = vacuumAddress6;
macAddress = cfg.vacuumMac; }
];
router-settings.dnatRules = [
{
# TODO: take firewall settings from server config
port = notnft.dsl.set [
# http
80 443 8008 8448
# mail
25 587 465 143 993 110 995 4190
];
tcp = true; udp = false;
target4.address = serverAddress4;
target6.address = serverAddress6;
}
{
# mumble
port = 64738; tcp = true; udp = true;
target4.address = serverAddress4;
target6.address = serverAddress6;
}
{
# expose the default namespace's ssh via port 23
port = 23; tcp = true; udp = false;
target4.address = gatewayAddr4;
target4.port = 22;
target6.address = gatewayAddr6;
target6.port = 22;
}
];
imports = [ ./options.nix ];
system.stateVersion = "22.11";
fileSystems = {
@ -61,17 +282,28 @@ in {
{ device = rootPart; fsType = "btrfs"; neededForBoot = true;
options = [ "compress=zstd:15" "subvol=@nix" ]; };
};
impermanence = {
enable = true;
path = /persist;
directories = [
{ directory = /home/${config.common.mainUsername}; user = config.common.mainUsername; group = "users"; mode = "0700"; }
{ directory = /root; mode = "0700"; }
{ directory = /var/db/dhcpcd; mode = "0755"; }
{ directory = /var/lib/private/kea; mode = "0750"; }
# for wireguard key
{ directory = /secrets; mode = "0000"; }
];
};
boot.kernel.sysctl = {
"net.ipv4.conf.all.src_valid_mark" = true;
"net.ipv4.conf.default.src_valid_mark" = true;
"net.ipv4.conf.all.forwarding" = true;
"net.ipv6.conf.all.forwarding" = true;
};
services.openssh.enable = true;
/*services.fail2ban = {
enable = true;
};*/
router.enable = true;
router.interfaces.wlan0 = {
bridge = "br0";
@ -79,6 +311,8 @@ in {
hostapd.settings = {
inherit (cfg) ssid;
hw_mode = "g";
channel = 1;
chanlist = [ 1 ];
supported_rates = [ 60 90 120 180 240 360 480 540 ];
basic_rates = [ 60 120 240 ];
ht_capab = "[LDPC][SHORT-GI-20][SHORT-GI-40][TX-STBC][RX-STBC1][MAX-AMSDU-7935]";
@ -88,39 +322,430 @@ in {
bridge = "br0"; bridge = "br0";
hostapd.enable = true; hostapd.enable = true;
hostapd.settings = { hostapd.settings = {
ssid = "${cfg.ssid} 5G"; ssid = "${cfg.ssid}_5G";
ieee80211h = true; ieee80211h = true;
hw_mode = "a"; hw_mode = "a";
channel = 36;
chanlist = [ 36 ];
tx_queue_data2_burst = 2;
ht_capab = "[HT40+][LDPC][SHORT-GI-20][SHORT-GI-40][TX-STBC][RX-STBC1][MAX-AMSDU-7935]";
vht_oper_chwidth = 1; # 80mhz ch width
vht_oper_centr_freq_seg0_idx = 42;
vht_capab = "[RXLDPC][SHORT-GI-80][SHORT-GI-160][TX-STBC-2BY1][SU-BEAMFORMER][SU-BEAMFORMEE][MU-BEAMFORMER][MU-BEAMFORMEE][RX-ANTENNA-PATTERN][TX-ANTENNA-PATTERN][RX-STBC-1][SOUNDING-DIMENSION-4][BF-ANTENNA-4][VHT160][MAX-MPDU-11454][MAX-A-MPDU-LEN-EXP7]";
} // hapdConfig;
};
router.interfaces.lan0 = {
bridge = "br0";
systemdLinkLinkConfig.MACAddressPolicy = "persistent";
};
router.interfaces.lan1 = {
bridge = "br0";
systemdLinkLinkConfig.MACAddressPolicy = "persistent";
};
router.interfaces.lan2 = {
bridge = "br0";
systemdLinkLinkConfig.MACAddressPolicy = "persistent";
};
router.interfaces.lan3 = {
bridge = "br0";
systemdLinkLinkConfig.MACAddressPolicy = "persistent";
};
router.interfaces.lan4 = {
bridge = "br0";
systemdLinkLinkConfig.MACAddressPolicy = "persistent";
};
/*router.interfaces.lan5 = {
bridge = "br0";
systemdLinkMatchConfig.OriginalName = "eth1";
systemdLinkLinkConfig.MACAddressPolicy = "persistent";
};*/
router.interfaces.wan = {
dependentServices = [
{ service = "wireguard-wg0"; inNetns = false; }
];
systemdLinkLinkConfig.MACAddressPolicy = "none";
systemdLinkLinkConfig.MACAddress = "11:22:33:44:55:66";
dhcpcd.enable = true;
networkNamespace = "wan";
}; };
networking.firewall.enable = false;
router.interfaces.br0 = {
dependentServices = [ { service = "unbound"; bindType = "wants"; } ];
ipv4.addresses = [ {
address = gatewayAddr4;
inherit (netParsedCidr4) prefixLength;
dns = [ gatewayAddr4 ];
keaSettings.reservations = map (res: {
hw-address = res.macAddress;
ip-address = res.ipAddress;
}) cfg.dhcpReservations;
} ];
ipv6.addresses = [ {
address = gatewayAddr6;
inherit (netParsedCidr6) prefixLength;
dns = [ gatewayAddr6 ];
gateways = [ gatewayAddr6 ];
radvdSettings.AdvAutonomous = true;
coreradSettings.autonomous = true;
# don't autoallocate addresses
keaSettings.pools = [ ];
# just assign the reservations
keaSettings.reservations = map (res: {
hw-address = res.macAddress;
ip-addresses = [ res.ipAddress ];
}) cfg.dhcp6Reservations;
} ];
ipv4.routes = [
{ extraArgs = [ netCidr4 "dev" "br0" "proto" "kernel" "scope" "link" "src" gatewayAddr4 "table" wan_table ]; }
# allow iot to contact ips inside the network
{ extraArgs = [ netCidr4 "dev" "br0" "proto" "kernel" "scope" "link" "src" gatewayAddr4 "table" iot_table ]; }
];
ipv6.routes = [
{ extraArgs = [ netCidr6 "dev" "br0" "proto" "kernel" "metric" "256" "pref" "medium" "table" wan_table ]; }
# allow iot to contact ips inside the network
{ extraArgs = [ netCidr6 "dev" "br0" "proto" "kernel" "metric" "256" "pref" "medium" "table" iot_table ]; }
];
ipv4.kea.enable = true;
ipv6.radvd.enable = true;
ipv6.kea.enable = true;
nftables.stopJsonRules = mkFlushRules {};
nftables.jsonRules = mkRules {
selfIp4 = gatewayAddr4;
selfIp6 = gatewayAddr6;
lans = [ "br0" ];
wans = [ "wg0" "veth-wan-a" ];
netdevIngressWanRules = with notnft.dsl; with payload; [
# check oif only from vpn
# dont check it from veth-wan-a because of dnat fuckery and because we already check packets coming from wan there
[(is.eq meta.iifname "wg0") (is.eq (fib (f: with f; [ saddr mark iif ]) (f: f.oif)) missing) (log "lan oif missing ") drop]
];
inetDnatRules =
builtins.concatLists (map
(rule: let
inherit (notnft.inetProtos) tcp udp;
protocols = if rule.tcp && rule.udp then notnft.dsl.set [ tcp udp ] else if rule.tcp then tcp else udp;
rule4 = rule.target4; rule6 = rule.target6;
in with notnft.dsl; with payload;
lib.optionals (rule4 != null) [
[ (is.eq meta.iifname "wg0") (is.eq ip.protocol protocols) (is.eq th.dport rule.port)
(if rule4.port == null then dnat.ip rule4.address else dnat.ip rule4.address rule4.port) ]
] ++ lib.optionals (rule6 != null) [
[ (is.eq meta.iifname "wg0") (is.eq ip6.protocol protocols) (is.eq th.dport rule.port)
(if rule6.port == null then dnat.ip6 rule6.address else dnat.ip6 rule6.address rule6.port) ]
])
(builtins.filter (x: x.inVpn && (x.tcp || x.udp)) cfg.dnatRules));
inetForwardRules = with notnft.dsl; with payload; [
# allow access to lan from the wan namespace
[(is.eq meta.iifname "veth-wan-a") (is.eq meta.oifname "br0") accept]
# allow dnat ("ct status dnat" doesn't work)
];
logPrefix = "lan ";
inetInboundWanRules = with notnft.dsl; with payload; [
[(is.eq th.dport 22) accept]
[(is.eq ip.saddr (cidr netnsCidr4)) accept]
[(is.eq ip6.saddr (cidr netnsCidr6)) accept]
];
extraInetEntries = with notnft.dsl; with payload; {
block4 = add set { type = f: f.ipv4_addr; flags = f: with f; [ interval ]; } [
(cidr "194.190.137.0" 24)
(cidr "194.190.157.0" 24)
(cidr "194.190.21.0" 24)
(cidr "194.226.130.0" 23)
];
block6 = add set { type = f: f.ipv6_addr; flags = f: with f; [ interval ]; };
# those tables get populated by unbound
force_unvpn4 = add set { type = f: f.ipv4_addr; flags = f: with f; [ interval ]; };
force_vpn4 = add set { type = f: f.ipv4_addr; flags = f: with f; [ interval ]; };
force_unvpn6 = add set { type = f: f.ipv6_addr; flags = f: with f; [ interval ]; };
force_vpn6 = add set { type = f: f.ipv6_addr; flags = f: with f; [ interval ]; };
prerouting = add chain { type = f: f.filter; hook = f: f.prerouting; prio = f: f.filter; policy = f: f.accept; } ([
[(mangle meta.mark ct.mark)]
[(is.ne meta.mark 0) accept]
[(is.eq meta.iifname "br0") (mangle meta.mark vpn_table)]
[(is.eq ip.daddr "@force_unvpn4") (mangle meta.mark wan_table)]
[(is.eq ip6.daddr "@force_unvpn6") (mangle meta.mark wan_table)]
[(is.eq ip.daddr "@force_vpn4") (mangle meta.mark vpn_table)]
[(is.eq ip6.daddr "@force_vpn6") (mangle meta.mark vpn_table)]
] ++ # 1. dnat non-vpn: change rttable to wan
builtins.concatLists (map
(rule: let
inherit (notnft.inetProtos) tcp udp;
protocols = if rule.tcp && rule.udp then notnft.dsl.set [ tcp udp ] else if rule.tcp then tcp else udp;
rule4 = rule.target4; rule6 = rule.target6;
in with notnft.dsl; with payload;
lib.optionals (rule4 != null && rule4.address != gatewayAddr4) [
[ (is.eq meta.iifname "br0") (is.eq ip.protocol protocols) (is.eq ip.saddr rule4.address)
(is.eq th.sport (if rule4.port != null then rule4.port else rule.port)) (mangle meta.mark wan_table) ]
] ++ lib.optionals (rule6 != null && rule6.address != gatewayAddr6) [
[ (is.eq meta.iifname "br0") (is.eq ip6.nexthdr protocols) (is.eq ip6.saddr rule6.address)
(is.eq th.sport (if rule6.port != null then rule6.port else rule.port)) (mangle meta.mark wan_table) ]
])
(builtins.filter (x: !x.inVpn && (x.tcp || x.udp) && dnatRuleMode x == "mark") cfg.dnatRules))
++ # 2. dnat vpn: change rttable to vpn
builtins.concatLists (map
(rule: let
inherit (notnft.inetProtos) tcp udp;
protocols = if rule.tcp && rule.udp then notnft.dsl.set [ tcp udp ] else if rule.tcp then tcp else udp;
rule4 = rule.target4; rule6 = rule.target6;
in with notnft.dsl; with payload;
lib.optionals (rule4 != null && rule4.address != gatewayAddr4) [
[ (is ct.status (f: f.dnat)) (is.eq meta.iifname "br0") (is.eq ip.protocol protocols) (is.eq ip.saddr rule4.address)
(is.eq th.sport (if rule4.port != null then rule4.port else rule.port)) (mangle meta.mark vpn_table) ]
] ++ lib.optionals (rule6 != null && rule6.address != gatewayAddr6) [
[ (is ct.status (f: f.dnat)) (is.eq meta.iifname "br0") (is.eq ip6.nexthdr protocols) (is.eq ip6.saddr rule6.address)
(is.eq th.sport (if rule6.port != null then rule6.port else rule.port)) (mangle meta.mark vpn_table) ]
])
(builtins.filter (x: x.inVpn && (x.tcp || x.udp) && dnatRuleMode x == "mark") cfg.dnatRules))
++ [
[(is.eq ip.daddr "@block4") drop]
[(is.eq ip6.daddr "@block6") drop]
[(is.eq ether.saddr cfg.vacuumMac) (mangle meta.mark iot_table)]
[(mangle ct.mark meta.mark)]
]);
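# For orientation, the head and tail of this chain should correspond roughly to the
# following nft rules (a sketch; table names stand in for the numeric marks):
#   meta mark set ct mark
#   meta mark != 0 accept
#   iifname "br0" meta mark set <vpn_table>
#   ip daddr @force_unvpn4 meta mark set <wan_table>
#   ...
#   ip daddr @block4 drop
#   ether saddr <vacuumMac> meta mark set <iot_table>
#   ct mark set meta mark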
}; };
};
};
router.veths.veth-wan-a.peerName = "veth-wan-b";
router.interfaces.veth-wan-a = {
ipv4.addresses = [ netnsParsedCidr4 ];
ipv6.addresses = [ netnsParsedCidr6 ];
ipv4.routes = [
# default config duplicated for wan_table
{ extraArgs = [ netnsCidr4 "dev" "veth-wan-a" "proto" "kernel" "scope" "link" "src" mainNetnsAddr4 "table" wan_table ]; }
# default all traffic to wan in wan_table
{ extraArgs = [ "default" "via" wanNetnsAddr4 "table" wan_table ]; }
];
ipv6.routes = [
# default config duplicated for wan_table
{ extraArgs = [ netnsCidr6 "dev" "veth-wan-a" "proto" "kernel" "metric" "256" "pref" "medium" "table" wan_table ]; }
# default all traffic to wan in wan_table
{ extraArgs = [ "default" "via" wanNetnsAddr6 "table" wan_table ]; }
];
};
router.interfaces.veth-wan-b = {
networkNamespace = "wan";
ipv4.addresses = [ {
address = wanNetnsAddr4;
inherit (netnsParsedCidr4) prefixLength;
} ];
ipv6.addresses = [ {
address = wanNetnsAddr6;
inherit (netnsParsedCidr6) prefixLength;
} ];
ipv4.routes = [
{ extraArgs = [ netCidr4 "via" mainNetnsAddr4 ]; }
];
ipv6.routes = [
{ extraArgs = [ netCidr6 "via" mainNetnsAddr6 ]; }
];
nftables.stopJsonRules = mkFlushRules {};
nftables.jsonRules = mkRules {
selfIp4 = wanNetnsAddr4;
selfIp6 = wanNetnsAddr6;
lans = [ "veth-wan-b" ];
wans = [ "wan" ];
netdevIngressWanRules = with notnft.dsl; with payload; [
[(is.eq (fib (f: with f; [ saddr mark iif ]) (f: f.oif)) missing) (log "wan oif missing ") drop]
];
inetDnatRules =
builtins.concatLists (map
(rule: let
inherit (notnft.inetProtos) tcp udp;
protocols = if rule.tcp && rule.udp then notnft.dsl.set [ tcp udp ] else if rule.tcp then tcp else udp;
rule4 = rule.target4; rule6 = rule.target6;
in with notnft.dsl; with payload;
lib.optionals (rule4 != null) [
[ (is.eq meta.iifname "wan") (is.eq ip.protocol protocols) (is.eq th.dport rule.port)
(if rule4.port == null then dnat.ip rule4.address else dnat.ip rule4.address rule4.port) ]
] ++ lib.optionals (rule6 != null) [
[ (is.eq meta.iifname "wan") (is.eq ip6.nexthdr protocols) (is.eq th.dport rule.port)
(if rule6.port == null then dnat.ip6 rule6.address else dnat.ip6 rule6.address rule6.port) ]
])
(builtins.filter (x: !x.inVpn && (x.tcp || x.udp)) cfg.dnatRules));
inetSnatRules =
# historically, i needed this for every forward; now most of them use ip rules instead,
# and this only applies to rules that resolve to "snat" mode
builtins.concatLists (map
(rule: let
inherit (notnft.inetProtos) tcp udp;
protocols = if rule.tcp && rule.udp then notnft.dsl.set [ tcp udp ] else if rule.tcp then tcp else udp;
rule4 = rule.target4; rule6 = rule.target6;
in with notnft.dsl; with payload;
lib.optionals (rule4 != null && rule4.address == gatewayAddr4) [
[ (is.eq meta.iifname "wan") (is.eq meta.oifname "veth-wan-b") (is.eq ip.protocol protocols)
(is.eq th.dport (if rule4.port != null then rule4.port else rule.port)) (is.eq ip.daddr rule4.address) masquerade ]
] ++ lib.optionals (rule6 != null && rule6.address == gatewayAddr6) [
[ (is.eq meta.iifname "wan") (is.eq meta.oifname "veth-wan-b") (is.eq ip6.nexthdr protocols)
(is.eq th.dport (if rule6.port != null then rule6.port else rule.port)) (is.eq ip6.daddr rule6.address) masquerade ]
])
(builtins.filter (x: !x.inVpn && (x.tcp || x.udp) && dnatRuleMode x == "snat") cfg.dnatRules));
logPrefix = "wan ";
inetInboundWanRules = with notnft.dsl; with payload; [
# DHCP
[(is.eq meta.nfproto (x: x.ipv4)) (is.eq udp.dport 68) accept]
[(is.eq meta.nfproto (x: x.ipv6)) (is.eq udp.dport 546) accept]
# igmp, used for setting up multicast groups
[(is.eq ip.protocol (f: f.igmp)) accept]
# accept router solicitations and advertisements (RS/RA)
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: with f; set [ nd-router-solicit nd-router-advert ])) accept]
# accept neighbor solicitations and advertisements (NS/NA)
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.code (f: f.no-route))
(is.eq icmpv6.type (f: with f; set [ nd-neighbor-solicit nd-neighbor-advert ]))
accept]
# SSH
[(is.eq th.dport 22) accept]
];
};
};
router.networkNamespaces.default.rules = [
{ ipv6 = false; extraArgs = [ "fwmark" wan_table "table" wan_table ]; }
{ ipv6 = true; extraArgs = [ "fwmark" wan_table "table" wan_table ]; }
# don't add vpn_table as it should be the default
{ ipv6 = false; extraArgs = [ "fwmark" iot_table "table" iot_table ]; }
{ ipv6 = true; extraArgs = [ "fwmark" iot_table "table" iot_table ]; }
] ++ builtins.concatLists (map (rule: let
table = if rule.inVpn then 0 else wan_table;
forEachPort = func: port:
if builtins.isInt port then [ (func port) ]
else if port?set then builtins.concatLists (map (forEachPort func) port.set)
else if port?range.min then let inherit (port.range) min max; in [ (func "${toString min}-${toString max}") ]
else if port?range then let max = builtins.elemAt port.range 1; min = builtins.head port.range; in [ (func "${toString min}-${toString max}" ) ]
else throw "Unsupported expr: ${builtins.toJSON port}";
gen = len: proto: tgt:
forEachPort
(port: [ "from" "${tgt.address}/${toString len}" "ipproto" proto "sport" port "table" table ])
(if tgt.port == null then rule.port else tgt.port);
in lib.optionals (rule.tcp && rule.target4 != null) (map (x: { ipv6 = false; extraArgs = x; }) (gen 32 "tcp" rule.target4))
++ lib.optionals (rule.udp && rule.target4 != null) (map (x: { ipv6 = false; extraArgs = x; }) (gen 32 "udp" rule.target4))
++ lib.optionals (rule.tcp && rule.target6 != null) (map (x: { ipv6 = true; extraArgs = x; }) (gen 128 "tcp" rule.target6))
++ lib.optionals (rule.udp && rule.target6 != null) (map (x: { ipv6 = true; extraArgs = x; }) (gen 128 "udp" rule.target6))
) (builtins.filter (x: (x.tcp || x.udp) && dnatRuleMode x == "rule") cfg.dnatRules));
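# As an illustration (hypothetical rule, not part of this config): a non-vpn forward of
# tcp port 25565 to 192.168.1.2 in "rule" mode should produce roughly
#   ip rule add from 192.168.1.2/32 ipproto tcp sport 25565 table <wan_table>
# with the ipv6 variant using a /128 source prefix; forEachPort above expands port sets
# into one such rule per element, and ranges into a single min-max sport.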
networking.wireguard.interfaces.wg0 = cfg.wireguard // {
socketNamespace = "wan";
interfaceNamespace = "init";
};
networking.resolvconf.extraConfig = ''
name_servers="${mainNetnsAddr4} ${mainNetnsAddr6}"
'';
users.users.${config.common.mainUsername}.extraGroups = [ config.services.unbound.group ];
services.unbound = {
enable = true;
package = pkgs.unbound-with-systemd.override {
stdenv = pkgs.ccacheStdenv;
withPythonModule = true;
python = unbound-python;
};
localControlSocketPath = "/run/unbound/unbound.ctl";
# we override resolvconf above manually
resolveLocalQueries = false;
settings = {
server = {
interface = [ mainNetnsAddr4 mainNetnsAddr6 gatewayAddr4 gatewayAddr6 ];
access-control = [ "${netnsCidr4} allow" "${netnsCidr6} allow" "${netCidr4} allow" "${netCidr6} allow" ];
aggressive-nsec = true;
do-ip6 = true;
module-config = ''"validator python iterator"'';
local-zone = [
''"local." static''
''"${cfg.domainName}." typetransparent''
];
local-data = builtins.concatLists (map (domain:
[
''"${domain}. A ${serverAddress4}"''
''"${domain}. AAAA ${serverAddress6}"''
]) hosted-domains);
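# e.g. a hypothetical hosted domain "git.example.com" expands to the two records
# "git.example.com. A <serverAddress4>" and "git.example.com. AAAA <serverAddress6>"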
};
python.python-script = toString ./avahi-resolver-v2.py;
remote-control.control-enable = true;
};
};
networking.hosts."${serverAddress4}" = hosted-domains;
networking.hosts."${serverAddress6}" = hosted-domains;
systemd.services.unbound = {
environment.PYTHONPATH = "${unbound-python}/${unbound-python.sitePackages}";
environment.MDNS_ACCEPT_NAMES = "^.*\\.local\\.$";
# load vpn_domains.json and vpn_ips.json, as well as unvpn_domains.json and unvpn_ips.json
# resolve the domains, merge the results into the ip lists, and add everything to the nftables sets
environment.NFT_QUERIES = "vpn:force_vpn4,force_vpn6;unvpn:force_unvpn4,force_unvpn6";
# it needs to run after br0's nftables service, since that service creates the sets this populates
after = [ "nftables-br0.service" ];
wants = [ "nftables-br0.service" ];
# allow it to call nft
serviceConfig.AmbientCapabilities = [ "CAP_NET_ADMIN" ];
};
systemd.services.update-rkn-blacklist = {
# fetch vpn_ips.json and vpn_domains.json for unbound
script = ''
BLACKLIST=$(${pkgs.coreutils}/bin/mktemp) || exit 1
${pkgs.curl}/bin/curl "https://reestr.rublacklist.net/api/v2/ips/json/" -o "$BLACKLIST" || exit 1
chown unbound:unbound "$BLACKLIST" && mv "$BLACKLIST" /var/lib/unbound/vpn_ips.json
${pkgs.curl}/bin/curl "https://reestr.rublacklist.net/api/v2/domains/json/" -o "$BLACKLIST" || exit 1
chown unbound:unbound "$BLACKLIST" && mv "$BLACKLIST" /var/lib/unbound/vpn_domains.json
'';
serviceConfig = {
Type = "oneshot";
};
};
systemd.timers.update-rkn-blacklist = {
wantedBy = [ "timers.target" ];
partOf = [ "update-rkn-blacklist.service" ];
# fetch once a day at a randomized time to reduce load on the blacklist server
timerConfig.OnCalendar = [ "*-*-* 00:00:00" ]; # every day
timerConfig.RandomizedDelaySec = 43200; # execute at random time in the first 12 hours
};
# run an extra sshd so we can connect even if forwarding/routing between namespaces breaks
# i don't want to connect by uart each time something goes wrong
systemd.services.sshd-wan = {
description = "SSH Daemon (WAN)";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "netns-wan.service" ];
bindsTo = [ "netns-wan.service" ];
stopIfChanged = false;
path = with pkgs; [ gawk config.programs.ssh.package ];
environment.LD_LIBRARY_PATH = config.system.nssModules.path;
restartTriggers = [ config.environment.etc."ssh/sshd_config".source ];
preStart = config.systemd.services.sshd.preStart;
serviceConfig = {
ExecStart = "${config.programs.ssh.package}/bin/sshd -D -f /etc/ssh/sshd_config";
KillMode = "process";
Restart = "always";
Type = "simple";
NetworkNamespacePath = "/var/run/netns/wan";
};
};
services.printing = {
enable = true;
allowFrom = [ "localhost" netCidr4 netCidr6 ];
browsing = true;
clientConf = ''
ServerName router.local
'';
defaultShared = true;
drivers = [ pkgs.hplip ];
startWhenNeeded = false;
};
services.avahi = {
enable = true;
hostName = "router";
allowInterfaces = [ "br0" ];
publish = {
enable = true;
addresses = true;
domain = true;
userServices = true;
};
};
# it takes a stupidly long time when done via qemu
# (also it's supposed to be disabled by default but it was enabled for me, why?)
documentation.man.generateCaches = false;
} }


@@ -1,19 +1,156 @@
{ lib { lib
, notnft
, router-lib
, ... }: , ... }:
{ {
options.router-settings = { options.router-settings = {
country_code = lib.mkOption { serverMac = lib.mkOption {
description = "server's mac address";
type = lib.types.str;
};
# TODO: take this from server config
domainName = lib.mkOption {
description = "server's domain name";
type = lib.types.str;
};
vacuumMac = lib.mkOption {
description = "robot vacuum's mac address";
type = lib.types.str; type = lib.types.str;
}; };
network = lib.mkOption { network = lib.mkOption {
description = "network gateway+cidr (ex: 192.168.1.1/24)";
type = router-lib.types.cidr4;
};
network6 = lib.mkOption {
description = "network gateway+cidr6 (ex: fd00:1234:5678:90ab::1/64)";
type = router-lib.types.cidr6;
};
netnsNet = lib.mkOption {
description = "private inter-netns communication network cidr+main netns addr (ex: 192.168.2.1/24)";
type = router-lib.types.cidr4;
};
netnsNet6 = lib.mkOption {
description = "private inter-netns communication network cidr6+main netns addr6 (ex: fd01:ba09:8765:4321::1/64)";
type = router-lib.types.cidr6;
};
wanNetnsAddr = lib.mkOption {
description = "ip to assign to wan netns";
type = router-lib.types.ipv4;
};
wanNetnsAddr6 = lib.mkOption {
description = "ipv6 to assign to wan netns";
type = router-lib.types.ipv6;
};
country_code = lib.mkOption {
description = "wlan country_code (ex: US)";
type = lib.types.str; type = lib.types.str;
}; };
ssid = lib.mkOption { ssid = lib.mkOption {
description = "wlan ssid";
type = lib.types.str; type = lib.types.str;
}; };
wpa_passphrase = lib.mkOption { wpa_passphrase = lib.mkOption {
description = "wlan passphrase";
type = lib.types.str; type = lib.types.str;
}; };
wireguard = lib.mkOption {
description = "wireguard config";
type = lib.types.attrs;
};
dhcpReservations = lib.mkOption {
description = "dhcp reservations (ipv4)";
default = [ ];
type = lib.types.listOf (lib.types.submodule {
options.ipAddress = lib.mkOption {
type = router-lib.types.ipv4;
description = "device's ip address";
};
options.macAddress = lib.mkOption {
type = lib.types.str;
description = "device's mac address";
};
});
};
dhcp6Reservations = lib.mkOption {
description = "dhcp reservations (ipv6)";
default = [ ];
type = lib.types.listOf (lib.types.submodule {
options.ipAddress = lib.mkOption {
type = router-lib.types.ipv6;
description = "device's ip address";
};
options.macAddress = lib.mkOption {
type = lib.types.str;
description = "device's mac address";
};
});
};
dnatRules = lib.mkOption {
description = "dnat (port forwarding) rules (see the commented example after this option)";
default = [ ];
type = lib.types.listOf (lib.types.submodule {
options.inVpn = lib.mkOption {
type = lib.types.bool;
default = false;
description = "whether this is a vpn port forward";
};
options.mode = lib.mkOption {
type = lib.types.str;
default = "";
description = ''
forward mode.
snat = snat to router ip so routing is always correct; this mangles source ip and may not be desirable
mark = change ct mark if the sport/saddr match the target
rule = add an ip rule that does the above
none = do nothing
default = snat for target=router, mark otherwise
'';
};
# at least one of target4/target6 must be set
options.port = lib.mkOption {
type = notnft.types.expression;
description = "source port (nft expr)";
};
options.target4 = lib.mkOption {
default = null;
type = with lib.types; nullOr (submodule {
options.address = lib.mkOption {
type = router-lib.types.ipv4;
description = "ipv4 address";
};
options.port = lib.mkOption {
type = nullOr int;
description = "target port";
default = null;
};
});
description = "port forwarding target (ipv4)";
};
options.target6 = lib.mkOption {
default = null;
type = with lib.types; nullOr (submodule {
options.address = lib.mkOption {
type = router-lib.types.ipv6;
description = "ipv6 address";
};
options.port = lib.mkOption {
type = nullOr int;
description = "target port";
default = null;
};
});
description = "port forwarding target (ipv6)";
};
options.tcp = lib.mkOption {
type = lib.types.bool;
description = "whether to forward tcp";
};
options.udp = lib.mkOption {
type = lib.types.bool;
description = "whether to forward udp";
};
});
};
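# A minimal sketch of a dnatRules entry (hypothetical values):
#   dnatRules = [ {
#     port = 25565;                            # source port; a plain int works as an nft expression
#     tcp = true; udp = false;
#     target4 = { address = "192.168.1.2"; };  # target port defaults to the source port
#     # inVpn defaults to false (forward from wan); mode defaults to snat for the
#     # router itself and mark for everything else
#   } ];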
}; };
} }


@@ -1,5 +1,7 @@
{ config, lib, ... }: { config, lib, ... }:
# common impermanence config for all of my hosts
let let
cfg = config.impermanence; cfg = config.impermanence;
in { in {
@@ -22,7 +24,7 @@ in {
description = "Extra directories to persist"; description = "Extra directories to persist";
}; };
files = mkOption { files = mkOption {
type = with types; listOf path; type = with types; listOf (either path attrs);
default = [ ]; default = [ ];
description = "Extra files to persist"; description = "Extra files to persist";
}; };
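# with this change an entry may be either a plain path or an attrset in impermanence's
# own format, e.g. (sketch): { file = /etc/some.key; parentDirectory.mode = "0700"; }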
@@ -60,71 +62,76 @@ in {
{ directory = /var/lib/systemd; user = "root"; group = "root"; mode = "0755"; } { directory = /var/lib/systemd; user = "root"; group = "root"; mode = "0755"; }
{ directory = /var/tmp; user = "root"; group = "root"; mode = "1777"; } { directory = /var/tmp; user = "root"; group = "root"; mode = "1777"; }
{ directory = /var/spool; user = "root"; group = "root"; mode = "0777"; } { directory = /var/spool; user = "root"; group = "root"; mode = "0777"; }
] ++ (lib.optionals cfg.persistTmp [ ] ++ lib.optionals cfg.persistTmp [
{ directory = /tmp; user = "root"; group = "root"; mode = "1777"; } { directory = /tmp; user = "root"; group = "root"; mode = "1777"; }
]) ++ (lib.optionals config.services.mullvad-vpn.enable [ ] ++ lib.optionals config.services.mullvad-vpn.enable [
{ directory = /etc/mullvad-vpn; user = "root"; group = "root"; mode = "0700"; } { directory = /etc/mullvad-vpn; user = "root"; group = "root"; mode = "0700"; }
{ directory = /var/cache/mullvad-vpn; user = "root"; group = "root"; mode = "0755"; } { directory = /var/cache/mullvad-vpn; user = "root"; group = "root"; mode = "0755"; }
]) ++ (lib.optionals config.virtualisation.libvirtd.enable ([ ] ++ lib.optionals config.virtualisation.libvirtd.enable ([
# { directory = /var/cache/libvirt; user = "root"; group = "root"; mode = "0755"; } # { directory = /var/cache/libvirt; user = "root"; group = "root"; mode = "0755"; }
{ directory = /var/lib/libvirt; user = "root"; group = "root"; mode = "0755"; } { directory = /var/lib/libvirt; user = "root"; group = "root"; mode = "0755"; }
] ++ (lib.optionals config.virtualisation.libvirtd.qemu.swtpm.enable [ ] ++ lib.optionals config.virtualisation.libvirtd.qemu.swtpm.enable [
{ directory = /var/lib/swtpm-localca; user = "root"; group = "root"; mode = "0750"; } { directory = /var/lib/swtpm-localca; user = "root"; group = "root"; mode = "0750"; }
]))) ++ (lib.optionals config.networking.wireless.iwd.enable [ ]) ++ lib.optionals config.networking.wireless.iwd.enable [
{ directory = /var/lib/iwd; user = "root"; group = "root"; mode = "0700"; } { directory = /var/lib/iwd; user = "root"; group = "root"; mode = "0700"; }
]) ++ (lib.optionals (builtins.any (x: x.useDHCP != false) (builtins.attrValues config.networking.interfaces) && config.networking.useDHCP) [ ] ++ lib.optionals (builtins.any (x: x.useDHCP != false) (builtins.attrValues config.networking.interfaces) && config.networking.useDHCP) [
{ directory = /var/db/dhcpcd; user = "root"; group = "root"; mode = "0755"; } { directory = /var/db/dhcpcd; user = "root"; group = "root"; mode = "0755"; }
]) ++ (lib.optionals config.services.gitea.enable [ ] ++ lib.optionals config.services.gitea.enable [
{ directory = /var/lib/gitea; user = "gitea"; group = "gitea"; mode = "0755"; } { directory = /var/lib/gitea; user = "gitea"; group = "gitea"; mode = "0755"; }
]) ++ (lib.optionals config.services.matrix-synapse.enable [ ] ++ lib.optionals config.services.matrix-synapse.enable [
{ directory = /var/lib/matrix-synapse; user = "matrix-synapse"; group = "matrix-synapse"; mode = "0700"; } { directory = /var/lib/matrix-synapse; user = "matrix-synapse"; group = "matrix-synapse"; mode = "0700"; }
]) ++ (lib.optionals config.services.heisenbridge.enable [ ] ++ lib.optionals config.services.heisenbridge.enable [
{ directory = /var/lib/heisenbridge; user = "heisenbridge"; group = "heisenbridge"; mode = "0755"; } { directory = /var/lib/heisenbridge; user = "heisenbridge"; group = "heisenbridge"; mode = "0755"; }
]) ++ (lib.optionals config.services.murmur.enable [ ] ++ lib.optionals config.services.murmur.enable [
{ directory = /var/lib/murmur; user = "murmur"; group = "murmur"; mode = "0700"; } { directory = /var/lib/murmur; user = "murmur"; group = "murmur"; mode = "0700"; }
]) ++ (lib.optionals config.services.nextcloud.enable [ ] ++ lib.optionals config.services.nextcloud.enable [
{ directory = /var/lib/nextcloud; user = "nextcloud"; group = "nextcloud"; mode = "0750"; } { directory = /var/lib/nextcloud; user = "nextcloud"; group = "nextcloud"; mode = "0750"; }
]) ++ (lib.optionals config.services.botamusique.enable [ ] ++ lib.optionals config.services.botamusique.enable [
{ directory = /var/lib/private/botamusique; user = "root"; group = "root"; mode = "0750"; } { directory = /var/lib/private/botamusique; user = "root"; group = "root"; mode = "0750"; }
]) ++ (lib.optionals config.security.acme.acceptTerms [ ] ++ lib.optionals config.security.acme.acceptTerms [
{ directory = /var/lib/acme; user = "acme"; group = "acme"; mode = "0755"; } { directory = /var/lib/acme; user = "acme"; group = "acme"; mode = "0755"; }
]) ++ (lib.optionals config.services.printing.enable [ ] ++ lib.optionals config.services.printing.enable [
{ directory = /var/lib/cups; user = "root"; group = "root"; mode = "0755"; } { directory = /var/lib/cups; user = "root"; group = "root"; mode = "0755"; }
]) ++ (lib.optionals config.services.fail2ban.enable [ ] ++ lib.optionals config.services.fail2ban.enable [
{ directory = /var/lib/fail2ban; user = "fail2ban"; group = "fail2ban"; mode = "0750"; } { directory = /var/lib/fail2ban; user = "fail2ban"; group = "fail2ban"; mode = "0750"; }
]) ++ (lib.optionals config.services.opendkim.enable [ ] ++ lib.optionals config.services.opendkim.enable [
{ directory = /var/lib/opendkim; user = "opendkim"; group = "opendkim"; mode = "0700"; } { directory = /var/lib/opendkim; user = "opendkim"; group = "opendkim"; mode = "0700"; }
]) ++ (lib.optionals config.services.pleroma.enable [ ] ++ lib.optionals config.services.pleroma.enable [
{ directory = /var/lib/pleroma; user = "pleroma"; group = "pleroma"; mode = "0700"; } { directory = /var/lib/pleroma; user = "pleroma"; group = "pleroma"; mode = "0700"; }
]) ++ (lib.optionals config.services.postfix.enable [ ] ++ lib.optionals config.services.postfix.enable [
{ directory = /var/lib/postfix; user = "root"; group = "root"; mode = "0755"; } { directory = /var/lib/postfix; user = "root"; group = "root"; mode = "0755"; }
]) ++ (lib.optionals config.services.postgresql.enable [ ] ++ lib.optionals config.services.postgresql.enable [
{ directory = /var/lib/postgresql; user = "postgres"; group = "postgres"; mode = "0755"; } { directory = /var/lib/postgresql; user = "postgres"; group = "postgres"; mode = "0755"; }
]) ++ (lib.optionals config.services.unbound.enable [ ] ++ lib.optionals config.services.unbound.enable [
{ directory = /var/lib/unbound; user = "unbound"; group = "unbound"; mode = "0755"; } { directory = /var/lib/unbound; user = "unbound"; group = "unbound"; mode = "0755"; }
]) ++ (lib.optionals config.services.searx.enable [ ] ++ lib.optionals config.services.searx.enable [
{ directory = /var/lib/searx; user = "searx"; group = "searx"; mode = "0700"; } { directory = /var/lib/searx; user = "searx"; group = "searx"; mode = "0700"; }
]) ++ (lib.optionals config.services.roundcube.enable [ ] ++ lib.optionals config.services.roundcube.enable [
{ directory = /var/lib/roundcube; user = "roundcube"; group = "roundcube"; mode = "0700"; } { directory = /var/lib/roundcube; user = "roundcube"; group = "roundcube"; mode = "0700"; }
]) ++ (lib.optionals config.services.rspamd.enable [ ] ++ lib.optionals config.services.rspamd.enable [
{ directory = /var/lib/rspamd; user = "rspamd"; group = "rspamd"; mode = "0700"; } { directory = /var/lib/rspamd; user = "rspamd"; group = "rspamd"; mode = "0700"; }
]) ++ (lib.optionals ( ] ++ lib.optionals (config.services.redis.servers.rspamd.enable or false) [
(builtins.hasAttr "rspamd" config.services.redis.servers)
&& (builtins.hasAttr "enable" config.services.redis.servers.rspamd)
&& config.services.redis.servers.rspamd.enable
) [
{ directory = /var/lib/redis-rspamd; user = "redis-rspamd"; group = "redis-rspamd"; mode = "0700"; } { directory = /var/lib/redis-rspamd; user = "redis-rspamd"; group = "redis-rspamd"; mode = "0700"; }
]) ++ (lib.optionals config.services.dovecot2.enable [ ] ++ lib.optionals config.services.dovecot2.enable [
{ directory = /var/lib/dhparams; user = "root"; group = "root"; mode = "0755"; } { directory = /var/lib/dhparams; user = "root"; group = "root"; mode = "0755"; }
{ directory = /var/lib/dovecot; user = "root"; group = "root"; mode = "0755"; } { directory = /var/lib/dovecot; user = "root"; group = "root"; mode = "0755"; }
]) ++ (lib.optionals config.security.sudo.enable [ ] ++ lib.optionals config.security.sudo.enable [
{ directory = /var/db/sudo/lectured; user = "root"; group = "root"; mode = "0700"; } { directory = /var/db/sudo/lectured; user = "root"; group = "root"; mode = "0700"; }
]) ++ cfg.directories); ] ++ cfg.directories);
files = map toString ([ files = map (x:
if builtins.isPath x then toString x
else if builtins.isAttrs x && x?file && builtins.isPath x.file then x // { file = toString x.file; }
else x) ([
# hardware-related # hardware-related
/etc/adjtime /etc/adjtime
# needed at least for /var/log # needed at least for /var/log
/etc/machine-id /etc/machine-id
] ++ lib.optionals config.services.openssh.enable [
# keep ssh fingerprints stable
/etc/ssh/ssh_host_ed25519_key
/etc/ssh/ssh_host_ed25519_key.pub
/etc/ssh/ssh_host_rsa_key
/etc/ssh/ssh_host_rsa_key.pub
] ++ cfg.files); ] ++ cfg.files);
}; };
}; };


@@ -1,11 +0,0 @@
{ lib
, config
, ... }:
let
cfg = config.router;
in {
services.avahi.enable = lib.mkDefault true;
services.avahi.publish.enable = lib.mkDefault true;
services.avahi.allowInterfaces = lib.mkDefault (builtins.attrNames cfg.interfaces);
}


@@ -1,60 +0,0 @@
{ lib
, config
, pkgs
, utils
, ... }:
let
cfg = config.router;
in {
config = lib.mkIf cfg.enable {
systemd.services = lib.mapAttrs' (interface: icfg: let
cfg = icfg.ipv6.corerad;
escapedInterface = utils.escapeSystemdPath interface;
settingsFormat = pkgs.formats.toml {};
configFile = if cfg.configFile != null then cfg.configFile else settingsFormat.generate "corerad-${escapedInterface}.toml" ({
interfaces = [
(rec {
name = interface;
monitor = false;
advertise = true;
managed = icfg.ipv6.kea.enable && builtins.any (x: lib.hasInfix ":" x.address) icfg.ipv6.addresses;
other_config = managed && cfg.interfaceSettings.managed or true;
prefix = map ({ address, prefixLength, coreradSettings, ... }: {
prefix = "${address}/${toString prefixLength}";
autonomous = !(other_config && cfg.interfaceSettings.other_config or true);
} // coreradSettings) icfg.ipv6.addresses;
route = builtins.concatLists (map ({ address, prefixLength, gateways, ... }: map (gateway: {
prefix = "${if builtins.isString gateway then gateway else gateway.address}/${toString (if gateway.prefixLength or null != null then gateway.prefixLength else prefixLength)}";
} // (gateway.coreradSettings or { })) gateways) icfg.ipv6.addresses);
rdnss = builtins.concatLists (map ({ dns, ... }: map (dns: {
servers = if builtins.isString dns then dns else dns.address;
} // (dns.coreradSettings or { })) dns) icfg.ipv6.addresses);
} // cfg.interfaceSettings)
];
} // cfg.settings);
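# The generated TOML should end up looking roughly like this (sketch, hypothetical prefix):
#   [[interfaces]]
#   name = "br0"
#   advertise = true
#   managed = false
#   other_config = false
#     [[interfaces.prefix]]
#     prefix = "fd01:2345:6789:abcd::/64"
#     autonomous = true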
package = pkgs.corerad;
in {
name = "corerad-${escapedInterface}";
value = lib.mkIf icfg.ipv6.corerad.enable {
description = "CoreRAD IPv6 NDP RA daemon (${interface})";
after = [ "network.target" "sys-subsystem-net-devices-${escapedInterface}.device" ];
bindsTo = [ "sys-subsystem-net-devices-${escapedInterface}.device" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
LimitNPROC = 512;
LimitNOFILE = 1048576;
CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_RAW";
AmbientCapabilities = "CAP_NET_ADMIN CAP_NET_RAW";
NoNewPrivileges = true;
DynamicUser = true;
Type = "notify";
NotifyAccess = "main";
ExecStart = "${lib.getBin package}/bin/corerad -c=${configFile}";
Restart = "on-failure";
RestartKillSignal = "SIGHUP";
};
};
}) cfg.interfaces;
};
}


@@ -1,387 +0,0 @@
{ lib
, config
, pkgs
, ... }:
let
cfg = config.router;
in {
imports = [
/*./avahi.nix*/
./hostapd.nix
./kea.nix
./radvd.nix
./corerad.nix
];
options.router = {
enable = lib.mkEnableOption "router config";
interfaces = lib.mkOption {
default = { };
description = "All interfaces managed by the router";
type = lib.types.attrsOf (lib.types.submodule {
options.matchUdevAttrs = lib.mkOption {
default = { };
description = lib.mdDoc ''
When a device with those attrs is detected by udev, the device is automatically renamed to this interface name.
See [kernel docs](https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/Documentation/ABI/testing/sysfs-class-net?h=linux-6.3.y) for the list of attrs available.
'';
example = lib.literalExpression { address = "11:22:33:44:55:66"; };
type = lib.types.attrs;
};
options.bridge = lib.mkOption {
description = "Add this device to this bridge";
default = null;
type = with lib.types; nullOr str;
};
options.macAddress = lib.mkOption {
description = "Change this device's mac address to this";
default = null;
type = with lib.types; nullOr str;
};
options.hostapd = lib.mkOption {
description = "hostapd options";
default = { };
type = lib.types.submodule {
options.enable = lib.mkEnableOption "hostapd";
options.settings = lib.mkOption {
description = "hostapd config";
default = { };
type = lib.types.attrs;
};
};
};
options.dhcpcd = lib.mkOption {
description = "dhcpcd options";
default = { };
type = lib.types.submodule {
options.enable = lib.mkEnableOption "dhcpcd";
options.extraConfig = lib.mkOption {
description = "dhcpcd text config";
default = "";
type = lib.types.lines;
};
};
};
options.ipv4 = lib.mkOption {
description = "IPv4 config";
default = { };
type = lib.types.submodule {
options.addresses = lib.mkOption {
description = "Device's IPv4 addresses";
default = [ ];
type = lib.types.listOf (lib.types.submodule {
options.address = lib.mkOption {
description = "IPv4 address";
type = lib.types.str;
};
options.prefixLength = lib.mkOption {
description = "IPv4 prefix length";
type = lib.types.int;
};
options.assign = lib.mkOption {
description = "Whether to assign this address to the device. Default: no if the first octet is zero, yes otherwise.";
type = with lib.types; nullOr bool;
default = null;
};
options.gateways = lib.mkOption {
description = "IPv4 gateway addresses (optional)";
default = [ ];
type = with lib.types; listOf str;
};
options.dns = lib.mkOption {
description = "IPv4 DNS servers associated with this device";
type = with lib.types; listOf str;
default = [ ];
};
options.keaSettings = lib.mkOption {
default = { };
type = (pkgs.formats.json {}).type;
example = lib.literalExpression {
pools = [ { pool = "192.168.1.15 - 192.168.1.200"; } ];
option-data = [ {
name = "domain-name-servers";
code = 6;
csv-format = true;
space = "dhcp4";
data = "8.8.8.8, 8.8.4.4";
} ];
};
description = "Kea IPv4 prefix-specific settings";
};
});
};
options.kea = lib.mkOption {
description = "Kea options";
default = { };
type = lib.types.submodule {
options.enable = lib.mkOption {
type = lib.types.bool;
description = "Enable Kea for IPv4";
default = true;
};
options.extraArgs = lib.mkOption {
type = with lib.types; listOf str;
default = [ ];
description = "List of additional arguments to pass to the daemon.";
};
options.configFile = lib.mkOption {
type = with lib.types; nullOr path;
default = null;
description = "Kea config file (takes precedence over settings)";
};
options.settings = lib.mkOption {
default = { };
type = (pkgs.formats.json {}).type;
description = "Kea settings";
};
};
};
};
};
options.ipv6 = lib.mkOption {
description = "IPv6 config";
default = { };
type = lib.types.submodule {
options.addresses = lib.mkOption {
description = "Device's IPv6 addresses";
default = [ ];
type = lib.types.listOf (lib.types.submodule {
options.address = lib.mkOption {
description = "IPv6 address";
type = lib.types.str;
};
options.prefixLength = lib.mkOption {
description = "IPv6 prefix length";
type = lib.types.int;
};
options.assign = lib.mkOption {
description = "Whether to assign this address to the device. Default: no if the first hextet is zero, yes otherwise";
type = with lib.types; nullOr bool;
default = null;
};
options.gateways = lib.mkOption {
description = "IPv6 gateways information (optional)";
default = [ ];
type = with lib.types; listOf (either str (submodule {
options.address = lib.mkOption {
description = "Gateway's IPv6 address";
type = str;
};
options.prefixLength = lib.mkOption {
description = "Gateway's IPv6 prefix length (defaults to interface address's prefix length)";
type = nullOr int;
default = null;
};
options.radvdSettings = lib.mkOption {
default = { };
type = attrsOf (oneOf [ bool str int ]);
example = lib.literalExpression {
AdvRoutePreference = "high";
};
description = "radvd prefix-specific route settings";
};
options.coreradSettings = lib.mkOption {
default = { };
type = (pkgs.formats.toml {}).type;
example = lib.literalExpression {
preference = "high";
};
description = "CoreRAD prefix-specific route settings";
};
}));
};
options.dns = lib.mkOption {
description = "IPv6 DNS servers associated with this device";
type = with lib.types; listOf (either str (submodule {
options.address = lib.mkOption {
description = "DNS server's address";
type = lib.types.str;
};
options.radvdSettings = lib.mkOption {
default = { };
type = attrsOf (oneOf [ bool str int ]);
example = lib.literalExpression { FlushRDNSS = false; };
description = "radvd prefix-specific RDNSS settings";
};
options.coreradSettings = lib.mkOption {
default = { };
type = (pkgs.formats.toml {}).type;
example = lib.literalExpression { lifetime = "auto"; };
description = "CoreRAD prefix-specific RDNSS settings";
};
}));
default = [ ];
};
options.keaSettings = lib.mkOption {
default = { };
type = (pkgs.formats.json {}).type;
example = lib.literalExpression {
pools = [ {
pool = "192.168.1.15 - 192.168.1.200";
} ];
option-data = [ {
name = "dns-servers";
code = 23;
csv-format = true;
space = "dhcp6";
data = "aaaa::, bbbb::";
} ];
};
description = "Kea prefix-specific settings";
};
options.radvdSettings = lib.mkOption {
default = { };
type = with lib.types; attrsOf (oneOf [ bool str int ]);
example = lib.literalExpression {
AdvOnLink = true;
AdvAutonomous = true;
Base6to4Interface = "ppp0";
};
description = "radvd prefix-specific settings";
};
options.coreradSettings = lib.mkOption {
default = { };
type = (pkgs.formats.toml {}).type;
example = lib.literalExpression {
on_link = true;
autonomous = true;
};
description = "CoreRAD prefix-specific settings";
};
});
};
options.kea = lib.mkOption {
description = "Kea options";
default = { };
type = lib.types.submodule {
options.enable = lib.mkEnableOption "Kea for IPv6";
options.extraArgs = lib.mkOption {
type = with lib.types; listOf str;
default = [ ];
description = "List of additional arguments to pass to the daemon.";
};
options.configFile = lib.mkOption {
type = with lib.types; nullOr path;
default = null;
description = "Kea config file (takes precedence over settings)";
};
options.settings = lib.mkOption {
default = { };
type = (pkgs.formats.json {}).type;
description = "Kea settings";
};
};
};
options.radvd = lib.mkOption {
description = "radvd options";
default = { };
type = lib.types.submodule {
options.enable = lib.mkOption {
type = lib.types.bool;
description = "Enable radvd";
default = true;
};
options.interfaceSettings = lib.mkOption {
default = { };
type = with lib.types; attrsOf (oneOf [ bool str int ]);
example = lib.literalExpression {
UnicastOnly = true;
};
description = "radvd interface-specific settings";
};
};
};
options.corerad = lib.mkOption {
description = "CoreRAD options";
default = { };
type = lib.types.submodule {
options.enable = lib.mkEnableOption "CoreRAD (don't forget to disable radvd)";
options.configFile = lib.mkOption {
type = with lib.types; nullOr path;
default = null;
description = "CoreRAD config file (takes precedence over settings)";
};
options.interfaceSettings = lib.mkOption {
default = { };
type = (pkgs.formats.toml {}).type;
description = "CoreRAD interface-specific settings";
};
options.settings = lib.mkOption {
default = { };
type = (pkgs.formats.toml {}).type;
example = lib.literalExpression {
debug.address = "localhost:9430";
debug.prometheus = true;
};
description = "General CoreRAD settings";
};
};
};
};
};
});
};
};
config = lib.mkIf cfg.enable {
environment.systemPackages = with pkgs; [
dig.dnsutils
ethtool
tcpdump
];
# performance tweaks
powerManagement.cpuFreqGovernor = lib.mkDefault "ondemand";
services.irqbalance.enable = lib.mkDefault true;
boot.kernelPackages = lib.mkDefault pkgs.linuxPackages_xanmod;
boot.kernel.sysctl = {
"net.netfilter.nf_log_all_netns" = true;
"net.ipv4.conf.all.forwarding" = true;
"net.ipv4.conf.default.forwarding" = true;
"net.ipv6.conf.all.forwarding" = config.networking.enableIPv6;
"net.ipv6.conf.default.forwarding" = config.networking.enableIPv6;
};
networking.enableIPv6 = lib.mkDefault true;
networking.usePredictableInterfaceNames = true;
networking.firewall.allowPing = lib.mkDefault true;
networking.firewall.rejectPackets = lib.mkDefault false; # drop rather than reject
services.udev.extraRules =
let
devs = lib.filterAttrs (k: v: (v.matchUdevAttrs or { }) != { }) cfg.interfaces;
in lib.mkIf (devs != { })
(builtins.concatStringsSep "\n" (lib.mapAttrsToList (k: v:
let
attrs = lib.mapAttrsToList (k: v: "ATTR{${k}}==${builtins.toJSON (toString v)}") v.matchUdevAttrs;
in ''
SUBSYSTEM=="net", ACTION=="add", ${builtins.concatStringsSep ", " attrs}, NAME="${k}"
'') devs));
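# e.g. (sketch) matchUdevAttrs = { address = "11:22:33:44:55:66"; } on an interface named
# "lan0" produces: SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="11:22:33:44:55:66", NAME="lan0"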
networking.interfaces = builtins.mapAttrs (interface: icfg: {
ipv4.addresses = map
({ address, prefixLength, ... }: { inherit address prefixLength; })
(builtins.filter
(x: x.assign == true || (x.assign == null && (lib.hasPrefix "0." x.address)))
icfg.ipv4.addresses);
ipv6.addresses = map
({ address, prefixLength, ... }: { inherit address prefixLength; })
(builtins.filter
(x: x.assign == true || (x.assign == null && (lib.hasPrefix ":" x.address || lib.hasPrefix "0:" x.address)))
icfg.ipv6.addresses);
} // lib.optionalAttrs (icfg.macAddress != null) {
inherit (icfg) macAddress;
}) cfg.interfaces;
networking.bridges =
builtins.zipAttrsWith
(k: vs: { interfaces = vs; })
(lib.mapAttrsToList
(interface: icfg:
if icfg.bridge != null && !icfg.hostapd.enable then {
${icfg.bridge} = interface;
} else {})
cfg.interfaces);
networking.useDHCP = lib.mkIf (builtins.any (x: x.dhcpcd.enable) (builtins.attrValues cfg.interfaces)) false;
};
}


@@ -1,69 +0,0 @@
{ lib
, config
, pkgs
, utils
, ... }:
let
cfg = config.router;
exitHook = pkgs.writeText "dhcpcd.exit-hook" ''
if [ "$reason" = BOUND -o "$reason" = REBOOT ]; then
# Restart ntpd. We need to restart it to make sure that it
# will actually do something: if ntpd cannot resolve the
# server hostnames in its config file, then it will never do
# anything ever again ("couldn't resolve ..., giving up on
# it"), so we silently lose time synchronisation. This also
# applies to openntpd.
/run/current-system/systemd/bin/systemctl try-reload-or-restart ntpd.service openntpd.service chronyd.service || true
fi
'';
in {
config = lib.mkIf (cfg.enable && builtins.any (x: x.dhcpcd.enable) (builtins.attrValues cfg.interfaces)) {
users.users.dhcpcd = {
isSystemUser = true;
group = "dhcpcd";
};
users.groups.dhcpcd = {};
environment.systemPackages = [ pkgs.dhcpcd ];
environment.etc."dhcpcd.exit-hook".source = exitHook;
powerManagement.resumeCommands = builtins.concatStringsSep "\n" (lib.mapAttrsToList (interface: icfg: ''
# Tell dhcpcd to rebind its interfaces if it's running.
/run/current-system/systemd/bin/systemctl reload "dhcpcd-${utils.escapeSystemdPath interface}.service"
'') cfg.interfaces);
systemd.services = lib.mapAttrs' (interface: icfg: let
escapedInterface = utils.escapeSystemdPath interface;
dhcpcdConf = pkgs.writeText "dhcpcd.conf" ''
hostname
option domain_name_servers, domain_name, domain_search, host_name
option classless_static_routes, ntp_servers, interface_mtu
nohook lookup-hostname
denyinterfaces ve-* vb-* lo peth* vif* tap* tun* virbr* vnet* vboxnet* sit*
allowinterfaces ${interface}
waitip
${icfg.dhcpcd.extraConfig}
'';
in {
name = "dhcpcd-${escapedInterface}";
value = lib.mkIf icfg.dhcpcd.enable {
description = "DHCP Client";
wantedBy = [ "multi-user.target" "network-online.target" ];
wants = [ "network.target" ];
before = [ "network-online.target" ];
after = [ "sys-subsystem-net-devices-${escapedInterface}.device" ];
bindsTo = [ "sys-subsystem-net-devices-${escapedInterface}.device" ];
restartTriggers = [ exitHook ];
stopIfChanged = false;
path = [ pkgs.dhcpcd pkgs.nettools config.networking.resolvconf.package ];
unitConfig.ConditionCapability = "CAP_NET_ADMIN";
serviceConfig = {
Type = "forking";
PIDFile = "/run/dhcpcd/${interface}.pid";
RuntimeDirectory = "dhcpcd";
ExecStart = "@${pkgs.dhcpcd}/sbin/dhcpcd dhcpcd --quiet --config ${dhcpcdConf} ${lib.escapeShellArg interface}";
ExecReload = "${pkgs.dhcpcd}/sbin/dhcpcd --rebind";
Restart = "always";
};
};
}) cfg.interfaces;
};
}


@@ -1,84 +0,0 @@
{ lib
, config
, pkgs
, utils
, ... }:
let
cfg = config.router;
in {
config = lib.mkIf (cfg.enable && builtins.any (x: x.hostapd.enable) (builtins.attrValues cfg.interfaces)) {
environment.systemPackages = with pkgs; [ hostapd wirelesstools ];
services.udev.packages = with pkgs; [ crda ];
hardware.wirelessRegulatoryDatabase = true;
systemd.services = lib.mapAttrs' (interface: icfg: let
escapedInterface = utils.escapeSystemdPath interface;
compileValue = k: v:
if builtins.isBool v then (if v then "1" else "0")
else if builtins.isList v then builtins.concatStringsSep " " (map (compileValue k) v)
else if k == "ssid2" then "P${builtins.toJSON (toString v)}"
else toString v;
compileSettings = x:
let
y = builtins.removeAttrs x [ "ssid" ];
z = if y?ssid2 then y else y // { ssid2 = x.ssid; };
in
if !x?ssid && !x?ssid2 then
throw "Must specify ssid for hostapd"
else if x.wpa_key_mgmt == defaultSettings.wpa_key_mgmt && !x?wpa_passphrase && !x?sae_password then
throw "Either change authentication methods or specify wpa_passphrase for hostapd"
else builtins.concatStringsSep "\n" (lib.mapAttrsToList (k: v: "${k}=${compileValue k v}") z);
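# e.g. (sketch) an interface "wlan0" with ssid = "mynet" and the defaults below compiles
# to lines such as:
#   interface=wlan0
#   ssid2=P"mynet"
#   ieee80211n=1
#   wpa_key_mgmt=WPA-PSK WPA-PSK-SHA256 SAE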
forceSettings = {
inherit interface;
};
defaultSettings = {
driver = "nl80211";
logger_syslog = -1;
logger_syslog_level = 2;
logger_stdout = -1;
logger_stdout_level = 2;
# not sure if enabling it when it isn't supported is gonna break anything?
ieee80211n = true; # wifi 4
ieee80211ac = true; # wifi 5
ieee80211ax = true; # wifi 6
ieee80211be = true; # wifi 7
ctrl_interface = "/run/hostapd";
disassoc_low_ack = true;
wmm_enabled = true;
uapsd_advertisement_enabled = true;
utf8_ssid = true;
sae_require_mfp = true;
ieee80211w = 1; # optional mfp
sae_pwe = 2;
auth_algs = 1;
wpa = 2;
wpa_pairwise = [ "CCMP" ];
wpa_key_mgmt = [ "WPA-PSK" "WPA-PSK-SHA256" "SAE" ];
okc = true;
group_mgmt_cipher = "AES-128-CMAC";
qos_map_set = "0,0,2,16,1,1,255,255,18,22,24,38,40,40,44,46,48,56"; # from openwrt
# ap_isolate = true; # to isolate clients
} // lib.optionalAttrs (icfg.hostapd.settings?country_code) {
ieee80211d = true;
} // lib.optionalAttrs (icfg.bridge != null) {
inherit (icfg) bridge;
};
settings = defaultSettings // icfg.hostapd.settings // forceSettings;
configFile = pkgs.writeText "hostapd.conf" (compileSettings settings);
in {
name = "hostapd-${escapedInterface}";
value = lib.mkIf icfg.hostapd.enable {
description = "hostapd wireless AP (${interface})";
path = [ pkgs.hostapd ];
after = [ "sys-subsystem-net-devices-${escapedInterface}.device" ];
bindsTo = [ "sys-subsystem-net-devices-${escapedInterface}.device" ];
requiredBy = [ "network-link-${escapedInterface}.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = "${pkgs.hostapd}/bin/hostapd ${configFile}";
Restart = "always";
};
};
}) cfg.interfaces;
};
}


@@ -1,255 +0,0 @@
{ lib
, config
, pkgs
, utils
, ... }:
let
cfg = config.router;
# add x to last component of an ipv4
addToLastComp4 = x: split:
let
n0 = lib.last split;
nx = n0 + x;
n = if nx >= 255 then 254 else if nx < 2 then 2 else nx;
in
if x > 0 && n0 >= 255 then null
else if x < 0 && n0 < 2 then null
else lib.init split ++ [ n ];
# add x to last component of an ipv6
addToLastComp6 = x: split:
let
n0 = lib.last split;
nx = n0 + x;
n = if nx >= 65535 then 65534 else if nx <= 2 then 2 else nx;
in
if x > 0 && n0 >= 65535 then null
else if x < 0 && n0 < 2 then null
else lib.init split ++ [ n ];
# generate an integer of `total` bits with `set` most significant bits set
genMask = total: set:
parseBin (builtins.concatStringsSep "" (builtins.genList (i: if i < set then "1" else "0") total));
# generate subnet mask for ipv4
genMask4 = len:
builtins.genList (i: let
len' = len - i * 8;
in
if len' <= 0 then 0
else if len' >= 8 then 255
else genMask 8 len') 4;
# generate subnet mask for ipv6
genMask6 = len:
builtins.genList (i: let
len' = len - i * 16;
in
if len' <= 0 then 0
else if len' >= 16 then 65535
else genMask 16 len') 8;
# invert a mask
invMask4 = map (builtins.bitXor 255);
invMask6 = map (builtins.bitXor 65535);
orMask = lib.zipListsWith builtins.bitOr;
andMask = lib.zipListsWith builtins.bitAnd;
# parses hexadecimal number
parseHex = x: (builtins.fromTOML "x=0x${x}").x;
# parses binary number
parseBin = x: (builtins.fromTOML "x=0b${x}").x;
# finds the longest zero-only sequence
# returns an attrset with maxS (start of the sequence) and max (sequence length)
longestZeroSeq =
builtins.foldl' ({ cur, max, curS, maxS, i }: elem: let self = {
i = i + 1;
cur = if elem == 0 then cur + 1 else 0;
max = if max >= self.cur then max else self.cur;
curS = if self.cur > 0 && cur > 0 then curS else if self.cur > 0 then i else -1;
maxS = if max >= self.cur then maxS else self.curS;
}; in self) { cur = 0; max = 0; curS = -1; maxS = -1; i = 0; };
# parses an IPv4 address
parseIp4 = x: map builtins.fromJSON (lib.splitString "." x);
# serializes an IPv4 address
compIp4 = x: builtins.concatStringsSep "." (map toString x);
# parses an IPv6 address
parseIp6 = x:
let
nzparts = map (x: if x == "" then [] else map parseHex (lib.splitString ":" x)) (lib.splitString "::" x);
in
if builtins.length nzparts == 1 then builtins.head nzparts
else let a = builtins.head nzparts; b = builtins.elemAt nzparts 1; in
a ++ (builtins.genList (_: 0) (8 - builtins.length a - builtins.length b)) ++ b;
# serializes an IPv6 address
compIp6 = x:
let
long = longestZeroSeq x;
joined = builtins.concatStringsSep ":" (builtins.foldl' ({ i, res }: x: {
i = i + 1;
res = res ++ (if i >= long.maxS && i < long.maxS + long.max then [ "" ] else [ (lib.toLower (lib.toHexString x)) ]);
}) { i = 0; res = [ ]; } x).res;
fix = builtins.replaceStrings [":::"] ["::"];
in
fix (fix (fix (fix (fix joined))));
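# quick round-trip sanity check: parseIp6 "fd00::1" == [ 64768 0 0 0 0 0 0 1 ],
# and compIp6 re-compresses the zero run, giving "fd00::1" back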
format = pkgs.formats.json {};
package = pkgs.kea;
commonServiceConfig = {
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
DynamicUser = true;
User = "kea";
ConfigurationDirectory = "kea";
RuntimeDirectory = "kea";
StateDirectory = "kea";
UMask = "0077";
};
in {
config = lib.mkIf cfg.enable (lib.mkMerge [
(let
configs = builtins.mapAttrs (interface: icfg:
let
escapedInterface = utils.escapeSystemdPath interface;
cfg4 = icfg.ipv4.kea;
in if cfg4.configFile != null then cfg4.configFile else (format.generate "kea-dhcp4-${escapedInterface}.conf" {
Dhcp4 = {
valid-lifetime = 4000;
interfaces-config.interfaces = [ interface ];
lease-database = {
type = "memfile";
persist = true;
name = "/var/lib/kea/dhcp4-${escapedInterface}.leases";
};
subnet4 = map ({ address, prefixLength, gateways, dns, keaSettings, ... }:
let
subnetMask = genMask4 prefixLength;
parsed = parseIp4 address;
minIp = andMask subnetMask parsed;
maxIp = orMask (invMask4 subnetMask) parsed;
in {
subnet = "${address}/${toString prefixLength}";
option-data =
lib.optional (dns != [ ]) {
name = "domain-name-servers";
code = 6;
csv-format = true;
space = "dhcp4";
data = builtins.concatStringsSep ", " dns;
}
++ lib.optional (gateways != [ ]) {
name = "routers";
code = 3;
csv-format = true;
space = "dhcp4";
data = builtins.concatStringsSep ", " gateways;
};
pools = let
a = addToLastComp4 16 minIp;
b = addToLastComp4 (-16) parsed;
c = addToLastComp4 16 parsed;
d = addToLastComp4 (-16) maxIp;
in
lib.optional (a != null && b != null && a <= b) { pool = "${compIp4 a}-${compIp4 b}"; }
++ lib.optional (c != null && d != null && c <= d) { pool = "${compIp4 c}-${compIp4 d}"; };
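# Worked example, assuming the interface address 192.168.1.1/24:
#   minIp = 192.168.1.0, maxIp = 192.168.1.255
#   a = .16, b = null (can't go 16 below .1), c = .17, d = .239
#   => a single pool "192.168.1.17-192.168.1.239", leaving the gateway and subnet edges outside the pool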
} // keaSettings) icfg.ipv4.addresses;
} // cfg4.settings;
})) cfg.interfaces;
in {
environment.etc = lib.mapAttrs' (interface: icfg: {
name = "kea/dhcp4-server-${utils.escapeSystemdPath interface}.conf";
value = lib.mkIf (icfg.ipv4.kea.enable && icfg.ipv4.addresses != [ ]) {
source = configs.${interface};
};
}) cfg.interfaces;
systemd.services = lib.mapAttrs' (interface: icfg: let
escapedInterface = utils.escapeSystemdPath interface;
in {
name = "kea-dhcp4-server-${escapedInterface}";
value = lib.mkIf (icfg.ipv4.kea.enable && icfg.ipv4.addresses != [ ]) {
description = "Kea DHCP4 Server (${interface})";
documentation = [ "man:kea-dhcp4(8)" "https://kea.readthedocs.io/en/kea-${package.version}/arm/dhcp4-srv.html" ];
after = [ "network-online.target" "time-sync.target" "sys-subsystem-net-devices-${escapedInterface}.device" ];
bindsTo = [ "sys-subsystem-net-devices-${escapedInterface}.device" ];
wantedBy = [ "multi-user.target" ];
environment = { KEA_PIDFILE_DIR = "/run/kea"; KEA_LOCKFILE_DIR = "/run/kea"; };
restartTriggers = [ configs.${interface} ];
serviceConfig = {
ExecStart = "${package}/bin/kea-dhcp4 -c "
+ lib.escapeShellArgs ([ "/etc/kea/dhcp4-server-${escapedInterface}.conf" ]);
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" "CAP_NET_RAW" ];
CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" "CAP_NET_RAW" ];
} // commonServiceConfig;
};
}) cfg.interfaces;
})
(let
configs = builtins.mapAttrs (interface: icfg:
let
escapedInterface = utils.escapeSystemdPath interface;
cfg6 = icfg.ipv6.kea;
in if cfg6.configFile != null then cfg6.configFile else (format.generate "kea-dhcp6-${escapedInterface}.conf" {
Dhcp6 = {
valid-lifetime = 4000;
preferred-lifetime = 3000;
interfaces-config.interfaces = [ interface ];
lease-database = {
type = "memfile";
persist = true;
name = "/var/lib/kea/dhcp6-${escapedInterface}.leases";
};
subnet6 = map ({ address, prefixLength, dns, keaSettings, ... }:
let
subnetMask = genMask6 prefixLength;
parsed = parseIp6 address;
minIp = andMask subnetMask parsed;
maxIp = orMask (invMask6 subnetMask) parsed;
in {
option-data =
lib.optional (dns != [ ]) {
name = "dns-servers";
code = 23;
csv-format = true;
space = "dhcp6";
data = builtins.concatStringsSep ", " (map (x: if builtins.isString x then x else x.address) dns);
};
subnet = "${address}/${toString prefixLength}";
pools = let
a = addToLastComp6 16 minIp;
b = addToLastComp6 (-16) parsed;
c = addToLastComp6 16 parsed;
d = addToLastComp6 (-16) maxIp;
in
lib.optional (a != null && b != null && a <= b) {
pool = "${compIp6 a}-${compIp6 b}";
} ++ lib.optional (c != null && d != null && c <= d) {
pool = "${compIp6 c}-${compIp6 d}";
};
} // keaSettings) icfg.ipv6.addresses;
} // cfg6.settings;
})) cfg.interfaces;
in {
environment.etc = lib.mapAttrs' (interface: icfg: {
name = "kea/dhcp6-server-${utils.escapeSystemdPath interface}.conf";
value = lib.mkIf (icfg.ipv6.kea.enable && icfg.ipv6.addresses != [ ]) {
source = configs.${interface};
};
}) cfg.interfaces;
systemd.services = lib.mapAttrs' (interface: icfg: let
escapedInterface = utils.escapeSystemdPath interface;
in {
name = "kea-dhcp6-server-${escapedInterface}";
value = lib.mkIf (icfg.ipv6.kea.enable && icfg.ipv6.addresses != [ ]) {
description = "Kea DHCP6 Server (${interface})";
documentation = [ "man:kea-dhcp6(8)" "https://kea.readthedocs.io/en/kea-${package.version}/arm/dhcp6-srv.html" ];
after = [ "network-online.target" "time-sync.target" "sys-subsystem-net-devices-${escapedInterface}.device" ];
bindsTo = [ "sys-subsystem-net-devices-${escapedInterface}.device" ];
wantedBy = [ "multi-user.target" ];
environment = { KEA_PIDFILE_DIR = "/run/kea"; KEA_LOCKFILE_DIR = "/run/kea"; };
restartTriggers = [ configs.${interface} ];
serviceConfig = {
ExecStart = "${package}/bin/kea-dhcp6 -c "
+ lib.escapeShellArgs ([ "/etc/kea/dhcp6-server-${escapedInterface}.conf" ]);
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" "CAP_NET_RAW" ];
CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" "CAP_NET_RAW" ];
} // commonServiceConfig;
};
}) cfg.interfaces;
})
]);
}


@@ -1,81 +0,0 @@
{ lib
, pkgs
, config
, ... }:
let
baseSystem = modules: lib.nixosSystem {
inherit (pkgs) system;
modules = [
({ lib, ... }: {
networking = {
firewall.enable = false;
useDHCP = false;
};
system = {
inherit (config.system) stateVersion;
};
})
] ++ modules;
};
baseServices = builtins.concatLists (map builtins.attrNames (baseSystem [ ]).options.systemd.services.definitions);
baseEtc = builtins.concatLists (map builtins.attrNames (baseSystem [ ]).options.environment.etc.definitions);
cfg = config.multiservice;
in
{
options.multiservice = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule {
options = {
etc = lib.mkOption {
default = { };
type = lib.types.submodule {
options.enable = lib.mkEnableOption {
description = "Copy etc files";
};
options.fixup = lib.mkOption {
default = lib.id;
type = lib.types.function;
description = lib.mdDoc "Function applied to each etc file (must return an attrset with `name` and `value`)";
};
};
};
services = lib.mkOption {
default = { };
type = lib.types.submodule {
options.enable = lib.mkEnableOption {
description = "Copy services";
};
options.fixup = lib.mkOption {
default = lib.id;
type = lib.types.function;
description = "Function applied to each systemd service";
};
};
};
config = lib.mkOption {
description = "nixpkgs instance's config";
default = { };
type = lib.types.attrs;
};
};
});
};
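# Example usage (a sketch with a hypothetical instance name):
#   multiservice.unbound-secondary = {
#     services.enable = true;
#     services.fixup = name: value: value;   # adjust the copied unit here if needed
#     config = { services.unbound.enable = true; };
#   };
# the copied units get the instance name appended, e.g. "unbound-unbound-secondary"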
config = lib.mkIf (cfg != { }) (lib.mkMerge (lib.mapAttrsToList (instName: instCfg:
let
result = baseSystem [ ({ ... }: instCfg.config) ];
in {
systemd.services = lib.mkIf instCfg.services.enable (lib.mkMerge (map
(services: lib.mapAttrs' (name: value: {
name = name + "-" + instName;
value = instCfg.services.fixup name value;
}) (builtins.removeAttrs services baseServices))
result.options.systemd.services.definitions));
environment.etc = lib.mkIf instCfg.etc.enable (lib.mkMerge
(map
(etc:
lib.mapAttrs'
(name: value: instCfg.etc.fixup { inherit name value; })
(builtins.removeAttrs etc baseEtc))
result.options.environment.etc.definitions));
}) cfg));
}


@@ -1,68 +0,0 @@
{ lib
, config
, pkgs
, utils
, ... }:
let
cfg = config.router;
in {
config = lib.mkIf (cfg.enable && builtins.any (x: x.ipv6.radvd.enable) (builtins.attrValues cfg.interfaces)) {
users.users.radvd = {
isSystemUser = true;
group = "radvd";
description = "Router Advertisement Daemon User";
};
users.groups.radvd = { };
systemd.services = lib.mapAttrs' (interface: icfg: let
escapedInterface = utils.escapeSystemdPath interface;
ifaceOpts = rec {
AdvSendAdvert = true;
AdvManagedFlag = icfg.ipv6.kea.enable && icfg.ipv6.addresses != [ ];
AdvOtherConfigFlag = AdvManagedFlag && icfg.ipv6.radvd.interfaceSettings.AdvManagedFlag or true;
} // icfg.ipv6.radvd.interfaceSettings;
prefixOpts = {
# if dhcp6 is enabled: don't autoconfigure addresses, ask dhcp
AdvAutonomous = !ifaceOpts.AdvManagedFlag;
};
compileOpt = x:
if x == true then "on"
else if x == false then "off"
else toString x;
compileOpts = lib.mapAttrsToList (k: v: "${k} ${compileOpt v};");
indent = map (x: " " + x);
confFile = pkgs.writeText "radvd-${escapedInterface}.conf" (
builtins.concatStringsSep "\n" (
[ "interface ${interface} {" ]
++ indent (
compileOpts ifaceOpts
++ builtins.concatLists (map ({ address, gateways, prefixLength, dns, radvdSettings, ... }:
[ "prefix ${address}/${toString prefixLength} {" ]
++ indent (compileOpts (prefixOpts // radvdSettings))
++ [ "};" ]
++ (builtins.concatLists (map (gateway:
[ "route ${if builtins.isString gateway then gateway else gateway.address}/${toString (if gateway.prefixLength or null != null then gateway.prefixLength else prefixLength)} {" ]
++ indent (compileOpts (gateway.radvdSettings or { }))
++ [ "};" ]) gateways))
++ (builtins.concatLists (map (dns:
[ "RDNSS ${if builtins.isString dns then dns else dns.address} {" ]
++ indent (compileOpts (dns.radvdSettings or { }))
++ [ "};" ]) dns))) icfg.ipv6.addresses)
) ++ [ "};" ]));
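# The generated file should look roughly like this (sketch, one prefix on br0, kea6 off):
#   interface br0 {
#     AdvSendAdvert on;
#     AdvManagedFlag off;
#     AdvOtherConfigFlag off;
#     prefix fd01:2345:6789:abcd::/64 {
#       AdvAutonomous on;
#     };
#   };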
package = pkgs.radvd;
in {
name = "radvd-${escapedInterface}";
value = lib.mkIf icfg.ipv6.radvd.enable {
description = "IPv6 Router Advertisement Daemon (${interface})";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "sys-subsystem-net-devices-${escapedInterface}.device" ];
bindsTo = [ "sys-subsystem-net-devices-${escapedInterface}.device" ];
serviceConfig = {
ExecStart = "@${package}/bin/radvd radvd -n -u radvd -C ${confFile}";
Restart = "always";
};
};
}) cfg.interfaces;
};
}