router: mss clamping
also other misc changes, such as fixing boot on the laptop
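The gist of the MSS clamping part, condensed from the router hunks further down (wg0 is the WireGuard VPN interface there; everything below is lifted from those hunks):

  vpn_mtu = config.networking.wireguard.interfaces.wg0.mtu;
  vpn_ipv4_mss = vpn_mtu - 40;  # MTU minus IPv4 (20 B) + TCP (20 B) headers
  vpn_ipv6_mss = vpn_mtu - 60;  # MTU minus IPv6 (40 B) + TCP (20 B) headers

  # in the prerouting chain, applied only to traffic marked for the VPN routing table:
  [(is.eq ip6.nexthdr (f: f.tcp)) (is.eq meta.mark vpn_table)
    (is.gt tcpOpt.maxseg.size vpn_ipv6_mss) (mangle tcpOpt.maxseg.size vpn_ipv6_mss)]
  [(is.eq ip.protocol (f: f.tcp)) (is.eq meta.mark vpn_table)
    (is.gt tcpOpt.maxseg.size vpn_ipv4_mss) (mangle tcpOpt.maxseg.size vpn_ipv4_mss)]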
parent 5272bf603a
commit aa6fef1d9c

flake.lock (23 lines changed):
@@ -228,11 +228,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1690432004,
-      "narHash": "sha256-mGK512GjUbTNdy6AorlX6OF3oAZ5GkGWuxEKWj8xpUw=",
+      "lastModified": 1691308996,
+      "narHash": "sha256-eXxZ7Mib2U1pfcchrCqSGDRZBjCIbVIWEq93OcE6pEI=",
       "owner": "chayleaf",
       "repo": "nixos-router",
-      "rev": "c86131f52922907d77653d553851f03a8e064071",
+      "rev": "c9528e1dc5acc77273543c88db47088ea5dd28b4",
       "type": "github"
     },
     "original": {
@@ -243,16 +243,15 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1690548937,
-        "narHash": "sha256-x3ZOPGLvtC0/+iFAg9Kvqm/8hTAIkGjc634SqtgaXTA=",
-        "owner": "nixos",
+        "lastModified": 1691178599,
+        "narHash": "sha256-P4r79P2toVs0dfjXvyIp8QdnNcrFQQRFEUl/fHJBNz0=",
+        "owner": "chayleaf",
         "repo": "nixpkgs",
-        "rev": "2a9d660ff0f7ffde9d73be328ee6e6f10ef66b28",
+        "rev": "f18d5184718a901ba5b1ab437570ad291cc1873c",
         "type": "github"
       },
       "original": {
-        "owner": "nixos",
-        "ref": "nixos-unstable",
+        "owner": "chayleaf",
         "repo": "nixpkgs",
         "type": "github"
       }
@@ -302,11 +301,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1689260608,
-      "narHash": "sha256-GHnXziKo8EzGnvMzKEExA5/H3N1Hp3GtRy84E2z4YN8=",
+      "lastModified": 1691303305,
+      "narHash": "sha256-9JUabsdRAZl4ixq4/m7+vryk4Tv4Dq3JTNAUxGV+nOU=",
       "owner": "chayleaf",
       "repo": "notnft",
-      "rev": "98ff79679cfdc31f49abaeb513cb6d36905dd32e",
+      "rev": "f7fa096c285c0705bffc180d0d08ea82f3b8e957",
       "type": "github"
     },
     "original": {
@@ -2,7 +2,8 @@
   description = "NixOS + Home Manager configuration of chayleaf";

   inputs = {
-    nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
+    # nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
+    nixpkgs.url = "github:chayleaf/nixpkgs";
     nixos-hardware.url = "github:NixOS/nixos-hardware";
     impermanence.url = "github:nix-community/impermanence";
     nur.url = "github:nix-community/NUR";
@@ -27,13 +27,22 @@ in
     unstable = nixForNixPlugins;
   });
   /* Various patches to change Nix version of existing packages so they don't error out because of nix-plugins in nix.conf
-  hydra_unstable = pkgs.hydra_unstable.override { nix = nixForNixPlugins; };
   harmonia = pkgs.harmonia.override { nix = nixForNixPlugins; };
   nix-init = pkgs.nix-init.override { nix = nixForNixPlugins; };
   nix-serve = pkgs.nix-serve.override { nix = nixForNixPlugins; };
   nix-serve-ng = pkgs.nix-serve-ng.override { nix = nixForNixPlugins; };
   nurl = pkgs.nurl.override { nixVersions = builtins.mapAttrs (k: v: nixForNixPlugins) pkgs.nixVersions; };
   */
+  # TODO:
+  /*hydra_unstable = (pkgs.hydra_unstable.override {
+    nix = nixForNixPlugins;
+  }).overrideAttrs (old: {
+    version = "2023-05-08";
+    src = old.src.override {
+      rev = "13ef4e3c5d87bc6f68c91a36d78cdc7d589d8ff2";
+      sha256 = "sha256-niw0RHfwpo2/86wvtHrbU/DQYlkkwtrM+qG7GEC0qAo=";
+    };
+  });*/

   clang-tools_latest = pkgs.clang-tools_16;
   clang_latest = pkgs.clang_16;
@@ -92,7 +92,7 @@

   # System76 scheduler (not actually a scheduler, just a renice daemon) for improved responsiveness
   /*services.dbus.packages = [ pkgs.system76-scheduler ];
-  systemd.services."system76-scheduler" = {
+  systemd.services.system76-scheduler = {
     description = "Automatically configure CPU scheduler for responsiveness on AC";
     wantedBy = [ "multi-user.target" ];
     serviceConfig = {
@@ -150,17 +150,22 @@
   services.sshd.enable = true;
   boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
   nix.settings = {
-    trusted-users = [ "root" config.common.mainUsername ];
     netrc-file = "/secrets/netrc";
     substituters = [
       "https://binarycache.pavluk.org"
       "https://cache.nixos.org/"
-      # "https://nix-community.cachix.org"
     ];
+    trusted-substituters = [
+      "https://nix-community.cachix.org"
+      "https://nix-gaming.cachix.org"
+      "https://nixpkgs-wayland.cachix.org"
+    ];
     trusted-public-keys = [
       "binarycache.pavluk.org:Vk0ms/vSqoOV2JXeNVOroc8EfilgVxCCUtpCShGIKsQ="
       "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
-      # "nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
+      "nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
+      "nix-gaming.cachix.org-1:nbjlureqMbRAxR1gJ/f3hxemL9svXaZF/Ees8vCUUs4="
+      "nixpkgs-wayland.cachix.org-1:3lwxaILxMRkVhehr5StQprHdEo4IrE8sRho9R9HOLYA="
     ];
   };
   services.udev.packages = [
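The split above matters because the two lists behave differently: entries in substituters are queried for every build, while trusted-substituters are only caches that non-trusted users are allowed to opt into per invocation (e.g. via --extra-substituters or a flake's nixConfig) without being listed in trusted-users. A minimal sketch of the distinction, reusing the cache URLs from this hunk:

  nix.settings = {
    # consulted by default for every build
    substituters = [ "https://binarycache.pavluk.org" "https://cache.nixos.org/" ];
    # not used unless a user asks for them, e.g.
    #   nix build --extra-substituters https://nix-community.cachix.org ...
    trusted-substituters = [ "https://nix-community.cachix.org" ];
  };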
@@ -70,23 +70,25 @@ in {
        proxy_read_timeout 300;
        proxy_connect_timeout 300;
        proxy_send_timeout 300;
+       client_body_timeout 300;
+       send_timeout 300;
      '';
      locations."/".proxyPass = "http://${lib.quoteListenAddr config.services.hydra.listenHost}:${toString config.services.hydra.port}/";
-     locations."/static/".root = "${config.services.hydra.package}/libexec/hydra/root/";
+     locations."/static/".root = lib.mkIf config.services.hydra.enable "${config.services.hydra.package}/libexec/hydra/root/";
    };
    users.users.nginx.extraGroups = [ "grafana" ];

-   services.nix-serve = {
+   /*services.nix-serve = {
      enable = true;
      package = pkgs.nix-serve-ng;
      bindAddress = "127.0.0.1";
      secretKeyFile = "/secrets/cache-priv-key.pem";
-   };
-   /*services.harmonia = {
+   };*/
+   services.harmonia = {
      enable = true;
      signKeyPath = "/secrets/cache-priv-key.pem";
      settings.bind = "[::1]:5000";
-   };*/
+   };
    nix.settings.allowed-users = [ "nix-serve" "harmonia" "hydra" "hydra-www" ];
    # only hydra has access to this file anyway
    nix.settings.extra-builtins-file = "/etc/nixos/private/extra-builtins.nix";
@@ -106,28 +108,31 @@ in {
      enableACME = true;
      forceSSL = true;
      basicAuthFile = "/secrets/home_password";
-     locations."/".proxyPass = "http://${config.services.nix-serve.bindAddress}:${toString config.services.nix-serve.port}";
+     /*locations."/".proxyPass = "http://${config.services.nix-serve.bindAddress}:${toString config.services.nix-serve.port}";
      extraConfig = ''
        proxy_read_timeout 300;
        proxy_connect_timeout 300;
        proxy_send_timeout 300;
-     '';
+     '';*/
      # TODO: fix
      # https://github.com/nix-community/harmonia/issues/120
-     /*locations."/".proxyPass = "http://${config.services.harmonia.settings.bind or "[::1]:5000"}";
+     locations."/".proxyPass = "http://${config.services.harmonia.settings.bind or "[::1]:5000"}";
      locations."/".extraConfig = ''
        proxy_set_header Host $host;
        proxy_http_version 1.1;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
-     '';*/
+       location ~ "^/nar/([a-z0-9]{32})-.*\.narinfo$" {
+         proxy_pass http://127.0.0.1:5000/$1.narinfo$is_args$args;
+       }
+     '';
      # zstd on;
      # zstd_types application/x-nix-archive;
    };

    services.hydra = {
-     enable = true;
+     enable = false;
      hydraURL = "home.${cfg.domainName}/hydra";
      listenHost = "127.0.0.1";
      minimumDiskFree = 30;
@@ -151,9 +156,11 @@ in {
    systemd.services.nix-daemon.serviceConfig.CPUQuota = "100%";
    nix.daemonCPUSchedPolicy = "idle";
    nix.daemonIOSchedClass = "idle";
-   systemd.services.hydra-evaluator.serviceConfig.CPUQuota = "100%";
-   systemd.services.hydra-evaluator.serviceConfig.CPUSchedulingPolicy = "idle";
-   systemd.services.hydra-evaluator.serviceConfig.IOSchedulingClass = "idle";
+   systemd.services.hydra-evaluator = lib.mkIf config.services.hydra.enable {
+     serviceConfig.CPUQuota = "100%";
+     serviceConfig.CPUSchedulingPolicy = "idle";
+     serviceConfig.IOSchedulingClass = "idle";
+   };
    programs.ccache.enable = true;

    services.nginx.statusPage = true;
@@ -43,11 +43,16 @@ let
     preamble = true;
     country3 = "0x49"; # indoor
   };

   # routing tables
   wan_table = 1;
   # vpn table, assign an id but don't actually add a rule for it, so it is the default
   vpn_table = 2;
+
+  vpn_mtu = config.networking.wireguard.interfaces.wg0.mtu;
+  vpn_ipv4_mss = vpn_mtu - 40;
+  vpn_ipv6_mss = vpn_mtu - 60;
+
   dnatRuleMode = rule:
     if rule.mode != "" then rule.mode
     else if rule.target4.address or null == netAddresses.lan4 || rule.target6.address or null == netAddresses.lan6 then "rule"
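The 40 and 60 byte offsets are the minimum IP-plus-TCP header overhead: 20 bytes of IPv4 header plus 20 bytes of TCP header, and 40 bytes of IPv6 header plus 20 bytes of TCP header. A quick sanity check of the arithmetic, assuming the common WireGuard MTU of 1420 (the real value is read from wg0's mtu option above):

  let
    vpn_mtu = 1420;               # assumed here; the config reads it from wg0
    vpn_ipv4_mss = vpn_mtu - 40;  # 1380 = 1420 - (20 B IPv4 + 20 B TCP)
    vpn_ipv6_mss = vpn_mtu - 60;  # 1360 = 1420 - (40 B IPv6 + 20 B TCP)
  in { inherit vpn_ipv4_mss vpn_ipv6_mss; }  # => { vpn_ipv4_mss = 1380; vpn_ipv6_mss = 1360; }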
@@ -109,9 +114,9 @@
     [(is.eq ip.protocol (f: f.icmp)) (is.ne icmp.type (f: f.echo-request)) (limit { rate = 100; per = f: f.second; }) accept]
     [(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: f.echo-request)) (limit { rate = 50; per = f: f.second; }) accept]
     [(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.ne icmpv6.type (f: f.echo-request)) (limit { rate = 100; per = f: f.second; }) accept]
-    # always accept destination unreachable and time-exceeded
-    [(is.eq ip.protocol (f: f.icmp)) (is.eq icmp.type (f: with f; set [ destination-unreachable time-exceeded ])) accept]
-    [(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: with f; set [ destination-unreachable time-exceeded ])) accept]
+    # always accept destination unreachable, time-exceeded, parameter-problem, packet-too-big
+    [(is.eq ip.protocol (f: f.icmp)) (is.eq icmp.type (f: with f; set [ destination-unreachable time-exceeded parameter-problem ])) accept]
+    [(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: with f; set [ destination-unreachable time-exceeded parameter-problem packet-too-big ])) accept]
     # don't log echo-request drops
     [(is.eq ip.protocol (f: f.icmp)) (is.eq icmp.type (f: f.echo-request)) drop]
     [(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: f.echo-request)) drop]
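Accepting packet-too-big unconditionally ties into the MSS clamping elsewhere in this commit: IPv6 routers never fragment, so Path MTU Discovery depends entirely on ICMPv6 packet-too-big messages getting through, and dropping them behind a lower-MTU VPN link shows up as hung connections. The IPv4 counterpart ("fragmentation needed", a destination-unreachable code) is already covered by accepting destination-unreachable. A standalone restatement of the IPv6 rule in the same notnft style, with the reasoning as comments (illustrative, not an extra chain to add):

  # ICMPv6 errors that must never be dropped or rate-limited away:
  #   packet-too-big - carries the next-hop MTU that PMTUD needs
  #   destination-unreachable, time-exceeded, parameter-problem - standard error signalling
  [(is.eq ip6.nexthdr (f: f.ipv6-icmp))
    (is.eq icmpv6.type (f: with f; set [ destination-unreachable time-exceeded parameter-problem packet-too-big ]))
    accept]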
@@ -279,16 +284,16 @@
     } ++ lib.optional (udpOnly != [ ]) {
       port = notnft.dsl.set udpOnly; tcp = false; udp = true;
       target4.address = serverAddress4; target6.address = serverAddress6;
-    } ++ map (range: {
+    } ++ lib.flip map rangesTcpAndUdp (range: {
       port = notnft.dsl.range range.from range.to; tcp = true; udp = true;
       target4.address = serverAddress4; target6.address = serverAddress6;
-    }) rangesTcpAndUdp ++ map (range: {
+    }) ++ lib.flip map rangesTcpOnly (range: {
       port = notnft.dsl.range range.from range.to; tcp = true; udp = false;
       target4.address = serverAddress4; target6.address = serverAddress6;
-    }) rangesTcpOnly ++ map (range: {
+    }) ++ lib.flip map rangesUdpOnly (range: {
       port = notnft.dsl.range range.from range.to; tcp = false; udp = true;
       target4.address = serverAddress4; target6.address = serverAddress6;
-    }) rangesUdpOnly;
+    });

     router.enable = true;
     # 2.4g ap
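lib.flip just swaps a two-argument function's arguments (flip f a b == f b a), so lib.flip map someList someFn is the same as map someFn someList; the rewrite above only moves the list in front of the long per-range lambda, so the dangling ") rangesTcpOnly" after each closing brace goes away. A tiny self-contained sketch (the range value is made up):

  let
    flip = f: a: b: f b a;                     # essentially nixpkgs lib.flip
    ranges = [ { from = 6881; to = 6889; } ];  # hypothetical port range
    asRule = range: { port = [ range.from range.to ]; tcp = true; };
  in {
    before = map asRule ranges;        # function first, list trailing
    after  = flip map ranges asRule;   # list first, (long) function last
  }

Both attributes evaluate to the same list; the win is purely readability when the lambda spans several lines.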
@@ -325,32 +330,32 @@
     # ethernet lan0-3
     router.interfaces.lan0 = {
       bridge = "br0";
-      systemdLinkLinkConfig.MACAddressPolicy = "persistent";
+      systemdLink.linkConfig.MACAddressPolicy = "persistent";
     };
     router.interfaces.lan1 = {
       bridge = "br0";
-      systemdLinkLinkConfig.MACAddressPolicy = "persistent";
+      systemdLink.linkConfig.MACAddressPolicy = "persistent";
     };
     router.interfaces.lan2 = {
       bridge = "br0";
-      systemdLinkLinkConfig.MACAddressPolicy = "persistent";
+      systemdLink.linkConfig.MACAddressPolicy = "persistent";
     };
     router.interfaces.lan3 = {
       bridge = "br0";
-      systemdLinkLinkConfig.MACAddressPolicy = "persistent";
+      systemdLink.linkConfig.MACAddressPolicy = "persistent";
     };
     # sfp lan4
     router.interfaces.lan4 = {
       bridge = "br0";
-      systemdLinkLinkConfig.MACAddressPolicy = "persistent";
+      systemdLink.linkConfig.MACAddressPolicy = "persistent";
     };
     /*
     # sfp lan5
     router.interfaces.lan5 = {
       bridge = "br0";
       # i could try to figure out why this doesn't work... but i don't even have sfp to plug into this
-      systemdLinkMatchConfig.OriginalName = "eth1";
-      systemdLinkLinkConfig.MACAddressPolicy = "persistent";
+      systemdLink.matchConfig.OriginalName = "eth1";
+      systemdLink.linkConfig.MACAddressPolicy = "persistent";
     };
     */
     # ethernet wan
@@ -358,8 +363,8 @@
       dependentServices = [
         { service = "wireguard-wg0"; inNetns = false; }
       ];
-      systemdLinkLinkConfig.MACAddressPolicy = "none";
-      systemdLinkLinkConfig.MACAddress = cfg.routerMac;
+      systemdLink.linkConfig.MACAddressPolicy = "none";
+      systemdLink.linkConfig.MACAddress = cfg.routerMac;
       dhcpcd = {
         enable = true;
         # technically this should be assigned to br0 instead of veth-wan-b
@@ -463,7 +468,7 @@
         [ (is.eq meta.iifname "wg0") (is.eq ip.protocol protocols) (is.eq th.dport rule.port)
           (if rule4.port == null then dnat.ip rule4.address else dnat.ip rule4.address rule4.port) ]
       ] ++ lib.optionals (rule6 != null) [
-        [ (is.eq meta.iifname "wg0") (is.eq ip6.protocol protocols) (is.eq th.dport rule.port)
+        [ (is.eq meta.iifname "wg0") (is.eq ip6.nexthdr protocols) (is.eq th.dport rule.port)
           (if rule6.port == null then dnat.ip6 rule6.address else dnat.ip6 rule6.address rule6.port) ]
       ])
       (builtins.filter (x: x.inVpn && (x.tcp || x.udp)) cfg.dnatRules));
@@ -500,15 +505,17 @@
       prerouting = add chain { type = f: f.filter; hook = f: f.prerouting; prio = f: f.filter; policy = f: f.accept; } ([
         [(mangle meta.mark ct.mark)]
         [(is.ne meta.mark 0) accept]
+        [(is.eq ip.daddr "@block4") drop]
+        [(is.eq ip6.daddr "@block6") drop]
         [(is.eq meta.iifname "br0") (mangle meta.mark vpn_table)]
         [(is.eq ip.daddr "@force_unvpn4") (mangle meta.mark wan_table)]
         [(is.eq ip6.daddr "@force_unvpn6") (mangle meta.mark wan_table)]
-        # don't vpn smtp requests so spf works fine (and in case the vpn blocks requests over port 25)
-        [(is.eq ip.saddr serverAddress4) (is.eq ip.protocol (f: f.tcp)) (is.eq tcp.dport 25) (mangle meta.mark wan_table)]
-        [(is.eq ip6.saddr serverAddress6) (is.eq ip6.nexthdr (f: f.tcp)) (is.eq tcp.dport 25) (mangle meta.mark wan_table)]
-        # but block requests to port 25 from other hosts so they can't send mail pretending to originate from my domain
+        # block requests to port 25 from hosts other than the server so they can't send mail pretending to originate from my domain
         [(is.ne ip.saddr serverAddress4) (is.eq ip.protocol (f: f.tcp)) (is.eq tcp.dport 25) drop]
         [(is.ne ip6.saddr serverAddress6) (is.eq ip6.nexthdr (f: f.tcp)) (is.eq tcp.dport 25) drop]
+        # don't vpn smtp requests so spf works fine (and in case the vpn blocks requests over port 25, which it usually does)
+        [(is.eq ip.protocol (f: f.tcp)) (is.eq tcp.dport 25) (mangle meta.mark wan_table)]
+        [(is.eq ip6.nexthdr (f: f.tcp)) (is.eq tcp.dport 25) (mangle meta.mark wan_table)]
         [(is.eq ip.daddr "@force_vpn4") (mangle meta.mark vpn_table)]
         [(is.eq ip6.daddr "@force_vpn6") (mangle meta.mark vpn_table)]
       ] ++ # 1. dnat non-vpn: change rttable to wan
@@ -537,19 +544,22 @@
         [ (is ct.status (f: f.dnat)) (is.eq meta.iifname "br0") (is.eq ip.protocol protocols) (is.eq ip.saddr rule4.address)
           (is.eq th.sport (if rule4.port != null then rule4.port else rule.port)) (mangle meta.mark vpn_table) ]
       ] ++ lib.optionals (rule6 != null) [
-        [ (is ct.status (f: f.dnat)) (is.eq meta.iifname "br0") (is.eq ip6.protocol protocols) (is.eq ip6.saddr rule6.address)
+        [ (is ct.status (f: f.dnat)) (is.eq meta.iifname "br0") (is.eq ip6.nexthdr protocols) (is.eq ip6.saddr rule6.address)
           (is.eq th.sport (if rule6.port != null then rule6.port else rule.port)) (mangle meta.mark vpn_table) ]
       ])
       (builtins.filter (x: x.inVpn && (x.tcp || x.udp) && dnatRuleMode x == "mark") cfg.dnatRules))
       ++ [
-        [(is.eq ip.daddr "@block4") drop]
-        [(is.eq ip6.daddr "@block6") drop]
        # this doesn't work... it still gets routed, even though iot_table doesn't have a default route
-        # [(is.eq ip.saddr vacuumAddress4) (is.ne ip.daddr) (mangle meta.mark iot_table)]
        # [(is.eq ether.saddr cfg.vacuumMac) (mangle meta.mark iot_table)]
        # instead of debugging that, simply change the approach
        [(is.eq ether.saddr cfg.vacuumMac) (is.ne ip.daddr (cidr netCidrs.lan4)) (is.ne ip.daddr "@allow_iot4") (log "iot4 ") drop]
        [(is.eq ether.saddr cfg.vacuumMac) (is.ne ip6.daddr (cidr netCidrs.lan6)) (is.ne ip6.daddr "@allow_iot6") (log "iot6 ") drop]
+        # MSS clamping - since VPN reduces max MTU
+        # We only do this for the first packet in a connection, which should be enough
+        [(is.eq ip6.nexthdr (f: f.tcp)) (is.eq meta.mark vpn_table)
+          (is.gt tcpOpt.maxseg.size vpn_ipv6_mss) (mangle tcpOpt.maxseg.size vpn_ipv6_mss)]
+        [(is.eq ip.protocol (f: f.tcp)) (is.eq meta.mark vpn_table)
+          (is.gt tcpOpt.maxseg.size vpn_ipv4_mss) (mangle tcpOpt.maxseg.size vpn_ipv4_mss)]
        [(mangle ct.mark meta.mark)]
      ]);
    };

@@ -3,7 +3,10 @@
 , config
 , ... }:

-{
+let
+  # force some defaults even if they were set with mkDefault already...
+  mkForceDefault = lib.mkOverride 999;
+in {
   options.common = with lib; mkOption {
     type = types.submodule {
       options = {
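For context on the 999: the module system merges option definitions by priority, and the lower number wins. lib.mkForce is mkOverride 50, a plain definition sits at 100, and lib.mkDefault is mkOverride 1000, so mkOverride 999 lands just below mkDefault: it beats another module's mkDefault (nixos-hardware's, in the libinput case later in this commit) without overriding an explicit plain setting. As a cheat-sheet, with the real usage from this commit at the end:

  # priority  50   lib.mkForce x
  # priority 100   x                  (plain definition)
  # priority 999   mkForceDefault x   (this module's helper)
  # priority 1000  lib.mkDefault x    (used by e.g. nixos-hardware)
  services.xserver.libinput.enable = mkForceDefault (!cfg.minimal);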
@@ -35,9 +38,18 @@
   cfg = config.common;
 in {
   nix = {
+    channel.enable = false;
     settings = {
       allowed-users = [ cfg.mainUsername ];
       auto-optimise-store = true;
+      use-xdg-base-directories = true;
+      experimental-features = [
+        "ca-derivations"
+        "flakes"
+        "nix-command"
+        "no-url-literals"
+        "repl-flake"
+      ];
     };
     gc = {
       automatic = true;
@@ -45,9 +57,6 @@
       options = "--delete-older-than 30d";
     };
     package = pkgs.nixForNixPlugins;
-    extraOptions = ''
-      experimental-features = nix-command flakes
-    '';
   };
   systemd.services.nix-daemon.serviceConfig.LimitSTACKSoft = "infinity";
   boot.kernelParams = lib.optionals (cfg.resolution != null) [
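The extraOptions block removed here and the structured list added in the previous hunk feed the same nix.conf key; assuming the usual NixOS rendering of nix.settings (list values joined with spaces), the result is one line with the extended feature set:

  # old form (removed above):
  #   nix.extraOptions = ''
  #     experimental-features = nix-command flakes
  #   '';
  # new form (added in the hunk before this one); both render into nix.conf roughly as
  #   experimental-features = ca-derivations flakes nix-command no-url-literals repl-flake
  nix.settings.experimental-features = [ "ca-derivations" "flakes" "nix-command" "no-url-literals" "repl-flake" ];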
@@ -105,23 +114,21 @@
     };
   };
   # this is supposed to default to false, but it doesn't because of nixos fish module
-  documentation.man.generateCaches = lib.mkOverride 999 false;
-  # and we don't need html files and so on on minimal machines (it's not like I ever use it anyway)
-  # as a bonus, this disables the HTML NixOS manual which takes a while to build and which I
-  # definitely don't need on minimal machines
+  documentation.man.generateCaches = lib.mkIf cfg.minimal (mkForceDefault false);
+  # we don't need stuff like html files (NixOS manual and so on) on minimal machines
   documentation.doc.enable = lib.mkIf cfg.minimal (lib.mkDefault false);
   programs.fish.enable = true;
   # conflicts with bash module's mkDefault
   # only override on minimal systems because on non-minimal systems
-  # my fish config doesn't work well in fb/drm console
-  users.defaultUserShell = lib.mkIf cfg.minimal (lib.mkOverride 999 pkgs.fish);
+  # because my fish config doesn't work well in fb/drm console
+  users.defaultUserShell = lib.mkIf cfg.minimal (mkForceDefault pkgs.fish);
   users.users.${cfg.mainUsername} = {
     uid = 1000;
     isNormalUser = true;
     extraGroups = [ "wheel" ];
   };
   # nixos-hardware uses mkDefault here, so we use slightly higher priority
-  services.xserver.libinput.enable = lib.mkOverride 999 (!cfg.minimal);
+  services.xserver.libinput.enable = mkForceDefault (!cfg.minimal);
   # TODO: minimal fish/vim config
   /*
   services.xserver = {