Compare commits


1 Commit

Commit: cfec181d0d
Message: deploy-rs: initial setup
Checks: failed (flake / flake (push), failing after 2m39s)
Date: 2024-04-21 15:54:37 +01:00
158 changed files with 1430 additions and 4089 deletions

View File

@ -11,13 +11,14 @@ jobs:
flake:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: DeterminateSystems/nix-installer-action@b92f66560d6f97d6576405a7bae901ab57e72b6a # v15
- uses: DeterminateSystems/magic-nix-cache-action@87b14cf437d03d37989d87f0fa5ce4f5dc1a330b # v8
- name: lint
- uses: actions/checkout@v4
- name: Prepare for Nix installation
run: |
nix fmt
git diff --exit-code
apt-get update
apt-get install -y sudo
- uses: cachix/install-nix-action@v26
- name: lint
run: nix fmt
- name: flake check
run: nix flake check --all-systems
timeout-minutes: 10
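
The lint step above formats the tree with nix fmt and fails if that leaves a diff; the flake step runs nix flake check --all-systems with a 10 minute timeout. Both can be reproduced locally before pushing. A minimal sketch, assuming a checkout of this repository with Nix and flakes enabled:

$ nix fmt && git diff --exit-code    # same check as the CI lint step
$ nix flake check --all-systems      # same check as the CI flake step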

View File

@ -1,27 +0,0 @@
{ config, pkgs, ... }:
{
config = {
system.stateVersion = 4;
networking.hostName = "jakehillion-mba-m2-15";
nix = {
useDaemon = true;
};
programs.zsh.enable = true;
security.pam.enableSudoTouchIdAuth = true;
environment.systemPackages = with pkgs; [
fd
htop
mosh
neovim
nix
ripgrep
sapling
];
};
}

View File

@ -2,9 +2,7 @@
"nodes": {
"agenix": {
"inputs": {
"darwin": [
"darwin"
],
"darwin": "darwin",
"home-manager": [
"home-manager"
],
@ -14,11 +12,11 @@
"systems": "systems"
},
"locked": {
"lastModified": 1723293904,
"narHash": "sha256-b+uqzj+Wa6xgMS9aNbX4I+sXeb5biPDi39VgvSFqFvU=",
"lastModified": 1712079060,
"narHash": "sha256-/JdiT9t+zzjChc5qQiF+jhrVhRt8figYH29rZO7pFe4=",
"owner": "ryantm",
"repo": "agenix",
"rev": "f6291c5935fdc4e0bef208cfc0dcab7e3f7a1c41",
"rev": "1381a759b205dff7a6818733118d02253340fd5e",
"type": "github"
},
"original": {
@ -30,40 +28,63 @@
"darwin": {
"inputs": {
"nixpkgs": [
"agenix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1731153869,
"narHash": "sha256-3Ftf9oqOypcEyyrWJ0baVkRpvQqroK/SVBFLvU3nPuc=",
"lastModified": 1700795494,
"narHash": "sha256-gzGLZSiOhf155FW7262kdHo2YDeugp3VuIFb4/GGng0=",
"owner": "lnl7",
"repo": "nix-darwin",
"rev": "5c74ab862c8070cbf6400128a1b56abb213656da",
"rev": "4b9b83d5a92e8c1fbfd8eb27eda375908c11ec4d",
"type": "github"
},
"original": {
"owner": "lnl7",
"ref": "master",
"repo": "nix-darwin",
"type": "github"
}
},
"disko": {
"deploy-rs": {
"inputs": {
"flake-compat": "flake-compat",
"nixpkgs": [
"nixpkgs"
],
"utils": [
"flake-utils"
]
},
"locked": {
"lastModified": 1731060864,
"narHash": "sha256-aYE7oAYZ+gPU1mPNhM0JwLAQNgjf0/JK1BF1ln2KBgk=",
"owner": "nix-community",
"repo": "disko",
"rev": "5e40e02978e3bd63c2a6a9fa6fa8ba0e310e747f",
"lastModified": 1711973905,
"narHash": "sha256-UFKME/N1pbUtn+2Aqnk+agUt8CekbpuqwzljivfIme8=",
"owner": "serokell",
"repo": "deploy-rs",
"rev": "88b3059b020da69cbe16526b8d639bd5e0b51c8b",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "disko",
"owner": "serokell",
"ref": "master",
"repo": "deploy-rs",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
@ -72,11 +93,11 @@
"systems": "systems_2"
},
"locked": {
"lastModified": 1726560853,
"narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
@ -92,47 +113,27 @@
]
},
"locked": {
"lastModified": 1726989464,
"narHash": "sha256-Vl+WVTJwutXkimwGprnEtXc/s/s8sMuXzqXaspIGlwM=",
"lastModified": 1712386041,
"narHash": "sha256-dA82pOMQNnCJMAsPG7AXG35VmCSMZsJHTFlTHizpKWQ=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "2f23fa308a7c067e52dfcc30a0758f47043ec176",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "release-24.05",
"repo": "home-manager",
"type": "github"
}
},
"home-manager-unstable": {
"inputs": {
"nixpkgs": [
"nixpkgs-unstable"
]
},
"locked": {
"lastModified": 1730837930,
"narHash": "sha256-0kZL4m+bKBJUBQse0HanewWO0g8hDdCvBhudzxgehqc=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "2f607e07f3ac7e53541120536708e824acccfaa8",
"rev": "d6bb9f934f2870e5cbc5b94c79e9db22246141ff",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "release-23.11",
"repo": "home-manager",
"type": "github"
}
},
"impermanence": {
"locked": {
"lastModified": 1730403150,
"narHash": "sha256-W1FH5aJ/GpRCOA7DXT/sJHFpa5r8sq2qAUncWwRZ3Gg=",
"lastModified": 1708968331,
"narHash": "sha256-VUXLaPusCBvwM3zhGbRIJVeYluh2uWuqtj4WirQ1L9Y=",
"owner": "nix-community",
"repo": "impermanence",
"rev": "0d09341beeaa2367bac5d718df1404bf2ce45e6f",
"rev": "a33ef102a02ce77d3e39c25197664b7a636f9c30",
"type": "github"
},
"original": {
@ -142,60 +143,45 @@
"type": "github"
}
},
"nixos-hardware": {
"locked": {
"lastModified": 1730919458,
"narHash": "sha256-yMO0T0QJlmT/x4HEyvrCyigGrdYfIXX3e5gWqB64wLg=",
"owner": "nixos",
"repo": "nixos-hardware",
"rev": "e1cc1f6483393634aee94514186d21a4871e78d7",
"type": "github"
},
"original": {
"owner": "nixos",
"repo": "nixos-hardware",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1730963269,
"narHash": "sha256-rz30HrFYCHiWEBCKHMffHbMdWJ35hEkcRVU0h7ms3x0=",
"lastModified": 1713344939,
"narHash": "sha256-jpHkAt0sG2/J7ueKnG7VvLLkBYUMQbXQ2L8OBpVG53s=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "83fb6c028368e465cd19bb127b86f971a5e41ebc",
"rev": "e402c3eb6d88384ca6c52ef1c53e61bdc9b84ddd",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-24.05",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-unstable": {
"locked": {
"lastModified": 1730867498,
"narHash": "sha256-Ce3a1w7Qf+UEPjVJcXxeSiWyPMngqf1M2EIsmqiluQw=",
"rev": "9240e11a83307a6e8cf2254340782cba4aa782fd",
"type": "tarball",
"url": "https://gitea.hillion.co.uk/api/v1/repos/JakeHillion/nixpkgs/archive/9240e11a83307a6e8cf2254340782cba4aa782fd.tar.gz"
"lastModified": 1713297878,
"narHash": "sha256-hOkzkhLT59wR8VaMbh1ESjtZLbGi+XNaBN6h49SPqEc=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "66adc1e47f8784803f2deb6cacd5e07264ec2d5c",
"type": "github"
},
"original": {
"type": "tarball",
"url": "https://gitea.hillion.co.uk/JakeHillion/nixpkgs/archive/nixos-unstable.tar.gz"
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"agenix": "agenix",
"darwin": "darwin",
"disko": "disko",
"deploy-rs": "deploy-rs",
"flake-utils": "flake-utils",
"home-manager": "home-manager",
"home-manager-unstable": "home-manager-unstable",
"impermanence": "impermanence",
"nixos-hardware": "nixos-hardware",
"nixpkgs": "nixpkgs",
"nixpkgs-unstable": "nixpkgs-unstable"
}

View File

@ -1,67 +1,39 @@
{
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-24.05";
nixpkgs-unstable.url = "https://gitea.hillion.co.uk/JakeHillion/nixpkgs/archive/nixos-unstable.tar.gz";
nixos-hardware.url = "github:nixos/nixos-hardware";
nixpkgs.url = "github:nixos/nixpkgs/nixos-23.11";
nixpkgs-unstable.url = "github:nixos/nixpkgs/nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
darwin.url = "github:lnl7/nix-darwin";
darwin.inputs.nixpkgs.follows = "nixpkgs";
agenix.url = "github:ryantm/agenix";
agenix.inputs.nixpkgs.follows = "nixpkgs";
agenix.inputs.darwin.follows = "darwin";
agenix.inputs.home-manager.follows = "home-manager";
home-manager.url = "github:nix-community/home-manager/release-24.05";
home-manager.url = "github:nix-community/home-manager/release-23.11";
home-manager.inputs.nixpkgs.follows = "nixpkgs";
home-manager-unstable.url = "github:nix-community/home-manager";
home-manager-unstable.inputs.nixpkgs.follows = "nixpkgs-unstable";
impermanence.url = "github:nix-community/impermanence/master";
disko.url = "github:nix-community/disko";
disko.inputs.nixpkgs.follows = "nixpkgs";
deploy-rs.url = "github:serokell/deploy-rs/master";
deploy-rs.inputs.nixpkgs.follows = "nixpkgs";
deploy-rs.inputs.utils.follows = "flake-utils";
};
description = "Hillion Nix flake";
outputs =
{ self
, agenix
, darwin
, disko
, flake-utils
, home-manager
, home-manager-unstable
, impermanence
, nixos-hardware
, nixpkgs
, nixpkgs-unstable
, ...
}@inputs:
let
getSystemOverlays = system: nixpkgsConfig: [
(final: prev: {
unstable = nixpkgs-unstable.legacyPackages.${prev.system};
"storj" = final.callPackage ./pkgs/storj.nix { };
})
];
in
{
outputs = { self, nixpkgs, nixpkgs-unstable, flake-utils, agenix, home-manager, impermanence, deploy-rs, ... }@inputs: {
nixosConfigurations =
let
fqdns = builtins.attrNames (builtins.readDir ./hosts);
getSystemOverlays = system: nixpkgsConfig: [
(final: prev: {
"storj" = final.callPackage ./pkgs/storj.nix { };
})
];
mkHost = fqdn:
let
system = builtins.readFile ./hosts/${fqdn}/system;
func = if builtins.pathExists ./hosts/${fqdn}/unstable then nixpkgs-unstable.lib.nixosSystem else nixpkgs.lib.nixosSystem;
home-manager-pick = if builtins.pathExists ./hosts/${fqdn}/unstable then home-manager-unstable else home-manager;
let system = builtins.readFile ./hosts/${fqdn}/system;
in
func {
nixpkgs.lib.nixosSystem {
inherit system;
specialArgs = inputs;
modules = [
@ -70,9 +42,8 @@
agenix.nixosModules.default
impermanence.nixosModules.impermanence
disko.nixosModules.disko
home-manager-pick.nixosModules.default
home-manager.nixosModules.default
{
home-manager.sharedModules = [
impermanence.nixosModules.home-manager.impermanence
@ -80,6 +51,7 @@
}
({ config, ... }: {
nix.registry.nixpkgs.flake = nixpkgs; # pin `nix shell` nixpkgs
system.configurationRevision = nixpkgs.lib.mkIf (self ? rev) self.rev;
nixpkgs.overlays = getSystemOverlays config.nixpkgs.hostPlatform.system config.nixpkgs.config;
})
@ -88,21 +60,20 @@
in
nixpkgs.lib.genAttrs fqdns mkHost;
darwinConfigurations = {
jakehillion-mba-m2-15 = darwin.lib.darwinSystem {
system = "aarch64-darwin";
specialArgs = inputs;
modules = [
./darwin/jakehillion-mba-m2-15/configuration.nix
({ config, ... }: {
nixpkgs.overlays = getSystemOverlays "aarch64-darwin" config.nixpkgs.config;
deploy = {
nodes = builtins.mapAttrs
(name: value: {
hostname = name;
profiles.system = {
user = "root";
remoteBuild = true;
path = deploy-rs.lib.aarch64-darwin.activate.nixos self.nixosConfigurations.${name};
};
})
];
};
self.nixosConfigurations;
};
checks = builtins.mapAttrs (system: deployLib: deployLib.deployChecks self.deploy) deploy-rs.lib;
} // flake-utils.lib.eachDefaultSystem (system: {
formatter = nixpkgs.legacyPackages.${system}.nixpkgs-fmt;
});
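
This is the deploy-rs wiring the commit introduces: each nixosConfiguration becomes a deploy-rs node deployed to the matching hostname as root, built on the remote machine (remoteBuild = true), and the checks output exposes deploy-rs's deployChecks so nix flake check covers the deployment definitions. A hedged usage sketch from the repository root; the node name is a placeholder for one of the fqdn directories under ./hosts, and quoting may be needed for the dots:

$ nix run github:serokell/deploy-rs -- .#<fqdn>    # hypothetical invocation of the default deploy-rs app
$ nix flake check --all-systems                    # runs the deployChecks wired up above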

View File

@ -1,55 +0,0 @@
{ config, pkgs, lib, ... }:
{
imports = [
./hardware-configuration.nix
];
config = {
system.stateVersion = "23.11";
networking.hostName = "be";
networking.domain = "lt.ts.hillion.co.uk";
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
custom.defaults = true;
## Impermanence
custom.impermanence = {
enable = true;
userExtraFiles.jake = [
".ssh/id_ecdsa_sk_keys"
];
};
## WiFi
age.secrets."wifi/be.lt.ts.hillion.co.uk".file = ../../secrets/wifi/be.lt.ts.hillion.co.uk.age;
networking.wireless = {
enable = true;
environmentFile = config.age.secrets."wifi/be.lt.ts.hillion.co.uk".path;
networks = {
"Hillion WPA3 Network".psk = "@HILLION_WPA3_NETWORK_PSK@";
};
};
## Desktop
custom.users.jake.password = true;
custom.desktop.awesome.enable = true;
## Tailscale
age.secrets."tailscale/be.lt.ts.hillion.co.uk".file = ../../secrets/tailscale/be.lt.ts.hillion.co.uk.age;
services.tailscale = {
enable = true;
authKeyFile = config.age.secrets."tailscale/be.lt.ts.hillion.co.uk".path;
};
security.sudo.wheelNeedsPassword = lib.mkForce true;
## Enable btrfs compression
fileSystems."/data".options = [ "compress=zstd" ];
fileSystems."/nix".options = [ "compress=zstd" ];
};
}

View File

@ -1,59 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "xhci_pci" "nvme" "usbhid" "usb_storage" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{
device = "tmpfs";
fsType = "tmpfs";
options = [ "mode=0755" ];
};
fileSystems."/boot" =
{
device = "/dev/disk/by-uuid/D184-A79B";
fsType = "vfat";
};
fileSystems."/nix" =
{
device = "/dev/disk/by-uuid/3fdc1b00-28d5-41dd-b8e0-fa6b1217f6eb";
fsType = "btrfs";
options = [ "subvol=nix" ];
};
boot.initrd.luks.devices."root".device = "/dev/disk/by-uuid/c8ffa91a-5152-4d84-8995-01232fd5acd6";
fileSystems."/data" =
{
device = "/dev/disk/by-uuid/3fdc1b00-28d5-41dd-b8e0-fa6b1217f6eb";
fsType = "btrfs";
options = [ "subvol=data" ];
};
swapDevices = [ ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp0s20f0u1u4.useDHCP = lib.mkDefault true;
# networking.interfaces.wlp1s0.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
powerManagement.cpuFreqGovernor = lib.mkDefault "powersave";
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View File

@ -1,7 +0,0 @@
# boron.cx.ts.hillion.co.uk
Additional installation step for Clevis/Tang:
$ echo -n $DISK_ENCRYPTION_PASSWORD | clevis encrypt sss "$(cat /etc/nixos/hosts/boron.cx.ts.hillion.co.uk/clevis_config.json)" >/mnt/data/disk_encryption.jwe
$ sudo chown root:root /mnt/data/disk_encryption.jwe
$ sudo chmod 0400 /mnt/data/disk_encryption.jwe

View File

@ -1,13 +0,0 @@
{
"t": 1,
"pins": {
"tang": [
{
"url": "http://80.229.251.26:7654"
},
{
"url": "http://185.240.111.53:7654"
}
]
}
}
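
The sss pin above with "t": 1 means either of the two listed Tang servers is enough to unlock the disk encryption secret, which is what the clevis settings in boron's initrd rely on. A hedged sketch of verifying the generated JWE against whichever server is currently reachable, assuming clevis is installed on the host:

$ clevis decrypt < /mnt/data/disk_encryption.jwe > /dev/null && echo "unlock OK"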

View File

@ -1,181 +0,0 @@
{ config, pkgs, lib, ... }:
{
imports = [
./hardware-configuration.nix
];
config = {
system.stateVersion = "23.11";
networking.hostName = "boron";
networking.domain = "cx.ts.hillion.co.uk";
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
boot.kernelParams = [ "ip=dhcp" ];
boot.initrd = {
availableKernelModules = [ "igb" ];
network.enable = true;
clevis = {
enable = true;
useTang = true;
devices = {
"disk0-crypt".secretFile = "/data/disk_encryption.jwe";
"disk1-crypt".secretFile = "/data/disk_encryption.jwe";
};
};
};
custom.defaults = true;
## Kernel
### Explicitly use the latest kernel at time of writing because the LTS
### kernels available in NixOS do not seem to support this server's very
### modern hardware.
### custom.sched_ext.enable implies >=6.12, if this is removed the kernel may need to be pinned again. >=6.10 seems good.
custom.sched_ext.enable = true;
## Enable btrfs compression
fileSystems."/data".options = [ "compress=zstd" ];
fileSystems."/nix".options = [ "compress=zstd" ];
## Impermanence
custom.impermanence = {
enable = true;
cache.enable = true;
userExtraFiles.jake = [
".ssh/id_ecdsa"
".ssh/id_rsa"
];
};
boot.initrd.postDeviceCommands = lib.mkAfter ''
btrfs subvolume delete /cache/system
btrfs subvolume snapshot /cache/empty_snapshot /cache/system
'';
## Custom Services
custom = {
locations.autoServe = true;
www.global.enable = true;
services = {
gitea.actions = {
enable = true;
tokenSecret = ../../secrets/gitea/actions/boron.age;
};
};
};
services.nsd.interfaces = [
"138.201.252.214"
"2a01:4f8:173:23d2::2"
];
## Enable ZRAM to help with root on tmpfs
zramSwap = {
enable = true;
memoryPercent = 200;
algorithm = "zstd";
};
## Filesystems
services.btrfs.autoScrub = {
enable = true;
interval = "Tue, 02:00";
# By default both /data and /nix would be scrubbed. They are the same filesystem so this is wasteful.
fileSystems = [ "/data" ];
};
## Resilio
custom.resilio = {
enable = true;
folders =
let
folderNames = [
"dad"
"joseph"
"projects"
"resources"
"sync"
];
mkFolder = name: {
name = name;
secret = {
name = "resilio/plain/${name}";
file = ../../secrets/resilio/plain/${name}.age;
};
};
in
builtins.map (mkFolder) folderNames;
};
services.resilio.directoryRoot = "/data/sync";
## General usability
### Make podman available for dev tools such as act
virtualisation = {
containers.enable = true;
podman = {
enable = true;
dockerCompat = true;
dockerSocket.enable = true;
};
};
users.users.jake.extraGroups = [ "podman" ];
## Networking
boot.kernel.sysctl = {
"net.ipv4.ip_forward" = true;
"net.ipv6.conf.all.forwarding" = true;
};
networking = {
useDHCP = false;
interfaces = {
enp6s0 = {
name = "eth0";
useDHCP = true;
ipv6.addresses = [{
address = "2a01:4f8:173:23d2::2";
prefixLength = 64;
}];
};
};
defaultGateway6 = {
address = "fe80::1";
interface = "eth0";
};
};
networking.firewall = {
trustedInterfaces = [ "tailscale0" ];
allowedTCPPorts = lib.mkForce [ ];
allowedUDPPorts = lib.mkForce [ ];
interfaces = {
eth0 = {
allowedTCPPorts = lib.mkForce [
22 # SSH
3022 # SSH (Gitea) - redirected to 22
53 # DNS
80 # HTTP 1-2
443 # HTTPS 1-2
8080 # Unifi (inform)
];
allowedUDPPorts = lib.mkForce [
53 # DNS
443 # HTTP 3
3478 # Unifi STUN
];
};
};
};
## Tailscale
age.secrets."tailscale/boron.cx.ts.hillion.co.uk".file = ../../secrets/tailscale/boron.cx.ts.hillion.co.uk.age;
services.tailscale = {
enable = true;
authKeyFile = config.age.secrets."tailscale/boron.cx.ts.hillion.co.uk".path;
};
};
}

View File

@ -1,72 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "nvme" "xhci_pci" "ahci" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{
device = "tmpfs";
fsType = "tmpfs";
options = [ "mode=0755" "size=100%" ];
};
fileSystems."/boot" =
{
device = "/dev/disk/by-uuid/ED9C-4ABC";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
fileSystems."/data" =
{
device = "/dev/disk/by-uuid/9aebe351-156a-4aa0-9a97-f09b01ac23ad";
fsType = "btrfs";
options = [ "subvol=data" ];
};
fileSystems."/cache" =
{
device = "/dev/disk/by-uuid/9aebe351-156a-4aa0-9a97-f09b01ac23ad";
fsType = "btrfs";
options = [ "subvol=cache" ];
};
fileSystems."/nix" =
{
device = "/dev/disk/by-uuid/9aebe351-156a-4aa0-9a97-f09b01ac23ad";
fsType = "btrfs";
options = [ "subvol=nix" ];
};
boot.initrd.luks.devices."disk0-crypt" = {
device = "/dev/disk/by-uuid/a68ead16-1bdc-4d26-9e55-62c2be11ceee";
allowDiscards = true;
};
boot.initrd.luks.devices."disk1-crypt" = {
device = "/dev/disk/by-uuid/19bde205-bee4-430d-a4c1-52d635a23963";
allowDiscards = true;
};
swapDevices = [ ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp6s0.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View File

@ -1,7 +0,0 @@
# gendry.jakehillion-terminals.ts.hillion.co.uk
Additional installation step for Clevis/Tang:
$ echo -n $DISK_ENCRYPTION_PASSWORD | clevis encrypt sss "$(cat /etc/nixos/hosts/gendry.jakehillion-terminals.ts.hillion.co.uk/clevis_config.json)" >/mnt/data/disk_encryption.jwe
$ sudo chown root:root /mnt/data/disk_encryption.jwe
$ sudo chmod 0400 /mnt/data/disk_encryption.jwe

View File

@ -1,14 +0,0 @@
{
"t": 1,
"pins": {
"tang": [
{
"url": "http://10.64.50.21:7654"
},
{
"url": "http://10.64.50.25:7654"
}
]
}
}

View File

@ -2,6 +2,8 @@
{
imports = [
../../modules/common/default.nix
../../modules/spotify/default.nix
./bluetooth.nix
./hardware-configuration.nix
];
@ -15,24 +17,6 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
boot.kernelParams = [
"ip=dhcp"
];
boot.initrd = {
availableKernelModules = [ "r8169" ];
network.enable = true;
clevis = {
enable = true;
useTang = true;
devices."root".secretFile = "/data/disk_encryption.jwe";
};
};
custom.defaults = true;
## Custom scheduler
custom.sched_ext.enable = true;
## Impermanence
custom.impermanence = {
enable = true;
@ -45,13 +29,6 @@
];
};
## Enable ZRAM swap to help with root on tmpfs
zramSwap = {
enable = true;
memoryPercent = 200;
algorithm = "zstd";
};
## Desktop
custom.users.jake.password = true;
custom.desktop.awesome.enable = true;
@ -59,7 +36,9 @@
## Resilio
custom.resilio.enable = true;
services.resilio.deviceName = "gendry.jakehillion-terminals";
services.resilio.directoryRoot = "/data/sync";
services.resilio.storagePath = "/data/sync/.sync";
custom.resilio.folders =
let
@ -82,9 +61,9 @@
## Tailscale
age.secrets."tailscale/gendry.jakehillion-terminals.ts.hillion.co.uk".file = ../../secrets/tailscale/gendry.jakehillion-terminals.ts.hillion.co.uk.age;
services.tailscale = {
custom.tailscale = {
enable = true;
authKeyFile = config.age.secrets."tailscale/gendry.jakehillion-terminals.ts.hillion.co.uk".path;
preAuthKeyFile = config.age.secrets."tailscale/gendry.jakehillion-terminals.ts.hillion.co.uk".path;
};
security.sudo.wheelNeedsPassword = lib.mkForce true;
@ -97,13 +76,19 @@
boot.initrd.kernelModules = [ "amdgpu" ];
services.xserver.videoDrivers = [ "amdgpu" ];
## Spotify
home-manager.users.jake.services.spotifyd.settings = {
global = {
device_name = "Gendry";
device_type = "computer";
bitrate = 320;
};
};
users.users."${config.custom.user}" = {
packages = with pkgs; [
prismlauncher
];
};
## Networking
networking.nameservers = lib.mkForce [ ]; # Trust the DHCP nameservers
};
}

View File

@ -28,10 +28,7 @@
options = [ "subvol=nix" ];
};
boot.initrd.luks.devices."root" = {
device = "/dev/disk/by-uuid/af328e8d-d929-43f1-8d04-1c96b5147e5e";
allowDiscards = true;
};
boot.initrd.luks.devices."root".device = "/dev/disk/by-uuid/af328e8d-d929-43f1-8d04-1c96b5147e5e";
fileSystems."/data" =
{

View File

@ -0,0 +1,110 @@
{ config, pkgs, lib, ... }:
{
imports = [
../../modules/common/default.nix
./hardware-configuration.nix
];
config = {
system.stateVersion = "23.05";
networking.hostName = "jorah";
networking.domain = "cx.ts.hillion.co.uk";
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
## Impermanence
custom.impermanence.enable = true;
## Custom Services
custom = {
locations.autoServe = true;
www.global.enable = true;
services = {
version_tracker.enable = true;
gitea.actions = {
enable = true;
tokenSecret = ../../secrets/gitea/actions/jorah.age;
};
};
};
services.foldingathome = {
enable = true;
user = "JakeH"; # https://stats.foldingathome.org/donor/id/357021
daemonNiceLevel = 19;
};
## Enable ZRAM to help with root on tmpfs
zramSwap = {
enable = true;
memoryPercent = 200;
algorithm = "zstd";
};
## Filesystems
services.btrfs.autoScrub = {
enable = true;
interval = "Tue, 02:00";
# By default both /data and /nix would be scrubbed. They are the same filesystem so this is wasteful.
fileSystems = [ "/data" ];
};
## Networking
boot.kernel.sysctl = {
"net.ipv4.ip_forward" = true;
"net.ipv6.conf.all.forwarding" = true;
};
networking = {
useDHCP = false;
interfaces = {
enp5s0 = {
name = "eth0";
useDHCP = true;
ipv6.addresses = [{
address = "2a01:4f9:4b:3953::2";
prefixLength = 64;
}];
};
};
defaultGateway6 = {
address = "fe80::1";
interface = "eth0";
};
};
networking.firewall = {
trustedInterfaces = [ "tailscale0" ];
allowedTCPPorts = lib.mkForce [
22 # SSH
3022 # Gitea SSH (accessed via public 22)
];
allowedUDPPorts = lib.mkForce [ ];
interfaces = {
eth0 = {
allowedTCPPorts = lib.mkForce [
80 # HTTP 1-2
443 # HTTPS 1-2
8080 # Unifi (inform)
];
allowedUDPPorts = lib.mkForce [
443 # HTTP 3
3478 # Unifi STUN
];
};
};
};
## Tailscale
age.secrets."tailscale/jorah.cx.ts.hillion.co.uk".file = ../../secrets/tailscale/jorah.cx.ts.hillion.co.uk.age;
custom.tailscale = {
enable = true;
preAuthKeyFile = config.age.secrets."tailscale/jorah.cx.ts.hillion.co.uk".path;
ipv4Addr = "100.96.143.138";
ipv6Addr = "fd7a:115c:a1e0:ab12:4843:cd96:6260:8f8a";
};
};
}

View File

@ -0,0 +1,48 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "nvme" "xhci_pci" "ahci" "usbhid" "usb_storage" "sr_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{
device = "tmpfs";
fsType = "tmpfs";
options = [ "mode=0755" ];
};
fileSystems."/nix" =
{
device = "/dev/disk/by-id/nvme-KXG60ZNV512G_TOSHIBA_106S10VHT9LM_1-part2";
fsType = "btrfs";
options = [ "subvol=nix" ];
};
fileSystems."/data" =
{
device = "/dev/disk/by-id/nvme-KXG60ZNV512G_TOSHIBA_106S10VHT9LM_1-part2";
fsType = "btrfs";
options = [ "subvol=data" ];
};
fileSystems."/boot" =
{
device = "/dev/disk/by-uuid/4D7E-8DE8";
fsType = "vfat";
};
swapDevices = [ ];
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View File

@ -3,6 +3,7 @@
{
imports = [
./hardware-configuration.nix
../../modules/common/default.nix
../../modules/rpi/rpi4.nix
];
@ -12,11 +13,6 @@
networking.hostName = "li";
networking.domain = "pop.ts.hillion.co.uk";
custom.defaults = true;
## Custom Services
custom.locations.autoServe = true;
# Networking
## Tailscale
age.secrets."tailscale/li.pop.ts.hillion.co.uk".file = ../../secrets/tailscale/li.pop.ts.hillion.co.uk.age;
@ -37,14 +33,6 @@
## Run a persistent iperf3 server
services.iperf3.enable = true;
services.iperf3.openFirewall = true;
networking.firewall.interfaces = {
"end0" = {
allowedTCPPorts = [
7654 # Tang
];
};
};
};
}

View File

@ -1,75 +0,0 @@
{ config, pkgs, lib, ... }:
{
imports = [
./disko.nix
./hardware-configuration.nix
];
config = {
system.stateVersion = "24.05";
networking.hostName = "merlin";
networking.domain = "rig.ts.hillion.co.uk";
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
boot.kernelParams = [
"ip=dhcp"
# zswap
"zswap.enabled=1"
"zswap.compressor=zstd"
"zswap.max_pool_percent=20"
];
boot.initrd = {
availableKernelModules = [ "igc" ];
network.enable = true;
clevis = {
enable = true;
useTang = true;
devices = {
"disk0-crypt".secretFile = "/data/disk_encryption.jwe";
};
};
};
boot.kernelPackages = pkgs.linuxPackages_latest;
custom.defaults = true;
custom.locations.autoServe = true;
custom.impermanence.enable = true;
custom.users.jake.password = true;
security.sudo.wheelNeedsPassword = lib.mkForce true;
# Networking
networking = {
interfaces.enp171s0.name = "eth0";
interfaces.enp172s0.name = "eth1";
};
networking.nameservers = lib.mkForce [ ]; # Trust the DHCP nameservers
networking.firewall = {
trustedInterfaces = [ "tailscale0" ];
allowedTCPPorts = lib.mkForce [
22 # SSH
];
allowedUDPPorts = lib.mkForce [ ];
interfaces = {
eth0 = {
allowedTCPPorts = lib.mkForce [ ];
allowedUDPPorts = lib.mkForce [ ];
};
};
};
## Tailscale
age.secrets."tailscale/merlin.rig.ts.hillion.co.uk".file = ../../secrets/tailscale/merlin.rig.ts.hillion.co.uk.age;
services.tailscale = {
enable = true;
authKeyFile = config.age.secrets."tailscale/merlin.rig.ts.hillion.co.uk".path;
};
};
}

View File

@ -1,70 +0,0 @@
{
disko.devices = {
disk = {
disk0 = {
type = "disk";
device = "/dev/nvme0n1";
content = {
type = "gpt";
partitions = {
ESP = {
size = "1G";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = [ "umask=0077" ];
};
};
disk0-crypt = {
size = "100%";
content = {
type = "luks";
name = "disk0-crypt";
settings = {
allowDiscards = true;
};
content = {
type = "btrfs";
subvolumes = {
"/data" = {
mountpoint = "/data";
mountOptions = [ "compress=zstd" "ssd" ];
};
"/nix" = {
mountpoint = "/nix";
mountOptions = [ "compress=zstd" "ssd" ];
};
};
};
};
};
swap = {
size = "64G";
content = {
type = "swap";
randomEncryption = true;
discardPolicy = "both";
};
};
};
};
};
};
nodev = {
"/" = {
fsType = "tmpfs";
mountOptions = [
"mode=755"
"size=100%"
];
};
};
};
}

View File

@ -1,28 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "xhci_pci" "thunderbolt" "nvme" "usbhid" "usb_storage" "sd_mod" "rtsx_pci_sdmmc" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp171s0.useDHCP = lib.mkDefault true;
# networking.interfaces.enp172s0.useDHCP = lib.mkDefault true;
# networking.interfaces.wlp173s0f0.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View File

@ -1 +0,0 @@
x86_64-linux

View File

@ -3,6 +3,7 @@
{
imports = [
./hardware-configuration.nix
../../modules/common/default.nix
../../modules/rpi/rpi4.nix
];
@ -12,17 +13,17 @@
networking.hostName = "microserver";
networking.domain = "home.ts.hillion.co.uk";
custom.defaults = true;
## Custom Services
custom.locations.autoServe = true;
# Networking
## Tailscale
age.secrets."tailscale/microserver.home.ts.hillion.co.uk".file = ../../secrets/tailscale/microserver.home.ts.hillion.co.uk.age;
services.tailscale = {
custom.tailscale = {
enable = true;
authKeyFile = config.age.secrets."tailscale/microserver.home.ts.hillion.co.uk".path;
preAuthKeyFile = config.age.secrets."tailscale/microserver.home.ts.hillion.co.uk".path;
advertiseRoutes = [ "10.64.50.0/24" "10.239.19.0/24" ];
advertiseExitNode = true;
};
## Enable IoT VLAN
@ -37,17 +38,22 @@
bluetooth.enable = true;
};
## Enable IP forwarding for Tailscale
boot.kernel.sysctl = {
"net.ipv4.ip_forward" = true;
};
## Run a persistent iperf3 server
services.iperf3.enable = true;
services.iperf3.openFirewall = true;
networking.nameservers = lib.mkForce [ ]; # Trust the DHCP nameservers
networking.firewall.interfaces = {
"eth0" = {
allowedUDPPorts = [
5353 # HomeKit
];
allowedTCPPorts = [
7654 # Tang
21063 # HomeKit
];
};
};

View File

@ -1,7 +0,0 @@
# phoenix.st.ts.hillion.co.uk
Additional installation step for Clevis/Tang:
$ echo -n $DISK_ENCRYPTION_PASSWORD | clevis encrypt sss "$(cat /etc/nixos/hosts/phoenix.st.ts.hillion.co.uk/clevis_config.json)" >/mnt/data/disk_encryption.jwe
$ sudo chown root:root /mnt/data/disk_encryption.jwe
$ sudo chmod 0400 /mnt/data/disk_encryption.jwe

View File

@ -1,14 +0,0 @@
{
"t": 1,
"pins": {
"tang": [
{
"url": "http://10.64.50.21:7654"
},
{
"url": "http://10.64.50.25:7654"
}
]
}
}

View File

@ -1,161 +0,0 @@
{ config, pkgs, lib, ... }:
let
zpool_name = "practical-defiant-coffee";
in
{
imports = [
./disko.nix
./hardware-configuration.nix
];
config = {
system.stateVersion = "24.05";
networking.hostName = "phoenix";
networking.domain = "st.ts.hillion.co.uk";
networking.hostId = "4d7241e9";
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
boot.kernelParams = [
"ip=dhcp"
"zfs.zfs_arc_max=34359738368"
# zswap
"zswap.enabled=1"
"zswap.compressor=zstd"
"zswap.max_pool_percent=20"
];
boot.initrd = {
availableKernelModules = [ "igc" ];
network.enable = true;
clevis = {
enable = true;
useTang = true;
devices = {
"disk0-crypt".secretFile = "/data/disk_encryption.jwe";
"disk1-crypt".secretFile = "/data/disk_encryption.jwe";
};
};
};
custom.defaults = true;
custom.locations.autoServe = true;
custom.impermanence.enable = true;
custom.users.jake.password = true; # TODO: remove me once booting has stabilised
## Filesystems
boot.supportedFilesystems = [ "zfs" ];
boot.zfs = {
forceImportRoot = false;
extraPools = [ zpool_name ];
};
services.btrfs.autoScrub = {
enable = true;
interval = "Tue, 02:00";
# All filesystems includes the BTRFS parts of all the hard drives. This
# would take forever and is redundant as they get fully read regularly.
fileSystems = [ "/data" ];
};
services.zfs.autoScrub = {
enable = true;
interval = "Wed, 02:00";
};
## Resilio
custom.resilio = {
enable = true;
backups.enable = true;
folders =
let
folderNames = [
"dad"
"joseph"
"projects"
"resources"
"sync"
];
mkFolder = name: {
name = name;
secret = {
name = "resilio/plain/${name}";
file = ../../secrets/resilio/plain/${name}.age;
};
};
in
builtins.map (mkFolder) folderNames;
};
services.resilio.directoryRoot = "/${zpool_name}/sync";
## Chia
age.secrets."chia/farmer.key" = {
file = ../../secrets/chia/farmer.key.age;
owner = "chia";
group = "chia";
};
custom.chia = {
enable = true;
keyFile = config.age.secrets."chia/farmer.key".path;
plotDirectories = builtins.genList (i: "/mnt/d${toString i}/plots/contract-k32") 8;
};
## Restic
custom.services.restic.path = "/${zpool_name}/backups/restic";
## Backups
### Git
custom.backups.git = {
enable = true;
extraRepos = [ "https://gitea.hillion.co.uk/JakeHillion/nixos.git" ];
};
## Downloads
custom.services.downloads = {
metadataPath = "/${zpool_name}/downloads/metadata";
downloadCachePath = "/${zpool_name}/downloads/torrents";
filmsPath = "/${zpool_name}/media/films";
tvPath = "/${zpool_name}/media/tv";
};
## Plex
users.users.plex.extraGroups = [ "mediaaccess" ];
services.plex.enable = true;
## Networking
networking = {
interfaces.enp4s0.name = "eth0";
interfaces.enp5s0.name = "eth1";
interfaces.enp6s0.name = "eth2";
interfaces.enp8s0.name = "eth3";
};
networking.nameservers = lib.mkForce [ ]; # Trust the DHCP nameservers
networking.firewall = {
trustedInterfaces = [ "tailscale0" ];
allowedTCPPorts = lib.mkForce [
22 # SSH
];
allowedUDPPorts = lib.mkForce [ ];
interfaces = {
eth0 = {
allowedTCPPorts = lib.mkForce [
32400 # Plex
];
allowedUDPPorts = lib.mkForce [ ];
};
};
};
## Tailscale
age.secrets."tailscale/phoenix.st.ts.hillion.co.uk".file = ../../secrets/tailscale/phoenix.st.ts.hillion.co.uk.age;
services.tailscale = {
enable = true;
authKeyFile = config.age.secrets."tailscale/phoenix.st.ts.hillion.co.uk".path;
};
};
}

View File

@ -1,103 +0,0 @@
{
disko.devices = {
disk = {
disk0 = {
type = "disk";
device = "/dev/nvme0n1";
content = {
type = "gpt";
partitions = {
ESP = {
size = "1G";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = [ "umask=0077" ];
};
};
disk0-crypt = {
size = "100%";
content = {
type = "luks";
name = "disk0-crypt";
settings = {
allowDiscards = true;
};
};
};
swap = {
size = "64G";
content = {
type = "swap";
randomEncryption = true;
discardPolicy = "both";
};
};
};
};
};
disk1 = {
type = "disk";
device = "/dev/nvme1n1";
content = {
type = "gpt";
partitions = {
disk1-crypt = {
size = "100%";
content = {
type = "luks";
name = "disk1-crypt";
settings = {
allowDiscards = true;
};
content = {
type = "btrfs";
extraArgs = [
"-d raid1"
"/dev/mapper/disk0-crypt"
];
subvolumes = {
"/data" = {
mountpoint = "/data";
mountOptions = [ "compress=zstd" "ssd" ];
};
"/nix" = {
mountpoint = "/nix";
mountOptions = [ "compress=zstd" "ssd" ];
};
};
};
};
};
swap = {
size = "64G";
content = {
type = "swap";
randomEncryption = true;
discardPolicy = "both";
};
};
};
};
};
};
nodev = {
"/" = {
fsType = "tmpfs";
mountOptions = [
"mode=755"
"size=100%"
];
};
};
};
}

View File

@ -1 +0,0 @@
x86_64-linux

View File

@ -2,6 +2,7 @@
{
imports = [
../../modules/common/default.nix
./hardware-configuration.nix
];
@ -18,8 +19,6 @@
"net.ipv4.conf.all.forwarding" = true;
};
custom.defaults = true;
## Interactive password
custom.users.jake.password = true;
@ -32,14 +31,6 @@
nat.enable = lib.mkForce false;
useDHCP = false;
vlans = {
cameras = {
id = 3;
interface = "eth2";
};
};
interfaces = {
enp1s0 = {
name = "eth0";
@ -64,14 +55,6 @@
}
];
};
cameras /* cameras@eth2 */ = {
ipv4.addresses = [
{
address = "10.133.145.1";
prefixLength = 24;
}
];
};
enp4s0 = { name = "eth3"; };
enp5s0 = { name = "eth4"; };
enp6s0 = { name = "eth5"; };
@ -98,10 +81,8 @@
ip protocol icmp counter accept comment "accept all ICMP types"
iifname "eth0" tcp dport 22 counter accept comment "SSH"
iifname { "eth0", "cameras" } ct state { established, related } counter accept
iifname { "eth0", "cameras" } drop
iifname "eth0" ct state { established, related } counter accept
iifname "eth0" drop
}
chain forward {
@ -110,7 +91,6 @@
iifname {
"eth1",
"eth2",
"tailscale0",
} oifname {
"eth0",
} counter accept comment "Allow trusted LAN to WAN"
@ -120,14 +100,19 @@
} oifname {
"eth1",
"eth2",
"tailscale0",
} ct state { established,related } counter accept comment "Allow established back to LANs"
} ct state established,related counter accept comment "Allow established back to LANs"
iifname "tailscale0" oifname { "eth1", "eth2" } counter accept comment "Allow LAN access from Tailscale"
iifname { "eth1", "eth2" } oifname "tailscale0" ct state { established,related } counter accept comment "Allow established back to Tailscale"
ip daddr 10.64.50.20 tcp dport 32400 counter accept comment "Plex"
ip daddr 10.64.50.27 tcp dport 32400 counter accept comment "Plex"
ip daddr 10.64.50.21 tcp dport 7654 counter accept comment "Tang"
ip daddr 10.64.50.20 tcp dport 8444 counter accept comment "Chia"
ip daddr 10.64.50.20 tcp dport 28967 counter accept comment "zfs.tywin.storj"
ip daddr 10.64.50.20 udp dport 28967 counter accept comment "zfs.tywin.storj"
ip daddr 10.64.50.20 tcp dport 28968 counter accept comment "d0.tywin.storj"
ip daddr 10.64.50.20 udp dport 28968 counter accept comment "d0.tywin.storj"
ip daddr 10.64.50.20 tcp dport 28969 counter accept comment "d1.tywin.storj"
ip daddr 10.64.50.20 udp dport 28969 counter accept comment "d1.tywin.storj"
ip daddr 10.64.50.20 tcp dport 28970 counter accept comment "d2.tywin.storj"
ip daddr 10.64.50.20 udp dport 28970 counter accept comment "d2.tywin.storj"
}
}
@ -135,17 +120,22 @@
chain prerouting {
type nat hook prerouting priority filter; policy accept;
iifname eth0 tcp dport 32400 counter dnat to 10.64.50.27
iifname eth0 tcp dport 7654 counter dnat to 10.64.50.21
iifname eth0 tcp dport 32400 counter dnat to 10.64.50.20
iifname eth0 tcp dport 8444 counter dnat to 10.64.50.20
iifname eth0 tcp dport 28967 counter dnat to 10.64.50.20
iifname eth0 udp dport 28967 counter dnat to 10.64.50.20
iifname eth0 tcp dport 28968 counter dnat to 10.64.50.20
iifname eth0 udp dport 28968 counter dnat to 10.64.50.20
iifname eth0 tcp dport 28969 counter dnat to 10.64.50.20
iifname eth0 udp dport 28969 counter dnat to 10.64.50.20
iifname eth0 tcp dport 28970 counter dnat to 10.64.50.20
iifname eth0 udp dport 28970 counter dnat to 10.64.50.20
}
chain postrouting {
type nat hook postrouting priority filter; policy accept;
oifname "eth0" masquerade
iifname tailscale0 oifname eth1 snat to 10.64.50.1
iifname tailscale0 oifname eth2 snat to 10.239.19.1
}
}
'';
@ -159,42 +149,12 @@
settings = {
interfaces-config = {
interfaces = [ "eth1" "eth2" "cameras" ];
interfaces = [ "eth1" "eth2" ];
};
lease-database = {
type = "memfile";
persist = true;
name = "/var/lib/kea/dhcp4.leases";
persist = false;
};
option-def = [
{
name = "cookie";
space = "vendor-encapsulated-options-space";
code = 1;
type = "string";
array = false;
}
];
client-classes = [
{
name = "APC";
test = "option[vendor-class-identifier].text == 'APC'";
option-data = [
{
always-send = true;
name = "vendor-encapsulated-options";
}
{
name = "cookie";
space = "vendor-encapsulated-options-space";
code = 1;
data = "1APC";
}
];
}
];
subnet4 = [
{
subnet = "10.64.50.0/24";
@ -213,25 +173,23 @@
}
{
name = "domain-name-servers";
data = "10.64.50.1, 1.1.1.1, 8.8.8.8";
data = "1.1.1.1, 8.8.8.8";
}
];
reservations = [
{
# tywin.storage.ts.hillion.co.uk
hw-address = "c8:7f:54:6d:e1:03";
ip-address = "10.64.50.20";
hostname = "tywin";
}
{
# syncbox
hw-address = "00:1e:06:49:06:1e";
ip-address = "10.64.50.22";
hostname = "syncbox";
}
];
reservations = lib.lists.remove null (lib.lists.imap0
(i: el: if el == null then null else {
ip-address = "10.64.50.${toString (20 + i)}";
inherit (el) hw-address hostname;
}) [
null
{ hostname = "microserver"; hw-address = "e4:5f:01:b4:58:95"; }
{ hostname = "theon"; hw-address = "00:1e:06:49:06:1e"; }
{ hostname = "server-switch"; hw-address = "84:d8:1b:9d:0d:85"; }
{ hostname = "apc-ap7921"; hw-address = "00:c0:b7:6b:f4:34"; }
{ hostname = "sodium"; hw-address = "d8:3a:dd:c3:d6:2b"; }
{ hostname = "gendry"; hw-address = "18:c0:4d:35:60:1e"; }
{ hostname = "phoenix"; hw-address = "a8:b8:e0:04:17:a5"; }
{ hostname = "merlin"; hw-address = "b0:41:6f:13:20:14"; }
{ hostname = "stinger"; hw-address = "7c:83:34:be:30:dd"; }
]);
}
{
subnet = "10.239.19.0/24";
@ -250,113 +208,37 @@
}
{
name = "domain-name-servers";
data = "10.239.19.1, 1.1.1.1, 8.8.8.8";
data = "1.1.1.1, 8.8.8.8";
}
];
reservations = [
{
# bedroom-everything-presence-one
hw-address = "40:22:d8:e0:1d:50";
ip-address = "10.239.19.2";
hostname = "bedroom-everything-presence-one";
}
{
# living-room-everything-presence-one
hw-address = "40:22:d8:e0:0f:78";
ip-address = "10.239.19.3";
hostname = "living-room-everything-presence-one";
}
{
hw-address = "a0:7d:9c:b0:f0:14";
ip-address = "10.239.19.4";
hostname = "hallway-wall-tablet";
}
{
hw-address = "d8:3a:dd:c3:d6:2b";
ip-address = "10.239.19.5";
hostname = "sodium";
}
{
hw-address = "48:da:35:6f:f2:4b";
ip-address = "10.239.19.6";
hostname = "hammer";
}
{
hw-address = "48:da:35:6f:83:b8";
ip-address = "10.239.19.7";
hostname = "charlie";
}
];
}
{
subnet = "10.133.145.0/24";
interface = "cameras";
pools = [{
pool = "10.133.145.64 - 10.133.145.254";
}];
option-data = [
{
name = "routers";
data = "10.133.145.1";
}
{
name = "broadcast-address";
data = "10.133.145.255";
}
{
name = "domain-name-servers";
data = "1.1.1.1, 8.8.8.8";
}
];
reservations = [
];
}
];
};
};
};
unbound = {
enable = true;
settings = {
server = {
interface = [
"127.0.0.1"
"10.64.50.1"
"10.239.19.1"
];
access-control = [
"10.64.50.0/24 allow"
"10.239.19.0/24 allow"
];
};
forward-zone = [
{
name = ".";
forward-tls-upstream = "yes";
forward-addr = [
"1.1.1.1#cloudflare-dns.com"
"1.0.0.1#cloudflare-dns.com"
"8.8.8.8#dns.google"
"8.8.4.4#dns.google"
];
}
];
};
};
};
## Tailscale
age.secrets."tailscale/router.home.ts.hillion.co.uk".file = ../../secrets/tailscale/router.home.ts.hillion.co.uk.age;
services.tailscale = {
custom.tailscale = {
enable = true;
authKeyFile = config.age.secrets."tailscale/router.home.ts.hillion.co.uk".path;
useRoutingFeatures = "server";
extraSetFlags = [
"--advertise-routes"
"10.64.50.0/24,10.239.19.0/24,10.133.145.0/24"
"--advertise-exit-node"
"--netfilter-mode=off"
];
preAuthKeyFile = config.age.secrets."tailscale/router.home.ts.hillion.co.uk".path;
ipv4Addr = "100.105.71.48";
ipv6Addr = "fd7a:115c:a1e0:ab12:4843:cd96:6269:4730";
};
## Enable btrfs compression
@ -380,34 +262,9 @@
};
services.caddy = {
enable = true;
virtualHosts = {
"graphs.router.home.ts.hillion.co.uk" = {
listenAddresses = [ config.custom.dns.tailscale.ipv4 config.custom.dns.tailscale.ipv6 ];
extraConfig = ''
tls {
ca https://ca.ts.hillion.co.uk:8443/acme/acme/directory
}
reverse_proxy unix///run/netdata/netdata.sock
'';
};
"hammer.kvm.ts.hillion.co.uk" = {
listenAddresses = [ config.custom.dns.tailscale.ipv4 config.custom.dns.tailscale.ipv6 ];
extraConfig = ''
tls {
ca https://ca.ts.hillion.co.uk:8443/acme/acme/directory
}
reverse_proxy http://10.239.19.6
'';
};
"charlie.kvm.ts.hillion.co.uk" = {
listenAddresses = [ config.custom.dns.tailscale.ipv4 config.custom.dns.tailscale.ipv6 ];
extraConfig = ''
tls {
ca https://ca.ts.hillion.co.uk:8443/acme/acme/directory
}
reverse_proxy http://10.239.19.7
'';
};
virtualHosts."http://graphs.router.home.ts.hillion.co.uk" = {
listenAddresses = [ config.custom.tailscale.ipv4Addr config.custom.tailscale.ipv6Addr ];
extraConfig = "reverse_proxy unix///run/netdata/netdata.sock";
};
};
users.users.caddy.extraGroups = [ "netdata" ];

View File

@ -1,103 +0,0 @@
{ config, pkgs, lib, nixos-hardware, ... }:
{
imports = [
"${nixos-hardware}/raspberry-pi/5/default.nix"
./hardware-configuration.nix
];
config = {
system.stateVersion = "24.05";
networking.hostName = "sodium";
networking.domain = "pop.ts.hillion.co.uk";
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
custom.defaults = true;
## Enable btrfs compression
fileSystems."/data".options = [ "compress=zstd" ];
fileSystems."/nix".options = [ "compress=zstd" ];
## Impermanence
custom.impermanence = {
enable = true;
cache.enable = true;
};
boot.initrd.postDeviceCommands = lib.mkAfter ''
btrfs subvolume delete /cache/tmp
btrfs subvolume snapshot /cache/empty_snapshot /cache/tmp
chmod 1777 /cache/tmp
'';
## CA server
custom.ca.service.enable = true;
### nix only supports build-dir from 2.22. bind mount /tmp to something persistent instead.
fileSystems."/tmp" = {
device = "/cache/tmp";
options = [ "bind" ];
};
# nix = {
# settings = {
# build-dir = "/cache/tmp/";
# };
# };
## Custom Services
custom.locations.autoServe = true;
custom.www.home.enable = true;
custom.www.iot.enable = true;
custom.services.isponsorblocktv.enable = true;
# Networking
networking = {
interfaces.end0.name = "eth0";
vlans = {
iot = {
id = 2;
interface = "eth0";
};
};
};
networking.nameservers = lib.mkForce [ ]; # Trust the DHCP nameservers
networking.firewall = {
trustedInterfaces = [ "tailscale0" ];
allowedTCPPorts = lib.mkForce [
22 # SSH
];
allowedUDPPorts = lib.mkForce [ ];
interfaces = {
eth0 = {
allowedTCPPorts = lib.mkForce [
80 # HTTP 1-2
443 # HTTPS 1-2
7654 # Tang
];
allowedUDPPorts = lib.mkForce [
443 # HTTP 3
];
};
iot = {
allowedTCPPorts = lib.mkForce [
80 # HTTP 1-2
443 # HTTPS 1-2
];
allowedUDPPorts = lib.mkForce [
443 # HTTP 3
];
};
};
};
## Tailscale
age.secrets."tailscale/sodium.pop.ts.hillion.co.uk".file = ../../secrets/tailscale/sodium.pop.ts.hillion.co.uk.age;
services.tailscale = {
enable = true;
authKeyFile = config.age.secrets."tailscale/sodium.pop.ts.hillion.co.uk".path;
};
};
}

View File

@ -1,63 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "usbhid" "usb_storage" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{
device = "tmpfs";
fsType = "tmpfs";
options = [ "mode=0755" ];
};
fileSystems."/boot" =
{
device = "/dev/disk/by-uuid/417B-1063";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
fileSystems."/nix" =
{
device = "/dev/disk/by-uuid/48ae82bd-4d7f-4be6-a9c9-4fcc29d4aac0";
fsType = "btrfs";
options = [ "subvol=nix" ];
};
fileSystems."/data" =
{
device = "/dev/disk/by-uuid/48ae82bd-4d7f-4be6-a9c9-4fcc29d4aac0";
fsType = "btrfs";
options = [ "subvol=data" ];
};
fileSystems."/cache" =
{
device = "/dev/disk/by-uuid/48ae82bd-4d7f-4be6-a9c9-4fcc29d4aac0";
fsType = "btrfs";
options = [ "subvol=cache" ];
};
swapDevices = [ ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enu1u4.useDHCP = lib.mkDefault true;
# networking.interfaces.wlan0.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
}

View File

@ -1 +0,0 @@
aarch64-linux

View File

@ -1,84 +0,0 @@
{ config, pkgs, lib, ... }:
{
imports = [
./disko.nix
./hardware-configuration.nix
];
config = {
system.stateVersion = "24.05";
networking.hostName = "stinger";
networking.domain = "pop.ts.hillion.co.uk";
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
boot.kernelParams = [
"ip=dhcp"
# zswap
"zswap.enabled=1"
"zswap.compressor=zstd"
"zswap.max_pool_percent=20"
];
boot.initrd = {
availableKernelModules = [ "r8169" ];
network.enable = true;
clevis = {
enable = true;
useTang = true;
devices = {
"disk0-crypt".secretFile = "/data/disk_encryption.jwe";
};
};
};
custom.defaults = true;
custom.locations.autoServe = true;
custom.impermanence.enable = true;
hardware = {
bluetooth.enable = true;
};
# Networking
networking = {
interfaces.enp1s0.name = "eth0";
vlans = {
iot = {
id = 2;
interface = "eth0";
};
};
};
networking.nameservers = lib.mkForce [ ]; # Trust the DHCP nameservers
networking.firewall = {
trustedInterfaces = [ "tailscale0" ];
allowedTCPPorts = lib.mkForce [
22 # SSH
];
allowedUDPPorts = lib.mkForce [ ];
interfaces = {
eth0 = {
allowedTCPPorts = lib.mkForce [
1400 # HA Sonos
21063 # HomeKit
];
allowedUDPPorts = lib.mkForce [
5353 # HomeKit
];
};
};
};
## Tailscale
age.secrets."tailscale/stinger.pop.ts.hillion.co.uk".file = ../../secrets/tailscale/stinger.pop.ts.hillion.co.uk.age;
services.tailscale = {
enable = true;
authKeyFile = config.age.secrets."tailscale/stinger.pop.ts.hillion.co.uk".path;
};
};
}

View File

@ -1,70 +0,0 @@
{
disko.devices = {
disk = {
disk0 = {
type = "disk";
device = "/dev/nvme0n1";
content = {
type = "gpt";
partitions = {
ESP = {
size = "1G";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = [ "umask=0077" ];
};
};
disk0-crypt = {
size = "100%";
content = {
type = "luks";
name = "disk0-crypt";
settings = {
allowDiscards = true;
};
content = {
type = "btrfs";
subvolumes = {
"/data" = {
mountpoint = "/data";
mountOptions = [ "compress=zstd" "ssd" ];
};
"/nix" = {
mountpoint = "/nix";
mountOptions = [ "compress=zstd" "ssd" ];
};
};
};
};
};
swap = {
size = "64G";
content = {
type = "swap";
randomEncryption = true;
discardPolicy = "both";
};
};
};
};
};
};
nodev = {
"/" = {
fsType = "tmpfs";
mountOptions = [
"mode=755"
"size=100%"
];
};
};
};
}

View File

@ -1,28 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "nvme" "usbhid" "usb_storage" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp0s20f0u2.useDHCP = lib.mkDefault true;
# networking.interfaces.enp1s0.useDHCP = lib.mkDefault true;
# networking.interfaces.wlo1.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View File

@ -1 +0,0 @@
x86_64-linux

View File

@ -2,6 +2,7 @@
{
imports = [
../../modules/common/default.nix
./hardware-configuration.nix
];
@ -14,18 +15,14 @@
boot.loader.grub.enable = false;
boot.loader.generic-extlinux-compatible.enable = true;
custom.defaults = true;
## Custom Services
custom = {
locations.autoServe = true;
};
## Networking
networking.useNetworkd = true;
systemd.network.enable = true;
networking.nameservers = lib.mkForce [ ]; # Trust the DHCP nameservers
networking.firewall = {
trustedInterfaces = [ "tailscale0" ];
allowedTCPPorts = lib.mkForce [
@ -42,9 +39,11 @@
## Tailscale
age.secrets."tailscale/theon.storage.ts.hillion.co.uk".file = ../../secrets/tailscale/theon.storage.ts.hillion.co.uk.age;
services.tailscale = {
custom.tailscale = {
enable = true;
authKeyFile = config.age.secrets."tailscale/theon.storage.ts.hillion.co.uk".path;
preAuthKeyFile = config.age.secrets."tailscale/theon.storage.ts.hillion.co.uk".path;
ipv4Addr = "100.104.142.22";
ipv6Addr = "fd7a:115c:a1e0::4aa8:8e16";
};
## Packages

View File

@ -0,0 +1,223 @@
{ config, pkgs, lib, ... }:
{
imports = [
../../modules/common/default.nix
./hardware-configuration.nix
];
config = {
system.stateVersion = "22.11";
networking.hostName = "tywin";
networking.domain = "storage.ts.hillion.co.uk";
networking.hostId = "2a9b6df5";
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
custom.locations.autoServe = true;
## Tailscale
age.secrets."tailscale/tywin.storage.ts.hillion.co.uk".file = ../../secrets/tailscale/tywin.storage.ts.hillion.co.uk.age;
custom.tailscale = {
enable = true;
preAuthKeyFile = config.age.secrets."tailscale/tywin.storage.ts.hillion.co.uk".path;
ipv4Addr = "100.115.31.91";
ipv6Addr = "fd7a:115c:a1e0:ab12:4843:cd96:6273:1f5b";
};
## Filesystems
fileSystems."/".options = [ "compress=zstd" ];
boot.supportedFilesystems = [ "zfs" ];
boot.zfs = {
forceImportRoot = false;
extraPools = [ "data" ];
};
boot.kernelParams = [ "zfs.zfs_arc_max=25769803776" ];
services.zfs.autoScrub = {
enable = true;
interval = "Tue, 02:00";
};
## Backups
### Git
age.secrets."git/git_backups_ecdsa".file = ../../secrets/git/git_backups_ecdsa.age;
age.secrets."git/git_backups_remotes".file = ../../secrets/git/git_backups_remotes.age;
custom.backups.git = {
enable = true;
sshKey = config.age.secrets."git/git_backups_ecdsa".path;
reposFile = config.age.secrets."git/git_backups_remotes".path;
repos = [ "https://gitea.hillion.co.uk/JakeHillion/nixos.git" ];
};
## Resilio
custom.resilio.enable = true;
services.resilio.deviceName = "tywin.storage";
services.resilio.directoryRoot = "/data/users/jake/sync";
services.resilio.storagePath = "/data/users/jake/sync/.sync";
custom.resilio.folders =
let
folderNames = [
"dad"
"joseph"
"projects"
"resources"
"sync"
];
mkFolder = name: {
name = name;
secret = {
name = "resilio/plain/${name}";
file = ../../secrets/resilio/plain/${name}.age;
};
};
in
builtins.map (mkFolder) folderNames;
age.secrets."resilio/restic/128G.key" = {
file = ../../secrets/restic/128G.age;
owner = "rslsync";
group = "rslsync";
};
services.restic.backups."sync" = {
repository = "rest:http://restic.tywin.storage.ts.hillion.co.uk/128G";
user = "rslsync";
passwordFile = config.age.secrets."resilio/restic/128G.key".path;
timerConfig = {
Persistent = true;
OnUnitInactiveSec = "15m";
RandomizedDelaySec = "5m";
};
paths = [ "/data/users/jake/sync" ];
exclude = [
"/data/users/jake/sync/.sync"
"/data/users/jake/sync/*/.sync"
"/data/users/jake/sync/resources/media/films"
"/data/users/jake/sync/resources/media/iso"
"/data/users/jake/sync/resources/media/tv"
"/data/users/jake/sync/dad/media"
];
};
## Restic
age.secrets."restic/128G.key" = {
file = ../../secrets/restic/128G.age;
owner = "restic";
group = "restic";
};
age.secrets."restic/1.6T.key" = {
file = ../../secrets/restic/1.6T.age;
owner = "restic";
group = "restic";
};
services.restic.server = {
enable = true;
appendOnly = true;
extraFlags = [ "--no-auth" ];
dataDir = "/data/backups/restic";
listenAddress = "127.0.0.1:8000"; # TODO: can this be a Unix socket?
};
services.caddy = {
enable = true;
virtualHosts."http://restic.tywin.storage.ts.hillion.co.uk".extraConfig = ''
bind ${config.custom.tailscale.ipv4Addr} ${config.custom.tailscale.ipv6Addr}
reverse_proxy http://localhost:8000
'';
};
### HACK: Allow Caddy to restart if it fails. This happens because Tailscale
### starts too late. The upstream NixOS Caddy unit does restart on failure,
### but restarting is prevented on exit code 1. Set the exit code to 0
### (non-failure) to override this.
systemd.services.caddy = {
requires = [ "tailscaled.service" ];
after = [ "tailscaled.service" ];
serviceConfig = {
RestartPreventExitStatus = lib.mkForce 0;
};
};
services.restic.backups."prune-128G" = {
repository = "/data/backups/restic/128G";
user = "restic";
passwordFile = config.age.secrets."restic/128G.key".path;
timerConfig = {
Persistent = true;
OnCalendar = "02:30";
RandomizedDelaySec = "1h";
};
pruneOpts = [
"--keep-last 48"
"--keep-within-hourly 7d"
"--keep-within-daily 1m"
"--keep-within-weekly 6m"
"--keep-within-monthly 24m"
];
};
services.restic.backups."prune-1.6T" = {
repository = "/data/backups/restic/1.6T";
user = "restic";
passwordFile = config.age.secrets."restic/1.6T.key".path;
timerConfig = {
Persistent = true;
OnCalendar = "Wed, 02:30";
RandomizedDelaySec = "4h";
};
pruneOpts = [
"--keep-within-daily 14d"
"--keep-within-weekly 2m"
"--keep-within-monthly 18m"
];
};
## Chia
age.secrets."chia/farmer.key" = {
file = ../../secrets/chia/farmer.key.age;
owner = "chia";
group = "chia";
};
custom.chia = {
enable = true;
openFirewall = true;
keyFile = config.age.secrets."chia/farmer.key".path;
plotDirectories = builtins.genList (i: "/mnt/d${toString i}/plots/contract-k32") 7;
};
## Downloads
custom.services.downloads = {
metadataPath = "/data/downloads/metadata";
downloadCachePath = "/data/downloads/torrents";
filmsPath = "/data/media/films";
tvPath = "/data/media/tv";
};
## Plex
users.users.plex.extraGroups = [ "mediaaccess" ];
services.plex = {
enable = true;
openFirewall = true;
};
## Firewall
networking.firewall.interfaces."tailscale0".allowedTCPPorts = [
80 # Caddy (restic.tywin.storage.ts.)
14002 # Storj Dashboard (d0.)
14003 # Storj Dashboard (d1.)
14004 # Storj Dashboard (d2.)
14005 # Storj Dashboard (d3.)
];
};
}

View File

@ -9,11 +9,23 @@
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "nvme" "ahci" "xhci_pci" "thunderbolt" "usbhid" "usb_storage" "sd_mod" ];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{
device = "/dev/disk/by-uuid/cb48d4ed-d268-490c-9977-2b5d31ce2c1b";
fsType = "btrfs";
};
fileSystems."/boot" =
{
device = "/dev/disk/by-uuid/BC57-0AF6";
fsType = "vfat";
};
fileSystems."/mnt/d0" =
{
device = "/dev/disk/by-uuid/9136434d-d883-4118-bd01-903f720e5ce1";
@ -50,28 +62,14 @@
fsType = "btrfs";
};
fileSystems."/mnt/d6" =
{
device = "/dev/disk/by-uuid/b461e07d-39ab-46b4-b1d1-14c2e0791915";
fsType = "btrfs";
};
fileSystems."/mnt/d7" =
{
device = "/dev/disk/by-uuid/eb8d32d0-e506-449b-8dbc-585ba05c4252";
fsType = "btrfs";
};
swapDevices = [ ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp4s0.useDHCP = lib.mkDefault true;
# networking.interfaces.enp5s0.useDHCP = lib.mkDefault true;
# networking.interfaces.enp6s0.useDHCP = lib.mkDefault true;
# networking.interfaces.enp8s0.useDHCP = lib.mkDefault true;
# networking.interfaces.enp7s0.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View File

@ -2,7 +2,7 @@
{
imports = [
./git/default.nix
./git.nix
./homeassistant.nix
./matrix.nix
];

View File

@ -7,17 +7,25 @@ in
options.custom.backups.git = {
enable = lib.mkEnableOption "git";
extraRepos = lib.mkOption {
repos = lib.mkOption {
description = "A list of remotes to clone.";
type = with lib.types; listOf str;
default = [ ];
};
reposFile = lib.mkOption {
description = "A file containing the remotes to clone, one per line.";
type = with lib.types; nullOr str;
default = null;
};
sshKey = lib.mkOption {
description = "SSH private key to use when cloning repositories over SSH.";
type = with lib.types; nullOr str;
default = null;
};
};
config = lib.mkIf cfg.enable {
age.secrets."git/git_backups_ecdsa".file = ../../../secrets/git/git_backups_ecdsa.age;
age.secrets."git/git_backups_remotes".file = ../../../secrets/git/git_backups_remotes.age;
age.secrets."git-backups/restic/128G".file = ../../../secrets/restic/128G.age;
age.secrets."git-backups/restic/128G".file = ../../secrets/restic/128G.age;
systemd.services.backup-git = {
description = "Git repo backup service.";
@ -29,10 +37,9 @@ in
WorkingDirectory = "%C/backup-git";
LoadCredential = [
"id_ecdsa:${config.age.secrets."git/git_backups_ecdsa".path}"
"repos_file:${config.age.secrets."git/git_backups_remotes".path}"
"restic_password:${config.age.secrets."git-backups/restic/128G".path}"
];
] ++ (if cfg.sshKey == null then [ ] else [ "id_ecdsa:${cfg.sshKey}" ])
++ (if cfg.reposFile == null then [ ] else [ "repos_file:${cfg.reposFile}" ]);
};
environment = {
@ -41,12 +48,11 @@ in
};
script = ''
set -x
shopt -s nullglob
# Read and deduplicate repos
readarray -t raw_repos < $CREDENTIALS_DIRECTORY/repos_file
declare -A repos=(${builtins.concatStringsSep " " (builtins.map (x : "[${x}]=1") cfg.extraRepos)})
${if cfg.reposFile == null then "" else "readarray -t raw_repos < $CREDENTIALS_DIRECTORY/repos_file"}
declare -A repos=(${builtins.concatStringsSep " " (builtins.map (x : "[${x}]=1") cfg.repos)})
for repo in ''${raw_repos[@]}; do repos[$repo]=1; done
# Clean up existing repos
@ -73,7 +79,7 @@ in
# Backup to Restic
${pkgs.restic}/bin/restic \
-r rest:https://restic.ts.hillion.co.uk/128G \
-r rest:http://restic.tywin.storage.ts.hillion.co.uk/128G \
--cache-dir .restic --exclude .restic \
backup .
@ -87,9 +93,9 @@ in
wantedBy = [ "timers.target" ];
timerConfig = {
Persistent = true;
OnBootSec = "10m";
OnUnitInactiveSec = "15m";
RandomizedDelaySec = "5m";
Unit = "backup-git.service";
};
};
};

View File

@ -1 +0,0 @@
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIc3WVROMCifYtqHRWf5gZAOQFdpbcSYOC0JckKzUVM5sGdXtw3VXNiVqY3npdMizS4e1V8Hh77UecD3q9CLkMA= backups-git@nixos

View File

@ -14,45 +14,20 @@ in
owner = "hass";
group = "hass";
};
age.secrets."backups/homeassistant/restic/1.6T" = {
file = ../../secrets/restic/1.6T.age;
owner = "postgres";
group = "postgres";
};
services = {
postgresqlBackup = {
enable = true;
compression = "none"; # for better diffing
databases = [ "homeassistant" ];
};
restic.backups = {
"homeassistant-config" = {
restic.backups."homeassistant" = {
user = "hass";
timerConfig = {
OnCalendar = "03:00";
RandomizedDelaySec = "60m";
};
repository = "rest:https://restic.ts.hillion.co.uk/128G";
repository = "rest:http://restic.tywin.storage.ts.hillion.co.uk/128G";
passwordFile = config.age.secrets."backups/homeassistant/restic/128G".path;
paths = [
config.services.home-assistant.configDir
];
};
"homeassistant-database" = {
user = "postgres";
timerConfig = {
OnCalendar = "03:00";
RandomizedDelaySec = "60m";
};
repository = "rest:https://restic.ts.hillion.co.uk/1.6T";
passwordFile = config.age.secrets."backups/homeassistant/restic/1.6T".path;
paths = [
"${config.services.postgresqlBackup.location}/homeassistant.sql"
];
};
};
};
};
}

View File

@ -24,7 +24,7 @@ in
OnCalendar = "03:00";
RandomizedDelaySec = "60m";
};
repository = "rest:https://restic.ts.hillion.co.uk/128G";
repository = "rest:http://restic.tywin.storage.ts.hillion.co.uk/128G";
passwordFile = config.age.secrets."backups/matrix/restic/128G".path;
paths = [
"${config.services.postgresqlBackup.location}/matrix-synapse.sql"

View File

@ -1,11 +0,0 @@
# ca
Getting the certificates in the right place is a manual process (for now, at least). This is to retain full control over the root certificate's key and to allow manual cycling. The manual commands should be run on a trusted machine.
Creating a 10-year root certificate:
nix run nixpkgs#step-cli -- certificate create 'Hillion ACME' cert.pem key.pem --kty=EC --curve=P-521 --profile=root-ca --not-after=87600h
Creating the intermediate key:
nix run nixpkgs#step-cli -- certificate create 'Hillion ACME (sodium.pop.ts.hillion.co.uk)' intermediate_cert.pem intermediate_key.pem --kty=EC --curve=P-521 --profile=intermediate-ca --not-after=8760h --ca=$NIXOS_ROOT/modules/ca/cert.pem --ca-key=DOWNLOADED_KEY.pem

View File

@ -1,13 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIB+TCCAVqgAwIBAgIQIZdaIUsuJdjnu7DQP1N8oTAKBggqhkjOPQQDBDAXMRUw
EwYDVQQDEwxIaWxsaW9uIEFDTUUwHhcNMjQwODAxMjIyMjEwWhcNMzQwNzMwMjIy
MjEwWjAXMRUwEwYDVQQDEwxIaWxsaW9uIEFDTUUwgZswEAYHKoZIzj0CAQYFK4EE
ACMDgYYABAAJI3z1PrV97EFc1xaENcr6ML1z6xdXTy+ReHtf42nWsw+c3WDKzJ45
+xHJ/p2BTOR5+NQ7RGQQ68zmFJnEYTYDogAw6U9YzxxDGlG1HlgnZ9PPmXoF+PFl
Zy2WZCiDPx5KDJcjTPzLV3ITt4fl3PMA12BREVeonvrvRLcpVrMfS2b7wKNFMEMw
DgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFFBT
fMT0uUbS+lVUbGKK8/SZHPISMAoGCCqGSM49BAMEA4GMADCBiAJCAPNIwrQztPrN
MaHB3J0lNVODIGwQWblt99vnjqIWOKJhgckBxaElyInsyt8dlnmTCpOCJdY4BA+K
Nr87AfwIWdAaAkIBV5i4zXPXVKblGKnmM0FomFSbq2cYE3pmi5BO1StakH1kEHlf
vbkdwFgkw2MlARp0Ka3zbWivBG9zjPoZtsL/8tk=
-----END CERTIFICATE-----

View File

@ -1,14 +0,0 @@
{ config, pkgs, lib, ... }:
let
cfg = config.custom.ca.consumer;
in
{
options.custom.ca.consumer = {
enable = lib.mkEnableOption "ca.service";
};
config = lib.mkIf cfg.enable {
security.pki.certificates = [ (builtins.readFile ./cert.pem) ];
};
}

View File

@ -1,8 +0,0 @@
{ ... }:
{
imports = [
./consumer.nix
./service.nix
];
}

View File

@ -1,48 +0,0 @@
{ config, pkgs, lib, ... }:
let
cfg = config.custom.ca.service;
in
{
options.custom.ca.service = {
enable = lib.mkEnableOption "ca.service";
};
config = lib.mkIf cfg.enable {
users.users.step-ca.uid = config.ids.uids.step-ca;
users.groups.step-ca.gid = config.ids.gids.step-ca;
services.step-ca = {
enable = true;
address = config.custom.dns.tailscale.ipv4;
port = 8443;
intermediatePasswordFile = "/data/system/ca/intermediate.psk";
settings = {
root = ./cert.pem;
crt = "/data/system/ca/intermediate.crt";
key = "/data/system/ca/intermediate.pem";
dnsNames = [ "ca.ts.hillion.co.uk" ];
logger = { format = "text"; };
db = {
type = "badgerv2";
dataSource = "/var/lib/step-ca/db";
};
authority = {
provisioners = [
{
type = "ACME";
name = "acme";
}
];
};
};
};
};
}

View File

@ -22,8 +22,8 @@ in
default = null;
};
plotDirectories = lib.mkOption {
type = with lib.types; listOf str;
default = [ ];
type = with lib.types; nullOr (listOf str);
default = null;
};
openFirewall = lib.mkOption {
type = lib.types.bool;
@ -46,7 +46,7 @@ in
};
virtualisation.oci-containers.containers.chia = {
image = "ghcr.io/chia-network/chia:2.4.3";
image = "ghcr.io/chia-network/chia:2.1.4";
ports = [ "8444" ];
extraOptions = [
"--uidmap=0:${toString config.users.users.chia.uid}:1"
@ -62,11 +62,6 @@ in
};
};
systemd.tmpfiles.rules = [
"d ${cfg.path} 0700 chia chia - -"
"d ${cfg.path}/.chia 0700 chia chia - -"
];
networking.firewall = lib.mkIf cfg.openFirewall {
allowedTCPPorts = [ 8444 ];
};

View File

@ -0,0 +1,60 @@
{ pkgs, lib, config, agenix, ... }:
{
imports = [
../home/default.nix
./shell.nix
./ssh.nix
./update_scripts.nix
];
nix = {
settings.experimental-features = [ "nix-command" "flakes" ];
settings = {
auto-optimise-store = true;
};
gc = {
automatic = true;
dates = "weekly";
options = "--delete-older-than 90d";
};
};
nixpkgs.config.allowUnfree = true;
time.timeZone = "Europe/London";
i18n.defaultLocale = "en_GB.UTF-8";
users = {
mutableUsers = false;
users."jake" = {
isNormalUser = true;
extraGroups = [ "wheel" ]; # enable sudo
};
};
security.sudo.wheelNeedsPassword = false;
environment = {
systemPackages = with pkgs; [
agenix.packages."${system}".default
gh
git
htop
nix
sapling
vim
];
variables.EDITOR = "vim";
shellAliases = {
ls = "ls -p --color=auto";
};
};
networking = rec {
nameservers = [ "1.1.1.1" "8.8.8.8" ];
networkmanager.dns = "none";
};
networking.firewall.enable = true;
custom.hostinfo.enable = true;
}

View File

@ -1,20 +1,7 @@
{ pkgs, lib, config, ... }:
let
cfg = config.custom.shell;
in
{
imports = [
./update_scripts.nix
];
options.custom.shell = {
enable = lib.mkEnableOption "shell";
};
config = lib.mkIf cfg.enable {
custom.shell.update_scripts.enable = true;
config = {
users.defaultUserShell = pkgs.zsh;
environment.systemPackages = with pkgs; [ direnv ];

modules/common/ssh.nix Normal file
View File

@ -0,0 +1,40 @@
{ pkgs, lib, config, ... }:
{
users.users."jake".openssh.authorizedKeys.keys = [
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOt74U+rL+BMtAEjfu/Optg1D7Ly7U+TupRxd5u9kfN7oJnW4dJA25WRSr4dgQNq7MiMveoduBY/ky2s0c9gvIA= jake@jake-gentoo"
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC0uKIvvvkzrOcS7AcamsQRFId+bqPwUC9IiUIsiH5oWX1ReiITOuEo+TL9YMII5RyyfJFeu2ZP9moNuZYlE7Bs= jake@jake-mbp"
"ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAyFsYYjLZ/wyw8XUbcmkk6OKt2IqLOnWpRE5gEvm3X0V4IeTOL9F4IL79h7FTsPvi2t9zGBL1hxeTMZHSGfrdWaMJkQp94gA1W30MKXvJ47nEVt0HUIOufGqgTTaAn4BHxlFUBUuS7UxaA4igFpFVoPJed7ZMhMqxg+RWUmBAkcgTWDMgzUx44TiNpzkYlG8cYuqcIzpV2dhGn79qsfUzBMpGJgkxjkGdDEHRk66JXgD/EtVasZvqp5/KLNnOpisKjR88UJKJ6/buV7FLVra4/0hA9JtH9e1ecCfxMPbOeluaxlieEuSXV2oJMbQoPP87+/QriNdi/6QuCHkMDEhyGw== jake@jake-mbp"
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw4lgH20nfuchDqvVf0YciqN0GnBw5hfh8KIun5z0P7wlNgVYnCyvPvdIlGf2Nt1z5EGfsMzMLhKDOZkcTMlhupd+j2Er/ZB764uVBGe1n3CoPeasmbIlnamZ12EusYDvQGm2hVJTGQPPp9nKaRxr6ljvTMTNl0KWlWvKP4kec74d28MGgULOPLT3HlAyvUymSULK4lSxFK0l97IVXLa8YwuL5TNFGHUmjoSsi/Q7/CKaqvNh+ib1BYHzHYsuEzaaApnCnfjDBNexHm/AfbI7s+g3XZDcZOORZn6r44dOBNFfwvppsWj3CszwJQYIFeJFuMRtzlC8+kyYxci0+FXHn jake@jake-gentoo"
];
programs.mosh.enable = true;
services.openssh = {
enable = true;
openFirewall = true;
settings = {
PermitRootLogin = "no";
PasswordAuthentication = false;
};
};
programs.ssh.knownHosts = {
# Global Internet hosts
"ssh.gitea.hillion.co.uk".publicKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCxQpywsy+WGeaEkEL67xOBL1NIE++pcojxro5xAPO6VQe2N79388NRFMLlX6HtnebkIpVrvnqdLOs0BPMAokjaWCC4Ay7T/3ko1kXSOlqHY5Ye9jtjRK+wPHMZgzf74a3jlvxjrXJMA70rPQ3X+8UGpA04eB3JyyLTLuVvc6znMe53QiZ0x+hSz+4pYshnCO2UazJ148vV3htN6wRK+uqjNdjjQXkNJ7llNBSrvmfrLidlf0LRphEk43maSQCBcLEZgf4pxXBA7rFuZABZTz1twbnxP2ziyBaSOs7rcII+jVhF2cqJlElutBfIgRNJ3DjNiTcdhNaZzkwJ59huR0LUFQlHI+SALvPzE9ZXWVOX/SqQG+oIB8VebR52icii0aJH7jatkogwNk0121xmhpvvR7gwbJ9YjYRTpKs4lew3bq/W/OM8GF/FEuCsCuNIXRXKqIjJVAtIpuuhxPymFHeqJH3wK3f6jTJfcAz/z33Rwpow2VOdDyqrRfAW8ti73CCnRlN+VJi0V/zvYGs9CHldY3YvMr7rSd0+fdGyJHSTSRBF0vcyRVA/SqSfcIo/5o0ssYoBnQCg6gOkc3nNQ0C0/qh1ww17rw4hqBRxFJ2t3aBUMK+UHPxrELLVmG6ZUmfg9uVkOoafjRsoML6DVDB4JAk5JsmcZhybOarI9PJfEQ==";
# Tailscale hosts
"dancefloor.dancefloor.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEXkGueVYKr2wp/VHo2QLis0kmKtc/Upg3pGoHr6RkzY";
"gendry.jakehillion.terminals.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPXM5aDvNv4MTITXAvJWSS2yvr/mbxJE31tgwJtcl38c";
"homeassistant.homeassistant.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPM2ytacl/zYXhgvosvhudsl0zW5eQRHXm9aMqG9adux";
"jorah.cx.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILA9Hp37ljgVRZwjXnTh+XqRuQWk23alOqe7ptwSr2A5";
"li.pop.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHQWgcDFL9UZBDKHPiEGepT1Qsc4gz3Pee0/XVHJ6V6u";
"microserver.home.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPPOCPqXm5a+vGB6PsJFvjKNgjLhM5MxrwCy6iHGRjXw";
"router.home.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAlCj/i2xprN6h0Ik2tthOJQy6Qwq3Ony73+yfbHYTFu";
"theon.storage.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN59psLVu3/sQORA4x3p8H3ei8MCQlcwX5T+k3kBeBMf";
"tywin.storage.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGATsjWO0qZNFp2BhfgDuWi+e/ScMkFxp79N2OZoed1k";
};
programs.ssh.knownHostsFiles = [ ./github_known_hosts ];
}

View File

@ -1,8 +1,6 @@
{ config, pkgs, lib, ... }:
let
cfg = config.custom.shell.update_scripts;
update = pkgs.writeScriptBin "update" ''
#! ${pkgs.runtimeShell}
set -e
@ -52,11 +50,7 @@ let
'';
in
{
options.custom.shell.update_scripts = {
enable = lib.mkEnableOption "update_scripts";
};
config = lib.mkIf cfg.enable {
config = {
environment.systemPackages = [
update
];

View File

@ -3,25 +3,19 @@
{
imports = [
./backups/default.nix
./ca/default.nix
./chia.nix
./defaults.nix
./common/hostinfo.nix
./desktop/awesome/default.nix
./dns.nix
./home/default.nix
./hostinfo.nix
./ids.nix
./impermanence.nix
./locations.nix
./prometheus/default.nix
./resilio.nix
./sched_ext.nix
./services/default.nix
./shell/default.nix
./ssh/default.nix
./storj.nix
./tailscale.nix
./users.nix
./www/default.nix
./www/global.nix
./www/www-repo.nix
];
options.custom = {

View File

@ -1,70 +0,0 @@
{ pkgs, nixpkgs-unstable, lib, config, agenix, ... }:
{
options.custom.defaults = lib.mkEnableOption "defaults";
config = lib.mkIf config.custom.defaults {
hardware.enableAllFirmware = true;
nix = {
settings.experimental-features = [ "nix-command" "flakes" ];
settings = {
auto-optimise-store = true;
};
gc = {
automatic = true;
dates = "weekly";
options = "--delete-older-than 90d";
};
};
nixpkgs.config.allowUnfree = true;
time.timeZone = "Europe/London";
i18n.defaultLocale = "en_GB.UTF-8";
users = {
mutableUsers = false;
users.${config.custom.user} = {
isNormalUser = true;
extraGroups = [ "wheel" ]; # enable sudo
uid = config.ids.uids.${config.custom.user};
};
};
security.sudo.wheelNeedsPassword = false;
environment = {
systemPackages = with pkgs; [
agenix.packages."${system}".default
gh
git
htop
nix
vim
];
variables.EDITOR = "vim";
shellAliases = {
ls = "ls -p --color=auto";
};
};
networking = rec {
nameservers = [ "1.1.1.1" "8.8.8.8" ];
networkmanager.dns = "none";
};
networking.firewall.enable = true;
nix.registry.nixpkgs-unstable.to = {
type = "path";
path = nixpkgs-unstable;
};
# Delegation
custom.ca.consumer.enable = true;
custom.dns.enable = true;
custom.home.defaults = true;
custom.hostinfo.enable = true;
custom.prometheus.client.enable = true;
custom.shell.enable = true;
custom.ssh.enable = true;
};
}

View File

@ -1,124 +0,0 @@
{ pkgs, lib, config, ... }:
let
cfg = config.custom.dns;
in
{
options.custom.dns = {
enable = lib.mkEnableOption "dns";
authoritative = {
ipv4 = lib.mkOption {
description = "authoritative ipv4 mappings";
readOnly = true;
};
ipv6 = lib.mkOption {
description = "authoritative ipv6 mappings";
readOnly = true;
};
};
tailscale =
{
ipv4 = lib.mkOption {
description = "tailscale ipv4 address";
readOnly = true;
};
ipv6 = lib.mkOption {
description = "tailscale ipv6 address";
readOnly = true;
};
};
};
config = lib.mkIf cfg.enable {
custom.dns.authoritative = {
ipv4 = {
uk = {
co = {
hillion = {
ts = {
cx = {
boron = "100.113.188.46";
};
home = {
microserver = "100.105.131.47";
router = "100.105.71.48";
};
jakehillion-terminals = { gendry = "100.70.100.77"; };
lt = { be = "100.105.166.79"; };
pop = {
li = "100.106.87.35";
sodium = "100.87.188.4";
stinger = "100.117.89.126";
};
rig = {
merlin = "100.69.181.56";
};
st = {
phoenix = "100.92.37.106";
};
storage = {
theon = "100.104.142.22";
};
};
};
};
};
};
ipv6 = {
uk = {
co = {
hillion = {
ts = {
cx = {
boron = "fd7a:115c:a1e0::2a01:bc2f";
};
home = {
microserver = "fd7a:115c:a1e0:ab12:4843:cd96:6269:832f";
router = "fd7a:115c:a1e0:ab12:4843:cd96:6269:4730";
};
jakehillion-terminals = { gendry = "fd7a:115c:a1e0:ab12:4843:cd96:6246:644d"; };
lt = { be = "fd7a:115c:a1e0::9001:a64f"; };
pop = {
li = "fd7a:115c:a1e0::e701:5723";
sodium = "fd7a:115c:a1e0::3701:bc04";
stinger = "fd7a:115c:a1e0::8401:597e";
};
rig = {
merlin = "fd7a:115c:a1e0::8d01:b538";
};
st = {
phoenix = "fd7a:115c:a1e0::6901:256a";
};
storage = {
theon = "fd7a:115c:a1e0::4aa8:8e16";
};
};
};
};
};
};
};
custom.dns.tailscale =
let
lookupFqdn = lib.attrsets.attrByPath (lib.reverseList (lib.splitString "." config.networking.fqdn)) null;
in
{
ipv4 = lookupFqdn cfg.authoritative.ipv4;
ipv6 = lookupFqdn cfg.authoritative.ipv6;
};
networking.hosts =
let
mkHosts = hosts:
(lib.collect (x: (builtins.hasAttr "name" x && builtins.hasAttr "value" x))
(lib.mapAttrsRecursive
(path: value:
lib.nameValuePair value [ (lib.concatStringsSep "." (lib.reverseList path)) ])
hosts));
in
builtins.listToAttrs (mkHosts cfg.authoritative.ipv4 ++ mkHosts cfg.authoritative.ipv6);
};
}

View File

@ -3,47 +3,24 @@
{
imports = [
./git.nix
./neovim.nix
./tmux/default.nix
];
options.custom.home.defaults = lib.mkEnableOption "home";
config = lib.mkIf config.custom.home.defaults {
home-manager =
let
stateVersion = if (builtins.compareVersions config.system.stateVersion "24.05") > 0 then config.system.stateVersion else "22.11";
in
{
config = {
home-manager = {
users.root.home = {
inherit stateVersion;
stateVersion = "22.11";
## Set an empty ZSH config and defer to the global one
file.".zshrc".text = "";
};
users."${config.custom.user}" = {
home = {
inherit stateVersion;
};
users."${config.custom.user}".home = {
stateVersion = "22.11";
services = {
ssh-agent.enable = true;
};
programs = {
zoxide = {
enable = true;
options = [ "--cmd cd" ];
};
zsh.enable = true;
## Set an empty ZSH config and defer to the global one
file.".zshrc".text = "";
};
};
};
# Delegation
custom.home.git.enable = true;
custom.home.neovim.enable = true;
custom.home.tmux.enable = true;
};
}

View File

@ -1,33 +1,13 @@
{ pkgs, lib, config, ... }:
let
cfg = config.custom.home.git;
in
{
options.custom.home.git = {
enable = lib.mkEnableOption "git";
};
config = lib.mkIf cfg.enable {
home-manager.users.jake.programs = {
sapling = lib.mkIf (config.custom.user == "jake") {
home-manager.users.jake.programs.git = {
enable = true;
userName = "Jake Hillion";
userEmail = "jake@hillion.co.uk";
extraConfig = {
ui = {
"merge:interactive" = ":merge3";
user = {
email = "jake@hillion.co.uk";
name = "Jake Hillion";
};
};
};
git = lib.mkIf (config.custom.user == "jake") {
enable = true;
userName = "Jake Hillion";
userEmail = "jake@hillion.co.uk";
extraConfig = {
pull = {
rebase = true;
};
@ -39,6 +19,4 @@ in
};
};
};
};
};
}

View File

@ -1,82 +0,0 @@
{ pkgs, lib, config, ... }:
let
cfg = config.custom.home.neovim;
in
{
options.custom.home.neovim = {
enable = lib.mkEnableOption "neovim";
};
config = lib.mkIf config.custom.home.neovim.enable {
home-manager.users."${config.custom.user}".programs.neovim = {
enable = true;
viAlias = true;
vimAlias = true;
plugins = with pkgs.vimPlugins; [
a-vim
dracula-nvim
telescope-nvim
];
extraLuaConfig = ''
-- Logical options
vim.opt.splitright = true
vim.opt.splitbelow = true
vim.opt.ignorecase = true
vim.opt.smartcase = true
vim.opt.expandtab = true
vim.opt.tabstop = 2
vim.opt.shiftwidth = 2
-- Appearance
vim.cmd[[colorscheme dracula-soft]]
vim.opt.number = true
vim.opt.relativenumber = true
-- Telescope
require('telescope').setup({
pickers = {
find_files = {
find_command = {
"${pkgs.fd}/bin/fd",
"--type=f",
"--strip-cwd-prefix",
"--no-require-git",
"--hidden",
"--exclude=.sl",
},
},
},
defaults = {
vimgrep_arguments = {
"${pkgs.ripgrep}/bin/rg",
"--color=never",
"--no-heading",
"--with-filename",
"--line-number",
"--column",
"--smart-case",
"--no-require-git",
"--hidden",
"--glob=!.sl",
},
},
})
-- Key bindings
vim.g.mapleader = ","
--- Key bindings: Telescope
local telescope_builtin = require('telescope.builtin')
vim.keymap.set('n', '<leader>ff', telescope_builtin.find_files, {})
vim.keymap.set('n', '<leader>fg', telescope_builtin.live_grep, {})
vim.keymap.set('n', '<leader>fb', telescope_builtin.buffers, {})
vim.keymap.set('n', '<leader>fh', telescope_builtin.help_tags, {})
'';
};
};
}

View File

@ -1,25 +1,10 @@
setw -g mouse on
# Large history
set -g history-limit 500000
# Bindings
bind C-Y set-window-option synchronize-panes
bind -n C-k clear-history
# Status pane
set -g status-right-length 100
set -g status-right "#(uname -r) • #(hostname -f | sed 's/\.ts\.hillion\.co\.uk//g') • %d-%b-%y %H:%M"
# New panes in the same directory
bind '"' split-window -c "#{pane_current_path}"
bind % split-window -h -c "#{pane_current_path}"
bind c new-window -c "#{pane_current_path}"
# Start indices at 1 to match keyboard
set -g base-index 1
setw -g pane-base-index 1
# Open a new session when attached to and one isn't open
# Must come after base-index settings
new-session

View File

@ -1,17 +1,8 @@
{ pkgs, lib, config, ... }:
let
cfg = config.custom.home.tmux;
in
{
options.custom.home.tmux = {
enable = lib.mkEnableOption "tmux";
};
config = lib.mkIf cfg.enable {
home-manager.users.jake.programs.tmux = {
enable = true;
extraConfig = lib.readFile ./.tmux.conf;
};
};
}

View File

@ -6,10 +6,6 @@
## Defined System Users (see https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/misc/ids.nix)
unifi = 183;
chia = 185;
gitea = 186;
node-exporter = 188;
step-ca = 198;
isponsorblocktv = 199;
## Consistent People
jake = 1000;
@ -19,10 +15,6 @@
## Defined System Groups (see https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/misc/ids.nix)
unifi = 183;
chia = 185;
gitea = 186;
node-exporter = 188;
step-ca = 198;
isponsorblocktv = 199;
## Consistent Groups
mediaaccess = 1200;

View File

@ -2,6 +2,7 @@
let
cfg = config.custom.impermanence;
listIf = (enable: x: if enable then x else [ ]);
in
{
options.custom.impermanence = {
@ -11,13 +12,6 @@ in
type = lib.types.str;
default = "/data";
};
cache = {
enable = lib.mkEnableOption "impermanence.cache";
path = lib.mkOption {
type = lib.types.str;
default = "/cache";
};
};
users = lib.mkOption {
type = with lib.types; listOf str;
@ -46,58 +40,22 @@ in
gitea.stateDir = "${cfg.base}/system/var/lib/gitea";
};
custom.chia = lib.mkIf config.custom.chia.enable {
path = lib.mkOverride 999 "/data/chia";
};
services.resilio = lib.mkIf config.services.resilio.enable {
directoryRoot = lib.mkOverride 999 "${cfg.base}/sync";
};
services.plex = lib.mkIf config.services.plex.enable {
dataDir = lib.mkOverride 999 "/data/plex";
};
services.home-assistant = lib.mkIf config.services.home-assistant.enable {
configDir = lib.mkOverride 999 "/data/home-assistant";
};
environment.persistence = lib.mkMerge [
{
"${cfg.base}/system" = {
environment.persistence."${cfg.base}/system" = {
hideMounts = true;
directories = [
"/etc/nixos"
] ++ (lib.lists.optional config.services.tailscale.enable "/var/lib/tailscale") ++
(lib.lists.optional config.services.zigbee2mqtt.enable config.services.zigbee2mqtt.dataDir) ++
(lib.lists.optional config.services.postgresql.enable config.services.postgresql.dataDir) ++
(lib.lists.optional config.hardware.bluetooth.enable "/var/lib/bluetooth") ++
(lib.lists.optional config.custom.services.unifi.enable "/var/lib/unifi") ++
(lib.lists.optional (config.virtualisation.oci-containers.containers != { }) "/var/lib/containers") ++
(lib.lists.optional config.services.tang.enable "/var/lib/private/tang") ++
(lib.lists.optional config.services.caddy.enable "/var/lib/caddy") ++
(lib.lists.optional config.services.prometheus.enable "/var/lib/${config.services.prometheus.stateDir}") ++
(lib.lists.optional config.custom.services.isponsorblocktv.enable "${config.custom.services.isponsorblocktv.dataDir}") ++
(lib.lists.optional config.services.step-ca.enable "/var/lib/step-ca/db");
] ++ (listIf config.custom.tailscale.enable [ "/var/lib/tailscale" ]) ++
(listIf config.services.zigbee2mqtt.enable [ config.services.zigbee2mqtt.dataDir ]) ++
(listIf config.services.postgresql.enable [ config.services.postgresql.dataDir ]) ++
(listIf config.hardware.bluetooth.enable [ "/var/lib/bluetooth" ]) ++
(listIf config.custom.services.unifi.enable [ "/var/lib/unifi" ]) ++
(listIf (config.virtualisation.oci-containers.containers != { }) [ "/var/lib/containers" ]);
};
}
(lib.mkIf cfg.cache.enable {
"${cfg.cache.path}/system" = {
hideMounts = true;
directories = (lib.lists.optional config.services.postgresqlBackup.enable config.services.postgresqlBackup.location);
};
})
];
home-manager.users =
let
mkUser = (x:
let
homeCfg = config.home-manager.users."${x}";
in
{
mkUser = (x: {
name = x;
value = {
home = {
@ -107,12 +65,9 @@ in
files = cfg.userExtraFiles.${x} or [ ];
directories = cfg.userExtraDirs.${x} or [ ];
};
sessionVariables = lib.attrsets.optionalAttrs homeCfg.programs.zoxide.enable { _ZO_DATA_DIR = "/data/users/${x}/.local/share/zoxide"; };
};
programs = {
zsh.history.path = lib.mkOverride 999 "/data/users/${x}/.zsh_history";
file.".zshrc".text = lib.mkForce ''
HISTFILE=/data/users/${x}/.zsh_history
'';
};
};
});

View File

@ -11,43 +11,25 @@ in
};
locations = lib.mkOption {
readOnly = true;
};
};
config = lib.mkMerge [
{
custom.locations.locations = {
default = {
services = {
authoritative_dns = [ "boron.cx.ts.hillion.co.uk" ];
downloads = "phoenix.st.ts.hillion.co.uk";
gitea = "boron.cx.ts.hillion.co.uk";
homeassistant = "stinger.pop.ts.hillion.co.uk";
downloads = "tywin.storage.ts.hillion.co.uk";
gitea = "jorah.cx.ts.hillion.co.uk";
homeassistant = "microserver.home.ts.hillion.co.uk";
mastodon = "";
matrix = "boron.cx.ts.hillion.co.uk";
prometheus = "boron.cx.ts.hillion.co.uk";
restic = "phoenix.st.ts.hillion.co.uk";
tang = [
"li.pop.ts.hillion.co.uk"
"microserver.home.ts.hillion.co.uk"
"sodium.pop.ts.hillion.co.uk"
];
unifi = "boron.cx.ts.hillion.co.uk";
version_tracker = [ "boron.cx.ts.hillion.co.uk" ];
matrix = "jorah.cx.ts.hillion.co.uk";
unifi = "jorah.cx.ts.hillion.co.uk";
};
};
};
};
}
(lib.mkIf cfg.autoServe
{
custom.services = lib.mapAttrsRecursive
(path: value: {
enable =
if builtins.isList value
then builtins.elem config.networking.fqdn value
else config.networking.fqdn == value;
})
cfg.locations.services;
})
];
config = lib.mkIf cfg.autoServe {
custom.services.downloads.enable = cfg.locations.services.downloads == config.networking.fqdn;
custom.services.gitea.enable = cfg.locations.services.gitea == config.networking.fqdn;
custom.services.homeassistant.enable = cfg.locations.services.homeassistant == config.networking.fqdn;
custom.services.mastodon.enable = cfg.locations.services.mastodon == config.networking.fqdn;
custom.services.matrix.enable = cfg.locations.services.matrix == config.networking.fqdn;
custom.services.unifi.enable = cfg.locations.services.unifi == config.networking.fqdn;
};
}

View File

@ -1,24 +0,0 @@
{ pkgs, lib, config, ... }:
let
cfg = config.custom.prometheus.client;
in
{
options.custom.prometheus.client = {
enable = lib.mkEnableOption "prometheus-client";
};
config = lib.mkIf cfg.enable {
users.users.node-exporter.uid = config.ids.uids.node-exporter;
users.groups.node-exporter.gid = config.ids.gids.node-exporter;
services.prometheus.exporters.node = {
enable = true;
port = 9000;
enabledCollectors = [
"systemd"
];
};
};
}

View File

@ -1,8 +0,0 @@
{ ... }:
{
imports = [
./client.nix
./service.nix
];
}

View File

@ -1,67 +0,0 @@
{ pkgs, lib, config, ... }:
let
cfg = config.custom.services.prometheus;
in
{
options.custom.services.prometheus = {
enable = lib.mkEnableOption "prometheus-client";
};
config = lib.mkIf cfg.enable {
services.prometheus = {
enable = true;
globalConfig = {
scrape_interval = "15s";
};
retentionTime = "1y";
scrapeConfigs = [{
job_name = "node";
static_configs = [{
targets = builtins.map (x: "${x}:9000") (builtins.attrNames (builtins.readDir ../../hosts));
}];
}];
rules = [
''
groups:
- name: service alerting
rules:
- alert: ResilioSyncDown
expr: node_systemd_unit_state{ name = 'resilio.service', state != 'active' } > 0
for: 10m
annotations:
summary: "Resilio Sync systemd service is down"
description: "The Resilio Sync systemd service is not active on instance {{ $labels.instance }}."
''
];
};
services.caddy = {
enable = true;
virtualHosts."prometheus.ts.hillion.co.uk" = {
listenAddresses = [ config.custom.dns.tailscale.ipv4 config.custom.dns.tailscale.ipv6 ];
extraConfig = ''
reverse_proxy http://localhost:9090
tls {
ca https://ca.ts.hillion.co.uk:8443/acme/acme/directory
}
'';
};
};
### HACK: Allow Caddy to restart if it fails. This happens because Tailscale
### starts too late. The upstream NixOS Caddy unit does restart on failure,
### but restarting is prevented on exit code 1. Set the exit code to 0
### (non-failure) to override this.
systemd.services.caddy = {
requires = [ "tailscaled.service" ];
after = [ "tailscaled.service" ];
serviceConfig = {
RestartPreventExitStatus = lib.mkForce 0;
};
};
};
}

View File

@ -19,14 +19,9 @@ in
type = with lib.types; uniq (listOf attrs);
default = [ ];
};
backups = {
enable = lib.mkEnableOption "resilio.backups";
};
};
config = lib.mkIf cfg.enable (lib.mkMerge [
{
config = lib.mkIf cfg.enable {
users.users =
let
mkUser =
@ -54,10 +49,6 @@ in
services.resilio = {
enable = true;
deviceName = lib.mkOverride 999 (lib.strings.concatStringsSep "." (lib.lists.take 2 (lib.strings.splitString "." config.networking.fqdnOrHostName)));
storagePath = lib.mkOverride 999 "${config.services.resilio.directoryRoot}/.sync";
sharedFolders =
let
mkFolder = name: secret: {
@ -73,39 +64,5 @@ in
in
builtins.map (folder: mkFolder folder.name folder.secret) cfg.folders;
};
systemd.services.resilio.unitConfig.RequiresMountsFor = builtins.map (folder: "${config.services.resilio.directoryRoot}/${folder.name}") cfg.folders;
}
(lib.mkIf cfg.backups.enable {
age.secrets."resilio/restic/128G.key" = {
file = ../secrets/restic/128G.age;
owner = "rslsync";
group = "rslsync";
};
services.restic.backups."resilio" = {
repository = "rest:https://restic.ts.hillion.co.uk/128G";
user = "rslsync";
passwordFile = config.age.secrets."resilio/restic/128G.key".path;
timerConfig = {
OnBootSec = "10m";
OnUnitInactiveSec = "15m";
RandomizedDelaySec = "5m";
};
paths = [ config.services.resilio.directoryRoot ];
exclude = [
"${config.services.resilio.directoryRoot}/.sync"
"${config.services.resilio.directoryRoot}/*/.sync"
"${config.services.resilio.directoryRoot}/resources/media/films"
"${config.services.resilio.directoryRoot}/resources/media/iso"
"${config.services.resilio.directoryRoot}/resources/media/tv"
"${config.services.resilio.directoryRoot}/dad/media"
];
};
})
]);
}

View File

@ -1,22 +0,0 @@
{ config, pkgs, lib, ... }:
let
cfg = config.custom.sched_ext;
in
{
options.custom.sched_ext = {
enable = lib.mkEnableOption "sched_ext";
};
config = lib.mkIf cfg.enable {
assertions = [{
assertion = config.boot.kernelPackages.kernelAtLeast "6.12";
message = "sched_ext requires a kernel >=6.12";
}];
boot.kernelPackages = if pkgs.linuxPackages.kernelAtLeast "6.12" then pkgs.linuxPackages else (if pkgs.linuxPackages_latest.kernelAtLeast "6.12" then pkgs.linuxPackages_latest else pkgs.unstable.linuxPackages_testing);
environment.systemPackages = with pkgs; [ unstable.scx.layered unstable.scx.lavd ];
};
}

View File

@ -1,56 +0,0 @@
{ pkgs, lib, config, ... }:
let
cfg = config.custom.services.authoritative_dns;
in
{
options.custom.services.authoritative_dns = {
enable = lib.mkEnableOption "authoritative_dns";
};
config = lib.mkIf cfg.enable {
services.nsd = {
enable = true;
zones = {
"ts.hillion.co.uk" = {
data =
let
makeRecords = type: s: (lib.concatStringsSep "\n" (lib.collect builtins.isString (lib.mapAttrsRecursive (path: value: "${lib.concatStringsSep "." (lib.reverseList path)} 86400 ${type} ${value}") s)));
in
''
$ORIGIN ts.hillion.co.uk.
$TTL 86400
ts.hillion.co.uk. IN SOA ns1.hillion.co.uk. hostmaster.hillion.co.uk. (
1 ;Serial
7200 ;Refresh
3600 ;Retry
1209600 ;Expire
3600 ;Negative response caching TTL
)
86400 NS ns1.hillion.co.uk.
ca 21600 CNAME sodium.pop.ts.hillion.co.uk.
restic 21600 CNAME ${config.custom.locations.locations.services.restic}.
prometheus 21600 CNAME ${config.custom.locations.locations.services.prometheus}.
deluge.downloads 21600 CNAME ${config.custom.locations.locations.services.downloads}.
prowlarr.downloads 21600 CNAME ${config.custom.locations.locations.services.downloads}.
radarr.downloads 21600 CNAME ${config.custom.locations.locations.services.downloads}.
sonarr.downloads 21600 CNAME ${config.custom.locations.locations.services.downloads}.
graphs.router.home 21600 CNAME router.home.ts.hillion.co.uk.
zigbee2mqtt.home 21600 CNAME router.home.ts.hillion.co.uk.
charlie.kvm 21600 CNAME router.home.ts.hillion.co.uk.
hammer.kvm 21600 CNAME router.home.ts.hillion.co.uk.
'' + (makeRecords "A" config.custom.dns.authoritative.ipv4.uk.co.hillion.ts) + "\n\n" + (makeRecords "AAAA" config.custom.dns.authoritative.ipv6.uk.co.hillion.ts);
};
};
};
};
}

View File

@ -2,15 +2,11 @@
{
imports = [
./authoritative_dns.nix
./downloads.nix
./gitea/default.nix
./homeassistant.nix
./isponsorblocktv.nix
./mastodon/default.nix
./matrix.nix
./restic.nix
./tang.nix
./unifi.nix
./version_tracker.nix
./zigbee2mqtt.nix

View File

@ -24,33 +24,27 @@ in
};
config = lib.mkIf cfg.enable {
age.secrets."wireguard/downloads".file = ../../secrets/wireguard/downloads.age;
age.secrets."deluge/auth" = {
file = ../../secrets/deluge/auth.age;
owner = "deluge";
};
services.caddy = {
enable = true;
virtualHosts = builtins.listToAttrs (builtins.map
(x: {
name = "${x}.downloads.ts.hillion.co.uk";
name = "http://${x}.downloads.ts.hillion.co.uk";
value = {
listenAddresses = [ config.custom.dns.tailscale.ipv4 config.custom.dns.tailscale.ipv6 ];
extraConfig = ''
reverse_proxy unix//${cfg.metadataPath}/caddy/caddy.sock
tls {
ca https://ca.ts.hillion.co.uk:8443/acme/acme/directory
}
'';
listenAddresses = [ config.custom.tailscale.ipv4Addr config.custom.tailscale.ipv6Addr ];
extraConfig = "reverse_proxy unix//${cfg.metadataPath}/caddy/caddy.sock";
};
}) [ "prowlarr" "sonarr" "radarr" "deluge" ]);
};
## Wireguard
age.secrets."wireguard/downloads".file = ../../secrets/wireguard/downloads.age;
age.secrets."deluge/auth" = {
file = ../../secrets/deluge/auth.age;
owner = "deluge";
};
networking.wireguard.interfaces."downloads" = {
privateKeyFile = config.age.secrets."wireguard/downloads".path;
ips = [ "10.2.0.2/32" ];
@ -138,10 +132,7 @@ in
script = with pkgs; "${iproute2}/bin/ip link set up lo";
};
networking = {
nameservers = [ "1.1.1.1" "8.8.8.8" ];
hosts = { "127.0.0.1" = builtins.map (x: "${x}.downloads.ts.hillion.co.uk") [ "prowlarr" "sonarr" "radarr" "deluge" ]; };
};
networking.hosts = { "127.0.0.1" = builtins.map (x: "${x}.downloads.ts.hillion.co.uk") [ "prowlarr" "sonarr" "radarr" "deluge" ]; };
services = {
prowlarr.enable = true;

View File

@ -63,11 +63,6 @@ in
runner = {
capacity = 3;
};
cache = {
enabled = true;
host = "10.108.27.2";
port = 41919;
};
};
};
@ -81,8 +76,6 @@ in
chain output {
type filter hook output priority 100; policy accept;
ct state { established, related } counter accept
ip daddr 10.0.0.0/8 drop
ip daddr 100.64.0.0/10 drop
ip daddr 172.16.0.0/12 drop

View File

@ -1,4 +1,4 @@
{ config, pkgs, lib, ... }:
{ config, pkgs, lib, nixpkgs-unstable, ... }:
let
cfg = config.custom.services.gitea;
@ -50,12 +50,9 @@ in
};
};
users.users.gitea.uid = config.ids.uids.gitea;
users.groups.gitea.gid = config.ids.gids.gitea;
services.gitea = {
enable = true;
package = pkgs.unstable.gitea;
package = nixpkgs-unstable.legacyPackages.x86_64-linux.gitea;
mailerPasswordFile = config.age.secrets."gitea/mailer_password".path;
appName = "Hillion Gitea";
@ -106,8 +103,8 @@ in
ip6tables -A PREROUTING -t nat -i eth0 -p tcp --dport 22 -j REDIRECT --to-port ${builtins.toString cfg.sshPort}
# proxy locally originating outgoing packets
iptables -A OUTPUT -d 138.201.252.214 -t nat -p tcp --dport 22 -j REDIRECT --to-port ${builtins.toString cfg.sshPort}
ip6tables -A OUTPUT -d 2a01:4f8:173:23d2::2 -t nat -p tcp --dport 22 -j REDIRECT --to-port ${builtins.toString cfg.sshPort}
iptables -A OUTPUT -d 95.217.229.104 -t nat -p tcp --dport 22 -j REDIRECT --to-port ${builtins.toString cfg.sshPort}
ip6tables -A OUTPUT -d 2a01:4f9:4b:3953::2 -t nat -p tcp --dport 22 -j REDIRECT --to-port ${builtins.toString cfg.sshPort}
'';
};
}

View File

@ -44,52 +44,27 @@ in
"bluetooth"
"default_config"
"esphome"
"fully_kiosk"
"flux"
"google_assistant"
"homekit"
"met"
"mobile_app"
"mqtt"
"otp"
"smartthings"
"sonos"
"sun"
"switchbot"
"waze_travel_time"
];
customComponents = with pkgs.home-assistant-custom-components; [
adaptive_lighting
];
config = {
default_config = { };
homeassistant = {
auth_providers = [
{ type = "homeassistant"; }
{
type = "trusted_networks";
trusted_networks = [ "10.239.19.4/32" ];
trusted_users = {
"10.239.19.4" = "fb4979873ecb480d9e3bb336250fa344";
};
allow_bypass_login = true;
}
];
};
recorder = {
db_url = "postgresql://@/homeassistant";
};
http = {
use_x_forwarded_for = true;
trusted_proxies = with config.custom.dns.authoritative; [
ipv4.uk.co.hillion.ts.cx.boron
ipv6.uk.co.hillion.ts.cx.boron
ipv4.uk.co.hillion.ts.pop.sodium
ipv6.uk.co.hillion.ts.pop.sodium
];
trusted_proxies = [ "100.96.143.138" ];
};
google_assistant = {
@ -101,9 +76,6 @@ in
report_state = true;
expose_by_default = true;
exposed_domains = [ "light" ];
entity_config = {
"input_boolean.sleep_mode" = { };
};
};
homekit = [{
filter = {
@ -113,7 +85,13 @@ in
bluetooth = { };
adaptive_lighting = {
switch = [
{
platform = "flux";
start_time = "07:00";
stop_time = "23:59";
mode = "mired";
disable_brightness_adjust = true;
lights = [
"light.bedroom_lamp"
"light.bedroom_light"
@ -124,8 +102,8 @@ in
"light.living_room_light"
"light.wardrobe_light"
];
min_sunset_time = "21:00";
};
}
];
light = [
{
@ -133,9 +111,12 @@ in
lights = {
bathroom_light = {
unique_id = "87a4cbb5-e5a7-44fd-9f28-fec2d6a62538";
value_template = "{{ false if state_attr('script.bathroom_light_switch_if_on', 'last_triggered') > states.sensor.bathroom_motion_sensor_illuminance_lux.last_reported else states('sensor.bathroom_motion_sensor_illuminance_lux') | int > 500 }}";
value_template = "on";
turn_on = { service = "script.noop"; };
turn_off = { service = "script.bathroom_light_switch_if_on"; };
turn_off = {
service = "switch.turn_on";
entity_id = "switch.bathroom_light";
};
};
};
}
@ -164,13 +145,6 @@ in
}
];
input_boolean = {
sleep_mode = {
name = "Set house to sleep mode";
icon = "mdi:sleep";
};
};
# UI managed expansions
automation = "!include automations.yaml";
script = "!include scripts.yaml";

View File

@ -1,62 +0,0 @@
{ config, pkgs, lib, ... }:
let
cfg = config.custom.services.isponsorblocktv;
ver = "v2.2.1";
ctl = pkgs.writeScriptBin "isponsorblocktv-config" ''
#! ${pkgs.runtimeShell}
set -e
sudo systemctl stop podman-isponsorblocktv
sudo ${pkgs.podman}/bin/podman run \
--rm -it \
--uidmap=0:${toString config.users.users.isponsorblocktv.uid}:1 \
--gidmap=0:${toString config.users.groups.isponsorblocktv.gid}:1 \
-v ${cfg.dataDir}:/app/data \
ghcr.io/dmunozv04/isponsorblocktv:${ver} \
--setup-cli
sudo systemctl start podman-isponsorblocktv
'';
in
{
options.custom.services.isponsorblocktv = {
enable = lib.mkEnableOption "isponsorblocktv";
dataDir = lib.mkOption {
type = lib.types.str;
default = "/var/lib/isponsorblocktv";
};
};
config = lib.mkIf cfg.enable {
environment.systemPackages = [ ctl ];
users.groups.isponsorblocktv = {
gid = config.ids.gids.isponsorblocktv;
};
users.users.isponsorblocktv = {
home = cfg.dataDir;
createHome = true;
isSystemUser = true;
group = "isponsorblocktv";
uid = config.ids.uids.isponsorblocktv;
};
virtualisation.oci-containers.containers.isponsorblocktv = {
image = "ghcr.io/dmunozv04/isponsorblocktv:${ver}";
extraOptions = [
"--uidmap=0:${toString config.users.users.isponsorblocktv.uid}:1"
"--gidmap=0:${toString config.users.groups.isponsorblocktv.gid}:1"
];
volumes = [ "${cfg.dataDir}:/app/data" ];
};
systemd.tmpfiles.rules = [
"d ${cfg.dataDir} 0700 isponsorblocktv isponsorblocktv - -"
];
};
}

View File

@ -41,10 +41,6 @@ in
owner = "matrix-synapse";
group = "matrix-synapse";
};
"matrix/matrix.hillion.co.uk/syncv3_secret" = {
file = ../../secrets/matrix/matrix.hillion.co.uk/syncv3_secret.age;
};
};
services = {
@ -80,8 +76,8 @@ in
x_forwarded = true;
bind_addresses = [
"::1"
config.custom.dns.tailscale.ipv4
config.custom.dns.tailscale.ipv6
config.custom.tailscale.ipv4Addr
config.custom.tailscale.ipv6Addr
];
resources = [
{
@ -118,15 +114,6 @@ in
};
};
matrix-sliding-sync = {
enable = true;
environmentFile = config.age.secrets."matrix/matrix.hillion.co.uk/syncv3_secret".path;
settings = {
SYNCV3_SERVER = "https://matrix.hillion.co.uk";
SYNCV3_BINDADDR = "[::]:8009";
};
};
heisenbridge = lib.mkIf cfg.heisenbridge {
enable = true;
owner = "@jake:hillion.co.uk";

View File

@ -1,306 +0,0 @@
{ config, pkgs, lib, ... }:
let
cfg = config.custom.services.restic;
in
{
options.custom.services.restic = {
enable = lib.mkEnableOption "restic http server";
path = lib.mkOption {
type = lib.types.path;
default = "/var/lib/restic";
};
repos = lib.mkOption {
readOnly = true;
type = with lib.types; attrsOf (submodule {
options = {
path = lib.mkOption {
default = null;
type = nullOr str;
};
passwordFile = lib.mkOption {
default = null;
type = nullOr str;
};
environmentFile = lib.mkOption {
default = null;
type = nullOr str;
};
forgetConfig = lib.mkOption {
default = null;
type = nullOr (submodule {
options = {
timerConfig = lib.mkOption {
type = attrs;
};
opts = lib.mkOption {
type = listOf str;
};
};
});
};
clones = lib.mkOption {
default = [ ];
type = listOf (submodule {
options = {
timerConfig = lib.mkOption {
type = attrs;
};
repo = lib.mkOption {
type = str;
};
};
});
};
};
});
default = {
"128G" = {
path = "${cfg.path}/128G";
passwordFile = config.age.secrets."restic/128G.key".path;
forgetConfig = {
timerConfig = {
OnCalendar = "02:30";
RandomizedDelaySec = "1h";
};
opts = [
"--keep-last 48"
"--keep-within-hourly 7d"
"--keep-within-daily 1m"
"--keep-within-weekly 6m"
"--keep-within-monthly 24m"
];
};
clones = [
{
repo = "128G-wasabi";
timerConfig = {
OnBootSec = "30m";
OnUnitInactiveSec = "60m";
RandomizedDelaySec = "20m";
};
}
{
repo = "128G-backblaze";
timerConfig = {
OnBootSec = "30m";
OnUnitInactiveSec = "60m";
RandomizedDelaySec = "20m";
};
}
];
};
"1.6T" = {
path = "${cfg.path}/1.6T";
passwordFile = config.age.secrets."restic/1.6T.key".path;
forgetConfig = {
timerConfig = {
OnCalendar = "Wed, 02:30";
RandomizedDelaySec = "4h";
};
opts = [
"--keep-within-daily 14d"
"--keep-within-weekly 2m"
"--keep-within-monthly 18m"
];
};
clones = [
{
repo = "1.6T-wasabi";
timerConfig = {
OnBootSec = "30m";
OnUnitInactiveSec = "60m";
RandomizedDelaySec = "20m";
};
}
{
repo = "1.6T-backblaze";
timerConfig = {
OnBootSec = "30m";
OnUnitInactiveSec = "60m";
RandomizedDelaySec = "20m";
};
}
];
};
"128G-wasabi" = {
environmentFile = config.age.secrets."restic/128G-wasabi.env".path;
};
"1.6T-wasabi" = {
environmentFile = config.age.secrets."restic/1.6T-wasabi.env".path;
};
"128G-backblaze" = {
environmentFile = config.age.secrets."restic/128G-backblaze.env".path;
};
"1.6T-backblaze" = {
environmentFile = config.age.secrets."restic/1.6T-backblaze.env".path;
};
};
};
};
config = lib.mkIf cfg.enable {
age.secrets = {
"restic/128G.key" = {
file = ../../secrets/restic/128G.age;
owner = "restic";
group = "restic";
};
"restic/128G-wasabi.env".file = ../../secrets/restic/128G-wasabi.env.age;
"restic/128G-backblaze.env".file = ../../secrets/restic/128G-backblaze.env.age;
"restic/1.6T.key" = {
file = ../../secrets/restic/1.6T.age;
owner = "restic";
group = "restic";
};
"restic/1.6T-wasabi.env".file = ../../secrets/restic/1.6T-wasabi.env.age;
"restic/1.6T-backblaze.env".file = ../../secrets/restic/1.6T-backblaze.env.age;
};
services.restic.server = {
enable = true;
appendOnly = true;
extraFlags = [ "--no-auth" ];
dataDir = cfg.path;
listenAddress = "127.0.0.1:8000"; # TODO: can this be a Unix socket?
};
services.caddy = {
enable = true;
virtualHosts."restic.ts.hillion.co.uk".extraConfig = ''
bind ${config.custom.dns.tailscale.ipv4} ${config.custom.dns.tailscale.ipv6}
tls {
ca https://ca.ts.hillion.co.uk:8443/acme/acme/directory
}
reverse_proxy http://localhost:8000
'';
};
systemd =
let
mkRepoInfo = repo_cfg: (if (repo_cfg.passwordFile != null) then {
serviceConfig.LoadCredential = [
"password_file:${repo_cfg.passwordFile}"
];
environment = {
RESTIC_REPOSITORY = repo_cfg.path;
RESTIC_PASSWORD_FILE = "%d/password_file";
};
} else {
serviceConfig.EnvironmentFile = repo_cfg.environmentFile;
});
mkForgetService = name: repo_cfg:
if (repo_cfg.forgetConfig != null) then
({
description = "Restic forget service for ${name}";
serviceConfig = {
User = "restic";
Group = "restic";
};
script = ''
set -xe
${pkgs.restic}/bin/restic forget ${lib.strings.concatStringsSep " " repo_cfg.forgetConfig.opts} \
--prune \
--retry-lock 30m
'';
} // (mkRepoInfo repo_cfg)) else { };
mkForgetTimer = repo_cfg:
if (repo_cfg.forgetConfig != null) then {
wantedBy = [ "timers.target" ];
timerConfig = repo_cfg.forgetConfig.timerConfig;
} else { };
mkCloneService = from_repo: clone_cfg: to_repo: {
name = "restic-clone-${from_repo.name}-${to_repo.name}";
value = lib.mkMerge [
{
description = "Restic copy from ${from_repo.name} to ${to_repo.name}";
serviceConfig = {
User = "restic";
Group = "restic";
LoadCredential = [
"from_password_file:${from_repo.cfg.passwordFile}"
];
};
environment = {
RESTIC_FROM_PASSWORD_FILE = "%d/from_password_file";
};
script = ''
set -xe
${pkgs.restic}/bin/restic copy \
--from-repo ${from_repo.cfg.path} \
--retry-lock 30m
'';
}
(mkRepoInfo to_repo.cfg)
];
};
mkCloneTimer = from_repo: clone_cfg: to_repo: {
name = "restic-clone-${from_repo.name}-${to_repo.name}";
value = {
wantedBy = [ "timers.target" ];
timerConfig = clone_cfg.timerConfig;
};
};
mapClones = fn: builtins.listToAttrs (lib.lists.flatten (lib.mapAttrsToList
(
from_repo_name: from_repo_cfg: (builtins.map
(
clone_cfg: (fn
{ name = from_repo_name; cfg = from_repo_cfg; }
clone_cfg
{ name = clone_cfg.repo; cfg = cfg.repos."${clone_cfg.repo}"; }
)
)
from_repo_cfg.clones)
)
cfg.repos));
in
{
services = {
caddy = {
### HACK: Allow Caddy to restart if it fails. This happens because Tailscale
### starts too late. The upstream NixOS Caddy unit does restart on failure,
### but restarting is prevented on exit code 1. Set the exit code to 0
### (non-failure) to override this.
requires = [ "tailscaled.service" ];
after = [ "tailscaled.service" ];
serviceConfig = {
RestartPreventExitStatus = lib.mkForce 0;
};
};
}
// lib.mapAttrs' (name: value: lib.attrsets.nameValuePair ("restic-forget-" + name) (mkForgetService name value)) cfg.repos
// mapClones mkCloneService;
timers = lib.mapAttrs' (name: value: lib.attrsets.nameValuePair ("restic-forget-" + name) (mkForgetTimer value)) cfg.repos
// mapClones mkCloneTimer;
};
};
}

View File

@ -1,23 +0,0 @@
{ config, pkgs, lib, ... }:
let
cfg = config.custom.services.tang;
in
{
options.custom.services.tang = {
enable = lib.mkEnableOption "tang";
};
config = lib.mkIf cfg.enable {
services.tang = {
enable = true;
ipAddressAllow = [
"138.201.252.214/32"
"10.64.50.26/32"
"10.64.50.27/32"
"10.64.50.28/32"
"10.64.50.29/32"
];
};
};
}

View File

@ -10,14 +10,20 @@ in
dataDir = lib.mkOption {
type = lib.types.str;
default = "/var/lib/unifi";
readOnly = true; # NixOS module only supports this directory
};
};
config = lib.mkIf cfg.enable {
# Fix dynamically allocated user and group ids
users.users.unifi.uid = config.ids.uids.unifi;
users.groups.unifi.gid = config.ids.gids.unifi;
users.users.unifi = {
uid = config.ids.uids.unifi;
isSystemUser = true;
group = "unifi";
description = "UniFi controller daemon user";
home = "${cfg.dataDir}";
};
users.groups.unifi = {
gid = config.ids.gids.unifi;
};
services.caddy = {
enable = true;
@ -32,9 +38,21 @@ in
};
};
services.unifi = {
enable = true;
unifiPackage = pkgs.unifi8;
virtualisation.oci-containers.containers = {
"unifi" = {
image = "lscr.io/linuxserver/unifi-controller:8.0.24-ls221";
environment = {
PUID = toString config.ids.uids.unifi;
PGID = toString config.ids.gids.unifi;
TZ = "Etc/UTC";
};
volumes = [ "${cfg.dataDir}:/config" ];
ports = [
"8080:8080"
"8443:8443"
"3478:3478/udp"
];
};
};
};
}

View File

@ -23,7 +23,7 @@ in
enable = true;
virtualHosts."http://zigbee2mqtt.home.ts.hillion.co.uk" = {
listenAddresses = [ config.custom.dns.tailscale.ipv4 config.custom.dns.tailscale.ipv6 ];
listenAddresses = [ config.custom.tailscale.ipv4Addr config.custom.tailscale.ipv6Addr ];
extraConfig = "reverse_proxy http://127.0.0.1:15606";
};
};
@ -75,7 +75,7 @@ in
};
services.restic.backups."zigbee2mqtt" = lib.mkIf cfg.backup {
repository = "rest:https://restic.ts.hillion.co.uk/1.6T";
repository = "rest:http://restic.tywin.storage.ts.hillion.co.uk/1.6T";
user = "zigbee2mqtt";
passwordFile = config.age.secrets."resilio/zigbee2mqtt/1.6T.key".path;

View File

@ -0,0 +1,25 @@
{ config, pkgs, lib, ... }:
{
config.age.secrets."spotify/11132032266" = {
file = ../../secrets/spotify/11132032266.age;
owner = "jake";
};
config.hardware.pulseaudio.enable = true;
config.users.users.jake.extraGroups = [ "audio" ];
config.users.users.jake.packages = with pkgs; [ spotify-tui ];
config.home-manager.users.jake.services.spotifyd = {
enable = true;
settings = {
global = {
username = "11132032266";
password_cmd = "cat ${config.age.secrets."spotify/11132032266".path}";
backend = "pulseaudio";
};
};
};
}

View File

@ -1,56 +0,0 @@
{ pkgs, lib, config, ... }:
let
cfg = config.custom.ssh;
in
{
options.custom.ssh = {
enable = lib.mkEnableOption "ssh";
};
config = lib.mkIf cfg.enable {
users.users =
if config.custom.user == "jake" then {
"jake".openssh.authorizedKeys.keys = [
"sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBBwJH4udKNvi9TjOBgkxpBBy7hzWqmP0lT5zE9neusCpQLIiDhr6KXYMPXWXdZDc18wH1OLi2+639dXOvp8V/wgAAAAEc3NoOg== jake@beryllium-keys"
"sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBPPJtW19jOaUsjmxc0+QibaLJ3J3yxPXSXZXwKT0Ean6VeaH5G8zG+zjt1Y6sg2d52lHgrRfeVl1xrG/UGX8qWoAAAAEc3NoOg== jakehillion@jakehillion-mbp"
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOt74U+rL+BMtAEjfu/Optg1D7Ly7U+TupRxd5u9kfN7oJnW4dJA25WRSr4dgQNq7MiMveoduBY/ky2s0c9gvIA= jake@jake-gentoo"
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC0uKIvvvkzrOcS7AcamsQRFId+bqPwUC9IiUIsiH5oWX1ReiITOuEo+TL9YMII5RyyfJFeu2ZP9moNuZYlE7Bs= jake@jake-mbp"
"ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAyFsYYjLZ/wyw8XUbcmkk6OKt2IqLOnWpRE5gEvm3X0V4IeTOL9F4IL79h7FTsPvi2t9zGBL1hxeTMZHSGfrdWaMJkQp94gA1W30MKXvJ47nEVt0HUIOufGqgTTaAn4BHxlFUBUuS7UxaA4igFpFVoPJed7ZMhMqxg+RWUmBAkcgTWDMgzUx44TiNpzkYlG8cYuqcIzpV2dhGn79qsfUzBMpGJgkxjkGdDEHRk66JXgD/EtVasZvqp5/KLNnOpisKjR88UJKJ6/buV7FLVra4/0hA9JtH9e1ecCfxMPbOeluaxlieEuSXV2oJMbQoPP87+/QriNdi/6QuCHkMDEhyGw== jake@jake-mbp"
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw4lgH20nfuchDqvVf0YciqN0GnBw5hfh8KIun5z0P7wlNgVYnCyvPvdIlGf2Nt1z5EGfsMzMLhKDOZkcTMlhupd+j2Er/ZB764uVBGe1n3CoPeasmbIlnamZ12EusYDvQGm2hVJTGQPPp9nKaRxr6ljvTMTNl0KWlWvKP4kec74d28MGgULOPLT3HlAyvUymSULK4lSxFK0l97IVXLa8YwuL5TNFGHUmjoSsi/Q7/CKaqvNh+ib1BYHzHYsuEzaaApnCnfjDBNexHm/AfbI7s+g3XZDcZOORZn6r44dOBNFfwvppsWj3CszwJQYIFeJFuMRtzlC8+kyYxci0+FXHn jake@jake-gentoo"
];
} else { };
programs.mosh.enable = true;
services.openssh = {
enable = true;
openFirewall = true;
settings = {
PermitRootLogin = "no";
PasswordAuthentication = false;
};
};
programs.ssh.knownHosts = {
# Global Internet hosts
"ssh.gitea.hillion.co.uk".publicKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCxQpywsy+WGeaEkEL67xOBL1NIE++pcojxro5xAPO6VQe2N79388NRFMLlX6HtnebkIpVrvnqdLOs0BPMAokjaWCC4Ay7T/3ko1kXSOlqHY5Ye9jtjRK+wPHMZgzf74a3jlvxjrXJMA70rPQ3X+8UGpA04eB3JyyLTLuVvc6znMe53QiZ0x+hSz+4pYshnCO2UazJ148vV3htN6wRK+uqjNdjjQXkNJ7llNBSrvmfrLidlf0LRphEk43maSQCBcLEZgf4pxXBA7rFuZABZTz1twbnxP2ziyBaSOs7rcII+jVhF2cqJlElutBfIgRNJ3DjNiTcdhNaZzkwJ59huR0LUFQlHI+SALvPzE9ZXWVOX/SqQG+oIB8VebR52icii0aJH7jatkogwNk0121xmhpvvR7gwbJ9YjYRTpKs4lew3bq/W/OM8GF/FEuCsCuNIXRXKqIjJVAtIpuuhxPymFHeqJH3wK3f6jTJfcAz/z33Rwpow2VOdDyqrRfAW8ti73CCnRlN+VJi0V/zvYGs9CHldY3YvMr7rSd0+fdGyJHSTSRBF0vcyRVA/SqSfcIo/5o0ssYoBnQCg6gOkc3nNQ0C0/qh1ww17rw4hqBRxFJ2t3aBUMK+UHPxrELLVmG6ZUmfg9uVkOoafjRsoML6DVDB4JAk5JsmcZhybOarI9PJfEQ==";
# Tailscale hosts
"boron.cx.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDtcJ7HY/vjtheMV8EN2wlTw1hU53CJebGIeRJcSkzt5";
"be.lt.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILV3OSUT+cqFqrFHZGfn7/xi5FW3n1qjUFy8zBbYs2Sm";
"dancefloor.dancefloor.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEXkGueVYKr2wp/VHo2QLis0kmKtc/Upg3pGoHr6RkzY";
"gendry.jakehillion.terminals.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPXM5aDvNv4MTITXAvJWSS2yvr/mbxJE31tgwJtcl38c";
"homeassistant.homeassistant.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPM2ytacl/zYXhgvosvhudsl0zW5eQRHXm9aMqG9adux";
"li.pop.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHQWgcDFL9UZBDKHPiEGepT1Qsc4gz3Pee0/XVHJ6V6u";
"microserver.home.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPPOCPqXm5a+vGB6PsJFvjKNgjLhM5MxrwCy6iHGRjXw";
"router.home.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAlCj/i2xprN6h0Ik2tthOJQy6Qwq3Ony73+yfbHYTFu";
"sodium.pop.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDQmG7v/XrinPmkTU2eIoISuU3+hoV4h60Bmbwd+xDjr";
"theon.storage.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN59psLVu3/sQORA4x3p8H3ei8MCQlcwX5T+k3kBeBMf";
};
programs.ssh.knownHostsFiles = [ ./github_known_hosts ];
};
}
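Further known hosts follow the same pattern. A minimal sketch of adding one more entry (the host name and key below are placeholders, not taken from this repository; the real key would come from something like ssh-keyscan -t ed25519 run against the host over a trusted path):

programs.ssh.knownHosts = {
  # Placeholder host and key, for illustration only.
  "example.pop.ts.hillion.co.uk".publicKey = "ssh-ed25519 AAAA...placeholder";
};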

modules/tailscale.nix (new file, 65 lines)

@ -0,0 +1,65 @@
{ pkgs, lib, config, ... }:
let
cfg = config.custom.tailscale;
in
{
options.custom.tailscale = {
enable = lib.mkEnableOption "tailscale";
preAuthKeyFile = lib.mkOption {
type = lib.types.str;
};
advertiseRoutes = lib.mkOption {
type = with lib.types; listOf str;
default = [ ];
};
advertiseExitNode = lib.mkOption {
type = lib.types.bool;
default = false;
};
ipv4Addr = lib.mkOption { type = lib.types.str; };
ipv6Addr = lib.mkOption { type = lib.types.str; };
};
config = lib.mkIf cfg.enable {
environment.systemPackages = [ pkgs.tailscale ];
services.tailscale.enable = true;
networking.firewall.checkReversePath = lib.mkIf cfg.advertiseExitNode "loose";
systemd.services.tailscale-autoconnect = {
description = "Automatic connection to Tailscale";
# make sure tailscale is running before trying to connect to tailscale
after = [ "network-pre.target" "tailscale.service" ];
wants = [ "network-pre.target" "tailscale.service" ];
wantedBy = [ "multi-user.target" ];
# set this service as a oneshot job
serviceConfig.Type = "oneshot";
# have the job run this shell script
script = with pkgs; ''
# wait for tailscaled to settle
sleep 2
# check if we are already authenticated to tailscale
status="$(${tailscale}/bin/tailscale status -json | ${jq}/bin/jq -r .BackendState)"
if [ $status = "Running" ]; then # if so, then do nothing
exit 0
fi
# otherwise authenticate with tailscale
${tailscale}/bin/tailscale up \
--authkey "$(<${cfg.preAuthKeyFile})" \
--advertise-routes "${lib.concatStringsSep "," cfg.advertiseRoutes}" \
--advertise-exit-node=${if cfg.advertiseExitNode then "true" else "false"}
'';
};
};
}
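A host consuming this module would enable it and fill in its own values. A minimal sketch, with placeholder secret path, routes and addresses (none of these are taken from the repository):

{ ... }:
{
  custom.tailscale = {
    enable = true;
    # Placeholder: any file containing a Tailscale pre-auth key works here,
    # e.g. one provisioned by a secrets tool such as agenix.
    preAuthKeyFile = "/run/secrets/tailscale.key";
    advertiseRoutes = [ "10.64.50.0/24" ];
    advertiseExitNode = false;
    # Placeholder Tailscale addresses for this host.
    ipv4Addr = "100.64.0.1";
    ipv6Addr = "fd7a:115c:a1e0::1";
  };
}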


@ -1,10 +0,0 @@
{ config, lib, ... }:
{
imports = [
./global.nix
./home.nix
./iot.nix
./www-repo.nix
];
}


@ -33,11 +33,6 @@ in
services.caddy = {
enable = true;
package = pkgs.unstable.caddy;
globalConfig = ''
email acme@hillion.co.uk
'';
virtualHosts = {
"hillion.co.uk".extraConfig = ''
@ -47,10 +42,7 @@ in
header /.well-known/matrix/* Access-Control-Allow-Origin *
respond /.well-known/matrix/server "{\"m.server\": \"matrix.hillion.co.uk:443\"}" 200
respond /.well-known/matrix/client `${builtins.toJSON {
"m.homeserver" = { "base_url" = "https://matrix.hillion.co.uk"; };
"org.matrix.msc3575.proxy" = { "url" = "https://matrix.hillion.co.uk"; };
}}` 200
respond /.well-known/matrix/client `{"m.homeserver":{"base_url":"https://matrix.hillion.co.uk"}}`
respond 404
}
@ -73,7 +65,6 @@ in
reverse_proxy http://${locations.services.gitea}:3000
'';
"matrix.hillion.co.uk".extraConfig = ''
reverse_proxy /_matrix/client/unstable/org.matrix.msc3575/sync http://${locations.services.matrix}:8009
reverse_proxy /_matrix/* http://${locations.services.matrix}:8008
reverse_proxy /_synapse/client/* http://${locations.services.matrix}:8008
'';
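For reference, the multi-line variant of the client well-known response is built with builtins.toJSON. As an illustration only (not part of the repository), evaluating that expression in nix repl yields the JSON body Caddy serves:

# Illustration: the attribute set from the hunk above, serialised.
builtins.toJSON {
  "m.homeserver" = { "base_url" = "https://matrix.hillion.co.uk"; };
  "org.matrix.msc3575.proxy" = { "url" = "https://matrix.hillion.co.uk"; };
}
# => {"m.homeserver":{"base_url":"https://matrix.hillion.co.uk"},"org.matrix.msc3575.proxy":{"url":"https://matrix.hillion.co.uk"}}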


@ -1,27 +0,0 @@
{ pkgs, lib, config, ... }:
let
cfg = config.custom.www.home;
locations = config.custom.locations.locations;
in
{
options.custom.www.home = {
enable = lib.mkEnableOption "home";
};
config = lib.mkIf cfg.enable {
services.caddy = {
enable = true;
virtualHosts = {
"homeassistant.home.hillion.co.uk".extraConfig = ''
bind 10.64.50.25
tls {
ca https://ca.ts.hillion.co.uk:8443/acme/acme/directory
}
reverse_proxy http://${locations.services.homeassistant}:8123
'';
};
};
};
}


@ -1,32 +0,0 @@
{ pkgs, lib, config, ... }:
let
cfg = config.custom.www.iot;
locations = config.custom.locations.locations;
in
{
options.custom.www.iot = {
enable = lib.mkEnableOption "iot";
};
config = lib.mkIf cfg.enable {
services.caddy = {
enable = true;
package = pkgs.unstable.caddy;
virtualHosts = {
"homeassistant.iot.hillion.co.uk".extraConfig = ''
bind 10.239.19.5
tls {
ca https://ca.ts.hillion.co.uk:8443/acme/acme/directory
}
@blocked not remote_ip 10.239.19.4
respond @blocked "<h1>Access Denied</h1>" 403
reverse_proxy http://${locations.services.homeassistant}:8123
'';
};
};
};
}
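While these modules existed, turning them on was a plain enable switch on the serving host. A minimal sketch (which host carried the bind addresses above is not shown in this diff):

{
  # Placeholder host configuration, for illustration only.
  custom.www.home.enable = true;
  custom.www.iot.enable = true;
}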


@ -57,7 +57,7 @@ in
${pkgs.git}/bin/git clone ${cfg.remote} ${cfg.location}
else
cd ${cfg.location}
${pkgs.git}/bin/git remote set-url origin ${cfg.remote}
${pkgs.git} remote set-url origin ${cfg.remote}
${pkgs.git}/bin/git fetch
${pkgs.git}/bin/git reset --hard origin/${cfg.branch}
fi


@ -4,21 +4,7 @@
},
"lockFileMaintenance": {
"enabled": true,
"schedule": ["* 2-5 * * *"]
},
"rebaseWhen": "behind-base-branch",
"packageRules": [
{
"matchManagers": ["github-actions"],
"automerge": true,
"schedule": [
"after 11pm on Monday",
"after 11pm on Thursday"
]
"schedule": ["* 2-5 * * *"]
}
],
"extends": [
"config:recommended",
"helpers:pinGitHubActionDigests"
]
}


@ -1,15 +0,0 @@
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p restic rsync
set -e
HOST="restic.ts.hillion.co.uk"
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR"
rsync -ar --no-perms --delete-after --rsync-path='sudo -u restic rsync' --progress $HOST:/practical-defiant-coffee/backups/restic/128G/ restic/128G
echo 'checking 128G'
restic -r restic/128G check --read-data-subset=25%
touch last_synced


@ -2,12 +2,12 @@
#!nix-shell -i bash -p restic rsync
set -e
HOST="restic.ts.hillion.co.uk"
HOST="tywin.storage.ts.hillion.co.uk"
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR"
rsync -ar --no-perms --delete-after --rsync-path='sudo -u restic rsync' --progress $HOST:/practical-defiant-coffee/backups/restic/ restic
rsync -ar --no-perms --delete-after --rsync-path='sudo -u restic rsync' --progress $HOST:/data/backups/restic/ restic
echo 'checking 128G'
restic -r restic/128G check --read-data
@ -15,3 +15,4 @@ echo 'checking 1.6T'
restic -r restic/1.6T check --read-data
touch last_synced


Some files were not shown because too many files have changed in this diff.