mirror of
https://github.com/gensokyo-zone/infrastructure.git
synced 2026-02-09 04:19:19 -08:00
ops(k8s): replace k3s with k8s, provide bootstrap, ty @duckfullstop
This commit is contained in:
parent
067d72b8a8
commit
fc67b7a2e5
9 changed files with 266 additions and 47 deletions
|
|
@ -6,6 +6,7 @@ keys:
|
|||
- &tewi_osh age172nhlv3py990k2rgw64hy27hffmnpv6ssxyu9fepww7zxfgg347qna4gzt
|
||||
- &tei_osh age1a2quf2ekkj94ygu7wgvhrvh44fwn32c0l2cwvgvjh23wst90s54szdsvgr
|
||||
- &mediabox_osh age16klpkaut5759dut8mdm3jn0rnp8w6kxyvs9n6ntqrdsayjtd7upqlvw489
|
||||
- &kuwubernetes_osh age1q2yjpxlqkfhsfxumtmax6zsyt669vlr9ffjks3dpkjf3cqdakcwqt2nt66
|
||||
creation_rules:
|
||||
- path_regex: 'nixos/secrets/.+\.yaml$'
|
||||
shamir_threshold: 1
|
||||
|
|
@ -42,6 +43,12 @@ creation_rules:
|
|||
- pgp: *pgp_common
|
||||
age:
|
||||
- *mediabox_osh
|
||||
- path_regex: 'systems/kuwubernetes/secrets\.yaml$'
|
||||
shamir_threshold: 1
|
||||
key_groups:
|
||||
- pgp: *pgp_common
|
||||
age:
|
||||
- *kuwubernetes_osh
|
||||
- path_regex: 'systems/[^/]+/secrets\.yaml$'
|
||||
shamir_threshold: 1
|
||||
key_groups:
|
||||
|
|
|
|||
12
k8s/bootstrap.sh
Executable file
12
k8s/bootstrap.sh
Executable file
|
|
@ -0,0 +1,12 @@
|
|||
#!/usr/bin/env -S nix shell nixpkgs#kubernetes-helm nixpkgs#kubectl --command bash
# Bootstrap the core in-cluster services (CNI, DNS, GitOps) on a fresh cluster.
# Requires a working kubeconfig for the target cluster in the environment.
# NOTE: kubectl is now included in the nix shell — the original shebang only
# provided kubernetes-helm, so `kubectl` relied on it happening to be on PATH.
set -euo pipefail

echo "Installing flannel (CNI/Network Fabric)"
# Idempotent namespace creation: plain `kubectl create ns` fails on re-run,
# which would abort the whole script under `set -e`.
kubectl create namespace kube-flannel --dry-run=client -o yaml | kubectl apply -f -
# flannel runs privileged pods; relax the namespace's Pod Security admission level.
kubectl label --overwrite ns kube-flannel pod-security.kubernetes.io/enforce=privileged
helm repo add flannel https://flannel-io.github.io/flannel/
# podCidr must match services.kubernetes.clusterCidr in nixos/k8s.nix (10.42.0.0/16).
helm install flannel --set podCidr="10.42.0.0/16" --namespace kube-flannel flannel/flannel

echo "Installing CoreDNS (Cluster DNS)"
helm repo add coredns https://coredns.github.io/helm
# clusterIP must match services.kubernetes.kubelet.clusterDns in nixos/k8s.nix (10.43.0.2).
helm --namespace=kube-system install coredns coredns/coredns --set service.clusterIP=10.43.0.2

echo "Installing ArgoCD (GitOps)"
helm repo add argo https://argoproj.github.io/argo-helm
helm install argocd argo/argo-cd --namespace argocd --create-namespace
|
||||
|
|
@ -11,9 +11,11 @@ spec:
|
|||
chart: coredns
|
||||
helm:
|
||||
valuesObject:
|
||||
services:
|
||||
clusterIP: 10.43.0.2
|
||||
destination:
|
||||
namespace: kube-system
|
||||
name: in-cluster
|
||||
syncPolicy:
|
||||
syncOptions:
|
||||
- ServerSideApply=true
|
||||
- ServerSideApply=true
|
||||
|
|
|
|||
|
|
@ -1,6 +1,10 @@
|
|||
{ config, lib, pkgs, ... }: with lib;
|
||||
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
with lib; {
|
||||
boot.kernelPackages = mkIf (elem "zfs" config.boot.supportedFilesystems) (mkDefault config.boot.zfs.package.latestCompatibleLinuxPackages);
|
||||
hardware.enableRedistributableFirmware = lib.mkDefault true;
|
||||
boot.zfs.enableUnstable = mkIf (elem "zfs" config.boot.supportedFilesystems) true;
|
||||
|
|
@ -10,7 +14,7 @@
|
|||
"net.core.wmem_max" = 16777216;
|
||||
"net.ipv4.tcp_rmem" = "4096 87380 16777216";
|
||||
"net.ipv4.tcp_wmem" = "4096 65536 16777216";
|
||||
"net.ipv4.ip_forward" = "1";
|
||||
"net.ipv4.ip_forward" = mkDefault "1";
|
||||
"net.ipv6.conf.all.forwarding" = "1";
|
||||
"net.ipv6.conf.all.accept_ra_rt_info_max_plen" = 128;
|
||||
"net.ipv6.conf.default.accept_ra_rt_info_max_plen" = 128;
|
||||
|
|
|
|||
|
|
@ -1,21 +0,0 @@
|
|||
# NixOS module: single-node k3s server (server + agent on the same host).
# Removed by this commit in favour of the full kubernetes module (nixos/k8s.nix).
{pkgs, ...}: {
  networking.firewall = {
    allowedTCPPorts = [
      # Kubernetes API server port.
      6443
    ];
    allowedUDPPorts = [
    ];
  };

  services.k3s = {
    enable = true;
    role = "server";
    disableAgent = false; # single node server+agent
    extraFlags = toString [
      "--disable=servicelb" # we want to use metallb
      # i guess it's kind of ok to keep the local path provisioner, even though i used to have the yaml files for deploying it on regular k8s
    ];
  };

  # Expose the k3s CLI (kubectl/crictl wrappers) for interactive administration.
  environment.systemPackages = [pkgs.k3s];
}
|
||||
93
nixos/k8s.nix
Normal file
93
nixos/k8s.nix
Normal file
|
|
@ -0,0 +1,93 @@
|
|||
# NixOS module: single-node Kubernetes control plane + worker, replacing k3s.
# CNI (flannel), cluster DNS (CoreDNS) and GitOps (ArgoCD) are deliberately NOT
# managed here — they are installed in-cluster by k8s/bootstrap.sh.
{
  config,
  pkgs,
  lib,
  ...
}: let
  inherit (lib.modules) mkForce;
  inherit (lib.strings) escapeShellArgs;
  # Address and name the API server is reachable at; extraHosts below pins the
  # hostname locally so bootstrap works before external DNS does.
  kubeMasterIP = "10.1.1.173";
  kubeMasterHostname = "k8s.gensokyo.zone";
  kubeMasterAPIServerPort = 6443;
in {
  # packages for administration tasks
  environment.systemPackages = with pkgs; [
    kompose
    kubectl
    kubernetes
  ];

  networking = {
    # NOTE(review): host firewall is force-disabled entirely, presumably so
    # kube-proxy/CNI iptables rules are not interfered with — confirm this is intended
    # rather than opening the specific ports (6443 etc.).
    firewall.enable = mkForce false;
    nftables.enable = mkForce false;
    extraHosts = "${kubeMasterIP} ${kubeMasterHostname}";
  };

  # easyCerts generates etcd's client/server certs asynchronously; block etcd
  # startup until the cert material exists to avoid a crash-loop on first boot.
  systemd.services.etcd.preStart = ''${pkgs.writeShellScript "etcd-wait" ''
    while [ ! -f /var/lib/kubernetes/secrets/etcd.pem ]; do sleep 1; done
  ''}'';

  services.kubernetes = {
    # Single node acting as both control plane and worker.
    roles = ["master" "node"];
    # DNS and flannel are installed via helm in k8s/bootstrap.sh instead.
    addons.dns.enable = false;
    flannel.enable = false;
    # Self-signed PKI managed by NixOS (see etcd preStart hack above).
    easyCerts = true;
    masterAddress = kubeMasterHostname;
    # Pod CIDR; must match the flannel chart's podCidr in k8s/bootstrap.sh.
    clusterCidr = "10.42.0.0/16";
    apiserverAddress = "https://${kubeMasterHostname}:${toString kubeMasterAPIServerPort}";
    apiserver = {
      serviceClusterIpRange = "10.43.0.0/16";
      securePort = kubeMasterAPIServerPort;
      advertiseAddress = kubeMasterIP;
      extraOpts = escapeShellArgs [
        # Allow NodePort services on any port, not just the default 30000-32767.
        "--service-node-port-range=1-65535"
      ];
      allowPrivileged = true;
    };
    kubelet = {
      # Tolerate enabled swap on the host instead of refusing to start.
      extraOpts = "--fail-swap-on=false";
      # Cluster DNS address; must match the CoreDNS service.clusterIP set in
      # k8s/bootstrap.sh (inside serviceClusterIpRange above).
      clusterDns = "10.43.0.2";
    };
  };

  # --- Credit for section to @duckfullstop --- #

  # Set CRI binary directory to location where they'll be dropped by kubernetes setup containers
  # important note: this only works if the container drops a statically linked binary,
  # as dynamically linked ones would be looking for binaries that only exist in the nix store
  # (and not in conventional locations)
  virtualisation.containerd.settings = {
    plugins."io.containerd.grpc.v1.cri" = {
      containerd.snapshotter = "overlayfs";
      cni.bin_dir = "/opt/cni/bin";
    };
  };

  # disable creating the CNI directory (cluster CNI make it for us)
  environment.etc."cni/net.d".enable = false;

  # This by default removes all CNI plugins and replaces them with nix-defines ones
  # Since we bring our own CNI plugins via containers with host mounts, this causes
  # them to be removed on kubelet restart.
  # TODO(https://github.com/NixOS/nixpkgs/issues/53601): fix when resolved
  systemd.services.kubelet = {
    # mkForce: fully replace the nixpkgs-provided preStart (which wipes /opt/cni/bin)
    # while keeping its image-seeding and plugin-linking behaviour.
    preStart = pkgs.lib.mkForce ''
      ${lib.concatMapStrings (img: ''
        echo "Seeding container image: ${img}"
        ${
          # Gzipped image archives are decompressed before import; others piped as-is.
          if (lib.hasSuffix "gz" img)
          then ''${pkgs.gzip}/bin/zcat "${img}" | ${pkgs.containerd}/bin/ctr -n k8s.io image import -''
          else ''${pkgs.coreutils}/bin/cat "${img}" | ${pkgs.containerd}/bin/ctr -n k8s.io image import -''
        }
      '')
      config.services.kubernetes.kubelet.seedDockerImages}
      ${lib.concatMapStrings (package: ''
        echo "Linking cni package: ${package}"
        ln -fs ${package}/bin/* /opt/cni/bin
      '')
      config.services.kubernetes.kubelet.cni.packages}
    '';
  };

  # --- End of section --- #
}
|
||||
|
|
@ -7,7 +7,9 @@
|
|||
}: {
|
||||
imports = with meta; [
|
||||
(modulesPath + "/profiles/qemu-guest.nix")
|
||||
nixos.k3s
|
||||
nixos.sops
|
||||
nixos.cloudflared
|
||||
nixos.k8s
|
||||
];
|
||||
|
||||
boot = {
|
||||
|
|
@ -33,5 +35,26 @@
|
|||
|
||||
networking.interfaces.ens18.useDHCP = true;
|
||||
|
||||
sops.secrets.cloudflare_kubernetes_tunnel = {
|
||||
owner = config.services.cloudflared.user;
|
||||
};
|
||||
|
||||
services.cloudflared = let
|
||||
tunnelId = "3dde2376-1dd1-4282-b5a4-aba272594976";
|
||||
in {
|
||||
tunnels.${tunnelId} = {
|
||||
default = "http_status:404";
|
||||
credentialsFile = config.sops.secrets.cloudflare_kubernetes_tunnel.path;
|
||||
ingress = {
|
||||
"k8s.gensokyo.zone" = {
|
||||
service = "https://localhost:6443";
|
||||
originRequest.noTLSVerify = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
sops.defaultSopsFile = ./secrets.yaml;
|
||||
|
||||
system.stateVersion = "23.11";
|
||||
}
|
||||
|
|
|
|||
57
systems/kuwubernetes/secrets.yaml
Normal file
57
systems/kuwubernetes/secrets.yaml
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
cloudflare_kubernetes_tunnel: ENC[AES256_GCM,data:NS5cmvbRsgGs8hrqkEtn4HdTZTfk2k/vG3aNeyCQz6egpEFuJsPcMphnawSsudQLx6mSNYn0Gnw0BRFH/7fQY8gY1A9F5s9TqeOUifOEy3mcLBn/5MuhSdy+An/OGCyuEBqX8vxauQtRHVydvUlV7Vlj6zFnvZRxWnSAUIYkPgyHbVjW3jnscZjqwHaO6bnjf9gHIe1XO3gVYQGEdkToTFQ1zY/2JCMhJHPXkGyCPARS0o5eizg=,iv:meZyBFDXk7LJpj0vGRX69uODlPXPEIkDwGC0GTVM2yk=,tag:UC22HvOGdCp7jZr66VpB2A==,type:str]
|
||||
sops:
|
||||
shamir_threshold: 1
|
||||
kms: []
|
||||
gcp_kms: []
|
||||
azure_kv: []
|
||||
hc_vault: []
|
||||
age:
|
||||
- recipient: age1q2yjpxlqkfhsfxumtmax6zsyt669vlr9ffjks3dpkjf3cqdakcwqt2nt66
|
||||
enc: |
|
||||
-----BEGIN AGE ENCRYPTED FILE-----
|
||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBMdTNVSmpxN3NTWDV0N0to
|
||||
SUJjYUtsTk55a3B6NjBmNjBvblRWc1RtRGhnCk5Kb3dTY2lFclBuOGlHa0x4SXFp
|
||||
VEk3VHVlazUyZ1hHekh4M2lucXdrUHMKLS0tIG5rRFdXbElrZDd4aExkWFlnU1Ax
|
||||
RTRBYXk4SnlJZmlCdi8zdWYwaXovTm8K4zVxkTFOE17W3AaWcM2rptIT553AWMln
|
||||
tsvfek5fraxh1RGjE06/Lsl1xMH9HtA3tyxGgbNm19P4TuQMJQRl7Q==
|
||||
-----END AGE ENCRYPTED FILE-----
|
||||
lastmodified: "2024-01-18T18:05:07Z"
|
||||
mac: ENC[AES256_GCM,data:1gKnsj3JWwoE2N19VDCsCr7tYwpuG1T6kMGTcTzIKhozPaicEhcYfH4FwcDaMEF93B9zYnPG7JIxINI0HcpAnSTgZVUEg6X76J97vbrEmCTxb34KnTv+Ngd9Ncs09yugXsHA8EE1u73MsqMy7bEcOvcnI1qZutsllT0+5nbIIsI=,iv:5jPHDi2lleQxDLS2A4rL+FWP1ijplAtxGV/YT/jFnCs=,tag:sXKAIfsEu0MM2X54psexjQ==,type:str]
|
||||
pgp:
|
||||
- created_at: "2024-01-18T17:50:14Z"
|
||||
enc: |-
|
||||
-----BEGIN PGP MESSAGE-----
|
||||
|
||||
hQIMA82M54yws73UAQ//cPw+8m3Yd7B/ELTu5Cp6xqSHdHctCuo7quNw2REWLG6i
|
||||
rcFzi2pPq8AiBWIUM4ly+e9jKEVIL5PT1eALuVj+y4tQ+SIF/fY9DOh7D8AWy9Ta
|
||||
YuA/I+SlPvYn/tvsPCiEflNv//LQTT2r14rrQD+t86c6nEVldyoQIwbfe5k9drNk
|
||||
BkqsiTi0xP7DWYk7MjmzTiwvWA58t7Vpq4qOHiYZu4ve4LsV+VWH+Qx77SB/3ofY
|
||||
onqlNljhCdzPzniST5C8XI+BDYxJUbmrPokLveoLvflB+z/94oqROtyR0yAzMSUj
|
||||
F9j+o7DD/SQrsX5P/Hxhn+aIsmbNyFWezMRYbccJR0mfgt5EOT+dlRNgvENQ0+iJ
|
||||
h+5UvswEhWGfT+YeXEVeGk1S/3gTNglmjRBWoYhm8mawp1RQRmTEfv/ehPOyuzqF
|
||||
9BXWuGBcVf5oHB7zx3XZY+QuaNPK+n72C5EFpVJzM7gAy6NRaspnTu07ZUU1OMmw
|
||||
CH1OCFX5cNIie7zdvwQYRz4yNKZL4l7kmXhm2D7XIqWt7JSbAiQHtF529bM9UC9X
|
||||
e/gCmb3Ke4u9lCIt8Xh9DYn13hsCnbYnoQaNMuZZluiFg+aFtma1wLcv3vwM4BqZ
|
||||
U33eECLx6yU4jlrZ/ZrlayWPwArlJEJmXcKB1sWaihg08QK78xTOVUi1sTb8AujS
|
||||
XgEZ+WDcZcticwbtnYb3Pb/M5bW7fP+crD7HGyeNLXIN+knFAI5v7/YUz4bprBP3
|
||||
2kzZmya5i/2ykueGx9Oxi02EyYKGGd/ztPLsonjsbjwdfR0etKD7C27NKfIPeso=
|
||||
=bBVJ
|
||||
-----END PGP MESSAGE-----
|
||||
fp: CD8CE78CB0B3BDD4
|
||||
- created_at: "2024-01-18T17:50:14Z"
|
||||
enc: |-
|
||||
-----BEGIN PGP MESSAGE-----
|
||||
|
||||
hQEMA2W9MER3HLb7AQf/bSdNBGYmpnn60a4I9e27BgEVy5/BjLbc9ujPyOLRf1mm
|
||||
W2s42lX4mABJ6Qs5a9D6IF+/OMKQO6KWMtLmwwuVmMDSkZxkRG1fE/IoDtnCKOUJ
|
||||
IDparKyYexB9rSeOdVxQUqr4+mMWPc/5p9vIh8dZ8ZiiCO5ev9EyGNQOiSnW+gIN
|
||||
Iim2uk7onKVbGwENuwqUB4bgeykqS4Maujfudzdi+sxVl7EKrjA3ZbYeYjPORpRu
|
||||
3EQRRxaPLwmemqtws4dg6m+AQLDQETevgWfZ8Gj4vUPmxUU9w/uHq5gxzzgsQK+m
|
||||
qM/VV20+5ZU4DG9cr9dVAHhICgk/h92nuyZqpyFFPdJeAQ1Wz9Ks3XJA9viLqgnk
|
||||
za4b9rvJb5kXdE9wdja0R6Z33uv0/26ZzJngtx8E7s3yQDxFylY76kweG6oDegsY
|
||||
o16GTqABBx5bp/FSXr3tyq5BWfmemEirOuWR5ilWKA==
|
||||
=RKwU
|
||||
-----END PGP MESSAGE-----
|
||||
fp: 65BD3044771CB6FB
|
||||
unencrypted_suffix: _unencrypted
|
||||
version: 3.8.1
|
||||
|
|
@ -9,25 +9,61 @@
|
|||
inherit (config.networking) hostName;
|
||||
cfg = config.services.cloudflared;
|
||||
apartment = "5e85d878-c6b2-4b15-b803-9aeb63d63543";
|
||||
systemFor = hostName: if hostName == config.networking.hostName
|
||||
systemFor = hostName:
|
||||
if hostName == config.networking.hostName
|
||||
then config
|
||||
else meta.network.nodes.${hostName};
|
||||
accessHostFor = { hostName, system ? systemFor hostName, access ? "local", ... }: let
|
||||
accessHostFor = {
|
||||
hostName,
|
||||
system ? systemFor hostName,
|
||||
access ? "local",
|
||||
...
|
||||
}: let
|
||||
host = system.networking.access.hostnameForNetwork.${access} or (throw "unsupported access ${access}");
|
||||
in if hostName == config.networking.hostName then "localhost" else host;
|
||||
ingressForNginx = { host ? system.networking.fqdn, port ? 80, hostName, system ? systemFor hostName }@args: nameValuePair host {
|
||||
service = "http://${accessHostFor args}:${toString port}";
|
||||
};
|
||||
ingressForHass = { host ? system.services.home-assistant.domain, port ? system.services.home-assistant.config.http.server_port, hostName, system ? systemFor hostName, ... }@args: nameValuePair host {
|
||||
service = "http://${accessHostFor args}:${toString port}";
|
||||
};
|
||||
ingressForVouch = { host ? system.services.vouch-proxy.domain, port ? system.services.vouch-proxy.settings.vouch.port, hostName, system ? systemFor hostName, ... }@args: nameValuePair host {
|
||||
service = "http://${accessHostFor args}:${toString port}";
|
||||
};
|
||||
ingressForKanidm = { host ? system.services.kanidm.server.frontend.domain, port ? system.services.kanidm.server.frontend.port, hostName, system ? systemFor hostName, ... }@args: nameValuePair host {
|
||||
service = "https://${accessHostFor args}:${toString port}";
|
||||
originRequest.noTLSVerify = true;
|
||||
};
|
||||
in
|
||||
if hostName == config.networking.hostName
|
||||
then "localhost"
|
||||
else host;
|
||||
ingressForNginx = {
|
||||
host ? system.networking.fqdn,
|
||||
port ? 80,
|
||||
hostName,
|
||||
system ? systemFor hostName,
|
||||
} @ args:
|
||||
nameValuePair host {
|
||||
service = "http://${accessHostFor args}:${toString port}";
|
||||
};
|
||||
ingressForHass = {
|
||||
host ? system.services.home-assistant.domain,
|
||||
port ? system.services.home-assistant.config.http.server_port,
|
||||
hostName,
|
||||
system ? systemFor hostName,
|
||||
...
|
||||
} @ args:
|
||||
nameValuePair host {
|
||||
service = "http://${accessHostFor args}:${toString port}";
|
||||
};
|
||||
ingressForVouch = {
|
||||
host ? system.services.vouch-proxy.domain,
|
||||
port ? system.services.vouch-proxy.settings.vouch.port,
|
||||
hostName,
|
||||
system ? systemFor hostName,
|
||||
...
|
||||
} @ args:
|
||||
nameValuePair host {
|
||||
service = "http://${accessHostFor args}:${toString port}";
|
||||
};
|
||||
ingressForKanidm = {
|
||||
host ? system.services.kanidm.server.frontend.domain,
|
||||
port ? system.services.kanidm.server.frontend.port,
|
||||
hostName,
|
||||
system ? systemFor hostName,
|
||||
...
|
||||
} @ args:
|
||||
nameValuePair host {
|
||||
service = "https://${accessHostFor args}:${toString port}";
|
||||
originRequest.noTLSVerify = true;
|
||||
};
|
||||
in {
|
||||
sops.secrets.cloudflared-tunnel-apartment.owner = cfg.user;
|
||||
services.cloudflared = {
|
||||
|
|
@ -36,11 +72,17 @@ in {
|
|||
credentialsFile = config.sops.secrets.cloudflared-tunnel-apartment.path;
|
||||
default = "http_status:404";
|
||||
ingress = listToAttrs [
|
||||
(ingressForNginx { host = config.networking.domain; inherit hostName; })
|
||||
(ingressForNginx { host = config.services.zigbee2mqtt.domain; inherit hostName; })
|
||||
(ingressForHass { inherit hostName; })
|
||||
(ingressForVouch { inherit hostName; })
|
||||
(ingressForKanidm { inherit hostName; })
|
||||
(ingressForNginx {
|
||||
host = config.networking.domain;
|
||||
inherit hostName;
|
||||
})
|
||||
(ingressForNginx {
|
||||
host = config.services.zigbee2mqtt.domain;
|
||||
inherit hostName;
|
||||
})
|
||||
(ingressForHass {inherit hostName;})
|
||||
(ingressForVouch {inherit hostName;})
|
||||
(ingressForKanidm {inherit hostName;})
|
||||
];
|
||||
};
|
||||
};
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue