refactor: get rid of config folder

Kat Inskip 2022-07-08 17:53:16 -07:00
parent 2606e1d874
commit cb3ae5f434
Signed by: kat
GPG key ID: 465E64DECEA8CF0F
254 changed files with 79 additions and 101 deletions

services/access.nix Normal file

@@ -0,0 +1,90 @@
{ config, lib, meta, pkgs, ... }: with lib; {
deploy.tf.dns.records.services_plex = {
inherit (config.network.dns) zone;
domain = "plex";
cname = { inherit (config.network.addresses.public) target; };
};
deploy.tf.dns.records.services_cloud = {
inherit (config.network.dns) zone;
domain = "cloud";
cname = { inherit (config.network.addresses.public) target; };
};
deploy.tf.dns.records.services_home = {
inherit (config.network.dns) zone;
domain = "home";
cname = { inherit (config.network.addresses.public) target; };
};
deploy.tf.dns.records.gensokyo_root_v4 = {
zone = "gensokyo.zone.";
a = { inherit (config.network.addresses.public.tf.ipv4) address; };
};
deploy.tf.dns.records.gensokyo_root_v6 = {
zone = "gensokyo.zone.";
aaaa = { inherit (config.network.addresses.public.tf.ipv6) address; };
};
services.nginx.virtualHosts = mkMerge [
{
"gensokyo.zone" = {
forceSSL = true;
enableACME = true;
locations."/" = {
root = pkgs.gensokyoZone;
};
};
"home.${config.network.dns.domain}" = {
forceSSL = true;
enableACME = true;
locations = {
"/" = {
proxyPass = "http://yukari.ygg.kittywit.ch:8123";
extraConfig = ''
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_http_version 1.1;
'';
};
};
};
"cloud.${config.network.dns.domain}" = {
forceSSL = true;
enableACME = true;
locations = {
"/".proxyPass = "http://cloud.int.kittywit.ch/";
};
};
"plex.${config.network.dns.domain}" = {
forceSSL = true;
enableACME = true;
locations = {
"/" = {
proxyPass = "http://[${meta.network.nodes.nixos.yukari.network.addresses.yggdrasil.nixos.ipv6.address}]";
extraConfig = ''
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_redirect off;
proxy_buffering off;
proxy_set_header X-Plex-Client-Identifier $http_x_plex_client_identifier;
proxy_set_header X-Plex-Device $http_x_plex_device;
proxy_set_header X-Plex-Device-Name $http_x_plex_device_name;
proxy_set_header X-Plex-Platform $http_x_plex_platform;
proxy_set_header X-Plex-Platform-Version $http_x_plex_platform_version;
proxy_set_header X-Plex-Product $http_x_plex_product;
proxy_set_header X-Plex-Token $http_x_plex_token;
proxy_set_header X-Plex-Version $http_x_plex_version;
proxy_set_header X-Plex-Nocache $http_x_plex_nocache;
proxy_set_header X-Plex-Provides $http_x_plex_provides;
proxy_set_header X-Plex-Device-Vendor $http_x_plex_device_vendor;
proxy_set_header X-Plex-Model $http_x_plex_model;
'';
};
};
};
}
];
}

@@ -0,0 +1,57 @@
{ config, lib, pkgs, ... }: with lib; let
cfg = config.services.dht22-exporter;
in
{
options.services.dht22-exporter.socat = {
enable = mkEnableOption "socat service";
package = mkOption {
type = types.package;
default = pkgs.socat;
};
addresses = mkOption {
type = with types; coercedTo str singleton (listOf str);
default = singleton "::1";
};
};
config = {
systemd.services = mkIf cfg.socat.enable {
dht22-exporter-socat =
let
scfg = cfg.socat;
service = singleton "dht22-exporter.service";
in
{
after = service;
bindsTo = service;
serviceConfig = {
DynamicUser = true;
};
script =
let
port = toString (if cfg.port == null then 8001 else cfg.port);
addresser = addr: "${scfg.package}/bin/socat TCP6-LISTEN:${port},bind=${addr},fork TCP4:localhost:${port}";
lines = map addresser scfg.addresses;
in
''
${concatStringsSep "\n" lines}
'';
};
};
users.users.dht22-exporter = {
isSystemUser = true;
group = "gpio";
};
services.dht22-exporter = {
enable = true;
platform = "pi";
address = "127.0.0.1";
socat = {
enable = true;
};
user = "dht22-exporter";
group = "gpio";
};
};
}
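
A minimal usage sketch for the socat sub-options defined above, as they might be consumed from another host's configuration. The import path and the second listen address are assumptions for illustration, not values from this commit:

{ ... }: {
  # Hypothetical consumer of the module above; ./dht22-socat.nix stands in for
  # wherever this file actually lives. Because `addresses` is typed
  # `coercedTo str singleton (listOf str)`, a single string is also accepted.
  imports = [ ./dht22-socat.nix ];
  services.dht22-exporter.socat = {
    enable = true;
    addresses = [ "::1" "fd00::10" ]; # illustrative listen addresses
  };
}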

@@ -0,0 +1,2 @@
{ config, lib, pkgs, ... }: {
}

services/filehost.nix Normal file

@@ -0,0 +1,189 @@
{ config, lib, pkgs, tf, ... }: with lib; let
toKeyValue = generators.toKeyValue {
mkKeyValue = generators.mkKeyValueDefault {} " = ";
};
installerReplacement = pkgs.writeShellScriptBin "installer_replacement" ''
set -exu
if [[ ! -f "/var/lib/xbackbone/state/installed" ]]; then
mkdir -p /var/lib/xbackbone/files
mkdir -p /var/lib/xbackbone/www
mkdir -p /var/lib/xbackbone/state
cp -Lr ${pkgs.xbackbone}/* /var/lib/xbackbone/www
cp ${config.secrets.files.xbackbone-config.path} /var/lib/xbackbone/www/config.php
chmod -R 0770 /var/lib/xbackbone/www
chown -R xbackbone:nginx /var/lib/xbackbone/www
touch /var/lib/xbackbone/state/installed
fi
'';
in {
kw.secrets.variables.xbackbone-ldap = {
path = "secrets/xbackbone";
field = "password";
};
secrets.files.xbackbone-config = {
text = ''
<?php
return [
'base_url' => 'https://files.kittywit.ch', // no trailing slash
'storage' => [
'driver' => 'local',
'path' => '/var/lib/xbackbone/files',
],
'db' => [
'connection' => 'sqlite', // current support for sqlite and mysql
'dsn' => '/var/lib/xbackbone/xbackbone.db', // if sqlite should be an absolute path
'username' => null, // username and password not needed for sqlite
'password' => null,
],
'ldap' => [
'enabled' => true, // enable it
'schema' => 'ldaps', // use 'ldap' or 'ldaps' Default is 'ldap'
'host' => 'auth.kittywit.ch', // set the ldap host
'port' => 636, // ldap port
'base_domain' => 'ou=users,dc=kittywit,dc=ch', // the base_dn string
'search_filter' => '(&(|(uid=????)(mail=????))(objectClass=inetOrgPerson))', // ???? is replaced with user provided username
'rdn_attribute' => 'uid=', // the attribute to use as username
'service_account_dn' => 'cn=xbackbone,ou=services,dc=kittywit,dc=ch', // LDAP Service Account Full DN
'service_account_password' => "${tf.variables.xbackbone-ldap.ref}",
]
];
'';
owner = "xbackbone";
group = "xbackbone";
mode = "0440";
};
systemd.tmpfiles.rules = [
"v /var/lib/xbackbone 0770 xbackbone nginx"
"v /var/lib/xbackbone/files 0770 xbackbone nginx"
];
users.users.xbackbone = {
isSystemUser = true;
group = "xbackbone";
home = "/var/lib/xbackbone";
};
users.groups.xbackbone.members = [
"xbackbone"
config.services.nginx.user
];
systemd.services.xbackbone = {
after = [ "network.target" ];
wantedBy = [ "phpfpm-xbackbone.service" ];
script = "${installerReplacement}/bin/installer_replacement";
serviceConfig = {
User = "xbackbone";
Group = "nginx";
Type = "oneshot";
StateDirectory = "xbackbone";
};
};
services.nginx.virtualHosts = {
"files.${config.network.dns.domain}" = {
root = "/var/lib/xbackbone/www";
locations = {
"/" = {
extraConfig = ''
try_files $uri $uri/ /index.php?$query_string;
'';
};
"~ \\.php$" = {
extraConfig = ''
include ${pkgs.nginx}/conf/fastcgi_params;
fastcgi_pass unix:${config.services.phpfpm.pools.xbackbone.socket};
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name;
fastcgi_param SCRIPT_NAME $fastcgi_script_name;
'';
};
};
extraConfig = ''
client_max_body_size 512M;
index index.php index.html index.htm;
error_page 404 /index.php;
location /app {
return 403;
}
location /bin {
return 403;
}
location /bootstrap {
return 403;
}
location /resources {
return 403;
}
location /storage {
return 403;
}
location /vendor {
return 403;
}
location /logs {
return 403;
}
location CHANGELOG.md {
return 403;
}
'';
enableACME = true;
forceSSL = true;
};
};
services.phpfpm = {
pools.xbackbone = {
user = "xbackbone";
group = "nginx";
phpEnv = {
PATH = "/run/wrappers/bin:/nix/var/nix/profiles/default/bin:/run/current-system/sw/bin:/usr/bin:/bin";
};
settings = {
"pm" = "dynamic";
"pm.max_children" = "32";
"pm.start_servers" = "2";
"pm.min_spare_servers" = "2";
"pm.max_spare_servers" = "4";
"pm.max_requests" = "500";
"listen.owner" = "xbackbone";
"listen.group" = "xbackbone";
};
phpPackage = pkgs.php80.buildEnv {
extraConfig = toKeyValue {
upload_max_filesize = "512M";
post_max_size = "512M";
memory_limit = "512M";
};
extensions = { enabled, all }: (
with all;
enabled ++ [
sqlite3
intl
zip
ldap
gd
]
);
};
};
};
deploy.tf.dns.records.services_filehost = {
inherit (config.network.dns) zone;
domain = "files";
cname = { inherit (config.network.addresses.public) target; };
};
}
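
For reference, the generators.toKeyValue helper above renders those PHP settings as "key = value" lines; this sketch is the equivalent literal extraConfig (same three settings), shown only to make the generated output explicit:

{ pkgs, ... }: {
  # Equivalent inline form of the extraConfig produced by toKeyValue above
  # (illustrative; the attribute-set form in the real module is what is used).
  services.phpfpm.pools.xbackbone.phpPackage = pkgs.php80.buildEnv {
    extraConfig = ''
      upload_max_filesize = 512M
      post_max_size = 512M
      memory_limit = 512M
    '';
  };
}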

services/fusionpbx.nix Normal file

@@ -0,0 +1,53 @@
{ config, pkgs, tf, lib, ... }: with lib;
{
deploy.tf.dns.records.services_fusionpbx = {
inherit (config.network.dns) zone;
domain = "pbx";
cname = { inherit (config.network.addresses.private) target; };
};
kw.secrets.variables = mapListToAttrs
(field:
nameValuePair "fusionpbx-${field}" {
path = "services/fusionpbx";
inherit field;
}) [ "username" "password" ];
secrets.files.fusionpbx_env = {
text = ''
USER_NAME=${tf.variables.fusionpbx-username.ref}
USER_PASSWORD=${tf.variables.fusionpbx-password.ref}
'';
owner = "fusionpbx";
group = "fusionpbx";
};
security.acme.certs.services_fusionpbx = {
domain = "pbx.${config.network.dns.domain}";
group = "fusionpbx";
dnsProvider = "rfc2136";
credentialsFile = config.secrets.files.dns_creds.path;
postRun = "systemctl restart nginx";
};
services.fusionpbx = {
enable = true;
openFirewall = true;
useLocalPostgreSQL = true;
environmentFile = config.secrets.files.fusionpbx_env.path;
hardphones = true;
useACMEHost = "services_fusionpbx";
domain = "pbx.${config.network.dns.domain}";
package = with pkgs; fusionpbxWithApps [ fusionpbx-apps.sms ];
freeSwitchPackage = with pkgs; freeswitch;
};
services.nginx.virtualHosts."altar.kittywit.ch" = {
locations = {
"app/sms/hook/" = {
proxyPass = "http://pbx.kittywit.ch/app/sms/hook";
};
};
};
}

services/gitea/default.nix Normal file

@@ -0,0 +1,115 @@
{ config, lib, pkgs, tf, ... }:
{
kw.secrets.variables = {
gitea-mail-pass = {
path = "secrets/mail-kittywitch";
field = "gitea-pass";
};
};
secrets.files.gitea-mail-passfile = {
text = ''
${tf.variables.gitea-mail-pass.ref}
'';
owner = "gitea";
group = "gitea";
};
services.postgresql = {
enable = true;
ensureDatabases = [ "gitea" ];
ensureUsers = [{
name = "gitea";
ensurePermissions."DATABASE gitea" = "ALL PRIVILEGES";
}];
};
services.gitea = {
enable = true;
disableRegistration = true;
domain = "git.${config.network.dns.domain}";
rootUrl = "https://git.${config.network.dns.domain}";
httpAddress = "127.0.0.1";
appName = "kittywitch git";
ssh = { clonePort = 62954; };
database = {
type = "postgres";
name = "gitea";
user = "gitea";
};
mailerPasswordFile = config.secrets.files.gitea-mail-passfile.path;
settings = {
security = { DISABLE_GIT_HOOKS = false; };
api = { ENABLE_SWAGGER = true; };
openid = {
ENABLE_OPENID_SIGNIN = true;
ENABLE_OPENID_SIGNUP = true;
};
mailer = {
ENABLED = true;
SUBJECT = "%(APP_NAME)s";
HOST = "daiyousei.kittywit.ch:465";
USER = "gitea@kittywit.ch";
#SEND_AS_PLAIN_TEXT = true;
USE_SENDMAIL = false;
FROM = "\"kittywitch git\" <gitea@${config.network.dns.domain}>";
};
service = {
NO_REPLY_ADDRESS = "kittywit.ch";
REGISTER_EMAIL_CONFIRM = true;
ENABLE_NOTIFY_MAIL = true;
};
ui = {
THEMES = "gitea";
DEFAULT_THEME = "gitea";
THEME_COLOR_META_TAG = "#222222";
};
};
};
systemd.services.gitea.serviceConfig.ExecStartPre =
let
themePark = pkgs.fetchFromGitHub {
owner = "GilbN";
repo = "theme.park";
rev = "009a7b703544955f8a29197597507d9a1ae40d63";
sha256 = "1axqivwkmw6rq0ffwi1mm209bfkvv4lyld2hgyq2zmnl7mj3fifc";
};
binder = pkgs.writeText "styles.css" ''
@import url("/assets/css/gitea-base.css");
@import url("/assets/css/overseerr.css");
:root {
--color-code-bg: transparent;
}
.markup input[type="checkbox"] {
appearance: auto !important;
-moz-appearance: auto !important;
-webkit-appearance: auto !important;
}
'';
in
[
"${pkgs.coreutils}/bin/ln -sfT ${pkgs.runCommand "gitea-public" {
} ''
${pkgs.coreutils}/bin/mkdir -p $out/{css,img}
${pkgs.coreutils}/bin/cp ${themePark}/CSS/themes/gitea/gitea-base.css $out/css
${pkgs.coreutils}/bin/cp ${themePark}/CSS/variables/overseerr.css $out/css
${pkgs.coreutils}/bin/cp ${binder} $out/css/styles.css
${pkgs.coreutils}/bin/cp -r ${./public}/* $out/
''} /var/lib/gitea/custom/public"
"${pkgs.coreutils}/bin/ln -sfT ${./templates} /var/lib/gitea/custom/templates"
];
services.nginx.virtualHosts."git.${config.network.dns.domain}" = {
enableACME = true;
forceSSL = true;
locations = { "/".proxyPass = "http://127.0.0.1:3000"; };
};
deploy.tf.dns.records.services_gitea = {
inherit (config.network.dns) zone;
domain = "git";
cname = { inherit (config.network.addresses.public) target; };
};
}

@@ -0,0 +1 @@
<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 280 171.19"><defs><style>.cls-1,.cls-3{stroke:#000;stroke-miterlimit:10;}.cls-2{fill:#ccc;}.cls-3,.cls-4{fill:#f2f2f2;}</style></defs><path class="cls-1" d="M403.84,463.72s13.51-20.52,9.51-44.75c0,0-22.62-2.63-38,12.5C361.41,402.18,345.28,369,369.47,345c0,0,8,5.45,8.47,12.71,2,31.08-1.18,31.53-1.18,31.53,25.3-3.77,32.51-45.12,9.89-59.77-55.53-26.12-63.59-4.21-71.3,42.35-3.2,27.07-3.83,41.66-8.19,62.74-13.46-15.72-38.59-14.19-38-14-3.4,25,7.88,45.73,9.45,46.09-15,4.55-37.67,6.71-73,11.28,75.87,17.66,199,11.15,274.58-4.94C468.23,470.85,414.31,471.71,403.84,463.72Z" transform="translate(-202.9 -317.79)"/><polygon class="cls-2" points="98.68 121.45 90.04 110.5 90.44 120.65 75.53 115.4 83.03 130.62 71.94 130.38 81.58 144.37 98.68 121.45"/><path class="cls-3" d="M305.65,436.85" transform="translate(-202.9 -317.79)"/><path class="cls-4" d="M301.65,439.44l-.07-.2a85.43,85.43,0,0,0-6.65-5.65l0,6.41s-6,1.47-10.83-1.78c0,0,7.18,11.33,1.07,11.54a40.37,40.37,0,0,0-5.48.5l4.78,11.9C299.42,459.81,301.81,444.69,301.65,439.44Z" transform="translate(-202.9 -317.79)"/><polygon class="cls-2" points="179.03 119.28 187.68 108.33 187.28 118.47 202.19 113.22 194.69 128.44 205.78 128.21 196.13 142.19 179.03 119.28"/><path class="cls-4" d="M381.87,437.27l.07-.2a83.13,83.13,0,0,1,6.65-5.66l0,6.41s6,1.47,10.83-1.78c0,0-7.18,11.33-1.07,11.55a38.39,38.39,0,0,1,5.48.5L399,460C384.1,457.63,381.71,442.51,381.87,437.27Z" transform="translate(-202.9 -317.79)"/></svg>

Binary file not shown. (image, 20 KiB)

Binary file not shown. (image, 9.6 KiB)

@@ -0,0 +1 @@
<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 280 171.19"><defs><style>.cls-1,.cls-3{stroke:#000;stroke-miterlimit:10;}.cls-2{fill:#ccc;}.cls-3,.cls-4{fill:#f2f2f2;}</style></defs><path class="cls-1" d="M403.84,463.72s13.51-20.52,9.51-44.75c0,0-22.62-2.63-38,12.5C361.41,402.18,345.28,369,369.47,345c0,0,8,5.45,8.47,12.71,2,31.08-1.18,31.53-1.18,31.53,25.3-3.77,32.51-45.12,9.89-59.77-55.53-26.12-63.59-4.21-71.3,42.35-3.2,27.07-3.83,41.66-8.19,62.74-13.46-15.72-38.59-14.19-38-14-3.4,25,7.88,45.73,9.45,46.09-15,4.55-37.67,6.71-73,11.28,75.87,17.66,199,11.15,274.58-4.94C468.23,470.85,414.31,471.71,403.84,463.72Z" transform="translate(-202.9 -317.79)"/><polygon class="cls-2" points="98.68 121.45 90.04 110.5 90.44 120.65 75.53 115.4 83.03 130.62 71.94 130.38 81.58 144.37 98.68 121.45"/><path class="cls-3" d="M305.65,436.85" transform="translate(-202.9 -317.79)"/><path class="cls-4" d="M301.65,439.44l-.07-.2a85.43,85.43,0,0,0-6.65-5.65l0,6.41s-6,1.47-10.83-1.78c0,0,7.18,11.33,1.07,11.54a40.37,40.37,0,0,0-5.48.5l4.78,11.9C299.42,459.81,301.81,444.69,301.65,439.44Z" transform="translate(-202.9 -317.79)"/><polygon class="cls-2" points="179.03 119.28 187.68 108.33 187.28 118.47 202.19 113.22 194.69 128.44 205.78 128.21 196.13 142.19 179.03 119.28"/><path class="cls-4" d="M381.87,437.27l.07-.2a83.13,83.13,0,0,1,6.65-5.66l0,6.41s6,1.47,10.83-1.78c0,0-7.18,11.33-1.07,11.55a38.39,38.39,0,0,1,5.48.5L399,460C384.1,457.63,381.71,442.51,381.87,437.27Z" transform="translate(-202.9 -317.79)"/></svg>

@@ -0,0 +1 @@
<link rel="stylesheet" href="/assets/css/styles.css">

@@ -0,0 +1,18 @@
{{template "base/head" .}}
<div class="home">
<div class="ui stackable middle very relaxed page grid">
<div class="sixteen wide center aligned centered column">
<div>
<img class="logo" src="{{AssetUrlPrefix}}/img/gitea-lg.png" />
</div>
<div class="hero">
<br />
<h1 class="ui icon header title">
{{AppName}}
</h1>
<a href="https://kittywit.ch"><h2>back to home</h2></a>
</div>
</div>
</div>
</div>
{{template "base/footer" .}}

services/glauth.nix Normal file

@@ -0,0 +1,83 @@
{ config, tf, lib, ... }: with lib; {
network.firewall.public.tcp.ports = [ 636 ];
network.extraCerts.domain-auth = "auth.${config.network.dns.domain}";
users.groups.domain-auth.members = [ "nginx" "glauth" "keycloak" ];
security.acme.certs.domain-auth.group = "domain-auth";
services.glauth = {
enable = true;
configFile = config.secrets.files.glauth-config-file.path;
database = {
enable = true;
local = true;
type = "postgres";
passwordFile = config.secrets.files.glauth-postgres-file.path;
};
settings = {
syslog = true;
ldap = {
enabled = false;
listen = "0.0.0.0:3893";
};
ldaps = {
enabled = true;
listen = "0.0.0.0:636";
cert = "/var/lib/acme/domain-auth/fullchain.pem";
key = "/var/lib/acme/domain-auth/key.pem";
};
backend = {
baseDN = "dc=kittywitc,dc=ch";
};
users = [
{
name = "kat";
mail = "kat@kittywit.ch";
loginshell="/usr/bin/env zsh";
homedirectory="/home/kat";
passsha256 = tf.variables.glauth-password-hash.ref;
uidnumber = 1000;
primarygroup = 1500;
givenname = "kat";
sn = "witch";
}
{
name = "kc";
passsha256 = tf.variables.glauth-kc-password-hash.ref;
uidnumber = 999;
primarygroup = 1499;
}
];
groups = [
{
name = "admins";
gidnumber = 1499;
}
{
name = "users";
gidnumber = 1500;
}
];
};
};
kw.secrets.variables = mapListToAttrs
(field:
nameValuePair "glauth-${field}" {
path = "services/glauth";
inherit field;
}) [ "password-hash" "kc-password-hash" "postgres" ];
secrets.files = {
glauth-postgres-file = {
text = tf.variables.glauth-postgres.ref;
owner = "postgres";
group = "glauth";
};
glauth-config-file = {
text = toTOML config.services.glauth.settings;
owner = "glauth";
group = "glauth";
};
};
}
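
The passsha256 fields above are hex-encoded SHA-256 digests of the plaintext passwords, supplied here through Terraform variables. A hedged sketch of producing such a hash directly in Nix, with a placeholder user and password:

{ lib, ... }: {
  # Illustrative only: hard-coding a hash like this puts it in the Nix store;
  # the real config above pulls the hashes from tf.variables instead.
  services.glauth.settings.users = lib.singleton {
    name = "example";
    uidnumber = 1001;
    primarygroup = 1500;
    passsha256 = builtins.hashString "sha256" "correct horse battery staple";
  };
}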

services/ha.nix Normal file

@@ -0,0 +1,80 @@
{ config, lib, tf, ... }: {
services = {
home-assistant = {
enable = true;
config = null;
extraComponents = [
"zha"
"esphome"
"apple_tv"
"spotify"
"met"
"default_config"
"cast"
"plex"
"google"
"google_assistant"
"google_cloud"
"google_translate"
"homekit"
"mqtt"
"wake_on_lan"
"zeroconf"
"luci"
];
};
mosquitto = {
enable = true;
persistence = true;
listeners = [ {
acl = [ "pattern readwrite #" ];
omitPasswordAuth = true;
settings.allow_anonymous = true;
} ];
};
zigbee2mqtt = {
enable = true;
settings = {
advanced = {
log_level = "info";
network_key = "!secret network_key";
};
homeassistant = true;
permit_join = false;
frontend = {
port = 8072;
};
serial = {
port = "tcp://192.168.1.149:8888";
adapter = "ezsp";
};
};
};
};
kw.secrets.variables.z2m-network-key = {
path = "secrets/zigbee2mqtt";
field = "password";
};
secrets.files.zigbee2mqtt-config = {
text = builtins.toJSON config.services.zigbee2mqtt.settings;
owner = "zigbee2mqtt";
group = "zigbee2mqtt";
};
secrets.files.zigbee2mqtt-secret = {
text = "network_key: ${tf.variables.z2m-network-key.ref}";
owner = "zigbee2mqtt";
group = "zigbee2mqtt";
};
systemd.services.zigbee2mqtt.preStart = let cfg = config.services.zigbee2mqtt; in lib.mkForce ''
cp --no-preserve=mode ${config.secrets.files.zigbee2mqtt-config.path} "${cfg.dataDir}/configuration.yaml"
cp --no-preserve=mode ${config.secrets.files.zigbee2mqtt-secret.path} "${cfg.dataDir}/secret.yaml"
'';
network.firewall.public.tcp.ports = [ 8123 8072 1883 ];
network.firewall.private.tcp.ports = [ 8123 ];
}

services/hedgedoc.nix Normal file

@@ -0,0 +1,85 @@
{ config, lib, tf, ... }: with lib;
{
kw.secrets.variables = (mapListToAttrs
(field:
nameValuePair "hedgedoc-${field}" {
path = "secrets/hedgedoc";
inherit field;
}) [ "secret" ]);
secrets.files.hedgedoc-env = {
text = ''
CMD_OAUTH2_USER_PROFILE_URL=https://auth.${config.network.dns.domain}/auth/realms/kittywitch/protocol/openid-connect/userinfo
CMD_OAUTH2_CLIENT_SECRET=${tf.variables.hedgedoc-secret.ref}
CMD_OAUTH2_USER_PROFILE_USERNAME_ATTR=preferred_username
CMD_OAUTH2_USER_PROFILE_DISPLAY_NAME_ATTR=name
CMD_OAUTH2_USER_PROFILE_EMAIL_ATTR=email
CMD_OAUTH2_PROVIDERNAME=Keycloak
CMD_DOMAIN=md.kittywit.ch
'';
owner = "hedgedoc";
group = "hedgedoc";
};
services.hedgedoc = {
enable = true;
configuration = {
debug = true;
path = "/run/hedgedoc/hedgedoc.sock";
domain = "md.${config.network.dns.domain}";
protocolUseSSL = true;
allowFreeURL = true;
email = false;
allowEmailRegister = false;
allowAnonymous = false;
allowAnonymousEdits = true;
imageUploadType = "filesystem";
allowGravatar = true;
db = {
dialect = "postgres";
host = "/run/postgresql";
};
oauth2 = {
tokenURL = "https://auth.${config.network.dns.domain}/auth/realms/kittywitch/protocol/openid-connect/token";
authorizationURL = "https://auth.${config.network.dns.domain}/auth/realms/kittywitch/protocol/openid-connect/auth";
clientID = "hedgedoc";
clientSecret = "";
};
};
environmentFile = config.secrets.files.hedgedoc-env.path;
};
deploy.tf.dns.records.services_hedgedoc = {
inherit (config.network.dns) zone;
domain = "md";
cname = { inherit (config.network.addresses.public) target; };
};
systemd.services.hedgedoc = {
serviceConfig = {
UMask = "0007";
RuntimeDirectory = "hedgedoc";
};
};
services.postgresql = {
ensureDatabases = [ "hedgedoc" ];
ensureUsers = [
{
name = "hedgedoc";
ensurePermissions."DATABASE hedgedoc" = "ALL PRIVILEGES";
}
];
};
users.users.nginx.extraGroups = [ "hedgedoc" ];
services.nginx.virtualHosts."md.${config.network.dns.domain}" = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://unix:/run/hedgedoc/hedgedoc.sock";
proxyWebsockets = true;
};
};
}

services/irlmail.nix Normal file

@@ -0,0 +1,49 @@
{ config, tf, meta, lib, ... }: with lib; {
dns.zones."inskip.me." = {
provider = "dns.katdns";
};
resources.gmail-mx = let
zone = config.dns.zones."inskip.me.";
in with zone; {
provider = provider.set;
type = "mx_record_set";
inputs = {
zone = domain;
ttl = 3600;
mx = [
{ preference = 1; exchange = "aspmx.l.google.com."; }
{ preference = 5; exchange = "alt1.aspmx.l.google.com."; }
{ preference = 5; exchange = "alt2.aspmx.l.google.com."; }
{ preference = 10; exchange = "alt3.aspmx.l.google.com."; }
{ preference = 10; exchange = "alt4.aspmx.l.google.com."; }
{ preference = 15; exchange = "6uyykkzhqi4zgogxiicbuamoqrxajwo5werga4byh77b2iyx3wma.mx-verification.google.com."; }
];
};
};
dns.records = {
services_inskip_a = {
zone = "inskip.me.";
a.address = meta.network.nodes.marisa.network.addresses.public.nixos.ipv4.address;
};
services_inskip_aaaa = {
zone = "inskip.me.";
aaaa.address = meta.network.nodes.marisa.network.addresses.public.nixos.ipv6.address;
};
services_gmail_spf = {
zone = "inskip.me.";
txt.value = "v=spf1 include:_spf.google.com ~all";
};
services_gmail_dkim = {
zone = "inskip.me.";
domain = "google._domainkey";
txt.value = "v=DKIM1; k=rsa; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAkxag/EmXQ89XQmLrBDPpPtZ7EtEJT0hgvWf/+AFiOfBOm902tq9NbTTvRJ2dLeBLPaV+hNvq2Alc7UfkKUDlLTWQjeuiC6aOnRKQQg3LZ2W25U3AlIj0jd2IPiUhg9JGV4c66XiqQ5ylTBniShfUUyeAXxbPhYFBCkBg62LZcO/tFpFsdKWtZzLjgac5vTJID+M4F8duHpkA/ZCNNUEmtt7RNQB/LLI1Gr5yR4GdQl9z7NmwtOTo9pghbZuvljr8phYjdDrwZeFTMKQnvR1l2Eh/dZ8I0C4nP5Bk4QEfmLq666P1HzOxwT6iCU6Tc+P/pkWbrx0HJh39E1aKGyLJMQIDAQAB";
};
services_gmail_dmarc = {
zone = "inskip.me.";
domain = "_dmarc";
txt.value = "v=DMARC1; p=none; rua=mailto:dmarc-reports@inskip.me";
};
};
}

services/irlsite.nix Normal file

@@ -0,0 +1,7 @@
{ config, lib, pkgs, ... }: {
services.nginx.virtualHosts."inskip.me" = {
root = pkgs.irlsite;
enableACME = true;
forceSSL = true;
};
}

services/jellyfin.nix Normal file

@@ -0,0 +1,55 @@
{ config, lib, pkgs, kw, ... }:
{
services.nginx.virtualHosts = kw.virtualHostGen {
networkFilter = [ "private" "yggdrasil" ];
block.locations = {
"/jellyfin/".proxyPass = "http://127.0.0.1:8096/jellyfin/";
"/jellyfin/socket" = {
proxyPass = "http://127.0.0.1:8096/jellyfin/";
extraConfig = ''
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
'';
};
};
};
systemd.services = {
jellyfin-socat =
let
service = lib.singleton "jellyfin.service";
in
{
after = service;
bindsTo = service;
serviceConfig = {
DynamicUser = true;
};
script =
let
port = toString 8096;
addr = config.network.addresses.yggdrasil.nixos.ipv6.address;
in "${pkgs.socat}/bin/socat TCP6-LISTEN:${port},bind=${addr},fork TCP4:localhost:${port}";
};
};
network.firewall = {
public.tcp.ranges = [{
from = 32768;
to = 60999;
}];
public.tcp.ports = [ 8096 ];
private.tcp = {
ports = [
8096
];
ranges = [{
from = 32768;
to = 60999;
}];
};
};
services.jellyfin.enable = true;
}

services/jira.nix Normal file

@@ -0,0 +1,56 @@
{ config, pkgs, lib, tf, ... }: with lib; {
services.jira = {
enable = true;
};
deploy.tf.dns.records.services_jira = {
inherit (config.network.dns) zone;
domain = "jira";
cname = { inherit (config.network.addresses.public) target; };
};
systemd.services.jiraPostgresSQLInit = {
after = [ "postgresql.service" ];
before = [ "jira.service" ];
bindsTo = [ "postgresql.service" ];
path = [ config.services.postgresql.package ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
User = "postgres";
Group = "postgres";
};
script = ''
set -o errexit -o pipefail -o nounset -o errtrace
shopt -s inherit_errexit
create_role="$(mktemp)"
trap 'rm -f "$create_role"' ERR EXIT
echo "CREATE ROLE jira WITH LOGIN PASSWORD '$(<'${config.secrets.files.jira-postgres-file.path}')' CREATEDB" > "$create_role"
psql -tAc "SELECT 1 FROM pg_roles WHERE rolname='jira'" | grep -q 1 || psql -tA --file="$create_role"
psql -tAc "SELECT 1 FROM pg_database WHERE datname = 'jira'" | grep -q 1 || psql -tAc 'CREATE DATABASE "jira" OWNER "jira"'
'';
};
kw.secrets.variables.jira-postgres = {
path = "secrets/jira";
field = "password";
};
secrets.files.jira-postgres-file = {
text = "${tf.variables.jira-postgres.ref}";
owner = "postgres";
group = "jira";
};
users.users.nginx.extraGroups = [ "jira" ];
services.nginx.virtualHosts."jira.${config.network.dns.domain}" = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://127.0.0.1:8091";
proxyWebsockets = true;
};
};
}

services/kattv-ingest.nix Normal file

@@ -0,0 +1,126 @@
{ config, pkgs, lib, ... }:
with lib;
let
env = {
FREI0R_PATH = "${pkgs.frei0r}/lib/frei0r-1";
GST_PLUGIN_SYSTEM_PATH_1_0 = with pkgs.gst_all_1; lib.makeSearchPath "lib/gstreamer-1.0" [
gstreamer.out
gst-plugins-base
gst-plugins-good
gst-plugins-bad
gst-plugins-ugly
];
};
queue_frame = {
element."queue" = {
leaky = "downstream";
flush-on-eos = true;
max-size-buffers = 3;
};
};
queue_data = {
element.queue = {
#leaky = "downstream";
};
};
videoconvert_cpu = {
element.videoconvert = {
n-threads = 4;
dither = 0;
chroma-resampler = 0;
chroma-mode = 3;
};
};
videoconvert_gpu = [
"glupload"
"glcolorconvert"
"gldownload"
];
encodeopts = {
speed-preset = "veryfast";
ref = 1;
tune = "zerolatency";
pass = "qual";
#psy-tune = "film";
#noise-reduction=0;
quantizer = 21;
bitrate = 8192;
rc-lookahead = 6;
};
denoise = {
element.frei0r-filter-hqdn3d = {
spatial = 0.175;
temporal = 0.25;
};
};
encode_high = [
{
element.x264enc = {
key-int-max = 150;
} // encodeopts;
}
{
caps."video/x-h264" = {
profile = "high";
};
}
"h264parse"
];
tcpserversink = [
"flvmux"
queue_data
{
element.tcpserversink = {
port = 8989;
host = config.network.addresses.yggdrasil.nixos.ipv6.address;
};
}
];
pipeline = [
{
element.fdsrc = {
fd = 3;
};
}
"matroskademux"
"jpegdec"
queue_frame
videoconvert_cpu
denoise
videoconvert_cpu
encode_high
tcpserversink
];
in
{
network.firewall = {
private.tcp.ports = [ 1935 8989 8990 ];
public.tcp.ports = [ 4953 1935 ];
};
systemd.sockets.kattv = {
wantedBy = [ "sockets.target" ];
listenStreams = [ "0.0.0.0:4953" ];
socketConfig = {
Accept = true;
Backlog = 0;
MaxConnections = 1;
};
};
systemd.services."kattv@" = {
environment = env;
script = "exec ${pkgs.gst_all_1.gstreamer}/bin/gst-launch-1.0 -e --no-position ${pkgs.lib.gst.pipelineShellString pipeline}";
after = [ "nginx.service" ];
description = "RTMP stream of kat cam";
serviceConfig = {
Restart = "on-failure";
RestartSec = "10s";
};
};
}

services/kattv.nix Normal file

@@ -0,0 +1,67 @@
{ meta, config, pkgs, lib, ... }:
let
env = {
FREI0R_PATH = "${pkgs.frei0r}/lib/frei0r-1";
GST_PLUGIN_SYSTEM_PATH_1_0 = with pkgs.gst_all_1; lib.makeSearchPath "lib/gstreamer-1.0" [
gstreamer.out
gst-plugins-base
gst-plugins-good
gst-plugins-bad
gst-plugins-ugly
pkgs.gst-jpegtrunc
];
};
cameracapture = {
element."v4l2src" = {
device = "/dev/videomew";
brightness = 100;
#extra-controls = "c,exposure_auto=3";
};
};
queue_data = {
element.queue = {
leaky = "downstream";
};
};
v4l2src = [
cameracapture
{
caps."image/jpeg" = {
width = 1920;
height = 1080;
framerate = "30/1"; # "10/1"
};
}
];
pipeline = v4l2src ++ [
"jpegtrunc"
queue_data
{ element.matroskamux.streamable = true; }
{
element.tcpclientsink = {
host = meta.network.nodes.nixos.yukari.network.addresses.private.nixos.ipv4.address;
port = "4953";
sync = false;
};
}
];
in
{
services.udev.extraRules = ''
KERNEL=="video[0-9]*", SUBSYSTEM=="video4linux", SUBSYSTEMS=="usb", ATTR{index}=="0", ATTRS{idVendor}=="1c3f", ATTRS{idProduct}=="2002", SYMLINK+="videomew", TAG+="systemd"
'';
systemd.services.kattv = {
wantedBy = [ "dev-videomew.device" "multi-user.target" ];
after = [ "dev-videomew.device" "nginx.service" ];
description = "RTMP stream of kat cam";
bindsTo = [ "dev-videomew.device" ];
environment = env;
script = "exec ${pkgs.gst_all_1.gstreamer}/bin/gst-launch-1.0 -e --no-position ${pkgs.lib.gst.pipelineShellString pipeline}";
serviceConfig = {
Restart = "on-failure";
RestartSec = "10s";
};
};
}

services/kattv2-ingest.nix Normal file

@@ -0,0 +1,126 @@
{ config, pkgs, lib, ... }:
with lib;
let
env = {
FREI0R_PATH = "${pkgs.frei0r}/lib/frei0r-1";
GST_PLUGIN_SYSTEM_PATH_1_0 = with pkgs.gst_all_1; lib.makeSearchPath "lib/gstreamer-1.0" [
gstreamer.out
gst-plugins-base
gst-plugins-good
gst-plugins-bad
gst-plugins-ugly
];
};
queue_frame = {
element."queue" = {
leaky = "downstream";
flush-on-eos = true;
max-size-buffers = 3;
};
};
queue_data = {
element.queue = {
#leaky = "downstream";
};
};
videoconvert_cpu = {
element.videoconvert = {
n-threads = 4;
dither = 0;
chroma-resampler = 0;
chroma-mode = 3;
};
};
videoconvert_gpu = [
"glupload"
"glcolorconvert"
"gldownload"
];
encodeopts = {
speed-preset = "veryfast";
ref = 1;
tune = "zerolatency";
pass = "qual";
#psy-tune = "film";
#noise-reduction=0;
quantizer = 21;
bitrate = 8192;
rc-lookahead = 6;
};
denoise = {
element.frei0r-filter-hqdn3d = {
spatial = 0.175;
temporal = 0.25;
};
};
encode_high = [
{
element.x264enc = {
key-int-max = 150;
} // encodeopts;
}
{
caps."video/x-h264" = {
profile = "high";
};
}
"h264parse"
];
tcpserversink = [
"flvmux"
queue_data
{
element.tcpserversink = {
port = 8990;
host = config.network.addresses.yggdrasil.nixos.ipv6.address;
};
}
];
pipeline = [
{
element.fdsrc = {
fd = 3;
};
}
"matroskademux"
"jpegdec"
queue_frame
videoconvert_cpu
denoise
videoconvert_cpu
encode_high
tcpserversink
];
in
{
network.firewall = {
private.tcp.ports = singleton 1935;
public.tcp.ports = [ 4954 1935 ];
};
systemd.sockets.kattv2 = {
wantedBy = [ "sockets.target" ];
listenStreams = [ "0.0.0.0:4954" ];
socketConfig = {
Accept = true;
Backlog = 0;
MaxConnections = 1;
};
};
systemd.services."kattv2@" = {
environment = env;
script = "exec ${pkgs.gst_all_1.gstreamer}/bin/gst-launch-1.0 -e --no-position ${pkgs.lib.gst.pipelineShellString pipeline}";
after = [ "nginx.service" ];
description = "RTMP stream of kat cam";
serviceConfig = {
Restart = "on-failure";
RestartSec = "10s";
};
};
}

services/kattv2.nix Normal file

@@ -0,0 +1,68 @@
{ meta, config, pkgs, lib, ... }:
let
env = {
FREI0R_PATH = "${pkgs.frei0r}/lib/frei0r-1";
GST_PLUGIN_SYSTEM_PATH_1_0 = with pkgs.gst_all_1; lib.makeSearchPath "lib/gstreamer-1.0" [
gstreamer.out
gst-plugins-base
gst-plugins-good
gst-plugins-bad
gst-plugins-ugly
pkgs.gst-jpegtrunc
];
};
cameracapture = {
element."v4l2src" = {
device = "/dev/videomew";
# saturation = 100;
# brightness = 100;
# extra-controls = "c,exposure_auto=3";
};
};
queue_data = {
element.queue = {
leaky = "downstream";
};
};
v4l2src = [
cameracapture
{
caps."image/jpeg" = {
width = 1920;
height = 1080;
framerate = "30/1"; # "10/1"
};
}
];
pipeline = v4l2src ++ [
"jpegtrunc"
queue_data
{ element.matroskamux.streamable = true; }
{
element.tcpclientsink = {
host = meta.network.nodes.nixos.yukari.network.addresses.private.nixos.ipv4.address;
port = "4954";
sync = false;
};
}
];
in
{
services.udev.extraRules = ''
KERNEL=="video[0-9]*", SUBSYSTEM=="video4linux", SUBSYSTEMS=="usb", ATTR{index}=="0", ATTRS{idVendor}=="1c3f", ATTRS{idProduct}=="2002", SYMLINK+="videomew", TAG+="systemd"
'';
systemd.services.kattv = {
wantedBy = [ "dev-videomew.device" "multi-user.target" ];
after = [ "dev-videomew.device" "nginx.service" ];
description = "RTMP stream of kat cam";
bindsTo = [ "dev-videomew.device" ];
environment = env;
script = "exec ${pkgs.gst_all_1.gstreamer}/bin/gst-launch-1.0 -e --no-position ${pkgs.lib.gst.pipelineShellString pipeline}";
serviceConfig = {
Restart = "on-failure";
RestartSec = "10s";
};
};
}

services/keycloak.nix Normal file

@@ -0,0 +1,65 @@
{ config, pkgs, lib, tf, ... }: with lib; let
keystore-pass = "zZX3eS";
in {
services.keycloak = {
enable = builtins.getEnv "CI_PLATFORM" == "impure";
package = (pkgs.keycloak.override {
jre = pkgs.openjdk11;
});
initialAdminPassword = "mewpymewlymewlies";
database.passwordFile = config.secrets.files.keycloak-postgres-file.path;
settings = {
http-enabled = true;
http-host = "127.0.0.1";
http-port = 8089;
https-port = 8445;
proxy = "edge";
hostname = "auth.kittywit.ch";
http-relative-path = "/auth";
hostname-strict-backchannel = true;
https-key-store-file = "/var/lib/acme/domain-auth/trust-store.jks";
https-key-store-password = keystore-pass;
};
};
network.extraCerts.domain-auth = "auth.${config.network.dns.domain}";
users.groups.domain-auth.members = [ "nginx" "openldap" "keycloak" ];
security.acme.certs.domain-auth = {
group = "domain-auth";
postRun = ''
${pkgs.adoptopenjdk-jre-bin}/bin/keytool -delete -alias auth.kittywit.ch -keypass ${keystore-pass} -storepass ${keystore-pass} -keystore ./trust-store.jks
${pkgs.adoptopenjdk-jre-bin}/bin/keytool -import -alias auth.${config.network.dns.domain} -noprompt -keystore trust-store.jks -keypass ${keystore-pass} -storepass ${keystore-pass} -file cert.pem
chown acme:domain-auth ./trust-store.jks
'';
};
users.groups.keycloak = { };
users.users.keycloak = {
isSystemUser = true;
group = "keycloak";
};
kw.secrets.variables.keycloak-postgres = {
path = "services/keycloak";
field = "postgres";
};
secrets.files.keycloak-postgres-file = {
text = "${tf.variables.keycloak-postgres.ref}";
owner = "postgres";
group = "keycloak";
};
services.nginx.virtualHosts."auth.${config.network.dns.domain}" = {
useACMEHost = "domain-auth";
forceSSL = true;
locations = { "/".proxyPass = "http://127.0.0.1:8089"; };
};
deploy.tf.dns.records.services_keycloak = {
inherit (config.network.dns) zone;
domain = "auth";
cname = { inherit (config.network.addresses.public) target; };
};
}

services/knot/default.nix Normal file

@@ -0,0 +1,35 @@
{ config, lib, tf, pkgs, ... }:
{
network.dns.enable = false;
kw.secrets.variables = {
katdns-key-config = {
path = "secrets/katdns";
field = "notes";
};
};
network.firewall.public = {
tcp.ports = [ 53 ];
udp.ports = [ 53 ];
};
/* environment.etc."katdns/zones/gensokyo.zone.zone".text = let
dns = pkgs.dns;
in dns.lib.toString "gensokyo.zone" (import ./gensokyo.zone.nix { inherit dns lib; }); */
secrets.files.katdns-keyfile = {
text = "${tf.variables.katdns-key-config.ref}";
owner = "knot";
group = "knot";
};
services.knot = {
enable = true;
extraConfig = builtins.readFile ./knot.yaml;
keyFiles = [
config.secrets.files.katdns-keyfile.path
];
};
}

@@ -0,0 +1,20 @@
{ dns, lib }:
with dns.lib.combinators;
{
SOA = {
nameServer = "ns1";
adminEmail = "kat@kittywit.ch";
serial = 2021090100;
ttl = 3600;
};
CAA = map (x: x // { ttl = 3600; }) (letsEncrypt "acme@kittywit.ch");
NS = [
"ns1.kittywit.ch."
"rdns1.benjojo.co.uk."
"rdns2.benjojo.co.uk."
];
}

@@ -0,0 +1,20 @@
{ dns, lib }:
with dns.lib.combinators;
{
SOA = {
nameServer = "ns1";
adminEmail = "kat@kittywit.ch";
serial = 2021090100;
ttl = 3600;
};
CAA = map (x: x // { ttl = 3600; }) (letsEncrypt "acme@kittywit.ch");
NS = [
"ns1.kittywit.ch."
"rdns1.benjojo.co.uk."
"rdns2.benjojo.co.uk."
];
}

@@ -0,0 +1,20 @@
{ dns, lib }:
with dns.lib.combinators;
{
SOA = {
nameServer = "ns1";
adminEmail = "kat@kittywit.ch";
serial = 2021083001;
ttl = 3600;
};
CAA = map (x: x // { ttl = 3600; }) (letsEncrypt "acme@kittywit.ch");
NS = [
"ns1.kittywit.ch."
"rdns1.benjojo.co.uk."
"rdns2.benjojo.co.uk."
];
}

services/knot/knot.yaml Normal file

@@ -0,0 +1,60 @@
server:
listen: [ 0.0.0.0@53, ::@53 ]
remote:
- id: benjojo-1
address: [ 185.230.223.84, 2a0c:2f07:4896:666:216:3eff:fedb:c742 ]
- id: benjojo-2
address: 185.236.240.26
- id: benjojo-3
address: 185.230.223.7
acl:
- id: dnsupdate
key: dnsupdate.kittywit.ch.
action: update
- id: benjojo
remote: [ benjojo-1, benjojo-2, benjojo-3 ]
action: transfer
zone:
- domain: kittywit.ch
semantic-checks: on
storage: /var/lib/knot/zones/
file: kittywit.ch.zone
dnssec-signing: on
module: mod-stats
notify: [ benjojo-1, benjojo-2, benjojo-3 ]
zonefile-load: difference
acl: [ benjojo, dnsupdate ]
- domain: dork.dev
semantic-checks: on
storage: /var/lib/knot/zones/
file: dork.dev.zone
dnssec-signing: on
module: mod-stats
notify: [ benjojo-1, benjojo-2, benjojo-3 ]
zonefile-load: difference
acl: [ benjojo, dnsupdate ]
- domain: inskip.me
semantic-checks: on
storage: /var/lib/knot/zones/
file: inskip.me.zone
dnssec-signing: on
module: mod-stats
notify: [ benjojo-1, benjojo-2, benjojo-3 ]
zonefile-load: difference
acl: [ benjojo, dnsupdate ]
- domain: gensokyo.zone
semantic-checks: on
storage: /var/lib/knot/zones/
file: gensokyo.zone.zone
dnssec-signing: on
module: mod-stats
notify: [ benjojo-1, benjojo-2, benjojo-3 ]
zonefile-load: difference
acl: [ benjojo, dnsupdate ]
log:
- target: syslog
any: info

services/kubernetes.nix Normal file

@@ -0,0 +1,123 @@
{ config, pkgs, lib, ... }:
{
# Set some necessary sysctls
boot.kernel.sysctl = {
"net.ipv6.conf.all.forwarding" = 1;
"net.ipv4.conf.all.forwarding" = 1;
# k8s opens a LOT of files, raise the total number of openable files so we don't end up getting issues in userspace
"fs.inotify.max_user_instances" = 16384;
"vm.max_map_count" = 524288;
"vm.swappiness" = 10;
};
systemd.services.containerd = {
path = with pkgs; [ containerd kmod zfs runc iptables ];
};
virtualisation.containerd.settings = {
plugins."io.containerd.grpc.v1.cri" = {
cni.bin_dir = "/opt/cni/bin";
};
};
# disable creating the CNI directory (calico will make it for us)
environment.etc."cni/net.d".enable = false;
# Firewalling must be disabled for kubes.
networking.firewall.enable = false;
networking.nftables.enable = lib.mkForce false;
# Useful utilities.
environment.systemPackages = [
# kubectl_ppc
pkgs.kubectl pkgs.kubetail
];
# Kubernetes configuration.
services.kubernetes = {
# because fuck PKI honestly
easyCerts = true;
roles = ["master" "node"];
flannel.enable = false;
# where can we contact the (an) apiserver?
apiserverAddress = "https://yukari.int.kittywit.ch:6443";
# where can we contact the orchestrator?
masterAddress = "yukari.int.kittywit.ch";
# ipv4 cidr should be before ipv6 otherwise apps that make assumptions break horribly when binding to ipv4 interfaces and then attempting to contact themselves over ipv6
clusterCidr = "172.18.0.0/16,fc00:abc1::/48";
# define dns separately
addons.dns.enable = false;
# dns on ipv6 though
#addons.dns.clusterIp = "fc00:abc0::254";
# define newer coredns
#addons.dns.coredns = {
# # AMD64 version.
# # TODO upgrade to 1.8 (requires a new configmap)
# # (1.7 removes upstream directive, should just be a case of removing that)
# imageName = "coredns/coredns";
# imageDigest = "sha256:2044ffefe18e2dd3d6781e532119603ee4e8622b6ba38884dc7ab53325435151";
# finalImageTag = "1.6.9";
# sha256 = "0j5gj82jbqylapfrab61qdhm4187pqphyz244n31ik05wd5l8n17";
#};
apiserver = {
# address to advertise the apiserver at, must be reachable by the rest of the cluster
advertiseAddress = "192.168.1.154";
# privileged pods are required to run cluster services like MetalLB and longhorn
allowPrivileged = true;
# bind to ipv4 & ipv6
bindAddress = "::";
# needed otherwise we end up with a cert that isn't valid for ipv6
extraSANs = [ "172.19.0.1" "fc00:abc0::1" ];
serviceClusterIpRange = "172.19.0.0/16,fc00:abc0::/112";
# allow all ports (this is a really bad idea don't do this with untrusted workloads)
extraOpts = "--service-node-port-range=1-65535";
#extraOpts = "--service-node-port-range=1-65535";
enableAdmissionPlugins = [
"NamespaceLifecycle" "LimitRanger" "ServiceAccount" "TaintNodesByCondition" "Priority" "DefaultTolerationSeconds"
"DefaultStorageClass" "StorageObjectInUseProtection" "PersistentVolumeClaimResize" "RuntimeClass" "CertificateApproval" "CertificateSigning"
"CertificateSubjectRestriction" "DefaultIngressClass" "MutatingAdmissionWebhook" "ValidatingAdmissionWebhook" "ResourceQuota"
];
};
controllerManager = {
# bind to localhost ipv6
bindAddress = "::1";
extraOpts = "--service-cluster-ip-range=172.19.0.0/16,fc00:abc0::/64 --node-cidr-mask-size-ipv4=24 --node-cidr-mask-size-ipv6=64";
};
kubelet = {
featureGates = [ "NodeSwap" ];
clusterDns = "fc00:abc0::254";
networkPlugin = "cni";
cni.configDir = "/etc/cni/net.d";
nodeIp = "192.168.1.154,2a00:23c7:c5ad:6e00::c2e";# "10.0.0.1,2a02:8010:61d0:beef:428d:5cff:fe4e:6a2c";
extraOpts = ''
--root-dir=/var/lib/kubelet \
--fail-swap-on=false \
--cni-bin-dir=/opt/cni/bin \
'';
};
proxy = {
# bind to ipv6
bindAddress = "::";
};
};
systemd.services.kubelet = {
preStart = pkgs.lib.mkForce ''
${lib.concatMapStrings (img: ''
echo "Seeding container image: ${img}"
${if (lib.hasSuffix "gz" img) then
''${pkgs.gzip}/bin/zcat "${img}" | ${pkgs.containerd}/bin/ctr -n k8s.io image import -''
else
''${pkgs.coreutils}/bin/cat "${img}" | ${pkgs.containerd}/bin/ctr -n k8s.io image import -''
}
'') config.services.kubernetes.kubelet.seedDockerImages}
${lib.concatMapStrings (package: ''
echo "Linking cni package: ${package}"
ln -fs ${package}/bin/* /opt/cni/bin
'') config.services.kubernetes.kubelet.cni.packages}
'';
};
}
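
The kubelet preStart override above imports whatever is listed in services.kubernetes.kubelet.seedDockerImages into containerd. A hedged sketch of supplying one such image with dockerTools.pullImage; the image digest, hash, and tag are placeholders, not values from this commit:

{ pkgs, lib, ... }: {
  # Hypothetical example: pre-seed a CoreDNS image so the kubelet does not have
  # to pull it on first boot. Replace the placeholder digest/sha256 with real
  # values before this would actually build.
  services.kubernetes.kubelet.seedDockerImages = [
    (pkgs.dockerTools.pullImage {
      imageName = "coredns/coredns";
      imageDigest = "sha256:0000000000000000000000000000000000000000000000000000000000000000";
      sha256 = lib.fakeSha256;
      finalImageTag = "1.8.0";
    })
  ];
}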

services/logrotate.nix Normal file

@@ -0,0 +1,18 @@
{ config, lib, ... }:
with lib;
{
services.logrotate = {
enable = true;
paths = {
nginx = mkIf config.services.nginx.enable {
path = "/var/log/nginx/*.log";
user = "nginx";
group = "nginx";
frequency = "weekly";
keep = 2;
};
};
};
}

@@ -0,0 +1,43 @@
{ pkgs, lib, config, ... }:
let
commonHeaders = lib.concatStringsSep "\n" (lib.filter (line: lib.hasPrefix "add_header" line) (lib.splitString "\n" config.services.nginx.commonHttpConfig));
in {
services.nginx.virtualHosts = {
"autoconfig.kittywit.ch" = {
enableACME = true;
forceSSL = true;
serverAliases = [
"autoconfig.dork.dev"
];
locations = {
"= /mail/config-v1.1.xml" = {
root = pkgs.writeTextDir "mail/config-v1.1.xml" ''
<?xml version="1.0" encoding="UTF-8"?>
<clientConfig version="1.1">
<emailProvider id="kittywit.ch">
<domain>kittywit.ch</domain>
<displayName>kittywit.ch Mail</displayName>
<displayShortName>kittywitch</displayShortName>
<incomingServer type="imap">
<hostname>${config.network.addresses.public.domain}</hostname>
<port>993</port>
<socketType>SSL</socketType>
<authentication>password-cleartext</authentication>
<username>%EMAILADDRESS%</username>
</incomingServer>
<outgoingServer type="smtp">
<hostname>${config.network.addresses.public.domain}</hostname>
<port>465</port>
<socketType>SSL</socketType>
<authentication>password-cleartext</authentication>
<username>%EMAILADDRESS%</username>
</outgoingServer>
</emailProvider>
</clientConfig>
'';
};
};
};
};
}

services/mail/default.nix Normal file

@@ -0,0 +1,12 @@
{ ... }: {
imports = [
./dns.nix
./rspamd.nix
./postfix.nix
./dovecot.nix
./opendkim.nix
./autoconfig.nix
# ./roundcube.nix
./sogo.nix
];
}

services/mail/dns.nix Normal file

@@ -0,0 +1,51 @@
{ config, pkgs, lib, tf, ... }: with lib; let
domains = [ "dork" "kittywitch" ];
in {
kw.secrets.variables = listToAttrs (map
(domain:
nameValuePair "mail-domainkey-${domain}" {
path = "secrets/mail-${domain}";
field = "notes";
})
domains);
deploy.tf.dns.records = mkMerge (map
(domain:
let
zoneGet = domain: if domain == "dork" then "dork.dev." else config.network.dns.zone;
in
{
"services_mail_${domain}_autoconfig_cname" = {
zone = zoneGet domain;
domain = "autoconfig";
cname = { inherit (config.network.addresses.public) target; };
};
"services_mail_${domain}_mx" = {
zone = zoneGet domain;
mx = {
priority = 10;
inherit (config.network.addresses.public) target;
};
};
"services_mail_${domain}_spf" = {
zone = zoneGet domain;
txt.value = "v=spf1 ip4:${config.network.addresses.public.tf.ipv4.address} ip6:${config.network.addresses.public.tf.ipv6.address} -all";
};
"services_mail_${domain}_dmarc" = {
zone = zoneGet domain;
domain = "_dmarc";
txt.value = "v=DMARC1; p=none";
};
"services_mail_${domain}_domainkey" = {
zone = zoneGet domain;
domain = "mail._domainkey";
txt.value = tf.variables."mail-domainkey-${domain}".ref;
};
})
domains);
}
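
Expanded by hand for the "kittywitch" entry (where zoneGet returns config.network.dns.zone), the mkMerge above produces records equivalent to the following sketch, shown for the MX record only; the other records follow the same shape:

{ config, ... }: {
  # Sketch of the generated record for domain = "kittywitch"; the autoconfig
  # CNAME, SPF, DMARC, and mail._domainkey TXT records are built the same way.
  deploy.tf.dns.records.services_mail_kittywitch_mx = {
    inherit (config.network.dns) zone;
    mx = {
      priority = 10;
      inherit (config.network.addresses.public) target;
    };
  };
}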

services/mail/dovecot.nix Normal file

@@ -0,0 +1,206 @@
{ pkgs, config, lib, tf, ... }: with lib;
let
ldapConfig = pkgs.writeText "dovecot-ldap.conf" ''
uris = ldaps://auth.kittywit.ch:636
dn = cn=dovecot,dc=mail,dc=kittywit,dc=ch
dnpass = "@ldap-password@"
auth_bind = no
ldap_version = 3
base = ou=users,dc=kittywit,dc=ch
user_filter = (&(objectClass=mailAccount)(|(mail=%u)(uid=%u)))
user_attrs = \
quota=quota_rule=*:bytes=%$, \
=home=/var/vmail/%d/%n/, \
=mail=maildir:/var/vmail/%d/%n/Maildir
pass_attrs = mail=user,userPassword=password
pass_filter = (&(objectClass=mailAccount)(mail=%u))
iterate_attrs = =user=%{ldap:mail}
iterate_filter = (objectClass=mailAccount)
scope = subtree
default_pass_scheme = SSHA
'';
ldapConfig-services = pkgs.writeText "dovecot-ldap.conf" ''
uris = ldaps://auth.kittywit.ch:636
dn = cn=dovecot,dc=mail,dc=kittywit,dc=ch
dnpass = "@ldap-password@"
auth_bind = no
ldap_version = 3
base = ou=services,dc=kittywit,dc=ch
user_filter = (&(objectClass=mailAccount)(|(mail=%u)(uid=%u)))
user_attrs = \
quota=quota_rule=*:bytes=%$, \
=home=/var/vmail/%d/%n/, \
=mail=maildir:/var/vmail/%d/%n/Maildir
pass_attrs = mail=user,userPassword=password
pass_filter = (&(objectClass=mailAccount)(mail=%u))
iterate_attrs = =user=%{ldap:mail}
iterate_filter = (objectClass=mailAccount)
scope = subtree
default_pass_scheme = SSHA
'';
in
{
security.acme.certs.dovecot_domains = {
inherit (config.network.dns) domain;
group = "postfix";
dnsProvider = "rfc2136";
credentialsFile = config.secrets.files.dns_creds.path;
postRun = "systemctl restart dovecot2";
extraDomainNames =
[
config.network.dns.domain
"mail.${config.network.dns.domain}"
config.network.addresses.public.domain
"dork.dev"
];
};
services.dovecot2 = {
enable = true;
enableImap = true;
enableLmtp = true;
enablePAM = false;
mailLocation = "maildir:/var/vmail/%d/%n/Maildir";
mailUser = "vmail";
mailGroup = "vmail";
extraConfig = ''
ssl = yes
ssl_cert = </var/lib/acme/dovecot_domains/fullchain.pem
ssl_key = </var/lib/acme/dovecot_domains/key.pem
local_name kittywit.ch {
ssl_cert = </var/lib/acme/dovecot_domains/fullchain.pem
ssl_key = </var/lib/acme/dovecot_domains/key.pem
}
local_name dork.dev {
ssl_cert = </var/lib/acme/dovecot_domains/fullchain.pem
ssl_key = </var/lib/acme/dovecot_domains/key.pem
}
ssl_min_protocol = TLSv1.2
ssl_cipher_list = EECDH+AESGCM:EDH+AESGCM
ssl_prefer_server_ciphers = yes
ssl_dh=<${config.security.dhparams.params.dovecot2.path}
mail_plugins = virtual fts fts_lucene
service lmtp {
user = vmail
unix_listener /var/lib/postfix/queue/private/dovecot-lmtp {
group = postfix
mode = 0600
user = postfix
}
}
service doveadm {
inet_listener {
port = 4170
ssl = yes
}
}
protocol lmtp {
postmaster_address=postmaster@kittywit.ch
hostname=${config.network.addresses.public.domain}
mail_plugins = $mail_plugins sieve
}
service auth {
unix_listener auth-userdb {
mode = 0640
user = vmail
group = vmail
}
# Postfix smtp-auth
unix_listener /var/lib/postfix/queue/private/auth {
mode = 0666
user = postfix
group = postfix
}
}
userdb {
args = /run/dovecot2/ldap.conf
driver = ldap
}
userdb {
args = /run/dovecot2/ldap-services.conf
driver = ldap
}
passdb {
args = /run/dovecot2/ldap.conf
driver = ldap
}
passdb {
args = /run/dovecot2/ldap-services.conf
driver = ldap
}
service imap-login {
client_limit = 1000
service_count = 0
inet_listener imaps {
port = 993
}
}
service managesieve-login {
inet_listener sieve {
port = 4190
}
}
protocol sieve {
managesieve_logout_format = bytes ( in=%i : out=%o )
}
plugin {
sieve_dir = /var/vmail/%d/%n/sieve/scripts/
sieve = /var/vmail/%d/%n/sieve/active-script.sieve
sieve_extensions = +vacation-seconds
sieve_vacation_min_period = 1min
fts = lucene
fts_lucene = whitespace_chars=@.
}
# If you have Dovecot v2.2.8+ you may get a significant performance improvement with fetch-headers:
imapc_features = $imapc_features fetch-headers
# Read multiple mails in parallel, improves performance
mail_prefetch_count = 20
'';
modules = [
pkgs.dovecot_pigeonhole
];
protocols = [
"sieve"
];
};
users.users.vmail = {
home = "/var/vmail";
createHome = true;
isSystemUser = true;
uid = 1042;
shell = "/run/current-system/sw/bin/nologin";
};
security.dhparams = {
enable = true;
params.dovecot2 = { };
};
kw.secrets.variables."dovecot-ldap-password" = {
path = "services/dovecot";
field = "password";
};
secrets.files.dovecot-ldap-password.text = ''
${tf.variables.dovecot-ldap-password.ref}
'';
systemd.services.dovecot2.preStart = ''
sed -e "s!@ldap-password@!$(<${config.secrets.files.dovecot-ldap-password.path})!" ${ldapConfig} > /run/dovecot2/ldap.conf
sed -e "s!@ldap-password@!$(<${config.secrets.files.dovecot-ldap-password.path})!" ${ldapConfig-services} > /run/dovecot2/ldap-services.conf
'';
networking.firewall.allowedTCPPorts = [
143 # imap
993 # imaps
4190 # sieve
];
}

@@ -0,0 +1,71 @@
{ config, lib, pkgs, ... }:
with lib;
let
dkimUser = config.services.opendkim.user;
dkimGroup = config.services.opendkim.group;
dkimKeyDirectory = "/var/dkim";
dkimKeyBits = 1024;
dkimSelector = "mail";
domains = [ "kittywit.ch" "dork.dev" ];
createDomainDkimCert = dom:
let
dkim_key = "${dkimKeyDirectory}/${dom}.${dkimSelector}.key";
dkim_txt = "${dkimKeyDirectory}/${dom}.${dkimSelector}.txt";
in
''
if [ ! -f "${dkim_key}" ] || [ ! -f "${dkim_txt}" ]
then
${pkgs.opendkim}/bin/opendkim-genkey -s "${dkimSelector}" \
-d "${dom}" \
--bits="${toString dkimKeyBits}" \
--directory="${dkimKeyDirectory}"
mv "${dkimKeyDirectory}/${dkimSelector}.private" "${dkim_key}"
mv "${dkimKeyDirectory}/${dkimSelector}.txt" "${dkim_txt}"
echo "Generated key for domain ${dom} selector ${dkimSelector}"
fi
'';
createAllCerts = lib.concatStringsSep "\n" (map createDomainDkimCert domains);
keyTable = pkgs.writeText "opendkim-KeyTable"
(lib.concatStringsSep "\n" (lib.flip map domains
(dom: "${dom} ${dom}:${dkimSelector}:${dkimKeyDirectory}/${dom}.${dkimSelector}.key")));
signingTable = pkgs.writeText "opendkim-SigningTable"
(lib.concatStringsSep "\n" (lib.flip map domains (dom: "${dom} ${dom}")));
dkim = config.services.opendkim;
args = [ "-f" "-l" ] ++ lib.optionals (dkim.configFile != null) [ "-x" dkim.configFile ];
in
{
config = {
services.opendkim = {
enable = true;
selector = dkimSelector;
keyPath = dkimKeyDirectory;
domains = "csl:${builtins.concatStringsSep "," domains}";
configFile = pkgs.writeText "opendkim.conf" (''
Canonicalization relaxed/simple
UMask 0002
Socket ${dkim.socket}
KeyTable file:${keyTable}
SigningTable file:${signingTable}
'');
};
users.users = optionalAttrs (config.services.postfix.user == "postfix") {
postfix.extraGroups = [ "${dkimGroup}" ];
};
systemd.services.opendkim = {
preStart = lib.mkForce createAllCerts;
serviceConfig = {
ExecStart = lib.mkForce "${pkgs.opendkim}/bin/opendkim ${escapeShellArgs args}";
PermissionsStartOnly = lib.mkForce false;
};
};
systemd.tmpfiles.rules = [
"d '${dkimKeyDirectory}' - ${dkimUser} ${dkimGroup} - -"
];
};
}
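
opendkim-genkey above leaves the public half of each key in a <domain>.mail.txt file under the key directory; that text is what the mail._domainkey TXT records in services/mail/dns.nix publish (there via tf.variables). A hedged sketch of such a record with a placeholder key value:

{ ... }: {
  # Illustrative only: the real records take the key text from
  # tf.variables."mail-domainkey-<domain>" rather than inlining it like this.
  deploy.tf.dns.records.example_mail_domainkey = {
    zone = "kittywit.ch.";
    domain = "mail._domainkey";
    txt.value = "v=DKIM1; k=rsa; p=<public key from kittywit.ch.mail.txt>";
  };
}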

services/mail/postfix.nix Normal file

@@ -0,0 +1,220 @@
{ pkgs, lib, config, tf, ... }:
let
publicCert = "public_${config.networking.hostName}";
ldaps = "ldaps://auth.${config.network.dns.domain}:636";
virtualRegex = pkgs.writeText "virtual-regex" ''
/^kat\.[^@.]+@kittywit\.ch$/ kat@kittywit.ch
/^kat\.[^@.]+@dork\.dev$/ kat@kittywit.ch
/^arc\.[^@.]+@kittywit\.ch$/ arc@kittywit.ch
/^arc\.[^@.]+@dork\.dev$/ arc@kittywit.ch
'';
helo_access = pkgs.writeText "helo_access" ''
${if tf.state.enable then config.network.addresses.public.nixos.ipv4.selfaddress else ""} REJECT Get lost - you're lying about who you are
${if tf.state.enable then config.network.addresses.public.nixos.ipv6.selfaddress else ""} REJECT Get lost - you're lying about who you are
kittywit.ch REJECT Get lost - you're lying about who you are
dork.dev REJECT Get lost - you're lying about who you are
'';
in {
kw.secrets.variables."postfix-ldap-password" = {
path = "services/dovecot";
field = "password";
};
secrets.files = {
domains-ldap = {
text = ''
server_host = ${ldaps}
search_base = dc=domains,dc=mail,dc=kittywit,dc=ch
query_filter = (&(dc=%s)(objectClass=mailDomain))
result_attribute = postfixTransport
bind = yes
version = 3
bind_dn = cn=dovecot,dc=mail,dc=kittywit,dc=ch
bind_pw = ${tf.variables.postfix-ldap-password.ref}
scope = one
'';
owner = "postfix";
group = "postfix";
};
accountsmap-ldap = {
text = ''
server_host = ${ldaps}
search_base = ou=users,dc=kittywit,dc=ch
query_filter = (&(objectClass=mailAccount)(|(uid=%s)(mail=%s)))
result_attribute = mail
version = 3
bind = yes
bind_dn = cn=dovecot,dc=mail,dc=kittywit,dc=ch
bind_pw = ${tf.variables.postfix-ldap-password.ref}
'';
owner = "postfix";
group = "postfix";
};
accountsmap-services-ldap = {
text = ''
server_host = ${ldaps}
search_base = ou=services,dc=kittywit,dc=ch
query_filter = (&(objectClass=mailAccount)(|(uid=%s)(mail=%s)))
result_attribute = mail
version = 3
bind = yes
bind_dn = cn=dovecot,dc=mail,dc=kittywit,dc=ch
bind_pw = ${tf.variables.postfix-ldap-password.ref}
'';
owner = "postfix";
group = "postfix";
};
aliases-ldap = {
text = ''
server_host = ${ldaps}
search_base = dc=aliases,dc=mail,dc=kittywit,dc=ch
query_filter = (&(objectClass=mailAlias)(mail=%s))
result_attribute = maildrop
version = 3
bind = yes
bind_dn = cn=dovecot,dc=mail,dc=kittywit,dc=ch
bind_pw = ${tf.variables.postfix-ldap-password.ref}
'';
owner = "postfix";
group = "postfix";
};
};
services.postfix = {
enable = true;
enableSubmission = true;
hostname = config.network.addresses.public.domain;
domain = config.network.dns.domain;
masterConfig."465" = {
type = "inet";
private = false;
command = "smtpd";
args = [
"-o smtpd_client_restrictions=permit_sasl_authenticated,reject"
"-o syslog_name=postfix/smtps"
"-o smtpd_tls_wrappermode=yes"
"-o smtpd_sasl_auth_enable=yes"
"-o smtpd_tls_security_level=none"
"-o smtpd_reject_unlisted_recipient=no"
"-o smtpd_recipient_restrictions="
"-o smtpd_relay_restrictions=permit_sasl_authenticated,reject"
"-o milter_macro_daemon_name=ORIGINATING"
];
};
mapFiles."virtual-regex" = virtualRegex;
mapFiles."helo_access" = helo_access;
extraConfig = ''
smtp_bind_address = ${if tf.state.enable then tf.resources.${config.networking.hostName}.getAttr "private_ip" else ""}
smtp_bind_address6 = ${if tf.state.enable then config.network.addresses.public.nixos.ipv6.selfaddress else ""}
mailbox_transport = lmtp:unix:private/dovecot-lmtp
masquerade_domains = ldap:${config.secrets.files.domains-ldap.path}
virtual_mailbox_domains = ldap:${config.secrets.files.domains-ldap.path}
virtual_alias_maps = ldap:${config.secrets.files.accountsmap-ldap.path},ldap:${config.secrets.files.accountsmap-services-ldap.path},ldap:${config.secrets.files.aliases-ldap.path},regexp:/var/lib/postfix/conf/virtual-regex
virtual_transport = lmtp:unix:private/dovecot-lmtp
smtpd_milters = unix:/run/opendkim/opendkim.sock,unix:/run/rspamd/rspamd-milter.sock
non_smtpd_milters = unix:/run/opendkim/opendkim.sock
milter_protocol = 6
milter_default_action = accept
milter_mail_macros = i {mail_addr} {client_addr} {client_name} {auth_type} {auth_authen} {auth_author} {mail_addr} {mail_host} {mail_mailer}
# bigger attachment size
mailbox_size_limit = 202400000
message_size_limit = 51200000
smtpd_helo_required = yes
smtpd_delay_reject = yes
strict_rfc821_envelopes = yes
# send Limit
smtpd_error_sleep_time = 1s
smtpd_soft_error_limit = 10
smtpd_hard_error_limit = 20
smtpd_use_tls = yes
smtp_tls_note_starttls_offer = yes
smtpd_tls_security_level = may
smtpd_tls_auth_only = yes
smtpd_tls_cert_file = /var/lib/acme/${publicCert}/full.pem
smtpd_tls_key_file = /var/lib/acme/${publicCert}/key.pem
smtpd_tls_CAfile = /var/lib/acme/${publicCert}/fullchain.pem
smtpd_tls_dh512_param_file = ${config.security.dhparams.params.postfix512.path}
smtpd_tls_dh1024_param_file = ${config.security.dhparams.params.postfix2048.path}
smtpd_tls_session_cache_database = btree:''${data_directory}/smtpd_scache
smtpd_tls_mandatory_protocols = !SSLv2,!SSLv3,!TLSv1,!TLSv1.1
smtpd_tls_protocols = !SSLv2,!SSLv3,!TLSv1,!TLSv1.1
smtpd_tls_mandatory_ciphers = medium
tls_medium_cipherlist = AES128+EECDH:AES128+EDH
# authentication
smtpd_sasl_auth_enable = yes
smtpd_sasl_local_domain = $mydomain
smtpd_sasl_security_options = noanonymous
smtpd_sasl_tls_security_options = $smtpd_sasl_security_options
smtpd_sasl_type = dovecot
smtpd_sasl_path = /var/lib/postfix/queue/private/auth
smtpd_relay_restrictions = permit_mynetworks,
permit_sasl_authenticated,
defer_unauth_destination
smtpd_client_restrictions = permit_mynetworks,
permit_sasl_authenticated,
reject_invalid_hostname,
reject_unknown_client,
permit
smtpd_helo_restrictions = permit_mynetworks,
permit_sasl_authenticated,
reject_unauth_pipelining,
reject_non_fqdn_hostname,
reject_invalid_hostname,
warn_if_reject reject_unknown_hostname,
permit
smtpd_recipient_restrictions = permit_mynetworks,
permit_sasl_authenticated,
reject_non_fqdn_sender,
reject_non_fqdn_recipient,
reject_non_fqdn_hostname,
reject_invalid_hostname,
reject_unknown_sender_domain,
reject_unknown_recipient_domain,
reject_unknown_client_hostname,
reject_unauth_pipelining,
reject_unknown_client,
permit
smtpd_sender_restrictions = permit_mynetworks,
permit_sasl_authenticated,
reject_non_fqdn_sender,
reject_unknown_sender_domain,
reject_unknown_client_hostname,
reject_unknown_address
smtpd_etrn_restrictions = permit_mynetworks, reject
smtpd_data_restrictions = reject_unauth_pipelining, reject_multi_recipient_bounce, permit
'';
};
systemd.services.postfix.wants = [ "openldap.service" "acme-${publicCert}.service" ];
systemd.services.postfix.after = [ "openldap.service" "acme-${publicCert}.service" "network.target" ];
security.dhparams = {
enable = true;
params.postfix512.bits = 512;
params.postfix2048.bits = 1024;
};
networking.firewall.allowedTCPPorts = [
25 # smtp
465 # smtps
587 # submission
];
}

View file

@ -0,0 +1,25 @@
{ config, lib, ... }: with lib; {
services.roundcube = {
enable = true;
hostName = "mail.${config.network.dns.domain}";
extraConfig = ''
$config['default_host'] = "ssl://${config.network.addresses.public.domain}";
$config['smtp_server'] = "ssl://${config.network.addresses.public.domain}";
$config['smtp_port'] = "465";
$config['product_name'] = "kittywitch mail";
'';
};
services.nginx.virtualHosts."mail.${config.network.dns.domain}" = {
useACMEHost = "dovecot_domains";
enableACME = mkForce false;
};
users.users.nginx.extraGroups = singleton "postfix";
deploy.tf.dns.records.services_roundcube = {
inherit (config.network.dns) zone;
domain = "mail";
cname = { inherit (config.network.addresses.public) target; };
};
}

85
services/mail/rspamd.nix Normal file
View file

@ -0,0 +1,85 @@
{ config, pkgs, lib, ... }:
let
postfixCfg = config.services.postfix;
rspamdCfg = config.services.rspamd;
rspamdSocket = "rspamd.service";
in
{
config = {
services.rspamd = {
enable = true;
locals = {
"milter_headers.conf" = { text = ''
extended_spam_headers = yes;
''; };
"redis.conf" = { text = ''
servers = "127.0.0.1:${toString config.services.redis.servers.rspamd.port}";
''; };
"classifier-bayes.conf" = { text = ''
cache {
backend = "redis";
}
''; };
"dkim_signing.conf" = { text = ''
# Disable outbound email signing; we use opendkim for this
enabled = false;
''; };
};
overrides = {
"milter_headers.conf" = {
text = ''
extended_spam_headers = true;
'';
};
};
workers.rspamd_proxy = {
type = "rspamd_proxy";
bindSockets = [{
socket = "/run/rspamd/rspamd-milter.sock";
mode = "0664";
}];
count = 1; # Do not spawn too many processes of this type
extraConfig = ''
milter = yes; # Enable milter mode
timeout = 120s; # Needed for Milter usually
upstream "local" {
default = yes; # Self-scan upstreams are always default
self_scan = yes; # Enable self-scan
}
'';
};
workers.controller = {
type = "controller";
count = 1;
bindSockets = [{
socket = "/run/rspamd/worker-controller.sock";
mode = "0666";
}];
includes = [];
extraConfig = ''
static_dir = "''${WWWDIR}"; # Serve the web UI static assets
'';
};
};
services.redis.servers.rspamd.enable = true;
systemd.services.rspamd = {
requires = [ "redis.service" ];
after = [ "redis.service" ];
};
systemd.services.postfix = {
after = [ rspamdSocket ];
requires = [ rspamdSocket ];
};
users.extraUsers.${postfixCfg.user}.extraGroups = [ rspamdCfg.group ];
};
}

82
services/mail/sogo.nix Normal file
View file

@ -0,0 +1,82 @@
{ config, tf, lib, ... }: with lib; {
kw.secrets.variables.sogo-ldap = {
path = "secrets/sogo";
field = "password";
};
secrets.files.sogo-ldap = {
text = ''
${tf.variables.sogo-ldap.ref}
'';
owner = "sogo";
group = "sogo";
};
services.nginx.virtualHosts."mail.${config.network.dns.domain}" = {
useACMEHost = "dovecot_domains";
enableACME = mkForce false;
forceSSL = true;
};
users.users.nginx.extraGroups = singleton "postfix";
deploy.tf.dns.records.services_sogo = {
inherit (config.network.dns) zone;
domain = "mail";
cname = { inherit (config.network.addresses.public) target; };
};
services.postgresql = {
enable = true;
ensureDatabases = [ "sogo" ];
ensureUsers = [{
name = "sogo";
ensurePermissions."DATABASE sogo" = "ALL PRIVILEGES";
}];
};
services.memcached = {
enable = true;
};
services.sogo = {
enable = true;
timezone = "Europe/London";
vhostName = "mail.${config.network.dns.domain}";
extraConfig = ''
SOGoMailDomain = "kittywit.ch";
SOGoPageTitle = "kittywitch";
SOGoProfileURL =
"postgresql://sogo@/sogo/sogo_user_profile";
OCSFolderInfoURL =
"postgresql://sogo@/sogo/sogo_folder_info";
OCSSessionsFolderURL =
"postgresql://sogo@/sogo/sogo_sessions_folder";
SOGoMailingMechanism = "smtp";
SOGoForceExternalLoginWithEmail = YES;
SOGoSMTPAuthenticationType = PLAIN;
SOGoSMTPServer = "smtps://${config.network.addresses.public.domain}:465";
SOGoIMAPServer = "imaps://${config.network.addresses.public.domain}:993";
SOGoUserSources = (
{
type = ldap;
CNFieldName = cn;
IDFieldName = uid;
UIDFieldName = uid;
baseDN = "ou=users,dc=kittywit,dc=ch";
bindDN = "cn=sogo,ou=services,dc=kittywit,dc=ch";
bindFields = (uid,mail);
bindPassword = "LDAP_BINDPW";
canAuthenticate = YES;
displayName = "kittywitch Org";
hostname = "ldaps://auth.kittywit.ch:636";
id = public;
isAddressBook = YES;
}
);
'';
configReplaces = {
LDAP_BINDPW = config.secrets.files.sogo-ldap.path;
};
};
}

View file

@ -0,0 +1,888 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Benjamin Jemlich <pcgod@user.sourceforge.net>
# Copyright (C) 2011 Nathaniel Kofalt <nkofalt@users.sourceforge.net>
# Copyright (C) 2013 Stefan Hacker <dd0t@users.sourceforge.net>
# Copyright (C) 2014 Dominik George <nik@naturalnet.de>
# Copyright (C) 2020 Andreas Valder <a.valder@syseleven.de>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the Mumble Developers nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This script will let you authenticate Murmur against an LDAP tree.
# Note that you should have a reasonable understanding of LDAP before trying to use this script.
#
# Unfortunately, LDAP is a rather complex concept / protocol / software suite.
# So if you're not already experienced with LDAP, the Mumble team may be unable to assist you.
# Unless you already have an existing LDAP tree, you may want to authenticate your users another way.
# However, LDAP has the advantage of being extremely scalable, flexible, and resilient.
# This is probably a decent choice for larger-scale deployments (code review this script first!)
#
# There are some excellent resources to get you started:
# Wikipedia article: http://en.wikipedia.org/wiki/LDAP
# OpenLDAP intro: http://www.openldap.org/doc/admin24/intro.html
# LDAP on Debian: http://techpubs.spinlocksolutions.com/dklar/ldap.html
# IRC Chat room: Channel #ldap on irc.freenode.net
#
# Configuring this to hit LDAP correctly can be a little tricky.
# This is largely due to the numerous ways you can store user information in LDAP.
# The example configuration is probably not the best way to do things; it's just a simple setup.
#
# The group-membership code will have to be expanded if you want multiple groups allowed, etc.
# This is just a simple example.
#
# In this configuration, I use a really simple groupOfUniqueNames and OU of inetOrgPersons.
# The tree already uses the "uid" attribute for usernames, so roomNumber was used to store UID.
# Note that mumble needs a username, password, and unique UID for each user.
# You can definitely set things up differently; this is a bit of a kludge.
#
# Here is the tree layout used in the example config:
# dc=example,dc=com (organization)
# ou=Groups (organizationalUnit)
# cn=mumble (groupOfUniqueNames)
# "uniqueMember: uid=user1,dc=example,dc=com"
# "uniqueMember: uid=user2,dc=example,dc=com"
# ou=Users (organizationalUnit)
# uid=user1 (inetOrgPerson)
# "userPassword: {SHA}password-hash"
# "displayName: User One"
# "roomNumber: 1"
# uid=user2 (inetOrgPerson)
# "userPassword: {SHA}password-hash"
# "displayName: User Two"
# "roomNumber: 2"
# uid=user3 (inetOrgPerson)
# "userPassword: {SHA}password-hash"
# "displayName: User Three"
# "roomNumber: 3"
#
# How the script operates:
# First, the script will attempt to "bind" with the user's credentials.
# If the bind fails, the username/password combination is rejected.
# Second, it optionally checks for a group membership.
# With groups off, all three users are let in; with groups on, only user1 & user2 are allowed.
# Finally, it optionally logs in the user with a separate "display_attr" name.
# This allows user1 to log in with the USERNAME "user1" but is displayed in mumble as "User One".
#
# If you use the bind_dn option, the script will bind with the specified DN
# and check for the existence of user and (optionally) the group membership
# before it binds with the username/password. This allows you to use a server
# which only allows authentication by end users without any search
# permissions. It also allows you to set the reject_on_miss option to false
# and let login IDs not found in LDAP fall-through to an alternate
# authentication scheme.
#
# Requirements:
# * python >=3.8 (maybe 3.6 is enough but it wasn't tested) and the following python modules:
# * ice-python
# * python-ldap
# * daemon (when run as a daemon)
# If you are using Ubuntu/Debian (only Ubuntu 20.04 was tested) the following packages provide these:
# * python3
# * python3-zeroc-ice
# * python3-ldap
# * python3-daemon
# * zeroc-ice-slice
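#
# For orientation only: a minimal LDAPauth.ini could look roughly like the sketch
# below. Every value here is a placeholder, not a recommendation; the option names
# simply mirror the `default` table defined further down in this file.
#
# [user]
# id_offset = 1000000000
# reject_on_miss = False
#
# [ice]
# host = 127.0.0.1
# port = 6502
# slice = Murmur.ice
# secret = changeme
# watchdog = 30
#
# [ldap]
# ldap_uri = ldaps://ldap.example.org:636
# bind_dn = cn=murmur,ou=services,dc=example,dc=org
# bind_pass = changeme
# users_dn = ou=Users,dc=example,dc=org
# discover_dn = False
# username_attr = uid
# number_attr = roomNumber
# display_attr = displayName
#
# [murmur]
# servers =
#
# [log]
# level = 10
# file = LDAPauth.log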
import sys
import ldap
import Ice
import _thread
import urllib.request, urllib.error, urllib.parse
import logging
import configparser
from threading import Timer
from optparse import OptionParser
from logging import (debug,
info,
warning,
error,
critical,
exception,
getLogger)
def x2bool(s):
"""Helper function to convert strings from the config to bool"""
if isinstance(s, bool):
return s
elif isinstance(s, str):
return s.lower() in ['1', 'true']
raise ValueError()
#
#--- Default configuration values
#
cfgfile = 'LDAPauth.ini'
default = { 'ldap':(('ldap_uri', str, 'ldap://127.0.0.1'),
('bind_dn', str, ''),
('bind_pass', str, ''),
('users_dn', str, 'ou=Users,dc=example,dc=org'),
('discover_dn', x2bool, True),
('username_attr', str, 'uid'),
('number_attr', str, 'RoomNumber'),
('display_attr', str, 'displayName'),
('group_dn', str, ''),
('group_attr', str, ''),
('provide_info', x2bool, False),
('mail_attr', str, 'mail'),
('provide_users', x2bool, False),
('use_start_tls', x2bool, False)),
'user':(('id_offset', int, 1000000000),
('reject_on_error', x2bool, True),
('reject_on_miss', x2bool, True)),
'ice':(('host', str, '127.0.0.1'),
('port', int, 6502),
('slice', str, 'Murmur.ice'),
('secret', str, ''),
('watchdog', int, 30)),
'iceraw':None,
'murmur':(('servers', lambda x:list(map(int, x.split(','))), []),),
'glacier':(('enabled', x2bool, False),
('user', str, 'ldapauth'),
('password', str, 'secret'),
('host', str, 'localhost'),
('port', int, '4063')),
'log':(('level', int, logging.DEBUG),
('file', str, 'LDAPauth.log'))}
#
#--- Helper classes
#
class config(object):
"""
Small abstraction for config loading
"""
def __init__(self, filename = None, default = None):
if not filename or not default: return
cfg = configparser.ConfigParser()
cfg.optionxform = str
cfg.read(filename)
for h,v in default.items():
if not v:
# Output this whole section as a list of raw key/value tuples
try:
self.__dict__[h] = cfg.items(h)
except configparser.NoSectionError:
self.__dict__[h] = []
else:
self.__dict__[h] = config()
for name, conv, vdefault in v:
try:
self.__dict__[h].__dict__[name] = conv(cfg.get(h, name))
except (ValueError, configparser.NoSectionError, configparser.NoOptionError):
self.__dict__[h].__dict__[name] = vdefault
def do_main_program():
#
#--- Authenticator implementation
# All of this has to go in here so we can correctly daemonize the tool
# without losing the file descriptors opened by the Ice module
slicedir = Ice.getSliceDir()
if not slicedir:
slicedir = ["-I/usr/share/Ice/slice", "-I/usr/share/slice"]
else:
slicedir = ['-I' + slicedir]
Ice.loadSlice('', slicedir + [cfg.ice.slice])
import Murmur
class LDAPAuthenticatorApp(Ice.Application):
def run(self, args):
self.shutdownOnInterrupt()
if not self.initializeIceConnection():
return 1
if cfg.ice.watchdog > 0:
self.failedWatch = True
self.checkConnection()
# Serve till we are stopped
self.communicator().waitForShutdown()
self.watchdog.cancel()
if self.interrupted():
warning('Caught interrupt, shutting down')
return 0
def initializeIceConnection(self):
"""
Establishes the two-way Ice connection and adds the authenticator to the
configured servers
"""
ice = self.communicator()
if cfg.ice.secret:
debug('Using shared ice secret')
ice.getImplicitContext().put("secret", cfg.ice.secret)
elif not cfg.glacier.enabled:
warning('Consider using an ice secret to improve security')
if cfg.glacier.enabled:
#info('Connecting to Glacier2 server (%s:%d)', glacier_host, glacier_port)
error('Glacier support not implemented yet')
#TODO: Implement this
info('Connecting to Ice server (%s:%d)', cfg.ice.host, cfg.ice.port)
base = ice.stringToProxy('Meta:tcp -h %s -p %d' % (cfg.ice.host, cfg.ice.port))
self.meta = Murmur.MetaPrx.uncheckedCast(base)
adapter = ice.createObjectAdapterWithEndpoints('Callback.Client', 'tcp -h %s' % cfg.ice.host)
adapter.activate()
metacbprx = adapter.addWithUUID(metaCallback(self))
self.metacb = Murmur.MetaCallbackPrx.uncheckedCast(metacbprx)
authprx = adapter.addWithUUID(LDAPAuthenticator())
self.auth = Murmur.ServerUpdatingAuthenticatorPrx.uncheckedCast(authprx)
return self.attachCallbacks()
def attachCallbacks(self, quiet = False):
"""
Attaches all callbacks for meta and authenticators
"""
# Ice.ConnectionRefusedException
#debug('Attaching callbacks')
try:
if not quiet: info('Attaching meta callback')
self.meta.addCallback(self.metacb)
for server in self.meta.getBootedServers():
if not cfg.murmur.servers or server.id() in cfg.murmur.servers:
if not quiet: info('Setting authenticator for virtual server %d', server.id())
server.setAuthenticator(self.auth)
except (Murmur.InvalidSecretException, Ice.UnknownUserException, Ice.ConnectionRefusedException) as e:
if isinstance(e, Ice.ConnectionRefusedException):
error('Server refused connection')
elif isinstance(e, Murmur.InvalidSecretException) or \
isinstance(e, Ice.UnknownUserException) and (e.unknown == 'Murmur::InvalidSecretException'):
error('Invalid ice secret')
else:
# We do not actually want to handle this one, re-raise it
raise e
self.connected = False
return False
self.connected = True
return True
def checkConnection(self):
"""
Tries to reapply all callbacks to make sure the authenticator
survives server restarts and disconnects.
"""
#debug('Watchdog run')
try:
if not self.attachCallbacks(quiet = not self.failedWatch):
self.failedWatch = True
else:
self.failedWatch = False
except Ice.Exception as e:
error('Failed connection check, will retry in next watchdog run (%ds)', cfg.ice.watchdog)
debug(str(e))
self.failedWatch = True
# Renew the timer
self.watchdog = Timer(cfg.ice.watchdog, self.checkConnection)
self.watchdog.start()
def checkSecret(func):
"""
Decorator that checks whether the server transmitted the right secret
if a secret is supposed to be used.
"""
if not cfg.ice.secret:
return func
def newfunc(*args, **kws):
if 'current' in kws:
current = kws["current"]
else:
current = args[-1]
if not current or 'secret' not in current.ctx or current.ctx['secret'] != cfg.ice.secret:
error('Server transmitted invalid secret. Possible injection attempt.')
raise Murmur.InvalidSecretException()
return func(*args, **kws)
return newfunc
def fortifyIceFu(retval = None, exceptions = (Ice.Exception,)):
"""
Decorator that catches exceptions, logs them and returns a safe retval
value. This helps prevent the authenticator from getting stuck in
critical code paths. Only exceptions that are instances of classes
given in the exceptions list are not caught.
The default is to catch all non-Ice exceptions.
"""
def newdec(func):
def newfunc(*args, **kws):
try:
return func(*args, **kws)
except Exception as e:
catch = True
for ex in exceptions:
if isinstance(e, ex):
catch = False
break
if catch:
critical('Unexpected exception caught')
exception(e)
return retval
raise
return newfunc
return newdec
class metaCallback(Murmur.MetaCallback):
def __init__(self, app):
Murmur.MetaCallback.__init__(self)
self.app = app
@fortifyIceFu()
@checkSecret
def started(self, server, current = None):
"""
This function is called when a virtual server is started
and makes sure an authenticator gets attached if needed.
"""
if not cfg.murmur.servers or server.id() in cfg.murmur.servers:
info('Setting authenticator for virtual server %d', server.id())
try:
server.setAuthenticator(app.auth)
# Apparently this server was restarted without us noticing
except (Murmur.InvalidSecretException, Ice.UnknownUserException) as e:
if hasattr(e, "unknown") and e.unknown != "Murmur::InvalidSecretException":
# Special handling for Murmur 1.2.2 servers with invalid slice files
raise e
error('Invalid ice secret')
return
else:
debug('Virtual server %d got started', server.id())
@fortifyIceFu()
@checkSecret
def stopped(self, server, current = None):
"""
This function is called when a virtual server is stopped
"""
if self.app.connected:
# Only try to output the server id if we think we are still connected to prevent
# flooding of our thread pool
try:
if not cfg.murmur.servers or server.id() in cfg.murmur.servers:
info('Authenticated virtual server %d got stopped', server.id())
else:
debug('Virtual server %d got stopped', server.id())
return
except Ice.ConnectionRefusedException:
self.app.connected = False
debug('Server shutdown stopped a virtual server')
if cfg.user.reject_on_error: # Python 2.4 compat
authenticateFortifyResult = (-1, None, None)
else:
authenticateFortifyResult = (-2, None, None)
class LDAPAuthenticator(Murmur.ServerUpdatingAuthenticator):
def __init__(self):
Murmur.ServerUpdatingAuthenticator.__init__(self)
self.name_uid_cache = dict()
@fortifyIceFu(authenticateFortifyResult)
@checkSecret
def authenticate(self, name, pw, certlist, certhash, strong, current = None):
"""
This function is called to authenticate a user
"""
# Search for the user in the database
FALL_THROUGH = -2
AUTH_REFUSED = -1
# SuperUser is a special login.
if name == 'SuperUser':
debug('Forced fall through for SuperUser')
return (FALL_THROUGH, None, None)
# Otherwise, let's check the LDAP server.
uid = None
if cfg.ldap.use_start_tls:
# try StartTLS: global options
debug('use_start_tls is set, setting global option TLS_REQCERT = never')
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
ldap_trace = 0 # Change to 1 for more verbose trace
ldap_conn = ldap.initialize(cfg.ldap.ldap_uri, ldap_trace)
if cfg.ldap.use_start_tls:
# try StartTLS: connection specific options
debug('use_start_tls is set, setting connection options X_TLS_*')
ldap_conn.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
ldap_conn.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND)
ldap_conn.set_option(ldap.OPT_X_TLS_DEMAND, True)
try:
ldap_conn.start_tls_s()
except Exception as e:
warning('could not initiate StartTLS, e = ' + str(e))
return (AUTH_REFUSED, None, None)
if cfg.ldap.bind_dn:
# Bind the functional account to search the directory.
bind_dn = cfg.ldap.bind_dn
bind_pass = cfg.ldap.bind_pass
try:
debug('try to connect to ldap (bind_dn will be used)')
ldap_conn.bind_s(bind_dn, bind_pass)
except ldap.INVALID_CREDENTIALS:
ldap_conn.unbind()
warning('Invalid credentials for bind_dn=' + bind_dn)
return (AUTH_REFUSED, None, None)
elif cfg.ldap.discover_dn:
# Use anonymous bind to discover the DN
try:
ldap_conn.bind_s()
except ldap.INVALID_CREDENTIALS:
ldap_conn.unbind()
warning('Failed anonymous bind for discovering DN')
return (AUTH_REFUSED, None, None)
else:
# Prevent anonymous authentication.
if not pw:
warning("No password supplied for user " + name)
return (AUTH_REFUSED, None, None)
# Bind the user account to search the directory.
bind_dn = "%s=%s,%s" % (cfg.ldap.username_attr, name, cfg.ldap.users_dn)
bind_pass = pw
try:
ldap_conn.bind_s(bind_dn, bind_pass)
except ldap.INVALID_CREDENTIALS:
ldap_conn.unbind()
warning('User ' + name + ' failed with invalid credentials')
return (AUTH_REFUSED, None, None)
# Search for the user.
name_split = name.split(".")
username_to_try = name_split[0] if "." in name else name
device = name_split[1] if "." in name else ""
res = ldap_conn.search_s(cfg.ldap.users_dn, ldap.SCOPE_SUBTREE, '(%s=%s)' % (cfg.ldap.username_attr, username_to_try), [cfg.ldap.number_attr, cfg.ldap.display_attr])
if len(res) == 0:
warning("User " + username_to_try + " not found, input was " + name)
if cfg.user.reject_on_miss:
return (AUTH_REFUSED, None, None)
else:
return (FALL_THROUGH, None, None)
match = res[0] #Only interested in the first result, as there should only be one match
# Parse the user information.
uid = int(match[1][cfg.ldap.number_attr][0])
displayName = match[1][cfg.ldap.display_attr][0].decode()
user_dn = match[0]
debug('User match found, display "' + displayName + '" with UID ' + repr(uid))
# Optionally check groups.
if cfg.ldap.group_dn != "" :
debug('Checking group membership for ' + name)
#Search for user in group
res = ldap_conn.search_s(cfg.ldap.group_dn, ldap.SCOPE_SUBTREE, '(%s=%s)' % (cfg.ldap.group_attr, user_dn), [cfg.ldap.number_attr, cfg.ldap.display_attr])
# Check if the user is a member of the group
if len(res) < 1:
debug('User ' + name + ' failed with no group membership')
return (AUTH_REFUSED, None, None)
# Second bind to test user credentials if using bind_dn or discover_dn.
if cfg.ldap.bind_dn or cfg.ldap.discover_dn:
# Prevent anonymous authentication.
if not pw:
warning("No password supplied for user " + name)
return (AUTH_REFUSED, None, None)
bind_dn = user_dn
bind_pass = pw
try:
ldap_conn.bind_s(bind_dn, bind_pass)
except ldap.INVALID_CREDENTIALS:
ldap_conn.unbind()
warning('User ' + name + ' failed with wrong password')
return (AUTH_REFUSED, None, None)
# Unbind and close connection.
ldap_conn.unbind()
# If we get here, the login is correct.
# Add the user/id combo to cache, then accept:
self.name_uid_cache[displayName] = uid
debug("Login accepted for " + name)
if device != "":
displayName = f"{displayName} ({device})"
return (uid + cfg.user.id_offset, displayName, [])
@fortifyIceFu((False, None))
@checkSecret
def getInfo(self, id, current = None):
"""
Gets called to fetch user specific information
"""
if not cfg.ldap.provide_info:
# We do not expose any additional information so always fall through
debug('getInfo for %d -> denied', id)
return (False, None)
ldap_conn = ldap.initialize(cfg.ldap.ldap_uri, 0)
# Bind if configured, else do explicit anonymous bind
if cfg.ldap.bind_dn and cfg.ldap.bind_pass:
ldap_conn.simple_bind_s(cfg.ldap.bind_dn, cfg.ldap.bind_pass)
else:
ldap_conn.simple_bind_s()
name = self.idToName(id, current)
res = ldap_conn.search_s(cfg.ldap.users_dn,
ldap.SCOPE_SUBTREE,
'(%s=%s)' % (cfg.ldap.display_attr, name),
[cfg.ldap.display_attr,
cfg.ldap.mail_attr
])
#If user found, return info
if len(res) == 1:
info = {}
if cfg.ldap.mail_attr in res[0][1]:
info[Murmur.UserInfo.UserEmail] = res[0][1][cfg.ldap.mail_attr][0].decode()
debug('getInfo %s -> %s', name, repr(info))
return (True, info)
else:
debug('getInfo %s -> ?', name)
return (False, None)
@fortifyIceFu(-2)
@checkSecret
def nameToId(self, name, current = None):
"""
Gets called to get the id for a given username
"""
FALL_THROUGH = -2
if name == 'SuperUser':
debug('nameToId SuperUser -> forced fall through')
return FALL_THROUGH
if name in self.name_uid_cache:
uid = self.name_uid_cache[name] + cfg.user.id_offset
debug("nameToId %s (cache) -> %d", name, uid)
return uid
ldap_conn = ldap.initialize(cfg.ldap.ldap_uri, 0)
# Bind if configured, else do explicit anonymous bind
if cfg.ldap.bind_dn and cfg.ldap.bind_pass:
ldap_conn.simple_bind_s(cfg.ldap.bind_dn, cfg.ldap.bind_pass)
else:
ldap_conn.simple_bind_s()
res = ldap_conn.search_s(cfg.ldap.users_dn, ldap.SCOPE_SUBTREE, '(%s=%s)' % (cfg.ldap.display_attr, name), [cfg.ldap.number_attr])
#If user found, return the ID
if len(res) == 1:
uid = int(res[0][1][cfg.ldap.number_attr][0]) + cfg.user.id_offset
debug('nameToId %s -> %d', name, uid)
else:
debug('nameToId %s -> ?', name)
return FALL_THROUGH
return uid
@fortifyIceFu("")
@checkSecret
def idToName(self, id, current = None):
"""
Gets called to get the username for a given id
"""
FALL_THROUGH = ""
# Make sure the ID is in our range and transform it to the actual LDAP user id
if id < cfg.user.id_offset:
debug('idToName %d -> fall through', id)
return FALL_THROUGH
ldapid = id - cfg.user.id_offset
for name, uid in self.name_uid_cache.items():
if uid == ldapid:
if name == 'SuperUser':
debug('idToName %d -> "SuperUser" caught', id)
return FALL_THROUGH
debug('idToName %d -> "%s"', id, name)
return name
debug('idToName %d -> ?', id)
return FALL_THROUGH
@fortifyIceFu("")
@checkSecret
def idToTexture(self, id, current = None):
"""
Gets called to get the corresponding texture for a user
"""
FALL_THROUGH = ""
debug('idToTexture %d -> fall through', id)
return FALL_THROUGH
@fortifyIceFu(-2)
@checkSecret
def registerUser(self, name, current = None):
"""
Gets called when the server is asked to register a user.
"""
FALL_THROUGH = -2
debug('registerUser "%s" -> fall through', name)
return FALL_THROUGH
@fortifyIceFu(-1)
@checkSecret
def unregisterUser(self, id, current = None):
"""
Gets called when the server is asked to unregister a user.
"""
FALL_THROUGH = -1
# Return -1 to fall through to internal server database, we will not modify the LDAP directory
# but we can make murmur delete all additional information it got this way.
debug('unregisterUser %d -> fall through', id)
return FALL_THROUGH
@fortifyIceFu({})
@checkSecret
def getRegisteredUsers(self, filter, current = None):
"""
Returns a list of usernames in the LDAP directory which contain
filter as a substring.
"""
FALL_THROUGH = {}
if not cfg.ldap.provide_users:
# Fall through if not configured to provide user list
debug('getRegisteredUsers -> fall through')
return FALL_THROUGH
ldap_conn = ldap.initialize(cfg.ldap.ldap_uri, 0)
# Bind if configured, else do explicit anonymous bind
if cfg.ldap.bind_dn and cfg.ldap.bind_pass:
ldap_conn.simple_bind_s(cfg.ldap.bind_dn, cfg.ldap.bind_pass)
else:
ldap_conn.simple_bind_s()
if filter:
res = ldap_conn.search_s(cfg.ldap.users_dn, ldap.SCOPE_SUBTREE, '(&(uid=*)(%s=*%s*))' % (cfg.ldap.display_attr, filter), [cfg.ldap.number_attr, cfg.ldap.display_attr])
else:
res = ldap_conn.search_s(cfg.ldap.users_dn, ldap.SCOPE_SUBTREE, '(uid=*)', [cfg.ldap.number_attr, cfg.ldap.display_attr])
# Build result dict
users = {}
for dn, attrs in res:
if cfg.ldap.number_attr in attrs and cfg.ldap.display_attr in attrs:
uid = int(attrs[cfg.ldap.number_attr][0]) + cfg.user.id_offset
name = attrs[cfg.ldap.display_attr][0]
users[uid] = name
debug('getRegisteredUsers %s -> %s', filter, repr(users))
return users
@fortifyIceFu(-1)
@checkSecret
def setInfo(self, id, info, current = None):
"""
Gets called when the server is supposed to save additional information
about a user to its database
"""
FALL_THROUGH = -1
# Return -1 to fall through to the internal server handler. We do not store
# any information in LDAP
debug('setInfo %d -> fall through', id)
return FALL_THROUGH
@fortifyIceFu(-1)
@checkSecret
def setTexture(self, id, texture, current = None):
"""
Gets called when the server is asked to update the user texture of a user
"""
FALL_THROUGH = -1
# We do not store textures in LDAP
debug('setTexture %d -> fall through', id)
return FALL_THROUGH
class CustomLogger(Ice.Logger):
"""
Logger implementation to pipe Ice log messages into
our own log
"""
def __init__(self):
Ice.Logger.__init__(self)
self._log = getLogger('Ice')
def _print(self, message):
self._log.info(message)
def trace(self, category, message):
self._log.debug('Trace %s: %s', category, message)
def warning(self, message):
self._log.warning(message)
def error(self, message):
self._log.error(message)
#
#--- Start of authenticator
#
info('Starting LDAP mumble authenticator')
initdata = Ice.InitializationData()
initdata.properties = Ice.createProperties([], initdata.properties)
for prop, val in cfg.iceraw:
initdata.properties.setProperty(prop, val)
initdata.properties.setProperty('Ice.ImplicitContext', 'Shared')
initdata.properties.setProperty('Ice.Default.EncodingVersion', '1.0')
initdata.logger = CustomLogger()
app = LDAPAuthenticatorApp()
state = app.main(sys.argv[:1], initData = initdata)
info('Shutdown complete')
#
#--- Start of program
#
if __name__ == '__main__':
# Parse commandline options
parser = OptionParser()
parser.add_option('-i', '--ini',
help = 'load configuration from INI', default = cfgfile)
parser.add_option('-v', '--verbose', action='store_true', dest = 'verbose',
help = 'verbose output [default]', default = True)
parser.add_option('-q', '--quiet', action='store_false', dest = 'verbose',
help = 'only error output')
parser.add_option('-d', '--daemon', action='store_true', dest = 'force_daemon',
help = 'run as daemon', default = False)
parser.add_option('-a', '--app', action='store_true', dest = 'force_app',
help = 'do not run as daemon', default = False)
(option, args) = parser.parse_args()
if option.force_daemon and option.force_app:
parser.print_help()
sys.exit(1)
# Load configuration
try:
cfg = config(option.ini, default)
except Exception as e:
print('Fatal error, could not load config file from "%s"' % cfgfile, file=sys.stderr)
sys.exit(1)
# Initialize logger
if cfg.log.file:
try:
logfile = open(cfg.log.file, 'a')
except IOError as e:
#print>>sys.stderr, str(e)
print('Fatal error, could not open logfile "%s"' % cfg.log.file, file=sys.stderr)
sys.exit(1)
else:
logfile = logging.sys.stderr
if option.verbose:
level = cfg.log.level
else:
level = logging.ERROR
logging.basicConfig(level = level,
format='%(asctime)s %(levelname)s %(message)s',
stream = logfile)
# By default, try to run as a daemon. Silently degrade to running as a normal application if this fails
# unless the user explicitly chose one with the -a / -d parameter.
try:
if option.force_app:
raise ImportError # Pretend that we couldn't import the daemon lib
import daemon
except ImportError:
if option.force_daemon:
print('Fatal error, could not daemonize process due to missing "daemon" library, ' \
'please install the missing dependency and restart the authenticator', file=sys.stderr)
sys.exit(1)
do_main_program()
else:
context = daemon.DaemonContext(working_directory = sys.path[0],
stderr = logfile)
context.__enter__()
try:
do_main_program()
finally:
context.__exit__(None, None, None)

View file

@ -0,0 +1,80 @@
{ config, lib, tf, pkgs, ... }: with lib; let
murmurLdapScript = ./LDAPauth.py;
in {
kw.secrets.variables = {
murmur-ldap-pass = {
path = "social/mumble";
field = "ldap";
};
murmur-ice = {
path = "social/mumble";
field = "ice";
};
};
systemd.tmpfiles.rules = [
"v /etc/murmur 0770 murmur murmur"
];
secrets.files.murmur-ldap-ini = {
text = ''
[user]
id_offset = 1000000000
reject_on_error = True
reject_on_miss = False
[ice]
host = 127.0.0.1
port = 6502
slice = /etc/murmur/Murmur.ice
secret = ${tf.variables.murmur-ice.ref}
watchdog = 30
[ldap]
bind_dn = cn=murmur,ou=services,dc=kittywit,dc=ch
bind_pass = ${tf.variables.murmur-ldap-pass.ref}
ldap_uri = ldaps://auth.kittywit.ch:636
users_dn = ou=users,dc=kittywit,dc=ch
discover_dn = false
username_attr = uid
number_attr = uidNumber
display_attr = cn
provide_info = True
mail_attr = mail
provide_users = True
[murmur]
servers =
[log]
level =
file =
[iceraw]
Ice.ThreadPool.Server.Size = 5
'';
owner = "murmur";
group = "murmur";
};
environment.etc."murmur/LDAPauth.ini".source = config.secrets.files.murmur-ldap-ini.path;
systemd.services.murmur-ldap = let
pythonEnv = pkgs.python39.withPackages(ps: with ps; [
ldap
zeroc-ice
python-daemon
]);
in {
after = [ "network.target" "murmur.service" ];
path = with pkgs; [
zeroc-ice
];
serviceConfig = {
User = "murmur";
Group = "murmur";
ExecStart = "${pythonEnv}/bin/python3 ${murmurLdapScript}";
WorkingDirectory = "/etc/murmur/";
};
};
}

154
services/murmur.nix Normal file
View file

@ -0,0 +1,154 @@
{ config, lib, pkgs, tf, ... }:
with lib;
let
cfg = config.services.murmur;
forking = (cfg.logFile != null);
in
{
network.firewall = {
public = {
tcp.ports = singleton 64738;
udp.ports = singleton 64738;
};
};
kw.secrets.variables = {
murmur-password = {
path = "social/mumble";
field = "password";
};
murmur-ice = {
path = "social/mumble";
field = "ice";
};
};
secrets.files.murmur-config = {
text = ''
database=/var/lib/murmur/murmur.sqlite
dbDriver=QSQLITE
autobanAttempts=${toString cfg.autobanAttempts}
autobanTimeframe=${toString cfg.autobanTimeframe}
autobanTime=${toString cfg.autobanTime}
logfile=${optionalString (cfg.logFile != null) cfg.logFile}
${optionalString forking "pidfile=/run/murmur/murmurd.pid"}
welcometext="${cfg.welcometext}"
port=${toString cfg.port}
${if cfg.password == "" then "" else "serverpassword="+cfg.password}
bandwidth=${toString cfg.bandwidth}
users=${toString cfg.users}
textmessagelength=${toString cfg.textMsgLength}
imagemessagelength=${toString cfg.imgMsgLength}
allowhtml=${boolToString cfg.allowHtml}
logdays=${toString cfg.logDays}
bonjour=${boolToString cfg.bonjour}
sendversion=${boolToString cfg.sendVersion}
${if cfg.registerName == "" then "" else "registerName="+cfg.registerName}
${if cfg.registerPassword == "" then "" else "registerPassword="+cfg.registerPassword}
${if cfg.registerUrl == "" then "" else "registerUrl="+cfg.registerUrl}
${if cfg.registerHostname == "" then "" else "registerHostname="+cfg.registerHostname}
certrequired=${boolToString cfg.clientCertRequired}
${if cfg.sslCert == "" then "" else "sslCert="+cfg.sslCert}
${if cfg.sslKey == "" then "" else "sslKey="+cfg.sslKey}
${if cfg.sslCa == "" then "" else "sslCA="+cfg.sslCa}
${cfg.extraConfig}
'';
owner = "murmur";
group = "murmur";
};
# Config to Template
services.murmur = {
hostName = "voice.${config.network.dns.domain}";
bandwidth = 130000;
welcometext = "mew!";
package = pkgs.murmur.override (old: { iceSupport = true; });
password = tf.variables.murmur-password.ref;
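# Note: the Ice secret set below has to match the [ice] secret in the
# LDAPauth.ini generated by murmur-ldap.nix (both read the same tf variable).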
extraConfig = ''
sslCert=/var/lib/acme/services_murmur/fullchain.pem
sslKey=/var/lib/acme/services_murmur/key.pem
ice="tcp -h 127.0.0.1 -p 6502"
icesecretread=${tf.variables.murmur-ice.ref}
icesecretwrite=${tf.variables.murmur-ice.ref}
'';
};
# Service Replacement
users.users.murmur = {
description = "Murmur Service user";
home = "/var/lib/murmur";
createHome = true;
uid = config.ids.uids.murmur;
group = "murmur";
};
users.groups.murmur = {
gid = config.ids.gids.murmur;
};
systemd.services.murmur = {
description = "Murmur Chat Service";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ];
serviceConfig = {
# murmurd doesn't fork when logging to the console.
Type = if forking then "forking" else "simple";
PIDFile = mkIf forking "/run/murmur/murmurd.pid";
EnvironmentFile = mkIf (cfg.environmentFile != null) cfg.environmentFile;
ExecStart = "${cfg.package}/bin/mumble-server -ini ${config.secrets.files.murmur-config.path}";
Restart = "always";
RuntimeDirectory = "murmur";
RuntimeDirectoryMode = "0700";
User = "murmur";
Group = "murmur";
};
};
# Certs
network.extraCerts.services_murmur = "voice.${config.network.dns.domain}";
users.groups."voice-cert".members = [ "nginx" "murmur" ];
security.acme.certs.services_murmur = {
group = "voice-cert";
postRun = "systemctl restart murmur";
extraDomainNames = [ config.network.dns.domain ];
};
# DNS
deploy.tf.dns.records = {
services_murmur = {
inherit (config.network.dns) zone;
domain = "voice";
cname = { inherit (config.network.addresses.public) target; };
};
services_murmur_tcp_srv = {
inherit (config.network.dns) zone;
domain = "@";
srv = {
service = "mumble";
proto = "tcp";
priority = 0;
weight = 5;
port = 64738;
target = "voice.${config.network.dns.zone}";
};
};
services_murmur_udp_srv = {
inherit (config.network.dns) zone;
domain = "@";
srv = {
service = "mumble";
proto = "udp";
priority = 0;
weight = 5;
port = 64738;
target = "voice.${config.network.dns.zone}";
};
};
};
}

79
services/nextcloud.nix Normal file
View file

@ -0,0 +1,79 @@
{ config, pkgs, lib, tf, kw, ... }: with lib; let
cfg = config.services.nextcloud;
in {
deploy.tf.dns.records.services_internal_cloud = {
inherit (config.network.dns) zone;
domain = "cloud.int";
cname = { inherit (config.network.addresses.yggdrasil) target; };
};
kw.secrets.variables =
mapListToAttrs
(field:
nameValuePair "nextcloud-${field}" {
path = "secrets/nextcloud";
inherit field;
}) [ "adminpass" "dbpass" ];
secrets.files.nextcloud-adminpass = {
text = ''
${tf.variables.nextcloud-adminpass.ref}
'';
owner = "nextcloud";
group = "nextcloud";
};
services.postgresql = {
enable = true;
ensureDatabases = [ "nextcloud" ];
ensureUsers = [{
name = "nextcloud";
ensurePermissions."DATABASE nextcloud" = "ALL PRIVILEGES";
}];
};
services.nextcloud = {
enable = true;
package = pkgs.nextcloud24;
config = {
dbtype = "pgsql";
dbhost = "/run/postgresql";
defaultPhoneRegion = "GB";
adminpassFile = config.secrets.files.nextcloud-adminpass.path;
extraTrustedDomains = [
"cloud.kittywit.ch"
];
};
https = true;
enableImagemagick = true;
home = "/mnt/zenc/nextcloud";
hostName = "cloud.kittywit.ch";
autoUpdateApps = {
enable = true;
};
};
services.nginx.virtualHosts."cloud.kittywit.ch".extraConfig = mkForce ''
index index.php index.html /index.php$request_uri;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header X-Robots-Tag none;
add_header X-Download-Options noopen;
add_header X-Permitted-Cross-Domain-Policies none;
add_header X-Frame-Options sameorigin;
add_header Referrer-Policy no-referrer;
client_max_body_size ${cfg.maxUploadSize};
fastcgi_buffers 64 4K;
fastcgi_hide_header X-Powered-By;
gzip on;
gzip_vary on;
gzip_comp_level 4;
gzip_min_length 256;
gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
${optionalString cfg.webfinger ''
rewrite ^/.well-known/host-meta /public.php?service=host-meta last;
rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json last;
''}
'';
}

21
services/nfs.nix Normal file
View file

@ -0,0 +1,21 @@
{ config, lib, kw, ... }:
with lib;
{
network.firewall = {
private.tcp.ports = [ 111 2049 ];
public.tcp.ports = [ 111 2049 ];
};
services.nfs.server.enable = true;
services.nfs.server.exports = "/mnt/zraw/media 192.168.1.0/24(rw) fe80::/10(rw) 200::/7(rw) 2a00:23c7:c597:7400::/56(rw)";
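# Exported to the local LAN, IPv6 link-local peers, the Yggdrasil prefix
# (200::/7) and the site's delegated public IPv6 prefix.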
services.nginx.virtualHosts = kw.virtualHostGen {
networkFilter = [ "private" "yggdrasil" ];
block.locations."/" = {
alias = "/mnt/zraw/media/";
extraConfig = "autoindex on;";
};
};
}

46
services/nginx.nix Normal file
View file

@ -0,0 +1,46 @@
{ config, lib, pkgs, tf, ... }:
with lib;
{
secrets.files.dns_creds = {
text = ''
RFC2136_NAMESERVER='${tf.variables.katdns-address.ref}'
RFC2136_TSIG_ALGORITHM='hmac-sha512.'
RFC2136_TSIG_KEY='${tf.variables.katdns-name.ref}'
RFC2136_TSIG_SECRET='${tf.variables.katdns-key.ref}'
'';
};
network.firewall = {
public.tcp.ports = [ 443 80 ];
private.tcp.ports = [ 443 80 ];
};
services.nginx = {
enable = true;
recommendedGzipSettings = true;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
commonHttpConfig = mkIf (config.networking.hostName != "yukari") ''
map $scheme $hsts_header {
https "max-age=31536000; includeSubdomains; preload";
}
add_header Strict-Transport-Security $hsts_header;
#add_header Content-Security-Policy "script-src 'self'; object-src 'none'; base-uri 'none';" always;
add_header 'Referrer-Policy' 'origin-when-cross-origin';
#add_header X-Frame-Options DENY;
#add_header X-Content-Type-Options nosniff;
#add_header X-XSS-Protection "1; mode=block";
#proxy_cookie_path / "/; secure; HttpOnly; SameSite=strict";
'';
clientMaxBodySize = "512m";
};
security.acme = {
defaults.email = config.network.dns.email;
#email = config.network.dns.email;
acceptTerms = true;
};
}

View file

@ -0,0 +1,177 @@
{ config, pkgs, tf, lib, ... }: with lib; {
network.firewall.public.tcp.ports = [ 636 ];
services.openldap = {
enable = true;
urlList = [ "ldap:///" "ldaps:///" ];
settings = {
attrs = {
objectClass = "olcGlobal";
cn = "config";
olcPidFile = "/run/slapd/slapd.pid";
olcTLSCACertificateFile = "/var/lib/acme/domain-auth/fullchain.pem";
olcTLSCertificateFile = "/var/lib/acme/domain-auth/cert.pem";
olcTLSCertificateKeyFile = "/var/lib/acme/domain-auth/key.pem";
};
children = {
"cn=module" = {
attrs = {
objectClass = "olcModuleList";
olcModuleLoad = "memberof";
};
};
"cn=schema" = {
attrs = {
cn = "schema";
objectClass = "olcSchemaConfig";
};
includes = [
"${pkgs.openldap}/etc/schema/core.ldif"
"${pkgs.openldap}/etc/schema/cosine.ldif"
"${pkgs.openldap}/etc/schema/inetorgperson.ldif"
"${pkgs.openldap}/etc/schema/nis.ldif"
];
};
"olcOverlay=memberof,olcDatabase={1}mdb" = {
attrs = {
objectClass = [
"olcOverlayConfig"
"olcMemberOf"
"olcConfig"
];
olcOverlay = "memberof";
olcMemberOfDangling = "ignore";
olcMemberOfGroupOC = "groupOfNames";
olcMemberOfMemberAD = "member";
olcMemberOfMemberOfAD = "memberOf";
olcMemberOfRefint = "TRUE";
};
};
"olcDatabase={-1}frontend" = {
attrs = {
objectClass = [
"olcDatabaseConfig"
"olcFrontendConfig"
];
olcDatabase = "{-1}frontend";
olcAccess = [
"{0}to * by dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth manage by * break"
"{1}to dn.exact=\"\" by * read"
"{2}to dn.base=\"cn=Subschema\" by * read"
];
};
};
"olcDatabase={0}config" = {
attrs = {
objectClass = "olcDatabaseConfig";
olcDatabase = "{0}config";
olcAccess = [ "{0}to * by * none break" ];
};
};
"olcDatabase={1}mdb" = {
attrs = {
objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
olcDatabase = "{1}mdb";
olcDbDirectory = "/var/db/ldap";
olcSuffix = "dc=kittywit,dc=ch";
olcRootDN = "cn=root,dc=kittywit,dc=ch";
olcRootPW.path = config.secrets.files.openldap-root-password-file.path;
olcAccess = [
''{0}to attrs=userPassword
by anonymous auth
by dn.base="cn=dovecot,dc=mail,dc=kittywit,dc=ch" read
by dn.subtree="ou=services,dc=kittywit,dc=ch" read
by self write
by * none''
''{1}to dn.subtree="dc=kittywit,dc=ch"
by dn.exact="cn=root,dc=kittywit,dc=ch" manage
by dn.base="cn=dovecot,dc=mail,dc=kittywit,dc=ch" read
by dn.subtree="ou=services,dc=kittywit,dc=ch" read
by dn.subtree="ou=users,dc=kittywit,dc=ch" read
''
''{2}to dn.subtree="ou=users,dc=kittywit,dc=ch"
by dn.base="cn=dovecot,dc=mail,dc=kittywit,dc=ch" read
by dn.subtree="ou=users,dc=kittywit,dc=ch" read
by dn.subtree="ou=services,dc=kittywit,dc=ch" read
by * none''
''{3}to dn.subtree="ou=services,dc=kittywit,dc=ch"
by dn.base="cn=dovecot,dc=mail,dc=kittywit,dc=ch" read
by dn.subtree="ou=services,dc=kittywit,dc=ch" read
by * none''
''{4}to dn.subtree="ou=groups,dc=kittywit,dc=ch"
by dn.subtree="ou=users,dc=kittywit,dc=ch" read
by dn.subtree="ou=services,dc=kittywit,dc=ch" read
by * none''
''{5}to attrs=mail by self read''
''{6}to * by * read''
];
};
};
"cn={2}postfix,cn=schema".attrs = {
cn = "{2}postfix";
objectClass = "olcSchemaConfig";
olcAttributeTypes = [
''( 1.3.6.1.4.1.4203.666.1.200 NAME 'mailAcceptingGeneralId'
EQUALITY caseIgnoreIA5Match
SUBSTR caseIgnoreIA5SubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} )''
''(1.3.6.1.4.1.12461.1.1.1 NAME 'postfixTransport'
DESC 'A string directing postfix which transport to use'
EQUALITY caseExactIA5Match
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{20} SINGLE-VALUE)''
''(1.3.6.1.4.1.12461.1.1.5 NAME 'mailbox'
DESC 'The absolute path to the mailbox for a mail account in a non-default location'
EQUALITY caseExactIA5Match
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE)''
''(1.3.6.1.4.1.12461.1.1.6 NAME 'quota'
DESC 'A string that represents the quota on a mailbox'
EQUALITY caseExactIA5Match
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE)''
''(1.3.6.1.4.1.12461.1.1.8 NAME 'maildrop'
DESC 'RFC822 Mailbox - mail alias'
EQUALITY caseIgnoreIA5Match
SUBSTR caseIgnoreIA5SubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256})''
];
olcObjectClasses = [
''(1.3.6.1.4.1.12461.1.2.1 NAME 'mailAccount'
SUP top AUXILIARY
DESC 'Mail account objects'
MUST ( mail $ userPassword )
MAY ( cn $ description $ quota))''
''(1.3.6.1.4.1.12461.1.2.2 NAME 'mailAlias'
SUP top STRUCTURAL
DESC 'Mail aliasing/forwarding entry'
MUST ( mail $ maildrop )
MAY ( cn $ description ))''
''(1.3.6.1.4.1.12461.1.2.3 NAME 'mailDomain'
SUP domain STRUCTURAL
DESC 'Virtual Domain entry to be used with postfix transport maps'
MUST ( dc )
MAY ( postfixTransport $ description ))''
''(1.3.6.1.4.1.12461.1.2.4 NAME 'mailPostmaster'
SUP top AUXILIARY
DESC 'Added to a mailAlias to create a postmaster entry'
MUST roleOccupant)''
];
};
};
};
};
kw.secrets.variables = mapListToAttrs
(field:
nameValuePair "openldap-${field}" {
path = "services/openldap";
inherit field;
}) [ "password" ];
secrets.files = {
openldap-root-password-file = {
text = tf.variables.openldap-password.ref;
owner = "openldap";
group = "openldap";
};
};
}

View file

@ -0,0 +1,5 @@
dn: dc=kittywit,dc=ch
dc: kittywit
o: kittywitch
objectclass: organization
objectclass: dcObject

View file

@ -0,0 +1,51 @@
dn: dc=mail,dc=kittywit,dc=ch
objectClass: dcObject
objectClass: organizationalUnit
objectClass: top
dc: mail
ou: mail

dn: cn=dovecot,dc=mail,dc=kittywit,dc=ch
objectClass: organizationalRole
objectClass: simpleSecurityObject
objectClass: top
cn: dovecot
userPassword: {SSHA}GenerateYourOwn

dn: dc=aliases,dc=mail,dc=kittywit,dc=ch
objectClass: dcObject
objectClass: organizationalUnit
objectClass: top
dc: aliases
ou: aliases

dn: mail=@kittywit.ch,dc=aliases,dc=mail,dc=kittywit,dc=ch
objectClass: top
objectClass: mailAlias
mail: @kittywit.ch
maildrop: kat@kittywit.ch

dn: mail=@dork.dev,dc=aliases,dc=mail,dc=kittywit,dc=ch
objectClass: top
objectClass: mailAlias
mail: @dork.dev
maildrop: kat@kittywit.ch

dn: dc=domains,dc=mail,dc=kittywit,dc=ch
objectClass: dcObject
objectClass: organizationalUnit
objectClass: top
dc: domains
ou: domains

dn: dc=kittywit.ch,dc=domains,dc=mail,dc=kittywit,dc=ch
objectClass: mailDomain
objectClass: top
dc: kittywit.ch
postfixTransport: kittywit.ch

dn: dc=dork.dev,dc=domains,dc=mail,dc=kittywit,dc=ch
objectClass: top
objectClass: mailDomain
dc: dork.dev
postfixTransport: virtual:

View file

@ -0,0 +1,5 @@
dn: ou=services,dc=kittywit,dc=ch
objectClass: top
objectClass: organizationalUnit
description: kittywitch
ou: services

View file

@ -0,0 +1,5 @@
dn: ou=users,dc=kittywit,dc=ch
objectClass: top
objectClass: organizationalUnit
description: kittywitch
ou: users

43
services/plex.nix Normal file
View file

@ -0,0 +1,43 @@
{ config, kw, pkgs, lib, ... }: {
network.firewall.public.tcp.ports = [ 32400 ];
services = {
plex = {
enable = true;
package = pkgs.plex.overrideAttrs (x: let
# see https://www.plex.tv/media-server-downloads/ for 64bit rpm
version = "1.25.9.5721-965587f64";
sha256 = "sha256-NPfpQ8JwXDaq8xpvSabyqdDqMWjoqbeoJdu41nhdsI0=";
in {
name = "plex-${version}";
src = pkgs.fetchurl {
url = "https://downloads.plex.tv/plex-media-server-new/${version}/debian/plexmediaserver_${version}_amd64.deb";
inherit sha256;
};
}
);
};
nginx.virtualHosts."plex.kittywit.ch".locations."/" = {
proxyPass = "http://127.0.0.1:32400";
extraConfig = ''
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_redirect off;
proxy_buffering off;
proxy_set_header X-Plex-Client-Identifier $http_x_plex_client_identifier;
proxy_set_header X-Plex-Device $http_x_plex_device;
proxy_set_header X-Plex-Device-Name $http_x_plex_device_name;
proxy_set_header X-Plex-Platform $http_x_plex_platform;
proxy_set_header X-Plex-Platform-Version $http_x_plex_platform_version;
proxy_set_header X-Plex-Product $http_x_plex_product;
proxy_set_header X-Plex-Token $http_x_plex_token;
proxy_set_header X-Plex-Version $http_x_plex_version;
proxy_set_header X-Plex-Nocache $http_x_plex_nocache;
proxy_set_header X-Plex-Provides $http_x_plex_provides;
proxy_set_header X-Plex-Device-Vendor $http_x_plex_device_vendor;
proxy_set_header X-Plex-Model $http_x_plex_model;
'';
};
};
}

5
services/postgres.nix Normal file
View file

@ -0,0 +1,5 @@
{ config, pkgs, ... }:
{
services.postgresql.enable = true;
}

159
services/prosody.nix Normal file
View file

@ -0,0 +1,159 @@
{ config, pkgs, lib, ... }:
with lib;
{
network.firewall.public.tcp.ports = [
5000 # mod_proxy65 file transfer proxy
5222 # client-to-server (c2s)
5223 # legacy TLS client-to-server
5269 # server-to-server (s2s)
5280 # HTTP / BOSH
5281 # HTTPS
5347 # external components
5582 # telnet console
];
services.postgresql = {
ensureDatabases = [ "prosody" ];
ensureUsers = [{
name = "prosody";
ensurePermissions."DATABASE prosody" = "ALL PRIVILEGES";
}];
};
services.prosody = {
enable = true;
ssl.cert = "/var/lib/acme/prosody/fullchain.pem";
ssl.key = "/var/lib/acme/prosody/key.pem";
admins = singleton "kat@kittywit.ch";
package =
let
package = pkgs.prosody.override (old: {
withExtraLuaPackages = p: singleton p.luadbi-postgresql;
}); in
package;
extraConfig = ''
legacy_ssl_ports = { 5223 }
storage = "sql"
sql = {
driver = "PostgreSQL";
host = "";
database = "prosody";
username = "prosody";
}
'';
virtualHosts = {
"xmpp.${config.network.dns.domain}" = {
domain = config.network.dns.domain;
enabled = true;
ssl.cert = "/var/lib/acme/prosody/fullchain.pem";
ssl.key = "/var/lib/acme/prosody/key.pem";
};
};
muc = [{ domain = "conference.${config.network.dns.domain}"; }];
uploadHttp = { domain = "upload.${config.network.dns.domain}"; };
};
security.acme.certs.prosody = {
domain = "xmpp.${config.network.dns.domain}";
group = "prosody";
dnsProvider = "rfc2136";
credentialsFile = config.secrets.files.dns_creds.path;
postRun = "systemctl restart prosody";
extraDomainNames =
[ config.network.dns.domain "upload.${config.network.dns.domain}" "conference.${config.network.dns.domain}" ];
};
deploy.tf.dns.records = {
services_prosody_xmpp = {
inherit (config.network.dns) zone;
domain = "xmpp";
a.address = config.network.addresses.public.nixos.ipv4.selfaddress;
};
services_prosody_xmpp_v6 = {
inherit (config.network.dns) zone;
domain = "xmpp";
aaaa.address = config.network.addresses.public.nixos.ipv6.selfaddress;
};
services_prosody_upload = {
inherit (config.network.dns) zone;
domain = "upload";
cname.target = "xmpp.${config.network.dns.zone}";
};
services_prosody_conference = {
inherit (config.network.dns) zone;
domain = "conference";
cname.target = "xmpp.${config.network.dns.zone}";
};
services_prosody_muc = {
inherit (config.network.dns) zone;
domain = "conference";
srv = {
service = "xmpp-server";
proto = "tcp";
priority = 0;
weight = 5;
port = 5269;
target = "xmpp.${config.network.dns.zone}";
};
};
services_prosody_client_srv = {
inherit (config.network.dns) zone;
domain = "@";
srv = {
service = "xmpp-client";
proto = "tcp";
priority = 0;
weight = 5;
port = 5222;
target = "xmpp.${config.network.dns.zone}";
};
};
services_prosody_secure_client_srv = {
inherit (config.network.dns) zone;
domain = "@";
srv = {
service = "xmpps-client";
proto = "tcp";
priority = 0;
weight = 5;
port = 5223;
target = "xmpp.${config.network.dns.zone}";
};
};
services_prosody_server_srv = {
inherit (config.network.dns) zone;
domain = "@";
srv = {
service = "xmpp-server";
proto = "tcp";
priority = 0;
weight = 5;
port = 5269;
target = "xmpp.${config.network.dns.zone}";
};
};
};
services.nginx.virtualHosts = {
"upload.${config.network.dns.domain}" = {
useACMEHost = "prosody";
forceSSL = true;
};
"conference.${config.network.dns.domain}" = {
useACMEHost = "prosody";
forceSSL = true;
};
};
users.users.nginx.extraGroups = [ "prosody" ];
}

17
services/restic.nix Normal file
View file

@ -0,0 +1,17 @@
{ config, lib, pkgs, ... }:
{
services.restic.backups.tardis = {
passwordFile = "/etc/restic/system";
paths = [ "/home" "/var/lib" ];
pruneOpts = [ "--keep-daily 7" "--keep-weekly 5" "--keep-monthly 12" ];
repository = "";
};
systemd.services."restic-backups-tardis".environment.RESTIC_REPOSITORY_FILE =
"/etc/restic/system.repo";
services.postgresqlBackup = {
enable = config.services.postgresql.enable;
backupAll = true;
startAt = "*-*-* 23:45:00";
};
}

347
services/synapse.nix Normal file
View file

@ -0,0 +1,347 @@
{ config, pkgs, lib, tf, ... }:
with lib;
{
environment.systemPackages = [ pkgs.mx-puppet-discord pkgs.mautrix-whatsapp ];
services.postgresql.initialScript = pkgs.writeText "synapse-init.sql" ''
CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse';
CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse"
TEMPLATE template0
LC_COLLATE = "C"
LC_CTYPE = "C";
'';
kw.secrets.variables = (mapListToAttrs
(field:
nameValuePair "mautrix-telegram-${field}" {
path = "secrets/mautrix-telegram";
inherit field;
}) [ "api-hash" "api-id" "as-token" "hs-token" ]
// (mapListToAttrs (field:
nameValuePair "synapse-saml2-${field}" {
path = "secrets/synapse-saml2-${field}";
}) ["cert" "key"])
// {
matrix-registration = {
path = "secrets/matrix-registration";
};
});
secrets.files.mautrix-telegram-env = {
text = ''
MAUTRIX_TELEGRAM_TELEGRAM_API_ID=${tf.variables.mautrix-telegram-api-id.ref}
MAUTRIX_TELEGRAM_TELEGRAM_API_HASH=${tf.variables.mautrix-telegram-api-hash.ref}
MAUTRIX_TELEGRAM_APPSERVICE_AS_TOKEN=${tf.variables.mautrix-telegram-as-token.ref}
MAUTRIX_TELEGRAM_APPSERVICE_HS_TOKEN=${tf.variables.mautrix-telegram-hs-token.ref}
'';
};
secrets.files.matrix-registration-secret = {
text = ''
registration_shared_secret: ${tf.variables.matrix-registration.ref}
'';
owner = "matrix-synapse";
group = "matrix-synapse";
};
secrets.files.saml2-cert = {
text = tf.variables.synapse-saml2-cert.ref;
owner = "matrix-synapse";
group = "matrix-synapse";
};
secrets.files.saml2-privkey = {
text = tf.variables.synapse-saml2-key.ref;
owner = "matrix-synapse";
group = "matrix-synapse";
};
secrets.files.saml2-map = {
fileName = "map.py";
text = ''
MAP = {
"identifier": "urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
"fro": {
'uid': 'uid',
'displayName': 'displayName',
},
"to": {
'uid': 'uid',
'displayName': 'displayName',
}
}
'';
owner = "matrix-synapse";
group = "matrix-synapse";
};
secrets.files.saml2-config = {
fileName = "saml2-config.py";
text = ''
import saml2
from saml2.saml import NAME_FORMAT_URI
BASE = "https://kittywit.ch/"
CONFIG = {
"entityid": "matrix-kittywit.ch",
"description": "Matrix Server",
"service": {
"sp": {
"name": "matrix-login",
"endpoints": {
"single_sign_on_service": [
(BASE + "_matrix/saml2/authn_response", saml2.BINDING_HTTP_POST),
],
"assertion_consumer_service": [
(BASE + "_matrix/saml2/authn_response", saml2.BINDING_HTTP_POST),
],
#"single_logout_service": [
# (BASE + "_matrix/saml2/logout", saml2.BINDING_HTTP_POST),
#],
},
"required_attributes": ["uid",],
"optional_attributes": ["displayName"],
"sign_assertion": True,
"sign_response": True,
}
},
"debug": 0,
"key_file": "${config.secrets.files.saml2-privkey.path}",
"cert_file": "${config.secrets.files.saml2-cert.path}",
"encryption_keypairs": [
{
"key_file": "${config.secrets.files.saml2-privkey.path}",
"cert_file": "${config.secrets.files.saml2-cert.path}",
}
],
"attribute_map_dir": "${builtins.dirOf config.secrets.files.saml2-map.path}",
"metadata": {
"remote": [
{
"url": "https://auth.kittywit.ch/auth/realms/kittywitch/protocol/saml/descriptor",
},
],
},
# If you want to have organization and contact_person for the pysaml2 config
#"organization": {
# "name": "Example AB",
# "display_name": [("Example AB", "se"), ("Example Co.", "en")],
# "url": "http://example.com/roland",
#},
#"contact_person": [{
# "given_name": "Example",
# "sur_name": "Example",
# "email_address": ["example@example.com"],
# "contact_type": "technical",
# },
#],
# Make sure to have xmlsec1 installed on your host(s)!
"xmlsec_binary": "${pkgs.xmlsec}/bin/xmlsec1",
}
'';
owner = "matrix-synapse";
group = "matrix-synapse";
};
services.matrix-synapse.extraConfigFiles = [
config.secrets.files.matrix-registration-secret.path
];
services.mautrix-telegram.environmentFile =
config.secrets.files.mautrix-telegram-env.path;
services.matrix-synapse = {
enable = true;
settings = {
log_config = pkgs.writeText "nya.yaml" ''
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse:
level: WARNING
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: WARNING
root:
level: WARNING
handlers: [console]
'';
server_name = config.network.dns.domain;
app_service_config_files = [
"/var/lib/matrix-synapse/telegram-registration.yaml"
"/var/lib/matrix-synapse/discord-registration.yaml"
"/var/lib/matrix-synapse/whatsapp-registration.yaml"
];
max_upload_size = "512M";
rc_messages_per_second = mkDefault 0.1;
rc_message_burst_count = mkDefault 25;
public_baseurl = "https://${config.network.dns.domain}";
url_preview_enabled = mkDefault true;
enable_registration = mkDefault false;
enable_metrics = mkDefault false;
report_stats = mkDefault false;
dynamic_thumbnails = mkDefault true;
allow_guest_access = mkDefault true;
suppress_key_server_warning = mkDefault true;
listeners = [{
port = 8008;
bind_addresses = [ "::1" ];
type = "http";
tls = false;
x_forwarded = true;
resources = [{
names = [ "client" "federation" ];
compress = false;
}];
}];
saml2_config = {
sp_config.metadata.remote = [ {
url = "https://auth.kittywit.ch/auth/realms/kittywitch/protocol/saml/descriptor";
} ];
config_path = config.secrets.files.saml2-config.path;
user_mapping_provider = {
config = {};
};
password_config = {
enabled = false;
};
};
};
};
services.mautrix-telegram = {
enable = true;
settings = {
homeserver = {
address = "https://kittywit.ch";
domain = config.network.dns.domain;
};
appservice = {
provisioning.enabled = false;
id = "telegram";
public = {
enabled = false;
prefix = "/public";
external = "https://${config.network.dns.domain}/public";
};
};
bridge = {
relaybot.authless_portals = false;
permissions = {
"@kat:${config.network.dns.domain}" = "admin";
"${config.network.dns.domain}" = "full";
};
};
};
};
systemd.services.mx-puppet-discord = {
serviceConfig = {
Type = "simple";
Restart = "always";
ExecStart =
"${pkgs.mx-puppet-discord}/bin/mx-puppet-discord -c /var/lib/mx-puppet-discord/config.yaml -f /var/lib/mx-puppet-discord/discord-registration.yaml";
WorkingDirectory = "/var/lib/mx-puppet-discord";
DynamicUser = true;
StateDirectory = "mx-puppet-discord";
UMask = 27;
PrivateTmp = true;
ProtectSystem = "strict";
ProtectHome = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectControlGroups = true;
};
requisite = [ "matrix-synapse.service" ];
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
};
systemd.services.mautrix-whatsapp = {
serviceConfig = {
Type = "simple";
Restart = "always";
ExecStart =
"${pkgs.mautrix-whatsapp}/bin/mautrix-whatsapp -c /var/lib/mautrix-whatsapp/config.yaml -r /var/lib/mautrix-whatsapp/registration.yaml";
WorkingDirectory = "/var/lib/mautrix-whatsapp";
DynamicUser = true;
StateDirectory = "mautrix-whatsapp";
UMask = 27;
PrivateTmp = true;
ProtectSystem = "strict";
ProtectHome = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectControlGroups = true;
};
requisite = [ "matrix-synapse.service" ];
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
};
deploy.tf.dns.records.services_element = {
inherit (config.network.dns) zone;
domain = "element";
cname = { inherit (config.network.addresses.public) target; };
};
services.nginx.virtualHosts."element.${config.network.dns.domain}" = {
forceSSL = true;
enableACME = true;
extraConfig = ''
keepalive_requests 100000;
'';
root = pkgs.element-web.override {
conf = {
default_server_config."m.homeserver" = {
"base_url" = "https://${config.network.dns.domain}:443";
"server_name" = "kittywit.ch";
};
};
};
};
services.nginx.virtualHosts."${config.network.dns.domain}" = {
# allegedly fixes https://github.com/poljar/weechat-matrix/issues/240
extraConfig = ''
keepalive_requests 100000;
'';
locations = {
"/_matrix" = { proxyPass = "http://[::1]:8008"; };
"= /.well-known/matrix/server".extraConfig =
let server = { "m.server" = "${config.network.dns.domain}:443"; };
in
''
add_header Content-Type application/json;
return 200 '${builtins.toJSON server}';
'';
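# Renders as {"m.server":"kittywit.ch:443"}, assuming network.dns.domain is
# "kittywit.ch" as used elsewhere in this file.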
"= /.well-known/matrix/client".extraConfig =
let
client = {
"m.homeserver" = { "base_url" = "https://${config.network.dns.domain}"; };
"m.identity_server" = { "base_url" = "https://vector.im"; };
};
in
''
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '${builtins.toJSON client}';
'';
};
};
}

66
services/syncplay.nix Normal file
View file

@ -0,0 +1,66 @@
{ config, lib, pkgs, tf, ... }:
with lib;
{
kw.secrets.variables =
let
fieldAdapt = field: if field == "pass" then "password" else field;
in
mapListToAttrs
(field:
nameValuePair "syncplay-${field}" {
path = "services/media/syncplay";
field = fieldAdapt field;
}) [ "pass" "salt" ];
users.users.syncplay = { isSystemUser = true; group = "sync-cert"; };
users.groups."sync-cert".members = [ "nginx" "syncplay" ];
security.acme = {
certs."sync.${config.network.dns.domain}" = {
group = "sync-cert";
postRun = ''
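# syncplay's --tls option (used in the service below) is assumed to look for
# privkey.pem in the certificate directory, so copy the ACME key to that name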
cp key.pem privkey.pem
chown acme:sync-cert privkey.pem
'';
};
};
network.firewall.public.tcp.ports = singleton 8999;
services.nginx.virtualHosts."sync.${config.network.dns.domain}" = {
enableACME = true;
forceSSL = true;
};
deploy.tf.dns.records.services_syncplay = {
inherit (config.network.dns) zone;
domain = "sync";
cname = { inherit (config.network.addresses.public) target; };
};
secrets.files.syncplay-env = {
text = ''
SYNCPLAY_PASSWORD=${tf.variables.syncplay-pass.ref}
SYNCPLAY_SALT=${tf.variables.syncplay-salt.ref}
'';
owner = "syncplay";
group = "sync-cert";
};
systemd.services.syncplay = {
description = "Syncplay Service";
wantedBy = singleton "multi-user.target";
after = singleton "network-online.target";
serviceConfig = {
EnvironmentFile = config.secrets.files.syncplay-env.path;
ExecStart =
"${pkgs.syncplay}/bin/syncplay-server --port 8999 --tls /var/lib/acme/sync.${config.network.dns.domain}/ --disable-ready";
User = "syncplay";
Group = "sync-cert";
};
};
}

14
services/taskserver.nix Normal file
View file

@ -0,0 +1,14 @@
{ config, lib, ... }:
with lib;
{
network.firewall.public.tcp.ports = singleton 53589;
services.taskserver = {
enable = true;
fqdn = "kittywit.ch";
listenHost = "::";
organisations.kittywitch.users = singleton "kat";
};
}

45
services/transmission.nix Normal file
View file

@ -0,0 +1,45 @@
{ config, pkgs, lib, kw, ... }:
{
services.nginx.virtualHosts = kw.virtualHostGen {
networkFilter = [ "private" "yggdrasil" ];
block = {
locations."/transmission" = {
proxyPass = "http://[::1]:9091";
extraConfig = "proxy_pass_header X-Transmission-Session-Id;";
};
};
};
services.transmission =
let
transmission-done-script = pkgs.writeScriptBin "script" ''
#!${pkgs.bash}/bin/bash
set -e
if [ "$TR_TORRENT_DIR"/"$TR_TORRENT_NAME" != "/" ]; then
cd "$TR_TORRENT_DIR"/"$TR_TORRENT_NAME"
# only invoke unrar when the torrent actually contains RAR archives
if compgen -G "*.rar" > /dev/null; then
${pkgs.unrar}/bin/unrar x "*.rar"
fi
chmod ugo=rwX .
fi'';
in
{
enable = true;
home = "/mnt/zraw/transmission";
downloadDirPermissions = "777";
settings = {
download-dir = "/mnt/zraw/media/unsorted";
incomplete-dir = "/mnt/zraw/media/.incomplete";
incomplete-dir-enabled = true;
rpc-bind-address = "::";
rpc-whitelist-enabled = false;
rpc-host-whitelist-enabled = false;
script-torrent-done-enabled = true;
dht-enabled = true;
pex-enabled = true;
script-torrent-done-filename = "${transmission-done-script}/bin/script";
umask = 0;
};
};
}

80
services/tt-rss.nix Normal file
View file

@ -0,0 +1,80 @@
{ config, pkgs, lib, tf, ... }: with lib; {
kw.secrets.variables = mapListToAttrs
(field:
nameValuePair "ttrss-${field}" {
path = "secrets/ttrss";
inherit field;
}) [ "password" "ldap" ];
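# Illustration only: this yields an attrset along the lines of
#   { ttrss-password = { path = "secrets/ttrss"; field = "password"; };
#     ttrss-ldap = { path = "secrets/ttrss"; field = "ldap"; }; }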
secrets.files = {
ttrss-ldap-password = {
text = tf.variables.ttrss-ldap.ref;
owner = "tt_rss";
group = "tt_rss";
};
};
secrets.files = {
ttrss-db-password = {
text = tf.variables.ttrss-password.ref;
owner = "tt_rss";
group = "tt_rss";
};
};
deploy.tf.dns.records.services_ttrss = {
inherit (config.network.dns) zone;
domain = "rss";
cname = { inherit (config.network.addresses.public) target; };
};
services.tt-rss = {
enable = true;
virtualHost = "rss.kittywit.ch";
selfUrlPath = "https://rss.kittywit.ch";
pluginPackages = [
pkgs.tt-rss-plugin-auth-ldap
];
themePackages = [
pkgs.tt-rss-theme-feedly
];
plugins = [
"auth_internal"
"auth_ldap"
"note"
"updater"
"api_feedreader"
];
database = {
createLocally = true;
type = "pgsql";
host = "/run/postgresql";
};
extraConfig = ''
putenv('LDAP_DB_PASS=' . file_get_contents("${config.secrets.files.ttrss-db-password.path}"));
define('LDAP_AUTH_SERVER_URI', 'ldap://127.0.0.1:389/');
define('LDAP_AUTH_USETLS', FALSE); // Enable TLS Support for ldaps://
define('LDAP_AUTH_ALLOW_UNTRUSTED_CERT', FALSE); // Allows untrusted certificate
define('LDAP_AUTH_BINDDN', 'cn=root,dc=kittywit,dc=ch');
define('LDAP_AUTH_BINDPW', file_get_contents('${config.secrets.files.ttrss-ldap-password.path}'));
define('LDAP_AUTH_BASEDN', 'ou=users,dc=kittywit,dc=ch');
define('LDAP_AUTH_LOGIN_ATTRIB', 'mail');
define('LDAP_AUTH_ANONYMOUSBEFOREBIND', FALSE);
// ??? will be replaced with the entered username (escaped) at login
define('LDAP_AUTH_SEARCHFILTER', '(&(objectClass=inetOrgPerson)(|(mail=???)(uid=???)))');
// Optional configuration
define('LDAP_AUTH_LOG_ATTEMPTS', TRUE);
// Enable Debug Logging
define('LDAP_AUTH_DEBUG', TRUE);
'';
};
services.nginx = {
virtualHosts."rss.kittywit.ch" = {
enableACME = true;
forceSSL = true;
};
};
}

62
services/tvheadend.nix Normal file
View file

@ -0,0 +1,62 @@
{ config, pkgs, lib, kw, ... }:
{
hardware.firmware = [ pkgs.libreelec-dvb-firmware ];
services.tvheadend.enable = true;
systemd.services.tvheadend.enable = lib.mkForce false;
users.users.tvheadend.group = "tvheadend";
users.groups.tvheadend = {};
network.firewall.public = {
tcp.ports = [ 9981 9982 ];
};
services.nginx.virtualHosts = kw.virtualHostGen {
networkFilter = [ "private" "yggdrasil" ];
block = {
locations."/tvheadend" = {
proxyPass = "http://127.0.0.1:9981";
extraConfig = "proxy_pass_header X-Transmission-Session-Id;";
};
};
};
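# antennas presents tvheadend as an HDHomeRun-style tuner over HTTP, which is
# how Plex (see wantedBy below) picks up the DVB hardware.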
systemd.services.antennas = {
wantedBy = [ "plex.service" ];
after = [ "tvheadend-kat.service" ];
serviceConfig = let
antennaConf = pkgs.writeText "config.yaml" (builtins.toJSON {
antennas_url = "http://127.0.0.1:5009";
tvheadend_url = "http://127.0.0.1:9981";
tuner_count = "6";
}); in {
ExecStart = "${pkgs.antennas}/bin/antennas --config ${antennaConf}";
};
};
systemd.services.tvheadend-kat = {
description = "Tvheadend TV streaming server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
script = ''
${pkgs.tvheadend}/bin/tvheadend \
--http_root /tvheadend \
--http_port 9981 \
--htsp_port 9982 \
-f \
-C \
-p ${config.users.users.tvheadend.home}/tvheadend.pid \
-u tvheadend \
-g video
'';
serviceConfig = {
Type = "forking";
PIDFile = "${config.users.users.tvheadend.home}/tvheadend.pid";
Restart = "always";
RestartSec = 5;
User = "tvheadend";
Group = "video";
ExecStop = "${pkgs.coreutils}/bin/rm ${config.users.users.tvheadend.home}/tvheadend.pid";
};
};
}

77
services/vaultwarden.nix Normal file
View file

@ -0,0 +1,77 @@
{ config, pkgs, lib, tf, ... }: with lib;
{
kw.secrets.variables = mapListToAttrs (field:
nameValuePair "vaultwarden-${field}" {
path = "secrets/vaultwarden";
inherit field;
}) [ "password" "smtp" ];
secrets.files.vaultwarden-env = {
text = ''
ADMIN_TOKEN=${tf.variables.vaultwarden-password.ref}
SMTP_HOST=daiyousei.kittywit.ch
SMTP_FROM=vaultwarden@kittywit.ch
SMTP_FROM_NAME=Vaultwarden
SMTP_PORT=465
SMTP_SSL=true
SMTP_EXPLICIT_TLS=true
SMTP_USERNAME=vaultwarden@kittywit.ch
SMTP_PASSWORD=${tf.variables.vaultwarden-smtp.ref}
'';
owner = "bitwarden_rs";
group = "bitwarden_rs";
};
services.vaultwarden = {
environmentFile = config.secrets.files.vaultwarden-env.path;
};
services.postgresql = {
ensureDatabases = [ "bitwarden_rs" ];
ensureUsers = [{
name = "bitwarden_rs";
ensurePermissions = { "DATABASE bitwarden_rs" = "ALL PRIVILEGES"; };
}];
};
users.users.vaultwarden.name = "bitwarden_rs";
users.groups.vaultwarden.name = "bitwarden_rs";
services.vaultwarden = {
enable = true;
dbBackend = "postgresql";
config = {
rocketPort = 4000;
websocketEnabled = true;
signupsAllowed = false;
domain = "https://vault.${config.network.dns.domain}";
databaseUrl = "postgresql://bitwarden_rs@/bitwarden_rs";
};
};
services.nginx.virtualHosts."vault.${config.network.dns.domain}" = {
enableACME = true;
forceSSL = true;
locations = {
"/" = {
proxyPass = "http://localhost:4000";
proxyWebsockets = true;
};
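# Vaultwarden's websocket notification server listens on port 3012 by default,
# separate from the Rocket HTTP port (4000) configured above.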
"/notifications/hub" = {
proxyPass = "http://localhost:3012";
proxyWebsockets = true;
};
"/notifications/hub/negotiate" = {
proxyPass = "http://localhost:4000";
proxyWebsockets = true;
};
};
};
deploy.tf.dns.records.services_vaultwarden = {
inherit (config.network.dns) zone;
domain = "vault";
cname = { inherit (config.network.addresses.public) target; };
};
}

105
services/vikunja.nix Normal file
View file

@ -0,0 +1,105 @@
{ config, pkgs, lib, tf, ... }: with lib;
let
settings = {
database = {
inherit (config.services.vikunja.database) type host user database path;
};
service = {
frontendurl = "${config.services.vikunja.frontendScheme}://${config.services.vikunja.frontendHostname}/";
JWTSecret = tf.variables.vikunja-jwt.ref;
timezone = "Europe/London";
};
mailer = {
enabled = true;
host = "daiyousei.kittywit.ch";
port = 465;
forcessl = true;
username = "vikunja@kittywit.ch";
password = tf.variables.vikunja-email.ref;
fromemail = "vikunja@kittywit.ch";
};
files = {
basepath = "/var/lib/vikunja/files";
};
log.http = "off";
auth = {
local = {
enabled = false;
};
openid = {
enabled = true;
providers = [{
name = "keycloak";
authurl = "https://auth.kittywit.ch/auth/realms/kittywitch";
clientid = "vikunja";
clientsecret = tf.variables.vikunja-secret.ref;
}];
};
};
};
in {
kw.secrets.variables = (mapListToAttrs
(field:
nameValuePair "vikunja-${field}" {
path = "secrets/vikunja";
inherit field;
}) [ "secret" "email" "jwt" ]);
secrets.files.vikunja-config = {
text = builtins.toJSON settings;
owner = "vikunja";
group = "vikunja";
};
deploy.tf.dns.records.services_vikunja = {
inherit (config.network.dns) zone;
domain = "todo";
cname = { inherit (config.network.addresses.public) target; };
};
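# Force the module-generated config aside: the settings above embed secret
# references (JWT secret, SMTP password, OpenID client secret), so the
# rendered file comes from the secrets machinery instead.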
environment.etc."vikunja/config.yaml".source = mkForce config.secrets.files.vikunja-config.path;
services.vikunja = {
enable = true;
frontendScheme = "https";
frontendHostname = "todo.${config.network.dns.domain}";
database = {
type = "postgres";
user = "vikunja";
database = "vikunja";
host = "/run/postgresql";
};
};
services.nginx.virtualHosts."${config.services.vikunja.frontendHostname}" = {
enableACME = true;
forceSSL = true;
};
services.postgresql = {
ensureDatabases = [ "vikunja" ];
ensureUsers = [
{ name = "vikunja";
ensurePermissions = { "DATABASE vikunja" = "ALL PRIVILEGES"; };
}
];
};
systemd.services.vikunja-api = {
serviceConfig = {
DynamicUser = lib.mkForce false;
User = "vikunja";
Group = "vikunja";
};
};
users.users.vikunja = {
description = "Vikunja Service";
createHome = false;
group = "vikunja";
isSystemUser = true;
};
users.groups.vikunja = {};
}

13
services/website.nix Normal file
View file

@ -0,0 +1,13 @@
{ config, lib, pkgs, ... }:
{
network.dns.isRoot = true;
services.nginx.virtualHosts = {
"${config.network.dns.domain}" = {
root = pkgs.gensokyoZone;
enableACME = true;
forceSSL = true;
};
};
}

21
services/weechat.nix Normal file
View file

@ -0,0 +1,21 @@
{ config, pkgs, ... }:
{
services.nginx.virtualHosts."irc.${config.network.dns.domain}" = {
enableACME = true;
forceSSL = true;
locations = {
"/" = { root = pkgs.glowing-bear; };
"^~ /weechat" = {
proxyPass = "http://127.0.0.1:9000";
proxyWebsockets = true;
};
};
};
deploy.tf.dns.records.services_weechat = {
inherit (config.network.dns) zone;
domain = "irc";
cname = { inherit (config.network.addresses.public) target; };
};
}

14
services/zfs.nix Normal file
View file

@ -0,0 +1,14 @@
{ config, lib, pkgs, ... }:
{
services.zfs = {
autoScrub.enable = true;
autoSnapshot = {
enable = true;
frequent = 1;
daily = 7;
weekly = 1;
monthly = 1;
};
};
}

196
services/znc.nix Normal file
View file

@ -0,0 +1,196 @@
{ config, tf, lib, pkgs, ... }:
with lib;
let
sortedAttrs = set: sort
(l: r:
if l == "extraConfig" then false # Always put extraConfig last
else if isAttrs set.${l} == isAttrs set.${r} then l < r
else isAttrs set.${r} # Attrsets should be last, makes for a nice config
# This last case occurs when exactly one side is an attrset;
# the ordering is already correct when the attrset is on the right,
# which is the boolean we return here
)
(attrNames set);
# Specifies an attrset that encodes the value according to its type
encode = name: value: {
null = [ ];
bool = [ "${name} = ${boolToString value}" ];
int = [ "${name} = ${toString value}" ];
# extraConfig should be inserted verbatim
string = [ (if name == "extraConfig" then value else "${name} = ${value}") ];
# Values like `Foo = [ "bar" "baz" ];` should be transformed into
# Foo=bar
# Foo=baz
list = concatMap (encode name) value;
# Values like `Foo = { bar = { Baz = "baz"; Qux = "qux"; Florps = null; }; };` should be transformed into
# <Foo bar>
# Baz=baz
# Qux=qux
# </Foo>
set = concatMap
(subname: optionals (value.${subname} != null) ([
"<${name} ${subname}>"
] ++ map (line: "\t${line}") (toLines value.${subname}) ++ [
"</${name}>"
]))
(filter (v: v != null) (attrNames value));
}.${builtins.typeOf value};
# One level "above" encode, acts upon a set and uses encode on each name,value pair
toLines = set: concatMap (name: encode name set.${name}) (sortedAttrs set);
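# As a worked example (hypothetical input, not part of this configuration):
#   toLines { LoadModule = [ "webadmin" "adminlog" ]; Listener.l = { Port = 5002; SSL = false; }; }
# yields the lines (nested lines are tab-indented):
#   LoadModule = webadmin
#   LoadModule = adminlog
#   <Listener l>
#     Port = 5002
#     SSL = false
#   </Listener>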
in
{
network.firewall.public.tcp.ports = singleton 5001;
kw.secrets.variables =
let
fieldAdapt = field: if field == "cert" then "notes" else if field == "pass" then "password" else field;
in
listToAttrs (concatMap
(network:
map
(field:
nameValuePair "znc-${network}-${field}" {
path = "social/irc/${network}";
field = fieldAdapt field;
}) [ "cert" "pass" ]
) [ "liberachat" "espernet" ]
++ map
(field:
nameValuePair "znc-softnet-${field}" {
path = "social/irc/softnet";
field = fieldAdapt field;
}) [ "cert" "address" ]
++ singleton (nameValuePair "znc-savebuff-pass" {
path = "social/irc/znc";
field = "savebuff";
})
);
secrets.files.softnet-cert = {
text = tf.variables.znc-softnet-cert.ref;
owner = "znc";
group = "znc";
};
secrets.files.espernet-cert = {
text = tf.variables.znc-espernet-cert.ref;
owner = "znc";
group = "znc";
};
secrets.files.liberachat-cert = {
text = tf.variables.znc-liberachat-cert.ref;
owner = "znc";
group = "znc";
};
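# ZNC's cert module reads each network's client certificate from
# moddata/cert/user.pem, so the deployed secrets are linked into place at
# activation time.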
system.activationScripts = {
softnet-cert-deploy = {
text = ''
mkdir -p /var/lib/znc/users/kat/networks/softnet/moddata/cert
ln -fs ${config.secrets.files.softnet-cert.path} /var/lib/znc/users/kat/networks/softnet/moddata/cert/user.pem
'';
};
espernet-cert-deploy = {
text = ''
mkdir -p /var/lib/znc/users/kat/networks/espernet/moddata/cert
ln -fs ${config.secrets.files.espernet-cert.path} /var/lib/znc/users/kat/networks/espernet/moddata/cert/user.pem
'';
};
liberachat-cert-deploy = {
text = ''
mkdir -p /var/lib/znc/users/kat/networks/liberachat/moddata/cert
ln -fs ${config.secrets.files.liberachat-cert.path} /var/lib/znc/users/kat/networks/liberachat/moddata/cert/user.pem
'';
};
};
secrets.files.znc-config = {
text = concatStringsSep "\n" (toLines config.services.znc.config);
owner = "znc";
group = "znc";
};
services.nginx.virtualHosts."znc.${config.network.dns.domain}" = {
enableACME = true;
forceSSL = true;
locations = { "/".proxyPass = "http://127.0.0.1:5002"; };
};
deploy.tf.dns.records.services_znc = {
inherit (config.network.dns) zone;
domain = "znc";
cname = { inherit (config.network.addresses.public) target; };
};
services.znc = {
enable = true;
mutable = false;
useLegacyConfig = false;
openFirewall = false;
modulePackages = with pkgs.zncModules; [
clientbuffer
clientaway
playback
privmsg
];
config = lib.mkMerge [
({
Version = lib.getVersion pkgs.znc;
Listener.l = {
Port = 5002;
SSL = false;
AllowWeb = true;
};
Listener.j = {
Port = 5001;
SSL = true;
AllowWeb = false;
};
LoadModule = [ "webadmin" "adminlog" "playback" "privmsg" ];
User = {
kat = {
Admin = true;
Nick = "kat";
AltNick = "katrin";
AutoClearChanBuffer = false;
AutoClearQueryBuffer = false;
LoadModule = [ "clientbuffer autoadd" "buffextras" "clientaway" "savebuff ${tf.variables.znc-savebuff-pass.ref}" ];
Network.softnet = {
Server = "${tf.variables.znc-softnet-address.ref}";
Nick = "kat";
AltNick = "kat_";
JoinDelay = 2;
LoadModule = [ "simple_away" "cert" ];
};
Network.liberachat = {
Server = "irc.libera.chat +6697 ${tf.variables.znc-liberachat-pass.ref}";
Nick = "kat";
AltNick = "kat_";
JoinDelay = 2;
LoadModule = [ "cert" "simple_away" "nickserv" ];
};
Network.espernet = {
Server = "anarchy.esper.net +6697 ${tf.variables.znc-espernet-pass.ref}";
Nick = "kat";
AltNick = "katrin";
JoinDelay = 2;
LoadModule = [ "simple_away" "nickserv" "cert" ];
};
};
};
})
(mkIf config.deploy.profile.trusted (import config.kw.secrets.repo.znc.source))
];
configFile = config.secrets.files.znc-config.path;
};
}