summary refs log tree commit diff stats
path: root/lass/2configs
diff options
context:
space:
mode:
Diffstat (limited to 'lass/2configs')
-rw-r--r--  lass/2configs/monitoring/node-exporter.nix     |  13
-rw-r--r--  lass/2configs/monitoring/prometheus-server.nix | 179
-rw-r--r--  lass/2configs/reaktor-coders.nix               |  15
-rw-r--r--  lass/2configs/syncthing.nix                    |   1
-rw-r--r--  lass/2configs/websites/util.nix                |   6
5 files changed, 209 insertions, 5 deletions
diff --git a/lass/2configs/monitoring/node-exporter.nix b/lass/2configs/monitoring/node-exporter.nix
new file mode 100644
index 000000000..8c27e90d4
--- /dev/null
+++ b/lass/2configs/monitoring/node-exporter.nix
@@ -0,0 +1,13 @@
+{ config, lib, pkgs, ... }:
+{
+ networking.firewall.allowedTCPPorts = [ 9100 ];
+
+ services.prometheus.exporters = {
+ node = {
+ enable = true;
+ enabledCollectors = [
+ "systemd"
+ ];
+ };
+ };
+}
diff --git a/lass/2configs/monitoring/prometheus-server.nix b/lass/2configs/monitoring/prometheus-server.nix
new file mode 100644
index 000000000..d56d7e552
--- /dev/null
+++ b/lass/2configs/monitoring/prometheus-server.nix
@@ -0,0 +1,179 @@
+{ pkgs, lib, config, ... }:
+{
+ #networking = {
+ # firewall.allowedTCPPorts = [
+ # 3000 # grafana
+ # 9090 # prometheus
+ # 9093 # alertmanager
+ # ];
+ # useDHCP = true;
+ #};
+
+ services = {
+ prometheus = {
+ enable = true;
+ extraFlags = [
+ "-storage.local.retention 8760h"
+ "-storage.local.series-file-shrink-ratio 0.3"
+ "-storage.local.memory-chunks 2097152"
+ "-storage.local.max-chunks-to-persist 1048576"
+ "-storage.local.index-cache-size.fingerprint-to-metric 2097152"
+ "-storage.local.index-cache-size.fingerprint-to-timerange 1048576"
+ "-storage.local.index-cache-size.label-name-to-label-values 2097152"
+ "-storage.local.index-cache-size.label-pair-to-fingerprints 41943040"
+ ];
+ alertmanagerURL = [ "http://localhost:9093" ];
+ rules = [
+ ''
+ ALERT node_down
+ IF up == 0
+ FOR 5m
+ LABELS {
+ severity="page"
+ }
+ ANNOTATIONS {
+ summary = "{{$labels.alias}}: Node is down.",
+ description = "{{$labels.alias}} has been down for more than 5 minutes."
+ }
+ ALERT node_systemd_service_failed
+ IF node_systemd_unit_state{state="failed"} == 1
+ FOR 4m
+ LABELS {
+ severity="page"
+ }
+ ANNOTATIONS {
+ summary = "{{$labels.alias}}: Service {{$labels.name}} failed to start.",
+ description = "{{$labels.alias}} failed to (re)start service {{$labels.name}}."
+ }
+ ALERT node_filesystem_full_90percent
+ IF sort(node_filesystem_free{device!="ramfs"} < node_filesystem_size{device!="ramfs"} * 0.1) / 1024^3
+ FOR 5m
+ LABELS {
+ severity="page"
+ }
+ ANNOTATIONS {
+ summary = "{{$labels.alias}}: Filesystem is running out of space soon.",
+ description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} got less than 10% space left on its filesystem."
+ }
+ ALERT node_filesystem_full_in_4h
+ IF predict_linear(node_filesystem_free{device!="ramfs"}[1h], 4*3600) <= 0
+ FOR 5m
+ LABELS {
+ severity="page"
+ }
+ ANNOTATIONS {
+ summary = "{{$labels.alias}}: Filesystem is running out of space in 4 hours.",
+ description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} is running out of space of in approx. 4 hours"
+ }
+ ALERT node_filedescriptors_full_in_3h
+ IF predict_linear(node_filefd_allocated[1h], 3*3600) >= node_filefd_maximum
+ FOR 20m
+ LABELS {
+ severity="page"
+ }
+ ANNOTATIONS {
+ summary = "{{$labels.alias}} is running out of available file descriptors in 3 hours.",
+ description = "{{$labels.alias}} is running out of available file descriptors in approx. 3 hours"
+ }
+ ALERT node_load1_90percent
+ IF node_load1 / on(alias) count(node_cpu{mode="system"}) by (alias) >= 0.9
+ FOR 1h
+ LABELS {
+ severity="page"
+ }
+ ANNOTATIONS {
+ summary = "{{$labels.alias}}: Running on high load.",
+ description = "{{$labels.alias}} is running with > 90% total load for at least 1h."
+ }
+ ALERT node_cpu_util_90percent
+ IF 100 - (avg by (alias) (irate(node_cpu{mode="idle"}[5m])) * 100) >= 90
+ FOR 1h
+ LABELS {
+ severity="page"
+ }
+ ANNOTATIONS {
+ summary = "{{$labels.alias}}: High CPU utilization.",
+ description = "{{$labels.alias}} has total CPU utilization over 90% for at least 1h."
+ }
+ ALERT node_ram_using_90percent
+ IF node_memory_MemFree + node_memory_Buffers + node_memory_Cached < node_memory_MemTotal * 0.1
+ FOR 30m
+ LABELS {
+ severity="page"
+ }
+ ANNOTATIONS {
+ summary="{{$labels.alias}}: Using lots of RAM.",
+ description="{{$labels.alias}} is using at least 90% of its RAM for at least 30 minutes now.",
+ }
+ ALERT node_swap_using_80percent
+ IF node_memory_SwapTotal - (node_memory_SwapFree + node_memory_SwapCached) > node_memory_SwapTotal * 0.8
+ FOR 10m
+ LABELS {
+ severity="page"
+ }
+ ANNOTATIONS {
+ summary="{{$labels.alias}}: Running out of swap soon.",
+ description="{{$labels.alias}} is using 80% of its swap space for at least 10 minutes now."
+ }
+ ''
+ ];
+ scrapeConfigs = [
+ {
+ job_name = "node";
+ scrape_interval = "10s";
+ static_configs = [
+ {
+ targets = [
+ "localhost:9100"
+ ];
+ labels = {
+ alias = "prometheus.example.com";
+ };
+ }
+ ];
+ }
+ ];
+ alertmanager = {
+ enable = true;
+ listenAddress = "0.0.0.0";
+ configuration = {
+ "global" = {
+ "smtp_smarthost" = "smtp.example.com:587";
+ "smtp_from" = "alertmanager@example.com";
+ };
+ "route" = {
+ "group_by" = [ "alertname" "alias" ];
+ "group_wait" = "30s";
+ "group_interval" = "2m";
+ "repeat_interval" = "4h";
+ "receiver" = "team-admins";
+ };
+ "receivers" = [
+ {
+ "name" = "team-admins";
+ "email_configs" = [
+ {
+ "to" = "devnull@example.com";
+ "send_resolved" = true;
+ }
+ ];
+ "webhook_configs" = [
+ {
+ "url" = "https://example.com/prometheus-alerts";
+ "send_resolved" = true;
+ }
+ ];
+ }
+ ];
+ };
+ };
+ };
+ grafana = {
+ enable = true;
+ addr = "0.0.0.0";
+ domain = "grafana.example.com";
+ rootUrl = "https://grafana.example.com/";
+ security = import <secrets/grafana_security.nix>; # { AdminUser = ""; adminPassword = ""}
+ };
+ };
+}
diff --git a/lass/2configs/reaktor-coders.nix b/lass/2configs/reaktor-coders.nix
index 5fa1611ae..5a39f7115 100644
--- a/lass/2configs/reaktor-coders.nix
+++ b/lass/2configs/reaktor-coders.nix
@@ -4,7 +4,7 @@ with import <stockholm/lib>;
{
krebs.Reaktor.coders = {
nickname = "Reaktor|lass";
- channels = [ "#coders" "#germany" ];
+ channels = [ "#coders" "#germany" "#panthermoderns" ];
extraEnviron = {
REAKTOR_HOST = "irc.hackint.org";
};
@@ -87,6 +87,19 @@ with import <stockholm/lib>;
exec /run/wrappers/bin/ping -q -c1 "$1" 2>&1 | tail -1
'';
})
+ (buildSimpleReaktorPlugin "google" {
+ pattern = "^!g (?P<args>.*)$$";
+ script = pkgs.writeDash "google" ''
+ exec ${pkgs.ddgr}/bin/ddgr -C -n1 --json "$@" | \
+ ${pkgs.jq}/bin/jq '@text "\(.[0].abstract) \(.[0].url)"'
+ '';
+ })
+ (buildSimpleReaktorPlugin "blockchain" {
+ pattern = ".*[Bb]lockchain.*$$";
+ script = pkgs.writeDash "blockchain" ''
+ exec echo 'DID SOMEBODY SAY BLOCKCHAIN? https://paste.krebsco.de/r99pMoQq/+inline'
+ '';
+ })
];
};
}
diff --git a/lass/2configs/syncthing.nix b/lass/2configs/syncthing.nix
index cef43d1e6..17debf822 100644
--- a/lass/2configs/syncthing.nix
+++ b/lass/2configs/syncthing.nix
@@ -3,7 +3,6 @@ with import <stockholm/lib>;
{
services.syncthing = {
enable = true;
- useInotify = true;
};
krebs.iptables.tables.filter.INPUT.rules = [
{ predicate = "-p tcp --dport 22000"; target = "ACCEPT";}
diff --git a/lass/2configs/websites/util.nix b/lass/2configs/websites/util.nix
index 62055d0fd..441b7af90 100644
--- a/lass/2configs/websites/util.nix
+++ b/lass/2configs/websites/util.nix
@@ -16,7 +16,7 @@ rec {
in {
services.nginx.virtualHosts.${domain} = {
enableACME = true;
- enableSSL = true;
+ onlySSL = true;
extraConfig = ''
listen 80;
listen [::]:80;
@@ -34,7 +34,7 @@ rec {
in {
services.nginx.virtualHosts."${domain}" = {
enableACME = true;
- enableSSL = true;
+ onlySSL = true;
serverAliases = domains;
extraConfig = ''
listen 80;
@@ -148,7 +148,7 @@ rec {
in {
services.nginx.virtualHosts."${domain}" = {
enableACME = true;
- enableSSL = true;
+ onlySSL = true;
serverAliases = domains;
extraConfig = ''
listen 80;