62 files changed, 1611 insertions, 761 deletions
@@ -35,7 +35,7 @@ ifeq ($(filter),json)
 else
 filter() { cat; }
 endif
-	nix-instantiate \
+	result=$$(nix-instantiate \
 		$${extraArgs-} \
 		--eval \
 		-A "$$get" \
@@ -45,8 +45,9 @@ endif
 		--argstr current-host-name "$$HOSTNAME" \
 		--argstr current-user-name "$$LOGNAME" \
 		$${system+--argstr system "$$system"} \
-		$${target+--argstr target "$$target"} \
-		| filter
+		$${target+--argstr target "$$target"})
+	echo "$$result" | filter
+
 else
 $(error unbound variable: system[s])
 endif
diff --git a/krebs/3modules/Reaktor.nix b/krebs/3modules/Reaktor.nix
index 0fca52203..92400139c 100644
--- a/krebs/3modules/Reaktor.nix
+++ b/krebs/3modules/Reaktor.nix
@@ -9,6 +9,7 @@ let
       ${cfg.overrideConfig}
     '' else ""}
     ## Extra Config
+    ${concatStringsSep "\n" (map (plug: plug.config) cfg.plugins)}
     ${cfg.extraConfig}
   '';
   cfg = config.krebs.Reaktor;
@@ -35,7 +36,6 @@ let
       '';
     };
 
-
     overrideConfig = mkOption {
       default = null;
       type = types.nullOr types.str;
@@ -44,6 +44,9 @@ let
        Reaktor default cfg can be retrieved via `reaktor get-config`
       '';
     };
+    plugins = mkOption {
+      default = [pkgs.ReaktorPlugins.nixos-version];
+    };
     extraConfig = mkOption {
       default = "";
       type = types.string;
@@ -51,6 +54,14 @@ let
        configuration appended to the default or overridden configuration
       '';
     };
+
+    workdir = mkOption {
+      default = "/var/lib/Reaktor";
+      type = types.str;
+      description = ''
+        Reaktor working directory
+      '';
+    };
     extraEnviron = mkOption {
       default = {};
       type = types.attrsOf types.str;
@@ -59,12 +70,17 @@ let
         REAKTOR_HOST
         REAKTOR_PORT
         REAKTOR_STATEDIR
-        REAKTOR_CHANNELS
 
         debug and nickname can be set separately via the Reaktor api
       '';
     };
-
+    channels = mkOption {
+      default = [ "#krebs" ];
+      type = types.listOf types.str;
+      description = ''
+        Channels the Reaktor should connect to at startup.
+      '';
+    };
     debug = mkOption {
       default = false;
       description = ''
@@ -79,7 +95,7 @@ let
     name = "Reaktor";
     uid = genid name;
     description = "Reaktor user";
-    home = "/var/lib/Reaktor";
+    home = cfg.workdir;
     createHome = true;
   };
 
@@ -101,6 +117,9 @@ let
       GIT_SSL_CAINFO = "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt";
       REAKTOR_NICKNAME = cfg.nickname;
       REAKTOR_DEBUG = (if cfg.debug then "True" else "False");
+      REAKTOR_CHANNELS = lib.concatStringsSep "," cfg.channels;
+      state_dir = cfg.workdir;
+
     } // cfg.extraEnviron;
     serviceConfig= {
       ExecStartPre = pkgs.writeScript "Reaktor-init" ''
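
The Reaktor hunks above add plugins, workdir and channels options and feed them into the service environment (REAKTOR_CHANNELS, state_dir). A minimal host-configuration sketch based only on the options visible here; enable and nickname are assumed to be declared elsewhere in the module (nickname is referenced as cfg.nickname above), and the IRC server name is a placeholder:

  # hypothetical usage, not part of this commit
  krebs.Reaktor = {
    enable = true;                                   # assumed option, not shown in these hunks
    nickname = "Reaktor|test";                       # becomes REAKTOR_NICKNAME
    channels = [ "#krebs" "#test" ];                 # joined with "," into REAKTOR_CHANNELS
    workdir = "/var/lib/Reaktor";                    # home of the Reaktor user and state_dir
    plugins = [ pkgs.ReaktorPlugins.nixos-version ]; # default shown above
    extraEnviron = {
      REAKTOR_HOST = "irc.example.org";              # placeholder server
    };
  };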
diff --git a/krebs/3modules/backup.nix b/krebs/3modules/backup.nix
new file mode 100644
index 000000000..01bb16a2b
--- /dev/null
+++ b/krebs/3modules/backup.nix
@@ -0,0 +1,286 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  out = {
+    options.krebs.backup = api;
+    config = mkIf cfg.enable imp;
+  };
+
+  cfg = config.krebs.backup;
+
+  api = {
+    enable = mkEnableOption "krebs.backup" // { default = true; };
+    plans = mkOption {
+      default = {};
+      type = types.attrsOf (types.submodule ({
+        # TODO enable = mkEnableOption "TODO" // { default = true; };
+        options = {
+          method = mkOption {
+            type = types.enum ["pull" "push"];
+          };
+          name = mkOption {
+            type = types.str;
+          };
+          src = mkOption {
+            type = types.krebs.file-location;
+          };
+          dst = mkOption {
+            type = types.krebs.file-location;
+          };
+          startAt = mkOption {
+            type = types.str;
+          };
+          snapshots = mkOption {
+            type = types.attrsOf (types.submodule {
+              options = {
+                format = mkOption {
+                  type = types.str; # TODO date's +FORMAT
+                };
+                retain = mkOption {
+                  type = types.nullOr types.int;
+                  default = null; # null = retain all snapshots
+                };
+              };
+            });
+          };
+        };
+      }));
+    };
+  };
+
+  imp = {
+    users.groups.backup.gid = genid "backup";
+    users.users = {}
+      // {
+        root.openssh.authorizedKeys.keys =
+          map (plan: plan.dst.host.ssh.pubkey)
+              (filter isPullSrc (attrValues cfg.plans))
+          ++
+          map (plan: plan.src.host.ssh.pubkey)
+              (filter isPushDst (attrValues cfg.plans))
+          ;
+      }
+      ;
+    systemd.services =
+      flip mapAttrs' (filterAttrs (_:isPullDst) cfg.plans) (name: plan: {
+        name = "backup.${name}.pull";
+        value = makePullService plan;
+      })
+      //
+      flip mapAttrs' (filterAttrs (_:isPushSrc) cfg.plans) (name: plan: {
+        name = "backup.${name}.push";
+        value = makePushService plan;
+      })
+      ;
+  };
+
+  isPushSrc = plan:
+    plan.method == "push" &&
+    plan.src.host.name == config.krebs.build.host.name;
+
+  isPullSrc = plan:
+    plan.method == "pull" &&
+    plan.src.host.name == config.krebs.build.host.name;
+
+  isPushDst = plan:
+    plan.method == "push" &&
+    plan.dst.host.name == config.krebs.build.host.name;
+
+  isPullDst = plan:
+    plan.method == "pull" &&
+    plan.dst.host.name == config.krebs.build.host.name;
+
+  # TODO push destination needs this in the dst.user's PATH
+  service-path = [
+    pkgs.coreutils
+    pkgs.gnused
+    pkgs.openssh
+    pkgs.rsync
+    pkgs.utillinux
+  ];
+
+  # TODO if there is plan.user, then use its privkey
+  makePushService = plan: assert isPushSrc plan; {
+    path = service-path;
+    serviceConfig = {
+      ExecStart = push plan;
+      Type = "oneshot";
+    };
+    startAt = plan.startAt;
+  };
+
+  makePullService = plan: assert isPullDst plan; {
+    path = service-path;
+    serviceConfig = {
+      ExecStart = pull plan;
+      Type = "oneshot";
+    };
+    startAt = plan.startAt;
+  };
+
+  push = plan: let
+    # We use writeDashBin and return the absolute path so systemd will produce
+    # nice names in the log, i.e. without the Nix store hash.
+    out = "${main}/bin/${main.name}";
+
+    main = writeDashBin "backup.${plan.name}.push" ''
+      set -efu
+      dst=${shell.escape plan.dst.path}
+
+      mkdir -m 0700 -p "$dst"
+      exec flock -n "$dst" ${critical-section}
+    '';
+
+    critical-section = writeDash "backup.${plan.name}.push.critical-section" ''
+      # TODO check if there is a previous
+      set -efu
+      identity=${shell.escape plan.src.host.ssh.privkey.path}
+      src=${shell.escape plan.src.path}
+      dst_target=${shell.escape "root@${getFQDN plan.dst.host}"}
+      dst_path=${shell.escape plan.dst.path}
+      dst=$dst_target:$dst_path
+
+      # Export NOW so runtime of rsync doesn't influence snapshot naming.
+      export NOW
+      NOW=$(date +%s)
+
+      echo >&2 "update snapshot: current; $src -> $dst"
+      rsync >&2 \
+          -aAXF --delete \
+          -e "ssh -F /dev/null -i $identity" \
+          --rsync-path ${shell.escape
+            "mkdir -m 0700 -p ${shell.escape plan.dst.path} && rsync"} \
+          --link-dest="$dst_path/current" \
+          "$src/" \
+          "$dst/.partial"
+
+      exec ssh -F /dev/null \
+          -i "$identity" \
+          "$dst_target" \
+          -T \
+          env NOW="$NOW" /bin/sh < ${remote-snapshot}
+      EOF
+    '';
+
+    remote-snapshot = writeDash "backup.${plan.name}.push.remote-snapshot" ''
+      set -efu
+      dst=${shell.escape plan.dst.path}
+
+      if test -e "$dst/current"; then
+        mv "$dst/current" "$dst/.previous"
+      fi
+      mv "$dst/.partial" "$dst/current"
+      rm -fR "$dst/.previous"
+      echo >&2
+
+      (${(take-snapshots plan).text})
+    '';
+
+  in out;
+
+  # TODO admit plan.dst.user and its ssh identity
+  pull = plan: let
+    # We use writeDashBin and return the absolute path so systemd will produce
+    # nice names in the log, i.e. without the Nix store hash.
+    out = "${main}/bin/${main.name}";
+
+    main = writeDashBin "backup.${plan.name}.pull" ''
+      set -efu
+      dst=${shell.escape plan.dst.path}
+
+      mkdir -m 0700 -p "$dst"
+      exec flock -n "$dst" ${critical-section}
+    '';
+
+    critical-section = writeDash "backup.${plan.name}.pull.critical-section" ''
+      # TODO check if there is a previous
+      set -efu
+      identity=${shell.escape plan.dst.host.ssh.privkey.path}
+      src=${shell.escape "root@${getFQDN plan.src.host}:${plan.src.path}"}
+      dst=${shell.escape plan.dst.path}
+
+      # Export NOW so runtime of rsync doesn't influence snapshot naming.
+      export NOW
+      NOW=$(date +%s)
+
+      echo >&2 "update snapshot: current; $dst <- $src"
+      mkdir -m 0700 -p ${shell.escape plan.dst.path}
+      rsync >&2 \
+          -aAXF --delete \
+          -e "ssh -F /dev/null -i $identity" \
+          --link-dest="$dst/current" \
+          "$src/" \
+          "$dst/.partial"
+      mv "$dst/current" "$dst/.previous"
+      mv "$dst/.partial" "$dst/current"
+      rm -fR "$dst/.previous"
+      echo >&2
+
+      exec ${take-snapshots plan}
+    '';
+  in out;
+
+  take-snapshots = plan: writeDash "backup.${plan.name}.take-snapshots" ''
+    set -efu
+    NOW=''${NOW-$(date +%s)}
+    dst=${shell.escape plan.dst.path}
+
+    snapshot() {(
+      : $ns $format $retain
+      name=$(date --date="@$NOW" +"$format")
+      if ! test -e "$dst/$ns/$name"; then
+        echo >&2 "create snapshot: $ns/$name"
+        mkdir -m 0700 -p "$dst/$ns"
+        rsync >&2 \
+            -aAXF --delete \
+            --link-dest="$dst/current" \
+            "$dst/current/" \
+            "$dst/$ns/.partial.$name"
+        mv "$dst/$ns/.partial.$name" "$dst/$ns/$name"
+        echo >&2
+      fi
+      case $retain in
+        ([0-9]*)
+          delete_from=$(($retain + 1))
+          ls -r "$dst/$ns" \
+            | sed -n "$delete_from,\$p" \
+            | while read old_name; do
+                echo >&2 "delete snapshot: $ns/$old_name"
+                rm -fR "$dst/$ns/$old_name"
+              done
+          ;;
+        (ALL)
+          :
+          ;;
+      esac
+    )}
+
+    ${concatStringsSep "\n" (mapAttrsToList (ns: { format, retain ? null, ... }:
+      toString (map shell.escape [
+        "ns=${ns}"
+        "format=${format}"
+        "retain=${if retain == null then "ALL" else toString retain}"
+        "snapshot"
+      ]))
+      plan.snapshots)}
+  '';
+
+  # TODO getFQDN: admit hosts in other domains
+  getFQDN = host: "${host.name}.${config.krebs.search-domain}";
+
+  writeDash = name: text: pkgs.writeScript name ''
+    #! ${pkgs.dash}/bin/dash
+    ${text}
+  '';
+
+  writeDashBin = name: text: pkgs.writeTextFile {
+    executable = true;
+    destination = "/bin/${name}";
+    name = name;
+    text = ''
+      #! ${pkgs.dash}/bin/dash
+      ${text}
+    '';
+  };
+
+in out
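
backup.nix declares rsync-based push/pull plans with hard-linked, namespaced snapshots. A sketch of what a plan could look like, assuming types.krebs.file-location is an attrset with host and path fields (which is how plan.src.host and plan.src.path are used above); host names, paths and the schedule are invented:

  # hypothetical plan, not part of this commit
  krebs.backup.plans.alpha-home = {
    method = "push";                 # this host is the src, so makePushService applies
    name = "alpha-home";
    src = { host = config.krebs.hosts.alpha; path = "/home"; };           # invented host/path
    dst = { host = config.krebs.hosts.beta; path = "/bku/alpha-home"; };  # invented host/path
    startAt = "05:00";               # passed through to the systemd timer
    snapshots = {
      daily   = { format = "%Y-%m-%d"; retain = 7; };     # keep the last 7
      weekly  = { format = "%YW%W";    retain = 4; };
      monthly = { format = "%Y-%m";    retain = null; };  # null = retain all snapshots
    };
  };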
diff --git a/krebs/3modules/buildbot/master.nix b/krebs/3modules/buildbot/master.nix
new file mode 100644
index 000000000..74385a433
--- /dev/null
+++ b/krebs/3modules/buildbot/master.nix
@@ -0,0 +1,385 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  buildbot = pkgs.buildbot;
+  buildbot-master-config = pkgs.writeText "buildbot-master.cfg" ''
+    # -*- python -*-
+    from buildbot.plugins import *
+    import re
+    import json
+    c = BuildmasterConfig = {}
+
+    c['slaves'] = []
+    slaves = json.loads('${builtins.toJSON cfg.slaves}')
+    slavenames = [ s for s in slaves ]
+    for k,v in slaves.items():
+      c['slaves'].append(buildslave.BuildSlave(k, v))
+
+    # TODO: configure protocols?
+    c['protocols'] = {'pb': {'port': 9989}}
+
+    ####### Build Inputs
+    c['change_source'] = cs = []
+
+    ${ concatStringsSep "\n"
+      (mapAttrsToList (n: v: ''
+        #### Change_Source: Begin of ${n}
+        ${v}
+        #### Change_Source: End of ${n}
+      '') cfg.change_source )}
+
+    ####### Build Scheduler
+    c['schedulers'] = sched = []
+
+    ${ concatStringsSep "\n"
+      (mapAttrsToList (n: v: ''
+        #### Schedulers: Begin of ${n}
+        ${v}
+        #### Schedulers: End of ${n}
+      '') cfg.scheduler )}
+
+    ###### Builder
+    c['builders'] = bu = []
+
+    # Builder Pre: Begin
+    ${cfg.builder_pre}
+    # Builder Pre: End
+
+    ${ concatStringsSep "\n"
+      (mapAttrsToList (n: v: ''
+        #### Builder: Begin of ${n}
+        ${v}
+        #### Builder: End of ${n}
+      '') cfg.builder )}
+
+
+    ####### Status
+    c['status'] = st = []
+
+    # If you want to configure this url, override with extraConfig
+    c['buildbotURL'] = "http://${config.networking.hostName}:${toString cfg.web.port}/"
+
+    ${optionalString (cfg.web.enable) ''
+      from buildbot.status import html
+      from buildbot.status.web import authz, auth
+      authz_cfg=authz.Authz(
+          auth=auth.BasicAuth([ ("${cfg.web.username}","${cfg.web.password}") ]),
+          # TODO: configure harder
+          gracefulShutdown = False,
+          forceBuild = 'auth',
+          forceAllBuilds = 'auth',
+          pingBuilder = False,
+          stopBuild = 'auth',
+          stopAllBuilds = 'auth',
+          cancelPendingBuild = 'auth'
+      )
+      # TODO: configure krebs.nginx
+      st.append(html.WebStatus(http_port=${toString cfg.web.port}, authz=authz_cfg))
+    ''}
+
+    ${optionalString (cfg.irc.enable) ''
+      from buildbot.status import words
+      irc = words.IRC("${cfg.irc.server}", "${cfg.irc.nick}",
+                      channels=${builtins.toJSON cfg.irc.channels},
+                      notify_events={
+                        'success': 1,
+                        'failure': 1,
+                        'exception': 1,
+                        'successToFailure': 1,
+                        'failureToSuccess': 1,
+                      }${optionalString cfg.irc.allowForce ",allowForce=True"})
+      c['status'].append(irc)
+    ''}
+
+    ${ concatStringsSep "\n"
+      (mapAttrsToList (n: v: ''
+        #### Status: Begin of ${n}
+        ${v}
+        #### Status: End of ${n}
+      '') cfg.status )}
+
+    ####### PROJECT IDENTITY
+    c['title'] = "${cfg.title}"
+    c['titleURL'] = "http://krebsco.de"
+
+
+    ####### DB URL
+    # TODO: configure
+    c['db'] = {
+        'db_url' : "sqlite:///state.sqlite",
+    }
+    ${cfg.extraConfig}
+  '';
+
+  cfg = config.krebs.buildbot.master;
+
+  api = {
+    enable = mkEnableOption "Buildbot Master";
+    title = mkOption {
+      default = "Buildbot CI";
+      type = types.str;
+      description = ''
+        Title of the Buildbot Installation
+      '';
+    };
+    workDir = mkOption {
+      default = "/var/lib/buildbot/master";
+      type = types.str;
+      description = ''
+        Path to build bot master directory.
+        Will be created on startup.
+      '';
+    };
+
+    secrets = mkOption {
+      default = [];
+      type = types.listOf types.str;
+      example = [ "cac.json" ];
+      description = ''
+        List of all the secrets in <secrets> which should be copied into the
+        buildbot master directory.
+      '';
+    };
+
+    slaves = mkOption {
+      default = {};
+      type = types.attrsOf types.str;
+      description = ''
+        Attrset of slavenames with their passwords
+        slavename = slavepassword
+      '';
+    };
+
+    change_source = mkOption {
+      default = {};
+      type = types.attrsOf types.str;
+      example = {
+        stockholm = ''
+          cs.append(changes.GitPoller(
+                  'http://cgit.gum/stockholm',
+                  workdir='stockholm-poller', branch='master',
+                  project='stockholm',
+                  pollinterval=120))
+        '';
+      };
+      description = ''
+        Attrset of all the change_sources which should be configured.
+        It will be directly included into the master configuration.
+
+        At the end an change object should be appended to <literal>cs</literal>
+      '';
+    };
+
+    scheduler = mkOption {
+      default = {};
+      type = types.attrsOf types.str;
+      example = {
+        force-scheduler = ''
+          sched.append(schedulers.ForceScheduler(
+                name="force",
+                builderNames=["full-tests"]))
+        '';
+      };
+      description = ''
+        Attrset of all the schedulers which should be configured.
+        It will be directly included into the master configuration.
+
+        At the end an change object should be appended to <literal>sched</literal>
+      '';
+    };
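
krebs.buildbot.master renders these attrsets verbatim into buildbot-master.cfg. A configuration sketch built from the options and the example snippets shown above; the slave password, ports, credentials, IRC server and channel are placeholders, the web.* and irc.* sub-options are assumed from their cfg.web.* / cfg.irc.* references in the template, and builder/builder_pre (also referenced there) are declared outside this excerpt and omitted here:

  # hypothetical usage, not part of this commit
  krebs.buildbot.master = {
    enable = true;
    title = "Stockholm CI";
    slaves.testslave = "changeme";            # slavename = slavepassword (placeholder)
    change_source.stockholm = ''
      cs.append(changes.GitPoller(
              'http://cgit.gum/stockholm',
              workdir='stockholm-poller', branch='master',
              project='stockholm',
              pollinterval=120))
    '';
    scheduler.force-scheduler = ''
      sched.append(schedulers.ForceScheduler(
            name="force",
            builderNames=["full-tests"]))
    '';
    web = { enable = true; port = 8010; username = "krebs"; password = "changeme"; };
    irc = {
      enable = true;
      server = "irc.example.org";             # placeholder server
      nick = "buildbot-krebs";
      channels = [ "#krebs" ];
      allowForce = false;
    };
  };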