Merge remote-tracking branch 'origin/master' into haskell-updates

Peter Simons 2020-08-27 14:26:14 +02:00
commit 24cd70f14a
961 changed files with 15970 additions and 8938 deletions

.github/CODEOWNERS (vendored): 9 changed lines

@ -195,10 +195,11 @@
/pkgs/top-level/php-packages.nix @NixOS/php /pkgs/top-level/php-packages.nix @NixOS/php
# Podman, CRI-O modules and related # Podman, CRI-O modules and related
/nixos/modules/virtualisation/containers.nix @NixOS/podman /nixos/modules/virtualisation/containers.nix @NixOS/podman @zowoq
/nixos/modules/virtualisation/cri-o.nix @NixOS/podman /nixos/modules/virtualisation/cri-o.nix @NixOS/podman @zowoq
/nixos/modules/virtualisation/podman.nix @NixOS/podman /nixos/modules/virtualisation/podman.nix @NixOS/podman @zowoq
/nixos/tests/podman.nix @NixOS/podman /nixos/tests/cri-o.nix @NixOS/podman @zowoq
/nixos/tests/podman.nix @NixOS/podman @zowoq
# Blockchains # Blockchains
/pkgs/applications/blockchains @mmahut /pkgs/applications/blockchains @mmahut

.github/workflows/pending-clear.yml (new file): 21 lines

@ -0,0 +1,21 @@
name: "clear pending status"
on:
check_suite:
types: [ completed ]
jobs:
action:
runs-on: ubuntu-latest
steps:
- name: clear pending status
if: github.repository_owner == 'NixOS' && github.event.check_suite.app.name == 'OfBorg'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
curl \
-X POST \
-H "Accept: application/vnd.github.v3+json" \
-H "Authorization: token $GITHUB_TOKEN" \
-d '{"state": "success", "target_url": " ", "description": " ", "context": "Wait for ofborg"}' \
"https://api.github.com/repos/NixOS/nixpkgs/statuses/${{ github.event.check_suite.head_sha }}"

.github/workflows/pending-set.yml (new file): 20 lines

@ -0,0 +1,20 @@
name: "set pending status"
on:
pull_request_target:
jobs:
action:
runs-on: ubuntu-latest
steps:
- name: set pending status
if: github.repository_owner == 'NixOS'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
curl \
-X POST \
-H "Accept: application/vnd.github.v3+json" \
-H "Authorization: token $GITHUB_TOKEN" \
-d '{"state": "failure", "target_url": " ", "description": "This failed status will be cleared when ofborg finishes eval.", "context": "Wait for ofborg"}' \
"https://api.github.com/repos/NixOS/nixpkgs/statuses/${{ github.event.pull_request.head.sha }}"


@ -17,7 +17,6 @@ pkgs.runCommandNoCC "nixpkgs-lib-tests" {
export TEST_ROOT=$(pwd)/test-tmp export TEST_ROOT=$(pwd)/test-tmp
export NIX_BUILD_HOOK= export NIX_BUILD_HOOK=
export NIX_CONF_DIR=$TEST_ROOT/etc export NIX_CONF_DIR=$TEST_ROOT/etc
export NIX_DB_DIR=$TEST_ROOT/db
export NIX_LOCALSTATE_DIR=$TEST_ROOT/var export NIX_LOCALSTATE_DIR=$TEST_ROOT/var
export NIX_LOG_DIR=$TEST_ROOT/var/log/nix export NIX_LOG_DIR=$TEST_ROOT/var/log/nix
export NIX_STATE_DIR=$TEST_ROOT/var/nix export NIX_STATE_DIR=$TEST_ROOT/var/nix


@ -254,6 +254,12 @@
githubId = 732652; githubId = 732652;
name = "Andreas Herrmann"; name = "Andreas Herrmann";
}; };
ahrzb = {
email = "ahrzb5@gmail.com";
github = "ahrzb";
githubId = 5220438;
name = "AmirHossein Roozbahani";
};
ahuzik = { ahuzik = {
email = "ales.guzik@gmail.com"; email = "ales.guzik@gmail.com";
github = "alesguzik"; github = "alesguzik";
@ -466,6 +472,12 @@
githubId = 858965; githubId = 858965;
name = "Andrew Morsillo"; name = "Andrew Morsillo";
}; };
andehen = {
email = "git@andehen.net";
github = "andehen";
githubId = 754494;
name = "Anders Asheim Hennum";
};
andersk = { andersk = {
email = "andersk@mit.edu"; email = "andersk@mit.edu";
github = "andersk"; github = "andersk";
@ -2705,6 +2717,12 @@
githubId = 857308; githubId = 857308;
name = "Joe Hermaszewski"; name = "Joe Hermaszewski";
}; };
extends = {
email = "sharosari@gmail.com";
github = "ImExtends";
githubId = 55919390;
name = "Vincent VILLIAUMEY";
};
eyjhb = { eyjhb = {
email = "eyjhbb@gmail.com"; email = "eyjhbb@gmail.com";
github = "eyJhb"; github = "eyJhb";
@ -3343,6 +3361,12 @@
githubId = 131599; githubId = 131599;
name = "Martin Weinelt"; name = "Martin Weinelt";
}; };
hh = {
email = "hh@m-labs.hk";
github = "HarryMakes";
githubId = 66358631;
name = "Harry Ho";
};
hhm = { hhm = {
email = "heehooman+nixpkgs@gmail.com"; email = "heehooman+nixpkgs@gmail.com";
github = "hhm0"; github = "hhm0";
@ -3715,6 +3739,12 @@
}]; }];
name = "Jiri Daněk"; name = "Jiri Daněk";
}; };
jdbaldry = {
email = "jack.baldry@grafana.com";
github = "jdbaldry";
githubId = 4599384;
name = "Jack Baldry";
};
jdehaas = { jdehaas = {
email = "qqlq@nullptr.club"; email = "qqlq@nullptr.club";
github = "jeroendehaas"; github = "jeroendehaas";
@ -3835,6 +3865,12 @@
githubId = 51518420; githubId = 51518420;
name = "jitwit"; name = "jitwit";
}; };
jjjollyjim = {
email = "jamie@kwiius.com";
github = "JJJollyjim";
githubId = 691552;
name = "Jamie McClymont";
};
jk = { jk = {
email = "hello+nixpkgs@j-k.io"; email = "hello+nixpkgs@j-k.io";
github = "06kellyjac"; github = "06kellyjac";
@ -6719,6 +6755,12 @@
githubId = 37715; githubId = 37715;
name = "Brian McKenna"; name = "Brian McKenna";
}; };
purcell = {
email = "steve@sanityinc.com";
github = "purcell";
githubId = 5636;
name = "Steve Purcell";
};
puzzlewolf = { puzzlewolf = {
email = "nixos@nora.pink"; email = "nixos@nora.pink";
github = "puzzlewolf"; github = "puzzlewolf";
@ -6755,6 +6797,12 @@
githubId = 115877; githubId = 115877;
name = "Kenny Shen"; name = "Kenny Shen";
}; };
quentini = {
email = "quentini@airmail.cc";
github = "QuentinI";
githubId = 18196237;
name = "Quentin Inkling";
};
qyliss = { qyliss = {
email = "hi@alyssa.is"; email = "hi@alyssa.is";
github = "alyssais"; github = "alyssais";
@ -8133,6 +8181,12 @@
githubId = 863327; githubId = 863327;
name = "Tyler Benster"; name = "Tyler Benster";
}; };
tcbravo = {
email = "tomas.bravo@protonmail.ch";
github = "tcbravo";
githubId = 66133083;
name = "Tomas Bravo";
};
tckmn = { tckmn = {
email = "andy@tck.mn"; email = "andy@tck.mn";
github = "tckmn"; github = "tckmn";
@ -8523,6 +8577,12 @@
githubId = 699403; githubId = 699403;
name = "Tomas Vestelind"; name = "Tomas Vestelind";
}; };
tviti = {
email = "tviti@hawaii.edu";
github = "tviti";
githubId = 2251912;
name = "Taylor Viti";
};
tvorog = { tvorog = {
email = "marszaripov@gmail.com"; email = "marszaripov@gmail.com";
github = "tvorog"; github = "tvorog";
@ -9132,6 +9192,16 @@
fingerprint = "85F8 E850 F8F2 F823 F934 535B EC50 6589 9AEA AF4C"; fingerprint = "85F8 E850 F8F2 F823 F934 535B EC50 6589 9AEA AF4C";
}]; }];
}; };
yusdacra = {
email = "y.bera003.06@protonmail.com";
github = "yusdacra";
githubId = 19897088;
name = "Yusuf Bera Ertan";
keys = [{
longkeyid = "rsa2048/0x61807181F60EFCB2";
fingerprint = "9270 66BD 8125 A45B 4AC4 0326 6180 7181 F60E FCB2";
}];
};
yvesf = { yvesf = {
email = "yvesf+nix@xapek.org"; email = "yvesf+nix@xapek.org";
github = "yvesf"; github = "yvesf";
@ -9404,4 +9474,20 @@
github = "fzakaria"; github = "fzakaria";
githubId = 605070; githubId = 605070;
}; };
yevhenshymotiuk = {
name = "Yevhen Shymotiuk";
email = "yevhenshymotiuk@gmail.com";
github = "yevhenshymotiuk";
githubId = 44244245;
};
hmenke = {
name = "Henri Menke";
email = "henri@henrimenke.de";
github = "hmenke";
githubId = 1903556;
keys = [{
longkeyid = "rsa4096/0xD65C9AFB4C224DA3";
fingerprint = "F1C5 760E 45B9 9A44 72E9 6BFB D65C 9AFB 4C22 4DA3";
}];
};
} }


@ -70,35 +70,12 @@ Platform Vendor Advanced Micro Devices, Inc.</screen>
Core Next</link> (GCN) GPUs are supported through the Core Next</link> (GCN) GPUs are supported through the
<package>rocm-opencl-icd</package> package. Adding this package to <package>rocm-opencl-icd</package> package. Adding this package to
<xref linkend="opt-hardware.opengl.extraPackages"/> enables OpenCL <xref linkend="opt-hardware.opengl.extraPackages"/> enables OpenCL
support. However, OpenCL Image support is provided through the support:
non-free <package>rocm-runtime-ext</package> package. This package can
be added to the same configuration option, but requires that
<varname>allowUnfree</varname> option is is enabled for nixpkgs. Full
OpenCL support on supported AMD GPUs is thus enabled as follows:
<programlisting><xref linkend="opt-hardware.opengl.extraPackages"/> = [ <programlisting><xref linkend="opt-hardware.opengl.extraPackages"/> = [
rocm-opencl-icd rocm-opencl-icd
rocm-runtime-ext
];</programlisting> ];</programlisting>
</para> </para>
<para>
It is also possible to use the OpenCL Image extension without a
system-wide installation of the <package>rocm-runtime-ext</package>
package by setting the <varname>ROCR_EXT_DIR</varname> environment
variable to the directory that contains the extension:
<screen><prompt>$</prompt> export \
ROCR_EXT_DIR=`nix-build '&lt;nixpkgs&gt;' --no-out-link -A rocm-runtime-ext`/lib/rocm-runtime-ext</screen>
</para>
<para>
With either approach, you can verify that OpenCL Image support
is indeed working with the <command>clinfo</command> command:
<screen><prompt>$</prompt> clinfo | grep Image
Image support Yes</screen>
</para>
</section> </section>
<section xml:id="sec-gpu-accel-opencl-intel"> <section xml:id="sec-gpu-accel-opencl-intel">


@ -136,7 +136,7 @@
<filename>/mnt</filename>: <filename>/mnt</filename>:
</para> </para>
<screen> <screen>
# nixos-enter /mnt # nixos-enter --root /mnt
</screen> </screen>
<para> <para>
Run a shell command: Run a shell command:


@ -128,7 +128,7 @@ GRANT ALL PRIVILEGES ON *.* TO 'mysql'@'localhost' WITH GRANT OPTION;
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
Two new option <link linkend="opt-documentation.man.generateCaches">documentation.man.generateCaches</link> The new option <link linkend="opt-documentation.man.generateCaches">documentation.man.generateCaches</link>
has been added to automatically generate the <literal>man-db</literal> caches, which are needed by utilities has been added to automatically generate the <literal>man-db</literal> caches, which are needed by utilities
like <command>whatis</command> and <command>apropos</command>. The caches are generated during the build of like <command>whatis</command> and <command>apropos</command>. The caches are generated during the build of
the NixOS configuration: since this can be expensive when a large number of packages are installed, the the NixOS configuration: since this can be expensive when a large number of packages are installed, the
@ -137,7 +137,7 @@ GRANT ALL PRIVILEGES ON *.* TO 'mysql'@'localhost' WITH GRANT OPTION;
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
<varname>services.postfix.sslCACert</varname> was replaced by <varname>services.postfix.tlsTrustedAuthorities</varname> which now defaults to system certifcate authorities. <varname>services.postfix.sslCACert</varname> was replaced by <varname>services.postfix.tlsTrustedAuthorities</varname> which now defaults to system certificate authorities.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
@ -156,6 +156,54 @@ GRANT ALL PRIVILEGES ON *.* TO 'mysql'@'localhost' WITH GRANT OPTION;
Support for built-in LCDs in various pieces of Logitech hardware (keyboards and USB speakers). <varname>hardware.logitech.lcd.enable</varname> enables support for all hardware supported by the g15daemon project. Support for built-in LCDs in various pieces of Logitech hardware (keyboards and USB speakers). <varname>hardware.logitech.lcd.enable</varname> enables support for all hardware supported by the g15daemon project.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
Zabbix now defaults to 5.0, updated from 4.4. Please carefully read through
<link xlink:href="https://www.zabbix.com/documentation/current/manual/installation/upgrade/sources">the upgrade guide</link>
and apply any changes required. Be sure to take special note of the section on
<link xlink:href="https://www.zabbix.com/documentation/current/manual/installation/upgrade_notes_500#enabling_extended_range_of_numeric_float_values">enabling extended range of numeric (float) values</link>
as you will need to apply this database migration manually.
</para>
<para>
If you are using Zabbix Server with a MySQL or MariaDB database, you should note that using a character set of <literal>utf8</literal> and a collation of <literal>utf8_bin</literal> has become mandatory with
this release. See the upstream <link xlink:href="https://support.zabbix.com/browse/ZBX-17357">issue</link> for further discussion. Before upgrading you should check the character set and collation used by
your database and ensure they are correct:
<programlisting>
SELECT
default_character_set_name,
default_collation_name
FROM
information_schema.schemata
WHERE
schema_name = 'zabbix';
</programlisting>
If these values are not correct you should take a backup of your database and convert the character set and collation as required. Here is an
<link xlink:href="https://www.zabbix.com/forum/zabbix-help/396573-reinstall-after-upgrade?p=396891#post396891">example</link> of how to do so, taken from
the Zabbix forums:
<programlisting>
ALTER DATABASE `zabbix` DEFAULT CHARACTER SET utf8 COLLATE utf8_bin;
-- the following will produce a list of SQL commands you should subsequently execute
SELECT CONCAT("ALTER TABLE ", TABLE_NAME," CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;") AS ExecuteTheString
FROM information_schema.`COLUMNS`
WHERE table_schema = "zabbix" AND COLLATION_NAME = "utf8_general_ci";
</programlisting>
</para>
</listitem>
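If the database migration cannot be done immediately, the server package can be pinned to the 4.4 series for the time being. A hedged sketch; the zabbix44.server-pgsql attribute path is an assumption, not taken from this diff:

  { pkgs, ... }: {
    # keep the 4.4 series until the 5.0 database migration has been applied;
    # zabbix44 and its server-pgsql variant are assumed attribute names
    services.zabbixServer.package = pkgs.zabbix44.server-pgsql;
  }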
<listitem>
<para>
The NixOS module system now supports freeform modules as a mix between <literal>types.attrsOf</literal> and <literal>types.submodule</literal>. These allow you to explicitly declare a subset of options while still permitting definitions without an associated option. See <xref linkend='sec-freeform-modules'/> for how to use them.
</para>
</listitem>
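A minimal sketch of such a freeform module, loosely following the pattern described in the linked manual section; the option names (settings, port) and the choice of freeformType are illustrative:

  { lib, ... }: {
    options.settings = lib.mkOption {
      type = lib.types.submodule {
        # any string-valued attribute is accepted without a matching option...
        freeformType = with lib.types; attrsOf str;
        # ...while declared options are still type-checked and documented
        options.port = lib.mkOption {
          type = lib.types.str;
          default = "8080";
        };
      };
      default = { };
    };
  }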
<listitem>
<para>
The GRUB module gained support for basic password protection, which
allows restricting non-default entries in the boot menu to one or more
users. The users and passwords are defined via the option
<option>boot.loader.grub.users</option>.
Note: Password support is only available in GRUB version 2.
</para>
</listitem>
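A hedged sketch of what such a configuration might look like; the hashedPasswordFile attribute and the grub-mkpasswd-pbkdf2 hash it points to are assumptions based on the option description above:

  {
    boot.loader.grub.users.admin = {
      # path to a PBKDF2 hash produced by grub-mkpasswd-pbkdf2 (assumed attribute)
      hashedPasswordFile = "/etc/nixos/grub-admin.pbkdf2";
    };
  }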
</itemizedlist> </itemizedlist>
</section> </section>
@ -199,7 +247,7 @@ GRANT ALL PRIVILEGES ON *.* TO 'mysql'@'localhost' WITH GRANT OPTION;
in the source tree for downloaded modules instead of using go's <link in the source tree for downloaded modules instead of using go's <link
xlink:href="https://golang.org/cmd/go/#hdr-Module_proxy_protocol">module xlink:href="https://golang.org/cmd/go/#hdr-Module_proxy_protocol">module
proxy protocol</link>. This storage format is simpler and therefore less proxy protocol</link>. This storage format is simpler and therefore less
likekly to break with future versions of go. As a result likely to break with future versions of go. As a result
<literal>buildGoModule</literal> switched from <literal>buildGoModule</literal> switched from
<literal>modSha256</literal> to the <literal>vendorSha256</literal> <literal>modSha256</literal> to the <literal>vendorSha256</literal>
attribute to pin fetched version data. attribute to pin fetched version data.
@ -211,7 +259,7 @@ GRANT ALL PRIVILEGES ON *.* TO 'mysql'@'localhost' WITH GRANT OPTION;
<link xlink:href="https://grafana.com/docs/grafana/latest/guides/whats-new-in-v6-4/">deprecated in Grafana</link> <link xlink:href="https://grafana.com/docs/grafana/latest/guides/whats-new-in-v6-4/">deprecated in Grafana</link>
and the <package>phantomjs</package> project is and the <package>phantomjs</package> project is
<link xlink:href="https://github.com/ariya/phantomjs/issues/15344#issue-302015362">currently unmaintained</link>. <link xlink:href="https://github.com/ariya/phantomjs/issues/15344#issue-302015362">currently unmaintained</link>.
It can still be enabled by providing <literal>phantomJsSupport = true</literal> to the package instanciation: It can still be enabled by providing <literal>phantomJsSupport = true</literal> to the package instantiation:
<programlisting>{ <programlisting>{
services.grafana.package = pkgs.grafana.overrideAttrs (oldAttrs: rec { services.grafana.package = pkgs.grafana.overrideAttrs (oldAttrs: rec {
phantomJsSupport = false; phantomJsSupport = false;
@ -223,7 +271,7 @@ GRANT ALL PRIVILEGES ON *.* TO 'mysql'@'localhost' WITH GRANT OPTION;
<para> <para>
The <link linkend="opt-services.supybot.enable">supybot</link> module now uses <literal>/var/lib/supybot</literal> The <link linkend="opt-services.supybot.enable">supybot</link> module now uses <literal>/var/lib/supybot</literal>
as its default <link linkend="opt-services.supybot.stateDir">stateDir</link> path if <literal>stateVersion</literal> as its default <link linkend="opt-services.supybot.stateDir">stateDir</link> path if <literal>stateVersion</literal>
is 20.09 or higher. It also enables number of is 20.09 or higher. It also enables a number of
<link xlink:href="https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Sandboxing">systemd sandboxing options</link> <link xlink:href="https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Sandboxing">systemd sandboxing options</link>
which may possibly interfere with some plugins. If this is the case you can disable the options through attributes in which may possibly interfere with some plugins. If this is the case you can disable the options through attributes in
<option>systemd.services.supybot.serviceConfig</option>. <option>systemd.services.supybot.serviceConfig</option>.
@ -697,6 +745,13 @@ CREATE ROLE postgres LOGIN SUPERUSER;
The USBGuard module now removes options and instead hardcodes values for <literal>IPCAccessControlFiles</literal>, <literal>ruleFiles</literal>, and <literal>auditFilePath</literal>. Audit logs can be found in the journal. The USBGuard module now removes options and instead hardcodes values for <literal>IPCAccessControlFiles</literal>, <literal>ruleFiles</literal>, and <literal>auditFilePath</literal>. Audit logs can be found in the journal.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
The NixOS module system now evaluates option definitions more strictly, allowing it to detect a larger set of problems.
As a result, configurations that previously evaluated may no longer do so.
See <link xlink:href="https://github.com/NixOS/nixpkgs/pull/82743#issuecomment-674520472">the PR that changed this</link> for more info.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
@ -915,9 +970,18 @@ services.transmission.settings.rpc-bind-address = "0.0.0.0";
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
Nginx module <literal>nginxModules.fastcgi-cache-purge</literal> renamed to official name <literal>nginxModules.cache-purge</literal>.
Nginx module <literal>nginxModules.ngx_aws_auth</literal> renamed to official name <literal>nginxModules.aws-auth</literal>.
The packages <package>perl</package>, <package>rsync</package> and <package>strace</package> were removed from <option>systemPackages</option>. If you need them, install them again with <code><xref linkend="opt-environment.systemPackages"/> = with pkgs; [ perl rsync strace ];</code> in your <filename>configuration.nix</filename>. The packages <package>perl</package>, <package>rsync</package> and <package>strace</package> were removed from <option>systemPackages</option>. If you need them, install them again with <code><xref linkend="opt-environment.systemPackages"/> = with pkgs; [ perl rsync strace ];</code> in your <filename>configuration.nix</filename>.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
The <literal>undervolt</literal> option no longer needs to apply its
settings every 30s. If they still become undone, open an issue and restore
the previous behaviour using <literal>undervolt.useTimer</literal>.
</para>
</listitem>
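A minimal sketch of opting back into the timer, assuming the module path shown further down in this commit (services.undervolt):

  {
    services.undervolt.enable = true;
    # only needed if the voltage settings do not hold without the 30s re-apply
    services.undervolt.useTimer = true;
  }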
</itemizedlist> </itemizedlist>
</section> </section>
</section> </section>


@ -24,11 +24,11 @@
check ? true check ? true
, prefix ? [] , prefix ? []
, lib ? import ../../lib , lib ? import ../../lib
, extraModules ? let e = builtins.getEnv "NIXOS_EXTRA_MODULE_PATH";
in if e == "" then [] else [(import e)]
}: }:
let extraArgs_ = extraArgs; pkgs_ = pkgs; let extraArgs_ = extraArgs; pkgs_ = pkgs;
extraModules = let e = builtins.getEnv "NIXOS_EXTRA_MODULE_PATH";
in if e == "" then [] else [(import e)];
in in
let let


@ -22,9 +22,9 @@ rec {
else throw "Unknown QEMU serial device for system '${pkgs.stdenv.hostPlatform.system}'"; else throw "Unknown QEMU serial device for system '${pkgs.stdenv.hostPlatform.system}'";
qemuBinary = qemuPkg: { qemuBinary = qemuPkg: {
x86_64-linux = "${qemuPkg}/bin/qemu-kvm -cpu host"; x86_64-linux = "${qemuPkg}/bin/qemu-kvm -cpu max";
armv7l-linux = "${qemuPkg}/bin/qemu-system-arm -enable-kvm -machine virt -cpu host"; armv7l-linux = "${qemuPkg}/bin/qemu-system-arm -enable-kvm -machine virt -cpu host";
aarch64-linux = "${qemuPkg}/bin/qemu-system-aarch64 -enable-kvm -machine virt,gic-version=host -cpu host"; aarch64-linux = "${qemuPkg}/bin/qemu-system-aarch64 -enable-kvm -machine virt,gic-version=host -cpu host";
x86_64-darwin = "${qemuPkg}/bin/qemu-kvm -cpu host"; x86_64-darwin = "${qemuPkg}/bin/qemu-kvm -cpu max";
}.${pkgs.stdenv.hostPlatform.system} or "${qemuPkg}/bin/qemu-kvm"; }.${pkgs.stdenv.hostPlatform.system} or "${qemuPkg}/bin/qemu-kvm";
} }


@ -1,19 +1,13 @@
#! /somewhere/python3 #! /somewhere/python3
from contextlib import contextmanager, _GeneratorContextManager
from queue import Queue, Empty
from typing import Tuple, Any, Callable, Dict, Iterator, Optional, List
from xml.sax.saxutils import XMLGenerator
import queue
import io
import _thread
import argparse import argparse
import atexit import atexit
import base64 import base64
import codecs import io
import logging
import os import os
import pathlib import pathlib
import ptpython.repl
import pty import pty
import queue
import re import re
import shlex import shlex
import shutil import shutil
@ -21,9 +15,12 @@ import socket
import subprocess import subprocess
import sys import sys
import tempfile import tempfile
import _thread
import time import time
import traceback from contextlib import contextmanager
import unicodedata from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple
import ptpython.repl
CHAR_TO_KEY = { CHAR_TO_KEY = {
"A": "shift-a", "A": "shift-a",
@ -88,13 +85,17 @@ CHAR_TO_KEY = {
")": "shift-0x0B", ")": "shift-0x0B",
} }
# Forward references # Forward reference
log: "Logger"
machines: "List[Machine]" machines: "List[Machine]"
logging.basicConfig(format="%(message)s")
logger = logging.getLogger("test-driver")
logger.setLevel(logging.INFO)
def eprint(*args: object, **kwargs: Any) -> None:
print(*args, file=sys.stderr, **kwargs) class MachineLogAdapter(logging.LoggerAdapter):
def process(self, msg: str, kwargs: Any) -> Tuple[str, Any]:
return f"{self.extra['machine']}: {msg}", kwargs
def make_command(args: list) -> str: def make_command(args: list) -> str:
@ -102,8 +103,7 @@ def make_command(args: list) -> str:
def create_vlan(vlan_nr: str) -> Tuple[str, str, "subprocess.Popen[bytes]", Any]: def create_vlan(vlan_nr: str) -> Tuple[str, str, "subprocess.Popen[bytes]", Any]:
global log logger.info(f"starting VDE switch for network {vlan_nr}")
log.log("starting VDE switch for network {}".format(vlan_nr))
vde_socket = tempfile.mkdtemp( vde_socket = tempfile.mkdtemp(
prefix="nixos-test-vde-", suffix="-vde{}.ctl".format(vlan_nr) prefix="nixos-test-vde-", suffix="-vde{}.ctl".format(vlan_nr)
) )
@ -142,70 +142,6 @@ def retry(fn: Callable) -> None:
raise Exception("action timed out") raise Exception("action timed out")
class Logger:
def __init__(self) -> None:
self.logfile = os.environ.get("LOGFILE", "/dev/null")
self.logfile_handle = codecs.open(self.logfile, "wb")
self.xml = XMLGenerator(self.logfile_handle, encoding="utf-8")
self.queue: "Queue[Dict[str, str]]" = Queue()
self.xml.startDocument()
self.xml.startElement("logfile", attrs={})
def close(self) -> None:
self.xml.endElement("logfile")
self.xml.endDocument()
self.logfile_handle.close()
def sanitise(self, message: str) -> str:
return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C")
def maybe_prefix(self, message: str, attributes: Dict[str, str]) -> str:
if "machine" in attributes:
return "{}: {}".format(attributes["machine"], message)
return message
def log_line(self, message: str, attributes: Dict[str, str]) -> None:
self.xml.startElement("line", attributes)
self.xml.characters(message)
self.xml.endElement("line")
def log(self, message: str, attributes: Dict[str, str] = {}) -> None:
eprint(self.maybe_prefix(message, attributes))
self.drain_log_queue()
self.log_line(message, attributes)
def enqueue(self, message: Dict[str, str]) -> None:
self.queue.put(message)
def drain_log_queue(self) -> None:
try:
while True:
item = self.queue.get_nowait()
attributes = {"machine": item["machine"], "type": "serial"}
self.log_line(self.sanitise(item["msg"]), attributes)
except Empty:
pass
@contextmanager
def nested(self, message: str, attributes: Dict[str, str] = {}) -> Iterator[None]:
eprint(self.maybe_prefix(message, attributes))
self.xml.startElement("nest", attrs={})
self.xml.startElement("head", attributes)
self.xml.characters(message)
self.xml.endElement("head")
tic = time.time()
self.drain_log_queue()
yield
self.drain_log_queue()
toc = time.time()
self.log("({:.2f} seconds)".format(toc - tic))
self.xml.endElement("nest")
class Machine: class Machine:
def __init__(self, args: Dict[str, Any]) -> None: def __init__(self, args: Dict[str, Any]) -> None:
if "name" in args: if "name" in args:
@ -235,8 +171,8 @@ class Machine:
self.pid: Optional[int] = None self.pid: Optional[int] = None
self.socket = None self.socket = None
self.monitor: Optional[socket.socket] = None self.monitor: Optional[socket.socket] = None
self.logger: Logger = args["log"]
self.allow_reboot = args.get("allowReboot", False) self.allow_reboot = args.get("allowReboot", False)
self.logger = MachineLogAdapter(logger, extra=dict(machine=self.name))
@staticmethod @staticmethod
def create_startcommand(args: Dict[str, str]) -> str: def create_startcommand(args: Dict[str, str]) -> str:
@ -292,14 +228,6 @@ class Machine:
def is_up(self) -> bool: def is_up(self) -> bool:
return self.booted and self.connected return self.booted and self.connected
def log(self, msg: str) -> None:
self.logger.log(msg, {"machine": self.name})
def nested(self, msg: str, attrs: Dict[str, str] = {}) -> _GeneratorContextManager:
my_attrs = {"machine": self.name}
my_attrs.update(attrs)
return self.logger.nested(msg, my_attrs)
def wait_for_monitor_prompt(self) -> str: def wait_for_monitor_prompt(self) -> str:
assert self.monitor is not None assert self.monitor is not None
answer = "" answer = ""
@ -314,7 +242,7 @@ class Machine:
def send_monitor_command(self, command: str) -> str: def send_monitor_command(self, command: str) -> str:
message = ("{}\n".format(command)).encode() message = ("{}\n".format(command)).encode()
self.log("sending monitor command: {}".format(command)) self.logger.info(f"sending monitor command: {command}")
assert self.monitor is not None assert self.monitor is not None
self.monitor.send(message) self.monitor.send(message)
return self.wait_for_monitor_prompt() return self.wait_for_monitor_prompt()
@ -381,16 +309,19 @@ class Machine:
return self.execute("systemctl {}".format(q)) return self.execute("systemctl {}".format(q))
def require_unit_state(self, unit: str, require_state: str = "active") -> None: def require_unit_state(self, unit: str, require_state: str = "active") -> None:
with self.nested( self.logger.info(
"checking if unit {} has reached state '{}'".format(unit, require_state) f"checking if unit {unit} has reached state '{require_state}'"
): )
info = self.get_unit_info(unit) info = self.get_unit_info(unit)
state = info["ActiveState"] state = info["ActiveState"]
if state != require_state: if state != require_state:
raise Exception( raise Exception(
"Expected unit {} to to be in state ".format(unit) "Expected unit {} to to be in state ".format(unit)
+ "'{}' but it is in state {}".format(require_state, state) + "'{}' but it is in state {}".format(require_state, state)
) )
def log(self, message: str) -> None:
self.logger.info(message)
def execute(self, command: str) -> Tuple[int, str]: def execute(self, command: str) -> Tuple[int, str]:
self.connect() self.connect()
@ -414,25 +345,26 @@ class Machine:
"""Execute each command and check that it succeeds.""" """Execute each command and check that it succeeds."""
output = "" output = ""
for command in commands: for command in commands:
with self.nested("must succeed: {}".format(command)): self.logger.info(f"must succeed: {command}")
(status, out) = self.execute(command) (status, out) = self.execute(command)
if status != 0: if status != 0:
self.log("output: {}".format(out)) self.logger.info(f"output: {out}")
raise Exception( raise Exception(
"command `{}` failed (exit code {})".format(command, status) "command `{}` failed (exit code {})".format(command, status)
) )
output += out output += out
return output return output
def fail(self, *commands: str) -> None: def fail(self, *commands: str) -> str:
"""Execute each command and check that it fails.""" """Execute each command and check that it fails."""
output = ""
for command in commands: for command in commands:
with self.nested("must fail: {}".format(command)): self.logger.info(f"must fail: {command}")
status, output = self.execute(command) (status, out) = self.execute(command)
if status == 0: if status == 0:
raise Exception( raise Exception("command `{}` unexpectedly succeeded".format(command))
"command `{}` unexpectedly succeeded".format(command) output += out
) return output
def wait_until_succeeds(self, command: str) -> str: def wait_until_succeeds(self, command: str) -> str:
"""Wait until a command returns success and return its output. """Wait until a command returns success and return its output.
@ -445,9 +377,9 @@ class Machine:
status, output = self.execute(command) status, output = self.execute(command)
return status == 0 return status == 0
with self.nested("waiting for success: {}".format(command)): self.logger.info(f"waiting for success: {command}")
retry(check_success) retry(check_success)
return output return output
def wait_until_fails(self, command: str) -> str: def wait_until_fails(self, command: str) -> str:
"""Wait until a command returns failure. """Wait until a command returns failure.
@ -460,21 +392,21 @@ class Machine:
status, output = self.execute(command) status, output = self.execute(command)
return status != 0 return status != 0
with self.nested("waiting for failure: {}".format(command)): self.logger.info(f"waiting for failure: {command}")
retry(check_failure) retry(check_failure)
return output return output
def wait_for_shutdown(self) -> None: def wait_for_shutdown(self) -> None:
if not self.booted: if not self.booted:
return return
with self.nested("waiting for the VM to power off"): self.logger.info("waiting for the VM to power off")
sys.stdout.flush() sys.stdout.flush()
self.process.wait() self.process.wait()
self.pid = None self.pid = None
self.booted = False self.booted = False
self.connected = False self.connected = False
def get_tty_text(self, tty: str) -> str: def get_tty_text(self, tty: str) -> str:
status, output = self.execute( status, output = self.execute(
@ -492,19 +424,19 @@ class Machine:
def tty_matches(last: bool) -> bool: def tty_matches(last: bool) -> bool:
text = self.get_tty_text(tty) text = self.get_tty_text(tty)
if last: if last:
self.log( self.logger.info(
f"Last chance to match /{regexp}/ on TTY{tty}, " f"Last chance to match /{regexp}/ on TTY{tty}, "
f"which currently contains: {text}" f"which currently contains: {text}"
) )
return len(matcher.findall(text)) > 0 return len(matcher.findall(text)) > 0
with self.nested("waiting for {} to appear on tty {}".format(regexp, tty)): self.logger.info(f"waiting for {regexp} to appear on tty {tty}")
retry(tty_matches) retry(tty_matches)
def send_chars(self, chars: List[str]) -> None: def send_chars(self, chars: List[str]) -> None:
with self.nested("sending keys {}".format(chars)): self.logger.info(f"sending keys {chars}")
for char in chars: for char in chars:
self.send_key(char) self.send_key(char)
def wait_for_file(self, filename: str) -> None: def wait_for_file(self, filename: str) -> None:
"""Waits until the file exists in machine's file system.""" """Waits until the file exists in machine's file system."""
@ -513,16 +445,16 @@ class Machine:
status, _ = self.execute("test -e {}".format(filename)) status, _ = self.execute("test -e {}".format(filename))
return status == 0 return status == 0
with self.nested("waiting for file {}".format(filename)): self.logger.info(f"waiting for file {filename}")
retry(check_file) retry(check_file)
def wait_for_open_port(self, port: int) -> None: def wait_for_open_port(self, port: int) -> None:
def port_is_open(_: Any) -> bool: def port_is_open(_: Any) -> bool:
status, _ = self.execute("nc -z localhost {}".format(port)) status, _ = self.execute("nc -z localhost {}".format(port))
return status == 0 return status == 0
with self.nested("waiting for TCP port {}".format(port)): self.logger.info(f"waiting for TCP port {port}")
retry(port_is_open) retry(port_is_open)
def wait_for_closed_port(self, port: int) -> None: def wait_for_closed_port(self, port: int) -> None:
def port_is_closed(_: Any) -> bool: def port_is_closed(_: Any) -> bool:
@ -544,17 +476,17 @@ class Machine:
if self.connected: if self.connected:
return return
with self.nested("waiting for the VM to finish booting"): self.logger.info("waiting for the VM to finish booting")
self.start() self.start()
tic = time.time() tic = time.time()
self.shell.recv(1024) self.shell.recv(1024)
# TODO: Timeout # TODO: Timeout
toc = time.time() toc = time.time()
self.log("connected to guest root shell") self.logger.info("connected to guest root shell")
self.log("(connecting took {:.2f} seconds)".format(toc - tic)) self.logger.info(f"(connecting took {toc - tic:.2f} seconds)")
self.connected = True self.connected = True
def screenshot(self, filename: str) -> None: def screenshot(self, filename: str) -> None:
out_dir = os.environ.get("out", os.getcwd()) out_dir = os.environ.get("out", os.getcwd())
@ -563,15 +495,12 @@ class Machine:
filename = os.path.join(out_dir, "{}.png".format(filename)) filename = os.path.join(out_dir, "{}.png".format(filename))
tmp = "{}.ppm".format(filename) tmp = "{}.ppm".format(filename)
with self.nested( self.logger.info(f"making screenshot {filename}")
"making screenshot {}".format(filename), self.send_monitor_command("screendump {}".format(tmp))
{"image": os.path.basename(filename)}, ret = subprocess.run("pnmtopng {} > {}".format(tmp, filename), shell=True)
): os.unlink(tmp)
self.send_monitor_command("screendump {}".format(tmp)) if ret.returncode != 0:
ret = subprocess.run("pnmtopng {} > {}".format(tmp, filename), shell=True) raise Exception("Cannot convert screenshot")
os.unlink(tmp)
if ret.returncode != 0:
raise Exception("Cannot convert screenshot")
def copy_from_host_via_shell(self, source: str, target: str) -> None: def copy_from_host_via_shell(self, source: str, target: str) -> None:
"""Copy a file from the host into the guest by piping it over the """Copy a file from the host into the guest by piping it over the
@ -647,20 +576,18 @@ class Machine:
tess_args = "-c debug_file=/dev/null --psm 11 --oem 2" tess_args = "-c debug_file=/dev/null --psm 11 --oem 2"
with self.nested("performing optical character recognition"): self.logger.info("performing optical character recognition")
with tempfile.NamedTemporaryFile() as tmpin: with tempfile.NamedTemporaryFile() as tmpin:
self.send_monitor_command("screendump {}".format(tmpin.name)) self.send_monitor_command("screendump {}".format(tmpin.name))
cmd = "convert {} {} tiff:- | tesseract - - {}".format( cmd = "convert {} {} tiff:- | tesseract - - {}".format(
magick_args, tmpin.name, tess_args magick_args, tmpin.name, tess_args
) )
ret = subprocess.run(cmd, shell=True, capture_output=True) ret = subprocess.run(cmd, shell=True, capture_output=True)
if ret.returncode != 0: if ret.returncode != 0:
raise Exception( raise Exception("OCR failed with exit code {}".format(ret.returncode))
"OCR failed with exit code {}".format(ret.returncode)
)
return ret.stdout.decode("utf-8") return ret.stdout.decode("utf-8")
def wait_for_text(self, regex: str) -> None: def wait_for_text(self, regex: str) -> None:
def screen_matches(last: bool) -> bool: def screen_matches(last: bool) -> bool:
@ -668,15 +595,15 @@ class Machine:
matches = re.search(regex, text) is not None matches = re.search(regex, text) is not None
if last and not matches: if last and not matches:
self.log("Last OCR attempt failed. Text was: {}".format(text)) self.logger.info(f"Last OCR attempt failed. Text was: {text}")
return matches return matches
with self.nested("waiting for {} to appear on screen".format(regex)): self.logger.info(f"waiting for {regex} to appear on screen")
retry(screen_matches) retry(screen_matches)
def wait_for_console_text(self, regex: str) -> None: def wait_for_console_text(self, regex: str) -> None:
self.log("waiting for {} to appear on console".format(regex)) self.logger.info(f"waiting for {regex} to appear on console")
# Buffer the console output, this is needed # Buffer the console output, this is needed
# to match multiline regexes. # to match multiline regexes.
console = io.StringIO() console = io.StringIO()
@ -699,7 +626,7 @@ class Machine:
if self.booted: if self.booted:
return return
self.log("starting vm") self.logger.info("starting vm")
def create_socket(path: str) -> socket.socket: def create_socket(path: str) -> socket.socket:
if os.path.exists(path): if os.path.exists(path):
@ -756,7 +683,7 @@ class Machine:
# Store last serial console lines for use # Store last serial console lines for use
# of wait_for_console_text # of wait_for_console_text
self.last_lines: Queue = Queue() self.last_lines: queue.Queue = queue.Queue()
def process_serial_output() -> None: def process_serial_output() -> None:
assert self.process.stdout is not None assert self.process.stdout is not None
@ -764,8 +691,7 @@ class Machine:
# Ignore undecodable bytes that may occur in boot menus # Ignore undecodable bytes that may occur in boot menus
line = _line.decode(errors="ignore").replace("\r", "").rstrip() line = _line.decode(errors="ignore").replace("\r", "").rstrip()
self.last_lines.put(line) self.last_lines.put(line)
eprint("{} # {}".format(self.name, line)) self.logger.info(line)
self.logger.enqueue({"msg": line, "machine": self.name})
_thread.start_new_thread(process_serial_output, ()) _thread.start_new_thread(process_serial_output, ())
@ -774,10 +700,10 @@ class Machine:
self.pid = self.process.pid self.pid = self.process.pid
self.booted = True self.booted = True
self.log("QEMU running (pid {})".format(self.pid)) self.logger.info(f"QEMU running (pid {self.pid})")
def cleanup_statedir(self) -> None: def cleanup_statedir(self) -> None:
self.log("delete the VM state directory") self.logger.info("delete the VM state directory")
if os.path.isfile(self.state_dir): if os.path.isfile(self.state_dir):
shutil.rmtree(self.state_dir) shutil.rmtree(self.state_dir)
@ -792,7 +718,7 @@ class Machine:
if not self.booted: if not self.booted:
return return
self.log("forced crash") self.logger.info("forced crash")
self.send_monitor_command("quit") self.send_monitor_command("quit")
self.wait_for_shutdown() self.wait_for_shutdown()
@ -812,8 +738,8 @@ class Machine:
status, _ = self.execute("[ -e /tmp/.X11-unix/X0 ]") status, _ = self.execute("[ -e /tmp/.X11-unix/X0 ]")
return status == 0 return status == 0
with self.nested("waiting for the X11 server"): self.logger.info("waiting for the X11 server")
retry(check_x) retry(check_x)
def get_window_names(self) -> List[str]: def get_window_names(self) -> List[str]:
return self.succeed( return self.succeed(
@ -826,15 +752,14 @@ class Machine:
def window_is_visible(last_try: bool) -> bool: def window_is_visible(last_try: bool) -> bool:
names = self.get_window_names() names = self.get_window_names()
if last_try: if last_try:
self.log( self.logger.info(
"Last chance to match {} on the window list,".format(regexp) f"Last chance to match {regexp} on the window list, "
+ " which currently contains: " + f"which currently contains: {', '.join(names)}"
+ ", ".join(names)
) )
return any(pattern.search(name) for name in names) return any(pattern.search(name) for name in names)
with self.nested("Waiting for a window to appear"): self.logger.info("Waiting for a window to appear")
retry(window_is_visible) retry(window_is_visible)
def sleep(self, secs: int) -> None: def sleep(self, secs: int) -> None:
time.sleep(secs) time.sleep(secs)
@ -862,23 +787,22 @@ class Machine:
def create_machine(args: Dict[str, Any]) -> Machine: def create_machine(args: Dict[str, Any]) -> Machine:
global log global log
args["log"] = log
args["redirectSerial"] = os.environ.get("USE_SERIAL", "0") == "1" args["redirectSerial"] = os.environ.get("USE_SERIAL", "0") == "1"
return Machine(args) return Machine(args)
def start_all() -> None: def start_all() -> None:
global machines global machines
with log.nested("starting all VMs"): logger.info("starting all VMs")
for machine in machines: for machine in machines:
machine.start() machine.start()
def join_all() -> None: def join_all() -> None:
global machines global machines
with log.nested("waiting for all VMs to finish"): logger.info("waiting for all VMs to finish")
for machine in machines: for machine in machines:
machine.wait_for_shutdown() machine.wait_for_shutdown()
def test_script() -> None: def test_script() -> None:
@ -889,13 +813,12 @@ def run_tests() -> None:
global machines global machines
tests = os.environ.get("tests", None) tests = os.environ.get("tests", None)
if tests is not None: if tests is not None:
with log.nested("running the VM test script"): logger.info("running the VM test script")
try: try:
exec(tests, globals()) exec(tests, globals())
except Exception as e: except Exception:
eprint("error: ") logging.exception("error:")
traceback.print_exc() sys.exit(1)
sys.exit(1)
else: else:
ptpython.repl.embed(locals(), globals()) ptpython.repl.embed(locals(), globals())
@ -908,18 +831,19 @@ def run_tests() -> None:
@contextmanager @contextmanager
def subtest(name: str) -> Iterator[None]: def subtest(name: str) -> Iterator[None]:
with log.nested(name): logger.info(name)
try: try:
yield yield
return True return True
except Exception as e: except Exception as e:
log.log(f'Test "{name}" failed with error: "{e}"') logger.info(f'Test "{name}" failed with error: "{e}"')
raise e raise e
return False return False
if __name__ == "__main__": def main() -> None:
global machines
arg_parser = argparse.ArgumentParser() arg_parser = argparse.ArgumentParser()
arg_parser.add_argument( arg_parser.add_argument(
"-K", "-K",
@ -929,8 +853,6 @@ if __name__ == "__main__":
) )
(cli_args, vm_scripts) = arg_parser.parse_known_args() (cli_args, vm_scripts) = arg_parser.parse_known_args()
log = Logger()
vlan_nrs = list(dict.fromkeys(os.environ.get("VLANS", "").split())) vlan_nrs = list(dict.fromkeys(os.environ.get("VLANS", "").split()))
vde_sockets = [create_vlan(v) for v in vlan_nrs] vde_sockets = [create_vlan(v) for v in vlan_nrs]
for nr, vde_socket, _, _ in vde_sockets: for nr, vde_socket, _, _ in vde_sockets:
@ -941,23 +863,27 @@ if __name__ == "__main__":
if not cli_args.keep_vm_state: if not cli_args.keep_vm_state:
machine.cleanup_statedir() machine.cleanup_statedir()
machine_eval = [ machine_eval = [
"{0} = machines[{1}]".format(m.name, idx) for idx, m in enumerate(machines) "global {0}; {0} = machines[{1}]".format(m.name, idx)
for idx, m in enumerate(machines)
] ]
exec("\n".join(machine_eval)) exec("\n".join(machine_eval))
@atexit.register @atexit.register
def clean_up() -> None: def clean_up() -> None:
with log.nested("cleaning up"): logger.info("cleaning up")
for machine in machines: for machine in machines:
if machine.pid is None: if machine.pid is None:
continue continue
log.log("killing {} (pid {})".format(machine.name, machine.pid)) logger.info(f"killing {machine.name} (pid {machine.pid})")
machine.process.kill() machine.process.kill()
for _, _, process, _ in vde_sockets: for _, _, process, _ in vde_sockets:
process.terminate() process.terminate()
log.close()
tic = time.time() tic = time.time()
run_tests() run_tests()
toc = time.time() toc = time.time()
print("test script finished in {:.2f}s".format(toc - tic)) print("test script finished in {:.2f}s".format(toc - tic))
if __name__ == "__main__":
main()


@ -62,7 +62,7 @@ rec {
'' ''
mkdir -p $out mkdir -p $out
LOGFILE=/dev/null tests='exec(os.environ["testScript"])' ${driver}/bin/nixos-test-driver tests='exec(os.environ["testScript"])' ${driver}/bin/nixos-test-driver
for i in */xchg/coverage-data; do for i in */xchg/coverage-data; do
mkdir -p $out/coverage-data mkdir -p $out/coverage-data


@ -29,7 +29,7 @@ log() {
echo "$@" >&2 echo "$@" >&2
} }
if [ -z "$1" ]; then if [ "$#" -ne 1 ]; then
log "Usage: ./upload-amazon-image.sh IMAGE_OUTPUT" log "Usage: ./upload-amazon-image.sh IMAGE_OUTPUT"
exit 1 exit 1
fi fi


@ -321,7 +321,7 @@ in
monetdb = 290; monetdb = 290;
restic = 291; restic = 291;
openvpn = 292; openvpn = 292;
meguca = 293; # meguca = 293; # removed 2020-08-21
yarn = 294; yarn = 294;
hdfs = 295; hdfs = 295;
mapred = 296; mapred = 296;
@ -622,7 +622,7 @@ in
monetdb = 290; monetdb = 290;
restic = 291; restic = 291;
openvpn = 292; openvpn = 292;
meguca = 293; # meguca = 293; # removed 2020-08-21
yarn = 294; yarn = 294;
hdfs = 295; hdfs = 295;
mapred = 296; mapred = 296;


@ -127,7 +127,7 @@ in {
{ LOCATE_PATH = cfg.output; { LOCATE_PATH = cfg.output;
}; };
warnings = optional (isMLocate && cfg.localuser != null) "mlocate does not support searching as user other than root" warnings = optional (isMLocate && cfg.localuser != null) "mlocate does not support the services.locate.localuser option; updatedb will run as root. (Silence with services.locate.localuser = null.)"
++ optional (isFindutils && cfg.pruneNames != []) "findutils locate does not support pruning by directory component" ++ optional (isFindutils && cfg.pruneNames != []) "findutils locate does not support pruning by directory component"
++ optional (isFindutils && cfg.pruneBindMounts) "findutils locate does not support skipping bind mounts"; ++ optional (isFindutils && cfg.pruneBindMounts) "findutils locate does not support skipping bind mounts";
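A hedged sketch of silencing the new mlocate warning, using the option named in the warning text; the services.locate.locate package selector is an assumption:

  { pkgs, ... }: {
    services.locate.enable = true;
    services.locate.locate = pkgs.mlocate;   # assumed package-selector option
    # updatedb will run as root; silence the warning as its text suggests
    services.locate.localuser = null;
  }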


@ -178,8 +178,6 @@ in
type = types.nullOr types.attrs; # TODO utilize lib.systems.parsedPlatform type = types.nullOr types.attrs; # TODO utilize lib.systems.parsedPlatform
default = null; default = null;
example = { system = "aarch64-linux"; config = "aarch64-unknown-linux-gnu"; }; example = { system = "aarch64-linux"; config = "aarch64-unknown-linux-gnu"; };
defaultText = literalExample
''(import "''${nixos}/../lib").lib.systems.examples.aarch64-multiplatform'';
description = '' description = ''
Specifies the platform for which NixOS should be Specifies the platform for which NixOS should be
built. Specify this only if it is different from built. Specify this only if it is different from


@ -300,6 +300,7 @@
./services/desktops/dleyna-renderer.nix ./services/desktops/dleyna-renderer.nix
./services/desktops/dleyna-server.nix ./services/desktops/dleyna-server.nix
./services/desktops/pantheon/files.nix ./services/desktops/pantheon/files.nix
./services/desktops/espanso.nix
./services/desktops/flatpak.nix ./services/desktops/flatpak.nix
./services/desktops/geoclue2.nix ./services/desktops/geoclue2.nix
./services/desktops/gsignond.nix ./services/desktops/gsignond.nix
@ -886,7 +887,6 @@
./services/web-servers/lighttpd/collectd.nix ./services/web-servers/lighttpd/collectd.nix
./services/web-servers/lighttpd/default.nix ./services/web-servers/lighttpd/default.nix
./services/web-servers/lighttpd/gitweb.nix ./services/web-servers/lighttpd/gitweb.nix
./services/web-servers/meguca.nix
./services/web-servers/mighttpd2.nix ./services/web-servers/mighttpd2.nix
./services/web-servers/minio.nix ./services/web-servers/minio.nix
./services/web-servers/molly-brown.nix ./services/web-servers/molly-brown.nix


@ -1,7 +1,7 @@
# A profile with most (vanilla) hardening options enabled by default, # A profile with most (vanilla) hardening options enabled by default,
# potentially at the cost of features and performance. # potentially at the cost of features and performance.
{ lib, pkgs, ... }: { config, lib, pkgs, ... }:
with lib; with lib;
@ -27,6 +27,9 @@ with lib;
security.forcePageTableIsolation = mkDefault true; security.forcePageTableIsolation = mkDefault true;
# This is required by podman to run containers in rootless mode.
security.unprivilegedUsernsClone = mkDefault config.virtualisation.containers.enable;
security.virtualisation.flushL1DataCache = mkDefault "always"; security.virtualisation.flushL1DataCache = mkDefault "always";
security.apparmor.enable = mkDefault true; security.apparmor.enable = mkDefault true;


@ -48,6 +48,7 @@ with lib;
instead, or any other display manager in NixOS as they all support auto-login. instead, or any other display manager in NixOS as they all support auto-login.
'') '')
(mkRemovedOptionModule [ "services" "dnscrypt-proxy" ] "Use services.dnscrypt-proxy2 instead") (mkRemovedOptionModule [ "services" "dnscrypt-proxy" ] "Use services.dnscrypt-proxy2 instead")
(mkRemovedOptionModule [ "services" "meguca" ] "Use meguca has been removed from nixpkgs")
(mkRemovedOptionModule ["hardware" "brightnessctl" ] '' (mkRemovedOptionModule ["hardware" "brightnessctl" ] ''
The brightnessctl module was removed because newer versions of The brightnessctl module was removed because newer versions of
brightnessctl don't require the udev rules anymore (they can use the brightnessctl don't require the udev rules anymore (they can use the


@ -150,6 +150,14 @@ let
''; '';
}; };
extraLegoFlags = mkOption {
type = types.listOf types.str;
default = [];
description = ''
Additional global flags to pass to all lego commands.
'';
};
extraLegoRenewFlags = mkOption { extraLegoRenewFlags = mkOption {
type = types.listOf types.str; type = types.listOf types.str;
default = []; default = [];
@ -157,6 +165,14 @@ let
Additional flags to pass to lego renew. Additional flags to pass to lego renew.
''; '';
}; };
extraLegoRunFlags = mkOption {
type = types.listOf types.str;
default = [];
description = ''
Additional flags to pass to lego run.
'';
};
}; };
}; };
@ -313,9 +329,10 @@ in
++ optionals (data.dnsProvider != null && !data.dnsPropagationCheck) [ "--dns.disable-cp" ] ++ optionals (data.dnsProvider != null && !data.dnsPropagationCheck) [ "--dns.disable-cp" ]
++ concatLists (mapAttrsToList (name: root: [ "-d" name ]) data.extraDomains) ++ concatLists (mapAttrsToList (name: root: [ "-d" name ]) data.extraDomains)
++ (if data.dnsProvider != null then [ "--dns" data.dnsProvider ] else [ "--http" "--http.webroot" data.webroot ]) ++ (if data.dnsProvider != null then [ "--dns" data.dnsProvider ] else [ "--http" "--http.webroot" data.webroot ])
++ optionals (cfg.server != null || data.server != null) ["--server" (if data.server == null then cfg.server else data.server)]; ++ optionals (cfg.server != null || data.server != null) ["--server" (if data.server == null then cfg.server else data.server)]
++ data.extraLegoFlags;
certOpts = optionals data.ocspMustStaple [ "--must-staple" ]; certOpts = optionals data.ocspMustStaple [ "--must-staple" ];
runOpts = escapeShellArgs (globalOpts ++ [ "run" ] ++ certOpts); runOpts = escapeShellArgs (globalOpts ++ [ "run" ] ++ certOpts ++ data.extraLegoRunFlags);
renewOpts = escapeShellArgs (globalOpts ++ renewOpts = escapeShellArgs (globalOpts ++
[ "renew" "--days" (toString cfg.validMinDays) ] ++ [ "renew" "--days" (toString cfg.validMinDays) ] ++
certOpts ++ data.extraLegoRenewFlags); certOpts ++ data.extraLegoRenewFlags);
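A hedged sketch of how the new per-certificate flags might be used; the certificate name, DNS provider, and flag values are illustrative, and lego's credential setup is omitted:

  {
    security.acme.certs."example.org" = {
      dnsProvider = "route53";
      # passed to every lego invocation for this certificate
      extraLegoFlags = [ "--dns.resolvers" "ns.example.org:53" ];
      # passed only to `lego run`, i.e. the initial issuance
      extraLegoRunFlags = [ "--no-bundle" ];
    };
  }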


@ -51,7 +51,7 @@ in
}; };
secretKeyFile = mkOption { secretKeyFile = mkOption {
type = types.path; type = types.nullOr types.path;
default = null; default = null;
description = '' description = ''
A file containing your secret key. The security of your Duo application is tied to the security of your secret key. A file containing your secret key. The security of your Duo application is tied to the security of your secret key.


@ -27,6 +27,16 @@ with lib;
''; '';
}; };
security.unprivilegedUsernsClone = mkOption {
type = types.bool;
default = false;
description = ''
When disabled, unprivileged users will not be able to create new namespaces.
By default unprivileged user namespaces are disabled.
This option only works in a hardened profile.
'';
};
security.protectKernelImage = mkOption { security.protectKernelImage = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
@ -115,6 +125,10 @@ with lib;
]; ];
}) })
(mkIf config.security.unprivilegedUsernsClone {
boot.kernel.sysctl."kernel.unprivileged_userns_clone" = mkDefault true;
})
(mkIf config.security.protectKernelImage { (mkIf config.security.protectKernelImage {
# Disable hibernation (allows replacing the running kernel) # Disable hibernation (allows replacing the running kernel)
boot.kernelParams = [ "nohibernate" ]; boot.kernelParams = [ "nohibernate" ];
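A hedged usage sketch on a hardened system: the hardened profile (changed earlier in this commit) now follows virtualisation.containers.enable, and the sysctl can also be forced on directly. That the podman module sets virtualisation.containers.enable is an assumption here:

  {
    imports = [ <nixpkgs/nixos/modules/profiles/hardened.nix> ];
    # rootless containers need unprivileged user namespaces
    virtualisation.podman.enable = true;
    # equivalent explicit form, overriding the profile's mkDefault
    security.unprivilegedUsernsClone = true;
  }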


@ -47,7 +47,7 @@ in {
enable = mkEnableOption "Icecast server"; enable = mkEnableOption "Icecast server";
hostname = mkOption { hostname = mkOption {
type = types.str; type = types.nullOr types.str;
description = "DNS name or IP address that will be used for the stream directory lookups or possibily the playlist generation if a Host header is not provided."; description = "DNS name or IP address that will be used for the stream directory lookups or possibily the playlist generation if a Host header is not provided.";
default = config.networking.domain; default = config.networking.domain;
}; };


@ -0,0 +1,25 @@
{ config, lib, pkgs, ... }:
with lib;
let cfg = config.services.espanso;
in {
meta = { maintainers = with lib.maintainers; [ numkem ]; };
options = {
services.espanso = { enable = options.mkEnableOption "Espanso"; };
};
config = mkIf cfg.enable {
systemd.user.services.espanso = {
description = "Espanso daemon";
path = with pkgs; [ espanso libnotify xclip ];
serviceConfig = {
ExecStart = "${pkgs.espanso}/bin/espanso daemon";
Restart = "on-failure";
};
wantedBy = [ "default.target" ];
};
environment.systemPackages = [ pkgs.espanso ];
};
}
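Enabling the new module from a system configuration is then a one-liner; a minimal sketch:

  {
    # starts the espanso user service (with libnotify and xclip on its PATH)
    services.espanso.enable = true;
  }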


@ -15,7 +15,7 @@ let
jupyterhubConfig = pkgs.writeText "jupyterhub_config.py" '' jupyterhubConfig = pkgs.writeText "jupyterhub_config.py" ''
c.JupyterHub.bind_url = "http://${cfg.host}:${toString cfg.port}" c.JupyterHub.bind_url = "http://${cfg.host}:${toString cfg.port}"
c.JupyterHub.authentication_class = "${cfg.authentication}" c.JupyterHub.authenticator_class = "${cfg.authentication}"
c.JupyterHub.spawner_class = "${cfg.spawner}" c.JupyterHub.spawner_class = "${cfg.spawner}"
c.SystemdSpawner.default_url = '/lab' c.SystemdSpawner.default_url = '/lab'


@ -12,7 +12,7 @@ in{
config = mkOption { config = mkOption {
default = null; default = null;
type = types.lines; type = types.nullOr types.lines;
description = "Fancontrol configuration file content. See <citerefentry><refentrytitle>pwmconfig</refentrytitle><manvolnum>8</manvolnum></citerefentry> from the lm_sensors package."; description = "Fancontrol configuration file content. See <citerefentry><refentrytitle>pwmconfig</refentrytitle><manvolnum>8</manvolnum></citerefentry> from the lm_sensors package.";
example = '' example = ''
# Configuration file generated by pwmconfig # Configuration file generated by pwmconfig


@ -103,6 +103,17 @@ in
The temperature target on battery power in Celsius degrees. The temperature target on battery power in Celsius degrees.
''; '';
}; };
useTimer = mkOption {
type = types.bool;
default = false;
description = ''
Whether to set a timer that applies the undervolt settings every 30s.
This will cause spam in the journal but might be required for some
hardware under specific conditions.
Enable this if your undervolt settings don't hold.
'';
};
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
@ -114,6 +125,11 @@ in
path = [ pkgs.undervolt ]; path = [ pkgs.undervolt ];
description = "Intel Undervolting Service"; description = "Intel Undervolting Service";
# Apply undervolt on boot, nixos generation switch and resume
wantedBy = [ "multi-user.target" "post-resume.target" ];
after = [ "post-resume.target" ]; # Not sure why but it won't work without this
serviceConfig = { serviceConfig = {
Type = "oneshot"; Type = "oneshot";
Restart = "no"; Restart = "no";
@ -121,7 +137,7 @@ in
}; };
}; };
systemd.timers.undervolt = { systemd.timers.undervolt = mkIf cfg.useTimer {
description = "Undervolt timer to ensure voltage settings are always applied"; description = "Undervolt timer to ensure voltage settings are always applied";
partOf = [ "undervolt.service" ]; partOf = [ "undervolt.service" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];

View File

@ -5,54 +5,93 @@ with lib;
let let
cfg = config.services.logrotate; cfg = config.services.logrotate;
pathOptions = { pathOpts = {
options = { options = {
enable = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable log rotation for this path. This can be used to explicitly disable
logging that has been configured by NixOS.
'';
};
path = mkOption { path = mkOption {
type = types.str; type = types.str;
description = "The path to log files to be rotated"; description = ''
The path to log files to be rotated.
'';
}; };
user = mkOption { user = mkOption {
type = types.str; type = with types; nullOr str;
description = "The user account to use for rotation"; default = null;
description = ''
The user account to use for rotation.
'';
}; };
group = mkOption { group = mkOption {
type = types.str; type = with types; nullOr str;
description = "The group to use for rotation"; default = null;
description = ''
The group to use for rotation.
'';
}; };
frequency = mkOption { frequency = mkOption {
type = types.enum [ type = types.enum [ "daily" "weekly" "monthly" "yearly" ];
"daily" "weekly" "monthly" "yearly"
];
default = "daily"; default = "daily";
description = "How often to rotate the logs"; description = ''
How often to rotate the logs.
'';
}; };
keep = mkOption { keep = mkOption {
type = types.int; type = types.int;
default = 20; default = 20;
description = "How many rotations to keep"; description = ''
How many rotations to keep.
'';
}; };
extraConfig = mkOption { extraConfig = mkOption {
type = types.lines; type = types.lines;
default = ""; default = "";
description = "Extra logrotate config options for this path"; description = ''
Extra logrotate config options for this path. Refer to
<link xlink:href="https://linux.die.net/man/8/logrotate"/> for details.
'';
};
priority = mkOption {
type = types.int;
default = 1000;
description = ''
Order of this logrotate block in relation to the others. The semantics are
the same as with `lib.mkOrder`. Smaller values have a greater priority.
'';
}; };
}; };
};
pathConfig = options: '' config.extraConfig = ''
"${options.path}" {
su ${options.user} ${options.group}
${options.frequency}
missingok missingok
notifempty notifempty
rotate ${toString options.keep} '';
${options.extraConfig} };
mkConf = pathOpts: ''
# generated by NixOS using the `services.logrotate.paths.${pathOpts.name}` attribute set
"${pathOpts.path}" {
${optionalString (pathOpts.user != null || pathOpts.group != null) "su ${pathOpts.user} ${pathOpts.group}"}
${pathOpts.frequency}
rotate ${toString pathOpts.keep}
${pathOpts.extraConfig}
} }
''; '';
configFile = pkgs.writeText "logrotate.conf" ( paths = sortProperties (mapAttrsToList (name: pathOpts: pathOpts // { name = name; }) (filterAttrs (_: pathOpts: pathOpts.enable) cfg.paths));
(concatStringsSep "\n" ((map pathConfig cfg.paths) ++ [cfg.extraConfig])) configFile = pkgs.writeText "logrotate.conf" (concatStringsSep "\n" ((map mkConf paths) ++ [ cfg.extraConfig ]));
);
in in
{ {
@ -65,41 +104,66 @@ in
enable = mkEnableOption "the logrotate systemd service"; enable = mkEnableOption "the logrotate systemd service";
paths = mkOption { paths = mkOption {
type = types.listOf (types.submodule pathOptions); type = with types; attrsOf (submodule pathOpts);
default = []; default = {};
description = "List of attribute sets with paths to rotate"; description = ''
example = { Attribute set of paths to rotate. The order each block appears in the generated configuration file
"/var/log/myapp/*.log" = { can be controlled by the <link linkend="opt-services.logrotate.paths._name_.priority">priority</link> option
user = "myuser"; using the same semantics as `lib.mkOrder`. Smaller values have a greater priority.
group = "mygroup"; '';
rotate = "weekly"; example = literalExample ''
keep = 5; {
}; httpd = {
}; path = "/var/log/httpd/*.log";
user = config.services.httpd.user;
group = config.services.httpd.group;
keep = 7;
};
myapp = {
path = "/var/log/myapp/*.log";
user = "myuser";
group = "mygroup";
frequency = "weekly";
keep = 5;
priority = 1;
};
}
'';
}; };
extraConfig = mkOption { extraConfig = mkOption {
default = ""; default = "";
type = types.lines; type = types.lines;
description = '' description = ''
Extra contents to add to the logrotate config file. Extra contents to append to the logrotate configuration file. Refer to
See https://linux.die.net/man/8/logrotate <link xlink:href="https://linux.die.net/man/8/logrotate"/> for details.
''; '';
}; };
}; };
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
systemd.services.logrotate = { assertions = mapAttrsToList (name: pathOpts:
description = "Logrotate Service"; { assertion = (pathOpts.user != null) == (pathOpts.group != null);
wantedBy = [ "multi-user.target" ]; message = ''
startAt = "*-*-* *:05:00"; If either of `services.logrotate.paths.${name}.user` or `services.logrotate.paths.${name}.group` are specified then *both* must be specified.
'';
}
) cfg.paths;
serviceConfig.Restart = "no"; systemd.services.logrotate = {
serviceConfig.User = "root"; description = "Logrotate Service";
wantedBy = [ "multi-user.target" ];
startAt = "*-*-* *:05:00";
script = '' script = ''
exec ${pkgs.logrotate}/sbin/logrotate ${configFile} exec ${pkgs.logrotate}/sbin/logrotate ${configFile}
''; '';
serviceConfig = {
Restart = "no";
User = "root";
};
}; };
}; };
} }
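
Two things the reworked interface allows that the old list form could not: switching off a rotation block contributed by another module, and reordering blocks. A hedged sketch; the httpd attribute name mirrors the example above, and the nginx block is purely illustrative.

{
  services.logrotate.paths = {
    # Disable a block that some other NixOS module defined:
    httpd.enable = false;

    # Place this block ahead of the default-priority (1000) ones:
    nginx = {
      path = "/var/log/nginx/*.log";
      frequency = "weekly";
      keep = 10;
      priority = 100;
    };
  };
}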

View File

@ -4,13 +4,9 @@ with lib;
let let
cfg = config.services.logstash; cfg = config.services.logstash;
pluginPath = lib.concatStringsSep ":" cfg.plugins;
havePluginPath = lib.length cfg.plugins > 0;
ops = lib.optionalString; ops = lib.optionalString;
verbosityFlag = "--log.level " + cfg.logLevel; verbosityFlag = "--log.level " + cfg.logLevel;
pluginsPath = "--path.plugins ${pluginPath}";
logstashConf = pkgs.writeText "logstash.conf" '' logstashConf = pkgs.writeText "logstash.conf" ''
input { input {
${cfg.inputConfig} ${cfg.inputConfig}
@ -173,7 +169,7 @@ in
ExecStart = concatStringsSep " " (filter (s: stringLength s != 0) [ ExecStart = concatStringsSep " " (filter (s: stringLength s != 0) [
"${cfg.package}/bin/logstash" "${cfg.package}/bin/logstash"
"-w ${toString cfg.filterWorkers}" "-w ${toString cfg.filterWorkers}"
(ops havePluginPath pluginsPath) (concatMapStringsSep " " (x: "--path.plugins ${x}") cfg.plugins)
"${verbosityFlag}" "${verbosityFlag}"
"-f ${logstashConf}" "-f ${logstashConf}"
"--path.settings ${logstashSettingsDir}" "--path.settings ${logstashSettingsDir}"

View File

@ -1,4 +1,4 @@
{ config, lib, pkgs, ... }: { options, config, lib, pkgs, ... }:
with lib; with lib;
@ -83,11 +83,11 @@ let
) )
( (
optionalString (cfg.mailboxes != []) '' optionalString (cfg.mailboxes != {}) ''
protocol imap { protocol imap {
namespace inbox { namespace inbox {
inbox=yes inbox=yes
${concatStringsSep "\n" (map mailboxConfig cfg.mailboxes)} ${concatStringsSep "\n" (map mailboxConfig (attrValues cfg.mailboxes))}
} }
} }
'' ''
@ -131,12 +131,13 @@ let
special_use = \${toString mailbox.specialUse} special_use = \${toString mailbox.specialUse}
'' + "}"; '' + "}";
mailboxes = { ... }: { mailboxes = { name, ... }: {
options = { options = {
name = mkOption { name = mkOption {
type = types.nullOr (types.strMatching ''[^"]+''); type = types.strMatching ''[^"]+'';
example = "Spam"; example = "Spam";
default = null; default = name;
readOnly = true;
description = "The name of the mailbox."; description = "The name of the mailbox.";
}; };
auto = mkOption { auto = mkOption {
@ -335,19 +336,11 @@ in
}; };
mailboxes = mkOption { mailboxes = mkOption {
type = with types; let m = submodule mailboxes; in either (listOf m) (attrsOf m); type = with types; coercedTo
(listOf unspecified)
(list: listToAttrs (map (entry: { name = entry.name; value = removeAttrs entry ["name"]; }) list))
(attrsOf (submodule mailboxes));
default = {}; default = {};
apply = x:
if isList x then warn "Declaring `services.dovecot2.mailboxes' as a list is deprecated and will break eval in 21.03!" x
else mapAttrsToList (name: value:
if value.name != null
then throw ''
When specifying dovecot2 mailboxes as attributes, declaring
a `name'-attribute is prohibited! The name ${value.name} should
be the attribute key!
''
else value // { inherit name; }
) x;
example = literalExample '' example = literalExample ''
{ {
Spam = { specialUse = "Junk"; auto = "create"; }; Spam = { specialUse = "Junk"; auto = "create"; };
@ -471,6 +464,10 @@ in
environment.systemPackages = [ dovecotPkg ]; environment.systemPackages = [ dovecotPkg ];
warnings = mkIf (any isList options.services.dovecot2.mailboxes.definitions) [
"Declaring `services.dovecot2.mailboxes' as a list is deprecated and will break eval in 21.03! See the release notes for more info for migration."
];
assertions = [ assertions = [
{ {
assertion = intersectLists cfg.protocols [ "pop3" "imap" ] != []; assertion = intersectLists cfg.protocols [ "pop3" "imap" ] != [];
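
For existing configurations, the coercedTo type keeps the old list form working for now (with the warning above), but the attribute-set form is the target. The same mailbox written both ways, names purely illustrative:

# Deprecated list form -- still accepted, warns, breaks in 21.03:
services.dovecot2.mailboxes = [
  { name = "Spam"; specialUse = "Junk"; auto = "create"; }
];

# Attribute-set form; the attribute name becomes the mailbox name:
services.dovecot2.mailboxes = {
  Spam = { specialUse = "Junk"; auto = "create"; };
};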

View File

@ -172,7 +172,7 @@ in {
}; };
database = mkOption { database = mkOption {
type = types.str; type = types.nullOr types.str;
default = null; default = null;
description = "Database name to store sms data"; description = "Database name to store sms data";
}; };

View File

@ -50,6 +50,12 @@ in
description = "Parse and interpret emoji tags"; description = "Parse and interpret emoji tags";
}; };
h1-title = mkOption {
type = types.bool;
default = false;
description = "Use the first h1 as page title";
};
branch = mkOption { branch = mkOption {
type = types.str; type = types.str;
default = "master"; default = "master";
@ -102,6 +108,7 @@ in
--ref ${cfg.branch} \ --ref ${cfg.branch} \
${optionalString cfg.mathjax "--mathjax"} \ ${optionalString cfg.mathjax "--mathjax"} \
${optionalString cfg.emoji "--emoji"} \ ${optionalString cfg.emoji "--emoji"} \
${optionalString cfg.h1-title "--h1-title"} \
${optionalString (cfg.allowUploads != null) "--allow-uploads ${cfg.allowUploads}"} \ ${optionalString (cfg.allowUploads != null) "--allow-uploads ${cfg.allowUploads}"} \
${cfg.stateDir} ${cfg.stateDir}
''; '';
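
A short sketch of the new flag, assuming the module's usual enable switch and an otherwise configured Gollum service:

{
  services.gollum = {
    enable = true;
    # Passes --h1-title so the first h1 of a page is used as its title:
    h1-title = true;
  };
}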

View File

@ -68,8 +68,8 @@ in
plugins = mkOption { plugins = mkOption {
default = plugins: []; default = plugins: [];
defaultText = "plugins: []"; defaultText = "plugins: []";
example = literalExample "plugins: [ m3d-fio ]"; example = literalExample "plugins: with plugins; [ m33-fio stlviewer ]";
description = "Additional plugins."; description = "Additional plugins to be used. Available plugins are passed through the plugins input.";
}; };
extraConfig = mkOption { extraConfig = mkOption {

View File

@ -29,13 +29,15 @@ in {
config = mkIf cfg.enable { config = mkIf cfg.enable {
systemd.services.ssm-agent = { systemd.services.ssm-agent = {
users.extraUsers.ssm-user = {};
inherit (cfg.package.meta) description; inherit (cfg.package.meta) description;
after = [ "network.target" ]; after = [ "network.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
path = [ fake-lsb-release ]; path = [ fake-lsb-release pkgs.coreutils ];
serviceConfig = { serviceConfig = {
ExecStart = "${cfg.package}/bin/agent"; ExecStart = "${cfg.package}/bin/amazon-ssm-agent";
KillMode = "process"; KillMode = "process";
Restart = "on-failure"; Restart = "on-failure";
RestartSec = "15min"; RestartSec = "15min";

View File

@ -69,7 +69,7 @@ in {
mode = "0400"; mode = "0400";
}; };
system.nssModules = pkgs.sssd; system.nssModules = [ pkgs.sssd ];
system.nssDatabases = { system.nssDatabases = {
group = [ "sss" ]; group = [ "sss" ];
passwd = [ "sss" ]; passwd = [ "sss" ];
@ -92,4 +92,6 @@ in {
services.openssh.authorizedKeysCommand = "/etc/ssh/authorized_keys_command"; services.openssh.authorizedKeysCommand = "/etc/ssh/authorized_keys_command";
services.openssh.authorizedKeysCommandUser = "nobody"; services.openssh.authorizedKeysCommandUser = "nobody";
})]; })];
meta.maintainers = with maintainers; [ bbigras ];
} }

View File

@ -4,19 +4,29 @@ with lib;
let let
cfg = config.services.monit; cfg = config.services.monit;
extraConfig = pkgs.writeText "monitConfig" cfg.extraConfig;
in in
{ {
imports = [
(mkRenamedOptionModule [ "services" "monit" "config" ] ["services" "monit" "extraConfig" ])
];
options.services.monit = { options.services.monit = {
enable = mkEnableOption "Monit"; enable = mkEnableOption "Monit";
config = mkOption { configFiles = mkOption {
type = types.lines; type = types.listOf types.path;
default = ""; default = [];
description = "monitrc content"; description = "List of paths to be included in the monitrc file";
}; };
extraConfig = mkOption {
type = types.lines;
default = "";
description = "Additional monit config as string";
};
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
@ -24,7 +34,7 @@ in
environment.systemPackages = [ pkgs.monit ]; environment.systemPackages = [ pkgs.monit ];
environment.etc.monitrc = { environment.etc.monitrc = {
text = cfg.config; text = concatMapStringsSep "\n" (path: "include ${path}") (cfg.configFiles ++ [extraConfig]);
mode = "0400"; mode = "0400";
}; };
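
With the rename, inline monitrc snippets go into extraConfig and whole files are pulled in through configFiles. A sketch; the file path and the check contents are illustrative only.

{
  services.monit = {
    enable = true;
    # Inline snippet (previously services.monit.config):
    extraConfig = ''
      set daemon 120
    '';
    # Extra files included from the generated /etc/monitrc:
    configFiles = [ ./monit/checks.monitrc ];
  };
}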

View File

@ -46,7 +46,7 @@ let
cmdlineArgs = cfg.extraFlags ++ [ cmdlineArgs = cfg.extraFlags ++ [
"--storage.tsdb.path=${workingDir}/data/" "--storage.tsdb.path=${workingDir}/data/"
"--config.file=${prometheusYml}" "--config.file=${prometheusYml}"
"--web.listen-address=${cfg.listenAddress}" "--web.listen-address=${cfg.listenAddress}:${builtins.toString cfg.port}"
"--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}" "--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}"
"--alertmanager.timeout=${toString cfg.alertmanagerTimeout}s" "--alertmanager.timeout=${toString cfg.alertmanagerTimeout}s"
] ++ ] ++
@ -489,9 +489,17 @@ in {
''; '';
}; };
port = mkOption {
type = types.port;
default = 9090;
description = ''
Port to listen on.
'';
};
listenAddress = mkOption { listenAddress = mkOption {
type = types.str; type = types.str;
default = "0.0.0.0:9090"; default = "0.0.0.0";
description = '' description = ''
Address to listen on for the web interface, API, and telemetry. Address to listen on for the web interface, API, and telemetry.
''; '';
@ -619,6 +627,21 @@ in {
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
assertions = [
( let
legacy = builtins.match "(.*):(.*)" cfg.listenAddress;
in {
assertion = legacy == null;
message = ''
Do not specify the port for Prometheus to listen on in the
listenAddress option; use the port option instead:
services.prometheus.listenAddress = ${builtins.elemAt legacy 0};
services.prometheus.port = ${builtins.elemAt legacy 1};
'';
}
)
];
users.groups.prometheus.gid = config.ids.gids.prometheus; users.groups.prometheus.gid = config.ids.gids.prometheus;
users.users.prometheus = { users.users.prometheus = {
description = "Prometheus daemon user"; description = "Prometheus daemon user";

View File

@ -20,7 +20,7 @@ let
${pkgs.coreutils}/bin/cat << EOF ${pkgs.coreutils}/bin/cat << EOF
From: smartd on ${host} <${nm.sender}> From: smartd on ${host} <${nm.sender}>
To: undisclosed-recipients:; To: undisclosed-recipients:;
Subject: SMART error on $SMARTD_DEVICESTRING: $SMARTD_FAILTYPE Subject: $SMARTD_SUBJECT
$SMARTD_FULLMESSAGE $SMARTD_FULLMESSAGE
EOF EOF
@ -239,11 +239,7 @@ in
systemd.services.smartd = { systemd.services.smartd = {
description = "S.M.A.R.T. Daemon"; description = "S.M.A.R.T. Daemon";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
path = [ pkgs.nettools ]; # for hostname and dnsdomanname calls in smartd
serviceConfig.ExecStart = "${pkgs.smartmontools}/sbin/smartd ${lib.concatStringsSep " " cfg.extraOptions} --no-fork --configfile=${smartdConf}"; serviceConfig.ExecStart = "${pkgs.smartmontools}/sbin/smartd ${lib.concatStringsSep " " cfg.extraOptions} --no-fork --configfile=${smartdConf}";
}; };

View File

@ -5,8 +5,8 @@ let
pgsql = config.services.postgresql; pgsql = config.services.postgresql;
mysql = config.services.mysql; mysql = config.services.mysql;
inherit (lib) mkDefault mkEnableOption mkIf mkMerge mkOption; inherit (lib) mkAfter mkDefault mkEnableOption mkIf mkMerge mkOption;
inherit (lib) attrValues concatMapStringsSep literalExample optional optionalAttrs optionalString types; inherit (lib) attrValues concatMapStringsSep getName literalExample optional optionalAttrs optionalString types;
inherit (lib.generators) toKeyValue; inherit (lib.generators) toKeyValue;
user = "zabbix"; user = "zabbix";
@ -232,14 +232,15 @@ in
services.mysql = optionalAttrs mysqlLocal { services.mysql = optionalAttrs mysqlLocal {
enable = true; enable = true;
package = mkDefault pkgs.mariadb; package = mkDefault pkgs.mariadb;
ensureDatabases = [ cfg.database.name ];
ensureUsers = [
{ name = cfg.database.user;
ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
}
];
}; };
systemd.services.mysql.postStart = mkAfter (optionalString mysqlLocal ''
( echo "CREATE DATABASE IF NOT EXISTS \`${cfg.database.name}\` CHARACTER SET utf8 COLLATE utf8_bin;"
echo "CREATE USER IF NOT EXISTS '${cfg.database.user}'@'localhost' IDENTIFIED WITH ${if (getName config.services.mysql.package == getName pkgs.mariadb) then "unix_socket" else "auth_socket"};"
echo "GRANT ALL PRIVILEGES ON \`${cfg.database.name}\`.* TO '${cfg.database.user}'@'localhost';"
) | ${config.services.mysql.package}/bin/mysql -N
'');
services.postgresql = optionalAttrs pgsqlLocal { services.postgresql = optionalAttrs pgsqlLocal {
enable = true; enable = true;
ensureDatabases = [ cfg.database.name ]; ensureDatabases = [ cfg.database.name ];

View File

@ -5,8 +5,8 @@ let
pgsql = config.services.postgresql; pgsql = config.services.postgresql;
mysql = config.services.mysql; mysql = config.services.mysql;
inherit (lib) mkDefault mkEnableOption mkIf mkMerge mkOption; inherit (lib) mkAfter mkDefault mkEnableOption mkIf mkMerge mkOption;
inherit (lib) attrValues concatMapStringsSep literalExample optional optionalAttrs optionalString types; inherit (lib) attrValues concatMapStringsSep getName literalExample optional optionalAttrs optionalString types;
inherit (lib.generators) toKeyValue; inherit (lib.generators) toKeyValue;
user = "zabbix"; user = "zabbix";
@ -220,14 +220,15 @@ in
services.mysql = optionalAttrs mysqlLocal { services.mysql = optionalAttrs mysqlLocal {
enable = true; enable = true;
package = mkDefault pkgs.mariadb; package = mkDefault pkgs.mariadb;
ensureDatabases = [ cfg.database.name ];
ensureUsers = [
{ name = cfg.database.user;
ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
}
];
}; };
systemd.services.mysql.postStart = mkAfter (optionalString mysqlLocal ''
( echo "CREATE DATABASE IF NOT EXISTS \`${cfg.database.name}\` CHARACTER SET utf8 COLLATE utf8_bin;"
echo "CREATE USER IF NOT EXISTS '${cfg.database.user}'@'localhost' IDENTIFIED WITH ${if (getName config.services.mysql.package == getName pkgs.mariadb) then "unix_socket" else "auth_socket"};"
echo "GRANT ALL PRIVILEGES ON \`${cfg.database.name}\`.* TO '${cfg.database.user}'@'localhost';"
) | ${config.services.mysql.package}/bin/mysql -N
'');
services.postgresql = optionalAttrs pgsqlLocal { services.postgresql = optionalAttrs pgsqlLocal {
enable = true; enable = true;
ensureDatabases = [ cfg.database.name ]; ensureDatabases = [ cfg.database.name ];

View File

@ -83,14 +83,14 @@ in {
}; };
dataStorageSpace = mkOption { dataStorageSpace = mkOption {
type = types.str; type = types.nullOr types.str;
default = null; default = null;
example = "/data/storage"; example = "/data/storage";
description = "Directory for data storage."; description = "Directory for data storage.";
}; };
metadataStorageSpace = mkOption { metadataStorageSpace = mkOption {
type = types.str; type = types.nullOr types.str;
default = null; default = null;
example = "/data/meta"; example = "/data/meta";
description = "Directory for meta data storage."; description = "Directory for meta data storage.";

View File

@ -87,7 +87,7 @@ in
}; };
rpc.password = mkOption { rpc.password = mkOption {
type = types.str; type = types.nullOr types.str;
default = null; default = null;
description = '' description = ''
Password for RPC connections. Password for RPC connections.

View File

@ -89,7 +89,7 @@ in
}; };
rpc.password = mkOption { rpc.password = mkOption {
type = types.str; type = types.nullOr types.str;
default = null; default = null;
description = '' description = ''
Password for RPC connections. Password for RPC connections.

View File

@ -11,8 +11,13 @@ let
method = cfg.encryptionMethod; method = cfg.encryptionMethod;
mode = cfg.mode; mode = cfg.mode;
user = "nobody"; user = "nobody";
fast_open = true; fast_open = cfg.fastOpen;
} // optionalAttrs (cfg.password != null) { password = cfg.password; }; } // optionalAttrs (cfg.plugin != null) {
plugin = cfg.plugin;
plugin_opts = cfg.pluginOpts;
} // optionalAttrs (cfg.password != null) {
password = cfg.password;
};
configFile = pkgs.writeText "shadowsocks.json" (builtins.toJSON opts); configFile = pkgs.writeText "shadowsocks.json" (builtins.toJSON opts);
@ -74,6 +79,14 @@ in
''; '';
}; };
fastOpen = mkOption {
type = types.bool;
default = true;
description = ''
use TCP fast-open
'';
};
encryptionMethod = mkOption { encryptionMethod = mkOption {
type = types.str; type = types.str;
default = "chacha20-ietf-poly1305"; default = "chacha20-ietf-poly1305";
@ -82,6 +95,23 @@ in
''; '';
}; };
plugin = mkOption {
type = types.nullOr types.str;
default = null;
example = "\${pkgs.shadowsocks-v2ray-plugin}/bin/v2ray-plugin";
description = ''
SIP003 plugin for shadowsocks
'';
};
pluginOpts = mkOption {
type = types.str;
default = "";
example = "server;host=example.com";
description = ''
Options to pass to the plugin if one was specified
'';
};
}; };
}; };
@ -99,7 +129,7 @@ in
description = "shadowsocks-libev Daemon"; description = "shadowsocks-libev Daemon";
after = [ "network.target" ]; after = [ "network.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
path = [ pkgs.shadowsocks-libev ] ++ optional (cfg.passwordFile != null) pkgs.jq; path = [ pkgs.shadowsocks-libev cfg.plugin ] ++ optional (cfg.passwordFile != null) pkgs.jq;
serviceConfig.PrivateTmp = true; serviceConfig.PrivateTmp = true;
script = '' script = ''
${optionalString (cfg.passwordFile != null) '' ${optionalString (cfg.passwordFile != null) ''
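
A sketch combining the new options, following the examples given in the option declarations; the services.shadowsocks prefix and the passwordFile path are assumptions for illustration, and the plugin line mirrors the example above.

{ pkgs, ... }:
{
  services.shadowsocks = {
    enable = true;
    fastOpen = false;   # TCP fast-open defaults to true; turned off explicitly here
    plugin = "${pkgs.shadowsocks-v2ray-plugin}/bin/v2ray-plugin";
    pluginOpts = "server;host=example.com";
    passwordFile = "/run/keys/shadowsocks-password";  # illustrative path
  };
}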

View File

@ -233,6 +233,9 @@ in {
path = [ pkgs.wpa_supplicant ]; path = [ pkgs.wpa_supplicant ];
script = '' script = ''
if [ -f /etc/wpa_supplicant.conf -a "/etc/wpa_supplicant.conf" != "${configFile}" ]
then echo >&2 "<3>/etc/wpa_supplicant.conf present but ignored. Generated ${configFile} is used instead."
fi
iface_args="-s -u -D${cfg.driver} -c ${configFile}" iface_args="-s -u -D${cfg.driver} -c ${configFile}"
${if ifaces == [] then '' ${if ifaces == [] then ''
for i in $(cd /sys/class/net && echo *); do for i in $(cd /sys/class/net && echo *); do

View File

@ -52,6 +52,14 @@ in
''; '';
}; };
lockMessage = mkOption {
type = types.str;
default = "";
description = ''
Message to show on physlock login terminal.
'';
};
lockOn = { lockOn = {
suspend = mkOption { suspend = mkOption {
@ -111,7 +119,7 @@ in
++ cfg.lockOn.extraTargets; ++ cfg.lockOn.extraTargets;
serviceConfig = { serviceConfig = {
Type = "forking"; Type = "forking";
ExecStart = "${pkgs.physlock}/bin/physlock -d${optionalString cfg.disableSysRq "s"}"; ExecStart = "${pkgs.physlock}/bin/physlock -d${optionalString cfg.disableSysRq "s"}${optionalString (cfg.lockMessage != "") " -p \"${cfg.lockMessage}\""}";
}; };
}; };
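
A sketch of the new option in context; the message text is arbitrary and enable is assumed to be the module's standard switch.

{
  services.physlock = {
    enable = true;
    # Passed to physlock as -p "..." and shown on the lock terminal:
    lockMessage = "This machine is locked.";
  };
}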

View File

@ -77,7 +77,6 @@ in {
// Paths // Paths
WOSendMail = "/run/wrappers/bin/sendmail"; WOSendMail = "/run/wrappers/bin/sendmail";
SOGoMailSpoolPath = "/var/lib/sogo/spool"; SOGoMailSpoolPath = "/var/lib/sogo/spool";
SOGoZipPath = "${pkgs.zip}/bin/zip";
// Enable CSRF protection // Enable CSRF protection
SOGoXSRFValidationEnabled = YES; SOGoXSRFValidationEnabled = YES;
// Remove dates from log (journald does that) // Remove dates from log (journald does that)

View File

@ -661,6 +661,25 @@ in
pkg pkg
]; ];
services.logrotate = optionalAttrs (cfg.logFormat != "none") {
enable = mkDefault true;
paths.httpd = {
path = "${cfg.logDir}/*.log";
user = cfg.user;
group = cfg.group;
frequency = "daily";
keep = 28;
extraConfig = ''
sharedscripts
compress
delaycompress
postrotate
systemctl reload httpd.service > /dev/null 2>/dev/null || true
endscript
'';
};
};
services.httpd.phpOptions = services.httpd.phpOptions =
'' ''
; Needed for PHP's mail() function. ; Needed for PHP's mail() function.

View File

@ -1,174 +0,0 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.meguca;
postgres = config.services.postgresql;
in with lib; {
options.services.meguca = {
enable = mkEnableOption "meguca";
dataDir = mkOption {
type = types.path;
default = "/var/lib/meguca";
example = "/home/okina/meguca";
description = "Location where meguca stores it's database and links.";
};
password = mkOption {
type = types.str;
default = "meguca";
example = "dumbpass";
description = "Password for the meguca database.";
};
passwordFile = mkOption {
type = types.path;
default = "/run/keys/meguca-password-file";
example = "/home/okina/meguca/keys/pass";
description = "Password file for the meguca database.";
};
reverseProxy = mkOption {
type = types.nullOr types.str;
default = null;
example = "192.168.1.5";
description = "Reverse proxy IP.";
};
sslCertificate = mkOption {
type = types.nullOr types.str;
default = null;
example = "/home/okina/meguca/ssl.cert";
description = "Path to the SSL certificate.";
};
listenAddress = mkOption {
type = types.nullOr types.str;
default = null;
example = "127.0.0.1:8000";
description = "Listen on a specific IP address and port.";
};
cacheSize = mkOption {
type = types.nullOr types.int;
default = null;
example = 256;
description = "Cache size in MB.";
};
postgresArgs = mkOption {
type = types.str;
example = "user=meguca password=dumbpass dbname=meguca sslmode=disable";
description = "Postgresql connection arguments.";
};
postgresArgsFile = mkOption {
type = types.path;
default = "/run/keys/meguca-postgres-args";
example = "/home/okina/meguca/keys/postgres";
description = "Postgresql connection arguments file.";
};
compressTraffic = mkOption {
type = types.bool;
default = false;
description = "Compress all traffic with gzip.";
};
assumeReverseProxy = mkOption {
type = types.bool;
default = false;
description = "Assume the server is behind a reverse proxy, when resolving client IPs.";
};
httpsOnly = mkOption {
type = types.bool;
default = false;
description = "Serve and listen only through HTTPS.";
};
videoPaths = mkOption {
type = types.listOf types.path;
default = [];
example = [ "/home/okina/Videos/tehe_pero.webm" ];
description = "Videos that will be symlinked into www/videos.";
};
};
config = mkIf cfg.enable {
security.sudo.enable = cfg.enable;
services.postgresql.enable = cfg.enable;
services.postgresql.package = pkgs.postgresql_11;
services.meguca.passwordFile = mkDefault (pkgs.writeText "meguca-password-file" cfg.password);
services.meguca.postgresArgsFile = mkDefault (pkgs.writeText "meguca-postgres-args" cfg.postgresArgs);
services.meguca.postgresArgs = mkDefault "user=meguca password=${cfg.password} dbname=meguca sslmode=disable";
systemd.services.meguca = {
description = "meguca";
after = [ "network.target" "postgresql.service" ];
wantedBy = [ "multi-user.target" ];
preStart = ''
# Ensure folder exists or create it and links and permissions are correct
mkdir -p ${escapeShellArg cfg.dataDir}/www
rm -rf ${escapeShellArg cfg.dataDir}/www/videos
ln -sf ${pkgs.meguca}/share/meguca/www/* ${escapeShellArg cfg.dataDir}/www
unlink ${escapeShellArg cfg.dataDir}/www/videos
mkdir -p ${escapeShellArg cfg.dataDir}/www/videos
for vid in ${escapeShellArg cfg.videoPaths}; do
ln -sf $vid ${escapeShellArg cfg.dataDir}/www/videos
done
chmod 750 ${escapeShellArg cfg.dataDir}
chown -R meguca:meguca ${escapeShellArg cfg.dataDir}
# Ensure the database is correct or create it
${pkgs.sudo}/bin/sudo -u ${postgres.superUser} ${postgres.package}/bin/createuser \
-SDR meguca || true
${pkgs.sudo}/bin/sudo -u ${postgres.superUser} ${postgres.package}/bin/createdb \
-T template0 -E UTF8 -O meguca meguca || true
${pkgs.sudo}/bin/sudo -u meguca ${postgres.package}/bin/psql \
-c "ALTER ROLE meguca WITH PASSWORD '$(cat ${escapeShellArg cfg.passwordFile})';" || true
'';
script = ''
cd ${escapeShellArg cfg.dataDir}
${pkgs.meguca}/bin/meguca -d "$(cat ${escapeShellArg cfg.postgresArgsFile})"''
+ optionalString (cfg.reverseProxy != null) " -R ${cfg.reverseProxy}"
+ optionalString (cfg.sslCertificate != null) " -S ${cfg.sslCertificate}"
+ optionalString (cfg.listenAddress != null) " -a ${cfg.listenAddress}"
+ optionalString (cfg.cacheSize != null) " -c ${toString cfg.cacheSize}"
+ optionalString (cfg.compressTraffic) " -g"
+ optionalString (cfg.assumeReverseProxy) " -r"
+ optionalString (cfg.httpsOnly) " -s" + " start";
serviceConfig = {
PermissionsStartOnly = true;
Type = "forking";
User = "meguca";
Group = "meguca";
ExecStop = "${pkgs.meguca}/bin/meguca stop";
};
};
users = {
groups.meguca.gid = config.ids.gids.meguca;
users.meguca = {
description = "meguca server service user";
home = cfg.dataDir;
createHome = true;
group = "meguca";
uid = config.ids.uids.meguca;
};
};
};
imports = [
(mkRenamedOptionModule [ "services" "meguca" "baseDir" ] [ "services" "meguca" "dataDir" ])
];
meta.maintainers = with maintainers; [ chiiruno ];
}

View File

@ -61,7 +61,8 @@ in
"--kill" "--kill"
] ++ cfg.extraOptions); ] ++ cfg.extraOptions);
ExecStop = "${pkgs.procps}/bin/pkill imwheel"; ExecStop = "${pkgs.procps}/bin/pkill imwheel";
Restart = "on-failure"; RestartSec = 3;
Restart = "always";
}; };
}; };
}; };

View File

@ -641,7 +641,7 @@ in
credential = mkOption { credential = mkOption {
default = null; default = null;
example = "f1d00200d8dc783f7fb1e10ace8da27f8312d72692abfca2f7e4960a73f48e82e1f7571f6ebfcee9fb434f9886ccc8fcc52a6614d8d2"; example = "f1d00200d8dc783f7fb1e10ace8da27f8312d72692abfca2f7e4960a73f48e82e1f7571f6ebfcee9fb434f9886ccc8fcc52a6614d8d2";
type = types.str; type = types.nullOr types.str;
description = "The FIDO2 credential ID."; description = "The FIDO2 credential ID.";
}; };

View File

@ -378,12 +378,14 @@ mountFS() {
mkdir -p "/mnt-root$mountPoint" mkdir -p "/mnt-root$mountPoint"
# For CIFS mounts, retry a few times before giving up. # For ZFS and CIFS mounts, retry a few times before giving up.
# We do this for ZFS as a workaround for issue NixOS/nixpkgs#25383.
local n=0 local n=0
while true; do while true; do
mount "/mnt-root$mountPoint" && break mount "/mnt-root$mountPoint" && break
if [ "$fsType" != cifs -o "$n" -ge 10 ]; then fail; break; fi if [ \( "$fsType" != cifs -a "$fsType" != zfs \) -o "$n" -ge 10 ]; then fail; break; fi
echo "retrying..." echo "retrying..."
sleep 1
n=$((n + 1)) n=$((n + 1))
done done

View File

@ -36,7 +36,7 @@ let
set -euo pipefail set -euo pipefail
declare -A seen declare -A seen
declare -a left left=()
patchelf="${pkgs.buildPackages.patchelf}/bin/patchelf" patchelf="${pkgs.buildPackages.patchelf}/bin/patchelf"
@ -48,7 +48,7 @@ let
done done
} }
add_needed $1 add_needed "$1"
while [ ''${#left[@]} -ne 0 ]; do while [ ''${#left[@]} -ne 0 ]; do
next=''${left[0]} next=''${left[0]}
@ -87,7 +87,9 @@ let
# copy what we need. Instead of using statically linked binaries, # copy what we need. Instead of using statically linked binaries,
# we just copy what we need from Glibc and use patchelf to make it # we just copy what we need from Glibc and use patchelf to make it
# work. # work.
extraUtils = pkgs.runCommandCC "extra-utils" extraUtils = let
# Use lvm2 without udev support, which is the same lvm2 we already have in the closure anyways
lvm2 = pkgs.lvm2.override { udev = null; }; in pkgs.runCommandCC "extra-utils"
{ nativeBuildInputs = [pkgs.buildPackages.nukeReferences]; { nativeBuildInputs = [pkgs.buildPackages.nukeReferences];
allowedReferences = [ "out" ]; # prevent accidents like glibc being included in the initrd allowedReferences = [ "out" ]; # prevent accidents like glibc being included in the initrd
} }
@ -111,8 +113,8 @@ let
copy_bin_and_libs ${pkgs.utillinux}/sbin/blkid copy_bin_and_libs ${pkgs.utillinux}/sbin/blkid
# Copy dmsetup and lvm. # Copy dmsetup and lvm.
copy_bin_and_libs ${getBin pkgs.lvm2}/bin/dmsetup copy_bin_and_libs ${getBin lvm2}/bin/dmsetup
copy_bin_and_libs ${getBin pkgs.lvm2}/bin/lvm copy_bin_and_libs ${getBin lvm2}/bin/lvm
# Add RAID mdadm tool. # Add RAID mdadm tool.
copy_bin_and_libs ${pkgs.mdadm}/sbin/mdadm copy_bin_and_libs ${pkgs.mdadm}/sbin/mdadm

View File

@ -25,7 +25,7 @@ let
"nss-lookup.target" "nss-lookup.target"
"nss-user-lookup.target" "nss-user-lookup.target"
"time-sync.target" "time-sync.target"
#"cryptsetup.target" "cryptsetup.target"
"sigpwr.target" "sigpwr.target"
"timers.target" "timers.target"
"paths.target" "paths.target"

View File

@ -1129,7 +1129,6 @@ in
++ optionals config.networking.wireless.enable [ ++ optionals config.networking.wireless.enable [
pkgs.wirelesstools # FIXME: obsolete? pkgs.wirelesstools # FIXME: obsolete?
pkgs.iw pkgs.iw
pkgs.rfkill
] ]
++ bridgeStp; ++ bridgeStp;

View File

@ -110,6 +110,7 @@ in
''; '';
environment.etc."cni/net.d/10-crio-bridge.conf".source = copyFile "${pkgs.cri-o-unwrapped.src}/contrib/cni/10-crio-bridge.conf"; environment.etc."cni/net.d/10-crio-bridge.conf".source = copyFile "${pkgs.cri-o-unwrapped.src}/contrib/cni/10-crio-bridge.conf";
environment.etc."cni/net.d/99-loopback.conf".source = copyFile "${pkgs.cri-o-unwrapped.src}/contrib/cni/99-loopback.conf";
# Enable common /etc/containers configuration # Enable common /etc/containers configuration
virtualisation.containers.enable = true; virtualisation.containers.enable = true;

View File

@ -1,134 +0,0 @@
{ config, lib, pkgs, ... }:
with lib;
with builtins;
let
cfg = config.virtualisation;
sanitizeImageName = image: replaceStrings ["/"] ["-"] image.imageName;
hash = drv: head (split "-" (baseNameOf drv.outPath));
# The label of an ext4 FS is limited to 16 bytes
labelFromImage = image: substring 0 16 (hash image);
# The Docker image is loaded and some files from /var/lib/docker/
# are written into a qcow image.
preload = image: pkgs.vmTools.runInLinuxVM (
pkgs.runCommand "docker-preload-image-${sanitizeImageName image}" {
buildInputs = with pkgs; [ docker e2fsprogs utillinux curl kmod ];
preVM = pkgs.vmTools.createEmptyImage {
size = cfg.dockerPreloader.qcowSize;
fullName = "docker-deamon-image.qcow2";
};
}
''
mkfs.ext4 /dev/vda
e2label /dev/vda ${labelFromImage image}
mkdir -p /var/lib/docker
mount -t ext4 /dev/vda /var/lib/docker
modprobe overlay
# from https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
cd /sys/fs/cgroup
for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
mkdir -p $sys
if ! mountpoint -q $sys; then
if ! mount -n -t cgroup -o $sys cgroup $sys; then
rmdir $sys || true
fi
fi
done
dockerd -H tcp://127.0.0.1:5555 -H unix:///var/run/docker.sock &
until $(curl --output /dev/null --silent --connect-timeout 2 http://127.0.0.1:5555); do
printf '.'
sleep 1
done
docker load -i ${image}
kill %1
find /var/lib/docker/ -maxdepth 1 -mindepth 1 -not -name "image" -not -name "overlay2" | xargs rm -rf
'');
preloadedImages = map preload cfg.dockerPreloader.images;
in
{
options.virtualisation.dockerPreloader = {
images = mkOption {
default = [ ];
type = types.listOf types.package;
description =
''
A list of Docker images to preload (in the /var/lib/docker directory).
'';
};
qcowSize = mkOption {
default = 1024;
type = types.int;
description =
''
The size (MB) of qcow files.
'';
};
};
config = mkIf (cfg.dockerPreloader.images != []) {
assertions = [{
# If docker.storageDriver is null, Docker choose the storage
# driver. So, in this case, we cannot be sure overlay2 is used.
assertion = cfg.docker.storageDriver == "overlay2"
|| cfg.docker.storageDriver == "overlay"
|| cfg.docker.storageDriver == null;
message = "The Docker image Preloader only works with overlay2 storage driver!";
}];
virtualisation.qemu.options =
map (path: "-drive if=virtio,file=${path}/disk-image.qcow2,readonly,media=cdrom,format=qcow2")
preloadedImages;
# All attached QCOW files are mounted and their contents are linked
# to /var/lib/docker/ in order to make image available.
systemd.services.docker-preloader = {
description = "Preloaded Docker images";
wantedBy = ["docker.service"];
after = ["network.target"];
path = with pkgs; [ mount rsync jq ];
script = ''
mkdir -p /var/lib/docker/overlay2/l /var/lib/docker/image/overlay2
echo '{}' > /tmp/repositories.json
for i in ${concatStringsSep " " (map labelFromImage cfg.dockerPreloader.images)}; do
mkdir -p /mnt/docker-images/$i
# The ext4 label is limited to 16 bytes
mount /dev/disk/by-label/$(echo $i | cut -c1-16) -o ro,noload /mnt/docker-images/$i
find /mnt/docker-images/$i/overlay2/ -maxdepth 1 -mindepth 1 -not -name l\
-exec ln -s '{}' /var/lib/docker/overlay2/ \;
cp -P /mnt/docker-images/$i/overlay2/l/* /var/lib/docker/overlay2/l/
rsync -a /mnt/docker-images/$i/image/ /var/lib/docker/image/
# Accumulate image definitions
cp /tmp/repositories.json /tmp/repositories.json.tmp
jq -s '.[0] * .[1]' \
/tmp/repositories.json.tmp \
/mnt/docker-images/$i/image/overlay2/repositories.json \
> /tmp/repositories.json
done
mv /tmp/repositories.json /var/lib/docker/image/overlay2/repositories.json
'';
serviceConfig = {
Type = "oneshot";
};
};
};
}

View File

@ -32,7 +32,7 @@ in
}; };
package = mkOption { package = mkOption {
type = types.package; type = types.nullOr types.package;
default = config.boot.kernelPackages.prl-tools; default = config.boot.kernelPackages.prl-tools;
defaultText = "config.boot.kernelPackages.prl-tools"; defaultText = "config.boot.kernelPackages.prl-tools";
example = literalExample "config.boot.kernelPackages.prl-tools"; example = literalExample "config.boot.kernelPackages.prl-tools";

View File

@ -264,7 +264,6 @@ in
{ {
imports = [ imports = [
../profiles/qemu-guest.nix ../profiles/qemu-guest.nix
./docker-preloader.nix
]; ];
options = { options = {

View File

@ -34,6 +34,7 @@ in
bind = handleTest ./bind.nix {}; bind = handleTest ./bind.nix {};
bitcoind = handleTest ./bitcoind.nix {}; bitcoind = handleTest ./bitcoind.nix {};
bittorrent = handleTest ./bittorrent.nix {}; bittorrent = handleTest ./bittorrent.nix {};
bitwarden = handleTest ./bitwarden.nix {};
blockbook-frontend = handleTest ./blockbook-frontend.nix {}; blockbook-frontend = handleTest ./blockbook-frontend.nix {};
buildkite-agents = handleTest ./buildkite-agents.nix {}; buildkite-agents = handleTest ./buildkite-agents.nix {};
boot = handleTestOn ["x86_64-linux"] ./boot.nix {}; # syslinux is unsupported on aarch64 boot = handleTestOn ["x86_64-linux"] ./boot.nix {}; # syslinux is unsupported on aarch64
@ -65,11 +66,13 @@ in
containers-macvlans = handleTest ./containers-macvlans.nix {}; containers-macvlans = handleTest ./containers-macvlans.nix {};
containers-physical_interfaces = handleTest ./containers-physical_interfaces.nix {}; containers-physical_interfaces = handleTest ./containers-physical_interfaces.nix {};
containers-portforward = handleTest ./containers-portforward.nix {}; containers-portforward = handleTest ./containers-portforward.nix {};
containers-reloadable = handleTest ./containers-reloadable.nix {};
containers-restart_networking = handleTest ./containers-restart_networking.nix {}; containers-restart_networking = handleTest ./containers-restart_networking.nix {};
containers-tmpfs = handleTest ./containers-tmpfs.nix {}; containers-tmpfs = handleTest ./containers-tmpfs.nix {};
convos = handleTest ./convos.nix {}; convos = handleTest ./convos.nix {};
corerad = handleTest ./corerad.nix {}; corerad = handleTest ./corerad.nix {};
couchdb = handleTest ./couchdb.nix {}; couchdb = handleTest ./couchdb.nix {};
cri-o = handleTestOn ["x86_64-linux"] ./cri-o.nix {};
deluge = handleTest ./deluge.nix {}; deluge = handleTest ./deluge.nix {};
dhparams = handleTest ./dhparams.nix {}; dhparams = handleTest ./dhparams.nix {};
dnscrypt-proxy2 = handleTestOn ["x86_64-linux"] ./dnscrypt-proxy2.nix {}; dnscrypt-proxy2 = handleTestOn ["x86_64-linux"] ./dnscrypt-proxy2.nix {};
@ -78,15 +81,13 @@ in
docker = handleTestOn ["x86_64-linux"] ./docker.nix {}; docker = handleTestOn ["x86_64-linux"] ./docker.nix {};
oci-containers = handleTestOn ["x86_64-linux"] ./oci-containers.nix {}; oci-containers = handleTestOn ["x86_64-linux"] ./oci-containers.nix {};
docker-edge = handleTestOn ["x86_64-linux"] ./docker-edge.nix {}; docker-edge = handleTestOn ["x86_64-linux"] ./docker-edge.nix {};
docker-preloader = handleTestOn ["x86_64-linux"] ./docker-preloader.nix {};
docker-registry = handleTest ./docker-registry.nix {}; docker-registry = handleTest ./docker-registry.nix {};
docker-tools = handleTestOn ["x86_64-linux"] ./docker-tools.nix {}; docker-tools = handleTestOn ["x86_64-linux"] ./docker-tools.nix {};
docker-tools-overlay = handleTestOn ["x86_64-linux"] ./docker-tools-overlay.nix {}; docker-tools-overlay = handleTestOn ["x86_64-linux"] ./docker-tools-overlay.nix {};
documize = handleTest ./documize.nix {}; documize = handleTest ./documize.nix {};
dokuwiki = handleTest ./dokuwiki.nix {}; dokuwiki = handleTest ./dokuwiki.nix {};
dovecot = handleTest ./dovecot.nix {}; dovecot = handleTest ./dovecot.nix {};
# ec2-config doesn't work in a sandbox as the simulated ec2 instance needs network access ec2-config = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-config or {};
#ec2-config = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-config or {};
ec2-nixops = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-nixops or {}; ec2-nixops = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-nixops or {};
ecryptfs = handleTest ./ecryptfs.nix {}; ecryptfs = handleTest ./ecryptfs.nix {};
ejabberd = handleTest ./xmpp/ejabberd.nix {}; ejabberd = handleTest ./xmpp/ejabberd.nix {};
@ -306,6 +307,7 @@ in
sanoid = handleTest ./sanoid.nix {}; sanoid = handleTest ./sanoid.nix {};
sddm = handleTest ./sddm.nix {}; sddm = handleTest ./sddm.nix {};
service-runner = handleTest ./service-runner.nix {}; service-runner = handleTest ./service-runner.nix {};
shadowsocks = handleTest ./shadowsocks.nix {};
shattered-pixel-dungeon = handleTest ./shattered-pixel-dungeon.nix {}; shattered-pixel-dungeon = handleTest ./shattered-pixel-dungeon.nix {};
shiori = handleTest ./shiori.nix {}; shiori = handleTest ./shiori.nix {};
signal-desktop = handleTest ./signal-desktop.nix {}; signal-desktop = handleTest ./signal-desktop.nix {};
@ -320,6 +322,7 @@ in
spike = handleTest ./spike.nix {}; spike = handleTest ./spike.nix {};
sonarr = handleTest ./sonarr.nix {}; sonarr = handleTest ./sonarr.nix {};
sslh = handleTest ./sslh.nix {}; sslh = handleTest ./sslh.nix {};
sssd = handleTestOn ["x86_64-linux"] ./sssd.nix {};
strongswan-swanctl = handleTest ./strongswan-swanctl.nix {}; strongswan-swanctl = handleTest ./strongswan-swanctl.nix {};
sudo = handleTest ./sudo.nix {}; sudo = handleTest ./sudo.nix {};
switchTest = handleTest ./switch-test.nix {}; switchTest = handleTest ./switch-test.nix {};

188
nixos/tests/bitwarden.nix Normal file
View File

@ -0,0 +1,188 @@
{ system ? builtins.currentSystem
, config ? { }
, pkgs ? import ../.. { inherit system config; }
}:
# These tests will:
# * Set up a bitwarden-rs server
# * Have Firefox use the web vault to create an account, log in, and save a password to the vault
# * Have the bw cli log in and read that password from the vault
#
# Note that Firefox must be on the same machine as the server for WebCrypto APIs to be available (or HTTPS must be configured)
#
# The same tests should work without modification on the official bitwarden server, if we ever package that.
with import ../lib/testing-python.nix { inherit system pkgs; };
with pkgs.lib;
let
backends = [ "sqlite" "mysql" "postgresql" ];
dbPassword = "please_dont_hack";
userEmail = "meow@example.com";
userPassword = "also_super_secret_ZJWpBKZi668QGt"; # Must be complex to avoid interstitial warning on the signup page
storedPassword = "seeeecret";
makeBitwardenTest = backend: makeTest {
name = "bitwarden_rs-${backend}";
meta = {
maintainers = with pkgs.stdenv.lib.maintainers; [ jjjollyjim ];
};
nodes = {
server = { pkgs, ... }:
let backendConfig = {
mysql = {
services.mysql = {
enable = true;
initialScript = pkgs.writeText "mysql-init.sql" ''
CREATE DATABASE bitwarden;
CREATE USER 'bitwardenuser'@'localhost' IDENTIFIED BY '${dbPassword}';
GRANT ALL ON `bitwarden`.* TO 'bitwardenuser'@'localhost';
FLUSH PRIVILEGES;
'';
package = pkgs.mysql;
};
services.bitwarden_rs.config.databaseUrl = "mysql://bitwardenuser:${dbPassword}@localhost/bitwarden";
systemd.services.bitwarden_rs.after = [ "mysql.service" ];
};
postgresql = {
services.postgresql = {
enable = true;
initialScript = pkgs.writeText "postgresql-init.sql" ''
CREATE DATABASE bitwarden;
CREATE USER bitwardenuser WITH PASSWORD '${dbPassword}';
GRANT ALL PRIVILEGES ON DATABASE bitwarden TO bitwardenuser;
'';
};
services.bitwarden_rs.config.databaseUrl = "postgresql://bitwardenuser:${dbPassword}@localhost/bitwarden";
systemd.services.bitwarden_rs.after = [ "postgresql.service" ];
};
sqlite = { };
};
in
mkMerge [
backendConfig.${backend}
{
services.bitwarden_rs = {
enable = true;
dbBackend = backend;
config.rocketPort = 80;
};
networking.firewall.allowedTCPPorts = [ 80 ];
environment.systemPackages =
let
testRunner = pkgs.writers.writePython3Bin "test-runner"
{
libraries = [ pkgs.python3Packages.selenium ];
} ''
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument('--headless')
driver = Firefox(options=options)
driver.implicitly_wait(20)
driver.get('http://localhost/#/register')
wait = WebDriverWait(driver, 10)
wait.until(EC.title_contains("Create Account"))
driver.find_element_by_css_selector('input#email').send_keys(
'${userEmail}'
)
driver.find_element_by_css_selector('input#name').send_keys(
'A Cat'
)
driver.find_element_by_css_selector('input#masterPassword').send_keys(
'${userPassword}'
)
driver.find_element_by_css_selector('input#masterPasswordRetype').send_keys(
'${userPassword}'
)
driver.find_element_by_xpath("//button[contains(., 'Submit')]").click()
wait.until_not(EC.title_contains("Create Account"))
driver.find_element_by_css_selector('input#masterPassword').send_keys(
'${userPassword}'
)
driver.find_element_by_xpath("//button[contains(., 'Log In')]").click()
wait.until(EC.title_contains("My Vault"))
driver.find_element_by_xpath("//button[contains(., 'Add Item')]").click()
driver.find_element_by_css_selector('input#name').send_keys(
'secrets'
)
driver.find_element_by_css_selector('input#loginPassword').send_keys(
'${storedPassword}'
)
driver.find_element_by_xpath("//button[contains(., 'Save')]").click()
'';
in
[ pkgs.firefox-unwrapped pkgs.geckodriver testRunner ];
virtualisation.memorySize = 768;
}
];
client = { pkgs, ... }:
{
environment.systemPackages = [ pkgs.bitwarden-cli ];
};
};
testScript = ''
start_all()
server.wait_for_unit("bitwarden_rs.service")
server.wait_for_open_port(80)
with subtest("configure the cli"):
client.succeed("bw --nointeraction config server http://server")
with subtest("can't login to nonexistant account"):
client.fail(
"bw --nointeraction --raw login ${userEmail} ${userPassword}"
)
with subtest("use the web interface to sign up, log in, and save a password"):
server.succeed("PYTHONUNBUFFERED=1 test-runner | systemd-cat -t test-runner")
with subtest("log in with the cli"):
key = client.succeed(
"bw --nointeraction --raw login ${userEmail} ${userPassword}"
).strip()
with subtest("sync with the cli"):
client.succeed(f"bw --nointeraction --raw --session {key} sync -f")
with subtest("get the password with the cli"):
password = client.succeed(
f"bw --nointeraction --raw --session {key} list items | ${pkgs.jq}/bin/jq -r .[].login.password"
)
assert password.strip() == "${storedPassword}"
'';
};
in
builtins.listToAttrs (
map
(backend: { name = backend; value = makeBitwardenTest backend; })
backends
)

View File

@ -20,30 +20,44 @@ with pkgs.lib;
in makeTest { in makeTest {
name = "ec2-" + name; name = "ec2-" + name;
nodes = {}; nodes = {};
testScript = testScript = ''
'' import os
my $imageDir = ($ENV{'TMPDIR'} // "/tmp") . "/vm-state-machine"; import subprocess
mkdir $imageDir, 0700;
my $diskImage = "$imageDir/machine.qcow2";
system("qemu-img create -f qcow2 -o backing_file=${image} $diskImage") == 0 or die;
system("qemu-img resize $diskImage 10G") == 0 or die;
# Note: we use net=169.0.0.0/8 rather than image_dir = os.path.join(
# net=169.254.0.0/16 to prevent dhcpcd from getting horribly os.environ.get("TMPDIR", tempfile.gettempdir()), "tmp", "vm-state-machine"
# confused. (It would get a DHCP lease in the 169.254.* )
# range, which it would then configure and prompty delete os.makedirs(image_dir, mode=0o700, exist_ok=True)
# again when it deletes link-local addresses.) Ideally we'd disk_image = os.path.join(image_dir, "machine.qcow2")
# turn off the DHCP server, but qemu does not have an option subprocess.check_call(
# to do that. [
my $startCommand = "qemu-kvm -m 1024"; "qemu-img",
$startCommand .= " -device virtio-net-pci,netdev=vlan0"; "create",
$startCommand .= " -netdev 'user,id=vlan0,net=169.0.0.0/8,guestfwd=tcp:169.254.169.254:80-cmd:${pkgs.micro-httpd}/bin/micro_httpd ${metaData}'"; "-f",
$startCommand .= " -drive file=$diskImage,if=virtio,werror=report"; "qcow2",
$startCommand .= " \$QEMU_OPTS"; "-o",
"backing_file=${image}",
disk_image,
]
)
subprocess.check_call(["qemu-img", "resize", disk_image, "10G"])
my $machine = createMachine({ startCommand => $startCommand }); # Note: we use net=169.0.0.0/8 rather than
# net=169.254.0.0/16 to prevent dhcpcd from getting horribly
# confused. (It would get a DHCP lease in the 169.254.*
# range, which it would then configure and prompty delete
# again when it deletes link-local addresses.) Ideally we'd
# turn off the DHCP server, but qemu does not have an option
# to do that.
start_command = (
"qemu-kvm -m 1024"
+ " -device virtio-net-pci,netdev=vlan0"
+ " -netdev 'user,id=vlan0,net=169.0.0.0/8,guestfwd=tcp:169.254.169.254:80-cmd:${pkgs.micro-httpd}/bin/micro_httpd ${metaData}'"
+ f" -drive file={disk_image},if=virtio,werror=report"
+ " $QEMU_OPTS"
)
${script} machine = create_machine({"startCommand": start_command})
''; '' + script;
}; };
} }

View File

@ -9,13 +9,13 @@ let
}; };
}; };
# prevent make-test.nix to change IP # prevent make-test-python.nix to change IP
networking.interfaces = { networking.interfaces = {
eth1.ipv4.addresses = lib.mkOverride 0 [ ]; eth1.ipv4.addresses = lib.mkOverride 0 [ ];
}; };
}; };
in { in {
name = "cotnainers-reloadable"; name = "containers-reloadable";
meta = with pkgs.stdenv.lib.maintainers; { meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ danbst ]; maintainers = [ danbst ];
}; };

19
nixos/tests/cri-o.nix Normal file
View File

@ -0,0 +1,19 @@
# This test runs CRI-O and verifies via critest
import ./make-test-python.nix ({ pkgs, ... }: {
name = "cri-o";
maintainers = with pkgs.stdenv.lib.maintainers; teams.podman.members;
nodes = {
crio = {
virtualisation.cri-o.enable = true;
};
};
testScript = ''
start_all()
crio.wait_for_unit("crio.service")
crio.succeed(
"critest --ginkgo.focus='Runtime info' --runtime-endpoint unix:///var/run/crio/crio.sock"
)
'';
})

View File

@ -1,27 +0,0 @@
import ./make-test.nix ({ pkgs, ...} : {
name = "docker-preloader";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ lewo ];
};
nodes = {
docker =
{ pkgs, ... }:
{
virtualisation.docker.enable = true;
virtualisation.dockerPreloader.images = [ pkgs.dockerTools.examples.nix pkgs.dockerTools.examples.bash ];
services.openssh.enable = true;
services.openssh.permitRootLogin = "yes";
services.openssh.extraConfig = "PermitEmptyPasswords yes";
users.extraUsers.root.password = "";
};
};
testScript = ''
startAll;
$docker->waitForUnit("sockets.target");
$docker->succeed("docker run nix nix-store --version");
$docker->succeed("docker run bash bash --version");
'';
})

View File

@ -3,58 +3,58 @@
pkgs ? import ../.. { inherit system config; } pkgs ? import ../.. { inherit system config; }
}: }:
with import ../lib/testing.nix { inherit system pkgs; }; with import ../lib/testing-python.nix { inherit system pkgs; };
with pkgs.lib; with pkgs.lib;
with import common/ec2.nix { inherit makeTest pkgs; }; with import common/ec2.nix { inherit makeTest pkgs; };
let let
imageCfg = imageCfg = (import ../lib/eval-config.nix {
(import ../lib/eval-config.nix { inherit system;
inherit system; modules = [
modules = [ ../maintainers/scripts/ec2/amazon-image.nix
../maintainers/scripts/ec2/amazon-image.nix ../modules/testing/test-instrumentation.nix
../modules/testing/test-instrumentation.nix ../modules/profiles/qemu-guest.nix
../modules/profiles/qemu-guest.nix {
{ ec2.hvm = true; ec2.hvm = true;
# Hack to make the partition resizing work in QEMU. # Hack to make the partition resizing work in QEMU.
boot.initrd.postDeviceCommands = mkBefore boot.initrd.postDeviceCommands = mkBefore ''
'' ln -s vda /dev/xvda
ln -s vda /dev/xvda ln -s vda1 /dev/xvda1
ln -s vda1 /dev/xvda1 '';
'';
# Needed by nixos-rebuild due to the lack of network # Needed by nixos-rebuild due to the lack of network
# access. Determined by trial and error. # access. Determined by trial and error.
system.extraDependencies = system.extraDependencies = with pkgs; ( [
with pkgs; ( # Needed for a nixos-rebuild.
[ busybox
# Needed for a nixos-rebuild. cloud-utils
busybox desktop-file-utils
stdenv libxslt.bin
stdenvNoCC mkinitcpio-nfs-utils
mkinitcpio-nfs-utils stdenv
unionfs-fuse stdenvNoCC
cloud-utils texinfo
desktop-file-utils unionfs-fuse
texinfo xorg.lndir
libxslt.bin
xorg.lndir
# These are used in the configure-from-userdata tests # These are used in the configure-from-userdata tests
# for EC2. Httpd and valgrind are requested by the # for EC2. Httpd and valgrind are requested by the
# configuration. # configuration.
apacheHttpd apacheHttpd.doc apacheHttpd.man valgrind.doc apacheHttpd
] apacheHttpd.doc
); apacheHttpd.man
} valgrind.doc
]; ]);
}).config; }
];
}).config;
image = "${imageCfg.system.build.amazonImage}/${imageCfg.amazonImage.name}.vhd"; image = "${imageCfg.system.build.amazonImage}/${imageCfg.amazonImage.name}.vhd";
sshKeys = import ./ssh-keys.nix pkgs; sshKeys = import ./ssh-keys.nix pkgs;
snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text; snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text;
snakeOilPrivateKeyFile = pkgs.writeText "private-key" snakeOilPrivateKey;
snakeOilPublicKey = sshKeys.snakeOilPublicKey; snakeOilPublicKey = sshKeys.snakeOilPublicKey;
in { in {
@ -68,43 +68,47 @@ in {
SSH_HOST_ED25519_KEY:${replaceStrings ["\n"] ["|"] snakeOilPrivateKey} SSH_HOST_ED25519_KEY:${replaceStrings ["\n"] ["|"] snakeOilPrivateKey}
''; '';
script = '' script = ''
$machine->start; machine.start()
$machine->waitForFile("/etc/ec2-metadata/user-data"); machine.wait_for_file("/etc/ec2-metadata/user-data")
$machine->waitForUnit("sshd.service"); machine.wait_for_unit("sshd.service")
$machine->succeed("grep unknown /etc/ec2-metadata/ami-manifest-path"); machine.succeed("grep unknown /etc/ec2-metadata/ami-manifest-path")
# We have no keys configured on the client side yet, so this should fail # We have no keys configured on the client side yet, so this should fail
$machine->fail("ssh -o BatchMode=yes localhost exit"); machine.fail("ssh -o BatchMode=yes localhost exit")
# Let's install our client private key # Let's install our client private key
$machine->succeed("mkdir -p ~/.ssh"); machine.succeed("mkdir -p ~/.ssh")
$machine->succeed("echo '${snakeOilPrivateKey}' > ~/.ssh/id_ed25519"); machine.copy_from_host_via_shell(
$machine->succeed("chmod 600 ~/.ssh/id_ed25519"); "${snakeOilPrivateKeyFile}", "~/.ssh/id_ed25519"
)
machine.succeed("chmod 600 ~/.ssh/id_ed25519")
# We haven't configured the host key yet, so this should still fail # We haven't configured the host key yet, so this should still fail
$machine->fail("ssh -o BatchMode=yes localhost exit"); machine.fail("ssh -o BatchMode=yes localhost exit")
# Add the host key; ssh should finally succeed # Add the host key; ssh should finally succeed
$machine->succeed("echo localhost,127.0.0.1 ${snakeOilPublicKey} > ~/.ssh/known_hosts"); machine.succeed(
$machine->succeed("ssh -o BatchMode=yes localhost exit"); "echo localhost,127.0.0.1 ${snakeOilPublicKey} > ~/.ssh/known_hosts"
)
machine.succeed("ssh -o BatchMode=yes localhost exit")
# Test whether the root disk was resized. # Test whether the root disk was resized.
my $blocks = $machine->succeed("stat -c %b -f /"); blocks, block_size = map(int, machine.succeed("stat -c %b:%S -f /").split(":"))
my $bsize = $machine->succeed("stat -c %S -f /"); GB = 1024 ** 3
my $size = $blocks * $bsize; assert 9.7 * GB <= blocks * block_size <= 10 * GB
die "wrong free space $size" if $size < 9.7 * 1024 * 1024 * 1024 || $size > 10 * 1024 * 1024 * 1024;
# Just to make sure resizing is idempotent. # Just to make sure resizing is idempotent.
$machine->shutdown; machine.shutdown()
$machine->start; machine.start()
$machine->waitForFile("/etc/ec2-metadata/user-data"); machine.wait_for_file("/etc/ec2-metadata/user-data")
''; '';
}; };
boot-ec2-config = makeEc2Test { boot-ec2-config = makeEc2Test {
name = "config-userdata"; name = "config-userdata";
meta.broken = true; # amazon-init wants to download from the internet while building the system
inherit image; inherit image;
sshPublicKey = snakeOilPublicKey; sshPublicKey = snakeOilPublicKey;
@ -133,17 +137,17 @@ in {
} }
''; '';
script = '' script = ''
$machine->start; machine.start()
# amazon-init must succeed. if it fails, make the test fail # amazon-init must succeed. if it fails, make the test fail
# immediately instead of timing out in waitForFile. # immediately instead of timing out in wait_for_file.
$machine->waitForUnit('amazon-init.service'); machine.wait_for_unit("amazon-init.service")
$machine->waitForFile("/etc/testFile"); machine.wait_for_file("/etc/testFile")
$machine->succeed("cat /etc/testFile | grep -q 'whoa'"); assert "whoa" in machine.succeed("cat /etc/testFile")
$machine->waitForUnit("httpd.service"); machine.wait_for_unit("httpd.service")
$machine->succeed("curl http://localhost | grep Valgrind"); assert "Valgrind" in machine.succeed("curl http://localhost")
''; '';
}; };
} }

View File

@ -23,6 +23,13 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
services.xserver.desktopManager.gnome3.enable = true; services.xserver.desktopManager.gnome3.enable = true;
services.xserver.desktopManager.gnome3.debug = true; services.xserver.desktopManager.gnome3.debug = true;
environment.systemPackages = [
(pkgs.makeAutostartItem {
name = "org.gnome.Terminal";
package = pkgs.gnome3.gnome-terminal;
})
];
virtualisation.memorySize = 1024; virtualisation.memorySize = 1024;
}; };
@ -65,9 +72,6 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
) )
with subtest("Open Gnome Terminal"): with subtest("Open Gnome Terminal"):
machine.succeed(
"${gnomeTerminalCommand}"
)
# correct output should be (true, '"gnome-terminal-server"') # correct output should be (true, '"gnome-terminal-server"')
machine.wait_until_succeeds( machine.wait_until_succeeds(
"${wmClass} | grep -q 'gnome-terminal-server'" "${wmClass} | grep -q 'gnome-terminal-server'"

View File

@ -1,4 +1,4 @@
import ./make-test.nix ({ pkgs, latestKernel ? false, ... } : { import ./make-test-python.nix ({ pkgs, latestKernel ? false, ... } : {
name = "hardened"; name = "hardened";
meta = with pkgs.stdenv.lib.maintainers; { meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ joachifm ]; maintainers = [ joachifm ];
@ -47,84 +47,88 @@ import ./make-test.nix ({ pkgs, latestKernel ? false, ... } : {
}; };
in in
'' ''
$machine->waitForUnit("multi-user.target"); machine.wait_for_unit("multi-user.target")
with subtest("AppArmor profiles are loaded"):
machine.succeed("systemctl status apparmor.service")
subtest "apparmor-loaded", sub {
$machine->succeed("systemctl status apparmor.service");
};
# AppArmor securityfs # AppArmor securityfs
subtest "apparmor-securityfs", sub { with subtest("AppArmor securityfs is mounted"):
$machine->succeed("mountpoint -q /sys/kernel/security"); machine.succeed("mountpoint -q /sys/kernel/security")
$machine->succeed("cat /sys/kernel/security/apparmor/profiles"); machine.succeed("cat /sys/kernel/security/apparmor/profiles")
};
# Test loading out-of-tree modules # Test loading out-of-tree modules
subtest "extra-module-packages", sub { with subtest("Out-of-tree modules can be loaded"):
$machine->succeed("grep -Fq wireguard /proc/modules"); machine.succeed("grep -Fq wireguard /proc/modules")
};
# Test hidepid # Test hidepid
subtest "hidepid", sub { with subtest("hidepid=2 option is applied and works"):
$machine->succeed("grep -Fq hidepid=2 /proc/mounts"); machine.succeed("grep -Fq hidepid=2 /proc/mounts")
# cannot use pgrep -u here, it segfaults when access to process info is denied # cannot use pgrep -u here, it segfaults when access to process info is denied
$machine->succeed("[ `su - sybil -c 'ps --no-headers --user root | wc -l'` = 0 ]"); machine.succeed("[ `su - sybil -c 'ps --no-headers --user root | wc -l'` = 0 ]")
$machine->succeed("[ `su - alice -c 'ps --no-headers --user root | wc -l'` != 0 ]"); machine.succeed("[ `su - alice -c 'ps --no-headers --user root | wc -l'` != 0 ]")
};
# Test kernel module hardening # Test kernel module hardening
subtest "lock-modules", sub { with subtest("No more kernel modules can be loaded"):
# note: this had better be a module we normally wouldn't load ... # note: this had better be a module we normally wouldn't load ...
$machine->fail("modprobe dccp"); machine.fail("modprobe dccp")
};
# Test userns # Test userns
subtest "userns", sub { with subtest("User namespaces are restricted"):
$machine->succeed("unshare --user true"); machine.succeed("unshare --user true")
$machine->fail("su -l alice -c 'unshare --user true'"); machine.fail("su -l alice -c 'unshare --user true'")
};
# Test dmesg restriction # Test dmesg restriction
subtest "dmesg", sub { with subtest("Regular users cannot access dmesg"):
$machine->fail("su -l alice -c dmesg"); machine.fail("su -l alice -c dmesg")
};
# Test access to kcore # Test access to kcore
subtest "kcore", sub { with subtest("Kcore is inaccessible as root"):
$machine->fail("cat /proc/kcore"); machine.fail("cat /proc/kcore")
};
# Test deferred mount # Test deferred mount
subtest "mount", sub { with subtest("Deferred mounts work"):
$machine->fail("mountpoint -q /efi"); # was deferred machine.fail("mountpoint -q /efi") # was deferred
$machine->execute("mkdir -p /efi"); machine.execute("mkdir -p /efi")
$machine->succeed("mount /dev/disk/by-label/EFISYS /efi"); machine.succeed("mount /dev/disk/by-label/EFISYS /efi")
$machine->succeed("mountpoint -q /efi"); # now mounted machine.succeed("mountpoint -q /efi") # now mounted
};
# Test Nix dæmon usage # Test Nix dæmon usage
subtest "nix-daemon", sub { with subtest("nix-daemon cannot be used by all users"):
$machine->fail("su -l nobody -s /bin/sh -c 'nix ping-store'"); machine.fail("su -l nobody -s /bin/sh -c 'nix ping-store'")
$machine->succeed("su -l alice -c 'nix ping-store'") =~ "OK"; machine.succeed("su -l alice -c 'nix ping-store'")
};
# Test kernel image protection # Test kernel image protection
subtest "kernelimage", sub { with subtest("The kernel image is protected"):
$machine->fail("systemctl hibernate"); machine.fail("systemctl hibernate")
$machine->fail("systemctl kexec"); machine.fail("systemctl kexec")
};
# Test hardened memory allocator # Test hardened memory allocator
sub runMallocTestProg { def runMallocTestProg(prog_name, error_text):
my ($progName, $errorText) = @_; text = "fatal allocator error: " + error_text
my $text = "fatal allocator error: " . $errorText; if not text in machine.fail(
$machine->fail("${hardened-malloc-tests}/bin/" . $progName) =~ $text; "${hardened-malloc-tests}/bin/"
}; + prog_name
+ " 2>&1"
):
raise Exception("Hardened malloc does not work for {}".format(error_text))
subtest "hardenedmalloc", sub {
runMallocTestProg("double_free_large", "invalid free"); with subtest("The hardened memory allocator works"):
runMallocTestProg("unaligned_free_small", "invalid unaligned free"); runMallocTestProg("double_free_large", "invalid free")
runMallocTestProg("write_after_free_small", "detected write after free"); runMallocTestProg("unaligned_free_small", "invalid unaligned free")
}; runMallocTestProg("write_after_free_small", "detected write after free")
''; '';
}) })

View File

@ -1,15 +1,16 @@
import ../make-test.nix ({ pkgs, ...} : { import ../make-test-python.nix ({ pkgs, ...} : {
name = "test-hocker-fetchdocker"; name = "test-hocker-fetchdocker";
meta = with pkgs.stdenv.lib.maintainers; { meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ ixmatus ]; maintainers = [ ixmatus ];
broken = true; # tries to download from registry-1.docker.io - how did this ever work?
}; };
machine = import ./machine.nix; machine = import ./machine.nix;
testScript = '' testScript = ''
startAll; start_all()
$machine->waitForUnit("sockets.target"); machine.wait_for_unit("sockets.target")
$machine->waitUntilSucceeds("docker run registry-1.docker.io/v2/library/hello-world:latest"); machine.wait_until_succeeds("docker run registry-1.docker.io/v2/library/hello-world:latest")
''; '';
}) })

View File

@ -74,7 +74,7 @@ let
throw "Non-EFI boot methods are only supported on i686 / x86_64" throw "Non-EFI boot methods are only supported on i686 / x86_64"
else '' else ''
def assemble_qemu_flags(): def assemble_qemu_flags():
flags = "-cpu host" flags = "-cpu max"
${if system == "x86_64-linux" ${if system == "x86_64-linux"
then ''flags += " -m 768"'' then ''flags += " -m 768"''
else ''flags += " -m 512 -enable-kvm -machine virt,gic-version=host"'' else ''flags += " -m 512 -enable-kvm -machine virt,gic-version=host"''
@ -317,6 +317,7 @@ let
texinfo texinfo
unionfs-fuse unionfs-fuse
xorg.lndir xorg.lndir
(lvm2.override { udev = null; }) # for initrd (extra-utils)
# add curl so that rather than seeing the test attempt to download # add curl so that rather than seeing the test attempt to download
# curl's tarball, we see what it's trying to download # curl's tarball, we see what it's trying to download
@ -799,7 +800,7 @@ in {
"btrfs subvol create /mnt/badpath/boot", "btrfs subvol create /mnt/badpath/boot",
"btrfs subvol create /mnt/nixos", "btrfs subvol create /mnt/nixos",
"btrfs subvol set-default " "btrfs subvol set-default "
+ "$(btrfs subvol list /mnt | grep 'nixos' | awk '{print \$2}') /mnt", + "$(btrfs subvol list /mnt | grep 'nixos' | awk '{print $2}') /mnt",
"umount /mnt", "umount /mnt",
"mount -o defaults LABEL=root /mnt", "mount -o defaults LABEL=root /mnt",
"mkdir -p /mnt/badpath/boot", # Help ensure the detection mechanism "mkdir -p /mnt/badpath/boot", # Help ensure the detection mechanism

View File

@ -3,30 +3,30 @@
pkgs ? import ../.. { inherit system config; } pkgs ? import ../.. { inherit system config; }
}: }:
with import ../lib/testing.nix { inherit system pkgs; }; with import ../lib/testing-python.nix { inherit system pkgs; };
with pkgs.lib; with pkgs.lib;
with import common/ec2.nix { inherit makeTest pkgs; }; with import common/ec2.nix { inherit makeTest pkgs; };
let let
image = image = (import ../lib/eval-config.nix {
(import ../lib/eval-config.nix { inherit system;
inherit system; modules = [
modules = [ ../maintainers/scripts/openstack/openstack-image.nix
../maintainers/scripts/openstack/openstack-image.nix ../modules/testing/test-instrumentation.nix
../modules/testing/test-instrumentation.nix ../modules/profiles/qemu-guest.nix
../modules/profiles/qemu-guest.nix {
{ # Needed by nixos-rebuild due to lack of network access.
# Needed by nixos-rebuild due to lack of network access. system.extraDependencies = with pkgs; [
system.extraDependencies = with pkgs; [ stdenv
stdenv ];
]; }
} ];
]; }).config.system.build.openstackImage + "/nixos.qcow2";
}).config.system.build.openstackImage + "/nixos.qcow2";
sshKeys = import ./ssh-keys.nix pkgs; sshKeys = import ./ssh-keys.nix pkgs;
snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text; snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text;
snakeOilPrivateKeyFile = pkgs.writeText "private-key" snakeOilPrivateKey;
snakeOilPublicKey = sshKeys.snakeOilPublicKey; snakeOilPublicKey = sshKeys.snakeOilPublicKey;
in { in {
@ -39,32 +39,36 @@ in {
SSH_HOST_ED25519_KEY:${replaceStrings ["\n"] ["|"] snakeOilPrivateKey} SSH_HOST_ED25519_KEY:${replaceStrings ["\n"] ["|"] snakeOilPrivateKey}
''; '';
script = '' script = ''
$machine->start; machine.start()
$machine->waitForFile("/etc/ec2-metadata/user-data"); machine.wait_for_file("/etc/ec2-metadata/user-data")
$machine->waitForUnit("sshd.service"); machine.wait_for_unit("sshd.service")
$machine->succeed("grep unknown /etc/ec2-metadata/ami-manifest-path"); machine.succeed("grep unknown /etc/ec2-metadata/ami-manifest-path")
# We have no keys configured on the client side yet, so this should fail # We have no keys configured on the client side yet, so this should fail
$machine->fail("ssh -o BatchMode=yes localhost exit"); machine.fail("ssh -o BatchMode=yes localhost exit")
# Let's install our client private key # Let's install our client private key
$machine->succeed("mkdir -p ~/.ssh"); machine.succeed("mkdir -p ~/.ssh")
$machine->succeed("echo '${snakeOilPrivateKey}' > ~/.ssh/id_ed25519"); machine.copy_from_host_via_shell(
$machine->succeed("chmod 600 ~/.ssh/id_ed25519"); "${snakeOilPrivateKeyFile}", "~/.ssh/id_ed25519"
)
machine.succeed("chmod 600 ~/.ssh/id_ed25519")
# We haven't configured the host key yet, so this should still fail # We haven't configured the host key yet, so this should still fail
$machine->fail("ssh -o BatchMode=yes localhost exit"); machine.fail("ssh -o BatchMode=yes localhost exit")
# Add the host key; ssh should finally succeed # Add the host key; ssh should finally succeed
$machine->succeed("echo localhost,127.0.0.1 ${snakeOilPublicKey} > ~/.ssh/known_hosts"); machine.succeed(
$machine->succeed("ssh -o BatchMode=yes localhost exit"); "echo localhost,127.0.0.1 ${snakeOilPublicKey} > ~/.ssh/known_hosts"
)
machine.succeed("ssh -o BatchMode=yes localhost exit")
# Just to make sure resizing is idempotent. # Just to make sure resizing is idempotent.
$machine->shutdown; machine.shutdown()
$machine->start; machine.start()
$machine->waitForFile("/etc/ec2-metadata/user-data"); machine.wait_for_file("/etc/ec2-metadata/user-data")
''; '';
}; };
@ -86,9 +90,9 @@ in {
} }
''; '';
script = '' script = ''
$machine->start; machine.start()
$machine->waitForFile("/etc/testFile"); machine.wait_for_file("/etc/testFile")
$machine->succeed("cat /etc/testFile | grep -q 'whoa'"); assert "whoa" in machine.succeed("cat /etc/testFile")
''; '';
}; };
} }

View File

@ -1,4 +1,4 @@
import ./make-test.nix ({pkgs, lib, ...}: import ./make-test-python.nix ({pkgs, lib, ...}:
let let
# A filesystem image with a (presumably) bootable debian # A filesystem image with a (presumably) bootable debian
debianImage = pkgs.vmTools.diskImageFuns.debian9i386 { debianImage = pkgs.vmTools.diskImageFuns.debian9i386 {
@ -34,9 +34,6 @@ let
''; '';
}; };
# options to add the disk to the test vm
QEMU_OPTS = "-drive index=2,file=${debianImage}/disk-image.qcow2,read-only,if=virtio";
# a part of the configuration of the test vm # a part of the configuration of the test vm
simpleConfig = { simpleConfig = {
boot.loader.grub = { boot.loader.grub = {
@ -71,7 +68,7 @@ in {
machine = { config, pkgs, ... }: (simpleConfig // { machine = { config, pkgs, ... }: (simpleConfig // {
imports = [ ../modules/profiles/installation-device.nix imports = [ ../modules/profiles/installation-device.nix
../modules/profiles/base.nix ]; ../modules/profiles/base.nix ];
virtualisation.memorySize = 1024; virtualisation.memorySize = 1300;
# The test cannot access the network, so any packages # The test cannot access the network, so any packages
# nixos-rebuild needs must be included in the VM. # nixos-rebuild needs must be included in the VM.
system.extraDependencies = with pkgs; system.extraDependencies = with pkgs;
@ -99,22 +96,28 @@ in {
testScript = '' testScript = ''
# hack to add the secondary disk # hack to add the secondary disk
$machine->{startCommand} = "QEMU_OPTS=\"\$QEMU_OPTS \"${lib.escapeShellArg QEMU_OPTS} ".$machine->{startCommand}; os.environ[
"QEMU_OPTS"
] = "-drive index=2,file=${debianImage}/disk-image.qcow2,read-only,if=virtio"
$machine->start; machine.start()
$machine->succeed("udevadm settle"); machine.succeed("udevadm settle")
$machine->waitForUnit("multi-user.target"); machine.wait_for_unit("multi-user.target")
print(machine.succeed("lsblk"))
# check that os-prober works standalone # check that os-prober works standalone
$machine->succeed("${pkgs.os-prober}/bin/os-prober | grep /dev/vdb1"); machine.succeed(
"${pkgs.os-prober}/bin/os-prober | grep /dev/vdb1"
)
# rebuild and test that debian is available in the grub menu # rebuild and test that debian is available in the grub menu
$machine->succeed("nixos-generate-config"); machine.succeed("nixos-generate-config")
$machine->copyFileFromHost( machine.copy_from_host(
"${configFile}", "${configFile}",
"/etc/nixos/configuration.nix"); "/etc/nixos/configuration.nix",
$machine->succeed("nixos-rebuild boot >&2"); )
machine.succeed("nixos-rebuild boot >&2")
$machine->succeed("egrep 'menuentry.*debian' /boot/grub/grub.cfg"); machine.succeed("egrep 'menuentry.*debian' /boot/grub/grub.cfg")
''; '';
}) })

View File

@ -1,103 +1,111 @@
{ system ? builtins.currentSystem
, config ? { }
, pkgs ? import ../.. { inherit system config; } }:
with import ../lib/testing.nix { inherit system pkgs; };
with pkgs.lib;
let let
makePostgresqlWalReceiverTest = subTestName: postgresqlPackage: let # Makes a test for a PostgreSQL package, given by name and looked up from `pkgs`.
makePostgresqlWalReceiverTest = postgresqlPackage:
{
name = postgresqlPackage;
value =
import ./make-test-python.nix ({ pkgs, lib, ... }: let
postgresqlDataDir = "/var/db/postgresql/test"; pkg = pkgs."${postgresqlPackage}";
replicationUser = "wal_receiver_user"; postgresqlDataDir = "/var/lib/postgresql/${pkg.psqlSchema}";
replicationSlot = "wal_receiver_slot"; replicationUser = "wal_receiver_user";
replicationConn = "postgresql://${replicationUser}@localhost"; replicationSlot = "wal_receiver_slot";
baseBackupDir = "/tmp/pg_basebackup"; replicationConn = "postgresql://${replicationUser}@localhost";
walBackupDir = "/tmp/pg_wal"; baseBackupDir = "/tmp/pg_basebackup";
atLeast12 = versionAtLeast postgresqlPackage.version "12.0"; walBackupDir = "/tmp/pg_wal";
restoreCommand = '' atLeast12 = lib.versionAtLeast pkg.version "12.0";
restore_command = 'cp ${walBackupDir}/%f %p' restoreCommand = ''
''; restore_command = 'cp ${walBackupDir}/%f %p'
recoveryFile = if atLeast12
then pkgs.writeTextDir "recovery.signal" ""
else pkgs.writeTextDir "recovery.conf" "${restoreCommand}";
in makeTest {
name = "postgresql-wal-receiver-${subTestName}";
meta.maintainers = with maintainers; [ pacien ];
machine = { ... }: {
# Needed because this test uses a non-default 'services.postgresql.dataDir'.
systemd.tmpfiles.rules = [
"d /var/db/postgresql 0700 postgres postgres"
];
services.postgresql = {
package = postgresqlPackage;
enable = true;
dataDir = postgresqlDataDir;
extraConfig = ''
wal_level = archive # alias for replica on pg >= 9.6
max_wal_senders = 10
max_replication_slots = 10
'' + optionalString atLeast12 ''
${restoreCommand}
recovery_end_command = 'touch recovery.done'
''; '';
authentication = ''
host replication ${replicationUser} all trust
'';
initialScript = pkgs.writeText "init.sql" ''
create user ${replicationUser} replication;
select * from pg_create_physical_replication_slot('${replicationSlot}');
'';
};
services.postgresqlWalReceiver.receivers.main = { recoveryFile = if atLeast12
inherit postgresqlPackage; then pkgs.writeTextDir "recovery.signal" ""
connection = replicationConn; else pkgs.writeTextDir "recovery.conf" "${restoreCommand}";
slot = replicationSlot;
directory = walBackupDir; in {
}; name = "postgresql-wal-receiver-${postgresqlPackage}";
# This is only to speed up the test; it isn't a timing race. The service is set to always autorestart; meta.maintainers = with lib.maintainers; [ pacien ];
# the default 60 sec is fine for a real system, but is too long for a test
systemd.services.postgresql-wal-receiver-main.serviceConfig.RestartSec = mkForce 5; machine = { ... }: {
services.postgresql = {
package = pkg;
enable = true;
extraConfig = ''
wal_level = archive # alias for replica on pg >= 9.6
max_wal_senders = 10
max_replication_slots = 10
'' + lib.optionalString atLeast12 ''
${restoreCommand}
recovery_end_command = 'touch recovery.done'
'';
authentication = ''
host replication ${replicationUser} all trust
'';
initialScript = pkgs.writeText "init.sql" ''
create user ${replicationUser} replication;
select * from pg_create_physical_replication_slot('${replicationSlot}');
'';
};
services.postgresqlWalReceiver.receivers.main = {
postgresqlPackage = pkg;
connection = replicationConn;
slot = replicationSlot;
directory = walBackupDir;
};
# This is only to speed up the test; it isn't a timing race. The service is set to always autorestart;
# the default 60 sec is fine for a real system, but is too long for a test
systemd.services.postgresql-wal-receiver-main.serviceConfig.RestartSec = lib.mkForce 5;
};
testScript = ''
# make an initial base backup
machine.wait_for_unit("postgresql")
machine.wait_for_unit("postgresql-wal-receiver-main")
# WAL receiver healthchecks PG every 5 seconds, so let's be sure they have connected to each other
# required only for 9.4
machine.sleep(5)
machine.succeed(
"${pkg}/bin/pg_basebackup --dbname=${replicationConn} --pgdata=${baseBackupDir}"
)
# create a dummy table with 100 records
machine.succeed(
"sudo -u postgres psql --command='create table dummy as select * from generate_series(1, 100) as val;'"
)
# stop postgres and destroy data
machine.systemctl("stop postgresql")
machine.systemctl("stop postgresql-wal-receiver-main")
machine.succeed("rm -r ${postgresqlDataDir}/{base,global,pg_*}")
# restore the base backup
machine.succeed(
"cp -r ${baseBackupDir}/* ${postgresqlDataDir} && chown postgres:postgres -R ${postgresqlDataDir}"
)
# prepare WAL and recovery
machine.succeed("chmod a+rX -R ${walBackupDir}")
machine.execute(
"for part in ${walBackupDir}/*.partial; do mv $part ''${part%%.*}; done"
) # make use of partial segments too
machine.succeed(
"cp ${recoveryFile}/* ${postgresqlDataDir}/ && chmod 666 ${postgresqlDataDir}/recovery*"
)
# replay WAL
machine.systemctl("start postgresql")
machine.wait_for_file("${postgresqlDataDir}/recovery.done")
machine.systemctl("restart postgresql")
machine.wait_for_unit("postgresql")
# check that our records have been restored
machine.succeed(
"test $(sudo -u postgres psql --pset='pager=off' --tuples-only --command='select count(distinct val) from dummy;') -eq 100"
)
'';
});
}; };
testScript = '' # Maps the generic function over all attributes of PostgreSQL packages
# make an initial base backup in builtins.listToAttrs (map makePostgresqlWalReceiverTest (builtins.attrNames (import ../../pkgs/servers/sql/postgresql { })))
$machine->waitForUnit('postgresql');
$machine->waitForUnit('postgresql-wal-receiver-main');
# WAL receiver healthchecks PG every 5 seconds, so let's be sure they have connected to each other
# required only for 9.4
$machine->sleep(5);
$machine->succeed('${postgresqlPackage}/bin/pg_basebackup --dbname=${replicationConn} --pgdata=${baseBackupDir}');
# create a dummy table with 100 records
$machine->succeed('sudo -u postgres psql --command="create table dummy as select * from generate_series(1, 100) as val;"');
# stop postgres and destroy data
$machine->systemctl('stop postgresql');
$machine->systemctl('stop postgresql-wal-receiver-main');
$machine->succeed('rm -r ${postgresqlDataDir}/{base,global,pg_*}');
# restore the base backup
$machine->succeed('cp -r ${baseBackupDir}/* ${postgresqlDataDir} && chown postgres:postgres -R ${postgresqlDataDir}');
# prepare WAL and recovery
$machine->succeed('chmod a+rX -R ${walBackupDir}');
$machine->execute('for part in ${walBackupDir}/*.partial; do mv $part ''${part%%.*}; done'); # make use of partial segments too
$machine->succeed('cp ${recoveryFile}/* ${postgresqlDataDir}/ && chmod 666 ${postgresqlDataDir}/recovery*');
# replay WAL
$machine->systemctl('start postgresql');
$machine->waitForFile('${postgresqlDataDir}/recovery.done');
$machine->systemctl('restart postgresql');
$machine->waitForUnit('postgresql');
# check that our records have been restored
$machine->succeed('test $(sudo -u postgres psql --pset="pager=off" --tuples-only --command="select count(distinct val) from dummy;") -eq 100');
'';
};
in mapAttrs makePostgresqlWalReceiverTest (import ../../pkgs/servers/sql/postgresql pkgs)
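
For reference, the rewritten test file builds its result with the standard name/value pattern: each PostgreSQL attribute name becomes one attribute in the final set, and builtins.listToAttrs assembles the pairs. A minimal sketch of that plumbing, with placeholder package names and a plain string standing in for the real make-test-python.nix value:

  let
    # Placeholder names; the real file uses
    # builtins.attrNames (import ../../pkgs/servers/sql/postgresql { }).
    packageNames = [ "postgresql_11" "postgresql_12" ];

    # Mirrors the shape of makePostgresqlWalReceiverTest: one { name, value }
    # pair per package, where value would be the evaluated VM test.
    makeTest = name: {
      inherit name;
      value = "wal-receiver test for ${name}";
    };
  in
    builtins.listToAttrs (map makeTest packageNames)
    # => { postgresql_11 = "wal-receiver test for postgresql_11";
    #      postgresql_12 = "wal-receiver test for postgresql_12"; }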

View File

@ -158,7 +158,10 @@ in import ./make-test-python.nix {
s3 = { pkgs, ... } : { s3 = { pkgs, ... } : {
# Minio requires at least 1GiB of free disk space to run. # Minio requires at least 1GiB of free disk space to run.
virtualisation.diskSize = 2 * 1024; virtualisation = {
diskSize = 2 * 1024;
memorySize = 1024;
};
networking.firewall.allowedTCPPorts = [ minioPort ]; networking.firewall.allowedTCPPorts = [ minioPort ];
services.minio = { services.minio = {
@ -235,7 +238,7 @@ in import ./make-test-python.nix {
# Test if the Thanos bucket command is able to retrieve blocks from the S3 bucket # Test if the Thanos bucket command is able to retrieve blocks from the S3 bucket
# and check if the blocks have the correct labels: # and check if the blocks have the correct labels:
store.succeed( store.succeed(
"thanos bucket ls " "thanos tools bucket ls "
+ "--objstore.config-file=${nodes.store.config.services.thanos.store.objstore.config-file} " + "--objstore.config-file=${nodes.store.config.services.thanos.store.objstore.config-file} "
+ "--output=json | " + "--output=json | "
+ "jq .thanos.labels.some_label | " + "jq .thanos.labels.some_label | "

View File

@ -0,0 +1,80 @@
import ./make-test-python.nix ({ pkgs, lib, ... }: {
name = "shadowsocks";
meta = {
maintainers = with lib.maintainers; [ hmenke ];
};
nodes = {
server = {
boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
networking.useDHCP = false;
networking.interfaces.eth1.ipv4.addresses = [
{ address = "192.168.0.1"; prefixLength = 24; }
];
networking.firewall.rejectPackets = true;
networking.firewall.allowedTCPPorts = [ 8488 ];
networking.firewall.allowedUDPPorts = [ 8488 ];
services.shadowsocks = {
enable = true;
encryptionMethod = "chacha20-ietf-poly1305";
password = "pa$$w0rd";
localAddress = [ "0.0.0.0" ];
port = 8488;
fastOpen = false;
mode = "tcp_and_udp";
plugin = "${pkgs.shadowsocks-v2ray-plugin}/bin/v2ray-plugin";
pluginOpts = "server;host=nixos.org";
};
services.nginx = {
enable = true;
virtualHosts.server = {
locations."/".root = pkgs.writeTextDir "index.html" "It works!";
};
};
};
client = {
networking.useDHCP = false;
networking.interfaces.eth1.ipv4.addresses = [
{ address = "192.168.0.2"; prefixLength = 24; }
];
systemd.services.shadowsocks-client = {
description = "connect to shadowsocks";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [
shadowsocks-libev
shadowsocks-v2ray-plugin
];
script = ''
exec ss-local \
-s 192.168.0.1 \
-p 8488 \
-l 1080 \
-k 'pa$$w0rd' \
-m chacha20-ietf-poly1305 \
-a nobody \
--plugin "${pkgs.shadowsocks-v2ray-plugin}/bin/v2ray-plugin" \
--plugin-opts "host=nixos.org"
'';
};
};
};
testScript = ''
start_all()
server.wait_for_unit("shadowsocks-libev.service")
client.wait_for_unit("shadowsocks-client.service")
client.fail(
"${pkgs.curl}/bin/curl 192.168.0.1:80"
)
msg = client.succeed(
"${pkgs.curl}/bin/curl --socks5 localhost:1080 192.168.0.1:80"
)
assert msg == "It works!", "Could not connect through shadowsocks"
'';
}
)

17
nixos/tests/sssd.nix Normal file
View File

@ -0,0 +1,17 @@
import ./make-test-python.nix ({ pkgs, ... }:
{
name = "sssd";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ bbigras ];
};
machine = { pkgs, ... }: {
services.sssd.enable = true;
};
testScript = ''
start_all()
machine.wait_for_unit("multi-user.target")
machine.wait_for_unit("sssd.service")
'';
})

View File

@ -31,7 +31,7 @@ import ./make-test-python.nix ({pkgs, ...}: {
firewall.enable = false; firewall.enable = false;
interfaces.eth1.ipv4.addresses = lib.mkForce []; # no need for legacy IP interfaces.eth1.ipv4.addresses = lib.mkForce []; # no need for legacy IP
interfaces.eth1.ipv6.addresses = lib.mkForce [ interfaces.eth1.ipv6.addresses = lib.mkForce [
{ address = "2001:DB8::"; prefixLength = 64; } { address = "2001:DB8::1"; prefixLength = 64; }
]; ];
}; };
@ -260,7 +260,7 @@ import ./make-test-python.nix ({pkgs, ...}: {
client.wait_until_succeeds("ping -6 -c 1 FD42::1") client.wait_until_succeeds("ping -6 -c 1 FD42::1")
# the global IP of the ISP router should still not be reachable # the global IP of the ISP router should still not be reachable
router.fail("ping -6 -c 1 2001:DB8::") router.fail("ping -6 -c 1 2001:DB8::1")
# Once we have internal connectivity boot up the ISP # Once we have internal connectivity boot up the ISP
isp.start() isp.start()
@ -273,11 +273,11 @@ import ./make-test-python.nix ({pkgs, ...}: {
# wait until the uplink interface has a good status # wait until the uplink interface has a good status
router.wait_for_unit("network-online.target") router.wait_for_unit("network-online.target")
router.wait_until_succeeds("ping -6 -c1 2001:DB8::") router.wait_until_succeeds("ping -6 -c1 2001:DB8::1")
# shortly after that the client should have received its global IPv6 # shortly after that the client should have received its global IPv6
# address and thus be able to ping the ISP # address and thus be able to ping the ISP
client.wait_until_succeeds("ping -6 -c1 2001:DB8::") client.wait_until_succeeds("ping -6 -c1 2001:DB8::1")
# verify that we got a globally scoped address in eth1 from the # verify that we got a globally scoped address in eth1 from the
# documentation prefix # documentation prefix

View File

@ -4,7 +4,10 @@ import ./make-test-python.nix ({ pkgs, ... }: {
machine = { lib, ... }: { machine = { lib, ... }: {
imports = [ common/user-account.nix common/x11.nix ]; imports = [ common/user-account.nix common/x11.nix ];
virtualisation.emptyDiskImages = [ 512 ]; virtualisation.emptyDiskImages = [ 512 512 ];
virtualisation.memorySize = 1024;
environment.systemPackages = [ pkgs.cryptsetup ];
fileSystems = lib.mkVMOverride { fileSystems = lib.mkVMOverride {
"/test-x-initrd-mount" = { "/test-x-initrd-mount" = {
@ -144,5 +147,25 @@ import ./make-test-python.nix ({ pkgs, ... }: {
assert "RuntimeWatchdogUSec=30s" in output assert "RuntimeWatchdogUSec=30s" in output
assert "RebootWatchdogUSec=10m" in output assert "RebootWatchdogUSec=10m" in output
assert "KExecWatchdogUSec=5m" in output assert "KExecWatchdogUSec=5m" in output
# Test systemd cryptsetup support
with subtest("systemd successfully reads /etc/crypttab and unlocks volumes"):
# create a luks volume and put a filesystem on it
machine.succeed(
"echo -n supersecret | cryptsetup luksFormat -q /dev/vdc -",
"echo -n supersecret | cryptsetup luksOpen --key-file - /dev/vdc foo",
"mkfs.ext3 /dev/mapper/foo",
)
# create a keyfile and /etc/crypttab
machine.succeed("echo -n supersecret > /var/lib/luks-keyfile")
machine.succeed("chmod 600 /var/lib/luks-keyfile")
machine.succeed("echo 'luks1 /dev/vdc /var/lib/luks-keyfile luks' > /etc/crypttab")
# after a reboot, systemd should unlock the volume and we should be able to mount it
machine.shutdown()
machine.succeed("systemctl status systemd-cryptsetup@luks1.service")
machine.succeed("mkdir -p /tmp/luks1")
machine.succeed("mount /dev/mapper/luks1 /tmp/luks1")
''; '';
}) })

View File

@ -9,6 +9,8 @@ import ./make-test-python.nix ({ pkgs, ...} : {
networking.firewall.allowedTCPPorts = [ 9091 ]; networking.firewall.allowedTCPPorts = [ 9091 ];
security.apparmor.enable = true;
services.transmission.enable = true; services.transmission.enable = true;
}; };

View File

@ -15,7 +15,7 @@
assert use64bitGuest -> useKvmNestedVirt; assert use64bitGuest -> useKvmNestedVirt;
with import ../lib/testing.nix { inherit system pkgs; }; with import ../lib/testing-python.nix { inherit system pkgs; };
with pkgs.lib; with pkgs.lib;
let let
@ -91,13 +91,15 @@ let
(isYes "SERIAL_8250_CONSOLE") (isYes "SERIAL_8250_CONSOLE")
(isYes "SERIAL_8250") (isYes "SERIAL_8250")
]; ];
networking.usePredictableInterfaceNames = false;
}; };
mkLog = logfile: tag: let mkLog = logfile: tag: let
rotated = map (i: "${logfile}.${toString i}") (range 1 9); rotated = map (i: "${logfile}.${toString i}") (range 1 9);
all = concatMapStringsSep " " (f: "\"${f}\"") ([logfile] ++ rotated); all = concatMapStringsSep " " (f: "\"${f}\"") ([logfile] ++ rotated);
logcmd = "tail -F ${all} 2> /dev/null | logger -t \"${tag}\""; logcmd = "tail -F ${all} 2> /dev/null | logger -t \"${tag}\"";
in optionalString debug "$machine->execute(ru '${logcmd} & disown');"; in if debug then "machine.execute(ru('${logcmd} & disown'))" else "pass";
testVM = vmName: vmScript: let testVM = vmName: vmScript: let
cfg = (import ../lib/eval-config.nix { cfg = (import ../lib/eval-config.nix {
@ -204,96 +206,105 @@ let
}; };
testSubs = '' testSubs = ''
my ${"$" + name}_sharepath = '${sharePath}';
sub checkRunning_${name} {
my $cmd = 'VBoxManage list runningvms | grep -q "^\"${name}\""';
my ($status, $out) = $machine->execute(ru $cmd);
return $status == 0;
}
sub cleanup_${name} { ${name}_sharepath = "${sharePath}"
$machine->execute(ru "VBoxManage controlvm ${name} poweroff")
if checkRunning_${name};
$machine->succeed("rm -rf ${sharePath}");
$machine->succeed("mkdir -p ${sharePath}");
$machine->succeed("chown alice.users ${sharePath}");
}
sub createVM_${name} {
vbm("createvm --name ${name} ${createFlags}");
vbm("modifyvm ${name} ${vmFlags}");
vbm("setextradata ${name} VBoxInternal/PDM/HaltOnReset 1");
vbm("storagectl ${name} ${controllerFlags}");
vbm("storageattach ${name} ${diskFlags}");
vbm("sharedfolder add ${name} ${sharedFlags}");
vbm("sharedfolder add ${name} ${nixstoreFlags}");
cleanup_${name};
${mkLog "$HOME/VirtualBox VMs/${name}/Logs/VBox.log" "HOST-${name}"} def check_running_${name}():
} cmd = "VBoxManage list runningvms | grep -q '^\"${name}\"'"
(status, _) = machine.execute(ru(cmd))
return status == 0
sub destroyVM_${name} {
cleanup_${name};
vbm("unregistervm ${name} --delete");
}
sub waitForVMBoot_${name} { def cleanup_${name}():
$machine->execute(ru( if check_running_${name}():
'set -e; i=0; '. machine.execute(ru("VBoxManage controlvm ${name} poweroff"))
'while ! test -e ${sharePath}/boot-done; do '. machine.succeed("rm -rf ${sharePath}")
'sleep 10; i=$(($i + 10)); [ $i -le 3600 ]; '. machine.succeed("mkdir -p ${sharePath}")
'VBoxManage list runningvms | grep -q "^\"${name}\""; '. machine.succeed("chown alice.users ${sharePath}")
'done'
));
}
sub waitForIP_${name} ($) {
my $property = "/VirtualBox/GuestInfo/Net/$_[0]/V4/IP";
my $getip = "VBoxManage guestproperty get ${name} $property | ".
"sed -n -e 's/^Value: //p'";
my $ip = $machine->succeed(ru(
'for i in $(seq 1000); do '.
'if ipaddr="$('.$getip.')" && [ -n "$ipaddr" ]; then '.
'echo "$ipaddr"; exit 0; '.
'fi; '.
'sleep 1; '.
'done; '.
'echo "Could not get IPv4 address for ${name}!" >&2; '.
'exit 1'
));
chomp $ip;
return $ip;
}
sub waitForStartup_${name} { def create_vm_${name}():
for (my $i = 0; $i <= 120; $i += 10) { # fmt: off
$machine->sleep(10); vbm(f"createvm --name ${name} ${createFlags}")
return if checkRunning_${name}; vbm(f"modifyvm ${name} ${vmFlags}")
eval { $_[0]->() } if defined $_[0]; vbm(f"setextradata ${name} VBoxInternal/PDM/HaltOnReset 1")
} vbm(f"storagectl ${name} ${controllerFlags}")
die "VirtualBox VM didn't start up within 2 minutes"; vbm(f"storageattach ${name} ${diskFlags}")
} vbm(f"sharedfolder add ${name} ${sharedFlags}")
vbm(f"sharedfolder add ${name} ${nixstoreFlags}")
cleanup_${name}()
sub waitForShutdown_${name} { ${mkLog "$HOME/VirtualBox VMs/${name}/Logs/VBox.log" "HOST-${name}"}
for (my $i = 0; $i <= 120; $i += 10) { # fmt: on
$machine->sleep(10);
return unless checkRunning_${name};
}
die "VirtualBox VM didn't shut down within 2 minutes";
}
sub shutdownVM_${name} {
$machine->succeed(ru "touch ${sharePath}/shutdown"); def destroy_vm_${name}():
$machine->execute( cleanup_${name}()
'set -e; i=0; '. vbm("unregistervm ${name} --delete")
'while test -e ${sharePath}/shutdown '.
' -o -e ${sharePath}/boot-done; do '.
'sleep 1; i=$(($i + 1)); [ $i -le 3600 ]; '. def wait_for_vm_boot_${name}():
'done' machine.execute(
); ru(
waitForShutdown_${name}; "set -e; i=0; "
} "while ! test -e ${sharePath}/boot-done; do "
"sleep 10; i=$(($i + 10)); [ $i -le 3600 ]; "
"VBoxManage list runningvms | grep -q '^\"${name}\"'; "
"done"
)
)
def wait_for_ip_${name}(interface):
property = f"/VirtualBox/GuestInfo/Net/{interface}/V4/IP"
# fmt: off
getip = f"VBoxManage guestproperty get ${name} {property} | sed -n -e 's/^Value: //p'"
# fmt: on
ip = machine.succeed(
ru(
"for i in $(seq 1000); do "
f'if ipaddr="$({getip})" && [ -n "$ipaddr" ]; then '
'echo "$ipaddr"; exit 0; '
"fi; "
"sleep 1; "
"done; "
"echo 'Could not get IPv4 address for ${name}!' >&2; "
"exit 1"
)
).strip()
return ip
def wait_for_startup_${name}(nudge=lambda: None):
for _ in range(0, 130, 10):
machine.sleep(10)
if check_running_${name}():
return
nudge()
raise Exception("VirtualBox VM didn't start up within 2 minutes")
def wait_for_shutdown_${name}():
for _ in range(0, 130, 10):
machine.sleep(10)
if not check_running_${name}():
return
raise Exception("VirtualBox VM didn't shut down within 2 minutes")
def shutdown_vm_${name}():
machine.succeed(ru("touch ${sharePath}/shutdown"))
machine.execute(
"set -e; i=0; "
"while test -e ${sharePath}/shutdown "
" -o -e ${sharePath}/boot-done; do "
"sleep 1; i=$(($i + 1)); [ $i -le 3600 ]; "
"done"
)
wait_for_shutdown_${name}()
''; '';
}; };
@ -364,26 +375,31 @@ let
}; };
testScript = '' testScript = ''
sub ru ($) { from shlex import quote
my $esc = $_[0] =~ s/'/'\\${"'"}'/gr;
return "su - alice -c '$esc'";
}
sub vbm {
$machine->succeed(ru("VBoxManage ".$_[0]));
};
sub removeUUIDs {
return join("\n", grep { $_ !~ /^UUID:/ } split(/\n/, $_[0]))."\n";
}
${concatStrings (mapAttrsToList (_: getAttr "testSubs") vms)} ${concatStrings (mapAttrsToList (_: getAttr "testSubs") vms)}
$machine->waitForX; def ru(cmd: str) -> str:
return f"su - alice -c {quote(cmd)}"
def vbm(cmd: str) -> str:
return machine.succeed(ru(f"VBoxManage {cmd}"))
def remove_uuids(output: str) -> str:
return "\n".join(
[line for line in (output or "").splitlines() if not line.startswith("UUID:")]
)
machine.wait_for_x()
# fmt: off
${mkLog "$HOME/.config/VirtualBox/VBoxSVC.log" "HOST-SVC"} ${mkLog "$HOME/.config/VirtualBox/VBoxSVC.log" "HOST-SVC"}
# fmt: on
${testScript} ${testScript}
# (keep black happy)
''; '';
meta = with pkgs.stdenv.lib.maintainers; { meta = with pkgs.stdenv.lib.maintainers; {
@ -393,133 +409,129 @@ let
unfreeTests = mapAttrs (mkVBoxTest true vboxVMsWithExtpack) { unfreeTests = mapAttrs (mkVBoxTest true vboxVMsWithExtpack) {
enable-extension-pack = '' enable-extension-pack = ''
createVM_testExtensionPack; create_vm_testExtensionPack()
vbm("startvm testExtensionPack"); vbm("startvm testExtensionPack")
waitForStartup_testExtensionPack; wait_for_startup_testExtensionPack()
$machine->screenshot("cli_started"); machine.screenshot("cli_started")
waitForVMBoot_testExtensionPack; wait_for_vm_boot_testExtensionPack()
$machine->screenshot("cli_booted"); machine.screenshot("cli_booted")
$machine->nest("Checking for privilege escalation", sub { with machine.nested("Checking for privilege escalation"):
$machine->fail("test -e '/root/VirtualBox VMs'"); machine.fail("test -e '/root/VirtualBox VMs'")
$machine->fail("test -e '/root/.config/VirtualBox'"); machine.fail("test -e '/root/.config/VirtualBox'")
$machine->succeed("test -e '/home/alice/VirtualBox VMs'"); machine.succeed("test -e '/home/alice/VirtualBox VMs'")
});
shutdownVM_testExtensionPack; shutdown_vm_testExtensionPack()
destroyVM_testExtensionPack; destroy_vm_testExtensionPack()
''; '';
}; };
in mapAttrs (mkVBoxTest false vboxVMs) { in mapAttrs (mkVBoxTest false vboxVMs) {
simple-gui = '' simple-gui = ''
createVM_simple;
$machine->succeed(ru "VirtualBox &");
$machine->waitUntilSucceeds(
ru "xprop -name 'Oracle VM VirtualBox Manager'"
);
$machine->sleep(5);
$machine->screenshot("gui_manager_started");
# Home to select Tools, down to move to the VM, enter to start it. # Home to select Tools, down to move to the VM, enter to start it.
$machine->sendKeys("home"); def send_vm_startup():
$machine->sendKeys("down"); machine.send_key("home")
$machine->sendKeys("ret"); machine.send_key("down")
$machine->screenshot("gui_manager_sent_startup"); machine.send_key("ret")
waitForStartup_simple (sub {
$machine->sendKeys("home");
$machine->sendKeys("down"); create_vm_simple()
$machine->sendKeys("ret"); machine.succeed(ru("VirtualBox &"))
}); machine.wait_until_succeeds(ru("xprop -name 'Oracle VM VirtualBox Manager'"))
$machine->screenshot("gui_started"); machine.sleep(5)
waitForVMBoot_simple; machine.screenshot("gui_manager_started")
$machine->screenshot("gui_booted"); send_vm_startup()
shutdownVM_simple; machine.screenshot("gui_manager_sent_startup")
$machine->sleep(5); wait_for_startup_simple(send_vm_startup)
$machine->screenshot("gui_stopped"); machine.screenshot("gui_started")
$machine->sendKeys("ctrl-q"); wait_for_vm_boot_simple()
$machine->sleep(5); machine.screenshot("gui_booted")
$machine->screenshot("gui_manager_stopped"); shutdown_vm_simple()
destroyVM_simple; machine.sleep(5)
machine.screenshot("gui_stopped")
machine.send_key("ctrl-q")
machine.sleep(5)
machine.screenshot("gui_manager_stopped")
destroy_vm_simple()
''; '';
simple-cli = '' simple-cli = ''
createVM_simple; create_vm_simple()
vbm("startvm simple"); vbm("startvm simple")
waitForStartup_simple; wait_for_startup_simple()
$machine->screenshot("cli_started"); machine.screenshot("cli_started")
waitForVMBoot_simple; wait_for_vm_boot_simple()
$machine->screenshot("cli_booted"); machine.screenshot("cli_booted")
$machine->nest("Checking for privilege escalation", sub { with machine.nested("Checking for privilege escalation"):
$machine->fail("test -e '/root/VirtualBox VMs'"); machine.fail("test -e '/root/VirtualBox VMs'")
$machine->fail("test -e '/root/.config/VirtualBox'"); machine.fail("test -e '/root/.config/VirtualBox'")
$machine->succeed("test -e '/home/alice/VirtualBox VMs'"); machine.succeed("test -e '/home/alice/VirtualBox VMs'")
});
shutdownVM_simple; shutdown_vm_simple()
destroyVM_simple; destroy_vm_simple()
''; '';
headless = '' headless = ''
createVM_headless; create_vm_headless()
$machine->succeed(ru("VBoxHeadless --startvm headless & disown %1")); machine.succeed(ru("VBoxHeadless --startvm headless & disown %1"))
waitForStartup_headless; wait_for_startup_headless()
waitForVMBoot_headless; wait_for_vm_boot_headless()
shutdownVM_headless; shutdown_vm_headless()
destroyVM_headless; destroy_vm_headless()
''; '';
host-usb-permissions = '' host-usb-permissions = ''
my $userUSB = removeUUIDs vbm("list usbhost"); user_usb = remove_uuids(vbm("list usbhost"))
print STDERR $userUSB; print(user_usb, file=sys.stderr)
my $rootUSB = removeUUIDs $machine->succeed("VBoxManage list usbhost"); root_usb = remove_uuids(machine.succeed("VBoxManage list usbhost"))
print STDERR $rootUSB; print(root_usb, file=sys.stderr)
die "USB host devices differ for root and normal user" if user_usb != root_usb:
if $userUSB ne $rootUSB; raise Exception("USB host devices differ for root and normal user")
die "No USB host devices found" if $userUSB =~ /<none>/; if "<none>" in user_usb:
raise Exception("No USB host devices found")
''; '';
systemd-detect-virt = '' systemd-detect-virt = ''
createVM_detectvirt; create_vm_detectvirt()
vbm("startvm detectvirt"); vbm("startvm detectvirt")
waitForStartup_detectvirt; wait_for_startup_detectvirt()
waitForVMBoot_detectvirt; wait_for_vm_boot_detectvirt()
shutdownVM_detectvirt; shutdown_vm_detectvirt()
my $result = $machine->succeed("cat '$detectvirt_sharepath/result'"); result = machine.succeed(f"cat '{detectvirt_sharepath}/result'").strip()
chomp $result; destroy_vm_detectvirt()
destroyVM_detectvirt; if result != "oracle":
die "systemd-detect-virt returned \"$result\" instead of \"oracle\"" raise Exception(f'systemd-detect-virt returned "{result}" instead of "oracle"')
if $result ne "oracle";
''; '';
net-hostonlyif = '' net-hostonlyif = ''
createVM_test1; create_vm_test1()
createVM_test2; create_vm_test2()
vbm("startvm test1"); vbm("startvm test1")
waitForStartup_test1; wait_for_startup_test1()
waitForVMBoot_test1; wait_for_vm_boot_test1()
vbm("startvm test2"); vbm("startvm test2")
waitForStartup_test2; wait_for_startup_test2()
waitForVMBoot_test2; wait_for_vm_boot_test2()
$machine->screenshot("net_booted"); machine.screenshot("net_booted")
my $test1IP = waitForIP_test1 1; test1_ip = wait_for_ip_test1(1)
my $test2IP = waitForIP_test2 1; test2_ip = wait_for_ip_test2(1)
$machine->succeed("echo '$test2IP' | nc -N '$test1IP' 1234"); machine.succeed(f"echo '{test2_ip}' | nc -N '{test1_ip}' 1234")
$machine->succeed("echo '$test1IP' | nc -N '$test2IP' 1234"); machine.succeed(f"echo '{test1_ip}' | nc -N '{test2_ip}' 1234")
$machine->waitUntilSucceeds("nc -N '$test1IP' 5678 < /dev/null >&2"); machine.wait_until_succeeds(f"nc -N '{test1_ip}' 5678 < /dev/null >&2")
$machine->waitUntilSucceeds("nc -N '$test2IP' 5678 < /dev/null >&2"); machine.wait_until_succeeds(f"nc -N '{test2_ip}' 5678 < /dev/null >&2")
shutdownVM_test1; shutdown_vm_test1()
shutdownVM_test2; shutdown_vm_test2()
destroyVM_test1; destroy_vm_test1()
destroyVM_test2; destroy_vm_test2()
''; '';
} // (if enableUnfree then unfreeTests else {}) } // (if enableUnfree then unfreeTests else {})
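
The test set above follows a common generate-and-merge shape: a builder (mkVBoxTest) is mapped over an attribute set of per-test scripts, and the unfree tests are merged in only when enableUnfree is set. A minimal sketch of that shape, with placeholder names and strings standing in for the real test scripts:

  let
    mkTest = name: script: { inherit name script; };  # stand-in for mkVBoxTest
    unfreeTests = builtins.mapAttrs mkTest { enable-extension-pack = "placeholder"; };
    enableUnfree = false;
  in
    builtins.mapAttrs mkTest { simple-cli = "placeholder"; headless = "placeholder"; }
    // (if enableUnfree then unfreeTests else { })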

View File

@ -0,0 +1,26 @@
{ stdenv, fetchFromGitHub, cmake, pkg-config, mpd_clientlib, meson, ninja }:
stdenv.mkDerivation rec {
pname = "ashuffle";
version = "3.4.0";
src = fetchFromGitHub {
owner = "joshkunz";
repo = "ashuffle";
rev = "v${version}";
sha256 = "09q6lwgc1dc8bg1mb9js9qz3xcsxph3548nxzvyb4v8111gixrp7";
fetchSubmodules = true;
};
dontUseCmakeConfigure = true;
nativeBuildInputs = [ cmake pkg-config meson ninja ];
buildInputs = [ mpd_clientlib ];
meta = with stdenv.lib; {
homepage = "https://github.com/joshkunz/ashuffle";
description = "Automatic library-wide shuffle for mpd";
maintainers = [ maintainers.tcbravo ];
platforms = platforms.unix;
license = licenses.mit;
};
}

View File

@ -17,7 +17,7 @@ stdenv.mkDerivation rec {
# When updating, please check if https://github.com/csound/csound/issues/1078 # When updating, please check if https://github.com/csound/csound/issues/1078
# has been fixed in the new version so we can use the normal fluidsynth # has been fixed in the new version so we can use the normal fluidsynth
# version and remove fluidsynth 1.x from nixpkgs again. # version and remove fluidsynth 1.x from nixpkgs again.
version = "6.13.0"; version = "6.14.0";
enableParallelBuilding = true; enableParallelBuilding = true;
@ -27,7 +27,7 @@ stdenv.mkDerivation rec {
owner = "csound"; owner = "csound";
repo = "csound"; repo = "csound";
rev = version; rev = version;
sha256 = "14822ybqyp31z18gky2y9zadr9dkbhabg97y139py73w7v3af1bh"; sha256 = "1sr9knfhbm2m0wpkjq2l5n471vnl51wy4p6j4m95zqybimzb4s2j";
}; };
cmakeFlags = [ "-DBUILD_CSOUND_AC=0" ] # fails to find Score.hpp cmakeFlags = [ "-DBUILD_CSOUND_AC=0" ] # fails to find Score.hpp

View File

@ -7,13 +7,13 @@
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
pname = "ft2-clone"; pname = "ft2-clone";
version = "1.26"; version = "1.28";
src = fetchFromGitHub { src = fetchFromGitHub {
owner = "8bitbubsy"; owner = "8bitbubsy";
repo = "ft2-clone"; repo = "ft2-clone";
rev = "v${version}"; rev = "v${version}";
sha256 = "0fqb4415qy2nwjz7ahi43nk795ifswb2b37sc7p5n9m4yc8h53wv"; sha256 = "1hbcl89cpx9bsafxrjyfx6vrbs4h3lnzmqm12smcvdg8ksfgzj0d";
}; };
nativeBuildInputs = [ cmake ]; nativeBuildInputs = [ cmake ];

View File

@ -1,5 +1,16 @@
{ stdenv, fetchFromGitHub , xorg, freetype, alsaLib, curl, libjack2 { stdenv
, lv2, pkgconfig, libGLU, libGL }: , fetchFromGitHub
, fetchpatch
, xorg
, freetype
, alsaLib
, curl
, libjack2
, lv2
, pkgconfig
, libGLU
, libGL
}:
stdenv.mkDerivation { stdenv.mkDerivation {
version = "0.9.0"; version = "0.9.0";
@ -20,7 +31,15 @@
CXXFLAGS = "-DHAVE_LROUND"; CXXFLAGS = "-DHAVE_LROUND";
patchPhase = '' patches = [
# gcc9 compatibility https://github.com/mtytel/helm/pull/233
(fetchpatch {
url = "https://github.com/mtytel/helm/commit/cb611a80bd5a36d31bfc31212ebbf79aa86c6f08.patch";
sha256 = "1i2289srcfz17c3zzab6f51aznzdj62kk53l4afr32bkjh9s4ixk";
})
];
prePatch = ''
sed -i 's|usr/||g' Makefile sed -i 's|usr/||g' Makefile
''; '';

View File

@ -0,0 +1,28 @@
{ stdenv, fetchzip, pkgconfig, lv2, gtkmm2, boost }:
stdenv.mkDerivation rec {
pname = "lv2-cpp-tools";
version = "1.0.5";
src = fetchzip {
url = "http://deb.debian.org/debian/pool/main/l/lv2-c++-tools/lv2-c++-tools_${version}.orig.tar.bz2";
sha256 = "039bq7d7s2bhfcnlsfq0mqxr9a9iqwg5bwcpxfi24c6yl6krydsi";
};
preConfigure = ''
sed -r 's,/bin/bash,${stdenv.shell},g' -i ./configure
sed -r 's,/sbin/ldconfig,ldconfig,g' -i ./Makefile.template
'';
nativeBuildInputs = [ pkgconfig ];
buildInputs = [ lv2 gtkmm2 boost ];
meta = with stdenv.lib; {
homepage = "http://ll-plugins.nongnu.org/hacking.html";
description = "Tools and libraries that may come in handy when writing LV2 plugins in C++";
license = licenses.gpl3;
maintainers = [ maintainers.michalrus ];
platforms = platforms.linux;
};
}

View File

@ -1,4 +1,4 @@
{ stdenv, fetchFromGitHub, cmake, eigen, libav_all }: { stdenv, fetchFromGitHub, cmake, eigen, libav }:
stdenv.mkDerivation { stdenv.mkDerivation {
pname = "musly"; pname = "musly";
version = "unstable-2017-04-26"; version = "unstable-2017-04-26";
@ -9,7 +9,7 @@ stdenv.mkDerivation {
sha256 = "1q42wvdwy2pac7bhfraqqj2czw7w2m33ms3ifjl8phm7d87i8825"; sha256 = "1q42wvdwy2pac7bhfraqqj2czw7w2m33ms3ifjl8phm7d87i8825";
}; };
nativeBuildInputs = [ cmake ]; nativeBuildInputs = [ cmake ];
buildInputs = [ eigen (libav_all.override { vaapiSupport = stdenv.isLinux; }).libav_11 ]; buildInputs = [ eigen (libav.override { vaapiSupport = stdenv.isLinux; }) ];
fixupPhase = if stdenv.isDarwin then '' fixupPhase = if stdenv.isDarwin then ''
install_name_tool -change libmusly.dylib $out/lib/libmusly.dylib $out/bin/musly install_name_tool -change libmusly.dylib $out/lib/libmusly.dylib $out/bin/musly
install_name_tool -change libmusly_resample.dylib $out/lib/libmusly_resample.dylib $out/bin/musly install_name_tool -change libmusly_resample.dylib $out/lib/libmusly_resample.dylib $out/bin/musly

View File

@ -10,13 +10,13 @@ assert pcreSupport -> pcre != null;
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
pname = "ncmpc"; pname = "ncmpc";
version = "0.38"; version = "0.39";
src = fetchFromGitHub { src = fetchFromGitHub {
owner = "MusicPlayerDaemon"; owner = "MusicPlayerDaemon";
repo = "ncmpc"; repo = "ncmpc";
rev = "v${version}"; rev = "v${version}";
sha256 = "1kidpd1xrfax3v31q93r9g9b7jd841476q47wgd94h1a86b70gs9"; sha256 = "08xrcinfm1a7hjycf8la7gnsxbp3six70ks987dr7j42kd42irfq";
}; };
buildInputs = [ glib ncurses mpd_clientlib boost ] buildInputs = [ glib ncurses mpd_clientlib boost ]

View File

@ -12,13 +12,13 @@ let
; ;
in pythonPackages.buildPythonApplication rec { in pythonPackages.buildPythonApplication rec {
pname = "picard"; pname = "picard";
version = "2.4.1"; version = "2.4.2";
src = fetchFromGitHub { src = fetchFromGitHub {
owner = "metabrainz"; owner = "metabrainz";
repo = pname; repo = pname;
rev = "release-${version}"; rev = "release-${version}";
sha256 = "0s4jmcg1n6ayxf7x0amq67rgn6y127h98s2k4fcna6n9477krrwf"; sha256 = "0sbccsisk9w0gnblvhg7wk1c5ydppldjbvaa0zhl3yrid5a363ah";
}; };
nativeBuildInputs = [ gettext qt5.wrapQtAppsHook qt5.qtbase ] nativeBuildInputs = [ gettext qt5.wrapQtAppsHook qt5.qtbase ]

View File

@ -4,11 +4,11 @@
}: }:
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
name = "snd-20.2"; name = "snd-20.3";
src = fetchurl { src = fetchurl {
url = "mirror://sourceforge/snd/${name}.tar.gz"; url = "mirror://sourceforge/snd/${name}.tar.gz";
sha256 = "0ip4sfyxqlbghlggipmvvqjqs1a7qas0zcmzw8d1nwg6krjkfj0r"; sha256 = "016slh34gb6qqb38m8k9yg48rbhc5p12084szcwvanhh5v7fc7mk";
}; };
nativeBuildInputs = [ pkgconfig ]; nativeBuildInputs = [ pkgconfig ];

View File

@ -0,0 +1,36 @@
{ stdenv, fetchFromGitHub, lv2, fftwFloat, pkgconfig }:
stdenv.mkDerivation rec {
pname = "talentedhack";
version = "1.86";
src = fetchFromGitHub {
owner = "jeremysalwen";
repo = "talentedhack";
rev = "v${version}";
sha256 = "0kwvayalysmk7y49jq0k16al252md8d45z58hphzsksmyz6148bx";
};
nativeBuildInputs = [ pkgconfig ];
buildInputs = [ lv2 fftwFloat ];
# To avoid name clashes, plugins should be compiled with symbols hidden, except for `lv2_descriptor`:
preConfigure = ''
sed -r 's/^CFLAGS.*$/\0 -fvisibility=hidden/' -i Makefile
'';
installPhase = ''
d=$out/lib/lv2/talentedhack.lv2
mkdir -p $d
cp *.so *.ttl $d
'';
meta = with stdenv.lib; {
homepage = "https://github.com/jeremysalwen/TalentedHack";
description = "LV2 port of Autotalent pitch correction plugin";
license = licenses.gpl3;
maintainers = [ maintainers.michalrus ];
platforms = platforms.linux;
};
}
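
The preConfigure sed above appends -fvisibility=hidden to the Makefile's CFLAGS so that only the LV2 entry point, which the LV2 headers normally mark for export, stays visible to hosts. A hedged alternative sketch: assuming the build goes through stdenv's cc wrapper and nothing overrides the flag later, the flag could be injected via NIX_CFLAGS_COMPILE instead of patching the Makefile. This variant is untested; the rest of the expression is copied from above.

  { stdenv, fetchFromGitHub, lv2, fftwFloat, pkgconfig }:

  stdenv.mkDerivation rec {
    pname = "talentedhack";
    version = "1.86";

    src = fetchFromGitHub {
      owner = "jeremysalwen";
      repo = "talentedhack";
      rev = "v${version}";
      sha256 = "0kwvayalysmk7y49jq0k16al252md8d45z58hphzsksmyz6148bx";
    };

    nativeBuildInputs = [ pkgconfig ];
    buildInputs = [ lv2 fftwFloat ];

    # Assumption: the cc wrapper adds these flags to every compiler invocation,
    # so the plugin is built with symbols hidden without editing the Makefile.
    NIX_CFLAGS_COMPILE = "-fvisibility=hidden";

    installPhase = ''
      d=$out/lib/lv2/talentedhack.lv2
      mkdir -p $d
      cp *.so *.ttl $d
    '';
  }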

View File

@ -0,0 +1,89 @@
{ stdenv
, fetchzip
, libX11
, libXi
, libGL
, alsaLib
, SDL2
, autoPatchelfHook
}:
stdenv.mkDerivation rec {
pname = "virtual-ans";
version = "3.0.2c";
src = fetchzip {
url = "https://warmplace.ru/soft/ans/virtual_ans-${version}.zip";
sha256 = "03r1v3l7rd59dakr7ndvgsqchv00ppkvi6sslgf1ng07r3rsvb1n";
};
nativeBuildInputs = [
autoPatchelfHook
];
buildInputs = [
stdenv.cc.cc.lib
libX11
libXi
libGL
alsaLib
SDL2
];
installPhase = ''
mkdir -p $out
cp -R ./* $out/
# Remove all executables except for current architecture
ls -1d $out/START* | grep -v ${startScript} | xargs rm -rf
ls -1d $out/bin/pixilang_linux* | grep -v ${linuxExecutable} | xargs rm -rf
# Start script performs relative search for resources, so it cannot be moved
# to bin directory
ln -s $out/${startScript} $out/bin/virtual-ans
'';
startScript = if stdenv.isx86_32 then "START_LINUX_X86"
else if stdenv.isx86_64 then "START_LINUX_X86_64"
#else if stdenv.isDarwin then "START_MACOS.app" # disabled because I cannot test on Darwin
else abort "Unsupported platform: ${stdenv.platform.kernelArch}.";
linuxExecutable = if stdenv.isx86_32 then "pixilang_linux_x86"
else if stdenv.isx86_64 then "pixilang_linux_x86_64"
else "";
meta = with stdenv.lib; {
description = "Photoelectronic microtonal/spectral musical instrument";
longDescription = ''
Virtual ANS is a software simulator of the unique Russian synthesizer ANS
- photoelectronic musical instrument created by Evgeny Murzin from 1938 to
1958. The ANS made it possible to draw music in the form of a spectrogram
(sonogram), without live instruments and performers. It was used by
Stanislav Kreichi, Alfred Schnittke, Edward Artemiev and other Soviet
composers in their experimental works. You can also hear the sound of the
ANS in Andrei Tarkovsky's movies Solaris, The Mirror, Stalker.
The simulator extends the capabilities of the original instrument. Now
it's a full-featured graphics editor where you can convert sound into an
image, load and play pictures, draw microtonal/spectral music and create
some unusual deep atmospheric sounds. This app is for everyone who loves
experiments and is looking for something new.
Key features:
+ unlimited number of pure tone generators;
+ powerful sonogram editor - you can draw the spectrum and play it at the same time;
+ any sound (from a WAV file or a Microphone/Line-in) can be converted to image (sonogram) and vice versa;
+ support for MIDI devices;
+ polyphonic synth mode with MIDI mapping;
+ supported file formats: WAV, AIFF, PNG, JPEG, GIF;
+ supported sound systems: ASIO, DirectSound, MME, ALSA, OSS, JACK, Audiobus, IAA.
'';
homepage = "https://warmplace.ru/soft/ans/";
license = licenses.free;
# I cannot test the Darwin version, so I'll leave it disabled
platforms = [ "x86_64-linux" "i686-linux" ];
maintainers = with maintainers; [ jacg ];
};
}

View File

@ -0,0 +1,27 @@
{ stdenv, fetchzip, pkgconfig, lvtk, lv2, fftw, lv2-cpp-tools, gtkmm2 }:
stdenv.mkDerivation rec {
pname = "vocproc";
version = "0.2.1";
src = fetchzip {
url = "https://hyperglitch.com/files/vocproc/${pname}-${version}.default.tar.gz";
sha256 = "07a1scyz14mg2jdbw6fpv4qg91zsw61qqii64n9qbnny9d5pn8n2";
};
nativeBuildInputs = [ pkgconfig ];
buildInputs = [ lv2 fftw lv2-cpp-tools gtkmm2 ];
makeFlags = [
"INSTALL_DIR=$(out)/lib/lv2"
];
meta = with stdenv.lib; {
homepage = "https://hyperglitch.com/dev/VocProc";
description = "An LV2 plugin for pitch shifting (with or without formant correction), vocoding, automatic pitch correction and harmonizing of singing voice (harmonizer)";
license = licenses.gpl2;
maintainers = [ maintainers.michalrus ];
platforms = platforms.linux;
};
}

View File

@@ -7,13 +7,13 @@ with stdenv.lib;
 mkDerivation rec {
   name = "bitcoin" + (toString (optional (!withGui) "d")) + "-abc-" + version;
-  version = "0.21.12";
+  version = "0.21.13";

   src = fetchFromGitHub {
     owner = "bitcoin-ABC";
     repo = "bitcoin-abc";
     rev = "v${version}";
-    sha256 = "1mad3aqfwrxi06135nf8hv13d67nilmxpx4dw5vjcy1zi3lljj1j";
+    sha256 = "1x8xcdi1vcskggk9bqkwr3ah4vi9b7sj2h8hf7spac6dvz8lmzav";
   };

   patches = [ ./fix-bitcoin-qt-build.patch ];
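
A bump like this also refreshes the fixed-output hash of src. One common workflow, not necessarily the one used here, is to evaluate the fetcher with a fake hash and copy the real value from the resulting hash-mismatch error; a sketch:

{ lib, fetchFromGitHub }:

fetchFromGitHub {
  owner = "bitcoin-ABC";
  repo = "bitcoin-abc";
  rev = "v0.21.13";
  # lib.fakeSha256 is an all-zero placeholder; the failed fetch prints the real hash.
  sha256 = lib.fakeSha256;
}

Alternatively, since this fetch does not use submodules, nix-prefetch-url --unpack on the matching GitHub release tarball reports the same hash.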

View File

@@ -4,11 +4,11 @@
 with stdenv.lib;

 stdenv.mkDerivation rec {
   pname = "clightning";
-  version = "0.9.0";
+  version = "0.9.0-1";

   src = fetchurl {
     url = "https://github.com/ElementsProject/lightning/releases/download/v${version}/clightning-v${version}.zip";
-    sha256 = "11ig5bqxvhx82gq9nl7c5iqaf3x8xbwfx7cf2318pyqdimz4r1v6";
+    sha256 = "01cwcrqysqsrf96bbbj0grm8j5m46a3acgwy0kzxdx05jdzld9sc";
   };

   enableParallelBuilding = true;

View File

@@ -2,11 +2,11 @@
 stdenv.mkDerivation rec {
   pname = "ergo";
-  version = "3.3.0";
+  version = "3.3.1";

   src = fetchurl {
     url = "https://github.com/ergoplatform/ergo/releases/download/v${version}/ergo-${version}.jar";
-    sha256 = "1lja4ba6bm1jk0lh2ra5v8i5g3f1gy7mk2b3yrx1w7x02ll9gr06";
+    sha256 = "1qr1vfb6mhm2hxl2ksydkhadm7phadn93lwm3f9zni01plk56bb5";
   };

   nativeBuildInputs = [ makeWrapper ];

View File

@@ -2,13 +2,13 @@
 buildGoModule rec {
   pname = "go-ethereum";
-  version = "1.9.19";
+  version = "1.9.20";

   src = fetchFromGitHub {
     owner = "ethereum";
     repo = pname;
     rev = "v${version}";
-    sha256 = "08wf7qklk31dky2z0l2j9vbyr8721gkvy4dsc60afwrwihwd8lrp";
+    sha256 = "031cbl8yqw5g5yrm5h1x8s5ckdw2xkym46009l579zvafn2vcnj7";
   };

   runVend = true;

@@ -42,6 +42,6 @@ buildGoModule rec {
     homepage = "https://geth.ethereum.org/";
     description = "Official golang implementation of the Ethereum protocol";
     license = with licenses; [ lgpl3 gpl3 ];
-    maintainers = with maintainers; [ adisbladis lionello xrelkd ];
+    maintainers = with maintainers; [ adisbladis lionello xrelkd RaghavSood ];
   };
 }
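
buildGoModule also pins the vendored Go dependencies with a separate vendorSha256 that lies outside this hunk, so a version bump can invalidate it as well. A hedged sketch of the overall shape, with a placeholder standing in for the real vendor hash kept in the full file:

{ lib, buildGoModule, fetchFromGitHub }:

buildGoModule rec {
  pname = "go-ethereum";
  version = "1.9.20";

  src = fetchFromGitHub {
    owner = "ethereum";
    repo = pname;
    rev = "v${version}";
    sha256 = "031cbl8yqw5g5yrm5h1x8s5ckdw2xkym46009l579zvafn2vcnj7";
  };

  # runVend is carried over from the hunk above; the vendor hash below is a
  # placeholder, not the value used by the package.
  runVend = true;
  vendorSha256 = lib.fakeSha256;
}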

View File

@@ -10,13 +10,13 @@ assert stdenv.isDarwin -> IOKit != null;
 stdenv.mkDerivation rec {
   pname = "monero";
-  version = "0.16.0.1";
+  version = "0.16.0.3";

   src = fetchFromGitHub {
     owner = "monero-project";
     repo = "monero";
     rev = "v${version}";
-    sha256 = "0n2cviqm8radpynx70fc0819k1xknjc58cvb4whlc49ilyvh8ky6";
+    sha256 = "1r9x3712vhb24dxxirfiwj5f9x0h4m7x0ngiiavf5983dfdlgz33";
     fetchSubmodules = true;
   };
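
With fetchSubmodules = true the hash covers the submodule contents as well, so prefetching the plain GitHub tarball would give the wrong value; the fetcher instead takes a git checkout that includes submodules. Restated as a standalone, hedged sketch:

{ fetchFromGitHub }:

fetchFromGitHub {
  owner = "monero-project";
  repo = "monero";
  rev = "v0.16.0.3";
  # With fetchSubmodules the fetcher falls back to a git clone, and the
  # fixed-output hash is taken over the checkout including submodules.
  fetchSubmodules = true;
  sha256 = "1r9x3712vhb24dxxirfiwj5f9x0h4m7x0ngiiavf5983dfdlgz33";
}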

View File

@@ -19,9 +19,9 @@ let
     sha256Hash = "11lkwcbzdl86cyz4lci65cx9z5jjhrc4z40maqx2r5hw1xka9290";
   };
   latestVersion = { # canary & dev
-    version = "4.2.0.5"; # "Android Studio 4.2 Canary 5"
-    build = "201.6682321";
-    sha256Hash = "076q6d7kmi0wcsqak7n6ggp1qns4xj1134xcpdzb92qk3dmg3wrh";
+    version = "4.2.0.7"; # "Android Studio 4.2 Canary 7"
+    build = "201.6720134";
+    sha256Hash = "1c9s6rd0z596qr7hbil5rl3fqby7c8h7ma52d1qj5rxra73k77nz";
   };
 in {
   # Attributes are named by their corresponding release channels

View File

@@ -1,18 +1,18 @@
-{ stdenv, pkgconfig, qt5, fetchFromGitHub }:
+{ stdenv, mkDerivation, pkgconfig, qmake, qttools, qtbase, qtsvg, qtx11extras, fetchFromGitHub }:

-with qt5;
-
-stdenv.mkDerivation rec {
-  version = "0.10.0";
+mkDerivation rec {
   pname = "featherpad";
+  version = "0.10.0";

   src = fetchFromGitHub {
     owner = "tsujan";
     repo = "FeatherPad";
     rev = "V${version}";
     sha256 = "1wrbs6kni9s3x39cckm9kzpglryxn5vyarilvh9pafbzpc6rc57p";
   };

   nativeBuildInputs = [ qmake pkgconfig qttools ];
   buildInputs = [ qtbase qtsvg qtx11extras ];

   meta = with stdenv.lib; {
     description = "Lightweight Qt5 Plain-Text Editor for Linux";
     homepage = "https://github.com/tsujan/FeatherPad";

View File

@@ -0,0 +1,27 @@
{ stdenv, fetchFromGitLab }:

stdenv.mkDerivation {
  name = "case.kak";
  version = "unstable-2020-04-06";

  src = fetchFromGitLab {
    owner = "FlyingWombat";
    repo = "case.kak";
    rev = "6f1511820aa3abfa118e0f856118adc8113e2185";
    sha256 = "002njrlwgakqgp74wivbppr9qyn57dn4n5bxkr6k6nglk9qndwdp";
  };

  installPhase = ''
    mkdir -p $out/share/kak/autoload/plugins
    cp -r rc/case.kak $out/share/kak/autoload/plugins
  '';

  meta = with stdenv.lib; {
    description = "Case-convention conversion commands for Kakoune";
    homepage = "https://gitlab.com/FlyingWombat/case.kak";
    license = licenses.unlicense;
    maintainers = with maintainers; [ eraserhd ];
    platforms = platforms.all;
  };
}
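
Files placed under share/kak/autoload/plugins are intended to be picked up by the wrapped Kakoune in nixpkgs rather than by a bare kakoune binary. A hedged usage sketch; the override-with-plugins pattern is assumed from how other Kakoune plugins are consumed, and the attribute name case-kak is a guess:

with import <nixpkgs> {};

# Builds a Kakoune that autoloads the plugin's share/kak/autoload tree.
kakoune.override {
  plugins = with kakounePlugins; [ case-kak ];
}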

Some files were not shown because too many files have changed in this diff.